metadata (dict) | text (string, lengths 60–3.49M) |
---|---|
{
"source": "JoshYuJump/django-rework",
"score": 3
} |
#### File: devops/tasks/deploy.py
```python
import os
from ..hosts import get_host_value, connect
from ... import devops
from ...utils import say
class Deploy:
def __init__(self, c):
self.c = connect(c)
self.host_value = get_host_value(c)[1]
self.project = devops.PROJECT
self.env = devops.ENV
self.host = self.host_value.get('host')
self.port = self.host_value.get('port', 22)
self.password = self.host_value.get('password')
# Reload nginx when its configuration file has changed
self._should_reload_nginx = False
def __call__(self, *args, **kwargs):
# supervisor service name
service = f'{self.project}_{self.env}'
if self.env == 'prod':
env_root = f'/opt/projects/'
branch = 'master'
elif self.env == 'test':
env_root = f'/opt/test-projects/'
branch = 'test'
elif self.env == 'dev':
env_root = f'/opt/dev-projects/'
branch = 'dev'
else:
raise Exception('Unsupported environment to deploy.')
root = f'{env_root}{self.project}-server/'
say(f'Project root is: {root}')
self.pull(branch, root)
if kwargs.get('infrastructure'):
self.setup_infrastructure(root)
if kwargs.get('requirements_update'):
self.update_requirement(root)
self.migrate(root)
self.collect_static(root)
self.restart(service)
def _copy_nginx(self, root):
"""Copy nginx files"""
nginx_path = f'{root}.deploy/nginx/'
origin = f'{nginx_path}{self.project}_{self.env}.conf'
destination = f'/etc/nginx/conf.d/'
self.c.run(f'cp {origin} {destination}')
self._should_reload_nginx = True
say('Copied nginx configuration successfully')
def _copy_supervisor(self, root):
"""Copy supervisor files
Compatible with Django-Rework ~0.2:
supervisor file in django-rework <= 0.2:
.deploy/supervisor/{self.project}_supervisor_{self.env}.conf
supervisor file in django-rework >= 0.3:
.deploy/supervisor/{self.project}_{self.env}.conf
"""
supervisor_path = f'.deploy/supervisor/'
origin = f'{supervisor_path}{self.project}_{self.env}.conf'
if not os.path.exists(origin):
say(f'Supervisor file: {origin} does not exist, trying another...')
origin = f'{supervisor_path}{self.project}_supervisor_{self.env}.conf'
origin = f'{root}{origin}'
destination = f'/etc/supervisor/conf.d/'
self.c.run(f'cp {origin} {destination}')
say('Copied supervisor configuration successfully')
def pull(self, branch, root):
# Pull latest code
say('Pull latest code from remote git')
self.c.run(f'cd {root} && git checkout {branch} && git pull')
def setup_infrastructure(self, root):
say('Setup infrastructure')
self._copy_supervisor(root)
self._copy_nginx(root)
def update_requirement(self, root):
# Update requirements
say('Install requirements')
self.c.run(f'cd {root} && python3 -m pip install -r requirements.txt')
def migrate(self, root):
# Models migrate
say('Migrate database')
self.c.run(
f'cd {root} && python3 manage.py migrate --settings={self.project}.settings.{self.env}'
)
def collect_static(self, root):
# Collect static
say('Collect static')
settings_suffix = f'--settings={self.project}.settings.{self.env}'
self.c.run(f'cd {root} && python3 manage.py collectstatic {settings_suffix} --no-input')
def restart(self, service):
# Restart infrastructure
say('Restart Supervisor and Nginx (if conf changed)')
self.c.run(f'supervisorctl restart {service}')
if self._should_reload_nginx:
self.c.run(f'nginx -s reload')
```
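Below is a minimal usage sketch (not part of the repository) showing how `Deploy` might be wired into a fabric task. The import paths and the way the fabric context `c` is passed through are assumptions inferred from the relative imports above; `Environment` is the helper shown in the following file.
```python
# Hypothetical fabfile task (illustrative only). Import paths are assumed from
# the package layout implied by the relative imports in devops/tasks/deploy.py.
from fabric import task

from rework.devops.tasks.deploy import Deploy
from rework.devops.tasks.environment import Environment


@task
def deploy(c, env='test', infrastructure=False, requirements_update=False):
    # Select the environment first: Deploy.__init__ reads devops.ENV.
    Environment.set_env(env)
    Deploy(c)(
        infrastructure=infrastructure,            # copy nginx/supervisor confs
        requirements_update=requirements_update,  # pip install -r requirements.txt
    )
```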
#### File: devops/tasks/environment.py
```python
from ... import devops
class Environment:
def __init__(self, c):
self.c = c
@staticmethod
def set_env(env):
devops.ENV = env
```
#### File: devops/tasks/setup_server.py
```python
from ..hosts import hosts, get_host_value, connect
from ...utils import say
class SetupServer:
def __init__(self, c):
self.c = connect(c)
self.host_value = get_host_value(c)[1]
def __call__(self, *args, **kwargs):
self.setup_python3()
self.check_components('nginx') and self.setup_nginx()
self.check_components('supervisor') and self.setup_supervisor()
self.check_components('mysql') and self.setup_mysql()
self.check_components('redis') and self.setup_redis()
def check_components(self, component):
return component not in self.host_value.get('exclude_components', [])
def setup_python3(self):
"""Install python3 and uWSGI"""
version = '3.7.9'
major_version = version.rsplit('.', 1)[0]
# Install Python
self.c.run('yum -y update')
self.c.run('yum groupinstall "Development tools"')
try:
self.c.run('yum -y install wget gcc make zlib-devel')
except Exception as ex:
print('ex', ex)
self.c.run(f'wget https://www.python.org/ftp/python/{version}/Python-{version}.tgz')
self.c.run(f'tar xzf Python-{version}.tgz')
self.c.run(
f'cd Python-{version} && ./configure --with-ssl --prefix=/usr/local && make altinstall'
)
self.c.run(f'ln -s /usr/local/bin/python{major_version} /usr/bin/python3')
self.c.run('python3 -V')
say('Clean up Python setup files')
self.c.run(f'rm -rf Python-{version}')
# Install Gunicorn
pypi_mirror_suffix = ' -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com'
self.c.run(f'python3 -m pip install gunicorn {pypi_mirror_suffix}')
def setup_nginx(self):
# Install Nginx
self.c.run('yum install -y nginx')
def setup_supervisor(self):
# Install Supervisor under Python 2
self.c.run('curl https://bootstrap.pypa.io/pip/2.7/get-pip.py -o get-pip.py')
self.c.run('python get-pip.py')
self.c.run('python -m pip install supervisor==4.1.0')
self.c.run('supervisord -c /etc/supervisor/supervisord.conf') # launch supervisord
def setup_mysql(self):
download_url = 'https://dev.mysql.com/get/mysql80-community-release-el7-1.noarch.rpm'
self.c.run(f'sudo rpm -Uvh {download_url}')
self.c.run('sudo yum --enablerepo=mysql80-community install mysql-community-server')
self.c.run('systemctl start mysqld.service')
def setup_redis(self):
version = '4.0.14'
download_url = f'http://download.redis.io/releases/redis-{version}.tar.gz'
self.c.run(f'curl -o redis-{version}.tar.gz {download_url}')
self.c.run(f'tar -zxvf redis-{version}.tar.gz')
self.c.run(f'cd redis-{version} && make')
self.c.run(f'cd redis-{version}/src && make install PREFIX=/usr/local/redis')
self.c.run(f'mkdir -p /usr/local/redis/conf')
self.c.run(f'cp redis-{version}/redis.conf /usr/local/redis/conf')
# start redis
self.c.run('cd /usr/local/redis && bin/redis-server ./conf/redis.conf')
```
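`SetupServer` decides which components to install from a per-host mapping returned by `get_host_value`. `hosts.py` is not shown, so the sketch below only illustrates the shape that `check_components` implies; any key other than the ones read by the code above is an assumption.
```python
# Illustrative only: a hosts entry shaped the way SetupServer reads it.
# Only 'host', 'port', 'password' and 'exclude_components' are referenced by the
# code above; everything else about the real hosts.py is unknown here.
HOSTS = {
    'prod': {
        'host': '203.0.113.10',
        'port': 22,
        'password': '<secret>',
        # Components listed here are skipped by SetupServer.__call__
        'exclude_components': ['mysql', 'redis'],
    },
}

def should_install(host_value, component):
    """Mirrors SetupServer.check_components for a single hosts entry."""
    return component not in host_value.get('exclude_components', [])

assert should_install(HOSTS['prod'], 'nginx') is True
assert should_install(HOSTS['prod'], 'mysql') is False
```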
#### File: core/management/app.py
```python
import os
import re
import subprocess
from .handlers.urls import UrlsHandle
from .. import utils
from ..utils import say
def setup_auth_user_model(content):
# Set `AUTH_USER_MODEL` to `basic.User` when the app being added is `users`,
# and generate the `basic` app
result = subprocess.run(["django-admin", "startapp", "basic"])
if result.returncode != 0:
say('Failed to generate the `basic` app!', icon='🌶 ', wrap='C')
return content
pattern = '# {!AUTH_USER_MODEL}'
block = """AUTH_USER_MODEL = 'basic.User'"""
content = content.replace(pattern, block)
return content
def add(params):
"""Add apps to project"""
app = params[0].lower()
say(f'Start adding app: {app}')
# Add app name to base settings
settings_file = os.path.join(utils.get_settings_path(), 'base', '__init__.py')
with open(settings_file, 'r+') as file:
# check whether the app is already in INSTALLED_APPS
content = file.read()
app_full_name = f'rework.contrib.{app}'
installed_apps_pattern = r'INSTALLED_APPS\s*\=\s*\[[\s\S]*?\]'
installed_apps_match = re.search(installed_apps_pattern, content)
if not installed_apps_match:
print('There is no `INSTALLED_APPS` block in your settings')
return False
installed_apps_block = installed_apps_match.group()
for exist_app in re.findall(r"(?<=').*(?=')", installed_apps_block):
if exist_app == app_full_name:
say(f'[ERROR] App {app_full_name} already exists')
return False
installed_apps_block = re.sub(
r'\n]',
f"\n '{app_full_name}',\n]",
installed_apps_block,
)
if app == 'users':
installed_apps_block = re.sub(
r'\n]',
f"\n 'basic',\n]",
installed_apps_block,
)
say(f'installed_apps_block {installed_apps_block}')
content = re.sub(installed_apps_pattern, installed_apps_block, content)
if app == 'users':
content = setup_auth_user_model(content)
say(f'content {content}')
file.seek(0)
file.truncate()
file.write(content)
# Add the app's include() URL to the root urls
urls_handler = UrlsHandle()
urls_handler.add_include_urls(app)
say('App added successfully!')
```
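`add()` edits the settings purely with regular expressions: it finds the `INSTALLED_APPS` list, checks the quoted entries, and splices the new app in just before the closing bracket. The standalone sketch below reproduces that mechanism on an illustrative settings string.
```python
import re

settings_src = """
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
]
"""

app_full_name = 'rework.contrib.users'
installed_apps_pattern = r'INSTALLED_APPS\s*=\s*\[[\s\S]*?\]'

block = re.search(installed_apps_pattern, settings_src).group()
if app_full_name not in re.findall(r"(?<=').*(?=')", block):
    # Same trick as add(): replace the closing "\n]" with the new entry plus "\n]".
    block = re.sub(r'\n]', f"\n    '{app_full_name}',\n]", block)

print(re.sub(installed_apps_pattern, block, settings_src))
# INSTALLED_APPS = [
#     'django.contrib.admin',
#     'django.contrib.auth',
#     'rework.contrib.users',
# ]
```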
#### File: core/management/deploy.py
```python
import os
from ..utils import say, copy_template_to_file, get_project_name
from ..utils.command import BaseCommand
from ... import core
class DeployCommand(BaseCommand):
def handle(self, params):
if params == ['--init']:
# template variables
project = get_project_name()
kwargs = {
'django_rework_version': core.__version__,
'project': project,
}
deploy_path = os.path.join(self.base_dir, 'deploy')
copy_queues = [
('.deploy/nginx/project_prod.conf', deploy_path),
('.deploy/nginx/project_test.conf', deploy_path),
('.deploy/supervisor/supervisord.conf', deploy_path),
('.deploy/supervisor/project_prod.conf', deploy_path),
('.deploy/supervisor/project_test.conf', deploy_path),
]
for q in copy_queues:
copy_template_to_file(q[0], self.base_dir, **kwargs)
say('Deploy files copied successfully')
else:
say('Unknown command args!')
def __call__(self, params):
return self.handle(params)
```
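A hedged sketch of invoking `DeployCommand` directly. The normal entry point is presumably the `rework` CLI, and `BaseCommand`'s constructor is not shown here, so the instantiation below is an assumption.
```python
# Hypothetical direct invocation (illustrative only).
from rework.core.management.deploy import DeployCommand

# Assumption: BaseCommand takes no required constructor arguments and resolves
# self.base_dir itself; the real BaseCommand lives in core/utils/command.py and
# is not shown above.
command = DeployCommand()
command(['--init'])   # copies the .deploy/ nginx and supervisor templates
command(['--other'])  # anything else prints "Unknown command args!"
```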
#### File: management/handlers/settings.py
```python
import os
import re
import shutil
from rework import core
from rework.core.utils import copy_template_to_file
class SettingsHandle:
"""
Handle django settings files
"""
def __init__(self, project, path):
self.base_dir = os.getcwd()
self.project = project
self.path = path  # settings root path, generally the project name
self.base_settings_file = None
def _create_settings_package(self):
# 1. Make a package named `settings`
package = os.path.join(self.path, 'settings')
base_settings_path = os.path.join(package, 'base')
os.makedirs(base_settings_path)
# 2. Move origin settings.py to settings/base/__init__.py
origin_settings_file = os.path.join(self.path, 'settings.py')
target_settings_file = os.path.join(base_settings_path, '__init__.py')
shutil.move(origin_settings_file, target_settings_file)
self.base_settings_file = target_settings_file
def create_multi_envs(self):
kwargs = {
'django_rework_version': core.__version__,
'project': self.project,
}
settings_tpl_path = 'project/settings/'
envs = ['prod', 'test', 'dev', 'local']
for env in envs:
copy_template_to_file(f'{settings_tpl_path}{env}.py', self.base_dir, **kwargs)
@staticmethod
def _save(f, content):
f.seek(0)
f.truncate()
f.write(content)
def _add_tags(self):
with open(self.base_settings_file, 'r+') as f:
content = f.read()
# AUTH_USER_MODEL tag
content = content.replace(
'# Password validation', '\n'.join([
'# {!AUTH_USER_MODEL}',
'',
'# Password validation',
])
)
self._save(f, content)
def _add_installed_apps(self):
"""Initialize apps pre-installed"""
with open(self.base_settings_file, 'r+') as f:
content = f.read()
content = self._insert_app_to_content(content, 'rest_framework')
content = self._insert_app_to_content(content, 'rest_framework_simplejwt')
self._save(f, content)
@staticmethod
def _insert_app_to_content(content, app):
installed_apps_pattern = r'INSTALLED_APPS\s*\=\s*\[[\s\S]*?\]'
installed_apps_match = re.search(installed_apps_pattern, content)
if not installed_apps_match:
print('There is no `INSTALLED_APPS` block in your settings')
return False
installed_apps_block = installed_apps_match.group()
for exist_app in re.findall(r"(?<=').*(?=')", installed_apps_block):
if exist_app == app:
return False
installed_apps_block = re.sub(
r'\n]',
f"\n '{app}',\n]",
installed_apps_block,
)
content = re.sub(installed_apps_pattern, installed_apps_block, content)
return content
def _add_rest_framework_setting(self):
with open(self.base_settings_file, 'r+') as f:
content = f.read()
content += """
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
)
}
"""
self._save(f, content)
def initialize(self):
self._create_settings_package()
# Add multi-environment settings files
self.create_multi_envs()
# Add template tags to base/__init__.py
self._add_tags()
self._add_installed_apps()
self._add_rest_framework_setting()
```
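`initialize()` turns a stock `startproject` layout into a multi-environment settings package. The sketch below shows the intended before/after layout and a direct call; the destination paths of the env files come from `copy_template_to_file`, which is not shown, so they are inferred from the template names.
```python
# Illustrative only: the restructuring performed for a project named "mysite",
# run from the project root. Env-file destinations are inferred from the
# template names ('project/settings/<env>.py').
#
#   before:  mysite/settings.py
#   after:   mysite/settings/base/__init__.py   (original settings + tags + extra apps)
#            mysite/settings/prod.py
#            mysite/settings/test.py
#            mysite/settings/dev.py
#            mysite/settings/local.py
from rework.core.management.handlers.settings import SettingsHandle

handler = SettingsHandle(project='mysite', path='mysite')
handler.initialize()
```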
#### File: core/management/project.py
```python
import os
import subprocess
from .handlers.settings import SettingsHandle
from ..utils import say, copy_template_to_file
from ... import core
def init(params):
"""Initialize django rework project"""
project = params[0]
base_dir = os.getcwd()
project_dir = base_dir
say(f'Initializing project: ``{project}`` using `django-admin` command')
result = subprocess.run(["django-admin", "startproject", *params])
if result.returncode != 0:
say('Initialization failed!', icon='🌶 ', wrap='C')
return False
# Change the settings files to support multiple environments
say('Change the settings files to support multiple environments')
settings_folder = os.path.join(project_dir, project)
settings_handler = SettingsHandle(project=project, path=settings_folder)
# template variables
kwargs = {
'django_rework_version': core.__version__,
'project': project,
}
settings_handler.initialize()
# fabric DevOps
copy_template_to_file('fabfile.py', base_dir, **kwargs)
# Others
copy_template_to_file('.editorconfig', base_dir, **kwargs)
copy_template_to_file('.gitignore', base_dir, **kwargs)
copy_template_to_file('.style.yapf', base_dir, **kwargs)
copy_template_to_file('requirements.txt', base_dir, **kwargs)
say('Initialized successfully!', icon='🎨', wrap='C')
``` |
{
"source": "JoshYuJump/grpc_client",
"score": 3
} |
#### File: grpc_client/grpc_client/base_client.py
```python
import logging
from typing import Callable, Optional, Any
from .base_hook import BaseHook
class BaseClient:
"""
Calls a gRPC endpoint to execute an action
:param host: The host to use when connecting.
:type host: str
:param port: The port to use when connecting.
:type port: str
:param stub_class: The stub client to use for this gRPC call
:type stub_class: gRPC stub class generated from proto file
:param call_func: The client function name to call the gRPC endpoint
:type call_func: gRPC client function name for the endpoint generated from proto file, str
:param data: The data to pass to the rpc call
:type data: A dict with key value pairs as kwargs of the call_func
:param streaming: A flag to indicate if the call is a streaming call
:type streaming: boolean
:param response_callback: The callback function to process the response from gRPC call
:type response_callback: A python function that process the response from gRPC call,
takes in response object
:param log_response: A flag to indicate if we need to log the response
:type log_response: boolean
"""
def __init__(self, *, host: str, port: str, stub_class: Callable) -> None:
self.host = host
self.port = port
self.stub_class = stub_class
self.log = logging.getLogger(self.__class__.__module__ + '.' +
self.__class__.__name__)
def _get_grpc_hook(self) -> BaseHook:
return BaseHook(self.host, self.port)
def execute(self,
call_func: str,
data: Optional[dict] = None,
streaming: bool = False,
response_callback: Optional[Callable] = None,
log_response: bool = False) -> Any:
hook = self._get_grpc_hook()
self.log.info("Calling gRPC service")
# the grpc hook always yields responses
responses = hook.run(self.stub_class,
call_func,
streaming=streaming,
data=data)
for response in responses:
if streaming:
self._handle_response(response, log_response, response_callback)
if response_callback:
self._handle_response(response, log_response, response_callback)
else:
return response
def _handle_response(self, response: Any, log_response,
response_callback) -> None:
if log_response:
self.log.info(repr(response))
if response_callback:
response_callback(response)
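# --- Hypothetical usage sketch (not part of this module) ---------------------
# The stub and request types below would come from a proto compiled with
# grpcio-tools; "helloworld" is illustrative, and BaseHook (not shown) is
# assumed to pass `data` as keyword arguments to the stub method.
#
#   import helloworld_pb2, helloworld_pb2_grpc
#
#   client = BaseClient(host='localhost', port='50051',
#                       stub_class=helloworld_pb2_grpc.GreeterStub)
#   reply = client.execute(
#       call_func='SayHello',
#       data={'request': helloworld_pb2.HelloRequest(name='world')})
#
# For a server-streaming method, pass streaming=True and a response_callback;
# execute() then iterates every response instead of returning the first one.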
``` |
{
"source": "joshz123/tensorflow",
"score": 2
} |
#### File: compiler/tests/fused_batchnorm_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import test_utils
from tensorflow.compiler.tests import xla_test
from tensorflow.python.compat import compat
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
DATA_FORMATS = (
("_data_format_NHWC", "NHWC"),
("_data_format_NCHW", "NCHW"),
)
DATA_FORMATS_AND_AVG_FACTORS = (
("_data_format_NHWC_no_averaging", "NHWC", 1.0),
("_data_format_NHWC_averaging", "NHWC", 0.6),
("_data_format_NCHW_no_averaging", "NCHW", 1.0),
("_data_format_NCHW_averaging", "NCHW", 0.6),
)
class FusedBatchNormTest(xla_test.XLATestCase, parameterized.TestCase):
def _reference_training(self, x, scale, offset, old_mean, old_var, epsilon,
exponential_avg_factor, data_format):
if data_format != "NHWC":
raise ValueError("data_format must be NHWC, got %s." % data_format)
x_square = x * x
x_square_sum = np.sum(x_square, (0, 1, 2))
x_sum = np.sum(x, axis=(0, 1, 2))
element_count = np.size(x) / int(np.shape(x)[-1])
mean = x_sum / element_count
var = x_square_sum / element_count - mean * mean
factor = element_count / max(element_count - 1, 1)
corrected_var = var * factor
normalized = (x - mean) / np.sqrt(var + epsilon)
if exponential_avg_factor != 1.0:
mean = (1.0 -
exponential_avg_factor) * old_mean + exponential_avg_factor * mean
corrected_var = (1.0 - exponential_avg_factor
) * old_var + exponential_avg_factor * corrected_var
return (normalized * scale + offset), mean, var, corrected_var
def _reference_grad(self, x, grad_y, scale, mean, var, epsilon, data_format):
# Use the following formulas to calculate gradients:
# grad_scale =
# sum(grad_y * (x - mean)) * rsqrt(var + epsilon)
#
# grad_offset = sum(output_y)
#
# grad_x =
# 1/N * scale * rsqrt(var + epsilon) * (N * grad_y - sum(grad_y) -
# (x - mean) * sum(grad_y * (x - mean)) / (var + epsilon))
if data_format != "NHWC":
raise ValueError("data_format must be NHWC, got %s." % data_format)
grad_x = scale * (grad_y - np.mean(grad_y, axis=(0, 1, 2)) -
(x - mean) * np.mean(grad_y *
(x - mean), axis=(0, 1, 2)) /
(var + epsilon)) / np.sqrt(var + epsilon)
grad_scale = np.sum(
grad_y * (x - mean) / np.sqrt(var + epsilon), axis=(0, 1, 2))
grad_offset = np.sum(grad_y, axis=(0, 1, 2))
return grad_x, grad_scale, grad_offset
@parameterized.named_parameters(*DATA_FORMATS)
def testInference(self, data_format):
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
exponential_avg_factor = 1.0
data_format_src = "NHWC"
y_ref, mean_ref, var_ref, _ = self._reference_training(
x_val, scale_val, offset_val, None, None, epsilon,
exponential_avg_factor, data_format_src)
with self.session() as sess, self.test_scope():
# To avoid constant folding
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
y_ref_converted = test_utils.ConvertBetweenDataFormats(
y_ref, data_format_src, data_format)
t_val = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
offset = array_ops.placeholder(
np.float32, shape=scale_shape, name="offset")
y, mean, variance = nn.fused_batch_norm(
t_val,
scale,
offset,
mean=mean_ref,
variance=var_ref,
epsilon=epsilon,
data_format=data_format,
is_training=False)
y_val, _, _ = sess.run([y, mean, variance], {
t_val: x_val_converted,
scale: scale_val,
offset: offset_val
})
self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
def _testLearning(self, use_gradient_checker, data_format,
exponential_avg_factor):
if not compat.forward_compatible(2020, 3,
6) and exponential_avg_factor != 1.0:
self.skipTest("running average not available.")
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val_corr = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
data_format_src = "NHWC"
# When in training mode, fused_batchnorm applies an implicit Bessel's
# correction. So we have to use the corrected variance here, as well.
y_ref, mean_ref, _, var_ref_corr = self._reference_training(
x_val, scale_val, offset_val, mean_val, var_val_corr, epsilon,
exponential_avg_factor, data_format_src)
with self.session() as sess, self.test_scope():
# To avoid constant folding
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
y_ref_converted = test_utils.ConvertBetweenDataFormats(
y_ref, data_format_src, data_format)
t_val = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
offset = array_ops.placeholder(
np.float32, shape=scale_shape, name="offset")
if exponential_avg_factor == 1.0:
old_mean = None
old_var = None
else:
old_mean = array_ops.placeholder(
np.float32, shape=scale_shape, name="old_mean")
old_var = array_ops.placeholder(
np.float32, shape=scale_shape, name="old_var")
y, mean, var = nn.fused_batch_norm(
t_val,
scale,
offset,
mean=old_mean,
variance=old_var,
epsilon=epsilon,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format,
is_training=True)
if exponential_avg_factor == 1.0:
feed_dict = {
t_val: x_val_converted,
scale: scale_val,
offset: offset_val,
}
else:
feed_dict = {
t_val: x_val_converted,
scale: scale_val,
offset: offset_val,
old_mean: mean_val,
old_var: var_val_corr
}
# Check gradient.
if use_gradient_checker:
err = gradient_checker.compute_gradient_error(
t_val,
x_val_converted.shape,
y,
x_val_converted.shape,
extra_feed_dict=feed_dict)
self.assertLess(err, 1e-3)
y_tf, mean_tf, var_tf = sess.run([y, mean, var], feed_dict)
self.assertAllClose(y_tf, y_ref_converted, atol=1e-3)
self.assertAllClose(mean_tf, mean_ref, atol=1e-3)
self.assertAllClose(var_tf, var_ref_corr, atol=1e-3)
@parameterized.named_parameters(*DATA_FORMATS_AND_AVG_FACTORS)
def testLearning(self, data_format, exponential_avg_factor):
self._testLearning(False, data_format, exponential_avg_factor)
@parameterized.named_parameters(*DATA_FORMATS_AND_AVG_FACTORS)
def testLearningWithGradientChecker(self, data_format,
exponential_avg_factor):
self._testLearning(True, data_format, exponential_avg_factor)
@parameterized.named_parameters(*DATA_FORMATS)
def testGradientTraining(self, data_format):
# TODO(b/64270657): Use gradient_checker here in addition to comparing with
# this reference implementation.
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
grad_val = np.random.random_sample(x_shape).astype(np.float32)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
# The TensorFlow FusedBatchNormGrad training operation takes two inputs with
# implementation defined values. In theory the only correct values for these
# inputs are the corresponding reserve_space_{1|2} outputs from the
# FusedBatchNorm training operation. However, in practice, we rely on the
# first one being mean on {C|G}PU, and the second one being variance on CPU
# and inverse(sqrt(variance + epsilon)) on GPU (we test this assumption
# separately).
reserve_space_1_val = mean_val
if self.device == "XLA_GPU":
reserve_space_2_val = np.reciprocal(np.sqrt(var_val + epsilon))
else:
reserve_space_2_val = var_val
data_format_src = "NHWC"
grad_x_ref, grad_scale_ref, grad_offset_ref = self._reference_grad(
x_val, grad_val, scale_val, mean_val, var_val, epsilon, data_format_src)
with self.session() as sess, self.test_scope():
grad_val_converted = test_utils.ConvertBetweenDataFormats(
grad_val, data_format_src, data_format)
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
grad_x_ref_converted = test_utils.ConvertBetweenDataFormats(
grad_x_ref, data_format_src, data_format)
grad = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="grad")
x = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
reserve_space_1 = array_ops.placeholder(
np.float32, shape=scale_shape, name="reserve_space_1")
reserve_space_2 = array_ops.placeholder(
np.float32, shape=scale_shape, name="reserve_space_2")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
grad_x, grad_scale, grad_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
grad,
x,
scale,
reserve_space_1,
reserve_space_2,
data_format=data_format,
is_training=True)
grad_x_val, grad_scale_val, grad_offset_val = sess.run(
[grad_x, grad_scale, grad_offset], {
grad: grad_val_converted,
x: x_val_converted,
reserve_space_1: reserve_space_1_val,
reserve_space_2: reserve_space_2_val,
scale: scale_val
})
self.assertAllClose(grad_x_val, grad_x_ref_converted, atol=1e-2)
self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)
@parameterized.named_parameters(*DATA_FORMATS)
def testGradientInference(self, data_format):
# TODO(b/64270657): Use gradient_checker here in addition to comparing with
# this reference implementation.
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
grad_val = np.random.random_sample(x_shape).astype(np.float32)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
data_format_src = "NHWC"
with self.session() as sess, self.test_scope():
grad_val_converted = test_utils.ConvertBetweenDataFormats(
grad_val, data_format_src, data_format)
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
grad = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="grad")
x = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
mean = array_ops.placeholder(np.float32, shape=scale_shape, name="mean")
var = array_ops.placeholder(np.float32, shape=scale_shape, name="var")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
with self.test_scope():
out = gen_nn_ops.fused_batch_norm_grad(
grad,
x,
scale,
mean,
var,
data_format=data_format,
is_training=False)
grad_x, grad_scale, grad_offset, _, _ = out
ref_x, ref_scale, ref_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
grad, x, scale, mean, var, data_format=data_format, is_training=False)
grad_x_val, grad_scale_val, grad_offset_val, = sess.run(
[grad_x, grad_scale, grad_offset], {
grad: grad_val_converted,
x: x_val_converted,
mean: mean_val,
var: var_val,
scale: scale_val
})
grad_x_ref, grad_scale_ref, grad_offset_ref, = sess.run(
[ref_x, ref_scale, ref_offset], {
grad: grad_val_converted,
x: x_val_converted,
mean: mean_val,
var: var_val,
scale: scale_val
})
self.assertAllClose(grad_x_val, grad_x_ref, atol=1e-2)
self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)
if __name__ == "__main__":
test.main()
```
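The `_reference_training` helper above implements the textbook batch-norm statistics: per-channel mean and biased variance over the N, H, W axes, a Bessel-corrected variance for the running estimate, and an optional exponential moving average. The small numpy sketch below re-derives those formulas independently of TensorFlow.
```python
# Standalone numpy check of the reference formulas (NHWC, per-channel stats).
import numpy as np

x = np.random.random_sample([2, 2, 6, 3]).astype(np.float32)
epsilon = 0.001

n = x.size / x.shape[-1]                                 # elements per channel (N*H*W)
mean = x.sum(axis=(0, 1, 2)) / n
var = (x * x).sum(axis=(0, 1, 2)) / n - mean * mean      # biased (population) variance
corrected_var = var * n / max(n - 1, 1)                  # Bessel's correction
normalized = (x - mean) / np.sqrt(var + epsilon)

np.testing.assert_allclose(mean, x.mean(axis=(0, 1, 2)), rtol=1e-4)
np.testing.assert_allclose(corrected_var, x.var(axis=(0, 1, 2), ddof=1), rtol=1e-3)
np.testing.assert_allclose(normalized.mean(axis=(0, 1, 2)), 0.0, atol=1e-4)

# Running estimates with exponential_avg_factor f: new = (1 - f) * old + f * batch_stat
f, old_mean = 0.6, np.zeros(3, np.float32)
running_mean = (1.0 - f) * old_mean + f * mean
```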
#### File: pyct/static_analysis/reaching_definitions_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import naming
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.platform import test
global_a = 7
global_b = 17
class ReachingDefinitionsAnalyzerTestBase(test.TestCase):
def _parse_and_analyze(self, test_fn):
# TODO(mdan): Use a custom FunctionTransformer here.
node, source = parser.parse_entity(test_fn, future_features=())
entity_info = transformer.EntityInfo(
name=test_fn.__name__,
source_code=source,
source_file=None,
future_features=(),
namespace={})
node = qual_names.resolve(node)
namer = naming.Namer({})
ctx = transformer.Context(entity_info, namer, None)
node = activity.resolve(node, ctx)
graphs = cfg.build(node)
node = reaching_definitions.resolve(node, ctx, graphs,
reaching_definitions.Definition)
return node
def assertHasDefs(self, node, num):
defs = anno.getanno(node, anno.Static.DEFINITIONS)
self.assertEqual(len(defs), num)
for r in defs:
self.assertIsInstance(r, reaching_definitions.Definition)
def assertHasDefinedIn(self, node, expected):
defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
defined_in_str = set(str(v) for v in defined_in)
if not expected:
expected = ()
if not isinstance(expected, tuple):
expected = (expected,)
self.assertSetEqual(defined_in_str, set(expected))
def assertSameDef(self, first, second):
self.assertHasDefs(first, 1)
self.assertHasDefs(second, 1)
self.assertIs(
anno.getanno(first, anno.Static.DEFINITIONS)[0],
anno.getanno(second, anno.Static.DEFINITIONS)[0])
def assertNotSameDef(self, first, second):
self.assertHasDefs(first, 1)
self.assertHasDefs(second, 1)
self.assertIsNot(
anno.getanno(first, anno.Static.DEFINITIONS)[0],
anno.getanno(second, anno.Static.DEFINITIONS)[0])
class ReachingDefinitionsAnalyzerTest(ReachingDefinitionsAnalyzerTestBase):
def test_conditional(self):
def test_fn(a, b):
a = []
if b:
a = []
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[0].targets[0], 1)
self.assertHasDefs(fn_body[1].test, 1)
self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
self.assertHasDefs(fn_body[2].value, 2)
self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
def test_try_in_conditional(self):
def test_fn(a, b): # pylint:disable=unused-argument
a = []
if b:
try:
pass
except: # pylint:disable=bare-except
pass
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
self.assertHasDefinedIn(fn_body[1].body[0], ('a', 'b'))
def test_conditional_in_try_in_conditional(self):
def test_fn(a, b):
a = []
if b:
try:
if b:
a = []
except TestException: # pylint:disable=undefined-variable,unused-variable
pass
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
self.assertHasDefinedIn(fn_body[1].body[0], ('a', 'b'))
# Note: `TestException` and `e` are not tracked.
self.assertHasDefinedIn(fn_body[1].body[0].body[0], ('a', 'b'))
def test_conditional_in_except_in_conditional(self):
def test_fn(a, b):
a = []
if b:
try:
pass
except TestException as e: # pylint:disable=undefined-variable,unused-variable
if b:
a = []
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
self.assertHasDefinedIn(fn_body[1].body[0], ('a', 'b'))
# Note: `TestException` and `e` are not tracked.
self.assertHasDefinedIn(fn_body[1].body[0].handlers[0].body[0], ('a', 'b'))
def test_while(self):
def test_fn(a):
max(a)
while True:
a = a
a = a
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[0].value.args[0], 1)
self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
self.assertHasDefs(fn_body[1].body[1].targets[0], 1)
self.assertHasDefs(fn_body[1].body[1].value, 1)
# The loop does have an invariant test, but the CFG doesn't know that.
self.assertHasDefs(fn_body[1].body[0].value, 2)
self.assertHasDefs(fn_body[2].value, 2)
def test_while_else(self):
def test_fn(x, i):
y = 0
while x:
x += i
if i:
break
else:
y = 1
return x, y
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[0].targets[0], 1)
self.assertHasDefs(fn_body[1].test, 2)
self.assertHasDefs(fn_body[1].body[0].target, 1)
self.assertHasDefs(fn_body[1].body[1].test, 1)
self.assertHasDefs(fn_body[1].orelse[0].targets[0], 1)
self.assertHasDefs(fn_body[2].value.elts[0], 2)
self.assertHasDefs(fn_body[2].value.elts[1], 2)
def test_for_else(self):
def test_fn(x, i):
y = 0
for i in x:
x += i
if i:
break
else:
continue
else:
y = 1
return x, y
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[0].targets[0], 1)
self.assertHasDefs(fn_body[1].target, 1)
self.assertHasDefs(fn_body[1].body[0].target, 1)
self.assertHasDefs(fn_body[1].body[1].test, 1)
self.assertHasDefs(fn_body[1].orelse[0].targets[0], 1)
self.assertHasDefs(fn_body[2].value.elts[0], 2)
self.assertHasDefs(fn_body[2].value.elts[1], 2)
def test_nested_functions(self):
def test_fn(a, b):
a = []
if b:
a = []
def foo():
return a
foo()
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
def_of_a_in_if = fn_body[1].body[0].targets[0]
self.assertHasDefs(fn_body[0].targets[0], 1)
self.assertHasDefs(fn_body[1].test, 1)
self.assertHasDefs(def_of_a_in_if, 1)
self.assertHasDefs(fn_body[2].value, 2)
inner_fn_body = fn_body[1].body[1].body
def_of_a_in_foo = inner_fn_body[0].value
self.assertHasDefs(def_of_a_in_foo, 0)
def test_nested_functions_isolation(self):
def test_fn(a):
a = 0
def child():
a = 1
return a
child()
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
parent_return = fn_body[3]
child_return = fn_body[1].body[1]
# The assignment `a = 1` makes `a` local to `child`.
self.assertNotSameDef(parent_return.value, child_return.value)
def test_function_call_in_with(self):
def foo(_):
pass
def test_fn(a):
with foo(a):
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[0].items[0].context_expr.func, 0)
self.assertHasDefs(fn_body[0].items[0].context_expr.args[0], 1)
def test_mutation_subscript(self):
def test_fn(a):
l = []
l[0] = a
return l
node = self._parse_and_analyze(test_fn)
fn_body = node.body
creation = fn_body[0].targets[0]
mutation = fn_body[1].targets[0].value
use = fn_body[2].value
self.assertSameDef(creation, mutation)
self.assertSameDef(creation, use)
def test_deletion_partial(self):
def test_fn(a):
a = 0
if a:
del a
else:
a = 1
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
first_def = fn_body[0].targets[0]
second_def = fn_body[1].orelse[0].targets[0]
use = fn_body[2].value
self.assertNotSameDef(use, first_def)
self.assertSameDef(use, second_def)
def test_deletion_total(self):
def test_fn(a):
if a:
a = 0
else:
a = 1
del a
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
use = fn_body[2].value
self.assertHasDefs(use, 0)
def test_replacement(self):
def foo(a):
return a
def test_fn(a):
a = foo(a)
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
param = node.args.args[0]
source = fn_body[0].value.args[0]
target = fn_body[0].targets[0]
retval = fn_body[1].value
self.assertSameDef(param, source)
self.assertNotSameDef(source, target)
self.assertSameDef(target, retval)
def test_comprehension_leaking(self):
def test_fn(a):
_ = [x for x in a]
return x # pylint:disable=undefined-loop-variable
node = self._parse_and_analyze(test_fn)
fn_body = node.body
listcomp_target = fn_body[0].value.generators[0].target
retval = fn_body[1].value
# Python2 leaks list comprehension symbols. Python3 doesn't.
# For details, see:
# https://stackoverflow.com/questions/4198906/list-comprehension-rebinds-names-even-after-scope-of-comprehension-is-this-righ
if six.PY2:
self.assertSameDef(retval, listcomp_target)
else:
self.assertHasDefs(retval, 0)
def test_function_definition(self):
def test_fn():
def a():
pass
if a: # pylint:disable=using-constant-test
a = None
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[1].test, 1)
self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
self.assertHasDefs(fn_body[2].value, 2)
self.assertHasDefinedIn(fn_body[1], ('a',))
def test_global(self):
def test_fn():
global global_a
global global_b
if global_a:
global_b = []
return global_a, global_b
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[2].test, 1)
self.assertHasDefs(fn_body[2].body[0].targets[0], 1)
self.assertHasDefs(fn_body[3].value.elts[0], 1)
self.assertHasDefs(fn_body[3].value.elts[1], 2)
self.assertSameDef(fn_body[2].test, fn_body[3].value.elts[0])
self.assertHasDefinedIn(fn_body[2], ('global_a', 'global_b'))
if __name__ == '__main__':
test.main()
```
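`_parse_and_analyze` above wires together the pyct pipeline (parse, qualified names, activity analysis, CFG construction, reaching definitions). The sketch below runs the same pipeline outside the test class on the simple conditional from `test_conditional` and reads the annotations directly.
```python
# Standalone sketch of the pipeline wrapped by _parse_and_analyze.
from tensorflow.python.autograph.pyct import anno, cfg, naming, parser, qual_names, transformer
from tensorflow.python.autograph.pyct.static_analysis import activity, reaching_definitions

def sample(a, b):
    a = []
    if b:
        a = []
    return a

node, source = parser.parse_entity(sample, future_features=())
entity_info = transformer.EntityInfo(
    name=sample.__name__, source_code=source, source_file=None,
    future_features=(), namespace={})
node = qual_names.resolve(node)
ctx = transformer.Context(entity_info, naming.Namer({}), None)
node = activity.resolve(node, ctx)
graphs = cfg.build(node)
node = reaching_definitions.resolve(node, ctx, graphs, reaching_definitions.Definition)

# Two definitions of `a` (the unconditional one and the one inside `if b:`)
# reach the return statement, matching test_conditional above.
ret_defs = anno.getanno(node.body[2].value, anno.Static.DEFINITIONS)
print(len(ret_defs))  # 2
```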
#### File: data/service/server_lib.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=invalid-import-order,g-bad-import-order, unused-import
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.data.service import _pywrap_server_lib
class MasterServer(object):
"""An in-process tf.data service master, for use in testing."""
def __init__(self, protocol):
"""Creates and starts a new tf.data master server.
The server will choose an available port. Use `target()` to get the string
for connecting to the server.
Args:
protocol: A string representing the type of protocol to use when creating
channels. For no security, use "grpc". For local credentials, use
"grpc+local", and make sure your binary links in
`data/service:local_credentials`.
"""
self._protocol = protocol
self._server = _pywrap_server_lib.TF_DATA_NewMasterServer(0, protocol)
self._running = True
@property
def target(self):
"""Returns the target for connecting to this server.
The returned string will be in the form protocol://address:port, e.g.
"grpc://localhost:1000".
"""
port = _pywrap_server_lib.TF_DATA_MasterServerBoundPort(self._server)
return "{0}://localhost:{1}".format(self._protocol, port)
def num_tasks(self):
"""Returns the number of tasks on the master."""
return _pywrap_server_lib.TF_DATA_MasterServerNumTasks(self._server)
def stop(self):
"""Shuts down and deletes the server.
This method will block until all outstanding rpcs have completed and the
server has been shut down.
"""
if self._running:
self._running = False
_pywrap_server_lib.TF_DATA_DeleteMasterServer(self._server)
def __del__(self):
self.stop()
class WorkerServer(object):
"""An in-process tf.data service worker, for use in testing."""
def __init__(self, protocol, master_address, port=0):
"""Creates and starts a new tf.data worker server.
The server will choose an available port. Use `target()` to get the string
for connecting to the server.
Args:
protocol: A string representing the type of protocol to use when creating
channels. For no security, use "grpc". For local credentials, use
"grpc+local", and make sure your binary links in
`data/service:local_credentials`.
master_address: The address of the tf.data master server to register with.
port: The port to bind to.
"""
self._protocol = protocol
self._server = _pywrap_server_lib.TF_DATA_NewWorkerServer(
port, protocol, master_address, "localhost:%port%")
self._running = True
@property
def target(self):
"""Returns the target for connecting to this server.
The returned string will be in the form protocol://address:port, e.g.
"grpc://localhost:1000".
"""
port = _pywrap_server_lib.TF_DATA_WorkerServerBoundPort(self._server)
return "{0}://localhost:{1}".format(self._protocol, port)
def stop(self):
"""Shuts down and deletes the server.
This method will block until all outstanding rpcs have completed and the
server has been shut down.
"""
if self._running:
self._running = False
_pywrap_server_lib.TF_DATA_DeleteWorkerServer(self._server)
def __del__(self):
self.stop()
```
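The master and worker classes above are meant for in-process use in tests. The test-style sketch below starts one of each and inspects their connection targets, using only the APIs defined in this file.
```python
# Test-style usage sketch based only on the classes defined above.
from tensorflow.python.data.service import server_lib

master = server_lib.MasterServer(protocol="grpc")
print(master.target)              # e.g. "grpc://localhost:<port>"

# The worker registers with the master; master_address is host:port,
# i.e. the master target without the protocol prefix.
worker = server_lib.WorkerServer(
    protocol="grpc",
    master_address=master.target.split("://")[-1])
print(worker.target)
print(master.num_tasks())         # tasks currently registered with the master

worker.stop()
master.stop()
```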
#### File: debug/lib/dumping_callback_test_lib.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.python.debug.lib import check_numerics_callback
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import dumping_callback
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
class DumpingCallbackTestBase(test_util.TensorFlowTestCase):
"""Base test-case class for tfdbg v2 callbacks."""
def setUp(self):
super(DumpingCallbackTestBase, self).setUp()
self.dump_root = tempfile.mkdtemp()
def tearDown(self):
if os.path.isdir(self.dump_root):
shutil.rmtree(self.dump_root, ignore_errors=True)
check_numerics_callback.disable_check_numerics()
dumping_callback.disable_dump_debug_info()
super(DumpingCallbackTestBase, self).tearDown()
def _readAndCheckMetadataFile(self):
"""Read and check the .metadata debug-events file."""
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
metadata_iter = reader.metadata_iterator()
metadata = next(metadata_iter).debug_event.debug_metadata
self.assertEqual(metadata.tensorflow_version, versions.__version__)
self.assertTrue(metadata.file_version.startswith("debug.Event"))
```
#### File: python/distribute/multi_worker_util_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.eager import test
from tensorflow.python.training import server_lib
class NormalizeClusterSpecTest(test.TestCase):
def assert_same_cluster(self, lhs, rhs):
self.assertEqual(
server_lib.ClusterSpec(lhs).as_dict(),
server_lib.ClusterSpec(rhs).as_dict())
def testDictAsInput(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assert_same_cluster(
cluster_spec, multi_worker_util.normalize_cluster_spec(cluster_spec))
def testClusterDefAsInput(self):
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = "chief"
job.tasks[0] = "127.0.0.1:1234"
job = cluster_def.job.add()
job.name = "worker"
job.tasks[0] = "127.0.0.1:8964"
job.tasks[1] = "127.0.0.1:2333"
job = cluster_def.job.add()
job.name = "ps"
job.tasks[0] = "127.0.0.1:1926"
job.tasks[1] = "127.0.0.1:3141"
self.assert_same_cluster(
cluster_def, multi_worker_util.normalize_cluster_spec(cluster_def))
def testClusterSpecAsInput(self):
cluster_spec = server_lib.ClusterSpec({
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
})
self.assert_same_cluster(
cluster_spec, multi_worker_util.normalize_cluster_spec(cluster_spec))
def testUnexpectedInput(self):
cluster_spec = ["127.0.0.1:8964", "127.0.0.1:2333"]
with self.assertRaisesRegexp(
ValueError,
"`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a "
"`tf.train.ClusterDef` object"):
multi_worker_util.normalize_cluster_spec(cluster_spec)
class IsChiefTest(test.TestCase):
def testClusterWithChief(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertTrue(multi_worker_util.is_chief(cluster_spec, "chief", 0))
self.assertFalse(multi_worker_util.is_chief(cluster_spec, "worker", 0))
def testClusterWithoutChief(self):
cluster_spec = {"worker": ["127.0.0.1:8964", "127.0.0.1:2333"]}
self.assertTrue(multi_worker_util.is_chief(cluster_spec, "worker", 0))
self.assertFalse(multi_worker_util.is_chief(cluster_spec, "worker", 1))
with self.assertRaisesRegexp(
ValueError, "`task_type` 'chief' not found in cluster_spec."):
multi_worker_util.is_chief(cluster_spec, "chief", 0)
with self.assertRaisesRegexp(
ValueError, "The `task_id` 2 exceeds the maximum id of worker."):
multi_worker_util.is_chief(cluster_spec, "worker", 2)
def testEvaluatorIsChief(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"evaluator": ["127.0.0.1:2019"]
}
self.assertTrue(multi_worker_util.is_chief(cluster_spec, "evaluator", 0))
class NumWorkersTest(test.TestCase):
def testCountWorker(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.worker_count(cluster_spec, task_type="chief"), 3)
self.assertEqual(
multi_worker_util.worker_count(cluster_spec, task_type="worker"), 3)
def testCountEvaluator(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"evaluator": ["127.0.0.1:7566"]
}
self.assertEqual(
multi_worker_util.worker_count(cluster_spec, task_type="evaluator"), 1)
def testTaskTypeNotFound(self):
cluster_spec = {}
with self.assertRaisesRegexp(
ValueError, "`task_type` 'worker' not found in cluster_spec."):
multi_worker_util.worker_count(cluster_spec, task_type="worker")
def testCountPs(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
# A "ps" job shouldn't call this method.
with self.assertRaisesRegexp(ValueError, "Unexpected `task_type` 'ps'"):
multi_worker_util.worker_count(cluster_spec, task_type="ps")
class IdInClusterTest(test.TestCase):
def testChiefId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "chief", 0), 0)
def testWorkerId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "worker", 1), 2)
cluster_spec = {
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "worker", 1), 1)
def testEvaluatorId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"evaluator": ["127.0.0.1:7566"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "evaluator", 0), 0)
def testPsId(self):
cluster_spec = {"chief": ["127.0.0.1:1234"], "ps": ["127.0.0.1:7566"]}
with self.assertRaisesRegexp(ValueError,
"There is no id for task_type 'ps'"):
multi_worker_util.id_in_cluster(cluster_spec, "ps", 0)
def testMultipleChiefs(self):
cluster_spec = {
"chief": ["127.0.0.1:8258", "127.0.0.1:7566"],
}
with self.assertRaisesRegexp(ValueError,
"There must be at most one 'chief' job."):
multi_worker_util.id_in_cluster(cluster_spec, "chief", 0)
class CollectiveLeaderTest(test.TestCase):
def testChiefAsLeader(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, "worker", 0),
"/job:chief/replica:0/task:0")
def testWorkerAsLeader(self):
cluster_spec = {
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, "worker", 1),
"/job:worker/replica:0/task:0")
def testLeaderForEvaluator(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"],
"evaluator": ["127.0.0.1:2019"]
}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, "evaluator", 0), "")
def testLocalLeader(self):
cluster_spec = {}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, None, 0), "")
# Most of the validation logic is tested by above tests except for some.
class ClusterSpecValidationTest(test.TestCase):
def testEvaluatorNotInCluster(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
multi_worker_util._validate_cluster_spec(cluster_spec, "chief", 0)
multi_worker_util._validate_cluster_spec(cluster_spec, "worker", 0)
multi_worker_util._validate_cluster_spec(cluster_spec, "ps", 0)
multi_worker_util._validate_cluster_spec(cluster_spec, "evaluator", 0)
def testWorkerNotInCluster(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
multi_worker_util._validate_cluster_spec(cluster_spec, "evaluator", 0)
with self.assertRaisesRegexp(
ValueError, "`task_type` 'worker' not found in cluster_spec."):
multi_worker_util._validate_cluster_spec(cluster_spec, "worker", 0)
if __name__ == "__main__":
test.main()
```
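The tests above exercise the cluster-spec helpers one at a time; for reference, the consolidated sketch below applies them to a single cluster using the same illustrative addresses.
```python
# Consolidated usage of the helpers exercised by the tests above.
from tensorflow.python.distribute import multi_worker_util

cluster_spec = {
    "chief": ["127.0.0.1:1234"],
    "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
    "ps": ["127.0.0.1:1926", "127.0.0.1:3141"],
}

spec = multi_worker_util.normalize_cluster_spec(cluster_spec)            # ClusterSpec
print(multi_worker_util.is_chief(cluster_spec, "chief", 0))              # True
print(multi_worker_util.worker_count(cluster_spec, task_type="worker"))  # 3 (chief counts)
print(multi_worker_util.id_in_cluster(cluster_spec, "worker", 1))        # 2 (chief takes id 0)
print(multi_worker_util.collective_leader(cluster_spec, "worker", 0))
# "/job:chief/replica:0/task:0"
```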
#### File: python/eager/forwardprop.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import backprop
from tensorflow.python.eager import backprop_util
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute
from tensorflow.python.eager import forwardprop_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# Dictionary mapping from op names to special-cased jvp functions. Otherwise
# backward functions are transposed on the tape.
_SPECIAL_CASES = {}
def _identity_jvp(attr_tuple, inputs, outputs, tangents):
# Special-cased mostly for resource handles, where creating ones Tensors from
# handle data for transposing the backward function on the tape is error-prone
# (even if we get good handle data, partially defined shapes are an issue).
del attr_tuple, inputs, outputs
return [array_ops.identity(t) for t in tangents]
_SPECIAL_CASES["Identity"] = _identity_jvp
def _read_variable_jvp(attr_tuple, inputs, outputs, tangents):
# Like for Identity, this special case means we don't need to create
# variable-shaped Tensors from resource handles.
del attr_tuple, inputs, outputs
return [array_ops.identity(t) for t in tangents]
_SPECIAL_CASES["ReadVariableOp"] = _read_variable_jvp
_TRACE_COUNT_CONSISTENCY_LOCK = threading.Lock()
# Map from op names to number of traces of _jvp_helper. Used to cap the number
# of traces due to shape differences while still specializing where possible.
_TRACE_COUNT = {}
def _jvp_helper(op_name, attr_tuple, inputs, outputs, tangents):
"""Computes a Jacobian-vector product for an op.
Note that this function would be wasteful if executed eagerly. It runs the
backward gradient function and throws away the result just to record its
operations on a GradientTape. These unused ops are pruned away when this
function is traced.
Args:
op_name: A string, the type of operation being executed.
attr_tuple: Attributes of the operation.
inputs: A flat list of input Tensors to the operation.
outputs: A flat list of output Tensors from the operation.
tangents: A flat list of Tensors, same shape as `inputs`.
Returns:
A flat list of tangents corresponding to `outputs`.
"""
with _TRACE_COUNT_CONSISTENCY_LOCK:
# Just make sure writes don't clobber each other's increments; reads in
# _jvp_dispatch do not lock.
_TRACE_COUNT[op_name] = _TRACE_COUNT.get(op_name, 0) + 1
special_case = _SPECIAL_CASES.get(op_name, None)
if special_case is not None:
return special_case(attr_tuple, inputs, outputs, tangents)
if not outputs:
# tape.gradients([], inputs) doesn't make much sense
return []
# Generally inner GradientTapes won't function while outer accumulators are
# recording. We temporarily reset forwardprop state to allow GradientTapes to
# function here.
with forwardprop_util.push_forwardprop_state():
trainable_inputs = []
trainable_indices = []
nontrivial_tangents = []
for input_index, tensor in enumerate(inputs):
if backprop_util.IsTrainable(tensor):
trainable_inputs.append(tensor)
trainable_indices.append(input_index)
nontrivial_tangents.append(tangents[input_index])
with backprop.GradientTape() as transpose_tape:
with backprop.GradientTape() as backfunc_tape:
backfunc_tape.watch(trainable_inputs)
execute.record_gradient(op_name, inputs, attr_tuple, outputs)
forwardprop_aids = []
trainable_outputs = []
nontrivial_output_indices = []
for output_index, output in enumerate(outputs):
if backprop_util.IsTrainable(output):
forwardprop_aids.append(
array_ops.ones_like(output, name="unused_forwardprop_aid"))
trainable_outputs.append(output)
nontrivial_output_indices.append(output_index)
transpose_tape.watch(forwardprop_aids)
grads = backfunc_tape.gradient(
trainable_outputs,
trainable_inputs,
forwardprop_aids,
unconnected_gradients=UnconnectedGradients.ZERO)
nontrivial_output_tangents = transpose_tape.gradient(
grads, forwardprop_aids, output_gradients=nontrivial_tangents)
output_tangents = [None] * len(outputs)
for index, tangent in zip(nontrivial_output_indices,
nontrivial_output_tangents):
output_tangents[index] = tangent
return output_tangents
# TODO(allenl): experimental_relax_shapes for gradients which rely on static
# shape information are underspecialized. We may want hand-written forward
# implementations, or a more satisfying story about how we re-specialize
# gradients which were traced with relaxed shapes (e.g. use conds instead of
# trace-time Python logic).
_jvp_relaxed_shapes = def_function.function(
_jvp_helper, experimental_relax_shapes=True)
_jvp_exact_shapes = def_function.function(
_jvp_helper, experimental_relax_shapes=False)
# The maximum number of exact-shape traces to perform for a single op before
# switching to shape relaxation.
_TRACE_COUNT_LIMIT = 32
def _jvp_dispatch(op_name, attr_tuple, inputs, outputs, tangents):
"""Determine which forwardprop function to call."""
# Note that this _TRACE_COUNT read races with writes. That's fine, it just
# means we may trace a few more exact shapes before moving on to relaxation.
if _TRACE_COUNT.get(op_name, 0) < _TRACE_COUNT_LIMIT:
return _jvp_exact_shapes(
op_name, attr_tuple, inputs, outputs, tangents)
else:
return _jvp_relaxed_shapes(
op_name, attr_tuple, inputs, outputs, tangents)
pywrap_tfe.TFE_Py_RegisterJVPFunction(_jvp_dispatch)
@tf_export("autodiff.ForwardAccumulator", v1=[])
class ForwardAccumulator(object):
"""Computes Jacobian-vector products ("JVP"s) using forward-mode autodiff.
Compare to `tf.GradientTape` which computes vector-Jacobian products ("VJP"s)
using reverse-mode autodiff (backprop). Reverse mode is more attractive when
computing gradients of a scalar-valued function with respect to many inputs
(e.g. a neural network with many parameters and a scalar loss). Forward mode
works best on functions with many outputs and few inputs. Since it does not
hold on to intermediate activations, it is much more memory efficient than
backprop where it is applicable.
Consider a simple linear regression:
>>> x = tf.constant([[2.0, 3.0], [1.0, 4.0]])
>>> dense = tf.keras.layers.Dense(1)
>>> dense.build([None, 2])
>>> with tf.autodiff.ForwardAccumulator(
... primals=dense.kernel,
... tangents=tf.constant([[1.], [0.]])) as acc:
... loss = tf.reduce_sum((dense(x) - tf.constant([1., -1.])) ** 2.)
>>> acc.jvp(loss)
<tf.Tensor: shape=(), dtype=float32, numpy=...>
The example has two variables containing parameters, `dense.kernel` (2
parameters) and `dense.bias` (1 parameter). Considering the training data `x`
as a constant, this means the Jacobian matrix for the function mapping from
parameters to loss has one row and three columns.
With forwardprop, we specify a length-three vector in advance which multiplies
the Jacobian. The `primals` constructor argument is the parameter (a
`tf.Tensor` or `tf.Variable`) we're specifying a vector for, and the
`tangents` argument is the "vector" in Jacobian-vector product. If our goal is
to compute the entire Jacobian matrix, forwardprop computes one column at a
time while backprop computes one row at a time. Since the Jacobian in the
linear regression example has only one row, backprop requires fewer
invocations:
>>> x = tf.constant([[2.0, 3.0], [1.0, 4.0]])
>>> dense = tf.keras.layers.Dense(1)
>>> dense.build([None, 2])
>>> loss_fn = lambda: tf.reduce_sum((dense(x) - tf.constant([1., -1.])) ** 2.)
>>> kernel_fprop = []
>>> with tf.autodiff.ForwardAccumulator(
... dense.kernel, tf.constant([[1.], [0.]])) as acc:
... kernel_fprop.append(acc.jvp(loss_fn()))
>>> with tf.autodiff.ForwardAccumulator(
... dense.kernel, tf.constant([[0.], [1.]])) as acc:
... kernel_fprop.append(acc.jvp(loss_fn()))
>>> with tf.autodiff.ForwardAccumulator(dense.bias, tf.constant([1.])) as acc:
... bias_fprop = acc.jvp(loss_fn())
>>> with tf.GradientTape() as tape:
... loss = loss_fn()
>>> kernel_grad, bias_grad = tape.gradient(loss, (dense.kernel, dense.bias))
>>> np.testing.assert_allclose(
... kernel_grad, tf.stack(kernel_fprop)[:, tf.newaxis])
>>> np.testing.assert_allclose(bias_grad, bias_fprop[tf.newaxis])
Implicit in the `tape.gradient` call is a length-one vector which
left-multiplies the Jacobian, a vector-Jacobian product.
  `ForwardAccumulator` maintains JVPs corresponding to the primal tensors it is
watching, derived from the original `primals` specified in the constructor. As
soon as a primal tensor is deleted, `ForwardAccumulator` deletes the
corresponding JVP.
`acc.jvp(x)` retrieves `acc`'s JVP corresponding to the primal tensor `x`. It
does not perform any computation. `acc.jvp` calls can be repeated as long as
`acc` is accessible, whether the context manager is active or not. New JVPs
are only computed while the context manager is active.
Note that `ForwardAccumulator`s are always applied in the order their context
managers were entered, so inner accumulators will not see JVP computation from
outer accumulators. Take higher-order JVPs from outer accumulators:
>>> primal = tf.constant(1.1)
>>> with tf.autodiff.ForwardAccumulator(primal, tf.constant(1.)) as outer:
... with tf.autodiff.ForwardAccumulator(primal, tf.constant(1.)) as inner:
... primal_out = primal ** tf.constant(3.5)
>>> inner_jvp = inner.jvp(primal_out)
>>> inner_jvp # 3.5 * 1.1 ** 2.5
<tf.Tensor: shape=(), dtype=float32, numpy=4.4417057>
>>> outer.jvp(inner_jvp) # 3.5 * 2.5 * 1.1 ** 1.5
<tf.Tensor: shape=(), dtype=float32, numpy=10.094786>
Reversing the collection in the last line to instead retrieve
`inner.jvp(outer.jvp(primal_out))` will not work.
Strict nesting also applies to combinations of `ForwardAccumulator` and
`tf.GradientTape`. More deeply nested `GradientTape` objects will ignore the
products of outer `ForwardAccumulator` objects. This allows (for example)
memory-efficient forward-over-backward computation of Hessian-vector products,
where the inner `GradientTape` would otherwise hold on to all intermediate
JVPs:
>>> v = tf.Variable([1., 2.])
>>> with tf.autodiff.ForwardAccumulator(
... v,
... # The "vector" in Hessian-vector product.
... tf.constant([1., 0.])) as acc:
... with tf.GradientTape() as tape:
... y = tf.reduce_sum(v ** 3.)
... backward = tape.gradient(y, v)
>>> backward # gradient from backprop
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([ 3., 12.], dtype=float32)>
>>> acc.jvp(backward) # forward-over-backward Hessian-vector product
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([6., 0.], dtype=float32)>
"""
def __init__(self, primals, tangents):
"""Specify tensors to watch and their Jacobian-vector products.
Mathematically, `tangents` is a vector right-multiplying the Jacobian matrix
(a Jacobian-vector product) for the function computed while this accumulator
is active. Since JVPs are computed in forward mode as the computation
happens, this vector must be supplied in advance.
Listing a single tensor multiple times in `primals` raises an
exception. Excluding a tensor from `primals` is equivalent to watching it
with a tangent tensor of zeros.
Args:
primals: A tensor or nested structure of tensors to watch.
tangents: A tensor or nested structure of tensors, with the same nesting
structure as `primals`, with each element being a vector with the same
size as the corresponding primal element.
Raises:
ValueError: If the same tensor or variable is specified multiple times in
`primals`.
"""
self._accumulator = pywrap_tfe.TFE_Py_ForwardAccumulatorNew()
self._recording = False
primal_ids = set()
for primal in nest.flatten(primals):
if id(primal) in primal_ids:
        raise ValueError(
            "Tensor {} was specified as a primal multiple times. This may "
            "indicate an error. If it was intended, please sum the "
            "corresponding tangents.".format(primal))
primal_ids.add(id(primal))
self._watch(primals, tangents)
def __enter__(self):
self._push_accumulator()
return self
def __exit__(self, typ, value, traceback):
if self._recording:
self._pop_accumulator()
def _push_accumulator(self):
if self._recording:
raise ValueError("Accumulator is already recording.")
pywrap_tfe.TFE_Py_ForwardAccumulatorSetAdd(self._accumulator)
self._recording = True
def _pop_accumulator(self):
if not self._recording:
raise ValueError("Accumulator is not recording.")
pywrap_tfe.TFE_Py_ForwardAccumulatorSetRemove(self._accumulator)
self._recording = False
def _watch(self, primals, tangents):
"""Ensures that `primals` are being traced by this accumulator.
Mathematically, `tangents` is a vector right-multiplying the Jacobian matrix
(a Jacobian-vector product) for the function computed while this accumulator
is active. Since JVPs are computed in forward mode as the computation
happens, this vector must be supplied in advance.
Watching a single tensor multiple times sums each of its `tangents`. Any
un-watched tensor has zeros for its tangent vector.
Args:
primals: A Tensor or list of Tensors.
tangents: A Tensor or list of Tensors matching `primals`.
"""
nest.assert_same_structure(primals, tangents)
for t, g in zip(nest.flatten(primals), nest.flatten(tangents)):
if not t.dtype.is_floating:
logging.log_first_n(
logging.WARN, "The dtype of the watched primal must be "
"floating (e.g. tf.float32), got %r", 5, t.dtype)
g = ops.convert_to_tensor(g, dtype=t.dtype)
if hasattr(t, "handle"):
# Run convert_to_tensor to get the captured handle from whichever
# function we're running if necessary.
t = ops.convert_to_tensor(t.handle)
pywrap_tfe.TFE_Py_ForwardAccumulatorWatch(self._accumulator, t, g)
def jvp(self, primals, unconnected_gradients=UnconnectedGradients.NONE):
"""Fetches the Jacobian-vector product computed for `primals`.
Note that this method performs no computation, and simply looks up a JVP
that was already computed (unlike backprop using a `tf.GradientTape`, where
the computation happens on the call to `tape.gradient`).
Args:
primals: A watched Tensor or structure of Tensors to fetch the JVPs for.
unconnected_gradients: A value which can either hold 'none' or 'zero' and
alters the value which will be returned if no JVP was computed for
`primals`. The possible values and effects are detailed in
'tf.UnconnectedGradients' and it defaults to 'none'.
Returns:
Tensors with the same shapes and dtypes as `primals`, or None if no JVP
is available.
"""
unconnected_gradients = UnconnectedGradients(unconnected_gradients)
if self._accumulator is None:
raise ValueError("Called jvp() without first tracing anything.")
def _fetch_jvp(tensor):
if hasattr(tensor, "handle"):
tensor = ops.convert_to_tensor(tensor.handle)
result = pywrap_tfe.TFE_Py_ForwardAccumulatorJVP(self._accumulator,
tensor)
if result is None and unconnected_gradients == UnconnectedGradients.ZERO:
return array_ops.zeros_like(tensor)
return result
return nest.map_structure(_fetch_jvp, primals)
```
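The `ForwardAccumulator` docstring above notes that forward mode computes the Jacobian one column at a time, one accumulator per tangent. A minimal sketch of that pattern using only the public `tf.autodiff` API documented above; the toy function `f` and the input values are illustrative, not part of the original file:

```python
import tensorflow as tf

x = tf.constant([3.0, 2.0])

def f(x):
  # A toy mapping from R^2 to R^2; its Jacobian is [[2*x0, 0], [x1, x0]].
  return tf.stack([x[0] ** 2.0, x[0] * x[1]])

jacobian_columns = []
for i in range(2):
  # Each one-hot tangent selects one column of the Jacobian.
  with tf.autodiff.ForwardAccumulator(primals=x, tangents=tf.one_hot(i, 2)) as acc:
    y = f(x)
  jacobian_columns.append(acc.jvp(y))

jacobian = tf.stack(jacobian_columns, axis=1)  # expected [[6., 0.], [2., 3.]]
```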
#### File: python/feature_column/dense_features_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.feature_column import dense_features as df
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
def _initialized_session(config=None):
sess = session.Session(config=config)
sess.run(variables_lib.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
return sess
class DenseFeaturesTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_retrieving_input(self):
features = {'a': [0.]}
dense_features = df.DenseFeatures(fc.numeric_column('a'))
inputs = self.evaluate(dense_features(features))
self.assertAllClose([[0.]], inputs)
def test_reuses_variables(self):
with context.eager_mode():
sparse_input = sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 2),
dense_shape=(3, 3))
# Create feature columns (categorical and embedding).
categorical_column = fc.categorical_column_with_identity(
key='a', num_buckets=3)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info=None):
del shape # unused
del dtype # unused
del partition_info # unused
embedding_values = (
(1, 0), # id 0
(0, 1), # id 1
(1, 1)) # id 2
return embedding_values
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer)
dense_features = df.DenseFeatures([embedding_column])
features = {'a': sparse_input}
inputs = dense_features(features)
variables = dense_features.variables
# Sanity check: test that the inputs are correct.
self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)
# Check that only one variable was created.
self.assertEqual(1, len(variables))
# Check that invoking dense_features on the same features does not create
# additional variables
_ = dense_features(features)
self.assertEqual(1, len(variables))
self.assertIs(variables[0], dense_features.variables[0])
def test_dense_feature_with_partitioner(self):
with context.eager_mode():
sparse_input = sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (2, 0), (3, 0)),
values=(0, 1, 3, 2),
dense_shape=(4, 4))
# Create feature columns (categorical and embedding).
categorical_column = fc.categorical_column_with_identity(
key='a', num_buckets=4)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info=None):
offset = partition_info._var_offset[0]
del shape # unused
del dtype # unused
if offset == 0:
embedding_values = (
(1, 0), # id 0
(0, 1)) # id 1
else:
embedding_values = (
(1, 1), # id 2
(2, 2)) # id 3
return embedding_values
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer)
dense_features = df.DenseFeatures(
[embedding_column],
partitioner=partitioned_variables.fixed_size_partitioner(2))
features = {'a': sparse_input}
inputs = dense_features(features)
variables = dense_features.variables
# Sanity check: test that the inputs are correct.
self.assertAllEqual([[1, 0], [0, 1], [2, 2], [1, 1]], inputs)
# Check that only one variable was created.
self.assertEqual(2, len(variables))
# Check that invoking dense_features on the same features does not create
# additional variables
_ = dense_features(features)
self.assertEqual(2, len(variables))
self.assertIs(variables[0], dense_features.variables[0])
self.assertIs(variables[1], dense_features.variables[1])
def test_feature_column_dense_features_gradient(self):
with context.eager_mode():
sparse_input = sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 2),
dense_shape=(3, 3))
# Create feature columns (categorical and embedding).
categorical_column = fc.categorical_column_with_identity(
key='a', num_buckets=3)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info=None):
del shape # unused
del dtype # unused
del partition_info # unused
embedding_values = (
(1, 0), # id 0
(0, 1), # id 1
(1, 1)) # id 2
return embedding_values
embedding_column = fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer)
dense_features = df.DenseFeatures([embedding_column])
features = {'a': sparse_input}
def scale_matrix():
matrix = dense_features(features)
return 2 * matrix
# Sanity check: Verify that scale_matrix returns the correct output.
self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())
# Check that the returned gradient is correct.
grad_function = backprop.implicit_grad(scale_matrix)
grads_and_vars = grad_function()
indexed_slice = grads_and_vars[0][0]
gradient = grads_and_vars[0][0].values
self.assertAllEqual([0, 1, 2], indexed_slice.indices)
self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegexp(ValueError,
'feature_columns must not be empty'):
df.DenseFeatures(feature_columns=[])(features={})
def test_should_be_dense_column(self):
with self.assertRaisesRegexp(ValueError, 'must be a .*DenseColumn'):
df.DenseFeatures(feature_columns=[
fc.categorical_column_with_hash_bucket('wire_cast', 4)
])(
features={
'a': [[0]]
})
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegexp(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
df.DenseFeatures(feature_columns={'a': fc.numeric_column('a')})(
features={
'a': [[0]]
})
def test_bare_column(self):
with ops.Graph().as_default():
      features = {'a': [0.]}
net = df.DenseFeatures(fc.numeric_column('a'))(features)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[0.]], self.evaluate(net))
def test_column_generator(self):
with ops.Graph().as_default():
      features = {'a': [0.], 'b': [1.]}
columns = (fc.numeric_column(key) for key in features)
net = df.DenseFeatures(columns)(features)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[0., 1.]], self.evaluate(net))
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegexp(
ValueError, 'Duplicate feature column name found for columns'):
df.DenseFeatures(
feature_columns=[fc.numeric_column('a'),
fc.numeric_column('a')])(
features={
'a': [[0]]
})
def test_one_column(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
net = df.DenseFeatures([price])(features)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[1.], [5.]], self.evaluate(net))
def test_multi_dimension(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
net = df.DenseFeatures([price])(features)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))
def test_compute_output_shape(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2', shape=4)
with ops.Graph().as_default():
features = {
'price1': [[1., 2.], [5., 6.]],
'price2': [[3., 4., 5., 6.], [7., 8., 9., 10.]]
}
dense_features = df.DenseFeatures([price1, price2])
self.assertEqual((None, 6), dense_features.compute_output_shape((None,)))
net = dense_features(features)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[1., 2., 3., 4., 5., 6.], [5., 6., 7., 8., 9., 10.]],
self.evaluate(net))
def test_raises_if_shape_mismatch(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
with self.assertRaisesRegexp(
Exception,
r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
df.DenseFeatures([price])(features)
def test_reshaping(self):
price = fc.numeric_column('price', shape=[1, 2])
with ops.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
net = df.DenseFeatures([price])(features)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))
def test_multi_column(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
net = df.DenseFeatures([price1, price2])(features)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))
def test_cols_to_output_tensors(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
cols_dict = {}
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
dense_features = df.DenseFeatures([price1, price2])
net = dense_features(features, cols_dict)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[1., 2.], [5., 6.]],
self.evaluate(cols_dict[price1]))
self.assertAllClose([[3.], [4.]], self.evaluate(cols_dict[price2]))
self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))
def test_column_order(self):
price_a = fc.numeric_column('price_a')
price_b = fc.numeric_column('price_b')
with ops.Graph().as_default():
features = {
'price_a': [[1.]],
'price_b': [[3.]],
}
net1 = df.DenseFeatures([price_a, price_b])(features)
net2 = df.DenseFeatures([price_b, price_a])(features)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[1., 3.]], self.evaluate(net1))
self.assertAllClose([[1., 3.]], self.evaluate(net2))
def test_fails_for_categorical_column(self):
animal = fc.categorical_column_with_identity('animal', num_buckets=4)
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
with self.assertRaisesRegexp(Exception, 'must be a .*DenseColumn'):
df.DenseFeatures([animal])(features)
def test_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1.], [5.], [7.]], # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
with self.assertRaisesRegexp(
ValueError,
r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
df.DenseFeatures([price1, price2])(features)
def test_subset_of_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
price3 = fc.numeric_column('price3')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]], # batchsize = 2
'price3': [[3.], [4.], [5.]] # batchsize = 3
}
with self.assertRaisesRegexp(
ValueError,
r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
df.DenseFeatures([price1, price2, price3])(features)
def test_runtime_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
net = df.DenseFeatures([price1, price2])(features)
with _initialized_session() as sess:
with self.assertRaisesRegexp(errors.OpError,
'Dimensions of inputs should match'):
sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})
def test_runtime_batch_size_matches(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
}
net = df.DenseFeatures([price1, price2])(features)
with _initialized_session() as sess:
sess.run(
net,
feed_dict={
features['price1']: [[1.], [5.]],
features['price2']: [[1.], [5.]],
})
def test_multiple_layers_with_same_embedding_column(self):
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5)
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10)
with ops.Graph().as_default():
features = {
'sparse_feature': [['a'], ['x']],
}
all_cols = [some_embedding_column]
df.DenseFeatures(all_cols)(features)
df.DenseFeatures(all_cols)(features)
# Make sure that 2 variables get created in this case.
self.assertEqual(2,
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
expected_var_names = [
'dense_features/sparse_feature_embedding/embedding_weights:0',
'dense_features_1/sparse_feature_embedding/embedding_weights:0'
]
self.assertItemsEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
@test_util.run_deprecated_v1
def test_multiple_layers_with_same_shared_embedding_column(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_b, embedding_column_a = fc.shared_embedding_columns_v2(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
with ops.Graph().as_default():
features = {
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
all_cols = [embedding_column_a, embedding_column_b]
df.DenseFeatures(all_cols)(features)
df.DenseFeatures(all_cols)(features)
# Make sure that only 1 variable gets created in this case.
self.assertEqual(1,
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
self.assertItemsEqual(
['aaa_bbb_shared_embedding:0'],
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
@test_util.run_deprecated_v1
def test_multiple_layers_with_same_shared_embedding_column_diff_graphs(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_b, embedding_column_a = fc.shared_embedding_columns_v2(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
all_cols = [embedding_column_a, embedding_column_b]
with ops.Graph().as_default():
features = {
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
df.DenseFeatures(all_cols)(features)
# Make sure that only 1 variable gets created in this case.
self.assertEqual(1,
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
with ops.Graph().as_default():
features1 = {
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
df.DenseFeatures(all_cols)(features1)
# Make sure that only 1 variable gets created in this case.
self.assertEqual(1,
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
self.assertItemsEqual(
['aaa_bbb_shared_embedding:0'],
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
@test_util.run_deprecated_v1
def test_with_1d_sparse_tensor(self):
embedding_values = (
(1., 2., 3., 4., 5.), # id 0
(6., 7., 8., 9., 10.), # id 1
(11., 12., 13., 14., 15.) # id 2
)
def _initializer(shape, dtype, partition_info=None):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in dense_features
price = fc.numeric_column('price')
# one_hot_body_style has 3 dims in dense_features.
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
one_hot_body_style = fc.indicator_column(body_style)
    # embedded_country has 5 dims in dense_features.
country = fc.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
embedded_country = fc.embedding_column(
country, dimension=5, initializer=_initializer)
# Provides 1-dim tensor and dense tensor.
features = {
'price':
constant_op.constant([
11.,
12.,
]),
'body-style':
sparse_tensor.SparseTensor(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,)),
# This is dense tensor for the categorical_column.
'country':
constant_op.constant(['CA', 'US']),
}
self.assertEqual(1, features['price'].shape.ndims)
self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
self.assertEqual(1, features['country'].shape.ndims)
net = df.DenseFeatures([price, one_hot_body_style, embedded_country])(
features)
self.assertEqual(1 + 3 + 5, net.shape[1])
with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order.
self.assertAllEqual([[0., 0., 1., 11., 12., 13., 14., 15., 11.],
[1., 0., 0., 1., 2., 3., 4., 5., 12.]],
sess.run(net))
@test_util.run_deprecated_v1
def test_with_1d_unknown_shape_sparse_tensor(self):
embedding_values = (
(1., 2.), # id 0
(6., 7.), # id 1
(11., 12.) # id 2
)
def _initializer(shape, dtype, partition_info=None):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in dense_features
price = fc.numeric_column('price')
# one_hot_body_style has 3 dims in dense_features.
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
one_hot_body_style = fc.indicator_column(body_style)
    # embedded_country has 2 dims in dense_features.
country = fc.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
embedded_country = fc.embedding_column(
country, dimension=2, initializer=_initializer)
# Provides 1-dim tensor and dense tensor.
features = {
'price': array_ops.placeholder(dtypes.float32),
'body-style': array_ops.sparse_placeholder(dtypes.string),
# This is dense tensor for the categorical_column.
'country': array_ops.placeholder(dtypes.string),
}
self.assertIsNone(features['price'].shape.ndims)
self.assertIsNone(features['body-style'].get_shape().ndims)
self.assertIsNone(features['country'].shape.ndims)
price_data = np.array([11., 12.])
body_style_data = sparse_tensor.SparseTensorValue(
indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
country_data = np.array([['US'], ['CA']])
net = df.DenseFeatures([price, one_hot_body_style, embedded_country])(
features)
self.assertEqual(1 + 3 + 2, net.shape[1])
with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order.
self.assertAllEqual(
[[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
sess.run(
net,
feed_dict={
features['price']: price_data,
features['body-style']: body_style_data,
features['country']: country_data
}))
@test_util.run_deprecated_v1
def test_with_rank_0_feature(self):
# price has 1 dimension in dense_features
price = fc.numeric_column('price')
features = {
'price': constant_op.constant(0),
}
self.assertEqual(0, features['price'].shape.ndims)
# Static rank 0 should fail
with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
df.DenseFeatures([price])(features)
# Dynamic rank 0 should fail
features = {
'price': array_ops.placeholder(dtypes.float32),
}
net = df.DenseFeatures([price])(features)
self.assertEqual(1, net.shape[1])
with _initialized_session() as sess:
with self.assertRaisesOpError('Feature .* cannot have rank 0'):
sess.run(net, feed_dict={features['price']: np.array(1)})
if __name__ == '__main__':
test.main()
```
#### File: python/training/quantize_training_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import quantize_training
from tensorflow.python.training import saver as saver_module
class PywrapQuantizeTrainingTest(test.TestCase):
# Mainly to verify the python interface is working.
# More tests for this function can be found in the related c++ tests.
def testQuantizeTraining(self):
with session.Session() as sess:
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
self.assertEquals(c.eval(), 42.0)
self.assertEquals(len(sess.graph_def.node), 3)
result = quantize_training.do_quantize_training_on_graphdef(
sess.graph_def, 8)
# We just want to guarantee that some rewrite happened.
self.assertGreater(len(result.node), 3)
# Test that save/restoring works for EMA variables generated in the
# quantized training rewrite.
@test_util.run_v1_only('b/120545219')
def testQuantizedSaveRestore(self):
save_path = os.path.join(self.get_temp_dir(), 'quantized_save_restore')
g = ops.Graph()
with session.Session(graph=g) as sess:
a = constant_op.constant(6.0, shape=[1, 1], name='a')
b = variables.VariableV1(
constant_op.constant(7.0, shape=[1, 1]), name='b')
c = math_ops.matmul(a, b, name='matmul')
init_op = variables.global_variables_initializer()
saver = saver_module.Saver({'b': b})
result = quantize_training.do_quantize_training_on_graphdef(
sess.graph_def, 8)
with ops.Graph().as_default() as g, session.Session(graph=g) as sess:
_ = importer.import_graph_def(result, name='')
# Initialize the variable.
self.evaluate(g.get_operation_by_name(init_op.name))
# Run the graph for one step to assign values to the quantization min/max
# variables.
self.evaluate(g.get_tensor_by_name(c.name))
saver.save(sess, save_path)
with ops.Graph().as_default() as g, session.Session(graph=g) as sess:
_ = importer.import_graph_def(result, name='')
      # When we restore the saved variables, the quantization variables should
      # be restored as well.
saver.restore(sess, save_path)
self.assertEquals(7.0, sess.run(g.get_tensor_by_name('b:0')))
self.assertEquals(6.0, sess.run(g.get_tensor_by_name('a/Min/Variable:0')))
self.assertEquals(6.0, sess.run(g.get_tensor_by_name('a/Max/Variable:0')))
self.assertEquals(7.0,
sess.run(g.get_tensor_by_name('b/read/Min/Variable:0')))
self.assertEquals(7.0,
sess.run(g.get_tensor_by_name('b/read/Max/Variable:0')))
if __name__ == '__main__':
test.main()
``` |
{
"source": "joshzarrabi/covid-publishing-api",
"score": 3
} |
#### File: joshzarrabi/covid-publishing-api/config.py
```python
from app.utils.logging import file_logger, client_logger
from decouple import config as env_conf
import logging
# To use this config, set up a local Postgres server on 127.0.0.1 port 5432, make a database
# called "data", and create a user named "postgres" with no password and all privileges.
class LocalPSQLConfig:
@property
def SQLALCHEMY_DATABASE_URI(self):
DB_USER = 'postgres'
DB_PASSWORD = ''
DB_HOST = '127.0.0.1'
DB_PORT = '5432'
DB_NAME = 'data'
return 'postgresql+psycopg2://{}:{}@{}:{}/{}'.\
format(DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = True
SECRET_KEY = env_conf("SECRET_KEY", cast=str, default="12345")
# by default, access tokens do not expire
JWT_ACCESS_TOKEN_EXPIRES = env_conf('JWT_ACCESS_TOKEN_EXPIRES', cast=int, default=False)
API_WEBHOOK_URL = env_conf('API_WEBHOOK_URL', cast=str, default='')
SLACK_API_TOKEN = env_conf('SLACK_API_TOKEN', cast=str, default='')
SLACK_CHANNEL = env_conf('SLACK_CHANNEL', cast=str, default='')
@staticmethod
def init_app(app):
# The default Flask logger level is set at ERROR, so if you want to see
# INFO level or DEBUG level logs, you need to lower the main loggers
# level first.
app.logger.setLevel(logging.DEBUG)
app.logger.addHandler(file_logger)
app.logger.addHandler(client_logger)
class Production:
@property
def SQLALCHEMY_DATABASE_URI(self):
DB_USER = env_conf('DATABASE_USER')
DB_PASSWORD = env_conf('DATABASE_PASS')
DB_HOST = env_conf('DATABASE_HOST')
DB_PORT = env_conf('DATABASE_PORT')
DB_NAME = env_conf('DATABASE_NAME')
return 'postgresql+psycopg2://{}:{}@{}:{}/{}'.\
format(DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = True
SECRET_KEY = env_conf("SECRET_KEY", cast=str, default="12345")
# by default, access tokens do not expire
JWT_ACCESS_TOKEN_EXPIRES = env_conf('JWT_ACCESS_TOKEN_EXPIRES', cast=int, default=False)
API_WEBHOOK_URL = env_conf('API_WEBHOOK_URL', cast=str, default='')
SLACK_API_TOKEN = env_conf('SLACK_API_TOKEN', cast=str, default='')
SLACK_CHANNEL = env_conf('SLACK_CHANNEL', cast=str, default='')
@staticmethod
def init_app(app):
# The default Flask logger level is set at ERROR, so if you want to see
# INFO level or DEBUG level logs, you need to lower the main loggers
# level first.
app.logger.setLevel(logging.DEBUG)
app.logger.addHandler(file_logger)
app.logger.addHandler(client_logger)
class Testing:
"""Configuration for running the test suite"""
TESTING = True
DEBUG = True
@property
def SQLALCHEMY_DATABASE_URI(self):
DB_USER = env_conf('DATABASE_USER')
DB_PASSWORD = env_conf('DATABASE_PASS')
DB_HOST = env_conf('DATABASE_HOST')
DB_PORT = env_conf('DATABASE_PORT')
DB_NAME = env_conf('DATABASE_NAME')
return 'postgresql+psycopg2://{}:{}@{}:{}/{}'.\
format(DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = True
SECRET_KEY = env_conf("SECRET_KEY", cast=str, default="12345")
# by default, access tokens do not expire
JWT_ACCESS_TOKEN_EXPIRES = env_conf('JWT_ACCESS_TOKEN_EXPIRES', cast=int, default=False)
API_WEBHOOK_URL = env_conf('API_WEBHOOK_URL', cast=str, default='')
SLACK_API_TOKEN = env_conf('SLACK_API_TOKEN', cast=str, default='')
SLACK_CHANNEL = env_conf('SLACK_CHANNEL', cast=str, default='')
@staticmethod
def init_app(app):
# The default Flask logger level is set at ERROR, so if you want to see
# INFO level or DEBUG level logs, you need to lower the main loggers
# level first.
app.logger.setLevel(logging.DEBUG)
app.logger.addHandler(file_logger)
app.logger.addHandler(client_logger)
class Develop:
"""Development config geared towards docker."""
@property
def SQLALCHEMY_DATABASE_URI(self):
DB_USER = "deploy"
DB_PASSWORD = "<PASSWORD>"
DB_HOST = "db"
DB_PORT = "5432"
DB_NAME = "stories"
return 'postgresql+psycopg2://{}:{}@{}:{}/{}'.\
format(DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = True
API_WEBHOOK_URL = env_conf('API_WEBHOOK_URL', cast=str, default='')
SLACK_API_TOKEN = env_conf('SLACK_API_TOKEN', cast=str, default='')
SLACK_CHANNEL = env_conf('SLACK_CHANNEL', cast=str, default='')
# DEBUG = True
# API configurations
SECRET_KEY = env_conf("SECRET_KEY", cast=str, default="12345")
# by default, access tokens do not expire
JWT_ACCESS_TOKEN_EXPIRES = env_conf('JWT_ACCESS_TOKEN_EXPIRES', cast=int, default=False)
@staticmethod
def init_app(app):
"""Initiates application."""
app.logger.setLevel(logging.DEBUG)
app.logger.addHandler(client_logger)
app.logger.addHandler(file_logger)
```
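The config classes above expose `SQLALCHEMY_DATABASE_URI` as a property and provide an `init_app` hook, so they are meant to be instantiated and handed to Flask. A minimal wiring sketch under that assumption; the `create_app` factory and the `flask_sqlalchemy` setup shown here are illustrative, not code from this repository:

```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

from config import LocalPSQLConfig  # or Production / Testing / Develop

db = SQLAlchemy()

def create_app(config_class=LocalPSQLConfig):
    app = Flask(__name__)
    # Instantiate the class so the SQLALCHEMY_DATABASE_URI property is resolved.
    config = config_class()
    app.config.from_object(config)
    config_class.init_app(app)  # attach the file/client log handlers
    db.init_app(app)
    return app
```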
#### File: migrations/versions/cc91b057d1ee_renaming_states_tab_2_columns.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
# Manually written to do a rename instead of the Alembic-autogenerated drop/add
def upgrade():
op.alter_column('states', 'totalTestResultsColumns', new_column_name='covidTrackingProjectPreferredTotalTestField')
op.alter_column('states', 'totalTestResultsUnits', new_column_name='covidTrackingProjectPreferredTotalTestUnits')
def downgrade():
op.alter_column('states', 'covidTrackingProjectPreferredTotalTestField', new_column_name='totalTestResultsColumns')
op.alter_column('states', 'covidTrackingProjectPreferredTotalTestUnits', new_column_name='totalTestResultsUnits')
```
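For contrast with the hand-written rename above: an Alembic-autogenerated migration would typically drop and re-create the columns, discarding their data, which is why the rename was written manually. A rough sketch of the autogenerated style, assuming the same `op`/`sa` imports as the file above; the column type is an assumption for illustration:

```python
def upgrade():
    # Autogenerated style: data in the old columns would be lost.
    op.drop_column('states', 'totalTestResultsColumns')
    op.drop_column('states', 'totalTestResultsUnits')
    op.add_column('states', sa.Column(
        'covidTrackingProjectPreferredTotalTestField', sa.String(), nullable=True))
    op.add_column('states', sa.Column(
        'covidTrackingProjectPreferredTotalTestUnits', sa.String(), nullable=True))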
#### File: migrations/versions/da977436076c_rename_date_to_datadate.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'da977436076c'
down_revision = '0d412b93<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
op.alter_column('coreData', 'date', nullable=False, new_column_name='dataDate')
def downgrade():
op.alter_column('coreData', 'dataDate', nullable=False, new_column_name='date')
```
#### File: tests/app/editdiff_test.py
```python
from datetime import datetime
import pytz
from app import db
from app.models.data import Batch, State, CoreData
from app.utils.editdiff import EditDiff, ChangedValue, ChangedRow
def test_editdiff(app):
with app.app_context():
changed_values = [ChangedValue(field="positive", old=123, new=456), ChangedValue(field="negative", old=555, new=666)]
changed_rows = [ChangedRow(date="20200903", state="CA", changed_values=changed_values)]
ed = EditDiff(changed_rows, None)
output = ed.plain_text_format()
assert "Rows edited: 1" in output
assert "CA 20200903" in output
assert "positive: 456 (was 123)" in output
assert "New rows" not in output
assert not output.startswith("\n") # ensure no leading newlines
nys = State(state='NY', totalTestResultsFieldDbColumn="posNeg")
bat = Batch(batchNote='test', createdAt=datetime.now(),
isPublished=False, isRevision=False)
db.session.add(bat)
db.session.add(nys)
db.session.flush()
date1 = datetime(2020, 5, 4, 20, 3, tzinfo=pytz.UTC)
core_data_row = CoreData(
lastUpdateIsoUtc=date1.isoformat(), dateChecked=date1.isoformat(),
date=date1, state='NY', batchId=bat.batchId,
positive=20, negative=5)
date2 = datetime(2020, 5, 5, 20, 3, tzinfo=pytz.UTC)
core_data_row2 = CoreData(
lastUpdateIsoUtc=date2.isoformat(), dateChecked=date2.isoformat(),
date=date2, state='NY', batchId=bat.batchId,
positive=25, negative=5)
new_rows = [core_data_row, core_data_row2]
ed = EditDiff(changed_rows, new_rows)
output = ed.plain_text_format()
assert "Rows edited: 1" in output
assert "New rows: 2" in output
assert f"NY {date1.strftime('%Y-%m-%d')}\nNY {date2.strftime('%Y-%m-%d')}" in output
```
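A minimal usage sketch of the same `EditDiff` API outside a test, mirroring the objects built above. The exact layout is whatever `plain_text_format` produces; the assertions above only pin down fragments such as "Rows edited: 1" and "positive: 456 (was 123)":

```python
from app.utils.editdiff import ChangedRow, ChangedValue, EditDiff

changed_rows = [
    ChangedRow(
        date="20200903",
        state="CA",
        changed_values=[ChangedValue(field="positive", old=123, new=456)],
    )
]
summary = EditDiff(changed_rows, None).plain_text_format()
print(summary)  # e.g. mentions "Rows edited: 1", "CA 20200903", "positive: 456 (was 123)"
```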
#### File: tests/app/edit_test.py
```python
from flask import json, jsonify
from app import db
from app.api.data import any_existing_rows
from app.models.data import *
from common import *
import datetime
def test_edit_state_metadata(app, headers, requests_mock):
client = app.test_client()
# write some initial data
example_filename = os.path.join(os.path.dirname(__file__), 'data.json')
with open(example_filename) as f:
payload_json_str = f.read()
resp = client.post(
"/api/v1/batches",
data=payload_json_str,
content_type='application/json',
headers=headers)
assert resp.status_code == 201
# we should've written 56 states, 4 core data rows, 1 batch
resp = client.get('/api/v1/public/states/info')
assert len(resp.json) == 56
assert resp.json[0]['state'] == "AK"
assert resp.json[0]['twitter'] == "@Alaska_DHSS"
# make a states metadata edit request updating the twitter account for AK
state_data = {
'states': [{
'state': 'AK',
'twitter': 'AlaskaNewTwitter'
}]
}
# ensure the webhook is called on edit
webhook_url = 'http://example.com/web/hook'
app.config['API_WEBHOOK_URL'] = webhook_url
requests_mock.get(webhook_url, json={'it': 'worked'})
resp = client.post(
"/api/v1/states/edit",
data=json.dumps(state_data),
content_type='application/json',
headers=headers)
assert resp.status_code == 201
assert len(resp.json['states']) == 1
assert resp.json['states'][0]['state'] == "AK"
assert resp.json['states'][0]['twitter'] == "AlaskaNewTwitter"
assert requests_mock.call_count == 1
def test_edit_core_data(app, headers, slack_mock, requests_mock):
client = app.test_client()
# Write a batch containing the above data, two days for NY and WA, publish it
resp = client.post(
"/api/v1/batches",
data=json.dumps(daily_push_ny_wa_two_days()),
content_type='application/json',
headers=headers)
assert resp.status_code == 201
batch_id = resp.json['batch']['batchId']
assert slack_mock.chat_postMessage.call_count == 1
# Publish the new batch
resp = client.post("/api/v1/batches/{}/publish".format(batch_id), headers=headers)
assert resp.status_code == 201
assert slack_mock.chat_postMessage.call_count == 2
# make an edit batch for NY for yesterday
# ensure the webhook is called on edit
webhook_url = 'http://example.com/web/hook'
app.config['API_WEBHOOK_URL'] = webhook_url
requests_mock.get(webhook_url, json={'it': 'worked'})
resp = client.post(
"/api/v1/batches/edit",
data=json.dumps(edit_push_ny_yesterday()),
content_type='application/json',
headers=headers)
assert resp.status_code == 201
assert slack_mock.chat_postMessage.call_count == 3
assert requests_mock.call_count == 1
batch_id = resp.json['batch']['batchId']
assert resp.json['batch']['user'] == 'testing'
# test that getting the states daily for NY has the UNEDITED data for yesterday
resp = client.get("/api/v1/public/states/NY/daily")
assert len(resp.json) == 2
unedited = resp.json
for day_data in resp.json:
assert day_data['date'] in ['2020-05-25', '2020-05-24']
if day_data['date'] == '2020-05-25':
assert day_data['positive'] == 20
assert day_data['negative'] == 5
elif day_data['date'] == '2020-05-24':
assert day_data['positive'] == 15
assert day_data['negative'] == 4
# Publish the edit batch
resp = client.post("/api/v1/batches/{}/publish".format(batch_id), headers=headers)
assert resp.status_code == 201
# test that getting the states daily for NY has the edited data for yesterday
resp = client.get("/api/v1/public/states/NY/daily")
assert len(resp.json) == 2
for day_data in resp.json:
assert day_data['date'] in ['2020-05-25', '2020-05-24']
if day_data['date'] == '2020-05-25':
assert day_data['positive'] == 20
assert day_data['negative'] == 5
elif day_data['date'] == '2020-05-24':
assert day_data['positive'] == 16
assert day_data['negative'] == 4
def test_edit_core_data_from_states_daily_empty(app, headers, slack_mock, requests_mock):
client = app.test_client()
# Write a batch containing the above data, two days for NY and WA, publish it
resp = client.post(
"/api/v1/batches",
data=json.dumps(daily_push_ny_wa_two_days()),
content_type='application/json',
headers=headers)
assert resp.status_code == 201
batch_id = resp.json['batch']['batchId']
assert slack_mock.chat_postMessage.call_count == 1
# Publish the new batch
resp = client.post("/api/v1/batches/{}/publish".format(batch_id), headers=headers)
assert resp.status_code == 201
assert slack_mock.chat_postMessage.call_count == 2
# make an empty edit batch for NY for yesterday containing no edits
# ensure the webhook is not called because the edit fails
webhook_url = 'http://example.com/web/hook'
app.config['API_WEBHOOK_URL'] = webhook_url
requests_mock.get(webhook_url, json={'it': 'worked'})
resp = client.post(
"/api/v1/batches/edit_states_daily",
data=json.dumps(edit_push_ny_today_empty()),
content_type='application/json',
headers=headers)
assert resp.status_code == 400
    assert slack_mock.chat_postMessage.call_count == 2  # no new Slack message, since nothing was edited
assert requests_mock.call_count == 0 # should not call the webhook
assert "no edits detected" in resp.data.decode("utf-8")
def test_edit_core_data_from_states_daily(app, headers, slack_mock, requests_mock):
client = app.test_client()
# Write a batch containing the above data, two days for NY and WA, publish it
resp = client.post(
"/api/v1/batches",
data=json.dumps(daily_push_ny_wa_two_days()),
content_type='application/json',
headers=headers)
assert resp.status_code == 201
batch_id = resp.json['batch']['batchId']
assert slack_mock.chat_postMessage.call_count == 1
# Publish the new batch
resp = client.post("/api/v1/batches/{}/publish".format(batch_id), headers=headers)
assert resp.status_code == 201
assert slack_mock.chat_postMessage.call_count == 2
assert slack_mock.files_upload.call_count == 0
# make an edit batch for NY for yesterday, and leave today alone
    # ensure the webhook is called on the successful edit
webhook_url = 'http://example.com/web/hook'
app.config['API_WEBHOOK_URL'] = webhook_url
requests_mock.get(webhook_url, json={'it': 'worked'})
resp = client.post(
"/api/v1/batches/edit_states_daily",
data=json.dumps(edit_push_ny_yesterday_unchanged_today()),
content_type='application/json',
headers=headers)
assert resp.status_code == 201
assert slack_mock.chat_postMessage.call_count == 3
assert slack_mock.files_upload.call_count == 1
assert requests_mock.call_count == 1
assert "state: NY" in slack_mock.chat_postMessage.call_args[1]['text']
assert "Rows edited: 1" in slack_mock.files_upload.call_args[1]['content']
assert "NY 2020-05-24" in slack_mock.files_upload.call_args[1]['content']
assert "positive: 16 (was 15)" in slack_mock.files_upload.call_args[1]['content']
assert "inIcuCurrently: None (was 37)" in slack_mock.files_upload.call_args[1]['content']
batch_id = resp.json['batch']['batchId']
assert resp.json['batch']['user'] == 'testing'
# we've changed positive and removed inIcuCurrently, so both should count as changed
assert len(resp.json['changedFields']) == 2
assert 'positive' in resp.json['changedFields']
assert 'inIcuCurrently' in resp.json['changedFields']
assert resp.json['changedDates'] == '5/24/20'
# confirm that the edit batch only contains one row with yesterday's data
with app.app_context():
batch_obj = Batch.query.get(batch_id)
assert len(batch_obj.coreData) == 1
assert batch_obj.coreData[0].date == datetime.date(2020,5,24)
assert batch_obj.coreData[0].state == 'NY'
assert batch_obj.link == 'https://example.com'
assert batch_obj.user == 'testing'
assert batch_obj.logCategory == 'State Updates'
# make sure metadata got saved correctly also. This should match the above test cases
# in the returned JSON
assert 'positive' in batch_obj.changedFields
assert 'inIcuCurrently' in batch_obj.changedFields
assert batch_obj.changedDatesMin == datetime.date(2020,5,24)
assert batch_obj.changedDatesMax == datetime.date(2020,5,24)
assert batch_obj.numRowsEdited == 1
# getting the states daily for NY has the edited data for yesterday and unchanged for today,
# and the last batch should've been published as part of the "edit from states daily" endpoint
resp = client.get("/api/v1/public/states/NY/daily")
assert len(resp.json) == 2
for day_data in resp.json:
assert day_data['date'] in ['2020-05-25', '2020-05-24']
if day_data['date'] == '2020-05-25':
assert day_data['positive'] == 20
assert day_data['negative'] == 5
assert day_data['inIcuCurrently'] == 33
elif day_data['date'] == '2020-05-24':
assert day_data['positive'] == 16
assert day_data['negative'] == 4
# this value was blanked out in the edit, so it should be removed now
assert 'inIcuCurrently' not in day_data
# test editing 2 non-consecutive dates
resp = client.post(
"/api/v1/batches/edit_states_daily",
data=json.dumps(edit_push_ny_today_and_before_yesterday()),
content_type='application/json',
headers=headers)
assert resp.json['changedFields'] == ['inIcuCurrently']
assert resp.json['changedDates'] == '5/20/20 - 5/25/20'
assert resp.json['numRowsEdited'] == 2
assert resp.json['user'] == 'testing'
# check to see if the row for the new date (BEFORE_YESTERDAY) was added
resp = client.get("/api/v1/public/states/NY/daily")
found_new_date = False
for day_data in resp.json:
if day_data['date'] == '2020-05-20':
found_new_date = True
assert day_data['positive'] == 10
assert day_data['negative'] == 2
assert found_new_date is True
# the slack notification should note the addition of the new row
assert "New rows: 1" in slack_mock.files_upload.call_args[1]['content']
assert "NY 2020-05-20" in slack_mock.files_upload.call_args[1]['content']
# test that sending an edit batch with multiple states fails
resp = client.post(
"/api/v1/batches/edit_states_daily",
data=json.dumps(edit_push_multiple_states()),
content_type='application/json',
headers=headers)
assert resp.status_code == 400
# test that sending an edit batch with no CoreData rows fails
bad_data = edit_push_multiple_states()
bad_data['coreData'] = []
resp = client.post(
"/api/v1/batches/edit_states_daily",
data=json.dumps(bad_data),
content_type='application/json',
headers=headers)
assert resp.status_code == 400
def test_edit_core_data_from_states_daily_timestamps_only(app, headers, slack_mock):
client = app.test_client()
# Write a batch containing the above data, two days for NY and WA, publish it
resp = client.post(
"/api/v1/batches",
data=json.dumps(daily_push_ny_wa_two_days()),
content_type='application/json',
headers=headers)
assert resp.status_code == 201
batch_id = resp.json['batch']['batchId']
assert slack_mock.chat_postMessage.call_count == 1
# Publish the new batch
resp = client.post("/api/v1/batches/{}/publish".format(batch_id), headers=headers)
assert resp.status_code == 201
assert slack_mock.chat_postMessage.call_count == 2
# make an edit batch for NY for yesterday, and leave today alone
resp = client.post(
"/api/v1/batches/edit_states_daily",
data=json.dumps(edit_push_ny_yesterday_change_only_timestamp()),
content_type='application/json',
headers=headers)
assert resp.status_code == 201
assert slack_mock.chat_postMessage.call_count == 3
assert "state: NY" in slack_mock.chat_postMessage.call_args[1]['text']
batch_id = resp.json['batch']['batchId']
assert resp.json['batch']['user'] == 'testing'
# we've changed only lastUpdateIsoUtc, which is lastUpdateTime on output
assert len(resp.json['changedFields']) == 1
assert 'lastUpdateTime' in resp.json['changedFields']
def test_edit_core_data_from_states_daily_partial_update(app, headers, slack_mock):
    ''' Verify that when sending only part of the fields, these fields
        are updated, and the others are set to the values from the most
        recent published batch
    '''
# setup
client = app.test_client()
# prep
# Write a batch containing the above data, two days for NY and WA, publish it
resp = client.post(
"/api/v1/batches",
data=json.dumps(daily_push_ny_wa_two_days()),
content_type='application/json',
headers=headers)
assert resp.status_code == 201
batch_id = resp.json['batch']['batchId']
assert slack_mock.chat_postMessage.call_count == 1
# Publish the new batch
resp = client.post("/api/v1/batches/{}/publish".format(batch_id), headers=headers)
assert resp.status_code == 201
assert slack_mock.chat_postMessage.call_count == 2
# test
# make an edit batch for NY for yesterday, and leave today alone
resp = client.post(
"/api/v1/batches/edit_states_daily",
data=json.dumps(edit_push_ny_yesterday_change_only_positive()),
content_type='application/json',
headers=headers)
# verify
assert resp.status_code == 201
assert slack_mock.chat_postMessage.call_count == 3
assert "state: NY" in slack_mock.chat_postMessage.call_args[1]['text']
batch_id = resp.json['batch']['batchId']
assert resp.json['batch']['user'] == 'testing'
# submitted a single field, and that's the only field that should change
assert len(resp.json['changedFields']) == 1
assert 'positive' in resp.json['changedFields']
    # test that getting the states daily for NY now has the edited positive value for yesterday
resp = client.get("/api/v1/public/states/NY/daily")
assert len(resp.json) == 2
yesterday = resp.json[1]
assert yesterday['date'] == '2020-05-24'
assert yesterday['positive'] == 16
assert yesterday['negative'] == 4
assert yesterday['inIcuCurrently'] == 37
# test
# make exactly the same edit
resp = client.post(
"/api/v1/batches/edit_states_daily",
data=json.dumps(edit_push_ny_yesterday_change_only_positive()),
content_type='application/json',
headers=headers)
# verify
assert resp.status_code == 400
assert slack_mock.chat_postMessage.call_count == 3
assert "no edits detected" in resp.data.decode("utf-8")
def test_edit_with_valid_and_unknown_fields(app, headers, slack_mock):
    ''' Verify that an edit (or insert) request that contains no fields
        belonging to the object is rejected
    '''
# setup
client = app.test_client()
# prep
# Write a batch containing data for NY, WA for 2 days
resp = client.post(
"/api/v1/batches",
data=json.dumps(daily_push_ny_wa_two_days()),
content_type='application/json',
headers=headers)
assert resp.status_code == 201
batch_id = resp.json['batch']['batchId']
assert slack_mock.chat_postMessage.call_count == 1
# Publish the new batch
resp = client.post("/api/v1/batches/{}/publish".format(batch_id), headers=headers)
assert resp.status_code == 201
assert slack_mock.chat_postMessage.call_count == 2
# test
# make an edit batch without any significant field
resp = client.post(
"/api/v1/batches/edit_states_daily",
data=json.dumps(edit_unknown_fields()),
content_type='application/json',
headers=headers)
# verify: nothing was edited
assert resp.status_code == 400
assert slack_mock.chat_postMessage.call_count == 2
assert "no edits detected" in resp.data.decode("utf-8")
``` |
{
"source": "JoshZastrow/personality-analysis",
"score": 3
} |
#### File: JoshZastrow/personality-analysis/main.py
```python
import streamlit as st
import numpy as np
import pandas as pd
import altair as alt
import matplotlib.pyplot as plt
from typing import NamedTuple
from collections import namedtuple as nt
from dataclasses import dataclass
from wordcloud import WordCloud
from components.wordcloud import GroupedColorFunc
@dataclass
class SourceData:
assessments: pd.DataFrame
traits: pd.DataFrame
users: pd.DataFrame
"""
assessments:
assessment_id <int>
domain <str>
facet <str>
asessee_id <int>
assessor_id <int>
assessee_name <str>
assessor_name <str>
rank <float>
score <float>
traits:
trait_id <int>
domain <str>
facet <str>
score <str>
description <str>
users:
user_id <int>
first_name <str>
last_name <str>
"""
st.header("Personality Analysis")
@st.cache
def load_data() -> SourceData:
ASSESSMENT_FPATH = "assets/big_5_data - assessments.csv"
TRAITS_FPATH = "assets/big_5_data - traits.csv"
USERS_FPATH = "assets/big_5_data - users.csv"
assessments = pd.read_csv(ASSESSMENT_FPATH, index_col="assessment_id")
traits = pd.read_csv(TRAITS_FPATH)
users = pd.read_csv(USERS_FPATH, index_col="user_id")
return SourceData(assessments, traits, users)
data = load_data()
users = data.users
assessments = data.assessments
traits = data.traits
del data
person_1 = st.selectbox("Select Candidate", users["first_name"].values)
person_2 = st.selectbox("Select Observer", users["first_name"].values)
col1, col2 = st.columns(2)
def build_wordcloud(assessments: pd.DataFrame, person: str):
domains = assessments.domain.unique()
facets = (
assessments.drop_duplicates(subset=["facet"])
.loc[lambda d: d.domain != d.facet]
.loc[:, ["domain", "facet"]]
)
colors = ["steelblue", "orange", "silver", "red", "yellow"]
color_to_words = {
c: facets.loc[lambda x: x.domain == d, "facet"].to_list()
for c, d in zip(colors, domains)
}
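    # One color per Big Five domain, so each facet is tinted by its parent domain.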
default_color = "grey"
data = assessments.loc[lambda d: d.domain != d.facet].loc[
lambda d: d.assessee_name == person
]
word_list = []
    for _, row in data.iterrows():
        # Repeat each facet name in proportion to its rank so higher-ranked
        # facets render larger; cast to int in case the rank column is float.
        word_list.extend([row.facet] * int(row["rank"]))
np.random.shuffle(word_list)
text = " ".join(word_list)
wordcloud = WordCloud().generate(text)
colorizer = GroupedColorFunc(color_to_words, default_color)
wordcloud.recolor(color_func=colorizer)
fig, ax = plt.subplots(figsize=(10, 5))
ax.imshow(wordcloud, interpolation="bilinear")
ax.axis("off")
return fig
with col1:
fig = build_wordcloud(assessments, person_1)
st.markdown(f"### {person_1}")
st.write(fig)
with col2:
fig = build_wordcloud(assessments, person_2)
st.markdown(f"### {person_2}")
st.write(fig)
def define_metrics(assessments, person, columns) -> None:
source_data = assessments.loc[lambda d: d.domain == d.facet].pivot(
index="domain",
columns=["assessee_name", "assessor_name"],
values="rank",
)
for idx, row in enumerate(source_data.iterrows()):
        domain, data = row
self_review = int(
data[
(data.index.get_level_values(0) == person)
& (data.index.get_level_values(1) == person)
].mean()
)
peer_review = int(
data[
(data.index.get_level_values(0) == person)
& (data.index.get_level_values(1) != person)
].mean()
)
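        # st.metric delta: self-assessment minus the mean peer assessment,
        # with the peer mean shown in parentheses for reference.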
delta = self_review - peer_review
columns[idx].metric(domain, self_review, f"{delta} ({peer_review})")
st.header(f"{person_1}'s Traits")
columns = st.columns(5)
define_metrics(assessments, person_1, columns)
st.header(f"{person_2}'s Traits")
columns = st.columns(5)
define_metrics(assessments, person_2, columns)
# st.markdown(
# """
# There are three things to look at with these metrics. The first being
# how we see ourselves, the second is how we are seen by our other, and the
# third is how personalities influence our assessments.
# ### Under Estimating Each Others Extraversion and Open Mindedness
# One thing that is interesting is that while we both have high levels of
# extraversion, we both gave ourselves higher scores than what we gave each other.
# My own score is in the top 1%, 12% points higher than your assessment (87th percentile).
# Meanwhile, your self-assessment came in at at 78, 40 points higher than my assessment (38).
# One hypothesis is that we are less extroverted when we are around each other, even though we
# both feel more extroverted. This dynamic plays out in the Openness to Experience as well. We are both
# more open to experiences than we give each other credit for.
# ### How Agreeable and Conscientious we see each other
# In both traits, I gave higher marks to myself than you did, and I gave you
# higher marks than you gave yourself. I pegged you at the top 2% of the most
# agreeable people--16 points higher than your own assessment, and I scored you
# 15 points higher on your own rating of conscientiousness. I scored myself higher
# in both these areas as well.
# Perhaps you hold a more realistic perspective on these traits for
# both you and I. Low morality scores may indicate this perspective could be withheld,
# leaving me with a higher sense of optimism. Then again, I do have an inherent sense
# of optimism, a high level of trust combined with dominant extraversion may affect
# my perception of agreeableness for both of us.
# """
# )
# ## MOST UNIQUE CHARACTERISTICS ###########################
# most_diff = (
# combined.sort_values("diff")
# .loc[lambda d: ~(d.domain == d.facet)]
# .tail(5)
# .loc[
# :,
# ["domain", "facet", "rank_josh", "rank_allie", "description"],
# ]
# )
# st.table(most_diff)
# ########################################################
# lollipop_data = assessments.merge(traits, on=["domain", "facet", "score"])
# lollipop = alt.layer(data=lollipop_data).transform_filter(
# filter={"field": "assessee_name", "oneOf": [person_1, person_2]}
# )
# st.table(lollipop_data.head(1))
# for name, group in lollipop_data.groupby(['domain']):
# lollipop += (
# alt.Chart()
# .mark_line(color="grey")
# .encode(
# x="rank:Q",
# y=alt.Y(
# "facet",
# sort=combined.sort_values("rank_josh", ascending=True).facet.values,
# ),
# detail="facet:N",
# tooltip=["description"],
# )
# )
# # Add points for life expectancy in 1955 & 2000
# lollipop += (
# alt.Chart()
# .mark_point(size=100, opacity=1, filled=True)
# .properties(width=800)
# .encode(
# x="rank",
# y=alt.Y(
# "facet",
# sort=combined.sort_values("rank_josh", ascending=True).facet.values,
# ),
# color=alt.Color(
# "name",
# scale=alt.Scale(
# domain=["Josh", "Allie"], range=["skyblue", "maroon"]
# ),
# ),
# tooltip=["description"],
# )
# .interactive()
# )
# # st.table(assessments.head())
# st.altair_chart(lollipop)
# ### RADAR CHARTS ########################################
import plotly.graph_objects as go
domains = assessments.domain.drop_duplicates().to_list()
domain_selection = st.radio("Select Domain:", options=domains)
def build_radar_charts(assessments, person_1, person_2, domain, style="radar"):
source_data = assessments.loc[lambda d: d.domain == domain]
# Josh's perspective of Josh
comp_1 = source_data.loc[
lambda d: (d.assessee_name == person_1) & (d.assessor_name == person_1)
].assign(analysis="{person_1} self review")
# Allie's perspective of Josh
comp_2 = source_data.loc[
lambda d: (d.assessee_name == person_1) & (d.assessor_name == person_2)
].assign(analysis="{person_2} peer review")
# Allie's perspective of Allie
comp_3 = source_data.loc[
lambda d: (d.assessee_name == person_2) & (d.assessor_name == person_2)
].assign(analysis="{person_2} self review")
# Josh's perspective of Allie
comp_4 = source_data.loc[
lambda d: (d.assessee_name == person_2) & (d.assessor_name == person_1)
].assign(analysis="{person_1} peer review")
comp_1 = comp_1.groupby(["facet"]).mean()["rank"].reset_index()
comp_2 = comp_2.groupby(["facet"]).mean()["rank"].reset_index()
comp_3 = comp_3.groupby(["facet"]).mean()["rank"].reset_index()
comp_4 = comp_4.groupby(["facet"]).mean()["rank"].reset_index()
if style == "radar":
fig1 = go.Figure()
# Perpective of Josh
fig1.add_trace(
go.Scatterpolar(
r=comp_1["rank"],
theta=comp_1["facet"],
fill="toself",
name=f"{person_1}'s review ",
)
)
fig1.add_trace(
go.Scatterpolar(
r=comp_3["rank"],
theta=comp_3["facet"],
fill="toself",
name=f"{person_2}'s review",
)
)
fig1.update_layout(
title=dict(text=f"How We See Ourselves"),
polar=dict(radialaxis=dict(visible=True, range=[0, 100])),
showlegend=False,
)
fig2 = go.Figure()
fig2.add_trace(
go.Scatterpolar(
r=comp_2["rank"],
theta=comp_2["facet"],
fill="toself",
name=f"{person_1}",
)
)
fig2.add_trace(
go.Scatterpolar(
r=comp_4["rank"],
theta=comp_4["facet"],
fill="toself",
name=f"{person_2}",
)
)
fig2.update_layout(
title=dict(text=f"How We See Each other"),
polar=dict(radialaxis=dict(visible=True, range=[0, 100])),
showlegend=False,
)
fig3 = go.Figure()
fig3.add_trace(
go.Scatterpolar(
r=comp_1["rank"],
theta=comp_1["facet"],
fill="toself",
name=f"{person_1}",
)
)
fig3.add_trace(
go.Scatterpolar(
r=comp_4["rank"],
theta=comp_4["facet"],
fill="toself",
name=f"{person_2}",
)
)
fig3.update_layout(
title=dict(text=f"How {person_1} sees us"),
polar=dict(radialaxis=dict(visible=True, range=[0, 100])),
showlegend=False,
)
fig4 = go.Figure()
fig4.add_trace(
go.Scatterpolar(
r=comp_2["rank"],
theta=comp_2["facet"],
fill="toself",
name=f"{person_1}",
)
)
fig4.add_trace(
go.Scatterpolar(
r=comp_3["rank"],
theta=comp_3["facet"],
fill="toself",
name=f"{person_2}",
)
)
fig4.update_layout(
title=dict(text=f"How {person_2} sees us"),
polar=dict(radialaxis=dict(visible=True, range=[0, 100])),
showlegend=False,
)
fig5 = go.Figure()
fig5.add_trace(
go.Scatterpolar(
r=comp_1["rank"],
theta=comp_1["facet"],
fill="toself",
name=f"{person_1}",
)
)
fig5.add_trace(
go.Scatterpolar(
r=comp_2["rank"],
theta=comp_2["facet"],
fill="toself",
name=f"{person_2}",
)
)
fig5.update_layout(
title=dict(text=f"Who knows {person_1} best?"),
polar=dict(radialaxis=dict(visible=True, range=[0, 100])),
showlegend=False,
)
fig6 = go.Figure()
fig6.add_trace(
go.Scatterpolar(
r=comp_3["rank"],
theta=comp_3["facet"],
fill="toself",
name=f"{person_1}",
)
)
fig6.add_trace(
go.Scatterpolar(
r=comp_4["rank"],
theta=comp_4["facet"],
fill="toself",
name=f"{person_2}",
)
)
fig6.update_layout(
title=dict(text=f"Who knows {person_2} best?"),
polar=dict(radialaxis=dict(visible=True, range=[0, 100])),
showlegend=False,
)
return fig1, fig2, fig3, fig4, fig5, fig6
fig1, fig2, fig3, fig4, fig5, fig6 = build_radar_charts(
assessments, person_1, person_2, domain_selection
)
col1, col2 = st.columns(2)
with col1:
st.plotly_chart(fig1, use_container_width=True)
with col2:
st.plotly_chart(fig2, use_container_width=True)
col3, col4 = st.columns(2)
with col3:
st.plotly_chart(fig3, use_container_width=True)
with col4:
st.plotly_chart(fig4, use_container_width=True)
col5, col6 = st.columns(2)
with col5:
st.plotly_chart(fig5, use_container_width=True)
with col6:
st.plotly_chart(fig6, use_container_width=True)
def build_comp_table(assessments, person_1, person_2) -> None:
source_data = (
assessments.loc[lambda d: d.domain != d.facet]
.loc[lambda d: d.assessee_name == d.assessor_name]
.pivot(
index="facet",
columns=["assessee_name"],
values="rank",
)
.assign(diff=lambda x: abs(x[person_1] - x[person_2]))
.sort_values("diff", ascending=False)
.reset_index()
# .drop(["diff"], axis=1)
)
return source_data
data = assessments.loc[lambda d: d.domain != d.facet].loc[
lambda d: d.assessee_name == d.assessor_name
]
comp = build_comp_table(assessments, person_1, person_2)
for name, group in assessments.groupby("domain"):
source = group.loc[lambda d: d.assessor_name == person_2].loc[
:, ["assessee_name", "facet", "rank"]
]
# st.table(source)
bars = (
alt.Chart(source)
.mark_bar()
.encode(
y="sum(rank):Q",
x=alt.X("assessee_name:O", title="name"),
color=alt.Color("assessee_name:N", title="test"),
column=alt.Row("facet:N", sort="descending"),
)
.configure_axisX(labelColor="white", tickColor="white")
.configure_header(
titleColor="white",
titleFontSize=14,
labelColor="white",
labelFontSize=10,
)
)
st.altair_chart(bars)
``` |
{
"source": "JoshZastrow/PromiseDate",
"score": 3
} |
#### File: PromiseDate/utils/print_functions.py
```python
def print_columns(cols, selected):
for c in range(0, len(cols) - 4, 4):
for i in range(4):
if cols[c + i] in selected:
print('\x1b[1;31m{:30}\x1b[0m'.format(cols[c + i]), end='')
else:
print('{:30}'.format(cols[c + i]), end='')
print()
``` |
{
"source": "JoshZero87/site",
"score": 2
} |
#### File: bsd/templatetags/bsd_tags.py
```python
from __future__ import unicode_literals
from django import template
from bsd.models import get_bsd_event_url
register = template.Library()
@register.simple_tag
def bsd_event_url(event_id_obfuscated):
return get_bsd_event_url(event_id_obfuscated)
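# Illustrative template usage (assumed, not part of this file): load the tag library
# and pass an event's obfuscated id to build its BSD event URL.
#
#     {% load bsd_tags %}
#     <a href="{% bsd_event_url event.event_id_obfuscated %}">View event</a>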
```
#### File: site/calls/signals.py
```python
from __future__ import unicode_literals
from django.db import transaction
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from contacts.models import add_phone_opt_out, OptOutType
from .models import CallAnswer, CallQuestion, CallResponse
import logging
logger = logging.getLogger(__name__)
@receiver(post_save, sender=CallResponse)
def call_response_post_save_handler(instance, **kwargs):
call_response = instance
"""
Check if Call Response indicates Contact wants to Opt Out, or Contact phone
is Wrong Number
"""
if (
call_response.question == CallQuestion.opt_out.value[0] and (
call_response.answer == CallAnswer.yes.value[0]
)
) or (
call_response.question == CallQuestion.talk_to_contact_why_not.value[0] and (
call_response.answer == CallAnswer.wrong_number.value[0]
)
):
"""Add Phone Opt Out for Contact phone number"""
call = call_response.call
contact = call.contact
if contact.phone_number is not None:
timestamp = timezone.now().strftime("%Y%m%d%H%M%S")
source = 'Call Response: %s | Contact: %s | %s' % (
str(call_response.id),
str(contact.id),
timestamp,
)
transaction.on_commit(
lambda: add_phone_opt_out(
contact.phone_number,
OptOutType.calling,
source,
)
)
```
#### File: endorsements/migrations/0008_auto_20170118_1803.py
```python
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def create_election(apps, schema_editor):
Election = apps.get_model('endorsements', 'Election')
Election.objects.create(title='General Election 2016', is_active=False)
Election.objects.create(title='Primary Election 2017', is_active=True)
class Migration(migrations.Migration):
dependencies = [
('endorsements', '0007_auto_20161025_0022'),
]
operations = [
migrations.CreateModel(
name='Election',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128)),
('is_active', models.BooleanField(default=False)),
],
),
migrations.RunPython(create_election, reverse_code=migrations.RunPython.noop),
migrations.AddField(
model_name='candidate',
name='election',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='endorsements.Election'),
preserve_default=False,
),
migrations.AddField(
model_name='initiative',
name='election',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='endorsements.Election'),
preserve_default=False,
),
]
```
#### File: site/local_groups/admin_views.py
```python
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from django.views.generic import FormView
from .forms import GroupLeaderSyncForm
import logging
logger = logging.getLogger(__name__)
class GroupLeaderSyncView(PermissionRequiredMixin, FormView):
"""Sync all Users to update their Group Leader affiliations"""
'''
TODO: need to set request.current_app to self.admin_site.name?
https://docs.djangoproject.com/en/1.10/ref/contrib/admin/#adding-views-to-admin-sites
'''
form_class = GroupLeaderSyncForm
login_url = reverse_lazy(
'admin:local_groups_localgroupaffiliation_changelist'
)
permission_required = 'local_groups.add_localgroupaffiliation'
success_url = reverse_lazy(
'admin:local_groups_localgroupaffiliation_changelist'
)
template_name = 'admin/group_leader_sync.html'
def form_valid(self, form):
"""Trigger post-save signal for all users to sync group leader roles"""
users = User.objects.all()
for user in users:
post_save.send(User, instance=user)
return HttpResponseRedirect(self.get_success_url())
```
#### File: local_groups/migrations/0054_auto_20170829_1740.py
```python
from __future__ import unicode_literals
from django.db import migrations, models
def set_signed_mou_version(apps, schema_editor):
GroupsModel = apps.get_model('local_groups', 'Group')
for group in GroupsModel.objects.all():
        if (group.status == 'approved' or group.status == 'signed-mou') and group.signed_mou_version == 'none':
print group
group.signed_mou_version = '1.0'
group.save()
def unset_signed_mou_version(apps, schema_editor):
GroupsModel = apps.get_model('local_groups', 'Group')
for group in GroupsModel.objects.all():
        if (group.status == 'approved' or group.status == 'signed-mou') and group.signed_mou_version == '1.0':
group.signed_mou_version = 'none'
group.save()
class Migration(migrations.Migration):
dependencies = [
('local_groups', '0053_auto_20170829_1740'),
]
operations = [
migrations.RunPython(set_signed_mou_version, reverse_code=unset_signed_mou_version)
]
```
#### File: nominations/migrations/0067_application_staff_20171218_2135.py
```python
from __future__ import unicode_literals
from django.db import migrations, models
# Update all legacy data to fit new format
def update_application_staff_data(apps, schema_editor):
Application = apps.get_model('nominations', 'Application')
for application in Application.objects.all():
if application.staff == "1":
application.staff = "<NAME>"
application.save()
class Migration(migrations.Migration):
dependencies = [
('nominations', '0066_auto_20171212_1747'),
]
operations = [
migrations.AlterField(
model_name='application',
name='staff',
field=models.CharField(blank=True, max_length=64, null=True),
),
migrations.RunPython(update_application_staff_data),
]
```
#### File: site/nominations/models.py
```python
from __future__ import unicode_literals
from ckeditor.fields import RichTextField
from collections import defaultdict
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from enum import Enum, unique
from localflavor.us.models import USStateField
from local_groups.models import Group
from pages.models import AlertLevels
from phonenumber_field.modelfields import PhoneNumberField
from wagtail.wagtailadmin.edit_handlers import FieldPanel
from wagtail.wagtailcore.fields import RichTextField as WagtailRichTextField
from wagtail.wagtailsnippets.models import register_snippet
import datetime
import logging
logger = logging.getLogger(__name__)
class Nomination(models.Model):
"""
A nomination form is filled out by the group with basic information about
the group and what it will do to help the candidate.
"""
# TODO: move this into application model
group_nomination_process = models.TextField(
max_length=500,
blank=False,
null=True,
verbose_name="Briefly describe your group's nomination process"
)
STATUSES = (
('incomplete', 'Incomplete'),
('complete', 'Complete'),
)
status = models.CharField(
max_length=16,
choices=STATUSES,
default='incomplete',
blank=True
)
def __unicode__(self):
try:
return self.application.candidate_first_name + ' ' + self.application.candidate_last_name + ' - ' + ' Nomination'
except:
return 'Nomination ' + str(self.pk)
def save(self, *args, **kwargs):
super(Nomination, self).save(*args, **kwargs)
'''
Save the application to update statuses and do other conditional logic
if the nomination has an application, save that application
'''
if hasattr(self, 'application'):
self.application.save()
if self.nominationresponse_set.count() == 0:
for q in NominationQuestion.objects.all():
self.nominationresponse_set.create(question=q)
class NominationQuestion(models.Model):
text = models.TextField()
def __unicode__(self):
return self.text
class NominationResponse(models.Model):
nomination = models.ForeignKey(Nomination)
question = models.ForeignKey(NominationQuestion)
response = models.TextField(max_length=1000)
def __unicode__(self):
return unicode(self.question)
@register_snippet
@python_2_unicode_compatible # provide equivalent __unicode__ and __str__ methods on Python 2
class NominationsPlatformAlert(models.Model):
content = WagtailRichTextField()
show = models.BooleanField(
default=False,
help_text='Show alert on nominations platform pages.'
)
alert_level = models.IntegerField(
choices=[x.value for x in AlertLevels],
default=AlertLevels.warning.value[0],
blank=False,
null=False,
help_text="""
Set the alert style corresponding to Bootstrap 3 alert levels.
See: https://getbootstrap.com/docs/3.3/components/#alerts-dismissible
"""
)
panels = [
FieldPanel('content'),
FieldPanel('show'),
FieldPanel('alert_level')
]
def __str__(self):
return self.content
class Questionnaire(models.Model):
"""
A platform questionnaire is filled out by the candidate with basic information and in-depth policy positions.
"""
STATUSES = (
('incomplete', 'Incomplete'),
('complete', 'Complete'),
('sent', 'Sent to Candidate'),
)
status = models.CharField(max_length=16, choices=STATUSES, default='incomplete', blank=True)
# Candidate Information and Social Media
candidate_first_name = models.CharField(max_length=255, null=True, blank=False, verbose_name="Candidate First Name")
candidate_last_name = models.CharField(max_length=255, null=True, blank=False, verbose_name="Candidate Last Name")
candidate_bio = models.TextField(max_length=1000, blank=False, null=False, verbose_name = "Candidate Bio")
candidate_email = models.EmailField(null=True, blank=False, verbose_name="Candidate Email", max_length=255)
candidate_phone = PhoneNumberField(null=True, blank=True, verbose_name="Candidate Phone Number")
candidate_office = models.CharField(null=True, max_length=255, blank=False, verbose_name="Candidate Office")
candidate_district = models.CharField(null=True, max_length=255, blank=True, verbose_name="Candidate District")
candidate_party = models.CharField(null=True, max_length=255, blank=False, verbose_name="Candidate Party Affiliation")
candidate_held_office = models.NullBooleanField(
verbose_name="Has the candidate ever held public office?"
)
candidate_is_member = models.NullBooleanField(
verbose_name="Is candidate a member of Our Revolution?"
)
candidate_city = models.CharField(null=True, max_length=255, blank=True, verbose_name="Candidate City")
candidate_state = USStateField(max_length=2, null=True, blank=False, verbose_name="Candidate State")
general_election_date = models.DateField(verbose_name = 'General Election Date', null = True, blank = False)
primary_election_date = models.DateField(verbose_name = 'Primary Election Date', null = True, blank = True)
candidate_website_url = models.URLField(null=True, blank=True, verbose_name="Candidate Website URL", max_length=255)
candidate_volunteer_url = models.URLField(null=True, blank=True, verbose_name="Candidate Volunteer URL", max_length=255)
candidate_donate_url = models.URLField(null=True, blank=True, verbose_name="Candidate Donate URL", max_length=255)
candidate_facebook_url = models.URLField(null=True, blank=True, verbose_name="Candidate Facebook URL", max_length=255)
candidate_twitter_url = models.URLField(null=True, blank=True, verbose_name="Candidate Twitter URL", max_length=255)
candidate_instagram_url = models.URLField(null=True, blank=True, verbose_name="Candidate Instagram URL", max_length=255)
candidate_youtube_url = models.URLField(null=True, blank=True, verbose_name="Candidate YouTube URL", max_length=255)
completed_by_candidate = models.NullBooleanField(null=True, blank=True)
def __unicode__(self):
return '%s %s | %s | %s [%s]' % (
self.candidate_first_name,
self.candidate_last_name,
self.candidate_office,
self.general_election_date,
self.id,
)
"""Get response to question about issues, or None"""
def _campaign_issues(self, *args, **kwargs):
response = self.response_set.filter(
question_id=settings.NOMINATIONS_QUESTION_ISSUES_ID,
).first()
position = response.position if response else None
return position
campaign_issues = property(_campaign_issues)
def save(self, skip_application_save=False, *args, **kwargs):
super(Questionnaire, self).save(*args, **kwargs)
if self.response_set.count() == 0:
for q in Question.objects.all():
self.response_set.create(question=q)
'''
Save the application(s) attached to a questionnaire when the
questionnaire is saved.
'''
if not skip_application_save:
for app in self.application_set.all():
app.save()
class Question(models.Model):
text = models.TextField(verbose_name="Question Text")
include_multi_choice = models.BooleanField(default=True, verbose_name="Include Multiple Choice Selection")
def __unicode__(self):
return self.text
class Response(models.Model):
QUESTIONNAIRE_CHOICES = (
('a', 'Strongly Agree'),
('c', 'Somewhat Agree'),
('d', 'Somewhat Disagree'),
('b', 'Strongly Disagree'),
)
questionnaire = models.ForeignKey(Questionnaire)
question = models.ForeignKey(Question)
response = models.CharField(max_length=1, blank=False, null=False, choices=QUESTIONNAIRE_CHOICES)
position = models.TextField(max_length=1000, blank=True, null=True,verbose_name="Candidate's position on this issue:")
def __unicode__(self):
return unicode(self.question)
@unique
class ApplicationType(Enum):
basic = (1, 'Basic Support')
priority = (2, 'Priority Support')
class Application(models.Model):
"""
An application is a single submission for an endorsement. Each application
consists of a group nomination and a candidate questionnaire, and has a
many-to-one relationship with a group.
"""
# See http://www.ncsl.org/research/elections-and-campaigns/primary-types.aspx
primary_election_type_choices = (
(1, 'Closed Primary'),
(2, 'Partially Closed Primary'),
(3, 'Partially Open Primary'),
(4, 'Open to Unaffiliated Voters Primary'),
(5, 'Open Primary'),
(6, 'Top-Two Primary'),
(7, 'Presidential Primary'),
(99, 'Other'),
)
staff_recommendation_choices = (
(1, 'Recommend to Endorse'),
(2, 'Recommend Not to Endorse'),
(3, 'No Recommendation'),
)
application_type = models.IntegerField(
blank=True,
choices=[x.value for x in ApplicationType],
null=True,
)
"""Django User to use instead of legacy auth0 user"""
auth_user = models.ForeignKey(
User,
blank=True,
null=True,
)
fundraising_date_of_filing = models.DateField(
blank=True,
null=True,
verbose_name='Filing Date for Fundraising Report'
)
fundraising_date_accessed = models.DateField(
blank=True,
null=True,
verbose_name='Date fundraising information was accessed'
)
fundraising_source_url = models.URLField(
blank=True,
max_length=255,
null=True,
verbose_name='Fundraising Source URL'
)
"""Legacy field for auth0 user id"""
user_id = models.CharField(max_length=255, null=True, blank=True)
create_dt = models.DateTimeField(auto_now_add=True)
submitted_dt = models.DateTimeField(
null=True,
blank=True,
verbose_name='Submitted at'
)
nomination = models.OneToOneField(
Nomination,
on_delete=models.CASCADE,
primary_key=False,
null=True,
blank=True,
related_name='application',
verbose_name='Group Nomination Form:',
)
primary_election_type = models.IntegerField(
blank=True,
choices=primary_election_type_choices,
null=True,
)
questionnaire = models.ForeignKey(
Questionnaire,
on_delete=models.SET_NULL,
null=True,
blank=True
)
group = models.ForeignKey(Group, to_field="group_id")
rep_email = models.EmailField(
null=True,
blank=False,
verbose_name="Contact Email",
max_length=254
)
rep_first_name = models.CharField(
max_length=35,
null=True,
blank=False,
verbose_name="First Name"
)
rep_last_name = models.CharField(
max_length=35,
null=True,
blank=False,
verbose_name="Last Name"
)
rep_phone = PhoneNumberField(
null=True,
blank=True,
verbose_name="Phone Number"
)
# TODO: change to foreign key and create new object for each new candidate,
# implement autocomplete to minimize duplicate candidates
candidate_first_name = models.CharField(
max_length=255,
null=True,
blank=False,
verbose_name="Candidate First Name"
)
candidate_last_name = models.CharField(
max_length=255,
null=True,
blank=False,
verbose_name="Candidate Last Name"
)
candidate_office = models.CharField(
null=True,
max_length=255,
blank=False,
verbose_name="Candidate Office"
)
candidate_district = models.CharField(
null=True,
max_length=255,
blank=True,
verbose_name="Candidate District"
)
candidate_city = models.CharField(
null=True,
max_length=255,
blank=True,
verbose_name="Candidate City"
)
candidate_state = USStateField(max_length=2, null=True, blank=False)
authorized_email = models.EmailField(
null=True,
blank=True,
verbose_name="Authorized Email",
max_length=254
)
# TODO TECH-840 convert statuses to integer fields
STATUSES = (
(
'needs-group-form-and-questionnaire',
'Needs Group Form and Questionnaire'
),
('needs-questionnaire', 'Needs Questionnaire'),
('needs-group-form', 'Needs Group Form'),
# Deprecated as of 2019-01-08
# ('incomplete', 'Needs Submission'),
('submitted', 'Submitted'),
('needs-research', 'Needs Research'),
('needs-staff-review', 'Needs Staff Review'),
('under-review', 'Under Review'),
('approved', 'Endorsed'),
('removed', 'Not Endorsed'),
('expired', 'Expired'),
('hold', 'Hold'),
)
# Statuses that signify whether a group can still edit an application
EDITABLE_STATUSES = [
'needs-group-form-and-questionnaire',
'needs-questionnaire',
'needs-group-form',
]
status = models.CharField(
max_length=64,
choices=STATUSES,
default='needs-group-form-and-questionnaire'
)
# Volunteer Data Entry
vol_incumbent = models.NullBooleanField(
null=True,
blank=True,
verbose_name='Incumbent?'
)
vol_dem_challenger = models.NullBooleanField(
null=True,
blank=True,
verbose_name='If primary, who are the Democratic challengers?'
)
# TODO: rename to vol_other_candidates and remove old field from code
# and db after a/b deploy issues are resolved
# legacy field
vol_other_progressives = models.TextField(
null=True,
blank=True,
max_length=500,
verbose_name='Other candidates running:',
help_text='Please indicate party affiliation and other progressives. Max length 500 characters.'
)
vol_polling = models.TextField(
null=True,
blank=True,
max_length=500,
verbose_name='Polling:'
)
vol_endorsements = models.TextField(
null=True,
blank=True,
max_length=500,
verbose_name='Endorsements:'
)
vol_advantage = models.CharField(
null=True,
blank=True,
max_length=50,
verbose_name='Previous Election D% or R% Advantage:'
)
vol_turnout = models.CharField(
null=True,
blank=True,
max_length=10,
verbose_name='Previous Election Year Turnout:'
)
vol_win_number = models.IntegerField(
null=True,
blank=True,
verbose_name='Win Number:'
)
vol_fundraising = models.IntegerField(
null=True,
blank=True,
verbose_name='How much money fundraised?'
)
#legacy field
vol_opponent_fundraising = models.IntegerField(
null=True,
blank=True,
verbose_name='How much competitors have fundraised?'
)
vol_crimes = models.TextField(
null=True,
blank=True,
max_length=500,
verbose_name='Crimes or Scandals (please add links to source):'
)
vol_notes = models.TextField(
null=True,
blank=True,
max_length=1000,
verbose_name='Volunteer Notes:',
help_text='Max length 1000 characters.'
)
# Staff only research fields
CLASSIFICATIONS = (
('1', 'I'),
('2', 'II'),
('3', 'III'),
)
VET_STATUSES = (
('0', 'Pending'),
('1', 'Passed'),
('2', 'Failed'),
('3', 'Not Submitted'),
)
"""TODO: remove?"""
RECOMMENDATIONS = (
('1', 'Endorse'),
('2', 'Do Not Endorse')
)
classification_level = models.CharField(
max_length=64,
choices=CLASSIFICATIONS,
default='1'
)
staff = models.CharField(
max_length=64,
blank=True,
null=True,
)
"""TODO: remove?"""
recommendation = models.CharField(
max_length=64,
choices=RECOMMENDATIONS,
default='1'
)
staff_bio = RichTextField(
null=True,
blank=True,
verbose_name='Candidate Bio:',
help_text='This will prepopulate from the candidate questionnaire if left blank.'
)
staff_recommendation = models.IntegerField(
blank=True,
choices=staff_recommendation_choices,
null=True,
)
stand_out_information = RichTextField(
blank=True,
null=True,
)
state_of_the_race = RichTextField(
null=True,
blank=True,
verbose_name='State of the Race:',
)
local_group_info = RichTextField(
null=True,
blank=True,
verbose_name='OR Local Group Info:',
help_text='This will prepopulate from the local group\'s endorsement process if left blank.'
)
staff_notes = RichTextField(
null=True,
blank=True,
verbose_name='Staff Notes or Flags:',
help_text='This will prepopulate from volunteer notes if left blank.'
)
vet_status = models.CharField(
max_length=64,
choices=VET_STATUSES,
default='0'
)
vet = RichTextField(
null=True,
blank=True,
verbose_name='Vet Details:',
)
local_support = RichTextField(
null=True,
blank=True,
verbose_name='Local Support:',
help_text='This will prepopulate from the local group\'s support question if left blank.'
)
def __unicode__(self):
return unicode(self.group) + ' - ' + self.candidate_first_name + ' ' + self.candidate_last_name
def _candidate_name(self):
return self.candidate_first_name + ' ' + self.candidate_last_name
candidate_name = property(_candidate_name)
'''
Group candidates by party and return list
'''
def _candidates_by_party(self):
candidates = defaultdict(list)
for application_candidate in self.applicationcandidate_set.order_by(
'party',
'first_name',
'last_name'
):
candidates[application_candidate.party].append(
application_candidate
)
return candidates.items
candidates_by_party = property(_candidates_by_party)
def auto_populate_research_fields(self):
"""Auto-populate staff write-up fields from already present info"""
if self.questionnaire:
if self.questionnaire.candidate_bio and not self.staff_bio:
self.staff_bio = self.questionnaire.candidate_bio
if self.nomination:
if self.nomination.group_nomination_process and not self.local_group_info:
self.local_group_info = self.nomination.group_nomination_process
# question ID 8 is "What actions will the group take
# and how many people have agreed to volunteer/support?
question = self.nomination.nominationresponse_set.filter(
question_id=8
).first()
if question and not self.local_support:
self.local_support = question.response.encode('utf-8')
if self.vol_notes and not self.staff_notes:
self.staff_notes = self.vol_notes
def create_related_objects(self):
"""Create related nomination and questionnaire for application."""
if not self.nomination:
self.nomination = Nomination.objects.create()
if not self.questionnaire:
self.questionnaire = Questionnaire.objects.create()
def generate_application_status(self):
"""
Returns a generated status based on completion of various items.
Nomination is filled out by the group with basic information about
the group and what it will do to help the candidate.
Questionnaire is filled out by the candidate with basic information and
in-depth policy positions.
"""
if self.status in self.EDITABLE_STATUSES:
if self.nomination.status == 'incomplete':
if self.questionnaire.status == 'complete':
status = 'needs-group-form'
else:
status = 'needs-group-form-and-questionnaire'
else:
# nomination complete
if self.questionnaire.status == 'complete':
# questionnaire complete
"""
Set as submitted if nomination + questionnaire are complete
"""
status = 'submitted'
else:
# needs questionaire
status = 'needs-questionnaire'
else:
status = self.status
return status
def is_editable(self):
"""Returns whether a group can edit this application."""
if self.status in self.EDITABLE_STATUSES:
return True
else:
return False
def save(self, *args, **kwargs):
if not self.nomination or not self.questionnaire:
self.create_related_objects()
self.auto_populate_research_fields()
self.status = self.generate_application_status()
if self.status == 'submitted' and self.submitted_dt is None:
self.submitted_dt = datetime.datetime.now()
super(Application, self).save(*args, **kwargs)
class Meta:
permissions = (
(
"bulk_change_application_status",
"Can bulk change status of applications"
),
(
"export_pdf_application",
"Can export to pdf"
),
(
"admin_application",
"Can admin override application data"
),
)
verbose_name = 'Candidate Application'
class ApplicationCandidate(models.Model):
'''
Information about candidates in a race related to an application
'''
party_choices = (
(1, 'Democratic Party'),
(2, 'Green Party'),
(3, 'Independent/No Party Affiliation'),
(4, 'Republican Party'),
(5, 'Libertarian Party'),
(6, 'Vermont Progressive Party'),
(99, 'Other'),
)
application = models.ForeignKey(Application, on_delete=models.CASCADE)
description = models.CharField(
blank=True,
max_length=500,
null=True,
)
first_name = models.CharField(
blank=True,
max_length=255,
null=True,
)
fundraising = models.IntegerField(
blank=True,
null=True,
verbose_name='Cash on Hand'
)
last_name = models.CharField(
blank=True,
max_length=255,
null=True,
)
party = models.IntegerField(
blank=True,
choices=party_choices,
null=True,
)
website_url = models.URLField(
blank=True,
max_length=255,
null=True,
)
def _name(self):
if self.first_name and self.last_name:
return self.first_name + ' ' + self.last_name
elif self.first_name:
return self.first_name
elif self.last_name:
return self.last_name
else:
return None
name = property(_name)
def __unicode__(self):
return str(self.id) + (' ' + self.name if self.name else '')
class InitiativeApplication(models.Model):
"""Django User to use instead of legacy auth0 user"""
auth_user = models.ForeignKey(
User,
blank=True,
null=True,
)
"""Legacy Auth0 user id"""
user_id = models.CharField(max_length=255, null=True, blank=True)
create_dt = models.DateTimeField(auto_now_add=True)
submitted_dt = models.DateTimeField(null=True, blank=True, verbose_name = 'Submitted at')
group = models.ForeignKey(Group, to_field="group_id")
rep_email = models.EmailField(null=True, blank=False, verbose_name="Contact Email", max_length=254)
rep_first_name = models.CharField(max_length=35, null=True, blank=False, verbose_name="First Name")
rep_last_name = models.CharField(max_length=35, null=True, blank=False, verbose_name="<NAME>")
rep_phone = PhoneNumberField(null=True, blank=True, verbose_name="Phone Number")
name = models.CharField(max_length=254,null=True,blank=False, verbose_name =" Initiative Name")
election_date = models.DateField(verbose_name = 'Election Date', null = True, blank = False)
website_url = models.URLField(null=True, blank=False, verbose_name="Initiative Website URL", max_length=255)
volunteer_url = models.URLField(null=True, blank=True, verbose_name="Volunteer URL", max_length=255)
donate_url = models.URLField(null=True, blank=True, verbose_name="Donate URL", max_length=255)
city = models.CharField(max_length=254,null=True,blank=True)
county = models.CharField(max_length=254,null=True,blank=True)
state = USStateField(max_length=2, null=True, blank=False, verbose_name="State")
description = models.TextField(max_length=500, blank=False, null=True, verbose_name = "What would the initiative do?")
question = models.TextField(max_length=500, blank=True, null=True, verbose_name = "How will the question appear on the ballot?")
vote = models.NullBooleanField(null=True, blank=True, verbose_name='How to vote:')
additional_info = models.TextField(max_length=500, blank=True, null=True, verbose_name = "Any additional information you want to share?")
LOCALITIES = (
('city', 'Citywide'),
('county', 'Countywide'),
('state', 'Statewide'),
)
locality = models.CharField(max_length=16, choices=LOCALITIES, default='state', verbose_name='Is this initiative:')
STATUSES = (
('incomplete', 'Incomplete'),
('submitted', 'Submitted'),
('needs-research','Needs Research'),
('needs-staff-review', 'Needs Staff Review'),
('approved', 'Endorsed'),
('removed', 'Not Endorsed')
)
status = models.CharField(max_length=64, choices=STATUSES, default='submitted')
def __unicode__(self):
return unicode(self.group) + ' - ' + self.name
def save(self, *args, **kwargs):
if self.status == 'submitted' and self.submitted_dt is None:
self.submitted_dt = datetime.datetime.now()
super(InitiativeApplication, self).save(*args, **kwargs)
class Meta:
verbose_name = 'Ballot Initiative Application'
```
#### File: site/nominations/tests.py
```python
from django.test import TestCase
from models import Application
from local_groups.models import Group
class ApplicationTestCase(TestCase):
def setUp(self):
# create group
group = Group.objects.create(
name='Test Group',
group_id=9999
)
# create application
Application.objects.create(
group=group,
rep_email='<EMAIL>',
rep_first_name='John',
rep_last_name='Doe',
rep_phone='6155555555',
candidate_first_name='Test',
candidate_last_name='Candidate',
candidate_office='Governor',
candidate_state='TN'
)
def test_generate_application_status(self):
"""
The status of the Application should be properly set based on the
status of the nomination form (nomination) and the questionnaire.
"""
app = Application.objects.get(
candidate_first_name='Test',
candidate_last_name='Candidate'
)
        # nomination form incomplete, questionnaire incomplete
app.nomination.status = 'incomplete'
app.questionnaire.status = 'incomplete'
app.status = app.generate_application_status()
self.assertEqual(app.status, 'needs-group-form-and-questionnaire')
# nomination form complete, questionnaire incomplete
app.nomination.status = 'complete'
app.questionnaire.status = 'incomplete'
app.status = app.generate_application_status()
self.assertEqual(app.status, 'needs-questionnaire')
        # nomination form and questionnaire completed
        app.nomination.status = 'complete'
        app.questionnaire.status = 'complete'
        app.status = app.generate_application_status()
        self.assertEqual(app.status, 'submitted')
# nomination form incomplete, questionnaire complete
app.nomination.status = 'incomplete'
app.questionnaire.status = 'complete'
app.status = app.generate_application_status()
self.assertEqual(app.status, 'needs-group-form')
# if application isn't editable, status shouldn't change
app.status = 'submitted'
app.status = app.generate_application_status()
self.assertEqual(app.status, 'submitted')
def test_save(self):
"""When an Application is saved, related objects should be created if
necessary, status should be set properly, research fields should be
auto-populated and submissions datetime should be set if necessary."""
app = Application.objects.get(
candidate_first_name='Test',
candidate_last_name='Candidate'
)
# check for related objects nomination form (nomination)
# and questionnaire
self.assertNotEqual(app.nomination, None)
self.assertNotEqual(app.questionnaire, None)
# check that they are properly related to the application
self.assertEqual(app, app.nomination.application)
self.assertEqual(app, app.questionnaire.application_set.first())
# check that their individual default statuses are properly set
self.assertEqual(app.nomination.status, 'incomplete')
self.assertEqual(app.questionnaire.status, 'incomplete')
# check that generate_application_status is called after
# nomination is saved
app.nomination.status = 'complete'
app.nomination.save()
self.assertEqual(app.nomination.status, 'complete')
self.assertEqual(app.status, 'needs-questionnaire')
# check that generate_application_status is called after
# questionnaire is saved
app.questionnaire.status = 'complete'
app.questionnaire.save()
self.assertEqual(app.questionnaire.status, 'complete')
# this works
self.assertEqual(app.questionnaire.application_set.first(), app)
# but this doesn't?
# self.assertEqual(app.status, 'incomplete')
```
#### File: pages/migrations/0010_populate_news_index.py
```python
from __future__ import unicode_literals
from django.db import migrations
def populate_news_index(apps, schema_editor):
from pages.models import IndexPage, NewsIndex
# Home Page
home_page = IndexPage.objects.get(title='Our Revolution')
# Our Candidates
news_index = NewsIndex(title='News', slug='press')
home_page.add_child(instance=news_index)
def remove_news_index(apps, schema_editor):
from wagtail.wagtailcore.models import Page
news_index = Page.objects.get(title='News')
news_index.delete()
class Migration(migrations.Migration):
dependencies = [
('pages', '0009_auto_20161024_1818'),
]
operations = [
migrations.RunPython(populate_news_index, reverse_code=remove_news_index)
]
```
#### File: pages/migrations/0015_create_redirects.py
```python
from __future__ import unicode_literals
from django.db import migrations
REDIRECTS = {
"/candidates/martin-quezeda": "martin-quezada",
"/candidates/elizabeth-thompson": "elizabeth-thomson",
"/stop-the-tpp": "stop-tpp-now",
"/volunteer-signup": "action",
"/signup": "/",
"/candidate-questionnaire": "candidates",
"/august-30-primary-candidates": "candidates",
"/tpp": "https://go.berniesanders.com/page/s/stop-the-TPP?source=webslashtpp",
"/act": "https://go.berniesanders.com/page/s/volunteer-our-revolution?source=webslashact"
}
def populate_redirects(apps, schema_editor):
from wagtail.wagtailcore.models import Page, Site
from wagtail.wagtailredirects.models import Redirect
for source, target in REDIRECTS.iteritems():
print "Trying %s / %s ..." % (source, target)
data = {
'old_path': source,
'site': Site.objects.get(is_default_site=True),
'is_permanent': True,
}
if '/' in target:
data['redirect_link'] = target
else:
data['redirect_page'] = Page.objects.get(slug=target)
Redirect.objects.create(**data)
def remove_redirects(apps, schema_editor):
from wagtail.wagtailredirects.models import Redirect
Redirect.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('pages', '0014_create_stop_tpp_page'),
]
operations = [
migrations.RunPython(populate_redirects, reverse_code=remove_redirects)
]
```
#### File: site/social_redirects/models.py
```python
from django.contrib.sites.models import Site
from django.db import models
class Redirect(models.Model):
title = models.CharField(max_length=200)
description = models.CharField(max_length=1024, blank=True, null=True)
social_image = models.ImageField(null=True, blank=True)
old_path = models.CharField(max_length=200, db_index=True, verbose_name="Redirect From", help_text="This should be an absolute path, excluding the domain name. Example: '/events/search/'.")
new_path = models.CharField(max_length=200, blank=True, verbose_name="Redirect To", help_text="This can be either an absolute path (as above) or a full URL starting with 'http://'.")
site = models.ForeignKey(Site, models.CASCADE)
class Meta:
unique_together = (('site', 'old_path'),)
ordering = ('old_path',)
def __str__(self):
return "%s ---> %s" % (self.old_path, self.new_path)
``` |
{
"source": "joshzwiebel/PencilLearningTest",
"score": 3
} |
#### File: joshzwiebel/PencilLearningTest/application.py
```python
from flask import Flask, render_template, request
from text_generation import generate_next_word
from text_generation import initialize
app = Flask(__name__)
app.secret_key = "<KEY>"
@app.route('/')
def index():
return render_template('template.html', generated_text="your text will appear here")
@app.route('/', methods=['POST'])
def my_form_post():
text = request.form['text']
texty = generate_next_word(str(text))
return render_template('template.html', generated_text=texty)
@app.before_first_request
def initiate():
initialize()
if __name__ == '__main__':
app.debug = True
app.run()
``` |
{
"source": "joshzyj/pull_facebook_data_for_good",
"score": 2
} |
#### File: joshzyj/pull_facebook_data_for_good/pull_mobility.py
```python
import sys
from utils import download_data, move_most_recent_files, get_update_date, remove_empty_files
from mobility import get_file_dates, get_urls
from itertools import compress
def pull_mobility(outdir, keys, country, dl_variables, update, movement_type):
'''
Parameters
----------
outdir : str
Output directory.
keys : list
user credentials [username, password].
country : str
Country name - must match .config file exactly (names with spaces must replace ' ' with '_').
dl_variables : dict
download specific variables in a dict, 'id' = dataset id, 'origin' = dataset origin datetime.datetime object.
update : boolean
Whether an existing dataset is being updated.
Returns
-------
None.
'''
country_output = outdir + "/" + country + '_' + movement_type
base_url = 'https://www.facebook.com/geoinsights-portal/downloads/vector/?id=' + str(dl_variables['id']) + '&ds='
earliest_date = dl_variables['origin']
data_dates = get_file_dates(earliest_date)
if update:
data_dates = list(compress(data_dates, [x > get_update_date(country_output) for x in data_dates]))
if len(data_dates) == 0:
sys.exit('No datasets to download. Exiting.')
urls = get_urls(base_url, data_dates)
start_time = download_data(urls, keys)
move_most_recent_files(country_output, urls, start_time)
remove_empty_files(country_output)
print('Success.')
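# Illustrative call (hypothetical values; in normal use the dataset id and origin date
# come from the country's .config entry):
#
#     import datetime
#     pull_mobility(
#         outdir='outputs',
#         keys=['my_username', 'my_password'],
#         country='Great_Britain',
#         dl_variables={'id': 123456789, 'origin': datetime.datetime(2020, 4, 1)},
#         update=False,
#         movement_type='admin',
#     )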
``` |
{
"source": "joshzzl/POMDPy",
"score": 3
} |
#### File: pomdpy/pomdp/observation_pool.py
```python
from future.utils import with_metaclass
import abc
class ObservationPool(with_metaclass(abc.ABCMeta, object)):
"""
    Defines the ObservationPool interface, which allows customization of how the mapping for
    each individual action node is set up.

    Using a single class in this way allows certain aspects of the mappings to be stored
    globally, e.g. to keep statistics that are shared across all of the mappings rather than
    stored on a per-mapping basis.
"""
@abc.abstractmethod
def create_observation_mapping(self, action_node):
"""
Creates an observation mapping for the given action node.
:param action_node:
:return: ObservationMapping
"""
``` |
{
"source": "josiah14/linear-algebra",
"score": 4
} |
#### File: the-vector-problems/Python/The_Vector_problems.py
```python
import vector
from math import trunc, sqrt
import GF2
# version code ef5291f09f60+
coursera = 1
# Please fill out this stencil and submit using the provided submission script.
# Some of the GF2 problems require use of the value GF2.one so the stencil imports it.
from GF2 import one
## 1: (Problem 1) Vector Addition Practice 1
#Please express each answer as a list of numbers
p1_v = [-1, 3]
p1_u = [0, 4]
p1_v_plus_u = vector.add(p1_v, p1_u)
p1_v_minus_u = vector.minus(p1_v, p1_u)
p1_three_v_minus_two_u = vector.minus(vector.scalar_mult(3, p1_v), vector.scalar_mult(2, p1_u))
## 2: (Problem 2) Vector Addition Practice 2
p2_u = [-1, 1, 1]
p2_v = [ 2, -1, 5]
p2_v_plus_u = vector.add(p2_v, p2_u)
p2_v_minus_u = vector.minus(p2_v, p2_u)
p2_two_v_minus_u = vector.minus(vector.scalar_mult(2, p2_v), p2_u)
p2_v_plus_two_u = vector.add(p2_v, vector.scalar_mult(2, p2_u))
## 3: (Problem 3) Vector Addition Practice 3
p3_v = [GF2.zero, GF2.one, GF2.one]
p3_u = [GF2.one, GF2.one, GF2.one]
p3_vector_sum_1 = vector.add(p3_v, p3_u)
p3_vector_sum_2 = vector.add(p3_v, p3_u, p3_u)
## 4: (Problem 4) GF2 Vector Addition A
# Please express your solution as a subset of the letters {'a','b','c','d','e','f'}.
# For example, {'a','b','c'} is the subset consisting of:
# a (1100000), b (0110000), and c (0011000).
# The answer should be an empty set, written set(), if the given vector u cannot
# be written as the sum of any subset of the vectors a, b, c, d, e, and f.
p4_vectors = {
'a': [GF2.one, GF2.one, GF2.zero, GF2.zero, GF2.zero, GF2.zero, GF2.zero],
'b': [GF2.zero, GF2.one, GF2.one, GF2.zero, GF2.zero, GF2.zero, GF2.zero],
'c': [GF2.zero, GF2.zero, GF2.one, GF2.one, GF2.zero, GF2.zero, GF2.zero],
'd': [GF2.zero, GF2.zero, GF2.zero, GF2.one, GF2.one, GF2.zero, GF2.zero],
'e': [GF2.zero, GF2.zero, GF2.zero, GF2.zero, GF2.one, GF2.one, GF2.zero],
'f': [GF2.zero, GF2.zero, GF2.zero, GF2.zero, GF2.zero, GF2.one, GF2.one]
}
def match_vector_sums_to(vector_dict, v):
return set(
vector.flatten2(
[ x
for x in vector.permutations_unordered_no_repeat(list(vector_dict))
if vector.add(vector_dict[x[0]], vector_dict[x[1]]) == v
]
)
)
u0 = [GF2.zero, GF2.zero, GF2.one, GF2.zero, GF2.zero, GF2.one, GF2.zero]
u_0010010 = match_vector_sums_to(p4_vectors, u0)
u1 = [GF2.zero, GF2.one, GF2.zero, GF2.zero, GF2.zero, GF2.one, GF2.zero]
u_0100010 = match_vector_sums_to(p4_vectors, u1)
## 5: (Problem 5) GF2 Vector Addition B
# Use the same format as the previous problem
p5_vectors = {
'a': [GF2.one, GF2.one, GF2.one, GF2.zero, GF2.zero, GF2.zero, GF2.zero],
'b': [GF2.zero, GF2.one, GF2.one, GF2.one, GF2.zero, GF2.zero, GF2.zero],
'c': [GF2.zero, GF2.zero, GF2.one, GF2.one, GF2.one, GF2.zero, GF2.zero],
'd': [GF2.zero, GF2.zero, GF2.zero, GF2.one, GF2.one, GF2.one, GF2.zero],
'e': [GF2.zero, GF2.zero, GF2.zero, GF2.zero, GF2.one, GF2.one, GF2.one],
'f': [GF2.zero, GF2.zero, GF2.zero, GF2.zero, GF2.zero, GF2.one, GF2.one]
}
v0 = [GF2.zero, GF2.zero, GF2.one, GF2.zero, GF2.zero, GF2.one, GF2.zero]
v_0010010 = match_vector_sums_to(p5_vectors, v0)
p5_v1 = [GF2.zero, GF2.one, GF2.zero, GF2.zero, GF2.zero, GF2.one, GF2.zero]
v_0100010 = match_vector_sums_to(p5_vectors, p5_v1)
## 6: (Problem 6) Solving Linear Equations over GF(2)
#You should be able to solve this without using a computer.
a = [GF2.one, GF2.one, GF2.zero, GF2.zero]
b = [GF2.one, GF2.zero, GF2.one, GF2.zero]
c = [GF2.one, GF2.one, GF2.one, GF2.one]
x_gf2 = [GF2.one, GF2.zero, GF2.zero, GF2.zero]
x_gf2_comp = vector.GF2_solve_dot_prod_sys(one, a, b, c)
## 7: (Problem 7) Formulating Equations using Dot-Product
#Please provide each answer as a list of numbers
v1 = [2, 3, -4, 1]
v2 = [1, -5, 2, 0]
v3 = [4, 1, -1, -1]
## 8: (Problem 8) Practice with Dot-Product
uv_a = vector.dot_prod([1, 0], [5, 4321])
uv_b = vector.dot_prod([0, 1], [12345, 6])
uv_c = vector.dot_prod([-1, 3], [5, 7])
uv_d = trunc(
vector.dot_prod(
[-(sqrt(2) / 2.0), sqrt(2) / 2.0],
[sqrt(2) / 2.0, -(sqrt(2) / 2.0)]
)
)
``` |
{
"source": "JosiahBradley/mod2win",
"score": 3
} |
#### File: mod2win/levels/level_02.py
```python
import arcade
from mod2win.levels.BaseLevel import BaseLevel
from mod2win.mods.level2 import Mod
import time
class L2(BaseLevel):
def __init__(self, speed, title):
super().__init__(speed=speed, title=title)
self.exit = None
self.gate = None
self.start = time.time()
self.duration = 30
def draw_map(self):
super().draw_map()
# Exit Door
door = self.tile_sprite('signExit')
door.center_x = 14 * self.conf.TILE_RADIUS
door.center_y = 185
self.assets["statics"].append(door)
self.exit = door
# Gate
self.gate = []
for y in range(2 * self.conf.TILE_RADIUS, 10 * self.conf.TILE_RADIUS, 2 * self.conf.TILE_RADIUS):
g = self.tile_sprite('stoneCenter_rounded')
g.center_x = 10 * self.conf.TILE_RADIUS
g.center_y = y
self.assets["block"].append(g)
self.gate.append(g)
def update(self, delta_time):
super().update(delta_time=delta_time)
if self.is_game_over:
return
        # Need game over condition, probably time based
if time.time() > self.start + self.duration:
self.game_over()
if self.player.center_x > self.exit.center_x:
self.win()
try:
Mod.lift_gate(self.gate)
except AttributeError:
pass
for g in self.gate:
if g.center_y > self.conf.TILE_RADIUS * 8:
g.center_y = self.conf.TILE_RADIUS * 8
if g.center_x > self.conf.TILE_RADIUS * 16:
g.center_x = self.conf.TILE_RADIUS * 16
def on_draw(self):
super().on_draw()
if self.is_game_over:
arcade.draw_text(
"YOU WIN",
self.view_left + self.get_size()[0] * .5,
self.view_bottom + self.get_size()[1] * .5,
arcade.csscolor.ORANGE_RED, 32
)
def win(self):
super().win()
self.audio("gameover1")
self.score += 1000
def main():
""" Main method """
window = L2(speed=1.7, title="LEVEL 2: GATE")
window.setup()
arcade.run()
if __name__ == "__main__":
main()
```
#### File: mod2win/levels/level_launcher.py
```python
from mod2win.tools import obf
import sys
from os import path, name as os_name
import subprocess
mod_path = path.dirname(path.dirname(path.abspath(obf.__file__)))
def launch():
try:
if os_name.lower() != 'nt':
subprocess.run(["python", path.abspath(mod_path + f"/levels/level_0{sys.argv[1]}.pyc")], shell=False)
else:
subprocess.run(["./venv/Scripts/python", path.abspath(mod_path + f"/levels/level_0{sys.argv[1]}.pyc")], shell=False)
except FileNotFoundError as e:
print(f"No level found {e.filename}")
except Exception as e:
pass
def scrub():
for i in range(1, 4):
in_file = path.abspath(mod_path + f"/levels/level_0{i}.py")
out_file = path.abspath(mod_path + f"/levels/level_0{i}.b64")
o = obf.Obf(input_file=in_file, output_file=out_file)
o.hide()
def restore():
for i in range(1, 4):
in_file = path.abspath(mod_path + f"/levels/level_0{i}.b64")
out_file = path.abspath(mod_path + f"/levels/level_0{i}.py")
o = obf.Obf(input_file=in_file, output_file=out_file)
o.unhide()
def _compile():
for i in range(1, 4):
in_file = path.abspath(mod_path + f"/levels/level_0{i}.py")
out_file = path.abspath(mod_path + f"/levels/level_0{i}.pyc")
o = obf.Obf(input_file=in_file, output_file=out_file)
o.obf()
``` |
{
"source": "josiah-co/bigquery_fdw",
"score": 2
} |
#### File: bigquery_fdw/src/fdw.py
```python
from collections import OrderedDict, namedtuple, defaultdict
from multicorn import ColumnDefinition, ForeignDataWrapper, TableDefinition
from multicorn.utils import log_to_postgres, ERROR, WARNING, INFO, DEBUG
from .bqclient import BqClient
DEFAULT_MAPPINGS = {
"STRING": "TEXT",
"INT64": "BIGINT",
"INTEGER": "BIGINT",
"DATE": "DATE",
"FLOAT64": "DOUBLE PRECISION",
"BOOL": "BOOLEAN",
"TIMESTAMP": "TIMESTAMP WITHOUT TIME ZONE",
# note: untested, so commented out
# "BYTES": "BYTES",
# "STRUCT": "STRING",
}
class FDWImportError(Exception):
"raised if 'import foreign schema' ran into an error condition"
class ConstantForeignDataWrapper(ForeignDataWrapper):
# Default vars
client = None # BqClient instance
partitionPseudoColumn = 'partition_date' # Name of the partition pseudo column
# Pseudo column to fetch `count(*)` when using the remote counting and grouping feature
countPseudoColumn = '_fdw_count'
castingRules = None # Dict of casting rules when using the `fdw_casting` option
def __init__(self, options, columns):
"""
Initialize instance, set class level vars
"""
super(ConstantForeignDataWrapper, self).__init__(options, columns)
# Set options
self.setOptions(options)
# Set table columns
self.columns = columns
# Set data types and conversion rules mapping
self.setDatatypes()
self.setConversionRules()
def setOptions(self, options):
"""
Set table options at class level
"""
# Set options at class scope
self.dataset = options.get('fdw_dataset') or options.get('schema')
self.table = options.get('fdw_table') or options.get('tablename')
self.convertToTz = options.get('fdw_convert_tz')
# Set verbose option
self.setOptionVerbose(options.get('fdw_verbose'))
# Set SQL dialect
self.setOptionSqlDialect(options.get('fdw_sql_dialect'))
# Set grouping option
self.setOptionGroupBy(options.get('fdw_group'))
# Set casting rules
self.setOptionCasting(options.get('fdw_casting'))
# Set what to do if imported table has too many columns
self.tooManyColumns = options.get("fdw_colcount") or "error"
if self.tooManyColumns not in ("error", "trim", "skip"):
log_to_postgres("fdw_colcount must be one of 'error', 'trim', 'skip', if provided", ERROR)
self.tooManyColumns = "error"
# Set what to do if imported table columns share a 63
# character prefix
self.sharedPrefix = options.get("fdw_colnames") or "error"
if self.sharedPrefix not in ("error", "skip", "trim"):
log_to_postgres("fdw_colnames must be one of 'error', 'trim', 'skip', if provided", ERROR)
self.sharedPrefix = "error"
def setDatatypes(self):
"""
Set data types mapping
"""
# Create a named tuple
datatype = namedtuple('datatype', 'postgres bq_standard bq_legacy')
datatypes = [
datatype('text', 'STRING', 'STRING'),
# datatype('bytea', 'BYTES', 'BYTES'), # Not supported, need testing for support
datatype('bigint', 'INT64', 'INTEGER'),
datatype('double precision', 'FLOAT64', 'FLOAT'),
datatype('boolean', 'BOOL', 'BOOLEAN'),
datatype('timestamp without time zone', 'TIMESTAMP', 'TIMESTAMP'),
datatype('date', 'DATE', 'DATE'),
datatype('time without time zone', 'TIME', 'TIME'),
datatype('timestamp without time zone', 'DATETIME', 'DATETIME'),
]
self.datatypes = {dtype.postgres: dtype for dtype in datatypes}
def setConversionRules(self):
"""
Set list of allowed conversion rules
"""
# Create a named tuple
conversionRule = namedtuple(
'conversionRule', 'bq_standard_from bq_standard_to')
conversionRules = [
conversionRule('INT64', ['BOOL', 'FLOAT64', 'INT64', 'STRING']),
conversionRule('FLOAT64', ['FLOAT64', 'INT64', 'STRING']),
conversionRule('BOOL', ['BOOL', 'INT64', 'STRING']),
conversionRule('STRING', ['BOOL', 'BYTES', 'DATE', 'DATETIME',
'FLOAT64', 'INT64', 'STRING', 'TIME', 'TIMESTAMP']),
conversionRule('BYTES', ['BYTES', 'STRING']),
conversionRule('DATE', ['DATE', 'DATETIME', 'STRING', 'TIMESTAMP']),
conversionRule(
'DATETIME', ['DATE', 'DATETIME', 'STRING', 'TIME', 'TIMESTAMP']),
conversionRule('TIME', ['STRING', 'TIME']),
conversionRule(
'TIMESTAMP', ['DATE', 'DATETIME', 'STRING', 'TIME', 'TIMESTAMP']),
conversionRule('ARRAY', ['ARRAY']),
conversionRule('STRUCT', ['STRUCT']),
]
self.conversionRules = {
rule.bq_standard_from: rule for rule in conversionRules
}
def setOptionSqlDialect(self, standard_sql=None):
"""
Set a flag for the SQL dialect.
It can be `standard` or `legacy`. `standard` will be the default
"""
self.dialect = 'standard'
if standard_sql == 'legacy':
self.dialect = 'legacy'
# Verbose log
if self.verbose:
log_to_postgres("Set SQL dialect to `" + self.dialect + "`", INFO)
def setOptionGroupBy(self, group):
"""
Set a flag `self.groupBy` as `True` if `group` contains the string 'true'
Otherwise, set it as `False`
"""
if group == 'true':
self.groupBy = True
return
self.groupBy = False
def setOptionVerbose(self, verbose):
"""
Set a flag `self.verbose` as `True` if `verbose` contains the string 'true'
Otherwise, set it as `False`
"""
if verbose == 'true':
self.verbose = True
return
self.verbose = False
def setOptionCasting(self, castingRules):
"""
Conversion rules are received as a string, for example: '{"key": "FLOAT64", "datetime": "DATE"}'
The string will be converted to a dict
"""
if castingRules:
# Cast string as a dict
try:
import ast
self.castingRules = ast.literal_eval(castingRules)
except Exception as e:
log_to_postgres(
"fdw_casting conversion failed: `" + str(e) + "`", ERROR)
# For security reasons, ensure that the string was correctly casted as a dict
try:
if type(self.castingRules) is not dict:
raise ValueError('fdw_casting format is incorrect.')
except Exception as e:
log_to_postgres(
"fdw_casting conversion failed: `" + str(e) + "`", ERROR)
def getClient(self):
"""
Manage a pool of instances of BqClient
If the user uses different private keys in different tables, this method will return
the correct instance of BqClient class associated to the table private key
"""
# Returns an existing instance
if self.client:
return self.client
# Or create a new instance
return self.setClient()
def setClient(self):
"""
Attempt to connect to BigQuery client
"""
try:
# Attempt connection
bq = BqClient()
bq.setClient()
# Verbose log
if self.verbose:
log_to_postgres(
"Connection to BigQuery client with BqClient instance ID " + str(id(bq)), INFO)
# Add to pool
self.client = bq
return bq
except RuntimeError:
log_to_postgres(
"Connection to BigQuery client failed", ERROR)
def execute(self, quals, columns):
"""
Executes a query
"""
# # Verbose log
# if self.verbose:
# log_to_postgres('Quals...', INFO)
# log_to_postgres(quals, INFO)
# log_to_postgres('Columns...', INFO)
# log_to_postgres(columns, INFO)
# Returns instance of BqClient
client = self.getClient()
# Prepare query
query, parameters = self.buildQuery(quals, columns)
# Run query
client.runQuery(query, parameters, self.dialect)
# Return query output
for row in client.readResult():
# Create an ordered dict with the column name and value
# Example: `OrderedDict([('column1', 'value1'), ('column2', value2)])`
line = OrderedDict()
for column in columns:
line[column] = row[column]
yield line
def buildQuery(self, quals, columns):
"""
Builds a BigQuery query
"""
# Set query var
query = ''
# Add SELECT clause
query += 'SELECT ' + self.buildColumnList(columns)
# Add FROM clause
query += " FROM `" + self.dataset + "." + self.table + "` "
# Add WHERE clause
clause, parameters = self.buildWhereClause(quals)
query += clause
# Add group by
if self.groupBy:
groupByColumns = self.buildColumnList(columns, 'GROUP_BY')
if groupByColumns:
                query += ' GROUP BY ' + groupByColumns
# Verbose log
if self.verbose:
log_to_postgres("Prepared query: `" + query + "`", INFO)
return query, parameters
def buildColumnList(self, columns, usage='SELECT'):
"""
Build the SELECT clause of the SQL query
"""
clause = ''
# Disable aliases for Group By
useAliases = True
if usage == 'GROUP_BY':
useAliases = False
if columns: # If we have columns
for column in columns:
if column == self.countPseudoColumn: # Pseudo column to count grouped rows
if usage == 'SELECT':
clause += "count(*) " + \
self.addColumnAlias(column, useAliases) + ", "
elif column == self.partitionPseudoColumn: # Partition pseudo column
clause += "_PARTITIONTIME " + \
self.addColumnAlias(column, useAliases) + ", "
else: # Any other column
# Get column data type
dataType = self.getBigQueryDatatype(column)
# Save column original name
columnOriginalName = column
# If the data type is a date or a timestamp
if dataType in ['DATE', 'TIMESTAMP']:
column = self.setTimeZone(column, dataType)
# Data type casting
column = self.castColumn(
column, columnOriginalName, dataType)
clause += column + " " + \
self.addColumnAlias(
columnOriginalName, useAliases) + ", "
# Remove final `, `
clause = clause.strip(', ')
elif usage == 'SELECT': # Otherwise fetch all
clause += "*"
return clause
def setTimeZone(self, column, dataType):
"""
If the option `fdw_convert_tz` is used, convert the time zone automatically from UTC to the desired time zone
"""
# Option is set
if self.convertToTz:
if dataType == 'DATE': # BigQuery column type is `DATE`
return 'DATE(' + column + ', "' + self.convertToTz + '") '
else: # BigQuery column type is `TIMESTAMP`
return 'DATETIME(' + column + ', "' + self.convertToTz + '") '
# Option is not set
return column
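        # Illustrative example (not in the original source): with fdw_convert_tz set to
        # 'US/Eastern', a TIMESTAMP column `created_at` is rewritten by this method as
        #   DATETIME(created_at, "US/Eastern")
        # while a DATE column `day` becomes DATE(day, "US/Eastern").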
def castColumn(self, column, columnOriginalName, dataType):
"""
If the option `fdw_casting` is used, this method will attempt to cast the column to the new type
"""
if self.castingRules and columnOriginalName in self.castingRules: # If we have casting rule for this column
# Get desired casting
castTo = self.castingRules[columnOriginalName]
# Find if we have a matching rule
rule = self.conversionRules.get(dataType.upper())
if rule:
# Check if casting from the original data type to the new one is supported
if castTo.upper() in rule.bq_standard_to:
return 'CAST(' + column + ' as ' + castTo.upper() + ')'
else:
log_to_postgres("Casting from the data type `" + dataType.upper(
) + "` to the data type `" + castTo.upper() + "` is not permitted.", ERROR)
else:
log_to_postgres(
"Casting from the data type `" + dataType.upper() + "` is not permitted.", ERROR)
# Option is not set
return column
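        # Illustrative example (not in the original source): with the table option
        # fdw_casting='{"key": "FLOAT64"}' and `key` stored as INT64, the conversion rules
        # above allow the cast and the column is rewritten as CAST(key as FLOAT64).
        # An unsupported target type is reported through log_to_postgres(..., ERROR).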
def addColumnAlias(self, alias, useAliases=True):
"""
Returns a string "as `alias`" if `useAliases` is `True`
"""
if useAliases:
return " as " + alias
return ''
def buildWhereClause(self, quals):
"""
Build the WHERE clause of the SQL query
"""
clause = ''
parameters = []
# Add WHERE clause
# `quals` example: `[Qual('test', '=', 'test 2'), Qual('test', '~~', '3')]`
if quals:
clause += "WHERE "
for qual in quals:
if qual.field_name == self.partitionPseudoColumn:
clause += "_PARTITIONTIME " + \
str(self.getOperator(qual.operator)) + " ?"
# Format date as a timestamp
value = qual.value.strftime("%Y-%m-%d 00:00:00")
# Force data type to `TIMESTAMP`
parameters.append(self.setParameter(
qual.field_name, 'TIMESTAMP', value))
else:
clause += str(qual.field_name) + " " + \
str(self.getOperator(qual.operator)) + " ?"
parameters.append(self.setParameter(
qual.field_name, self.getBigQueryDatatype(qual.field_name), qual.value))
# Add ` AND `
clause += " AND "
# Remove final ` AND `
clause = clause.strip(' AND ')
return (clause, parameters)
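        # Illustrative example (not in the original source): for the docstring's quals
        # [Qual('test', '=', 'test 2'), Qual('test', '~~', '3')] on a STRING column, the
        # returned clause is roughly
        #   WHERE test = ? AND test LIKE ?
        # (getOperator maps `~~` to LIKE), with two STRING query parameters bound to
        # 'test 2' and '3' by setParameter.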
def getOperator(self, operator):
"""
Validate operator
"""
# List of BigQuery operators supported
# Exhaustive list: https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-and-operators#operators
# Non listed operators may or may not work
operators = ['=', '<', '>', '<=', '>=', '!=', '<>', 'LIKE', 'NOT LIKE']
# Mapping between multicorn operators and BigQuery operators
mapping = {}
mapping['~~'] = 'LIKE'
mapping['!~~'] = 'NOT LIKE'
if operator in operators: # Operator is natively supported
return operator
elif operator in mapping: # Multicorn operator has a BigQuery equivalent
return mapping[operator]
else: # Operator is not supported
log_to_postgres(
"Operator `" + operator + "` is not currently supported", ERROR)
def getBigQueryDatatype(self, column, dialect='standard'):
"""
Returns the BigQuery standard SQL data type of a PostgreSQL column
Example: `column1` has the PostgreSQL type `bigint` which is called `INT64` in BigQuery standard SQL
"""
# Get PostgreSQL column type
# Example: `timestamp without time zone`
pgDatatype = self.columns[column].base_type_name
datatype = self.datatypes.get(pgDatatype)
if datatype:
if dialect == 'legacy':
return datatype.bq_legacy
else:
return datatype.bq_standard
# Return a default data type in an attempt to save the day
return 'STRING'
def setParameter(self, column, type_, value):
"""
Set a parameter in BigQuery client
"""
# Verbose log
if self.verbose:
log_to_postgres(
"Add query parameter `" + self.client.varToString(value) + "` for column `" + column + "` with the type `" + type_ + "`", INFO)
return self.client.setParameter(type_, value)
@classmethod
def import_schema(
cls, schema, srv_options, options, restriction_type, restricts=None
):
return cls(srv_options, []).import_schema_bigquery_fdw(
schema, srv_options, options, restriction_type, restricts
)
def import_schema_bigquery_fdw(
self, schema, srv_options, options, restriction_type, restricts=None
):
"""
Pulls in the remote schema.
"""
if restriction_type == 'limit':
only = lambda t: t in restricts
elif restriction_type == 'except':
only = lambda t: t not in restricts
else:
only = None
client = self.getClient()
query = f'''
SELECT table_schema, table_name, column_name, data_type
FROM `{schema}.INFORMATION_SCHEMA.COLUMNS`
ORDER BY ordinal_position;
'''
schemas = set()
client.runQuery(query, (), self.dialect)
tables = defaultdict(list)
for row in client.readResult():
if only and not only(row.table_name):
# doesn't match required fields
continue
schemas.add(row.table_schema)
tables[row.table_schema, row.table_name].append(
(row.column_name, row.data_type)
)
to_insert = []
for (_schema, table), columns in tables.items():
if _schema.lower() != schema.lower():
# wrong schema, we'll skip
continue
# Let's make sure the table is sane-ish with respect to
# column names and counts.
try:
if not self._check_table(table, columns):
# for "skip" in fdw_colcount and "skip_table" in fdw_colnames
continue
except FDWImportError:
# for "error" cases in fdw_colnames and fdw_colcount
return []
# for non-error, trim, and trim_columns
ftable = TableDefinition(table)
ftable.options['schema'] = schema
ftable.options['tablename'] = table
for col, typ in columns:
typ = DEFAULT_MAPPINGS.get(typ, "TEXT")
ftable.columns.append(ColumnDefinition(col, type_name=typ))
to_insert.append(ftable)
if self.verbose:
log_to_postgres("fdw importing table `" + schema + "." + table + "`", WARNING)
return to_insert
def _check_table(self, table, columns):
# column names are going to be truncated, let's make sure that we
# don't have any shared prefixes
shortened = defaultdict(set)
for i, (name, typ) in enumerate(columns):
shortened[name[:63]].add(name)
if len(shortened) != len(columns):
bad_cols = set()
for prefix, dupes in shortened.items():
if len(dupes) > 1:
if self.sharedPrefix == "error":
log_to_postgres(
"fdw not importing table `" + table \
+ "` with identical 63-character prefix `" + prefix \
+ "` columns: " + str(list(dupes)), ERROR)
raise FDWImportError("matching column prefix: " + prefix)
elif self.sharedPrefix == "skip":
if self.verbose:
log_to_postgres(
"fdw not importing table `" + table \
+ "` with identical 63-character prefix `" + prefix \
+ "` columns: " + str(list(dupes)), WARNING)
return False
else: #if self.sharedPrefix == "trim"
if self.verbose:
log_to_postgres(
"fdw not importing columns in table `" + table \
+ "` with identical 63-character prefix `" + prefix \
+ "` columns: " + str(list(dupes)), WARNING)
# make note of our bad columns
bad_cols.update(dupes)
# remove bad columns from our list
columns[:] = [col for col in columns if col[0] not in bad_cols]
if not columns:
# trimmed to 0 due to duplicate column prefixes, that is amazing
if self.verbose:
log_to_postgres(
"fdw not importing table `" + table \
+ "` because all columns share some 63 character prefix with another" \
+ str(list(dupes)), WARNING)
return False
# bigquery can have many columns, let's make sure we're not trying
# to load a table with too many columns.
if len(columns) > 1600:
if self.tooManyColumns == "error":
log_to_postgres(
"fdw not importing table `" + table + "` with " \
+ str(len(columns)) + " columns", ERROR)
raise FDWImportError("too many columns: " + str(len(columns)))
elif self.tooManyColumns == "trim":
if self.verbose:
log_to_postgres(
"fdw trimming " + str(len(columns) - 1600) \
+ " columns from table `" + table + "` on import", WARNING)
del columns[1600:]
else: # skip
if self.verbose:
log_to_postgres(
"fdw skipping table `" + table + "` with " \
+ str(len(columns)) + " columns", WARNING)
return False
return True
``` |
{
"source": "JosiahCraw/Is-the-Build-Done-Yet",
"score": 2
} |
#### File: Is-the-Build-Done-Yet/is_the_build_done_yet/is_the_build_done_yet.py
```python
import requests
import argparse
import json
import subprocess
import os
from pyfiglet import Figlet
from PyInquirer import style_from_dict, Token, prompt
f = Figlet(font='slant')
def create_settings(url, user, light_ids, fail_colour, success_colour):
data = {
"api_user": user,
"url": url,
"lights" : {
"ids": light_ids,
"fail_colour": fail_colour,
"success_colour": success_colour
}
}
home_dir = os.path.expanduser("~")
try:
os.mkdir('{}/.is-my-build-done'.format(home_dir))
except FileExistsError as e:
pass
with open('{}/.is-my-build-done/settings.json'.format(home_dir), 'w+') as settings:
json.dump(data, settings)
def get_settings():
home_dir = os.path.expanduser("~")
with open('{}/.is-my-build-done/settings.json'.format(home_dir), 'r') as settings:
data = json.load(settings)
return data
def check_settings():
return os.path.exists('{}/.is-my-build-done/settings.json'.format(os.path.expanduser("~")))
def rgb_to_cie(r, g, b):
r = int(r)
g = int(g)
b = int(b)
X = 0.4124*r + 0.3576*g + 0.1805*b
Y = 0.2126*r + 0.7152*g + 0.0722*b
Z = 0.0193*r + 0.1192*g + 0.9505*b
x = X / 256
y = Y / 256
return [round(x, 4), round(y, 4)]
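    # Worked example (not in the original source): rgb_to_cie(255, 0, 0) computes
    # X = 105.162 and Y = 54.213, giving roughly [0.4108, 0.2118]. Note the division by
    # 256 rather than by X + Y + Z, so these are only an approximation of true CIE xy
    # chromaticity coordinates; the values still fall in the 0-1 range the bridge expects.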
def list_lights(url, user):
resp = requests.get('{}/api/{}/lights'.format(url, user))
lights = resp.json()
light_names = list()
for light in lights:
light_data = (lights[light]["name"], light)
light_names.append(light_data)
return light_names
def get_current_colour(url, user, light_id):
light_states = {"ids": []}
for light in light_id:
resp = requests.get('{}/api/{}/lights/{}'.format(url, user, light))
resp = resp.json()
data = resp["state"]
ids = light_states["ids"]
ids.append(light)
light_states[light] = data
return light_states
def set_light_state(url, user, states):
ids = states["ids"]
for light_id in ids:
request_url = '{}/api/{}/lights/{}/state'.format(url, user, light_id)
resp = requests.put(request_url, json=states[light_id])
def set_light(url, user, lights, colour, brightness=255):
cie_colour = rgb_to_cie(colour["r"], colour["g"], colour["b"])
for light in lights:
data = {
"on": True,
"bri": brightness,
"xy": cie_colour,
}
request_url = '{}/api/{}/lights/{}/state'.format(url, user, light)
resp = requests.put(request_url, json=data)
def build_finished(failed):
data = get_settings()
url = data["url"]
user = data["api_user"]
lights = data["lights"]["ids"]
if failed:
set_light(url, user, lights, data["lights"]["fail_colour"])
else:
set_light(url, user, lights, data["lights"]["success_colour"])
def run_setup():
style = style_from_dict({
Token.Separator: '#cc5454',
Token.QuestionMark: '#673ab7 bold',
Token.Selected: '#cc5454', # default
Token.Pointer: '#673ab7 bold',
Token.Instruction: '', # default
Token.Answer: '#<PASSWORD> bold',
Token.Question: '',
})
questions = [
{
'type': 'input',
'name': 'url',
'message': 'Enter Hue Bridge URL (include http://)'
},
{
'type': 'input',
'name': 'api_user',
'message': 'Enter your API user code'
},
{
'type': 'input',
'name': 'fail_colour',
'message': 'Enter Fail Colour of the form <r,g,b>'
},
{
'type': 'input',
'name': 'success_colour',
'message': 'Enter Success Colour of the form <r,g,b>'
}
]
answers = prompt(questions, style=style)
url = answers['url']
user = answers['api_user']
fail_colours = answers["fail_colour"].split(',')
success_colours = answers["success_colour"].split(',')
fail_colour = {
"r": int(fail_colours[0]),
"g": int(fail_colours[1]),
"b": int(fail_colours[2])
}
success_colour = {
"r": int(success_colours[0]),
"g": int(success_colours[1]),
"b": int(success_colours[2])
}
lights = list_lights(url, user)
light_choices = list()
for light in lights:
light_choices.append({'name': light[0]})
questions = [
{
'type': 'checkbox',
'message': 'Select Lights',
'name': 'selected_lights',
'choices': light_choices
}
]
answers = prompt(questions, style=style)
light_ids = list()
for light in answers["selected_lights"]:
for item in lights:
if light == item[0]:
light_ids.append(item[1])
create_settings(url, user, light_ids, fail_colour, success_colour)
def main():
parser = argparse.ArgumentParser(description='Utility for use hue bulbs to set the outcome of a bash command')
parser.add_argument('-r', '--run', nargs='+', help='Accepts command to be run')
parser.add_argument('-s', '--setup', action='store_true', help='Runs utility setup', required=False)
args = parser.parse_args()
if (args.setup):
run_setup()
elif (args.run):
if(not check_settings()):
print("No settings found please use '--setup' to create some")
exit(0)
settings = get_settings()
url = settings["url"]
user = settings["api_user"]
lights = settings["lights"]["ids"]
state = get_current_colour(url, user, lights)
process = subprocess.run(args.run)
if process.returncode == 0:
print(f.renderText('Passed'))
build_finished(False)
else:
print(f.renderText('Failed'))
build_finished(True)
input("Press Any Key to Terminate... ")
set_light_state(url, user, state)
else:
parser.print_help()
if __name__ == "__main__":
main()
``` |
{
"source": "josiahdavis/python_data_analysis",
"score": 3
} |
#### File: josiahdavis/python_data_analysis/python_data_analysis.py
```python
import pandas as pd # Import an already installed python package
import numpy as np
# Running this next line of code assumes that your console working directory is set up correctly
# To set up your working directory
# 1) Put the data and the script in the same working directory
# 2) Select the options button in the upper right hand corner of the editor
# 3) Select "Set console working directory"
# Read the csv file from your computer (after setting working directory)
ufo = pd.read_csv('ufo_sightings.csv')
# Alternatively, specify the file path
ufo = pd.read_csv('C:/Users/josdavis/Documents/Personal/GitHub/python_data_analysis/ufo_sightings.csv')
# Alternatively, read in the file from the internet
ufo = pd.read_csv('https://raw.githubusercontent.com/josiahdavis/python_data_analysis/master/ufo_sightings.csv')
# Finding help on a function
help(pd.read_csv)
# Summarize the data that was just read in
ufo.head(10) # Look at the top x observations
ufo.tail() # Bottom x observations (defaults to 5)
ufo.describe() # describe any numeric columns (unless all columns are non-numeric)
ufo.index # "the index" (aka "the labels")
ufo.columns # column names (which is "an index")
ufo.dtypes # data types of each column
ufo.values # underlying numpy array
ufo.info() # concise summary
# Select a single column (a series)
ufo['State']
ufo.State # This is equivalent
# Select multiple columns (a dataframe)
ufo[['State', 'City','Shape Reported']]
my_cols = ['State', 'City', 'Shape Reported']
ufo[my_cols] # This is equivalent
'''
Filtering and Sorting Data
'''
# Logical filtering
ufo[ufo.State == 'TX']
ufo[~(ufo.State == 'TX')] # Select everything where the test fails
ufo[ufo.State != 'TX'] # Same thing
ufo.City[ufo.State == 'TX']
ufo[ufo.State == 'TX'].City # Same thing
ufo[(ufo.State == 'CA') | (ufo.State =='TX')]
ufo_dallas = ufo[(ufo.City == 'Dallas') & (ufo.State =='TX')]
ufo[ufo.City.isin(['Austin','Dallas', 'Houston'])]
# Sorting
ufo.State.order() # only works for a Series
ufo.sort_index(ascending=False) # sort rows by row labels
ufo.sort_index(ascending=False, inplace=True) # Sort rows inplace
ufo.sort_values(by='State') # sort rows by specific column
ufo.sort_values(by=['State', 'Shape Reported']) # sort by multiple columns
ufo.sort_values(by=['State', 'Shape Reported'], ascending=[False, True], inplace=True) # specify sort order
'''
Modifying Columns
'''
# Add a new column as a function of existing columns
ufo['Location'] = ufo['City'] + ', ' + ufo['State']
ufo.head()
# Rename columns
ufo.rename(columns={'Colors Reported':'Colors', 'Shape Reported':'Shape'}, inplace=True)
# Hide a column (temporarily)
ufo.drop(['Location'], axis=1)
# Delete a column (permanently)
del ufo['Location']
'''
Handling Missing Values
'''
# Missing values are often just excluded
ufo.describe() # Excludes missing values
ufo.Shape.value_counts() # Excludes missing values
ufo.Shape.value_counts(dropna=False) # Includes missing values
# Find missing values in a Series
ufo.Shape.isnull() # True if NaN, False otherwise
ufo.Shape.notnull() # False if NaN, True otherwise
ufo.Shape.isnull().sum() # Count the missing values
# Find missing values in a DataFrame
ufo.isnull()
ufo.isnull().sum()
ufo[(ufo.Shape.notnull()) & (ufo.Colors.notnull())]
# Drop missing values
ufo.dropna() # Drop a row if ANY values are missing
ufo.dropna(how='all') # Drop a row only if ALL values are missing
# Fill in missing values
ufo.Colors.fillna(value='Unknown', inplace=True)
ufo.fillna('Unknown')
'''
EXERCISE: Working with drinks data
'''
# Read drinks.csv (in the drinks_data folder) into a DataFrame called 'drinks'
# Print the first 10 rows
# Examine the data types of all columns
# Print the 'beer_servings' Series
# Calculate the average 'beer_servings' for the entire dataset
# Print all columns, but only show rows where the country is in Europe
# Calculate the average 'beer_servings' for all of Europe
# Only show European countries with 'wine_servings' greater than 300
# Determine which 10 countries have the highest 'total_litres_of_pure_alcohol'
# Determine which country has the highest value for 'beer_servings'
# Count the number of occurrences of each 'continent' value and see if it looks correct
# Determine which countries do not have continent designations
# Determine the number of countries per continent. Does it look right?
'''
SOLUTIONS: Working with drinks data
'''
# Read drinks.csv (in the drinks_data folder) into a DataFrame called 'drinks'
drinks = pd.read_csv('drinks_data/drinks.csv')
# Print the first 10 rows
drinks.head(10)
# Examine the data types of all columns
drinks.dtypes
drinks.info()
# Print the 'beer_servings' Series
drinks.beer_servings
drinks['beer_servings']
# Calculate the average 'beer_servings' for the entire dataset
drinks.describe() # summarize all numeric columns
drinks.beer_servings.describe() # summarize only the 'beer_servings' Series
drinks.beer_servings.mean() # only calculate the mean
# Print all columns, but only show rows where the country is in Europe
drinks[drinks.continent=='EU']
# Calculate the average 'beer_servings' for all of Europe
drinks[drinks.continent=='EU'].beer_servings.mean()
# Only show European countries with 'wine_servings' greater than 300
drinks[(drinks.continent=='EU') & (drinks.wine_servings > 300)]
# Determine which 10 countries have the highest 'total_litres_of_pure_alcohol'
drinks.sort_values(by='total_litres_of_pure_alcohol').tail(10)
# Determine which country has the highest value for 'beer_servings'
drinks[drinks.beer_servings==drinks.beer_servings.max()].country
drinks[['country', 'beer_servings']].sort_values(by='beer_servings', ascending=False).head(5)
# Count the number of occurrences of each 'continent' value and see if it looks correct
drinks.continent.value_counts()
# Determine which countries do not have continent designations
drinks[drinks.continent.isnull()].country
# Due to "na_filter = True" default within pd.read_csv()
help(pd.read_csv)
'''
Indexing and Slicing Data
'''
# Create a new index
ufo.set_index('State', inplace=True)
ufo.index
ufo.index.is_unique
ufo.sort_index(inplace=True)
ufo.head(25)
# loc: filter rows by LABEL, and select columns by LABEL
ufo.loc['FL',:] # row with label FL
ufo.loc[:'FL',:] # rows with labels 'FL' through ''
ufo.loc['FL':'HI', 'City':'Shape'] # rows FL, columns 'City' through 'Shape Reported'
ufo.loc[:, 'City':'Shape'] # all rows, columns 'City' through 'Shape Reported'
ufo.loc[['FL', 'TX'], ['City','Shape']] # rows FL and TX, columns 'City' and 'Shape Reported'
# iloc: filter rows by POSITION, and select columns by POSITION
ufo.iloc[0,:] # row with 0th position (first row)
ufo.iloc[0:3,:] # rows with positions 0 through 2 (not 3)
ufo.iloc[0:3, 0:3] # rows and columns with positions 0 through 2
ufo.iloc[:, 0:3] # all rows, columns with positions 0 through 2
ufo.iloc[[0,2], [0,1]] # 1st and 3rd row, 1st and 2nd column
# Add another level to the index
ufo.set_index('City', inplace=True, append=True) # Adds to existing index
ufo.sort_index(inplace=True)
ufo.head(25)
# Slice using the multi-index
ufo.loc[['ND', 'WY'],:]
ufo.loc['ND':'WY',:]
ufo.loc[('ND', 'Bismarck'),:]
ufo.loc[('ND', 'Bismarck'):('ND','Casselton'),:]
# Reset the index
ufo.reset_index(level='City', inplace=True) # Remove a certain label from the index
ufo.reset_index(inplace=True) # Remove all labels from the index
'''
Analyzing across time
'''
# Reset the index
ufo.reset_index(inplace=True)
# Convert Time column to date-time format (defined in Pandas)
# transforming to time
# Formatting Time: https://docs.python.org/2/library/time.html#time.strftime
ufo.dtypes
ufo['Time'] = pd.to_datetime(ufo['Time'], format="%m/%d/%Y %H:%M")
ufo.dtypes
# Compute date range
ufo.Time.min()
ufo.Time.max()
# Slice using time
ufo[ufo.Time > pd.datetime(1995, 1, 1)]
ufo[(ufo.Time > pd.datetime(1995, 1, 1)) & (ufo.State =='TX')]
# Set the index to time
ufo.set_index('Time', inplace=True)
ufo.sort_index(inplace=True)
ufo.head()
# Access particular times/ranges
ufo.loc['1995',:]
ufo.loc['1995-01',:]
ufo.loc['1995-01-01',:]
# Access range of times/ranges
ufo.loc['1995':,:]
ufo.loc['1995':'1996',:]
ufo.loc['1995-12-01':'1996-01',:]
# Access elements of the timestamp
# Date Componenets: http://pandas.pydata.org/pandas-docs/stable/timeseries.html#time-date-components
ufo.index.year
ufo.index.month
ufo.index.weekday
ufo.index.day
ufo.index.time
ufo.index.hour
# Create a new variable with time element
ufo['Year'] = ufo.index.year
ufo['Month'] = ufo.index.month
ufo['Day'] = ufo.index.day
ufo['Weekday'] = ufo.index.weekday
ufo['Hour'] = ufo.index.hour
'''
Split-Apply-Combine
'''
# for each year, calculate the count of sightings
ufo.groupby('Year').size()
ufo.Year.value_counts() # Same as before
# for each Shape, calculate the first sighting, last sighting, and range of sightings.
ufo.groupby('Shape').Year.min()
ufo.groupby('Shape').Year.max()
# Specify the variable outside of the apply statement
ufo.groupby('Shape').Year.apply(lambda x: x.max())
# Specify the variable within the apply statement
ufo.groupby('Shape').apply(lambda x: x.Year.max() - x.Year.min())
# Specify a custom function to use in the apply statement
def get_max_year(df):
try:
return df.Year.max()
except:
return ''
ufo.groupby('Shape').apply(lambda x: get_max_year(x))
# Split/combine can occur on multiple columns at the same time
# For each Weekday / Hour combination, determine a count of sightings
ufo.groupby(['Weekday','Hour']).size()
'''
Merging data
'''
# Read in population data
pop = pd.read_csv('population.csv')
pop.head()
ufo.head()
# Merge the data together
ufo = pd.merge(ufo, pop, on='State', how = 'left')
# Specify keys if columns have different names
ufo = pd.merge(ufo, pop, left_on='State', right_on='State', how = 'left')
# Observe the new Population column
ufo.head()
# Check for values that didn't make it (length)
ufo.Population.isnull().sum()
# Check for values that didn't make it (values)
ufo[ufo.Population.isnull()]
# Change the records that didn't match up using np.where command
ufo['State'] = np.where(ufo['State'] == 'Fl', 'FL', ufo['State'])
# Alternatively, change the state using native python string functionality
ufo['State'] = ufo['State'].str.upper()
# Merge again, this time get all of the records
ufo = pd.merge(ufo, pop, on='State', how = 'left')
'''
Writing Data
'''
ufo.to_csv('ufo_new.csv')
ufo.to_csv('ufo_new.csv', index=False) # Index is not included in the csv
'''
Other useful features
'''
# Detect duplicate rows
ufo.duplicated() # Series of logicals
ufo.duplicated().sum() # count of duplicates
ufo[ufo.duplicated(['State','Time'])] # only show duplicates
ufo[ufo.duplicated()==False] # only show unique rows
ufo_unique = ufo[~ufo.duplicated()] # only show unique rows
ufo.duplicated(['State','Time']).sum() # columns for identifying duplicates
# Replace all instances of a value (supports 'inplace=True' argument)
ufo.Shape.replace('DELTA', 'TRIANGLE') # replace values in a Series
ufo.replace('PYRAMID', 'TRIANGLE') # replace values throughout a DataFrame
# Replace with a dictionary
ufo['Weekday'] = ufo.Weekday.replace({ 0:'Mon', 1:'Tue', 2:'Wed',
3:'Thu', 4:'Fri', 5:'Sat',
6:'Sun'})
# Pivot rows to columns
ufo.groupby(['Weekday','Hour']).size()
ufo.groupby(['Weekday','Hour']).size().unstack(0) # Make first row level a column
ufo.groupby(['Weekday','Hour']).size().unstack(1) # Make second row level a column
# Note: .stack transforms columns to rows
# Randomly sample a DataFrame
idxs = np.random.rand(len(ufo)) < 0.66 # create a Series of booleans
train = ufo[idxs] # will contain about 66% of the rows
test = ufo[~idxs] # will contain the remaining rows
'''
Advanced Examples (w/Plotting)
'''
# Plot the number of sightings over time
ufo.groupby('Year').size().plot(kind='line',
color='r',
linewidth=2,
title='UFO Sightings by year')
# Plot the number of sightings over the day of week and time of day
ufo.groupby(['Weekday','Hour']).size().unstack(0).plot( kind='line',
linewidth=2,
title='UFO Sightings by Time of Day')
# Plot the sightings in in July 2014
ufo[(ufo.Year == 2014) & (ufo.Month == 7)].groupby(['Day']).size().plot( kind='bar',
color='b',
title='UFO Sightings in July 2014')
# Plot multiple plots on the same plot (plots need to be in column format)
ufo_fourth = ufo[(ufo.Year.isin([2011, 2012, 2013, 2014])) & (ufo.Month == 7)]
ufo_fourth.groupby(['Year', 'Day']).City.count().unstack(0).plot( kind = 'bar',
subplots=True,
figsize=(7,9))
``` |
{
"source": "josiahdavis/pytorch-reference-lm",
"score": 2
} |
#### File: josiahdavis/pytorch-reference-lm/language-model.py
```python
import argparse
import time
import math
import os
from io import open
# import data
# import model
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda.profiler as profiler
import torch.optim as optim
from apex import pyprof
# pyprof.nvtx.init()
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
class Corpus(object):
def __init__(self, path):
self.dictionary = Dictionary()
self.train = self.tokenize(os.path.join(path, "train.txt"))
self.valid = self.tokenize(os.path.join(path, "valid.txt"))
self.test = self.tokenize(os.path.join(path, "test.txt"))
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, "r", encoding="utf8") as f:
for line in f:
words = line.split() + ["<eos>"]
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, "r", encoding="utf8") as f:
idss = []
for line in f:
words = line.split() + ["<eos>"]
ids = []
for word in words:
ids.append(self.dictionary.word2idx[word])
idss.append(torch.tensor(ids).type(torch.int64))
ids = torch.cat(idss)
return ids
class TransformerModel(nn.Module):
def __init__(self, ntoken, ninp, nhead, nhid, nlayers, seq_len, dropout=0.5):
super(TransformerModel, self).__init__()
self.src_mask = None
encoder_layers = nn.TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = nn.TransformerEncoder(encoder_layers, nlayers)
self.position_embeddings = nn.Embedding(seq_len, ninp)
self.word_embeddings = nn.Embedding(ntoken, ninp)
self.ninp = ninp
self.decoder = nn.Linear(ninp, ntoken)
self.init_weights()
position = torch.arange(0, seq_len).unsqueeze(1)
self.register_buffer("position", position)
self.dropout = nn.Dropout(p=dropout)
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = (
mask.float()
.masked_fill(mask == 0, float("-inf"))
.masked_fill(mask == 1, float(0.0))
)
return mask
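        # Worked example (not in the original source): for sz=3 this returns
        #   [[0., -inf, -inf],
        #    [0.,   0., -inf],
        #    [0.,   0.,   0.]]
        # i.e. position i may only attend to positions <= i, the additive mask
        # convention expected by nn.TransformerEncoder.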
def init_weights(self):
initrange = 0.1
self.word_embeddings.weight.data.uniform_(-initrange, initrange)
self.position_embeddings.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, src, has_mask=True):
if has_mask:
device = src.device
if self.src_mask is None or self.src_mask.size(0) != len(src):
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
else:
self.src_mask = None
word_embeddings = self.word_embeddings(src) * math.sqrt(self.ninp)
position = self.position[: src.size(0), :]
position_embeddings = self.position_embeddings(position)
src = self.dropout(word_embeddings + position_embeddings)
output = self.transformer_encoder(src, self.src_mask)
output = self.decoder(output)
return F.log_softmax(output, dim=-1)
def batchify(data, bsz, device):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
return data.to(device)
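    # Worked example (not in the original source): a corpus of 26 token ids with bsz=4
    # gives nbatch=6; the tensor is trimmed to 24 ids, viewed as (4, 6) and transposed
    # to shape (6, 4), so each of the 4 columns is an independent contiguous stream.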
def get_batch(source, i, args):
seq_len = min(args.bptt, len(source) - 1 - i)
data = source[i : i + seq_len]
target = source[i + 1 : i + 1 + seq_len].view(-1)
return data, target
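    # Worked example (not in the original source): with args.bptt=35 and i=0, `data` is
    # rows 0..34 of `source` and `target` is rows 1..35 flattened, i.e. the same stream
    # shifted one token ahead for next-token prediction.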
def evaluate(model, data_source, corpus, criterion, args):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0.0
ntokens = len(corpus.dictionary)
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, args.bptt):
data, targets = get_batch(data_source, i, args)
output = model(data)
output_flat = output.view(-1, ntokens)
total_loss += len(data) * criterion(output_flat, targets).item()
return total_loss / (len(data_source) - 1)
parser = argparse.ArgumentParser(
description="PyTorch Wikitext-2 Transformer Language Model"
)
parser.add_argument(
"--data", type=str, default="./data/wikitext-2", help="location of the data corpus"
)
parser.add_argument(
"--emsize", type=int, default=200, help="size of word embeddings (also, d_model)",
)
parser.add_argument(
"--nhid",
type=int,
default=200,
help="number of hidden units per layer (e.g., dim_feedforward)",
)
parser.add_argument("--nlayers", type=int, default=1, help="number of layers")
parser.add_argument("--lr", type=float, default=20, help="initial learning rate")
parser.add_argument("--clip", type=float, default=0.25, help="gradient clipping")
parser.add_argument("--epochs", type=int, default=1, help="upper epoch limit")
parser.add_argument(
"--batch_size", type=int, default=20, metavar="N", help="batch size"
)
parser.add_argument("--bptt", type=int, default=35, help="sequence length")
parser.add_argument(
"--dropout",
type=float,
default=0.2,
help="dropout applied to layers (0 = no dropout)",
)
parser.add_argument(
"--tied", action="store_true", help="tie the word embedding and softmax weights"
)
parser.add_argument("--seed", type=int, default=1111, help="random seed")
parser.add_argument("--cuda", action="store_true", help="use CUDA")
parser.add_argument(
"--log-interval", type=int, default=200, metavar="N", help="report interval"
)
parser.add_argument(
"--save", type=str, default="model.pt", help="path to save the final model"
)
parser.add_argument(
"--nhead",
type=int,
default=1,
help="the number of heads in the encoder of the transformer model",
)
def main():
args = parser.parse_args()
pyprof.nvtx.init()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Running on device {device}")
corpus = Corpus(args.data)
eval_batch_size = 10
train_data = batchify(corpus.train, args.batch_size, device)
val_data = batchify(corpus.valid, eval_batch_size, device)
test_data = batchify(corpus.test, eval_batch_size, device)
print(f"train_data.shape={train_data.shape}")
print(f"val_data.shape={val_data.shape}")
print(f"test_data.shape={test_data.shape}")
ntokens = len(corpus.dictionary)
print(f"ntokens={ntokens}")
# model = model.TransformerModel(
model = TransformerModel(
ntokens,
args.emsize,
args.nhead,
args.nhid,
args.nlayers,
args.bptt,
args.dropout,
).cuda()
# ).to(device)
print(model)
criterion = nn.CrossEntropyLoss()
print(criterion)
print(f"Using tokens={ntokens}, emsize={args.emsize}, nhid={args.emsize}")
print(
f"""ntokens={ntokens}, emsize={args.emsize},
nhead={args.nhead}, nhid={args.nhid}, nlayers={args.nlayers},
        bptt={args.bptt}, dropout={args.dropout}
"""
)
iter_to_capture = 1
# Loop over epochs.
lr = args.lr
best_val_loss = None
# At any point you can hit Ctrl + C to break out of training early.
with torch.autograd.profiler.emit_nvtx():
for epoch in range(1, args.epochs + 1):
epoch_start_time = time.time()
model.train()
total_loss = 0.0
start_time = time.time()
ntokens = len(corpus.dictionary)
for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
data, targets = get_batch(train_data, i, args)
# TODO: Use language modelling abstraction with torchtext
model.zero_grad()
if (epoch == 1) and (batch == iter_to_capture):
profiler.start()
output = model(data)
loss = criterion(output.view(-1, ntokens), targets)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
for p in model.parameters():
p.data.add_(-lr, p.grad.data)
# TODO: Use an optimizer
if (epoch == 1) and (batch == iter_to_capture):
profiler.stop()
total_loss += loss.item()
if batch % args.log_interval == 0 and batch > 0:
cur_loss = total_loss / args.log_interval
elapsed = time.time() - start_time
print(
"| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | "
"loss {:5.2f} | ppl {:8.2f}".format(
epoch,
batch,
len(train_data) // args.bptt,
lr,
elapsed * 1000 / args.log_interval,
cur_loss,
math.exp(cur_loss),
)
)
total_loss = 0
start_time = time.time()
val_loss = evaluate(model, val_data, corpus, criterion, args)
print("-" * 89)
print(
"| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | "
"valid ppl {:8.2f}".format(
epoch,
(time.time() - epoch_start_time),
val_loss,
math.exp(val_loss),
)
)
print("-" * 89)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
best_val_loss = val_loss
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
lr /= 4.0
# Run on test data.
test_loss = evaluate(model, test_data, corpus, criterion, args)
print("=" * 89)
print(
"| End of training | test loss {:5.2f} | test ppl {:8.2f}".format(
test_loss, math.exp(test_loss)
)
)
print("=" * 89)
if __name__ == "__main__":
main()
``` |
{
"source": "josiah-d/job_applications",
"score": 4
} |
#### File: josiah-d/job_applications/update_record.py
```python
import datetime as dt
import psycopg2
# db global variables
HOST = 'localhost'
USER = 'postgres'
PASSWORD = '<PASSWORD>'
DATABASE = 'job_apps'
print('Initiating database connections.')
# db connection
conn = psycopg2.connect(host=HOST, database=DATABASE,
user=USER, password=PASSWORD)
print('Database connected.')
class UpdateRecord():
"""Updates a job entry to add to the database"""
def __init__(self, last_contact=False):
if last_contact:
self.company = self.get_company()
else:
self.company = self.get_company()
self.column = self.get_column()
self.value = self.get_value()
def get_company(self):
return input('enter the company name to update:\n> ')
def get_column(self):
        return input('enter the column name to update:\n> ')
def get_value(self):
return input(f'input the new value for {self.column}:\n> ')
if __name__ == '__main__':
print('Please provide inputs.\n')
# get inputs
contact_date = input('Update last contact date? (y/n)\n> ')
if contact_date.lower() == 'y':
record = UpdateRecord(last_contact=True)
query = f'''UPDATE jobs
SET last_contact = '{dt.date.today()}'
WHERE company = '{record.company}'
'''
print(f'{record.company} record updated: last_contact {dt.date.today()}.')
else:
record = UpdateRecord()
query = f'''UPDATE jobs
SET {record.column} = '{record.value}', last_contact = '{dt.date.today()}'
WHERE company = '{record.company}'
'''
print(f'{record.company} record updated: {record.column} == {record.value}.')
# push data to the database
with conn.cursor() as c:
c.execute(query)
conn.commit()
``` |
{
"source": "JosiahDub/LittleFreePiBerry",
"score": 2
} |
#### File: LittleFreePiBerry/app/views.py
```python
from app import app
from flask import render_template, jsonify
import os
from instagram_auth import INSTAGRAM_PASSWORD, INSTAGRAM_USERNAME
from instagram import InstagramSession
# For testing outside of the Pi
if os.uname()[4][:3] == 'arm':
from config import camera
def _upload_to_instagram():
insta = InstagramSession()
if insta.login(INSTAGRAM_USERNAME, INSTAGRAM_PASSWORD):
print("good job")
# Example
# filepath = "/tmp/square.jpg"
# print "Uploading " + filepath
# insta = InstagramSession()
# if insta.login(USERNAME, PASSWORD):
# media_id = insta.upload_photo("/tmp/small.jpg")
# print media_id
# if media_id is not None:
# insta.configure_photo(media_id, "")
@app.route('/_take_pic')
def take_pic():
_upload_to_instagram()
return jsonify(success=1)
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html',
title='Home')
``` |
{
"source": "josiahfmccoy/scholasterion",
"score": 3
} |
#### File: josiahfmccoy/scholasterion/dev.py
```python
import os
import subprocess
import sys
import time
def print_header(msg):
print(f"\n######################## {msg} ########################")
def run_flask(name, runner, port, expose=False):
print_header(f'Starting {name}')
webapp_command = f'pipenv run python -m {runner} -p {port}'
if expose:
webapp_command += ' --expose'
proc = subprocess.Popen(
webapp_command, # shell=True,
stdout=sys.stdout, stderr=sys.stderr
)
wait_for_output(proc, 10)
return proc
def run_webapp():
return run_flask(
'WebApp', 'src.run_webapp', os.getenv('APP_PORT', 5000),
expose=os.getenv('EXPOSE_APP', False)
)
def wait_for_output(subproc, timeout=2):
while True:
try:
outs, errs = subproc.communicate(timeout=timeout)
if not outs and not errs:
break
if outs:
print(outs)
if errs:
print(errs)
except subprocess.TimeoutExpired:
break
def all_live(procs):
for proc in procs:
if proc is None:
continue
if proc.poll() is not None:
return False
return True
def check_proc(proc):
return (proc is not None and proc.poll() is None)
DEV_PROCESSES = {
run_webapp: run_webapp()
}
def run_procs(processes, is_main):
try:
while True:
while all_live(list(processes.values())):
time.sleep(1)
for runner, proc in processes.items():
if not check_proc(proc):
processes[runner] = runner()
finally:
print_header("Terminating Flask Applications ...")
for proc in processes.values():
if proc is not None:
proc.terminate()
if __name__ == '__main__':
is_main = os.environ.get('WERKZEUG_RUN_MAIN') != 'true'
if is_main:
print_header("Running Dev Environment")
try:
run_procs(DEV_PROCESSES, is_main)
except KeyboardInterrupt:
pass
```
#### File: backend/languages/utils.py
```python
__all__ = [
'serializable_language'
]
def serializable_language(lang):
s = {
'id': lang.id,
'iso_code': lang.iso_code,
'name': lang.name
}
return s
```
#### File: backend/lexemes/utils.py
```python
import requests
from flask import current_app
from lxml import etree, html
from ...db.services import LexemeService
from ..languages.utils import serializable_language
__all__ = [
'serializable_token', 'serializable_word', 'serializable_lexeme',
'get_token', 'parse_loegion'
]
ALLOW_AUTOLOAD = False
def serializable_token(token):
if not token:
return None
s = {
'identifier': token.identifier,
'form': token.form,
'gloss': token.gloss,
'words': [serializable_word(w) for w in token.words]
}
return s
def serializable_word(word):
if not word:
return None
s = {
'id': word.id,
'form': word.form,
'parsing': word.parsing,
'lexeme': serializable_lexeme(word.lexeme)
}
return s
def serializable_lexeme(lex):
if not lex:
return None
s = {
'id': lex.id,
'lemma': lex.lemma,
'gloss': lex.gloss,
'language': serializable_language(lex.language)
}
return s
def get_token(document, identifier):
fname = document.file_url
parser = etree.XMLParser(remove_blank_text=True)
if not fname.startswith('http'):
fname = current_app.static_path('data/' + fname)
root = etree.parse(fname, parser)
else:
r = requests.get(fname)
root = html.fromstring(r.text)
t = root.xpath(f'//*[@id="{identifier}"]')
if not t:
return None
return t[0].text
def parse_loegion(document, identifier):
from ...workbench.parsing.loegion import LoegionParser, norm_word
t = get_token(document, identifier)
lang = document.collection.language
words = {}
existing = LexemeService.Tokens.get_all(form=t)
for tok in existing:
for w in tok.words:
words[w.id] = w
word_form = norm_word(t)
existing = LexemeService.Words.get_all(form=word_form)
for w in existing:
words[w.id] = w
words = list(words.values())
if not words:
if not ALLOW_AUTOLOAD:
return None
parsed = LoegionParser().parse_word(t)
for lem, parsings in parsed.items():
lex = LexemeService.get_or_create(
lemma=lem,
language_id=lang.id
)
if not lex.gloss:
gloss = gloss_loegion(lex.lemma)
if gloss:
lex.gloss = gloss
w = LexemeService.Words.get_or_create(
form=word_form,
lexeme_id=lex.id
)
words.append(w)
token = LexemeService.Tokens.get_or_create(
identifier=identifier,
form=t,
document_id=document.id,
no_commit=True
)
token.words = words
LexemeService.commit()
return token
def gloss_loegion(lemma):
from ...workbench.glossing.loegion import LoegionGlosser
gloss = LoegionGlosser().gloss(lemma)
return gloss
```
#### File: backend/users/utils.py
```python
__all__ = [
'serializable_user'
]
def serializable_user(user):
if not user:
return None
s = {
'email': user.email,
'username': user.username
}
return s
```
#### File: schema/lexemes/models.py
```python
import sqlalchemy as sa
from ..utils.base import Model
__all__ = [
'Token', 'Word', 'Lexeme'
]
token_words = sa.Table(
'token_words', Model.metadata,
sa.Column('token_id', sa.Integer(), sa.ForeignKey('token.id'), nullable=False),
sa.Column('word_id', sa.Integer(), sa.ForeignKey('word.id'), nullable=False)
)
class Token(Model):
identifier = sa.Column(sa.Unicode(60), nullable=False)
gloss = sa.Column(sa.Unicode(), nullable=True)
document_id = sa.Column(
sa.Integer(), sa.ForeignKey('document.id'), nullable=True
)
document = sa.orm.relationship(
'Document', backref=sa.orm.backref('tokens', cascade='all, delete-orphan')
)
words = sa.orm.relationship(
'Word', secondary=token_words, backref=sa.orm.backref('tokens')
)
__table_args__ = (
sa.UniqueConstraint(
'identifier', 'document_id'
),
)
def __repr__(self):
return (
f"{self.__class__.__qualname__}"
f"('{self.document.title}', '{self.identifier}')"
)
class Word(Model):
form = sa.Column(sa.Unicode(), nullable=False)
parsing = sa.Column(sa.Unicode(255), nullable=True)
gloss = sa.Column(sa.Unicode(), nullable=True)
lexeme_id = sa.Column(
sa.Integer(), sa.ForeignKey('lexeme.id'), nullable=True
)
lexeme = sa.orm.relationship(
'Lexeme', backref=sa.orm.backref('forms', cascade='all, delete-orphan')
)
def __repr__(self):
return (
f"{self.__class__.__qualname__}('{self.form}')"
)
class Lexeme(Model):
lemma = sa.Column(sa.Unicode(), nullable=False)
gloss = sa.Column(sa.Unicode(), nullable=True)
subscript = sa.Column(sa.Integer(), nullable=True)
language_id = sa.Column(
sa.Integer(), sa.ForeignKey('language.id'), nullable=True
)
language = sa.orm.relationship(
'Language',
backref=sa.orm.backref('lexemes', cascade='all, delete-orphan')
)
__table_args__ = (
sa.UniqueConstraint(
'lemma', 'gloss', 'language_id'
),
)
def __repr__(self):
return (
f"{self.__class__.__qualname__}('{self.lemma}')"
)
```
#### File: db/services/generic.py
```python
import os
from ..schema.utils.engine import DataBase
__all__ = ['db', 'GenericService', 'PermissionsMixin']
db_path = os.getenv('DATABASE_URI')
db = DataBase(db_path)
def make_key(**opts):
key = '-'.join([
f'{k}={v}'
for k, v in dict(opts).items()
])
return key
# Caching is currently disabled; too buggy.
# If this ends up being inefficient
# we should return to caching and fix it
class ServiceMetaClass(type):
@property
def db(cls):
return db
def cache(cls, key, value):
try:
cls.__cache
except AttributeError:
cls.__cache = {}
cls.__cache[key] = value
def cached(cls, key):
try:
cls.__cache
except AttributeError:
cls.__cache = {}
# Using 'get' instead of 'pop' here would enable caching
return cls.__cache.pop(key, None)
def clear_cache(cls):
cls.__cache = {}
class GenericService(metaclass=ServiceMetaClass):
__model__ = None
__unique_on__ = []
auto_commit = True
@classmethod
def commit(cls):
cls.clear_cache()
cls.db.session.commit()
@classmethod
def rollback(cls):
cls.clear_cache()
cls.db.session.rollback()
@classmethod
def create(cls, *, no_commit=False, check_unique=True, **kwargs):
opts = dict(kwargs)
for k in list(opts):
opt = opts.pop(k)
if hasattr(opt, 'id') and hasattr(cls.__model__, f'{k}_id'):
opts[f'{k}_id'] = opt.id
else:
opts[k] = opt
if check_unique and cls.__unique_on__:
check = {k: opts.get(k) for k in cls.__unique_on__}
existing_term = cls.get(**check)
if existing_term:
for k, v in kwargs.items():
if hasattr(existing_term, k):
setattr(existing_term, k, v)
cls.db.session.add(existing_term)
if not no_commit and cls.auto_commit:
cls.commit()
return existing_term
with cls.db.session.no_autoflush:
model = cls.__model__(**opts)
cls.db.session.add(model)
cls.cache(make_key(**opts), model)
if not no_commit and cls.auto_commit:
cls.commit()
return model
@classmethod
def get_all(cls, **kwargs):
key = make_key(**kwargs)
if not cls.cached(key):
with cls.db.session.no_autoflush:
model_query = cls.db.session.query(cls.__model__)
if kwargs:
model_query = model_query.filter_by(**kwargs)
models = model_query.all()
cls.cache(key, models)
ret = cls.cached(key) or []
if isinstance(ret, cls.__model__):
ret = [ret]
with cls.db.session.no_autoflush:
ret = [
x if (x in cls.db.session) else cls.db.session.merge(x)
for x in ret
]
return ret
@classmethod
def get(cls, model_id=None, **kwargs):
ret = None
query_options = kwargs.pop('query_options', None)
if model_id is not None:
if not isinstance(model_id, int):
raise TypeError(
'"model_id" must be of type int,'
f' not {model_id.__class__.__name__}'
)
if model_id > 0:
if not cls.cached(model_id):
with cls.db.session.no_autoflush:
q = cls.db.session.query(cls.__model__)
if query_options is not None:
q = q.options(query_options)
model = q.get(
model_id
)
cls.cache(model_id, model)
ret = cls.cached(model_id)
elif kwargs:
models = cls.get_all(**kwargs)
models.append(None)
ret = models[0]
if ret:
with cls.db.session.no_autoflush:
if ret not in cls.db.session:
ret = cls.db.session.merge(ret)
return ret
@classmethod
def get_or_create(cls, model_id=None, no_commit=False, **kwargs):
model = cls.get(model_id=model_id, **kwargs)
if model is None:
check_unique = kwargs.pop('check_unique', False)
model = cls.create(
no_commit=no_commit, check_unique=check_unique, **kwargs
)
with cls.db.session.no_autoflush:
if model not in cls.db.session:
model = cls.db.session.merge(model)
return model
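        # Illustrative note (not in the original source): concrete services elsewhere in
        # this project call this as, for example,
        #   LexemeService.Words.get_or_create(form=word_form, lexeme_id=lex.id)
        # get() accepts either an integer primary key or keyword filters, and
        # get_or_create() only falls back to create() when no matching row exists.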
@classmethod
def update(cls, model):
if isinstance(model, list):
models = model
else:
models = [model]
for model in models:
if not isinstance(model, cls.__model__):
raise TypeError(
'"model" must be of type'
f' {cls.__model__.__class__.__name__},'
f' not {model.__class__.__name__}'
)
if model in cls.db.session:
cls.db.session.expunge(model)
cls.db.session.add_all(models)
cls.commit()
@classmethod
def delete(cls, model_or_id, no_commit=False):
if isinstance(model_or_id, int):
model = cls.get(model_or_id)
elif isinstance(model_or_id, cls.__model__):
model = model_or_id
if model not in cls.db.session:
model = cls.db.session.merge(model)
else:
raise TypeError(
'"model_or_id" must be of type int'
f' or {cls.__model__.__class__.__name__},'
f' not {model_or_id.__class__.__name__}'
)
cls.db.session.delete(model)
if not no_commit and cls.auto_commit:
cls.commit()
def __init__(self, model, *args, **kwargs):
self.__instance = model
class PermissionsMixin:
@classmethod
def grant(cls, model, user, permission, no_commit=False):
if not hasattr(model, 'grant'):
raise TypeError(
f'{model.__class__.__qualname__} does not support permissions'
)
model.grant(user, permission)
cls.db.session.add(model)
if not no_commit and cls.auto_commit:
cls.commit()
@classmethod
def clear_permissions(cls, model, except_for=[]):
model._permissions = [ # Clear existing permissions
x for x in model._permissions if x.user_id in except_for
]
```
#### File: workbench/parsing/base.py
```python
import json
import os
try:
from ..processing.processor import TextProcessor
except ValueError:
from processing.processor import TextProcessor
try:
from ..utils import *
except ValueError:
from utils import *
__all__ = ['Parser']
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.json')
) as f:
PARSER_CONFIG = json.load(f)
class Parser:
config = PARSER_CONFIG
_lemma_cache = {}
def __init__(self, logger=None):
if logger is None:
logger = make_logger(self.__class__.__name__)
self.logger = logger
def parse_file(self, filepath):
text = TextProcessor(self.logger).process_file(filepath)
if not text:
return None
words = text.xpath('//span[@class="word"]')
normed = {}
for w in words:
try:
normalized = norm_word(w.text)
except Exception:
import traceback
traceback.print_exc()
                # Skip words whose normalization fails rather than aborting the parse
                normalized = None
if not normalized:
continue
normed.setdefault(normalized, []).append(w.attrib['id'])
lemmas = {}
total = len(list(normed))
for i, (normed, ids) in enumerate(normed.items(), start=1):
self.logger.debug(f'Parsing word {i}/{total} ...')
if normed not in self._lemma_cache:
lems = self.parse_word(normed)
self._lemma_cache[normed] = lems
lems = self._lemma_cache[normed]
for lem, parsings in lems.items():
lemma_info = lemmas.setdefault(lem, {}).setdefault(
normed, {}
)
lemma_info.setdefault('tokens', []).append(ids)
lemma_info.setdefault('parsings', []).extend(parsings)
return {
'formatted_text': text,
'lemma_mapping': lemmas
}
def parse_word(self, word):
raise NotImplementedError()
def normalize_parsing(self, parsing):
p = parsing.strip()
if not p:
return None
if p.startswith('ronoun'):
p = 'p' + p
notes = []
while '(' in p:
p = p.split('(', 1)
note = p[-1].split(')', 1)
p = (p[0] + ' ' + note[-1]).strip()
notes.append(note[0].strip())
parse = {}
for pos, mapped in self.config['parts_of_speech'].items():
if p.startswith(pos):
parse['pos'] = mapped
p = p[len(pos):].strip()
break
for x in p.split(' '):
x = x.strip()
for cat, mapping in self.config['categories'].items():
if x in mapping:
parse[cat] = mapping[x]
break
if ('pos' not in parse) and (x in self.config['parts_of_speech']):
parse['pos'] = self.config['parts_of_speech'][x]
if 'pos' not in parse:
if 'case' in parse:
if 'tense' in parse:
parse['pos'] = 'participle'
else:
parse['pos'] = 'noun'
elif 'tense' in parse:
parse['pos'] = 'verb'
if notes:
parse['notes'] = '; '.join(notes)
return parse
```
#### File: workbench/processing/utils.py
```python
import base64
from random import randint
def make_uid(n=64**8):
uid = base64.b64encode(bytes(str(randint(1, n)), encoding='ascii'))
return uid.decode('ascii')
```
#### File: workbench/scrapers/perseus_github.py
```python
from lxml import etree
from .base import WebScraper
__all__ = ['PerseusGithubClient']
class PerseusGithubClient(WebScraper):
base_url = 'https://raw.githubusercontent.com/PerseusDL'
def _load(self, urn, excerpt=None, filename=None):
urn = urn.lstrip('urn:cts:')
urn = urn.split(':')
urn_path = urn[1].split('.')
url = (
f'{self.base_url}/canonical-{urn[0]}/master/data'
f'/{urn_path[0]}/{urn_path[1]}/{urn[1]}.xml'
)
self.logger.debug(f'Loading {url}')
r = self.session.get(url)
parser = etree.XMLParser(remove_blank_text=True)
html = etree.fromstring(r.content, parser=parser)
body = html.findall(f'.//text//body')[0]
def get_child_text(block, rank=1):
ret = []
blocks = block.xpath(f'child::div{rank}')
if not blocks:
ret.append(' '.join(t for t in block.itertext()))
return ret
for b in blocks:
t = (b.attrib.get('type') or '').strip()
n = (b.attrib.get('n') or '').strip()
name = ' '.join([x for x in [t, n] if x]).strip().title()
child_txt = get_child_text(b, rank=(rank + 1))
if child_txt and name:
child_txt = [name] + child_txt
txt = '\n\n'.join(child_txt)
ret.append(txt)
return ret
text = get_child_text(body)
text = '\n\n'.join(text)
return text
```
#### File: workbench/utils/loggers.py
```python
import logging
import os
def make_logger(name, level=logging.INFO, filepath=None):
if isinstance(level, str):
try:
level = getattr(logging, level.upper())
except AttributeError:
level = logging.INFO
logger = logging.Logger(name)
f = logging.Formatter(
'[%(asctime)s] %(levelname)s: %(message)s'
)
h = logging.StreamHandler()
h.setLevel(level)
h.setFormatter(f)
logger.addHandler(h)
if filepath and os.path.isdir(os.path.dirname(filepath)):
h = logging.FileHandler(filename=filepath, encoding='utf-8')
h.setLevel(level)
h.setFormatter(f)
logger.addHandler(h)
return logger
``` |
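A minimal usage sketch for `make_logger` above; the logger name, level string, and log-file path are illustrative assumptions rather than values from the repository.
```python
# Hypothetical usage of make_logger; the name and file path are made up.
from workbench.utils.loggers import make_logger

logger = make_logger('scraper', level='debug', filepath='/tmp/scraper.log')
logger.debug('printed by the stream handler, whose level was lowered to DEBUG')
logger.info('also appended to /tmp/scraper.log because its parent directory exists')
```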
{
"source": "josiahjohnston/PowerGenome",
"score": 3
} |
#### File: PowerGenome/powergenome/load_profiles.py
```python
import pandas as pd
import logging
from powergenome.util import reverse_dict_of_lists
logger = logging.getLogger(__name__)
def load_curves(
pudl_engine,
settings,
pudl_table="load_curves_epaipm",
settings_agg_key="region_aggregations",
):
# Settings has a dictionary of lists for regional aggregations. Need
# to reverse this to use in a map method.
region_agg_map = reverse_dict_of_lists(settings[settings_agg_key])
# IPM regions to keep. Regions not in this list will be dropped from the
# dataframe
keep_regions = [
x
for x in settings["model_regions"] + list(region_agg_map)
if x not in region_agg_map.values()
]
# I'd rather use a sql query and only pull the regions of interest but
# sqlalchemy doesn't allow table names to be parameterized.
logger.info("Loading load curves from PUDL")
load_curves = pd.read_sql_table(
pudl_table, pudl_engine, columns=["region_id_epaipm", "time_index", "load_mw"]
)
load_curves = load_curves.loc[load_curves.region_id_epaipm.isin(keep_regions)]
# Set a new column "region" to the old column values. Then replace values for any
# regions that are being aggregated
load_curves.loc[:, "region"] = load_curves.loc[:, "region_id_epaipm"]
load_curves.loc[
load_curves.region_id_epaipm.isin(region_agg_map.keys()), "region"
] = load_curves.loc[
load_curves.region_id_epaipm.isin(region_agg_map.keys()), "region_id_epaipm"
].map(
region_agg_map
)
logger.info("Aggregating load curves in grouped regions")
load_curves_agg = load_curves.groupby(["region", "time_index"]).sum()
lc_wide = load_curves_agg.unstack(level=0)
lc_wide.columns = lc_wide.columns.droplevel()
return lc_wide
``` |
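A hedged sketch of how `load_curves` might be driven; the SQLite path and the region names in `settings` are placeholders, not values taken from the repository.
```python
# Hypothetical driver for load_curves; the database URL and region names are placeholders.
import sqlalchemy as sa
from powergenome.load_profiles import load_curves

pudl_engine = sa.create_engine("sqlite:////path/to/pudl.sqlite")
settings = {
    # regions to keep in the output, including any aggregate names
    "model_regions": ["WECC_CO", "WEST"],
    # aggregate these IPM regions into a single "WEST" region
    "region_aggregations": {"WEST": ["WECC_AZ", "WECC_NM"]},
}
lc_wide = load_curves(pudl_engine, settings)  # wide frame: one column per region, indexed by time_index
print(lc_wide.head())
```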
{
"source": "josiah-keller/aoc-2020",
"score": 4
} |
#### File: aoc-2020/11/seats.py
```python
import argparse
class SeatSimulation:
FLOOR = '.'
SEAT = 'L'
OCCUPIED = '#'
def __init__(self, filename):
self.neighbor_tolerance = 4
self.load(filename)
self.initialize_state()
def load(self, filename):
with open(filename, 'r') as f:
self.layout = [[c for c in line if c != '\n'] for line in f.readlines()]
def copy_state(self, state):
return [row[:] for row in state]
def initialize_state(self):
self.state = self.copy_state(self.layout)
def print_state(self):
for row in self.state:
print(''.join(row))
print('\n')
def run_til_settled(self):
next_state = self.calc_next_state()
while next_state != self.state:
self.state = next_state
next_state = self.calc_next_state()
def count_occupied(self):
return sum([row.count(SeatSimulation.OCCUPIED) for row in self.state])
def count_occupied_neighbors(self, row_idx, seat_idx):
count = 0
for i in range(max(0, row_idx - 1), row_idx + 2):
for j in range(max(0, seat_idx - 1), seat_idx + 2):
try:
if i == row_idx and j == seat_idx: continue
if self.state[i][j] == SeatSimulation.OCCUPIED:
count += 1
except IndexError:
pass
return count
def calc_next_state(self):
new_state = self.copy_state(self.state)
for i in range(len(self.state)):
row = self.state[i]
for j in range(len(row)):
seat = row[j]
if seat == SeatSimulation.FLOOR: continue
if seat == SeatSimulation.SEAT:
if self.count_occupied_neighbors(i, j) == 0:
new_state[i][j] = SeatSimulation.OCCUPIED
if seat == SeatSimulation.OCCUPIED:
if self.count_occupied_neighbors(i, j) >= self.neighbor_tolerance:
new_state[i][j] = SeatSimulation.SEAT
return new_state
class LineOfSightSeatSimulation(SeatSimulation):
def __init__(self, filename):
super().__init__(filename)
self.neighbor_tolerance = 5
def trace_los(self, start_row, start_seat, row_delta, seat_delta):
row = start_row + row_delta
seat = start_seat + seat_delta
while row >= 0 and seat >= 0 and row < len(self.state) and seat < len(self.state[row]):
if self.state[row][seat] == SeatSimulation.OCCUPIED:
return True
elif self.state[row][seat] == SeatSimulation.SEAT:
return False
row += row_delta
seat += seat_delta
return False
def count_occupied_neighbors(self, row_idx, seat_idx):
count = 0
for row_delta in range(-1, 2):
for seat_delta in range(-1, 2):
if row_delta == 0 and seat_delta == 0: continue
if self.trace_los(row_idx, seat_idx, row_delta, seat_delta):
count += 1
return count
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('filename', help='Path to the file with the ferry seat layout')
args = parser.parse_args()
print('Simulation 1: tolerate up to 4 immediate neighbors')
simulation_1 = SeatSimulation(args.filename)
simulation_1.run_til_settled()
print('Occupied seats:', simulation_1.count_occupied())
print('Simulation 2: tolerate up to 5 neighbors within line of sight')
simulation_2 = LineOfSightSeatSimulation(args.filename)
simulation_2.run_til_settled()
print('Occupied seats:', simulation_2.count_occupied())
```
#### File: aoc-2020/19/grammar.py
```python
import argparse
class Grammar:
def __init__(self):
self.rules = {}
def add_rule(self, line):
(label, expr) = line.split(':')
if label in self.rules:
raise Exception('Duplicate rule: %s' % label)
self.rules[label] = expr.replace('\n', '')
def _match_impl(self, string, rule='0'):
if string == '':
return (True, 0, -1)
starting_alternation = 0
# ugly way of communicating backtrack back up the call stack
rule_parts = rule.split('!')
normalized_rule = rule_parts[0]
if len(rule_parts) == 2:
starting_alternation = int(rule_parts[1])
if not normalized_rule in self.rules:
raise Exception('Unknown rule: %s' % rule)
expr = self.rules[normalized_rule]
alternations = expr.split('|')
cursor = 0
alternation_idx = 0
while alternation_idx < len(alternations):
alternation = alternations[alternation_idx]
cursor = 0
tokens = alternation.strip().split(' ')
fail = False
for i in range(len(tokens)):
if alternation_idx < starting_alternation:
# this alternation already failed earlier - we're just catching back up!
fail = True
token = tokens[i]
if cursor == len(string):
# nothing left to match - fail out
fail = True
break
if token[0] == '"':
if string[cursor] == token[1]:
cursor += 1
else:
# character doesn't match - fail out
fail = True
break
else:
(match, offset, backtrack_alternation_idx) = self._match_impl(string[cursor:], token)
cursor += offset
if backtrack_alternation_idx > -1:
# the child rule might still have more alternations to try in case
# of backtracking. so we'll construct a new rule string that
# encodes the next alternation index for the child rule and add it
# to *our* alternations
new_tokens = tokens[:]
orig_token = token.split('!')[0]
new_tokens[i] = '%s!%d' % (orig_token, backtrack_alternation_idx)
alternations.append(' '.join(new_tokens))
if not match:
# child rule didn't match - fail out
fail = True
break
if not fail:
# match!
return (True, cursor, alternation_idx + 1)
alternation_idx += 1
# fail
return (False, cursor, -1)
def match(self, string, rule='0'):
(match, cursor, _) = self._match_impl(string, rule)
return match and cursor == len(string)
def read_grammar(f):
grammar = Grammar()
line = f.readline()
while line != '\n':
grammar.add_rule(line)
line = f.readline()
return grammar
def read_inputs(f):
inputs = []
line = f.readline()
while line:
inputs.append(line.replace('\n', ''))
line = f.readline()
return inputs
def read_input(filename):
with open(filename, 'r') as f:
grammar = read_grammar(f)
inputs = read_inputs(f)
return (grammar, inputs)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('filename', help='Path to input file')
parser.add_argument('-v', '--verbose', action='store_true', help='Print list of inputs that matched')
args = parser.parse_args()
(grammar, inputs) = read_input(args.filename)
matches = [grammar.match(input) for input in inputs]
print('Matches:', matches.count(True))
if args.verbose:
for i in range(len(inputs)):
if matches[i]: print(inputs[i])
```
#### File: aoc-2020/20/image.py
```python
import argparse, math
class Tile:
def __init__(self, tile_id, lines):
self.id = tile_id
self.pixels = [list(line) for line in lines]
self.calc_edges()
self.width = len(self.pixels)
def calc_edges(self):
self.top_edge = self.pixels[0][:]
self.bottom_edge = self.pixels[-1][:]
self.left_edge = [line[0] for line in self.pixels]
self.right_edge = [line[-1] for line in self.pixels]
def get_edges_list(self):
"""List of edges in order top, right, bottom, left"""
return [self.top_edge, self.right_edge, self.bottom_edge, self.left_edge]
def rotate_right(self):
new_pixels = [[None for x in range(self.width)] for y in range(self.width)]
for y in range(self.width):
for x in range(self.width):
new_pixels[x][self.width - 1 - y] = self.pixels[y][x]
self.pixels = new_pixels
self.calc_edges()
def flip_horizontal(self):
self.pixels = [line[::-1] for line in self.pixels]
self.calc_edges()
def flip_vertical(self):
self.pixels = self.pixels[::-1]
self.calc_edges()
def duplicate(self):
return Tile(self.id, self.pixels)
def __str__(self):
return 'Tile %d:\n%s' % (self.id, '\n'.join([''.join(line) for line in self.pixels]))
class Edge:
def __init__(self, tile, edge):
self.tile = tile
self.edge = edge
self.match = None
class TilesMap:
def __init__(self, tiles):
self.tiles = tiles
self.width = int(math.sqrt(len(self.tiles)))
self.map = [[None for x in range(self.width)] for y in range(self.width)]
def do_match(self, tile_a, tile_b):
for edge_a in tile_a.get_edges_list():
for edge_b in tile_b.get_edges_list():
if edge_a == edge_b:
return True
return False
def get_neighbor_candidates(self, tile):
return [candidate for candidate in self.tiles if self.do_match(candidate, tile)]
def arrange(self):
edges = []
for tile in self.tiles:
edges += [Edge(tile, edge) for edge in tile.get_edges_list()]
for i in range(len(edges)):
edge = edges[i]
if edge.match is not None:
continue
for j in range(i + 1, len(edges)):
other_edge = edges[j]
if edge.edge == other_edge.edge:
edge.match = other_edge
other_edge.match = edge
break
elif edge.edge == other_edge.edge[::-1]:
edge.match = other_edge
other_edge.match = edge
break
match_counts = {}
for edge in edges:
if edge.match is not None:
if not edge.tile.id in match_counts:
match_counts[edge.tile.id] = 1
else:
match_counts[edge.tile.id] += 1
corner_ids = [tile_id for (tile_id, count) in match_counts.items() if count == 2]
product = 1
for tile_id in corner_ids:
product *= tile_id
print('Product of corner tile IDs:', product)
def read_tiles(filename):
tiles = []
with open(filename, 'r') as f:
lines = []
for line in f:
line = line.replace('\n', '')
if 'Tile' in line:
tile_id = int(line.replace('Tile ', '').replace(':', ''))
elif line != '':
lines.append(line)
else:
tiles.append(Tile(tile_id, lines))
lines = []
if len(lines) > 0:
tiles.append(Tile(tile_id, lines))
return tiles
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('filename', help='Path to the file with image tiles')
args = parser.parse_args()
tiles = read_tiles(args.filename)
tm = TilesMap(tiles)
tm.arrange()
``` |
{
"source": "JosiahKerley/flags",
"score": 3
} |
#### File: flags/sysflags/cli.py
```python
import argparse
from .database import YamlDatabase as DB
from . import utils
def cli():
parser = argparse.ArgumentParser()
parser.add_argument('-S', '--scope', default='directory', help="flag scope")
parser.add_argument('-F', '--output-format', default='yaml', dest='format', help="output format")
parser.add_argument('-g', '--get', help="get a value")
parser.add_argument('-s', '--set', help="set a value")
parser.add_argument('-v', '--value', help="set a value")
parser.add_argument('-d', '--dump', action="store_true", help="dump the database")
args = parser.parse_args()
db = DB(scope=args.scope)
if args.get:
utils.print_formatted_message(db.get(query=args.get), format=args.format)
elif args.set:
utils.print_formatted_message(db.set(query=args.set, value=args.value), format=args.format)
elif args.dump:
utils.print_formatted_message(db.dump(), format=args.format)
```
#### File: flags/sysflags/database.py
```python
from .strategies import *
from . import utils
import os
import re
import sys
import dpath.util
import pickle
from pathlib import Path
from datetime import datetime
dpath.options.ALLOW_EMPTY_STRING_KEYS = True
class Database:
scope = None
namespace = None
snapshot = None
strategy = None
_file_extenstion = None
_database_file_subpath = '{namespace}/snapshot-{snapshot}.{file_extenstion}'
_datetime_fmt = "%Y-%m-%d_%H-%M-%S"
_system_scope_parent_dir = '/etc/flags'
_user_scope_parent_dir = f'{Path.home()}/.config/flags'
_directory_scope_parent_dir = f'{os.getcwd()}/.flags'
def __init__(self, scope='system', snapshot='current', namespace='default', strategy='MergeScopesTopToBottom'):
self.scope = scope
self.snapshot = snapshot
self.namespace = namespace
if strategy:
self.strategy = getattr(sys.modules[__name__], strategy)(cls=self, scope=scope, snapshot=snapshot, namespace=namespace)
self._setup()
def _get_database_filepath(self, subpath=None):
if not subpath:
subpath = self._database_file_subpath.format(
scope=self.scope,
namespace=self.namespace,
snapshot=self.snapshot,
file_extenstion=self._file_extenstion
)
if self.scope == 'system':
return f'{self._system_scope_parent_dir}/{subpath}'
elif self.scope == 'user':
return f'{self._user_scope_parent_dir}/{subpath}'
elif self.scope == 'directory':
return f'{self._directory_scope_parent_dir}/{subpath}'
else:
raise Exception(f'unknown scope {self.scope}')
def _get_database_filedir(self):
return os.path.dirname(self._get_database_filepath())
def _get_datetime_string_now(self):
return datetime.now().strftime(self._datetime_fmt)
def _list_snapshot_filepaths(self):
if os.path.exists(self._get_database_filedir()):
return [f for f in os.listdir(self._get_database_filedir()) if re.match(rf'snapshot-.*.{self._file_extenstion}', f)]
def _bootstrap_empty_file(self, filepath):
os.mknod(filepath)
return True
def _setup_dirs(self):
try:
if not os.path.exists(self._get_database_filedir()):
os.makedirs(self._get_database_filedir())
utils.eprint(f'Creating directory {self._get_database_filedir()}')
except:
utils.eprint(f'Cannot create directory {self._get_database_filedir()}')
def _setup_files(self):
if self.snapshot == 'current':
if not os.path.islink(self._get_database_filepath()) and os.path.exists(self._get_database_filedir()):
if not self._list_snapshot_filepaths():
subpath = self._database_file_subpath.format(
scope=self.scope,
namespace=self.namespace,
snapshot=self._get_datetime_string_now(),
file_extenstion=self._file_extenstion
)
empty_snapshot_filepath = self._get_database_filepath(subpath)
utils.eprint(f'Creating empty db file {empty_snapshot_filepath}')
assert self._bootstrap_empty_file(empty_snapshot_filepath)
os.symlink(empty_snapshot_filepath, self._get_database_filepath())
return True
else:
subpath = self._database_file_subpath.format(
scope=self.scope,
namespace=self.namespace,
snapshot=self.snapshot,
file_extenstion=self._file_extenstion
)
return os.path.isfile(self._get_database_filepath(subpath))
def _setup(self):
self._setup_dirs()
self._setup_files()
def _serialize(self, data: dict):
return pickle.dumps(data)
def _deserialize(self, datastr: str) -> dict:
return pickle.loads(datastr)
def _read_data(self):
with open(self._get_database_filepath(), 'r') as f:
return f.read()
def _write_data(self, data):
with open(self._get_database_filepath(), 'w') as f:
return f.write(data)
def _get_data(self):
if self.strategy:
return self.strategy.get_data()
else:
return self._deserialize(self._read_data())
def _set_data(self, data: dict):
return self._write_data(self._serialize(data))
def dump(self):
return self._get_data()
def get(self, query: str):
try:
return dpath.util.get(self._get_data(), query)
except:
return None
def search(self, query: str):
return dpath.util.search(self._get_data(), query)
def values(self, query: str):
return dpath.util.values(self._get_data(), query)
def set(self, query: str, value=None, writeback=True, recurse=True):
if recurse:
retval = dpath.util.new(self._get_data(), query, value)
else:
retval = dpath.util.set(self._get_data(), query, value)
if writeback:
self._set_data(retval)
return retval
class YamlDatabase(Database):
import yaml
_file_extenstion = 'yaml'
def _bootstrap_empty_file(self, filepath):
with open(filepath, 'w') as f:
return f.write('{}')
def _serialize(self, data: dict):
return self.yaml.dump(data)
def _deserialize(self, datastr: str) -> dict:
return self.yaml.load(datastr, Loader=self.yaml.FullLoader)
class JsonDatabase(Database):
import json
_file_extenstion = 'json'
def _bootstrap_empty_file(self, filepath):
with open(filepath, 'w') as f:
return f.write('{}')
def _serialize(self, data: dict):
return self.json.dumps(data, indent=2)
def _deserialize(self, datastr: str) -> dict:
return self.json.loads(datastr)
```
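A short sketch of the `YamlDatabase` API defined above; the query paths and values are made up, and the default merge strategy is assumed.
```python
# Illustrative use of YamlDatabase; query paths and values are made up.
from sysflags.database import YamlDatabase

db = YamlDatabase(scope='user')            # stored under ~/.config/flags
db.set('build/flags/optimize', 'O2')       # dpath-style slash-separated query
print(db.get('build/flags/optimize'))      # -> 'O2'
print(db.dump())                           # merged view across system/user/directory scopes
```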
#### File: flags/sysflags/strategies.py
```python
import dpath.util
class Strategy:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self._setup()
def _setup(self):
pass
class MergeStrategy(Strategy):
def get_data(self):
return {}
def _setup(self):
self._cls = self.kwargs['cls'].__class__
self._scope = self.kwargs['scope']
self._snapshot = self.kwargs['snapshot']
self._namespace = self.kwargs['namespace']
self.system = self._cls(scope='system', snapshot=self._snapshot, namespace=self._namespace, strategy=None)
self.user = self._cls(scope='user', snapshot=self._snapshot, namespace=self._namespace, strategy=None)
self.directory = self._cls(scope='directory', snapshot=self._snapshot, namespace=self._namespace, strategy=None)
try:
self.system = self.system._get_data()
except:
self.system = {}
try:
self.user = self.user._get_data()
except:
self.user = {}
try:
self.directory = self.directory._get_data()
except:
self.directory = {}
class MergeScopesTopToBottom(MergeStrategy):
def get_data(self):
return dpath.util.merge(
self.system, dpath.util.merge(
self.user, self.directory
)
)
``` |
{
"source": "JosiahKerley/ovirt-ansible-dpdk-setup",
"score": 2
} |
#### File: ovirt-ansible-dpdk-setup/library/configure_kernel.py
```python
DOCUMENTATION = '''
---
short_description: Configures the kernel to be DPDK compatible.
author: "<NAME> (@leongold)"
description:
- "Module to configure the kernel to be DPDK compatible."
options:
pci_drivers:
description:
        - "Dictionary of PCI addresses to device drivers. DPDK-compatible
          devices/drivers have their CPUs isolated."
required: true
'''
EXAMPLES = '''
- name: configure kernel
configure_kernel:
pci_drivers:
"0000:00:04.0": "vfio-pci"
'''
RETURN = '''
changed:
description: Describes whether any alterations to the kernel were made.
returned: On success.
type: boolean
sample: true
'''
import os
import subprocess
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.dpdk_setup_common import exec_cmd
from ansible.module_utils.dpdk_setup_common import get_cpu_list
from ansible.module_utils.dpdk_setup_common import DPDK_DRIVERS
class ReadKernelArgsError(Exception):
pass
class UpdateKernelError(Exception):
pass
class SelectCpuPartitioningError(Exception):
pass
def _get_default_kernel():
proc = subprocess.Popen(['grubby', '--default-kernel'],
stdout=subprocess.PIPE)
return proc.stdout.read().strip()
def _remove_stale_hugepages():
for arg in ('default_hugepagesz', 'hugepagesz', 'hugepages'):
while arg in _get_kernel_args():
proc = subprocess.Popen(
['grubby',
'--remove-args="{}"'.format(arg),
'--update-kernel',
_get_default_kernel()
]
)
out, err = proc.communicate()
if err:
raise UpdateKernelError(out)
def _1gb_hugepages_are_supported():
with open('/proc/cpuinfo') as f:
return 'pdpe1gb' in f.read()
def _add_hugepages(nr_2mb_hugepages, nr_1gb_hugepages, use_1gb_hugepages):
use_1gb_hugepages = _1gb_hugepages_are_supported() and use_1gb_hugepages
size = '1G' if use_1gb_hugepages else '2M'
amount = str(nr_1gb_hugepages) if use_1gb_hugepages \
else str(nr_2mb_hugepages)
if _current_hugepages(size, amount):
return False
_remove_stale_hugepages()
args = 'default_hugepagesz={} hugepagesz={} hugepages={}'.format(
size, size, amount
)
proc = subprocess.Popen(['grubby', '--args="{}"'.format(args),
'--update-kernel', _get_default_kernel()])
out, err = proc.communicate()
if err:
raise UpdateKernelError(out)
return True
def _change_isolated_cpus(cpu_list):
VARIABLES_FILE = '/etc/tuned/cpu-partitioning-variables.conf'
changed = False
has_isolated_cores = False
new_lines = []
with open(VARIABLES_FILE) as f:
lines = f.readlines()
for line in lines:
if line.startswith('isolated_cores'):
has_isolated_cores = True
required_line = 'isolated_cores={}'.format(cpu_list)
if line != required_line:
line = required_line
changed = True
new_lines.append(line)
if not has_isolated_cores:
new_lines.append('isolated_cores={}'.format(cpu_list))
changed = True
with open(VARIABLES_FILE, 'w') as f:
f.writelines(new_lines)
return changed
def _current_hugepages(size, amount):
kernel_args = _get_kernel_args()
args_list = kernel_args.split()
return all([
any([arg.startswith('hugepages=' + amount) for arg in args_list]),
any([arg.startswith('hugepagesz=' + size) for arg in args_list]),
any([arg.startswith('default_hugepagesz=' + size) for arg in args_list])
])
def _get_kernel_args():
rc, out, err = exec_cmd(['grubby', '--info', _get_default_kernel()])
if rc != 0:
raise ReadKernelArgsError(err)
return [l.split('=', 1)[1].strip('"')
for l in out.split('\n') if
l.startswith('args')][0]
def _select_cpu_partitioning(cpu_list):
profile = 'cpu-partitioning' if cpu_list else 'balanced'
rc, _, err = exec_cmd(['tuned-adm', 'profile', profile])
if rc != 0:
raise SelectCpuPartitioningError(err)
def _add_iommu():
if _is_iommu_set():
return False
rc, _, err = exec_cmd(['grubby', '--args=iommu=pt intel_iommu=on',
'--update-kernel={}'.format(_get_default_kernel())])
if rc != 0:
raise UpdateKernelError(err)
return True
def _is_iommu_set():
kernel_args = _get_kernel_args()
return 'iommu=pt' in kernel_args and 'intel_iommu=on' in kernel_args
def _configure_kernel(
pci_addresses, nr_2mb_hugepages, nr_1gb_hugepages, use_1gb_hugepages):
cpu_list = get_cpu_list(pci_addresses)
added_hugepages = _add_hugepages(
nr_2mb_hugepages, nr_1gb_hugepages, use_1gb_hugepages
)
changed_isolated_cpus = _change_isolated_cpus(cpu_list)
_select_cpu_partitioning(cpu_list)
added_iommu = _add_iommu()
return any([added_hugepages, changed_isolated_cpus, added_iommu])
def main():
module = AnsibleModule(
argument_spec=dict(
pci_drivers=dict(default=None, type='dict', required=True),
nr_2mb_hugepages=dict(default=None, type='int', required=True),
nr_1gb_hugepages=dict(default=None, type='int', required=True),
use_1gb_hugepages=dict(default=None, type='bool', required=True)
)
)
pci_drivers = module.params.get('pci_drivers')
pci_addresses = [addr for addr, driver in pci_drivers.iteritems()
if driver in DPDK_DRIVERS]
try:
changed = _configure_kernel(
pci_addresses,
module.params.get('nr_2mb_hugepages'),
module.params.get('nr_1gb_hugepages'),
module.params.get('use_1gb_hugepages')
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
module.exit_json(changed=changed)
if __name__ == "__main__":
main()
``` |
{
"source": "JosiahKerley/python-httpsync",
"score": 2
} |
#### File: JosiahKerley/python-httpsync/setup.py
```python
import os
from setuptools import setup
from setuptools import find_packages
from pip.req import parse_requirements
def get_package_data(package):
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
install_reqs = parse_requirements('requirements.txt', session=False)
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as file:
README = file.read()
requirements = [str(ir.req) for ir in install_reqs]
setup(
name = 'HTTPSync',
version = '1.0.1',
description = 'Tool to sync down files from a URL',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/JosiahKerley/python-httpsync',
zip_safe = False,
install_requires = requirements,
packages=find_packages(),
package_data=get_package_data('httpsync'),
entry_points = {
"console_scripts": [
"httpsync = httpsync:run"
]
}
)
``` |
{
"source": "josiahkhor/pdfminer.six",
"score": 3
} |
#### File: pdfminer.six/tests/test_font_size.py
```python
from helpers import absolute_sample_path
from pdfminer.high_level import extract_pages
from pdfminer.layout import LTChar, LTTextBox
def test_font_size():
path = absolute_sample_path('font-size-test.pdf')
for page in extract_pages(path):
for text_box in page:
if isinstance(text_box, LTTextBox):
for line in text_box:
possible_number = line.get_text().strip()
if possible_number.isdigit():
expected_size = int(possible_number)
for char in line:
if isinstance(char, LTChar):
actual_size = int(round(char.size))
print(char, actual_size, expected_size)
assert expected_size == actual_size
else:
print(repr(line.get_text()))
``` |
{
"source": "josiahls/fast-reinforcement-learning",
"score": 3
} |
#### File: fast_rl/core/data_structures.py
```python
import numpy as np
class SumTree(object):
write = 0
def __init__(self, capacity):
"""
Used for PER.
References:
[1] SumTree implementation belongs to: https://github.com/rlcode/per
Notes:
As of 8/23/2019, does not have a license provided. As another note, this code is modified.
Args:
capacity:
"""
self.capacity = capacity
self.tree = np.zeros(2 * capacity - 1)
self.data = np.zeros(capacity, dtype=object)
self.n_entries = 0
def _propagate(self, idx, change):
""" Update to the root node """
parent = (idx - 1) // 2
self.tree[parent] += change
if (np.isscalar(parent) and parent != 0) or (not np.isscalar(parent) and all(parent != 0)):
if not np.isscalar(parent): change[parent == 0] = 0
self._propagate(parent, change)
def get_left(self, index):
return 2 * index + 1
def get_right(self, index):
return self.get_left(index) + 1
def _retrieve(self, idx, s):
""" Finds sample on leaf node """
left = self.get_left(idx)
right = self.get_right(idx)
if left >= len(self.tree):
return idx
if s <= self.tree[left]:
return self._retrieve(left, s)
else:
return self._retrieve(right, s - self.tree[left])
def total(self):
return self.tree[0]
def add(self, p, data):
""" Store priority and sample """
idx = self.write + self.capacity - 1
self.data[self.write] = data
self.update(idx, p)
self.write += 1
if self.write >= self.capacity:
self.write = 0
if self.n_entries < self.capacity:
self.n_entries += 1
def update(self, idx, p):
""" Update priority """
p = p.flatten() if not np.isscalar(p) else p
change = p - self.tree[idx]
self.tree[idx] = p
self._propagate(idx, change)
def get(self, s):
""" Get priority and sample """
idx = self._retrieve(0, s)
data_index = idx - self.capacity + 1
return idx, self.tree[idx], self.data[data_index]
def anneal_weights(self, priorities, beta):
sampling_probabilities = priorities / self.total()
is_weight = np.power(self.n_entries * sampling_probabilities, -beta)
is_weight /= is_weight.max()
return is_weight.astype(float)
def batch_get(self, ss):
return np.array(list(zip(*list([self.get(s) for s in ss if self.get(s)[2] != 0]))))
def print_tree(tree: SumTree):
print('\n')
if tree.n_entries == 0:
print('empty')
return
max_d = int(np.log2(len(tree.tree)))
string_len_max = len(str(tree.tree[-1]))
tree_strings = []
display_values = None
display_indexes = None
for layer in range(max_d + 1):
# Get the indexes in the current layer d
if display_indexes is None:
display_indexes = [[0]]
else:
local_list = []
for i in [_ for _ in display_indexes[-1] if _ < len(tree.tree)]:
if tree.get_left(i) < len(tree.tree): local_list.append(tree.get_left(i))
if tree.get_right(i) < len(tree.tree): local_list.append(tree.get_right(i))
display_indexes.append(local_list)
for layer in display_indexes:
# Get the v contained in current layer d
if display_values is None:
display_values = [[tree.tree[i] for i in layer]]
else:
display_values.append([tree.tree[i] for i in layer])
tab_sizes = []
spacings = []
for i, layer in enumerate(display_values):
# for now ignore string length
tab_sizes.append(0 if i == 0 else (tab_sizes[-1] + 1) * 2)
spacings.append(3 if i == 0 else (spacings[-1] * 2 + 1))
for i, layer in enumerate(display_values):
# tree_strings.append('*' * list(reversed(tab_sizes))[i])
values = ''.join(str(v) + ' ' * (string_len_max * list(reversed(spacings))[i]) for v in layer)
tree_strings.append(' ' * (string_len_max * list(reversed(tab_sizes))[i]) + values)
for tree_string in tree_strings:
print(tree_string)
```
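A small sketch of how the `SumTree` above is typically driven for prioritized sampling; the priorities and payloads are arbitrary.
```python
# Illustrative prioritized sampling with SumTree; priorities and payloads are arbitrary.
import numpy as np
from fast_rl.core.data_structures import SumTree, print_tree

tree = SumTree(8)
for priority in (1.0, 0.5, 2.0, 0.1):
    tree.add(priority, {'transition': priority})  # store a payload under its priority

# Sampling proportionally to priority: draw uniform points over the total mass.
for s in np.random.uniform(0, tree.total(), size=3):
    idx, priority, data = tree.get(s)
    print(idx, priority, data)

print_tree(tree)
```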
#### File: fast-reinforcement-learning/tests/conftest.py
```python
import pytest
def pytest_addoption(parser):
parser.addoption("--include_performance_tests", action="store_true",
help="Will run the performance tests which do full model testing. This could take a few"
"days to fully accomplish.")
@pytest.fixture()
def include_performance_tests(pytestconfig):
return pytestconfig.getoption("include_performance_tests")
@pytest.fixture()
def skip_performance_check(include_performance_tests):
if not include_performance_tests:
pytest.skip('Skipping due to performance argument not specified. Add --include_performance_tests to not skip')
```
#### File: fast-reinforcement-learning/tests/test_basic_train.py
```python
import os
from fast_rl.agents.dqn import create_dqn_model, FixedTargetDQNModule, dqn_learner
from fast_rl.core.agent_core import ExperienceReplay, torch, GreedyEpsilon
from fast_rl.core.basic_train import load_learner
from fast_rl.core.data_block import MDPDataBunch
def test_fit():
data=MDPDataBunch.from_env('CartPole-v0', render='rgb_array', bs=5, max_steps=20, add_valid=False)
model=create_dqn_model(data, FixedTargetDQNModule, opt=torch.optim.RMSprop)
memory=ExperienceReplay(memory_size=1000, reduce_ram=True)
exploration_method=GreedyEpsilon(epsilon_start=1, epsilon_end=0.1, decay=0.001)
learner=dqn_learner(data=data, model=model, memory=memory, exploration_method=exploration_method)
learner.fit(2)
learner.fit(2)
learner.fit(2)
assert len(data.x.info)==6
assert 0 in data.x.info
assert 5 in data.x.info
def test_to_pickle():
data=MDPDataBunch.from_env('CartPole-v0', render='rgb_array', bs=5, max_steps=20, add_valid=False)
model=create_dqn_model(data, FixedTargetDQNModule, opt=torch.optim.RMSprop)
memory=ExperienceReplay(memory_size=1000, reduce_ram=True)
exploration_method=GreedyEpsilon(epsilon_start=1, epsilon_end=0.1, decay=0.001)
learner=dqn_learner(data=data, model=model, memory=memory, exploration_method=exploration_method)
learner.fit(2)
assert len(data.x.info)==2
assert 0 in data.x.info
assert 1 in data.x.info
data.to_pickle('./data/test_to_pickle')
assert os.path.exists('./data/test_to_pickle_CartPole-v0')
def test_from_pickle():
data=MDPDataBunch.from_pickle('./data/test_to_pickle_CartPole-v0')
model=create_dqn_model(data, FixedTargetDQNModule, opt=torch.optim.RMSprop)
memory=ExperienceReplay(memory_size=1000, reduce_ram=True)
exploration_method=GreedyEpsilon(epsilon_start=1, epsilon_end=0.1, decay=0.001)
learner=dqn_learner(data=data, model=model, memory=memory, exploration_method=exploration_method)
learner.fit(2)
assert len(data.x.info)==4
assert 0 in data.x.info
assert 3 in data.x.info
def test_export_learner():
data=MDPDataBunch.from_env('CartPole-v0', render='rgb_array', bs=5, max_steps=20, add_valid=False)
model=create_dqn_model(data, FixedTargetDQNModule, opt=torch.optim.RMSprop)
memory=ExperienceReplay(memory_size=1000, reduce_ram=True)
exploration_method=GreedyEpsilon(epsilon_start=1, epsilon_end=0.1, decay=0.001)
learner=dqn_learner(data=data, model=model, memory=memory, exploration_method=exploration_method)
learner.fit(2)
learner.export('test_export.pkl')#, pickle_data=True)
learner = load_learner(learner.path, 'test_export.pkl')
learner.fit(2)
```
#### File: fast-reinforcement-learning/tests/test_data_structures.py
```python
from fast_rl.core.data_structures import print_tree, SumTree
def test_sum_tree_with_max_size():
memory = SumTree(10)
values = [1, 1, 1, 1, 1, 1]
data = [f'data with priority: {i}' for i in values]
for element, value in zip(data, values):
memory.add(value, element)
print_tree(memory)
``` |
{
"source": "josiahls/fastrl",
"score": 2
} |
#### File: agents/dqn/core.py
```python
__all__ = ['DQN', 'ArgMaxFeed', 'DiscreteEpsilonRandomSelect', 'Epsilon', 'DQNTrainer']
# Cell
# Python native modules
import os
from collections import deque
# Third party libs
import torch
from torch.nn import *
from fastcore.all import *
from fastai.learner import *
from fastai.torch_basics import *
from fastai.torch_core import *
from fastai.callback.all import *
from torch.utils.tensorboard import SummaryWriter
# Local modules
from ...data.block import *
from ...data.gym import *
from ...agent import *
from ...core import *
from ...memory.experience_replay import *
# Cell
class DQN(Module):
def __init__(self,state_sz:int,action_sz:int,hidden=512):
self.layers=Sequential(
Linear(state_sz,hidden),
ReLU(),
Linear(hidden,action_sz),
)
def forward(self,x): return self.layers(x)
# Cell
class ArgMaxFeed(AgentCallback):
def before_action(self):
raw_action=self.agent.model(self.experience['state'].to(default_device()))
self.agent.raw_action_shape=raw_action.shape
self.agent.action=torch.argmax(raw_action,dim=1).reshape(-1,1)
class DiscreteEpsilonRandomSelect(AgentCallback):
def __init__(self,idx=0,min_epsilon=0.2,max_epsilon=1,max_steps=5000):
store_attr()
self.epsilon=max_epsilon
def before_noise(self):
# Temporarily commenting this out to see if the random action selection is the problem.
# Right now the issue is that we are not getting a lot of terminal examples early enough.
# This is causing the loss to go crazy massive.
# self.mask=torch.randn(size=(self.agent.action.shape[0],))<self.epsilon
# self.experience['randomly_selected']=self.mask.reshape(-1,1)
self.experience['epsilon']=torch.full(self.agent.action.shape,self.epsilon)
self.experience['orignal_actions']=self.agent.action.detach().clone()
# self.agent.action[self.mask]=self.agent.action[self.mask].random_(0,self.agent.raw_action_shape[1])
# self.agent.action=self.agent.action.detach().cpu().numpy()
mask = np.random.random(size=self.action.shape[0]) < self.epsilon
        # Sample random actions over the full discrete action space
        # (len(self.action.shape) only coincidentally equals 2 for CartPole).
        rand_actions = np.random.choice(self.agent.raw_action_shape[1], sum(mask))
actions=self.agent.action.cpu().detach().numpy().reshape((-1,))
actions[mask] = rand_actions
self.agent.action=Tensor(actions).long().reshape(-1,1)
if self.agent.model.training:
self.idx+=1
self.epsilon=max(self.min_epsilon,self.max_epsilon-self.idx/self.max_steps)
# Cell
class Epsilon(Metric):
order=30
epsilon=0
counter=0
def __init__(self,writer:SummaryWriter=None): store_attr()
@property
def value(self): return self.epsilon
def reset(self): self.epsilon=0
def accumulate(self,learn):
for cb in learn.model.cbs:
if type(cb)==DiscreteEpsilonRandomSelect:
self.epsilon=cb.epsilon
self.counter+=1
if self.writer is not None: self.writer.add_scalar('epsilon',self.epsilon,self.counter)
# Cell
class DQNTrainer(Callback):
"Performs traditional training on `next_q`. Requires a callback such as `RegularNextQ`"
def __init__(self,discount=0.99,n_steps=1):
store_attr()
self._xb=None
self.n_batch=0
def after_pred(self):
self.learn.yb=self.xb
# self.learn.xb=self.xb
self._xb=(self.xb,)
self.learn.done_mask=self.xb['done'].reshape(-1,)
self.learn.next_q=self.learn.model.model(self.xb['next_state']).max(dim=1).values.reshape(-1,1)
self.learn.next_q[self.done_mask]=0 #xb[done_mask]['reward']
self.learn.targets=self.xb['reward']+self.learn.next_q*(self.discount**self.n_steps)
self.learn.pred=self.learn.model.model(self.xb['state'])
t_q=self.pred.clone()
t_q.scatter_(1,self.xb['action'],self.targets)
# finalize the xb and yb
self.learn.yb=(t_q,)
# if (self.n_batch-1)%500==0:
# print('The loss should be practically zero: ',self.loss)
# print(self.learn.pred-t_q)
with torch.no_grad():
self.learn.td_error=(self.pred-self.yb[0]).mean(dim=1).reshape(-1,1)**2
def before_backward(self):
self.n_batch+=1
self.learn.xb=self._xb
```
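A quick shape check for the `DQN` module above, assuming a CartPole-sized observation (4 floats), 2 discrete actions, and that the package root is `fastrl`.
```python
# Minimal shape check for the DQN module above; the dimensions are assumptions.
import torch
from fastrl.agents.dqn.core import DQN

model = DQN(state_sz=4, action_sz=2, hidden=512)
states = torch.randn(32, 4)               # a batch of 32 observations
q_values = model(states)                  # -> shape (32, 2), one Q-value per action
actions = torch.argmax(q_values, dim=1)   # greedy action per observation
print(q_values.shape, actions.shape)
```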
#### File: fastrl/memory/tensorboard.py
```python
__all__ = ['create_event_accumulator', 'diff_tags', 'scalars2value', 'scalar2line', 'images2animation',
'figures_to_html', 'create_experience_replay_fig']
# Cell
# Python native modules
import os
from fnmatch import fnmatch
from warnings import warn
# Third party libs
from PIL import Image
from IPython.display import HTML
from io import BytesIO
import plotly.express as px
import numpy as np
import pandas as pd
from fastcore.all import *
from tensorboard.backend.event_processing import event_accumulator
# Local modules
from ..core import *
# Cell
def create_event_accumulator(
p:Optional[Path]=None, # Path or in-complete path to a tensorboard event directory. ref `pattern`
pattern:str='*', # Will filter paths at `cwd` if `p` is None, or filter at `p`
verbose:bool=False,
images=None, # Reference EventAccumulator `size_guidance`
scalars=None, # Reference EventAccumulator `size_guidance`
audio=None, # Reference EventAccumulator `size_guidance`
histograms=None, # Reference EventAccumulator `size_guidance`
compressed_histograms=None, # Reference EventAccumulator `size_guidance`
)->event_accumulator.EventAccumulator:
"Creates an event accumulator at the first directory at `p`."
p=ifnone(p,Path.cwd())
events=L([o for o in p.ls() if fnmatch(str(o),pattern)]).sorted()
if not events:
raise Exception(f'No events were found at {p}'+('' if pattern is None else f' and pattern {pattern}'))
if len(events)>1 and verbose:
warn(f'Got {len(events)} events. Using the most recent one.')
if verbose:
print('Using: ',str(events[-1]))
ea=event_accumulator.EventAccumulator(str(events[-1]),
size_guidance={
event_accumulator.COMPRESSED_HISTOGRAMS: ifnone(compressed_histograms,500),
event_accumulator.IMAGES: ifnone(images,100),
event_accumulator.AUDIO: ifnone(audio,4),
event_accumulator.SCALARS: ifnone(scalars,0),
event_accumulator.HISTOGRAMS: ifnone(histograms,1),
})
ea.Reload()
return ea
# Cell
def diff_tags(a,b):
return ''.join(set(a.split('/'))-set(b.split('/')))
# Cell
def scalars2value(scalars:list): return list(map(lambda o:o.value,scalars))
def scalar2line(
tag_pattern:str, # Tags that will match this pattern to be displayed.
ea:event_accumulator.EventAccumulator, # The event accumulator to load the images
start:int=0, # Where in the list of images to start loading
end:Optional[int]=None, # Optionally where to stop loading images
step:int=1 # Number of steps betwee image reads
):
tags=[t for t in ea.Tags()['scalars'] if fnmatch(t,tag_pattern)]
if not tags: warn(f'There are not tags with pattern {tag_pattern}, there are: \n{ea.Tags()["scalars"]}')
# slice the scalars
sliced={t:scalars2value(ea.Scalars(t)[start:end:step]) for t in tags}
# Create the data frame
df=pd.DataFrame(
data={'values':np.array(list(sliced.values())).reshape(-1,),
'tags':np.array([[diff_tags(k,tag_pattern)]*len(v)
for k,v in sliced.items()]).reshape(-1,),
'steps':np.array([np.arange(len(v)) for v in sliced.values()]).reshape(-1,)}
)
value_array=np.array(list(sliced.values()))
return px.line(data_frame=df,x='steps',y='values',animation_frame="tags",
title=tag_pattern,
range_y=[value_array.min(),value_array.max()])
# Cell
def _td_gif2np(img):
return np.array(Image.open(BytesIO(img.encoded_image_string)).convert('RGB')).copy()
def images2animation(
tag_pattern:str, # Tags that will match this pattern to be displayed.
ea:event_accumulator.EventAccumulator, # The event accumulator to load the images
start:int=0, # Where in the list of images to start loading
end:Optional[int]=None, # Optionally where to stop loading images
step:int=1 # Number of steps betwee image reads
):
tags=[t for t in ea.Tags()['images'] if fnmatch(t,tag_pattern)]
if not tags:
warn(f'There are no images with pattern {tag_pattern} given tags: '+'\n'.join(ea.Tags()['images']))
return None
imgs=ea.Images(tags[-1])[start:end:step]
if not imgs:
warn(f'There are no images in {tags[-1]}')
return None
np_imgs=[_td_gif2np(o) for o in imgs]
return px.imshow(np.array(np_imgs),animation_frame=0)
# Cell
def figures_to_html(*figs):
html="<html><head></head><body>" + "\n"
for fig in figs: html+=fig.to_html().split('<body>')[1].split('</body>')[0]
html+="</body></html>" + "\n"
display(HTML(html))
# Cell
@delegates(create_event_accumulator)
def create_experience_replay_fig(
tag_pattern_expected_reward:Union[str,list]='experience_replay/*/expected_reward/action_dim_0', # Tags about expected reward
tag_pattern_td_error:str='experience_replay/*/td_error', # Tags about td_error
tag_pattern_images:str='*', # Tags about images.
start:int=0, # Where in the list of images to start loading
end:Optional[int]=None, # Optionally where to stop loading images
step:int=1, # Number of steps betwee image reads
**kwargs):
ea=create_event_accumulator(**kwargs)
common_args=dict(start=start,end=end,step=step)
if isinstance(tag_pattern_expected_reward,str):
expected_reward_line=(scalar2line(tag_pattern_expected_reward,ea=ea,**common_args),)
else:
expected_reward_line=tuple(scalar2line(tper,ea=ea,**common_args)
for tper in tag_pattern_expected_reward)
td_error_line=scalar2line(tag_pattern_td_error,ea=ea,**common_args)
if tag_pattern_images is not None:
images_animation=images2animation(tag_pattern_images,ea=ea,**common_args)
return figures_to_html(
td_error_line,
images_animation,
*expected_reward_line
)
else:
return figures_to_html(
td_error_line,
*expected_reward_line
)
``` |
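A hedged sketch of reading an event directory with the helpers above; the directory name and tag pattern are placeholders.
```python
# Hypothetical usage of the tensorboard helpers; paths and tag patterns are placeholders.
from pathlib import Path
from fastrl.memory.tensorboard import create_event_accumulator, scalar2line

ea = create_event_accumulator(Path('runs'), pattern='*dqn*', verbose=True)
fig = scalar2line('experience_replay/*/td_error', ea=ea, start=0, step=10)
fig.show()  # plotly line figure with one animation frame per matching tag
```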
{
"source": "josiahls/Lowes-Product-Classifier",
"score": 3
} |
#### File: lowes-product-classifier/models/helpers.py
```python
import os
from pathlib import Path
def get_absolute_data_path(data_workspace: str = 'data', parent_up_limit=2, sub_folder_name: str = None):
"""
Gets the absolute path for the 'data' directory.
***Note that this assumes the script is in the lowes-product-classifier***
:param sub_folder_name: A sub folder name. It is the user's responsibility if the sub folder has separators such as
'/' or '\'. Please use os.sep if this is the case.
:param data_workspace: Name of the workspace. Default is 'data'
:param parent_up_limit: The number of upper folders to look through to find the directory
:return: The absolute path to the workspace. IE a string like:
/Users/jlaivins/PycharmProjects/Lowes-Product-Classifier/lowes-product-classifier/data/
"""
absolute_path = ''
for i in range(-1, parent_up_limit):
if i == -1:
curr = str(Path().absolute())
else:
curr = str(Path().absolute().parents[i])
if data_workspace in os.listdir(curr):
absolute_path = curr + os.sep + data_workspace
break
# If the user specifies a sub folder, add it
if sub_folder_name is not None:
absolute_path += sub_folder_name
return absolute_path
``` |
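A quick sketch of calling `get_absolute_data_path`; the sub-folder name is illustrative and the import path assumes the repository's `models` package is importable.
```python
# Illustrative call; the sub-folder name is made up.
import os
from models.helpers import get_absolute_data_path

# Looks for a 'data' directory in the current folder or up to two parents,
# then appends the optional sub folder (separators are the caller's job).
images_path = get_absolute_data_path('data', sub_folder_name=os.sep + 'images')
print(images_path)
```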
{
"source": "JosiahMg/pyswan",
"score": 3
} |
#### File: pyswan/pyswan/extract_cpca.py
```python
from pyswan.cpca import transform
class GenPlace:
@staticmethod
def parse(message):
df_message = transform([message.target])
for d in df_message.index:
ele = {}
ele['type'] = 'place'
ele['province'] = df_message.loc[d]['province']
ele['city'] = df_message.loc[d]['city']
ele['area'] = df_message.loc[d]['area']
ele['code'] = df_message.loc[d]['adcode']
ele['address'] = df_message.loc[d]['address']
message.extracts.append(ele)
return message
```
#### File: pyswan/pyswan/message.py
```python
class Message:
def __init__(self, target):
self.origin = target # origin input
self.target = target # current output
self.history = [] # store history
self.time_keywords = [] # extracte datetime
self.extracts = []
def insert(self, **kwargs):
element = {}
for pro, info in kwargs.items():
element[pro] = info
self.history.append(element)
def merge(self):
pass
    # Drop the keys listed in `filter` from each extract before returning
def get_extracts(self, filter=['pattern']):
res = []
for extract in self.extracts:
tmp = {}
for key, value in extract.items():
if key not in filter:
tmp[key] = value
res.append(tmp)
return res
```
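A small sketch of the `Message` container above; the example extract fields are arbitrary.
```python
# Illustrative use of Message; the extract fields are arbitrary.
from pyswan.message import Message

msg = Message('现在是12点50分')
msg.insert(start=3, end=5, body='12', value='12')  # record one replacement in the history
msg.extracts.append({'type': 'number', 'value': 12, 'pattern': 'internal'})
print(msg.get_extracts())  # 'pattern' is filtered out by default -> [{'type': 'number', 'value': 12}]
```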
#### File: pyswan/pyswan/numeral.py
```python
import regex as re
from pyswan.constants import zh2arab_tab
from pyswan.message import Message
class ExtractNumeral:
"""
    Convert Chinese numerals in text to Arabic digits, e.g.:
        两百零三万五千九 -> 2035900
        周日 -> 周7
"""
@staticmethod
def simple_zh2arab(message):
'''
        Simple replacement of Chinese numerals with Arabic digits, e.g.:
            十二万八千零五 -> 十2万8千05
        :param target: input to convert
        :return:
        '''
        # Handle ordinary digit characters
pattern = re.compile("[零一二两三四五六七八九]")
match = pattern.finditer(message.target)
for m in match:
value = str(zh2arab_tab.get(m.group()))
message.insert(start=m.start(), end=m.end(),
body=m.group(), value=value, pattern=pattern)
message.target = pattern.sub(value, message.target, 1)
        # Handle weekend/Sunday words: 周末 / 周日 / 周天
pattern = re.compile("(?<=(周|星期))[末日天]")
match = pattern.finditer(message.target)
for m in match:
value = str(zh2arab_tab.get(m.group()))
message.insert(start=m.span()[0], end=m.end(
), body=m.group(), value=value, pattern=pattern)
message.target = pattern.sub(value, message.target, 1)
return message
@staticmethod
def __single_hundreds_digit_arab(message):
'''
        Handle forms like: 1百2 -> 120
:param target:
:return:
'''
pattern = re.compile("[1-9]百[1-9](?!十)")
match = pattern.finditer(message.target)
for m in match:
group = m.group()
s = group.split("百")
s = list(filter(None, s))
num = 0
if len(s) == 2:
num += int(s[0]) * 100 + int(s[1]) * 10
message.insert(start=m.span()[0], end=m.end(
), body=m.group(), value=str(num), pattern=pattern)
message.target = pattern.sub(str(num), message.target, 1)
return message
@staticmethod
def __single_thousands_digit_arab(message):
'''
        Handle forms like: 1千2 -> 1200
:param target:
:return:
'''
pattern = re.compile("[1-9]千[1-9](?!(百|十))")
match = pattern.finditer(message.target)
for m in match:
group = m.group()
s = group.split("千")
s = list(filter(None, s))
num = 0
if len(s) == 2:
num += int(s[0]) * 1000 + int(s[1]) * 100
message.insert(start=m.start(), end=m.end(),
body=m.group(), value=str(num), pattern=pattern)
message.target = pattern.sub(str(num), message.target, 1)
return message
@staticmethod
def __single_ten_thousands_digit_arab(message):
pattern = re.compile("[1-9]万[1-9](?!(千|百|十))")
match = pattern.finditer(message.target)
for m in match:
group = m.group()
s = group.split("万")
s = list(filter(None, s))
num = 0
if len(s) == 2:
num += int(s[0]) * 10000 + int(s[1]) * 1000
message.insert(start=m.start(), end=m.end(),
body=m.group(), value=str(num), pattern=pattern)
message.target = pattern.sub(str(num), message.target, 1)
return message
@staticmethod
def __tens_digit_arab(message):
'''
        Requires input already processed by simple_zh2arab.
        Handles tens, e.g. 十2 -> 12, 3十5 -> 35
        :param target: input to convert
:return:
'''
pattern = re.compile("0?[0-9]?十[0-9]?")
match = pattern.finditer(message.target)
for m in match:
group = m.group()
s = group.split("十")
num = 0
if s[0] == '':
num += 10
else:
num += int(s[0])*10
if s[1]:
num += int(s[1])
message.insert(start=m.start(), end=m.end(),
body=m.group(), value=str(num), pattern=pattern)
message.target = pattern.sub(str(num), message.target, 1)
return message
@staticmethod
def __hundreds_digit_arab(message):
'''
        Requires input already processed by simple_zh2arab and tens_digit_arab.
        Handles hundreds, e.g. 2百25
        :param target: input to convert
:return:
'''
pattern = re.compile("0?[1-9]百[0-9]?[0-9]?")
match = pattern.finditer(message.target)
for m in match:
group = m.group()
s = group.split("百")
s = list(filter(None, s))
num = 0
if len(s) == 1:
hundred = int(s[0])
num += hundred * 100
elif len(s) == 2:
hundred = int(s[0])
num += hundred * 100
num += int(s[1])
message.insert(start=m.start(), end=m.end(),
body=m.group(), value=str(num), pattern=pattern)
message.target = pattern.sub(str(num), message.target, 1)
return message
@staticmethod
def __thousands_digit_arab(message):
pattern = re.compile("0?[1-9]千[0-9]?[0-9]?[0-9]?")
match = pattern.finditer(message.target)
for m in match:
group = m.group()
s = group.split("千")
s = list(filter(None, s))
num = 0
if len(s) == 1:
thousand = int(s[0])
num += thousand * 1000
elif len(s) == 2:
thousand = int(s[0])
num += thousand * 1000
num += int(s[1])
message.insert(start=m.start(), end=m.end(),
body=m.group(), value=str(num), pattern=pattern)
message.target = pattern.sub(str(num), message.target, 1)
return message
@staticmethod
def __ten_thousands_digit_arab(message):
"""
        Run after thousands_digit_arab.
:param target:
:return:
"""
pattern = re.compile("[0-9]+万[0-9]?[0-9]?[0-9]?[0-9]?")
match = pattern.finditer(message.target)
for m in match:
group = m.group()
s = group.split("万")
s = list(filter(None, s))
num = 0
if len(s) == 1:
tenthousand = int(s[0])
num += tenthousand * 10000
elif len(s) == 2:
tenthousand = int(s[0])
num += tenthousand * 10000
num += int(s[1])
message.insert(start=m.start(), end=m.end(),
body=m.group(), value=str(num), pattern=pattern)
message.target = pattern.sub(str(num), message.target, 1)
return message
@classmethod
def digitize(cls, target):
message = Message(target)
        message = cls.simple_zh2arab(message)  # e.g. "十二万八千零五 -> 十2万8千05"
        message = cls.__single_ten_thousands_digit_arab(message)  # e.g. "2万3"
        message = cls.__single_thousands_digit_arab(message)  # e.g. "2千3"
        message = cls.__single_hundreds_digit_arab(message)  # e.g. "2百3"
        message = cls.__tens_digit_arab(message)  # e.g. "2十3"
        message = cls.__hundreds_digit_arab(message)  # e.g. "5百35"
        message = cls.__thousands_digit_arab(message)  # e.g. "1千456"
        message = cls.__ten_thousands_digit_arab(message)  # e.g. "7万2337"
return message
if __name__ == '__main__':
res = ExtractNumeral.digitize('梁山一百零八好汉')
print(res.target)
```
#### File: pyswan/pyswan/parser.py
```python
from pyswan.numeral import ExtractNumeral
from pyswan.extract_time import GenDatetime
from pyswan.extract_number import GenNumber
from pyswan.extract_math_equation import GenMathEquation
from pyswan.extract_cpca import GenPlace
def digitize(target):
return ExtractNumeral.digitize(target).target
def parse(target, dim):
"""
:param target: input data
:param dim:
:return:
"""
supported = ['time', 'number', 'equation', 'place']
to_support = ['temperature', 'ordinal', 'distance', 'volume',
'amount-of-money', 'duration', 'email', 'url', 'phone-number']
if not isinstance(dim, list):
print('dim must be of type list')
return []
if len(dim) == 0:
return []
    # Digitize first: convert Chinese numerals to Arabic digits
message = ExtractNumeral.digitize(target)
for d in dim:
if d not in supported:
continue
if d == 'time':
message = GenDatetime().parse(message)
if d == 'number':
message = GenNumber().parse(message)
if d == 'equation':
message = GenMathEquation().parse(message)
if d == 'place':
message = GenPlace().parse(message)
res = message.get_extracts(filter=['pattern'])
return res
if __name__ == '__main__':
from pprint import pprint
pprint(digitize('梁山一百零八好汉'))
pprint(parse('现在是十二月13日12点50分', dim=['time', 'number']))
pprint(parse('六加十三除以2再乘八等于多少', dim=['equation']))
pprint(parse('徐汇区虹漕路461号五十八号楼五楼', dim=['place']))
``` |
{
"source": "JosiahWI/AutoArch",
"score": 2
} |
#### File: JosiahWI/AutoArch/main.py
```python
import discord
from discord.ext import commands
import cogs
import settings
import random
bot = commands.Bot(command_prefix='=')
bot.add_cog(cogs.Moderation(bot))
bot.add_cog(cogs.Fun(bot))
bot.add_cog(cogs.Utility(bot))
@bot.event
async def on_ready():
activity = discord.Activity(name=bot.command_prefix+'help', type=discord.ActivityType.watching)
await bot.change_presence(activity = activity)
print('Logged in as '+str(bot.user)+'\n---\nGuilds: '+str(len(bot.guilds)))
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, discord.ext.commands.MissingPermissions):
await ctx.send("You don't have the required permissions to use that command.\nMissing permission(s): `"+', '.join(error.missing_perms)+'`.')
if isinstance(error, discord.ext.commands.BotMissingPermissions):
await ctx.send("I don't have permission to do that here!\nAsk a server owner to give me the `"+', '.join(error.missing_perms)+"` permission(s).")
if isinstance(error, discord.ext.commands.MissingRequiredArgument):
await ctx.send("Looks like you're missing a required argument there.")
if isinstance(error, discord.ext.commands.BadArgument):
await ctx.send("Invalid argument(s) provided.")
print(str(error))
class AutoArchHelpCommand(commands.MinimalHelpCommand):
def get_command_signature(self, command):
return '{0.clean_prefix}{1.qualified_name} {1.signature}'.format(self, command)
async def send_bot_help(self, mapping):
ctx = self.context
help_embed = discord.Embed(
title = 'Commands',
description = self.get_opening_note(),
colour = discord.Colour.gold()
)
help_embed.set_footer(text = random.choice(settings.quotes_short))
help_embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
for cog in mapping.keys():
name = 'Other'
if cog != None:
name = cog.qualified_name
if hasattr(cog, 'custom_icon'):
name = cog.custom_icon + ' ' + name
help_embed.add_field(name = name, value = ', '.join([command.name for command in mapping[cog]]))
await self.get_destination().send(embed = help_embed)
async def send_command_help(self, command):
ctx = self.context
help_embed = discord.Embed(
title = self.get_command_signature(command),
description = command.help,
colour = discord.Colour.gold()
)
help_embed.set_footer(text = random.choice(settings.quotes_short))
help_embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
await self.get_destination().send(embed = help_embed)
async def send_cog_help(self, cog):
ctx = self.context
commands = ', '.join([command.qualified_name for command in cog.get_commands()])
help_embed = discord.Embed(
title = cog.qualified_name,
description = commands,
colour = discord.Colour.gold()
)
help_embed.set_footer(text = random.choice(settings.quotes_short))
help_embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
await self.get_destination().send(embed = help_embed)
async def send_group_help(self, group):
ctx = self.context
commands = ', '.join([command.qualified_name for command in group.commands])
help_embed = discord.Embed(
title = group.qualified_name + ' Subcommands',
description = commands,
colour = discord.Colour.gold()
)
help_embed.set_footer(text = random.choice(settings.quotes_short))
help_embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
await self.get_destination().send(embed = help_embed)
@bot.command(help = 'Link to the bot\'s GitHub repo.')
async def source(ctx):
source_embed = discord.Embed(
title = 'Bot Source',
description = 'The full source code is available at\n'+settings.source+'\nand is licensed under '+settings.license+'.',
colour = discord.Colour.gold()
)
source_embed.set_thumbnail(url = ctx.bot.user.avatar_url)
source_embed.set_footer(text = random.choice(settings.quotes_short))
source_embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
await ctx.send(embed = source_embed)
bot.help_command = AutoArchHelpCommand()
bot.run(settings.token)
``` |
{
"source": "josiahwitheford/fs.smbfs",
"score": 2
} |
#### File: fs.smbfs/tests/test_smbfs.py
```python
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import shutil
import sys
import tempfile
import time
import unittest
import uuid
import smb.base
import fs.errors
import fs.path
import fs.test
from fs.enums import ResourceType
from fs.subfs import ClosingSubFS
from fs.smbfs import SMBFS
from . import utils
from .utils import mock
@unittest.skipUnless(utils.DOCKER, "docker service unreachable.")
class TestSMBFS(fs.test.FSTestCases, unittest.TestCase):
def make_fs(self):
self.dir = fs.path.join('data', uuid.uuid4().hex)
self.smbfs = fs.open_fs('smb://rio:[email protected]/')
self.smbfs.makedirs(self.dir, recreate=True)
return self.smbfs.opendir(self.dir, factory=ClosingSubFS)
@unittest.skip("the filesystem is not case sensitive")
def test_case_sensitive(self):
super(TestSMBFS, self).test_case_sensitive()
def test_connection_error(self):
with utils.mock.patch('fs.smbfs.smbfs.SMBFS.NETBIOS') as n:
n.queryIPForName = utils.mock.MagicMock(return_value = ("TE"))
self.assertRaises(
fs.errors.CreateFailed,
fs.open_fs, 'smb://8.8.8.8?timeout=1'
)
def test_write_denied(self):
_fs = fs.open_fs('smb://127.0.0.1/data')
self.assertRaises(
fs.errors.PermissionDenied,
_fs.openbin, '/test.txt', 'w'
)
def test_openbin_root(self):
_fs = fs.open_fs('smb://rio:[email protected]/')
self.assertRaises(
fs.errors.ResourceNotFound,
_fs.openbin, '/abc'
)
self.assertRaises(
fs.errors.PermissionDenied,
_fs.openbin, '/abc', 'w'
)
def test_openbin_error(self):
self.fs.touch("abc")
with mock.patch.object(self.smbfs, "_new_connection", side_effect=IOError):
self.assertRaises(fs.errors.OperationFailed, self.fs.openbin, "abc")
def test_makedir_root(self):
_fs = fs.open_fs('smb://rio:[email protected]/')
self.assertRaises(
fs.errors.PermissionDenied,
_fs.makedir, '/abc'
)
def test_removedir_root(self):
_fs = fs.open_fs('smb://rio:[email protected]/')
scandir = utils.mock.MagicMock(return_value=iter([]))
with utils.mock.patch.object(_fs, 'scandir', scandir):
self.assertRaises(
fs.errors.PermissionDenied,
_fs.removedir, '/data'
)
def test_seek(self):
self.fs.writetext('foo.txt', 'Hello, World !')
with self.fs.openbin('foo.txt') as handle:
self.assertRaises(ValueError, handle.seek, -2, 0)
self.assertRaises(ValueError, handle.seek, 2, 2)
self.assertRaises(ValueError, handle.seek, -2, 12)
self.assertEqual(handle.seek(2, 1), 2)
self.assertEqual(handle.seek(-1, 1), 1)
self.assertEqual(handle.seek(-2, 1), 0)
self.fs.remove('foo.txt')
def test_makedir(self):
super(TestSMBFS, self).test_makedir()
self.fs.touch('abc')
self.assertRaises(
fs.errors.DirectoryExpected,
self.fs.makedir, '/abc/def'
)
self.assertRaises(
fs.errors.ResourceNotFound,
self.fs.makedir, '/spam/bar'
)
self.assertRaises(
fs.errors.DirectoryExists,
self.fs.delegate_fs().makedir, '/'
)
self.assertRaises(
fs.errors.DirectoryExists,
self.fs.delegate_fs().makedir, 'data'
)
def test_move(self):
super(TestSMBFS, self).test_move()
self.fs.touch('a')
self.fs.touch('b')
self.assertRaises(
fs.errors.DirectoryExpected,
self.fs.move, 'a', 'b/a'
)
self.assertRaises(
fs.errors.DestinationExists,
self.fs.delegate_fs().move,
fs.path.join(self.dir, 'a'),
fs.path.join(self.dir, 'b'),
)
def test_openbin(self):
super(TestSMBFS, self).test_openbin()
self.fs.makedir('spam')
self.assertRaises(
fs.errors.FileExpected,
self.fs.openbin, 'spam'
)
self.fs.touch('abc.txt')
self.assertRaises(
fs.errors.DirectoryExpected,
self.fs.openbin, 'abc.txt/def.txt', 'w'
)
def test_removedir(self):
super(TestSMBFS, self).test_removedir()
self.assertRaises(
fs.errors.RemoveRootError,
self.fs.delegate_fs().removedir, '/'
)
def test_scanshares(self):
share = next(self.fs.delegate_fs().scandir('/', ['basic', 'access']))
self.assertEqual(share.name, 'data')
#self.assertEqual(share.get('access', 'uid'), "S-1-5-21-708263368-3365369569-291063048-1000")
self.assertTrue(share.get('access', 'uid').startswith("S-1-5-21"))
def test_getinfo_root(self):
self.assertEqual(self.fs.delegate_fs().gettype('/'), ResourceType.directory)
self.assertEqual(self.fs.delegate_fs().getsize('/'), 0)
def test_info_access_smb1(self):
self.fs.writetext('test.txt', 'This is a test')
_smb = self.fs.delegate_fs()._smb
with utils.mock.patch.object(_smb, '_getSecurity', new=_smb._getSecurity_SMB1):
try:
info = self.fs.getinfo('test.txt', namespaces=['access'])
except smb.base.NotReadyError:
self.fail("getinfo(..., ['access']) raised an error")
try:
list(self.fs.scandir('/', namespaces=['access']))
except smb.base.NotReadyError:
self.fail("scandir(..., ['access']) raised an error")
def test_getinfo_smb(self):
self.fs.writetext('test.txt', 'This is a test')
info = self.fs.getinfo('test.txt', namespaces=['basic', 'smb'])
self.assertFalse(info.get('smb', 'hidden'))
self.assertFalse(info.get('smb', 'system'))
def test_openbin_w_readinto(self):
with self.fs.openbin("abc", "w") as f:
self.assertRaises(IOError, f.readinto, io.BytesIO())
def test_download_error(self):
self.fs.makedir("/abc")
self.assertRaises(fs.errors.FileExpected, self.fs.download, "/abc", io.BytesIO())
self.assertRaises(fs.errors.ResourceNotFound, self.fs.download, "/def", io.BytesIO())
self.assertRaises(fs.errors.ResourceNotFound, self.fs.download, "/def/ghi", io.BytesIO())
def test_upload_root(self):
_fs = fs.open_fs('smb://rio:[email protected]/')
self.assertRaises(fs.errors.PermissionDenied, _fs.upload, "/abc", io.BytesIO())
def test_upload_error(self):
self.fs.makedir("/abc")
self.assertRaises(fs.errors.FileExpected, self.fs.upload, "/abc", io.BytesIO())
self.assertRaises(fs.errors.ResourceNotFound, self.fs.upload, "/def/ghi", io.BytesIO())
@unittest.skipUnless(utils.DOCKER, "docker service unreachable.")
class TestSMBFSConnection(unittest.TestCase):
user = "rio"
pasw = "<PASSWORD>"
def open_smbfs(self, host_token, port=None, direct_tcp=False):
return SMBFS(host_token, self.user, self.pasw, port=port, direct_tcp=direct_tcp)
@utils.py2expectedFailure
def test_hostname(self):
smbfs = self.open_smbfs("SAMBAALPINE")
def test_ip(self):
smbfs = self.open_smbfs("127.0.0.1")
@mock.patch.object(SMBFS, 'NETBIOS', mock.MagicMock())
def test_hostname_and_ip(self):
smbfs = self.open_smbfs(("SAMBAALPINE", "127.0.0.1"))
SMBFS.NETBIOS.queryIPforName.assert_not_called()
SMBFS.NETBIOS.queryName.assert_not_called()
@mock.patch.object(SMBFS, 'NETBIOS', mock.MagicMock())
def test_ip_and_hostname(self):
smbfs = self.open_smbfs(("127.0.0.1", "SAMBAALPINE"))
SMBFS.NETBIOS.queryIPforName.assert_not_called()
SMBFS.NETBIOS.queryName.assert_not_called()
def test_ip_and_none(self):
smbfs = self.open_smbfs(("127.0.0.1", None))
def test_none_and_ip(self):
smbfs = self.open_smbfs((None, "127.0.0.1"))
@utils.py2expectedFailure
def test_hostname_and_none(self):
smbfs = self.open_smbfs(("SAMBAALPINE", None))
@utils.py2expectedFailure
def test_none_and_hostname(self):
smbfs = self.open_smbfs((None, "SAMBAALPINE"))
def test_none_none(self):
self.assertRaises(
fs.errors.CreateFailed,
self.open_smbfs, (None, None)
)
def test_none(self):
self.assertRaises(
fs.errors.CreateFailed,
self.open_smbfs, None
)
def test_default_smb_port(self):
smbfs = self.open_smbfs("127.0.0.1")
self.assertEqual(smbfs._smb.sock.getpeername()[1], 139)
def test_explicit_smb_port(self):
smbfs = self.open_smbfs("127.0.0.1", port=445, direct_tcp=True)
self.assertEqual(smbfs._smb.sock.getpeername()[1], 445)
``` |
{
"source": "josiah-wolf-oberholtzer/consort",
"score": 2
} |
#### File: consort/consort/__init__.py
```python
from consort._version import __version_info__, __version__
del _version
from consort.tools import *
from abjad.tools import lilypondparsertools
lilypondparsertools.LilyPondParser.register_markup_function('vstrut', [])
del lilypondparsertools
DEBUG = False
def debug(*message):
r'''Prints `message` in red when `consort.DEBUG` is true.
Returns none.
'''
RED = '\033[91m'
END = '\033[0m'
if DEBUG:
if len(message) == 1:
message = message[0]
else:
message = ' '.join(str(_) for _ in message)
print(RED + str(message) + END)
```
#### File: consort/tools/DynamicExpression.py
```python
import abjad
from abjad import attach
from abjad import iterate
from abjad import select
from abjad.tools import abctools
from abjad.tools import lilypondnametools
from abjad.tools import mathtools
from abjad.tools import schemetools
from abjad.tools import scoretools
from abjad.tools import spannertools
class DynamicExpression(abctools.AbjadValueObject):
r"""A dynamic phrasing expression.
::
>>> dynamic_expression = consort.DynamicExpression(
... dynamic_tokens='f p pp pp',
... transitions=('flared', None),
... )
>>> print(format(dynamic_expression))
consort.tools.DynamicExpression(
dynamic_tokens=abjad.CyclicTuple(
['f', 'p', 'pp', 'pp']
),
transitions=abjad.CyclicTuple(
['flared', None]
),
)
.. container:: example
::
>>> music = abjad.Staff(r'''
... { c'4 d'4 e'4 f'4 }
... { g'4 a'4 b'4 }
... { c''4 }
... ''')
>>> print(format(music))
\new Staff {
{
c'4
d'4
e'4
f'4
}
{
g'4
a'4
b'4
}
{
c''4
}
}
::
>>> dynamic_expression(music)
>>> print(format(music))
\new Staff {
{
\once \override Hairpin.stencil = #flared-hairpin
c'4 \f \>
d'4
e'4
f'4
}
{
g'4 \p \>
a'4
b'4
}
{
c''4 \pp
}
}
.. container:: example
::
>>> music = abjad.Staff(r'''
... { c'4 d'4 e'4 f'4 }
... { g'4 a'4 b'4 }
... { c''4 c'4 }
... ''')
::
>>> dynamic_expression(music, seed=2)
>>> print(format(music))
\new Staff {
{
c'4 \pp
d'4
e'4
f'4
}
{
g'4 \<
a'4
b'4
}
{
\once \override Hairpin.stencil = #flared-hairpin
c''4 \f \>
c'4 \p
}
}
.. container:: example
::
>>> music = abjad.Staff("{ c'4 }")
>>> dynamic_expression(music, seed=1)
>>> print(format(music))
\new Staff {
{
c'4 \p
}
}
.. container:: example
::
>>> music = abjad.Staff("{ c'4 d'4 }")
>>> dynamic_expression(music, seed=1)
>>> print(format(music))
\new Staff {
{
c'4 \p \>
d'4 \pp
}
}
.. container:: example
::
>>> music = abjad.Staff("{ r4 c'4 r4 } { r4 d'4 r4 } { r4 e' r4 } ")
>>> dynamic_expression(music)
>>> print(format(music))
\new Staff {
{
r4
\once \override Hairpin.stencil = #flared-hairpin
c'4 \f \>
r4
}
{
r4
d'4 \p \>
r4
}
{
r4
e'4 \pp
r4
}
}
.. container:: example
::
>>> music = abjad.Staff("{ c'16 c'16 }")
>>> dynamic_expression(music)
>>> print(format(music))
\new Staff {
{
c'16 \f
c'16
}
}
.. container:: example
::
>>> music = abjad.Staff("{ c'1 }")
>>> dynamic_expression = consort.DynamicExpression(
... dynamic_tokens='fp',
... )
>>> dynamic_expression(music)
>>> print(format(music))
\new Staff {
{
c'1 \fp
}
}
.. container:: example
::
>>> music = abjad.Staff("{ c'1 }")
>>> dynamic_expression = consort.DynamicExpression(
... dynamic_tokens='fp',
... unsustained=True,
... )
>>> dynamic_expression(music)
>>> print(format(music))
\new Staff {
{
c'1 \p
}
}
.. container:: example
::
>>> music = abjad.Staff(r'''
... { c'4 d'4 e'4 }
... { c'4 d'4 e'4 }
... { c'4 d'4 e'4 }
... { c'4 d'4 e'4 }
... { c'4 d'4 e'4 }
... ''')
>>> dynamic_expression = consort.DynamicExpression(
... division_period=2,
... dynamic_tokens='p ppp',
... start_dynamic_tokens='niente',
... stop_dynamic_tokens='niente',
... )
>>> dynamic_expression(music)
>>> print(format(music))
\new Staff {
{
\once \override Hairpin.circled-tip = ##t
c'4 \<
d'4
e'4
}
{
c'4
d'4
e'4
}
{
\once \override Hairpin.circled-tip = ##t
c'4 \p \>
d'4
e'4
}
{
c'4
d'4
e'4
}
{
c'4
d'4
e'4 \!
}
}
    .. container:: example
::
>>> music = abjad.Staff("{ c'8. } { e'8. } { g'8. }")
>>> dynamic_expression = consort.DynamicExpression(
... division_period=2,
... dynamic_tokens='p ppp',
... start_dynamic_tokens='niente',
... stop_dynamic_tokens='niente',
... )
>>> dynamic_expression(music)
>>> print(format(music))
\new Staff {
{
\once \override Hairpin.circled-tip = ##t
c'8. \<
}
{
e'8.
}
{
g'8. \p
}
}
.. container:: example
::
>>> music = abjad.Staff(r'''
... { c'8 ~ c'4 }
... \times 3/4 { d'16 d' d' d' r d' d' r }
... ''')
>>> dynamic_expression = consort.DynamicExpression(
... dynamic_tokens='mf mp fff',
... start_dynamic_tokens='f',
... stop_dynamic_tokens='mf',
... )
>>> dynamic_expression(music)
>>> print(format(music))
\new Staff {
{
c'8 \f ~ \>
c'4
}
\tweak text #tuplet-number::calc-fraction-text
\times 3/4 {
d'16 \mf
d'16
d'16
d'16
r16
d'16
d'16
r16
}
}
"""
### CLASS VARIABLES ###
__slots__ = (
'_division_period',
'_dynamic_tokens',
'_only_first',
'_start_dynamic_tokens',
'_stop_dynamic_tokens',
'_transitions',
'_unsustained',
)
_transition_types = (
'constante',
'flared',
'simple',
None,
)
### INITIALIZER ###
def __init__(
self,
dynamic_tokens=('ppp',),
division_period=None,
only_first=None,
start_dynamic_tokens=None,
stop_dynamic_tokens=None,
transitions=None,
unsustained=None,
):
dynamic_tokens = self._tokens_to_cyclic_tuple(dynamic_tokens)
assert dynamic_tokens
self._dynamic_tokens = dynamic_tokens
if division_period is not None:
division_period = int(division_period)
assert 0 < division_period
self._division_period = division_period
self._start_dynamic_tokens = self._tokens_to_cyclic_tuple(
start_dynamic_tokens)
self._stop_dynamic_tokens = self._tokens_to_cyclic_tuple(
stop_dynamic_tokens)
if isinstance(transitions, (str, type(None))):
transitions = [transitions]
assert all(_ in self._transition_types for _ in transitions)
transitions = abjad.CyclicTuple(transitions)
self._transitions = transitions
if only_first is not None:
only_first = bool(only_first)
self._only_first = only_first
if unsustained is not None:
unsustained = bool(unsustained)
self._unsustained = unsustained
### SPECIAL METHODS ###
def __call__(self, music, name=None, seed=0):
import consort
original_seed = seed
current_dynamic = None
current_hairpin = None
selections, components = self._get_selections(music)
#print(selections)
#print(components)
length = len(components)
if self.only_first:
length = 1
components = components[:1]
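        # Attach dynamics and hairpins to every component except the last; the final component is handled after the loop.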
for index, component in enumerate(components[:-1]):
selection = selections[index]
dynamic, hairpin, hairpin_override = self._get_attachments(
index, length, seed, original_seed)
if dynamic != current_dynamic:
attach(dynamic, component, name=name)
current_dynamic = dynamic
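            # In "unsustained" mode, suppress the hairpin when the selection's interior leaves are all rests.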
if self.unsustained:
inner_leaves = selection[1:-1]
prototype = abjad.Rest
if (
len(inner_leaves) and
all(isinstance(_, prototype) for _ in inner_leaves)
):
hairpin = None
if hairpin is not None:
attach(hairpin, selection, name=name)
current_hairpin = hairpin
if current_hairpin is not None and hairpin_override is not None:
attach(hairpin_override, component, name=name)
seed += 1
dynamic, _, _ = self._get_attachments(
length - 1, length, seed, original_seed)
if self.unsustained:
if dynamic is not None:
if length == 1:
if not selections or len(selections[0]) < 4:
if dynamic.name in dynamic._composite_dynamic_name_to_steady_state_dynamic_name:
dynamic_name = dynamic._composite_dynamic_name_to_steady_state_dynamic_name[dynamic.name]
dynamic = consort.Dynamic(dynamic_name)
if dynamic != current_dynamic:
attach(dynamic, components[-1], name=name)
if dynamic.name == 'niente' and current_hairpin:
next_leaf = components[-1]._get_leaf(1)
if next_leaf is not None:
current_hairpin._append(next_leaf)
### PRIVATE METHODS ###
def _get_attachments(self, index, length, seed, original_seed):
import consort
dynamic_seed = seed
if self.start_dynamic_tokens:
dynamic_seed -= 1
this_token = None
next_token = None
this_dynamic = None
next_dynamic = None
hairpin = None
hairpin_override = None
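        # Pick tokens by position: a single component, a pair, or the general case (first, last, next-to-last, interior).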
if length == 1:
if self.start_dynamic_tokens:
this_token = self.start_dynamic_tokens[original_seed]
elif self.stop_dynamic_tokens:
this_token = self.stop_dynamic_tokens[original_seed]
else:
this_token = self.dynamic_tokens[dynamic_seed]
            if this_token == 'niente':
this_token = self.dynamic_tokens[dynamic_seed]
elif length == 2:
if index == 0:
if self.start_dynamic_tokens:
this_token = self.start_dynamic_tokens[original_seed]
else:
this_token = self.dynamic_tokens[dynamic_seed]
if self.stop_dynamic_tokens:
next_token = self.stop_dynamic_tokens[original_seed]
else:
next_token = self.dynamic_tokens[dynamic_seed + 1]
elif index == 1:
if self.stop_dynamic_tokens:
this_token = self.stop_dynamic_tokens[original_seed]
if (
                        this_token == 'niente' and
self.start_dynamic_tokens and
self.start_dynamic_tokens[original_seed] == 'niente'
):
this_token = self.dynamic_tokens[dynamic_seed]
else:
this_token = self.dynamic_tokens[dynamic_seed]
            if this_token == next_token == 'niente':
next_token = self.dynamic_tokens[dynamic_seed]
else:
#print('!!!', index)
if index == 0:
if self.start_dynamic_tokens:
this_token = self.start_dynamic_tokens[original_seed]
next_token = self.dynamic_tokens[dynamic_seed + 1]
#print('A1', this_token, next_token)
else:
this_token = self.dynamic_tokens[dynamic_seed]
next_token = self.dynamic_tokens[dynamic_seed + 1]
#print('A2', this_token, next_token)
elif index == length - 1: # Last component.
if self.stop_dynamic_tokens:
this_token = self.stop_dynamic_tokens[original_seed]
#print('B1', this_token, next_token)
else:
this_token = self.dynamic_tokens[dynamic_seed]
#print('B2', this_token, next_token)
elif index == length - 2: # Next to last component.
this_token = self.dynamic_tokens[dynamic_seed]
if self.stop_dynamic_tokens:
next_token = self.stop_dynamic_tokens[original_seed]
#print('C1', this_token, next_token)
else:
next_token = self.dynamic_tokens[dynamic_seed + 1]
#print('C2', this_token, next_token)
else:
this_token = self.dynamic_tokens[dynamic_seed]
next_token = self.dynamic_tokens[dynamic_seed + 1]
#print('D1', this_token, next_token)
this_dynamic = consort.Dynamic(this_token)
this_dynamic_ordinal = mathtools.NegativeInfinity()
if this_dynamic.name != 'niente':
this_dynamic_ordinal = this_dynamic.ordinal
if next_token is not None:
next_dynamic = consort.Dynamic(next_token)
next_dynamic_ordinal = mathtools.NegativeInfinity()
if next_dynamic.name != 'niente':
next_dynamic_ordinal = next_dynamic.ordinal
if next_dynamic is not None:
if this_dynamic_ordinal < next_dynamic_ordinal:
hairpin = spannertools.Hairpin('<', include_rests=True)
elif next_dynamic_ordinal < this_dynamic_ordinal:
hairpin = spannertools.Hairpin('>', include_rests=True)
if hairpin is not None:
transition = self.transitions[seed]
if transition == 'constante':
hairpin = spannertools.Hairpin('<', include_rests=True)
if transition in ('flared', 'constante'):
hairpin_override = lilypondnametools.LilyPondGrobOverride(
grob_name='Hairpin',
is_once=True,
property_path='stencil',
value=schemetools.Scheme('{}-hairpin'.format(transition)),
)
if this_dynamic.name == 'niente' or next_dynamic.name == 'niente':
hairpin_override = lilypondnametools.LilyPondGrobOverride(
grob_name='Hairpin',
is_once=True,
property_path='circled-tip',
value=True,
)
#print(index, this_dynamic, next_dynamic, hairpin)
return this_dynamic, hairpin, hairpin_override
def _partition_selections(self, music):
period = self.division_period or 1
selections = [
select(list(iterate(_).by_leaf())) for _ in music
]
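        # Chunk the per-division leaf selections by the division period; a short trailing chunk is folded into the previous one.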
parts = abjad.Sequence(selections).partition_by_counts(
[period], cyclic=True, overhang=True)
parts = [list(_) for _ in parts]
if len(parts[-1]) < period and 1 < len(parts):
part = parts.pop()
parts[-1].extend(part)
selections = []
for part in parts:
selection = part[0]
for next_selection in part[1:]:
selection = selection + next_selection
selections.append(selection)
return selections
def _reorganize_selections(self, selections):
prototype = (abjad.Note, abjad.Chord)
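        # Trim leading and trailing rests, then shift boundaries forward so every selection starts on a pitched leaf.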
for i, leaf in enumerate(selections[0]):
if isinstance(leaf, prototype):
break
selections[0] = selections[0][i:]
for i, leaf in enumerate(reversed(selections[-1])):
if isinstance(leaf, prototype):
break
if i == 0:
i = None
else:
i = -i
selections[-1] = selections[-1][:i]
if len(selections) == 1:
return selections
for i in range(len(selections) - 1):
selection_one, selection_two = selections[i], selections[i + 1]
for j, leaf in enumerate(selection_two):
if isinstance(leaf, prototype):
break
if 0 < j:
left, right = selection_two[:j], selection_two[j:]
selection_one = selection_one + left
selection_two = right
selections[i] = selection_one
selections[i + 1] = selection_two
return selections
def _get_selections(self, music):
r"""Gets selections and attach components from `music`.
.. container:: example
::
>>> music = abjad.Staff(r'''
... { c'4 d'4 e'4 f'4 }
... { g'4 a'4 b'4 }
... { c''4 }
... ''')
>>> dynamic_expression = consort.DynamicExpression("f")
>>> result = dynamic_expression._get_selections(music)
>>> selections, attach_components = result
>>> for _ in selections:
... _
...
Selection([Note("c'4"), Note("d'4"), Note("e'4"), Note("f'4"), Note("g'4")])
Selection([Note("g'4"), Note("a'4"), Note("b'4"), Note("c''4")])
::
>>> for _ in attach_components:
... _
...
Note("c'4")
Note("g'4")
Note("c''4")
.. container:: example
::
>>> music = abjad.Staff(r'''
... { c'4 d'4 e'4 }
... { f'4 g'4 a'4 }
... { b'4 c''4 }
... ''')
>>> dynamic_expression = consort.DynamicExpression("f")
>>> result = dynamic_expression._get_selections(music)
>>> selections, attach_components = result
>>> for _ in selections:
... _
...
Selection([Note("c'4"), Note("d'4"), Note("e'4"), Note("f'4")])
Selection([Note("f'4"), Note("g'4"), Note("a'4"), Note("b'4")])
Selection([Note("b'4"), Note("c''4")])
::
>>> for _ in attach_components:
... _
...
Note("c'4")
Note("f'4")
Note("b'4")
Note("c''4")
.. container:: example
::
>>> music = abjad.Staff(r'''
... { c'8 d'8 e'8 }
... { f'8 g'8 a'8 }
... { b'32 c''16. }
... ''')
>>> result = dynamic_expression._get_selections(music)
>>> selections, attach_components = result
>>> for _ in selections:
... _
...
Selection([Note("c'8"), Note("d'8"), Note("e'8"), Note("f'8")])
Selection([Note("f'8"), Note("g'8"), Note("a'8"), Note("b'32"), Note("c''16.")])
::
>>> for _ in attach_components:
... _
...
Note("c'8")
Note("f'8")
Note("c''16.")
.. container:: example
::
>>> music = abjad.Staff("{ r4 c'4 r4 } { r4 d'4 r4 } { r4 e' r4 } ")
>>> result = dynamic_expression._get_selections(music)
>>> selections, attach_components = result
>>> for _ in selections:
... _
...
Selection([Note("c'4"), Rest('r4'), Rest('r4'), Note("d'4")])
Selection([Note("d'4"), Rest('r4'), Rest('r4'), Note("e'4")])
::
>>> for _ in attach_components:
... _
...
Note("c'4")
Note("d'4")
Note("e'4")
.. container:: example
::
>>> music = abjad.Staff("{ c'8. } { e'8. } { g'8. }")
>>> dynamic_expression = consort.DynamicExpression(
... division_period=2,
... dynamic_tokens='p ppp',
... start_dynamic_tokens='niente',
... stop_dynamic_tokens='niente',
... )
>>> result = dynamic_expression._get_selections(music)
>>> selections, attach_components = result
>>> for _ in selections:
... _
...
Selection([Note("c'8."), Note("e'8."), Note("g'8.")])
::
>>> for _ in attach_components:
... _
...
Note("c'8.")
Note("g'8.")
"""
#print('---', music)
initial_selections = self._partition_selections(music)
#print(' ', initial_selections)
initial_selections = self._reorganize_selections(initial_selections)
#print(' ', initial_selections)
attach_components = []
selections = []
assert len(initial_selections)
for i, selection in enumerate(initial_selections):
#print(' ', i, selection)
if i < len(initial_selections) - 1:
#print(' ', 'A')
selection = selection + (initial_selections[i + 1][0],)
selections.append(selection)
attach_components.append(selection[0])
elif (
(selection.get_duration() <= abjad.Duration(1, 8) and
1 < len(selections)) or len(selection) == 1
):
#print(' ', 'B')
attach_components.append(selection[-1])
if selections:
#print(' ', 'B1')
selections[-1] = selections[-1] + selection[1:]
elif (
abjad.Duration(1, 8) < (
selection[-1]._get_timespan().start_offset -
selection[0]._get_timespan().start_offset
)
):
#print(' ', 'C')
selections.append(selection)
attach_components.append(selection[0])
attach_components.append(selection[-1])
else:
#print(' ', 'D')
attach_components.append(selection[0])
#print(' ', initial_selections)
#print(' ', attach_components)
return selections, attach_components
def _tokens_to_cyclic_tuple(self, tokens):
import consort
if tokens is None:
return tokens
if isinstance(tokens, str):
tokens = tokens.split()
for token in tokens:
if token == 'niente':
continue
assert token in consort.Dynamic._dynamic_names
assert len(tokens)
tokens = abjad.CyclicTuple(tokens)
return tokens
### PUBLIC PROPERTIES ###
@property
def division_period(self):
return self._division_period
@property
def dynamic_tokens(self):
return self._dynamic_tokens
@property
def only_first(self):
return self._only_first
@property
    def period(self):
        # ``_period`` is never set (and ``__slots__`` forbids it); return the division period instead.
        return self._division_period
@property
def start_dynamic_tokens(self):
return self._start_dynamic_tokens
@property
def stop_dynamic_tokens(self):
return self._stop_dynamic_tokens
@property
def transitions(self):
return self._transitions
@property
def unsustained(self):
return self._unsustained
```
#### File: consort/tools/HarmonicExpression.py
```python
import abjad
from consort.tools.LogicalTieExpression import LogicalTieExpression
class HarmonicExpression(LogicalTieExpression):
r'''A harmonic expression.
::
>>> harmonic_expression = consort.HarmonicExpression()
>>> print(format(harmonic_expression))
consort.tools.HarmonicExpression(
touch_interval=abjad.NamedInterval('+P4'),
)
::
>>> staff = abjad.Staff("c'4 d'4 ~ d'4 e'4")
>>> logical_tie = abjad.inspect(staff[1]).get_logical_tie()
>>> harmonic_expression(logical_tie)
>>> print(format(staff))
\new Staff {
c'4
<
d'
\tweak style #'harmonic
g'
>4 ~
<
d'
\tweak style #'harmonic
g'
>4
e'4
}
'''
### CLASS VARIABLES ###
__slots__ = (
'_touch_interval',
)
### INITIALIZER ###
def __init__(
self,
touch_interval='P4',
):
touch_interval = abjad.NamedInterval(touch_interval)
self._touch_interval = touch_interval
### SPECIAL METHODS ###
def __call__(
self,
logical_tie,
pitch_range=None,
):
for i, leaf in enumerate(logical_tie):
stopped_pitch = leaf.written_pitch
touched_pitch = stopped_pitch.transpose(self.touch_interval)
chord = abjad.Chord(leaf)
chord.written_pitches = [stopped_pitch, touched_pitch]
#chord.note_heads[0].is_parenthesized = True
#chord.note_heads[0].tweak.font_size = -4
chord.note_heads[1].tweak.style = 'harmonic'
self._replace(leaf, chord)
### PUBLIC PROPERTIES ###
@property
def touch_interval(self):
return self._touch_interval
```
#### File: consort/tools/Instrument.py
```python
import abjad
from abjad.tools import instrumenttools
from abjad.tools import lilypondnametools
from abjad.tools import markuptools
from abjad.tools import systemtools
class Instrument(instrumenttools.Instrument):
r'''A fancy instrument indicator.
::
>>> instrument_one = consort.Instrument(
... instrument_name_markup='Bassoon',
... short_instrument_name_markup='Bsn.',
... instrument_change_markup='B!',
... )
>>> instrument_two = consort.Instrument(
... instrument_name_markup='Cuica',
... short_instrument_name_markup='Cu.',
... instrument_change_markup='C!',
... )
::
>>> staff = abjad.Staff("c'4 d'4 e'4 f'4 g'4 a'4 b'4 c''4")
>>> abjad.attach(instrument_one, staff[0])
>>> abjad.attach(instrument_two, staff[2])
>>> abjad.attach(instrument_two, staff[4])
>>> abjad.attach(instrument_one, staff[6])
>>> print(format(staff))
\new Staff {
\set Staff.instrumentName = \markup { Bassoon }
\set Staff.shortInstrumentName = \markup { Bsn. }
c'4
d'4
\set Staff.instrumentName = \markup { Cuica }
\set Staff.shortInstrumentName = \markup { Cu. }
e'4 ^ \markup { C! }
f'4
g'4
a'4
\set Staff.instrumentName = \markup { Bassoon }
\set Staff.shortInstrumentName = \markup { Bsn. }
b'4 ^ \markup { B! }
c''4
}
'''
### CLASS VARIABLES ###
__slots__ = (
'_instrument_change_markup',
)
### INITIALIZER ###
def __init__(
self,
instrument_name=None,
short_instrument_name=None,
instrument_name_markup=None,
short_instrument_name_markup=None,
allowable_clefs=None,
pitch_range=None,
sounding_pitch_of_written_middle_c=None,
instrument_change_markup=None,
):
instrumenttools.Instrument.__init__(
self,
instrument_name=instrument_name,
short_instrument_name=short_instrument_name,
instrument_name_markup=instrument_name_markup,
short_instrument_name_markup=short_instrument_name_markup,
allowable_clefs=allowable_clefs,
pitch_range=pitch_range,
sounding_pitch_of_written_middle_c=sounding_pitch_of_written_middle_c,
)
if instrument_change_markup is not None:
instrument_change_markup = markuptools.Markup(
instrument_change_markup, direction=Up)
self._instrument_change_markup = instrument_change_markup
### PRIVATE METHODS ###
def _get_lilypond_format_bundle(self, component):
bundle = systemtools.LilyPondFormatBundle()
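        # Skip reprinting names when the effective instrument is unchanged; otherwise add the change markup and set the names.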
previous_instrument = component._get_effective(type(self), n=-1)
if isinstance(component, abjad.Container):
previous_leaf = next(abjad.iterate(component).by_leaf())._get_leaf(-1)
else:
previous_leaf = component._get_leaf(-1)
if self.instrument_change_markup and previous_instrument != self:
if previous_instrument or previous_leaf:
bundle.right.markup.append(self.instrument_change_markup)
if previous_instrument == self:
return bundle
if isinstance(component, abjad.Leaf):
if self.instrument_name_markup is not None:
context_setting = lilypondnametools.LilyPondContextSetting(
context_name=self._scope_name,
context_property='instrumentName',
value=abjad.new(self.instrument_name_markup, direction=None),
)
bundle.update(context_setting)
if self.short_instrument_name_markup is not None:
context_setting = lilypondnametools.LilyPondContextSetting(
context_name=self._scope_name,
context_property='shortInstrumentName',
value=abjad.new(self.short_instrument_name_markup, direction=None),
)
bundle.update(context_setting)
else:
if self.instrument_name_markup is not None:
string = 'instrumentName = {}'.format(
self.instrument_name_markup,
)
bundle.context_settings.append(string)
if self.short_instrument_name_markup is not None:
string = 'shortInstrumentName = {}'.format(
self.short_instrument_name_markup,
)
bundle.context_settings.append(string)
return bundle
### PUBLIC PROPERTIES ###
@property
def instrument_change_markup(self):
return self._instrument_change_markup
@property
def instrument_name_markup(self):
return self._instrument_name_markup
@property
def short_instrument_name_markup(self):
return self._short_instrument_name_markup
```
#### File: consort/tools/MusicSetting.py
```python
import abjad
import collections
from abjad.tools import abctools
from abjad.tools import rhythmmakertools
from abjad.tools import systemtools
class MusicSetting(abctools.AbjadValueObject):
r'''A music setting.
::
>>> red_setting = consort.MusicSetting(
... timespan_maker=consort.TaleaTimespanMaker(
... initial_silence_talea=rhythmmakertools.Talea(
... counts=(0, 4),
... denominator=16,
... ),
... playing_talea=rhythmmakertools.Talea(
... counts=(4, 8, 4),
... denominator=16,
... ),
... ),
... viola_rh=consort.tools.MusicSpecifier(),
... violin_1_rh=consort.tools.MusicSpecifier(),
... violin_2_rh=consort.tools.MusicSpecifier(),
... )
>>> print(format(red_setting))
consort.tools.MusicSetting(
timespan_maker=consort.tools.TaleaTimespanMaker(
initial_silence_talea=rhythmmakertools.Talea(
counts=[0, 4],
denominator=16,
),
playing_talea=rhythmmakertools.Talea(
counts=[4, 8, 4],
denominator=16,
),
playing_groupings=(1,),
repeat=True,
silence_talea=rhythmmakertools.Talea(
counts=[4],
denominator=16,
),
step_anchor=Right,
synchronize_groupings=False,
synchronize_step=False,
),
viola_rh=consort.tools.MusicSpecifier(),
violin_1_rh=consort.tools.MusicSpecifier(),
violin_2_rh=consort.tools.MusicSpecifier(),
)
::
>>> segment_timespan = abjad.Timespan(1, 2)
>>> from abjad.tools import templatetools
>>> score_template = consort.StringQuartetScoreTemplate()
>>> timespan_inventory = red_setting(
... layer=1,
... score_template=score_template,
... segment_timespan=segment_timespan,
... )
::
>>> print(format(timespan_inventory))
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(1, 1),
stop_offset=abjad.Offset(5, 4),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Violin 1 Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(1, 1),
stop_offset=abjad.Offset(3, 2),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(5, 4),
stop_offset=abjad.Offset(3, 2),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Violin 2 Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(3, 2),
stop_offset=abjad.Offset(2, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Violin 1 Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(7, 4),
stop_offset=abjad.Offset(2, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(7, 4),
stop_offset=abjad.Offset(2, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Violin 2 Bowing Voice',
),
]
)
::
>>> red_setting = abjad.new(
... red_setting,
... silenced_contexts=[
... 'viola_lh',
... 'cello',
... ],
... )
>>> print(format(red_setting))
consort.tools.MusicSetting(
timespan_maker=consort.tools.TaleaTimespanMaker(
initial_silence_talea=rhythmmakertools.Talea(
counts=[0, 4],
denominator=16,
),
playing_talea=rhythmmakertools.Talea(
counts=[4, 8, 4],
denominator=16,
),
playing_groupings=(1,),
repeat=True,
silence_talea=rhythmmakertools.Talea(
counts=[4],
denominator=16,
),
step_anchor=Right,
synchronize_groupings=False,
synchronize_step=False,
),
silenced_contexts=('cello', 'viola_lh'),
viola_rh=consort.tools.MusicSpecifier(),
violin_1_rh=consort.tools.MusicSpecifier(),
violin_2_rh=consort.tools.MusicSpecifier(),
)
::
>>> timespan_inventory = red_setting(
... layer=1,
... score_template=score_template,
... segment_timespan=segment_timespan,
... )
>>> print(format(timespan_inventory))
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(1, 1),
stop_offset=abjad.Offset(5, 4),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Violin 1 Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(1, 1),
stop_offset=abjad.Offset(3, 2),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(1, 1),
stop_offset=abjad.Offset(2, 1),
layer=1,
voice_name='Cello Bowing Voice',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(1, 1),
stop_offset=abjad.Offset(2, 1),
layer=1,
voice_name='Cello Fingering Voice',
),
consort.tools.SilentTimespan(
start_offset=abjad.Offset(1, 1),
stop_offset=abjad.Offset(2, 1),
layer=1,
voice_name='Viola Fingering Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(5, 4),
stop_offset=abjad.Offset(3, 2),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Violin 2 Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(3, 2),
stop_offset=abjad.Offset(2, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Violin 1 Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(7, 4),
stop_offset=abjad.Offset(2, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(7, 4),
stop_offset=abjad.Offset(2, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Violin 2 Bowing Voice',
),
]
)
'''
### CLASS VARIABLES ###
__slots__ = (
'_music_specifiers',
'_silenced_contexts',
'_timespan_identifier',
'_timespan_maker',
)
### INITIALIZER ###
def __init__(
self,
timespan_identifier=None,
timespan_maker=None,
silenced_contexts=None,
color=None,
**music_specifiers
):
import consort
prototype = (
consort.CompositeMusicSpecifier,
consort.MusicSpecifier,
consort.MusicSpecifierSequence,
str, # for demonstration purposes only
)
for abbreviation, music_specifier in sorted(music_specifiers.items()):
if isinstance(music_specifier, prototype):
continue
elif music_specifier is None:
music_specifier = consort.MusicSpecifier(
rhythm_maker=rhythmmakertools.NoteRhythmMaker(
tie_specifier=rhythmmakertools.TieSpecifier(
tie_across_divisions=True,
),
),
)
music_specifiers[abbreviation] = music_specifier
elif isinstance(music_specifier, collections.Sequence) and \
all(isinstance(x, prototype) for x in music_specifier):
music_specifier = consort.MusicSpecifierSequence(
music_specifiers=music_specifier,
)
music_specifiers[abbreviation] = music_specifier
else:
raise ValueError(music_specifier)
if color is not None:
for abbreviation, music_specifier in sorted(music_specifiers.items()):
if isinstance(music_specifier, consort.MusicSpecifier):
music_specifier = abjad.new(music_specifier, color=color)
elif isinstance(music_specifier, consort.CompositeMusicSpecifier):
primary = abjad.new(
music_specifier.primary_music_specifier,
music_specifiers=[
abjad.new(_, color=color) for _ in
music_specifier.primary_music_specifier
],
)
secondary = abjad.new(
music_specifier.secondary_music_specifier,
music_specifiers=[
abjad.new(_, color=color) for _ in
music_specifier.secondary_music_specifier
],
)
music_specifier = abjad.new(
music_specifier,
primary_music_specifier=primary,
secondary_music_specifier=secondary,
)
elif isinstance(music_specifier, consort.MusicSpecifierSequence):
music_specifier = abjad.new(
music_specifier,
music_specifiers=[
abjad.new(_, color=color) for _ in
music_specifier.music_specifiers
],
)
music_specifiers[abbreviation] = music_specifier
self._music_specifiers = music_specifiers
if silenced_contexts is not None:
silenced_contexts = (str(_) for _ in silenced_contexts)
silenced_contexts = tuple(sorted(set(silenced_contexts)))
self._silenced_contexts = silenced_contexts
if timespan_identifier is not None:
prototype = (
abjad.Timespan,
abjad.TimespanList,
consort.RatioPartsExpression,
)
if not isinstance(timespan_identifier, prototype):
timespan_identifier = \
consort.RatioPartsExpression.from_sequence(
timespan_identifier)
assert isinstance(timespan_identifier, prototype)
self._timespan_identifier = timespan_identifier
if timespan_maker is not None:
assert isinstance(timespan_maker,
consort.TimespanMaker), \
timespan_maker
else:
timespan_maker = consort.FloodedTimespanMaker()
self._timespan_maker = timespan_maker
### SPECIAL METHODS ###
def __call__(
self,
layer=None,
score=None,
score_template=None,
segment_timespan=None,
timespan_inventory=None,
timespan_quantization=None,
):
if score is None:
score = score_template()
if timespan_inventory is None:
timespan_inventory = abjad.TimespanList()
if not self.music_specifiers:
return timespan_inventory
music_specifiers = self.resolve_music_specifiers(
score_template,
score=score,
)
silenced_context_names = self.resolve_silenced_contexts(
score_template,
score=score,
)
target_timespans = self.resolve_target_timespans(
segment_timespan,
timespan_quantization,
)
for i, target_timespan in enumerate(target_timespans):
timespan_maker = self.timespan_maker.rotate(i)
timespan_inventory = timespan_maker(
layer=layer,
music_specifiers=music_specifiers,
silenced_context_names=silenced_context_names,
target_timespan=target_timespan,
timespan_inventory=timespan_inventory,
)
return timespan_inventory
def __getattr__(self, item):
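        # Expose each music specifier as an attribute; 'color' falls back to None when it was never set.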
if item in self.music_specifiers:
return self.music_specifiers[item]
elif item == 'color':
return None
return object.__getattribute__(self, item)
### PRIVATE METHODS ###
def _get_format_specification(self):
agent = systemtools.StorageFormatAgent(self)
names = list(agent.signature_keyword_names)
names.extend(sorted(self.music_specifiers))
template_names = tuple(names)
if 'color' in names:
names.remove('color')
return systemtools.FormatSpecification(
client=self,
storage_format_kwargs_names=names,
template_names=template_names,
)
### PUBLIC METHODS ###
def resolve_music_specifiers(
self,
score_template,
score=None,
):
import consort
assert score_template is not None
if score is None:
score = score_template()
all_abbreviations = score_template.context_name_abbreviations
prototype = (
consort.CompositeMusicSpecifier,
consort.MusicSpecifierSequence,
)
triples = []
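        # Sort specifiers by their context's position in the score so voices are processed top to bottom.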
for abbreviation, music_specifier in self.music_specifiers.items():
if not isinstance(music_specifier, prototype):
music_specifier = consort.MusicSpecifierSequence(
music_specifiers=music_specifier,
)
context_name = all_abbreviations[abbreviation]
context = score[context_name]
context_index = abjad.inspect(context).get_parentage().score_index
context_name = context.name
if isinstance(music_specifier, consort.CompositeMusicSpecifier):
composite_pairs = score_template.composite_context_pairs
one, two = composite_pairs[abbreviation]
primary_voice_name = all_abbreviations[one]
secondary_voice_name = all_abbreviations[two]
music_specifier = abjad.new(
music_specifier,
primary_voice_name=primary_voice_name,
secondary_voice_name=secondary_voice_name,
)
triple = (
context_index,
context_name,
music_specifier,
)
triples.append(triple)
triples.sort(key=lambda x: x[0])
music_specifiers = collections.OrderedDict()
for context_index, context_name, music_specifier in triples:
music_specifiers[context_name] = music_specifier
return music_specifiers
def resolve_silenced_contexts(
self,
score_template,
score=None,
):
assert score_template is not None
if score is None:
score = score_template()
all_abbreviations = score_template.context_name_abbreviations
composite_pairs = getattr(
score_template,
'composite_context_pairs',
{},
)
silenced_context_names = set()
silenced_contexts = self.silenced_contexts or ()
for abbreviation in silenced_contexts:
if abbreviation in composite_pairs:
one, two = composite_pairs[abbreviation]
primary_voice_name = all_abbreviations[one]
secondary_voice_name = all_abbreviations[two]
silenced_context_names.add(primary_voice_name)
silenced_context_names.add(secondary_voice_name)
elif abbreviation in all_abbreviations:
context_name = all_abbreviations[abbreviation]
silenced_context_names.add(context_name)
else:
message = 'Unresolvable context abbreviation: {}'
message = message.format(abbreviation)
raise Exception(message)
return silenced_context_names
def resolve_target_timespans(
self,
segment_timespan,
timespan_quantization=None,
):
import consort
assert isinstance(segment_timespan, abjad.Timespan)
timespan_identifier = self.timespan_identifier
if timespan_identifier is None:
target_timespans = abjad.TimespanList([
segment_timespan,
])
elif isinstance(self.timespan_identifier, abjad.Timespan):
if timespan_identifier.stop_offset == Infinity:
timespan_identifier = abjad.new(
timespan_identifier,
stop_offset=segment_timespan.stop_offset,
)
segment_timespan = abjad.Timespan(start_offset=0)
target_timespans = segment_timespan & timespan_identifier
else:
if isinstance(timespan_identifier, consort.RatioPartsExpression):
mask_timespans = timespan_identifier(segment_timespan)
else:
mask_timespans = timespan_identifier
target_timespans = abjad.TimespanList()
for mask_timespan in mask_timespans:
available_timespans = segment_timespan & mask_timespan
target_timespans.extend(available_timespans)
if timespan_quantization is not None:
target_timespans.round_offsets(
timespan_quantization,
must_be_well_formed=True,
)
return target_timespans
### PUBLIC PROPERTIES ###
@property
def music_specifiers(self):
return self._music_specifiers
@property
def silenced_contexts(self):
return self._silenced_contexts
@property
def timespan_identifier(self):
return self._timespan_identifier
@property
def timespan_maker(self):
return self._timespan_maker
```
#### File: consort/tools/PerformedTimespan.py
```python
import abjad
from abjad.tools import markuptools
from abjad.tools import mathtools
from abjad.tools import systemtools
class PerformedTimespan(abjad.Timespan):
r'''A Consort timespan.
::
>>> timespan = consort.PerformedTimespan()
>>> print(format(timespan))
consort.tools.PerformedTimespan(
start_offset=NegativeInfinity,
stop_offset=Infinity,
)
'''
### CLASS VARIABLES ###
__slots__ = (
'_forbid_fusing',
'_forbid_splitting',
'_divisions',
'_layer',
'_minimum_duration',
'_music',
'_music_specifier',
'_original_start_offset',
'_original_stop_offset',
'_voice_name',
)
### INITIALIZER ###
def __init__(
self,
start_offset=mathtools.NegativeInfinity(),
stop_offset=mathtools.Infinity(),
divisions=None,
forbid_fusing=None,
forbid_splitting=None,
layer=None,
minimum_duration=None,
music=None,
music_specifier=None,
original_start_offset=None,
original_stop_offset=None,
voice_name=None,
):
abjad.Timespan.__init__(
self,
start_offset=start_offset,
stop_offset=stop_offset,
)
if divisions is not None:
divisions = tuple(abjad.Duration(_) for _ in divisions)
assert sum(divisions) == self.duration
self._divisions = divisions
if forbid_fusing is not None:
forbid_fusing = bool(forbid_fusing)
self._forbid_fusing = forbid_fusing
if forbid_splitting is not None:
forbid_splitting = bool(forbid_splitting)
self._forbid_splitting = forbid_splitting
if layer is not None:
layer = int(layer)
self._layer = layer
if minimum_duration is not None:
minimum_duration = abjad.Duration(minimum_duration)
self._minimum_duration = minimum_duration
#if music is not None:
# assert inspect(music).get_duration() == self.duration
self._music = music
#if music_specifier is not None:
# assert isinstance(music_specifier, consort.MusicSpecifier), \
# music_specifier
self._music_specifier = music_specifier
if original_start_offset is not None:
original_start_offset = abjad.Offset(original_start_offset)
else:
original_start_offset = self.start_offset
self._original_start_offset = original_start_offset
if original_stop_offset is not None:
original_stop_offset = abjad.Offset(original_stop_offset)
else:
original_stop_offset = self.stop_offset
self._original_stop_offset = original_stop_offset
self._voice_name = voice_name
### SPECIAL METHODS ###
def __lt__(self, expr):
if abjad.Timespan.__lt__(self, expr):
return True
if not abjad.Timespan.__gt__(self, expr):
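            # Offsets compare equal here; break the tie by voice name so sorting stays deterministic.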
if hasattr(expr, 'voice_name'):
return self.voice_name < expr.voice_name
return False
### PRIVATE METHODS ###
def _as_postscript(
self,
postscript_x_offset,
postscript_y_offset,
postscript_scale,
):
start = (float(self.start_offset) * postscript_scale)
start -= postscript_x_offset
stop = (float(self.stop_offset) * postscript_scale)
stop -= postscript_x_offset
ps = markuptools.Postscript()
ps = ps.moveto(start, postscript_y_offset)
ps = ps.lineto(stop, postscript_y_offset)
ps = ps.stroke()
ps = ps.moveto(start, postscript_y_offset + 0.75)
ps = ps.lineto(start, postscript_y_offset - 0.75)
ps = ps.stroke()
ps = ps.moveto(stop, postscript_y_offset + 0.75)
ps = ps.lineto(stop, postscript_y_offset - 0.75)
ps = ps.stroke()
if self.layer is not None:
ps = ps.moveto(start, postscript_y_offset)
ps = ps.rmoveto(0.25, 0.5)
ps = ps.show(str(self.layer))
return ps
def _get_format_specification(self):
agent = systemtools.StorageFormatAgent(self)
        names = list(agent.signature_keyword_names)
if self.original_start_offset == self.start_offset:
names.remove('original_start_offset')
if self.original_stop_offset == self.stop_offset:
names.remove('original_stop_offset')
return systemtools.FormatSpecification(
storage_format_kwargs_names=names,
)
### PUBLIC METHODS ###
def split_at_offset(self, offset):
offset = abjad.Offset(offset)
result = abjad.TimespanList()
if self._start_offset < offset < self._stop_offset:
left_divisions, right_divisions = None, None
if self.divisions is not None:
left_divisions, right_divisions = abjad.split_sequence(
self.divisions,
[offset - self.start_offset],
overhang=True,
)
left = abjad.new(
self,
start_offset=self._start_offset,
stop_offset=offset,
divisions=left_divisions,
)
right = abjad.new(
self,
start_offset=offset,
stop_offset=self._stop_offset,
divisions=right_divisions,
)
if left.duration:
result.append(left)
if right.duration:
result.append(right)
else:
result.append(abjad.new(self))
return result
### PUBLIC PROPERTIES ###
@property
def divisions(self):
return self._divisions
@property
def forbid_fusing(self):
return self._forbid_fusing
@property
def forbid_splitting(self):
return self._forbid_splitting
@property
def is_left_broken(self):
if self.original_start_offset is not None:
if self.original_start_offset != self.start_offset:
return True
return False
@property
def is_right_broken(self):
if self.original_stop_offset is not None:
if self.original_stop_offset != self.stop_offset:
return True
return False
@property
def layer(self):
return self._layer
@property
def minimum_duration(self):
return self._minimum_duration
@property
def music(self):
return self._music
@property
def music_specifier(self):
return self._music_specifier
@property
def original_start_offset(self):
return self._original_start_offset
@property
def original_stop_offset(self):
return self._original_stop_offset
@property
def voice_name(self):
return self._voice_name
```
#### File: consort/tools/PitchSpecifier.py
```python
import abjad
from abjad.tools import abctools
from abjad.tools import pitchtools
class PitchSpecifier(abctools.AbjadValueObject):
r'''A pitch specifier.
::
>>> pitch_specifier = consort.PitchSpecifier(
... pitch_segments=(
... "c' e' g'",
... "fs' gs'",
... "b",
... ),
... ratio=(1, 2, 3),
... )
>>> print(format(pitch_specifier))
consort.tools.PitchSpecifier(
pitch_segments=(
abjad.PitchSegment(
(
abjad.NamedPitch("c'"),
abjad.NamedPitch("e'"),
abjad.NamedPitch("g'"),
),
item_class=abjad.NamedPitch,
),
abjad.PitchSegment(
(
abjad.NamedPitch("fs'"),
abjad.NamedPitch("gs'"),
),
item_class=abjad.NamedPitch,
),
abjad.PitchSegment(
(
abjad.NamedPitch('b'),
),
item_class=abjad.NamedPitch,
),
),
ratio=abjad.Ratio((1, 2, 3)),
)
Pitch specifiers can be instantiated from a string of pitch names:
::
>>> pitch_specifier = consort.PitchSpecifier("c' e' g' a'")
>>> print(format(pitch_specifier))
consort.tools.PitchSpecifier(
pitch_segments=(
abjad.PitchSegment(
(
abjad.NamedPitch("c'"),
abjad.NamedPitch("e'"),
abjad.NamedPitch("g'"),
abjad.NamedPitch("a'"),
),
item_class=abjad.NamedPitch,
),
),
ratio=abjad.Ratio((1,)),
)
Pitch specifiers can be instantiated from a single pitch:
::
>>> pitch_specifier = consort.PitchSpecifier(abjad.NamedPitch("ds'"))
>>> print(format(pitch_specifier))
consort.tools.PitchSpecifier(
pitch_segments=(
abjad.PitchSegment(
(
abjad.NamedPitch("ds'"),
),
item_class=abjad.NamedPitch,
),
),
ratio=abjad.Ratio((1,)),
)
'''
### CLASS VARIABLES ###
__slots__ = (
'_pitch_segments',
'_ratio',
)
### INITIALIZER ###
def __init__(
self,
pitch_segments=None,
ratio=None,
):
if pitch_segments is not None:
if isinstance(pitch_segments, pitchtools.Pitch):
pitch_segments = abjad.PitchSegment([pitch_segments])
elif isinstance(pitch_segments, str):
pitch_segments = abjad.PitchSegment(pitch_segments)
if isinstance(pitch_segments, abjad.PitchSegment):
pitch_segments = [pitch_segments]
coerced_pitch_segments = []
for pitch_segment in pitch_segments:
pitch_segment = abjad.PitchSegment(
pitch_segment,
item_class=abjad.NamedPitch,
)
if not pitch_segment:
pitch_segment = abjad.PitchSegment("c'")
coerced_pitch_segments.append(pitch_segment)
pitch_segments = tuple(coerced_pitch_segments)
assert len(pitch_segments)
if pitch_segments and not ratio:
ratio = [1] * len(pitch_segments)
if ratio is not None:
ratio = abjad.Ratio([abs(x) for x in ratio])
assert len(ratio) == len(pitch_segments)
self._pitch_segments = pitch_segments
self._ratio = ratio
### PUBLIC METHODS ###
def get_timespans(self, stop_offset):
r'''Gets pitch segment timespans.
::
>>> pitch_specifier = consort.PitchSpecifier(
... pitch_segments=(
... "c' e' g'",
... "fs' g'",
... "b",
... ),
... ratio=(1, 2, 3),
... )
>>> timespans = pitch_specifier.get_timespans(stop_offset=10)
>>> print(format(timespans))
consort.tools.TimespanCollection(
[
abjad.AnnotatedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(5, 3),
annotation=abjad.PitchSegment(
(
abjad.NamedPitch("c'"),
abjad.NamedPitch("e'"),
abjad.NamedPitch("g'"),
),
item_class=abjad.NamedPitch,
),
),
abjad.AnnotatedTimespan(
start_offset=abjad.Offset(5, 3),
stop_offset=abjad.Offset(5, 1),
annotation=abjad.PitchSegment(
(
abjad.NamedPitch("fs'"),
abjad.NamedPitch("g'"),
),
item_class=abjad.NamedPitch,
),
),
abjad.AnnotatedTimespan(
start_offset=abjad.Offset(5, 1),
stop_offset=abjad.Offset(10, 1),
annotation=abjad.PitchSegment(
(
abjad.NamedPitch('b'),
),
item_class=abjad.NamedPitch,
),
),
]
)
'''
import consort
timespans = consort.TimespanCollection()
if not self.ratio or not self.pitch_segments:
pitch_segment = abjad.PitchSegment("c'")
annotated_timespan = abjad.AnnotatedTimespan(
annotation=pitch_segment,
start_offset=0,
stop_offset=stop_offset,
),
timespans.insert(annotated_timespan)
else:
target_timespan = abjad.Timespan(
start_offset=0,
stop_offset=stop_offset,
)
divided_timespans = target_timespan.divide_by_ratio(self.ratio)
for i, timespan in enumerate(divided_timespans):
pitch_segment = self._pitch_segments[i]
annotated_timespan = abjad.AnnotatedTimespan(
annotation=pitch_segment,
start_offset=timespan.start_offset,
stop_offset=timespan.stop_offset,
)
timespans.insert(annotated_timespan)
return timespans
def rotate(self, rotation):
r'''Rotates pitch specifier.
::
>>> pitch_specifier = consort.PitchSpecifier(
... pitch_segments=(
... "c' e' g'",
... "fs' gs'",
... "b",
... ),
... ratio=(1, 2, 3),
... )
>>> rotated_pitch_specifier = pitch_specifier.rotate(1)
>>> print(format(rotated_pitch_specifier))
consort.tools.PitchSpecifier(
pitch_segments=(
abjad.PitchSegment(
(
abjad.NamedPitch('b'),
),
item_class=abjad.NamedPitch,
),
abjad.PitchSegment(
(
abjad.NamedPitch("c'"),
abjad.NamedPitch('f'),
abjad.NamedPitch('a'),
),
item_class=abjad.NamedPitch,
),
abjad.PitchSegment(
(
abjad.NamedPitch("fs'"),
abjad.NamedPitch("e'"),
),
item_class=abjad.NamedPitch,
),
),
ratio=abjad.Ratio((3, 1, 2)),
)
Returns new pitch specifier.
'''
import consort
rotation = int(rotation)
pitch_segments = tuple(
_.rotate(rotation, stravinsky=True)
for _ in self.pitch_segments
)
pitch_segments = consort.rotate(pitch_segments, rotation)
ratio = consort.rotate(self.ratio, rotation)
return abjad.new(
self,
pitch_segments=pitch_segments,
ratio=ratio,
)
def transpose(self, expr=0):
r'''Transposes pitch specifier.
::
>>> pitch_specifier = consort.PitchSpecifier(
... pitch_segments=(
... "c' e' g'",
... "fs' gs'",
... "b",
... ),
... ratio=(1, 2, 3),
... )
>>> transposed_pitch_specifier = pitch_specifier.transpose('M2')
>>> print(format(transposed_pitch_specifier))
consort.tools.PitchSpecifier(
pitch_segments=(
abjad.PitchSegment(
(
abjad.NamedPitch("d'"),
abjad.NamedPitch("fs'"),
abjad.NamedPitch("a'"),
),
item_class=abjad.NamedPitch,
),
abjad.PitchSegment(
(
abjad.NamedPitch("gs'"),
abjad.NamedPitch("as'"),
),
item_class=abjad.NamedPitch,
),
abjad.PitchSegment(
(
abjad.NamedPitch("cs'"),
),
item_class=abjad.NamedPitch,
),
),
ratio=abjad.Ratio((1, 2, 3)),
)
Returns new pitch specifier.
'''
pitch_segments = (_.transpose(expr) for _ in self.pitch_segments)
return abjad.new(self, pitch_segments=pitch_segments)
### PUBLIC PROPERTIES ###
@property
def ratio(self):
return self._ratio
@property
def pitch_segments(self):
return self._pitch_segments
```
#### File: consort/tools/RatioPartsExpression.py
```python
import abjad
from abjad.tools import abctools
class RatioPartsExpression(abctools.AbjadObject):
r'''Ratio parts expression.
.. container:: example
::
>>> expression = consort.RatioPartsExpression(
... ratio=(1, 2, 1),
... parts=(0, 2),
... )
>>> print(format(expression))
consort.tools.RatioPartsExpression(
parts=(0, 2),
ratio=abjad.Ratio((1, 2, 1)),
)
::
>>> timespan = abjad.Timespan(
... start_offset=abjad.Duration(1, 2),
... stop_offset=abjad.Duration(3, 2),
... )
>>> for x in expression(timespan):
... x
...
Timespan(start_offset=Offset(1, 2), stop_offset=Offset(3, 4))
Timespan(start_offset=Offset(5, 4), stop_offset=Offset(3, 2))
.. container:: example
::
>>> expression = consort.RatioPartsExpression(
... ratio=(1, 2, 1),
... parts=(0, 2),
... mask_timespan=abjad.Timespan(
... start_offset=(1, 4),
... ),
... )
::
>>> timespan = abjad.Timespan(0, 4)
>>> for x in expression(timespan):
... x
...
Timespan(start_offset=Offset(1, 4), stop_offset=Offset(1, 1))
Timespan(start_offset=Offset(3, 1), stop_offset=Offset(4, 1))
'''
### CLASS VARIABLES ###
__slots__ = (
'_parts',
'_ratio',
'_mask_timespan',
)
### INITIALIZER ###
def __init__(
self,
parts=0,
ratio=(1, 1),
mask_timespan=None,
):
if not isinstance(ratio, abjad.Ratio):
ratio = abjad.Ratio(ratio)
self._ratio = ratio
if isinstance(parts, int):
parts = (parts,)
assert all(0 <= _ < len(ratio) for _ in parts)
parts = tuple(sorted(set(parts)))
self._parts = parts
if mask_timespan is not None:
assert isinstance(mask_timespan, abjad.Timespan)
self._mask_timespan = mask_timespan
### SPECIAL METHODS ###
def __call__(self, timespan):
assert isinstance(timespan, abjad.Timespan)
divided_timespan = timespan.divide_by_ratio(self.ratio)
timespans = abjad.TimespanList()
for part in self.parts:
timespans.append(divided_timespan[part])
if self.mask_timespan is not None:
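            # abjad's TimespanList.__and__ operates in place, keeping only material that intersects the mask.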
timespans & self.mask_timespan
return timespans
### PUBLIC METHODS ###
@staticmethod
def from_sequence(sequence):
r'''Creates a ratio parts expression from `sequence`.
::
>>> ratio = [-1, 2, -1, 1, -1]
>>> expression = consort.RatioPartsExpression.from_sequence(ratio)
>>> print(format(expression))
consort.tools.RatioPartsExpression(
parts=(1, 3),
ratio=abjad.Ratio((1, 2, 1, 1, 1)),
)
Returns new ratio parts expression.
'''
assert all(sequence)
assert len(sequence)
ratio = []
parts = []
for i, x in enumerate(sequence):
if 0 < x:
parts.append(i)
ratio.append(abs(x))
result = RatioPartsExpression(
parts=parts,
ratio=ratio,
)
return result
### PUBLIC PROPERTIES ###
@property
def mask_timespan(self):
return self._mask_timespan
@property
def parts(self):
return self._parts
@property
def ratio(self):
return self._ratio
```
#### File: consort/tools/rotate.py
```python
def rotate(iterable, n=0):
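    # Rotate right by n: the last n items move to the front (a negative n rotates left).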
n = n or 0
items = []
if len(iterable):
n = n % len(iterable)
for item in iterable[-n:len(iterable)] + iterable[:-n]:
items.append(item)
return type(iterable)(items)
```
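A brief usage sketch for the `rotate` helper above (illustrative only; it assumes the `consort` package is importable and that `rotate` is re-exported at package level, as the `consort.rotate(...)` calls elsewhere in this repository suggest):
```python
import consort

# The last n items move to the front (a right rotation); a negative n rotates left.
assert consort.rotate([1, 2, 3, 4], 1) == [4, 1, 2, 3]
assert consort.rotate((1, 2, 3, 4), -1) == (2, 3, 4, 1)
# The result keeps the input's type, and an empty sequence is returned unchanged.
assert consort.rotate([], 5) == []
```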
#### File: consort/tools/StopTrillSpan.py
```python
import abjad
from abjad.tools import abctools
from abjad.tools import scoretools
from abjad.tools import systemtools
class StopTrillSpan(abctools.AbjadValueObject):
__slots__ = ()
def _get_lilypond_format_bundle(self, component):
import consort
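        # Emit \stopTrillSpan only for leaves inside a grace container whose carrier holds a consort trill spanner.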
parentage = abjad.inspect(component).get_parentage()
prototype = scoretools.GraceContainer
grace_container = None
for parent in parentage:
if isinstance(parent, prototype):
grace_container = parent
break
if grace_container is None:
return
prototype = consort.ConsortTrillSpanner
carrier = grace_container._carrier
spanners = abjad.inspect(carrier).get_spanners(prototype)
if not spanners:
return
bundle = systemtools.LilyPondFormatBundle()
bundle.right.spanner_stops.append(r'\stopTrillSpan')
return bundle
```
#### File: consort/tools/TimespanCollection.py
```python
from abjad.tools import abctools
from abjad.tools import systemtools
class TimespanCollection(abctools.AbjadObject):
r'''A mutable always-sorted collection of timespans.
::
>>> timespans = (
... abjad.Timespan(0, 3),
... abjad.Timespan(1, 3),
... abjad.Timespan(1, 2),
... abjad.Timespan(2, 5),
... abjad.Timespan(6, 9),
... )
>>> timespan_collection = consort.TimespanCollection(timespans)
'''
### CLASS VARIABLES ###
__slots__ = (
'_root_node',
)
### INITIALIZER ###
def __init__(
self,
timespans=None,
):
self._root_node = None
if timespans is not None and timespans:
self.insert(timespans)
### SPECIAL METHODS ###
def __contains__(self, timespan):
r'''Is true if this timespan collection contains `timespan`. Otherwise
false.
::
>>> timespans = (
... abjad.Timespan(0, 3),
... abjad.Timespan(1, 3),
... abjad.Timespan(1, 2),
... abjad.Timespan(2, 5),
... abjad.Timespan(6, 9),
... )
>>> timespan_collection = consort.TimespanCollection(timespans)
::
>>> timespans[0] in timespan_collection
True
::
>>> abjad.Timespan(-1, 100) in timespan_collection
False
Returns boolean.
'''
assert TimespanCollection._is_timespan(timespan)
candidates = self.find_timespans_starting_at(timespan.start_offset)
result = timespan in candidates
return result
def __getitem__(self, i):
r'''Gets timespan at index `i`.
::
>>> timespans = (
... abjad.Timespan(0, 3),
... abjad.Timespan(1, 3),
... abjad.Timespan(1, 2),
... abjad.Timespan(2, 5),
... abjad.Timespan(6, 9),
... )
>>> timespan_collection = consort.TimespanCollection(timespans)
::
>>> timespan_collection[-1]
Timespan(start_offset=Offset(6, 1), stop_offset=Offset(9, 1))
::
>>> for timespan in timespan_collection[:3]:
... timespan
...
Timespan(start_offset=Offset(0, 1), stop_offset=Offset(3, 1))
Timespan(start_offset=Offset(1, 1), stop_offset=Offset(2, 1))
Timespan(start_offset=Offset(1, 1), stop_offset=Offset(3, 1))
Returns timespan or timespans.
'''
def recurse_by_index(node, index):
if node.node_start_index <= index < node.node_stop_index:
return node.payload[index - node.node_start_index]
elif node.left_child and index < node.node_start_index:
return recurse_by_index(node.left_child, index)
elif node.right_child and node.node_stop_index <= index:
return recurse_by_index(node.right_child, index)
def recurse_by_slice(node, start, stop):
result = []
if node is None:
return result
if start < node.node_start_index and node.left_child:
result.extend(recurse_by_slice(node.left_child, start, stop))
if start < node.node_stop_index and node.node_start_index < stop:
node_start = start - node.node_start_index
if node_start < 0:
node_start = 0
node_stop = stop - node.node_start_index
result.extend(node.payload[node_start:node_stop])
if node.node_stop_index <= stop and node.right_child:
result.extend(recurse_by_slice(node.right_child, start, stop))
return result
if isinstance(i, int):
if self._root_node is None:
raise IndexError
if i < 0:
i = self._root_node.subtree_stop_index + i
if i < 0 or self._root_node.subtree_stop_index <= i:
raise IndexError
return recurse_by_index(self._root_node, i)
elif isinstance(i, slice):
if self._root_node is None:
return []
indices = i.indices(self._root_node.subtree_stop_index)
start, stop = indices[0], indices[1]
return recurse_by_slice(self._root_node, start, stop)
raise TypeError('Indices must be integers or slices, got {}'.format(i))
def __iter__(self):
r'''Iterates timespans in this timespan collection.
::
>>> timespans = (
... abjad.Timespan(0, 3),
... abjad.Timespan(1, 3),
... abjad.Timespan(1, 2),
... abjad.Timespan(2, 5),
... abjad.Timespan(6, 9),
... )
>>> timespan_collection = consort.TimespanCollection(timespans)
::
>>> for timespan in timespan_collection:
... timespan
...
Timespan(start_offset=Offset(0, 1), stop_offset=Offset(3, 1))
Timespan(start_offset=Offset(1, 1), stop_offset=Offset(2, 1))
Timespan(start_offset=Offset(1, 1), stop_offset=Offset(3, 1))
Timespan(start_offset=Offset(2, 1), stop_offset=Offset(5, 1))
Timespan(start_offset=Offset(6, 1), stop_offset=Offset(9, 1))
Returns generator.
'''
def recurse(node):
if node is not None:
if node.left_child is not None:
for timespan in recurse(node.left_child):
yield timespan
for timespan in node.payload:
yield timespan
if node.right_child is not None:
for timespan in recurse(node.right_child):
yield timespan
return recurse(self._root_node)
def __len__(self):
r'''Gets length of this timespan collection.
::
>>> timespans = (
... abjad.Timespan(0, 3),
... abjad.Timespan(1, 3),
... abjad.Timespan(1, 2),
... abjad.Timespan(2, 5),
... abjad.Timespan(6, 9),
... )
>>> timespan_collection = consort.TimespanCollection(timespans)
::
>>> len(timespan_collection)
5
Returns integer.
'''
if self._root_node is None:
return 0
return self._root_node.subtree_stop_index
def __setitem__(self, i, new):
r'''Sets timespans at index `i` to `new`.
::
>>> timespans = (
... abjad.Timespan(0, 3),
... abjad.Timespan(1, 3),
... abjad.Timespan(1, 2),
... abjad.Timespan(2, 5),
... abjad.Timespan(6, 9),
... )
>>> timespan_collection = consort.TimespanCollection(timespans)
::
>>> timespan_collection[:3] = [abjad.Timespan(100, 200)]
Returns none.
'''
if isinstance(i, (int, slice)):
old = self[i]
self.remove(old)
self.insert(new)
else:
message = 'Indices must be ints or slices, got {}'.format(i)
raise TypeError(message)
def __sub__(self, timespan):
r'''Deletes material that intersects `timespan`:
::
>>> timespan_collection = consort.TimespanCollection([
... abjad.Timespan(0, 16),
... abjad.Timespan(5, 12),
... abjad.Timespan(-2, 8),
... ])
::
>>> timespan = abjad.Timespan(5, 10)
>>> result = timespan_collection - timespan
::
>>> print(format(timespan_collection))
consort.tools.TimespanCollection(
[
abjad.Timespan(
start_offset=abjad.Offset(-2, 1),
stop_offset=abjad.Offset(5, 1),
),
abjad.Timespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(5, 1),
),
abjad.Timespan(
start_offset=abjad.Offset(10, 1),
stop_offset=abjad.Offset(12, 1),
),
abjad.Timespan(
start_offset=abjad.Offset(10, 1),
stop_offset=abjad.Offset(16, 1),
),
]
)
Operates in place and returns timespan collection.
'''
intersecting_timespans = self.find_timespans_intersecting_timespan(
timespan)
self.remove(intersecting_timespans)
for intersecting_timespan in intersecting_timespans:
for x in (intersecting_timespan - timespan):
self.insert(x)
return self
### PRIVATE METHODS ###
def _insert_node(self, node, start_offset):
import consort
if node is None:
return consort.TimespanCollectionNode(start_offset)
if start_offset < node.start_offset:
node.left_child = self._insert_node(node.left_child, start_offset)
elif node.start_offset < start_offset:
node.right_child = self._insert_node(node.right_child, start_offset)
return self._rebalance(node)
def _insert_timespan(self, timespan):
self._root_node = self._insert_node(
self._root_node,
timespan.start_offset,
)
node = self._search(self._root_node, timespan.start_offset)
node.payload.append(timespan)
node.payload.sort(key=lambda x: x.stop_offset)
@staticmethod
def _is_timespan(expr):
if hasattr(expr, 'start_offset') and hasattr(expr, 'stop_offset'):
return True
return False
def _rebalance(self, node):
if node is not None:
if 1 < node.balance:
if 0 <= node.right_child.balance:
node = self._rotate_right_right(node)
else:
node = self._rotate_right_left(node)
elif node.balance < -1:
if node.left_child.balance <= 0:
node = self._rotate_left_left(node)
else:
node = self._rotate_left_right(node)
assert -1 <= node.balance <= 1
return node
def _remove_node(self, node, start_offset):
if node is not None:
if node.start_offset == start_offset:
if node.left_child and node.right_child:
next_node = node.right_child
while next_node.left_child:
next_node = next_node.left_child
node._start_offset = next_node._start_offset
node._payload = next_node._payload
node.right_child = self._remove_node(
node.right_child,
next_node.start_offset,
)
else:
node = node.left_child or node.right_child
elif start_offset < node.start_offset:
node.left_child = self._remove_node(
node.left_child,
start_offset,
)
elif node.start_offset < start_offset:
node.right_child = self._remove_node(
node.right_child,
start_offset,
)
return self._rebalance(node)
def _remove_timespan(self, timespan, old_start_offset=None):
start_offset = timespan.start_offset
if old_start_offset is not None:
start_offset = old_start_offset
node = self._search(self._root_node, start_offset)
if node is None:
return
if timespan in node.payload:
node.payload.remove(timespan)
if not node.payload:
self._root_node = self._remove_node(
self._root_node,
start_offset,
)
if isinstance(timespan, TimespanCollection):
timespan._parents.remove(self)
def _rotate_left_left(self, node):
next_node = node.left_child
node.left_child = next_node.right_child
next_node.right_child = node
return next_node
def _rotate_left_right(self, node):
node.left_child = self._rotate_right_right(node.left_child)
next_node = self._rotate_left_left(node)
return next_node
def _rotate_right_left(self, node):
node.right_child = self._rotate_left_left(node.right_child)
next_node = self._rotate_right_right(node)
return next_node
def _rotate_right_right(self, node):
next_node = node.right_child
node.right_child = next_node.left_child
next_node.left_child = node
return next_node
def _search(self, node, start_offset):
if node is not None:
if node.start_offset == start_offset:
return node
elif node.left_child and start_offset < node.start_offset:
return self._search(node.left_child, start_offset)
elif node.right_child and node.start_offset < start_offset:
return self._search(node.right_child, start_offset)
return None
def _update_indices(
self,
node,
):
def recurse(
node,
parent_stop_index=None,
):
if node is None:
return
if node.left_child is not None:
recurse(
node.left_child,
parent_stop_index=parent_stop_index,
)
node._node_start_index = node.left_child.subtree_stop_index
node._subtree_start_index = node.left_child.subtree_start_index
elif parent_stop_index is None:
node._node_start_index = 0
node._subtree_start_index = 0
else:
node._node_start_index = parent_stop_index
node._subtree_start_index = parent_stop_index
node._node_stop_index = node.node_start_index + len(node.payload)
node._subtree_stop_index = node.node_stop_index
if node.right_child is not None:
recurse(
node.right_child,
parent_stop_index=node.node_stop_index,
)
node._subtree_stop_index = node.right_child.subtree_stop_index
recurse(node)
def _update_offsets(
self,
node,
):
if node is None:
return
stop_offset_low = min(x.stop_offset for x in node.payload)
stop_offset_high = max(x.stop_offset for x in node.payload)
if node.left_child:
left_child = self._update_offsets(
node.left_child,
)
if left_child.stop_offset_low < stop_offset_low:
stop_offset_low = left_child.stop_offset_low
if stop_offset_high < left_child.stop_offset_high:
stop_offset_high = left_child.stop_offset_high
if node.right_child:
right_child = self._update_offsets(
node.right_child,
)
if right_child.stop_offset_low < stop_offset_low:
stop_offset_low = right_child.stop_offset_low
if stop_offset_high < right_child.stop_offset_high:
stop_offset_high = right_child.stop_offset_high
node._stop_offset_low = stop_offset_low
node._stop_offset_high = stop_offset_high
return node
def _get_format_specification(self):
values = []
timespans = [x for x in self]
if timespans:
values.append(timespans)
names = []
return systemtools.FormatSpecification(
client=self,
storage_format_args_values=values,
storage_format_kwargs_names=names,
)
### PUBLIC METHODS ###
def find_timespans_starting_at(self, offset):
results = []
node = self._search(self._root_node, offset)
if node is not None:
results.extend(node.payload)
return tuple(results)
def find_timespans_stopping_at(self, offset):
def recurse(node, offset):
result = []
if node is not None:
if node.stop_offset_low <= offset <= node.stop_offset_high:
for timespan in node.payload:
if timespan.stop_offset == offset:
result.append(timespan)
if node.left_child is not None:
result.extend(recurse(node.left_child, offset))
if node.right_child is not None:
result.extend(recurse(node.right_child, offset))
return result
results = recurse(self._root_node, offset)
results.sort(key=lambda x: (x.start_offset, x.stop_offset))
return tuple(results)
def find_timespans_overlapping_offset(self, offset):
r'''Finds timespans overlapping `offset`.
::
>>> timespans = (
... abjad.Timespan(0, 3),
... abjad.Timespan(1, 3),
... abjad.Timespan(1, 2),
... abjad.Timespan(2, 5),
... abjad.Timespan(6, 9),
... )
>>> timespan_collection = consort.TimespanCollection(timespans)
::
>>> for x in timespan_collection.find_timespans_overlapping_offset(1.5):
... x
...
Timespan(start_offset=Offset(0, 1), stop_offset=Offset(3, 1))
Timespan(start_offset=Offset(1, 1), stop_offset=Offset(2, 1))
Timespan(start_offset=Offset(1, 1), stop_offset=Offset(3, 1))
Returns tuple of 0 or more timespans.
'''
def recurse(node, offset, indent=0):
result = []
if node is not None:
if node.start_offset < offset < node.stop_offset_high:
result.extend(recurse(node.left_child, offset, indent + 1))
for timespan in node.payload:
if offset < timespan.stop_offset:
result.append(timespan)
result.extend(recurse(node.right_child, offset, indent + 1))
elif offset <= node.start_offset:
result.extend(recurse(node.left_child, offset, indent + 1))
return result
results = recurse(self._root_node, offset)
results.sort(key=lambda x: (x.start_offset, x.stop_offset))
return tuple(results)
def find_timespans_intersecting_timespan(self, timespan):
r'''Finds timespans intersecting `timespan`.
::
>>> timespans = (
... abjad.Timespan(0, 3),
... abjad.Timespan(1, 3),
... abjad.Timespan(1, 2),
... abjad.Timespan(2, 5),
... abjad.Timespan(6, 9),
... )
>>> timespan_collection = consort.TimespanCollection(timespans)
::
>>> timespan = abjad.Timespan(2, 4)
>>> for x in timespan_collection.find_timespans_intersecting_timespan(timespan):
... x
...
Timespan(start_offset=Offset(0, 1), stop_offset=Offset(3, 1))
Timespan(start_offset=Offset(1, 1), stop_offset=Offset(3, 1))
Timespan(start_offset=Offset(2, 1), stop_offset=Offset(5, 1))
Returns tuple of 0 or more timespans.
'''
def recurse(node, timespan):
result = []
if node is not None:
if timespan.intersects_timespan(node):
result.extend(recurse(node.left_child, timespan))
for candidate_timespan in node.payload:
if candidate_timespan.intersects_timespan(timespan):
result.append(candidate_timespan)
result.extend(recurse(node.right_child, timespan))
elif (timespan.start_offset <= node.start_offset) or \
(timespan.stop_offset <= node.start_offset):
result.extend(recurse(node.left_child, timespan))
return result
results = recurse(self._root_node, timespan)
results.sort(key=lambda x: (x.start_offset, x.stop_offset))
return tuple(results)
def get_simultaneity_at(self, offset):
r'''Gets simultaneity at `offset`.
::
>>> timespans = (
... abjad.Timespan(0, 3),
... abjad.Timespan(1, 3),
... abjad.Timespan(1, 2),
... abjad.Timespan(2, 5),
... abjad.Timespan(6, 9),
... )
>>> timespan_collection = consort.TimespanCollection(timespans)
::
>>> timespan_collection.get_simultaneity_at(1)
<TimespanSimultaneity(1 <<3>>)>
::
>>> timespan_collection.get_simultaneity_at(6.5)
<TimespanSimultaneity(6.5 <<1>>)>
'''
import consort
start_timespans = self.find_timespans_starting_at(offset)
stop_timespans = self.find_timespans_stopping_at(offset)
overlap_timespans = self.find_timespans_overlapping_offset(offset)
simultaneity = consort.TimespanSimultaneity(
timespan_collection=self,
overlap_timespans=overlap_timespans,
start_timespans=start_timespans,
start_offset=offset,
stop_timespans=stop_timespans,
)
return simultaneity
def get_start_offset_after(self, offset):
r'''Gets start offset in this timespan collection after `offset`.
::
>>> timespans = (
... abjad.Timespan(0, 3),
... abjad.Timespan(1, 3),
... abjad.Timespan(1, 2),
... abjad.Timespan(2, 5),
... abjad.Timespan(6, 9),
... )
>>> timespan_collection = consort.TimespanCollection(timespans)
::
>>> timespan_collection.get_start_offset_after(-1)
Offset(0, 1)
::
>>> timespan_collection.get_start_offset_after(0)
Offset(1, 1)
::
>>> timespan_collection.get_start_offset_after(1)
Offset(2, 1)
::
>>> timespan_collection.get_start_offset_after(2)
Offset(6, 1)
::
>>> timespan_collection.get_start_offset_after(6) is None
True
'''
def recurse(node, offset):
if node is None:
return None
result = None
if node.start_offset <= offset and node.right_child:
result = recurse(node.right_child, offset)
elif offset < node.start_offset:
result = recurse(node.left_child, offset) or node
return result
result = recurse(self._root_node, offset)
if result is None:
return None
return result.start_offset
def get_start_offset_before(self, offset):
r'''Gets start offset in this timespan collection before `offset`.
::
>>> timespans = (
... abjad.Timespan(0, 3),
... abjad.Timespan(1, 3),
... abjad.Timespan(1, 2),
... abjad.Timespan(2, 5),
... abjad.Timespan(6, 9),
... )
>>> timespan_collection = consort.TimespanCollection(timespans)
::
>>> timespan_collection.get_start_offset_before(7)
Offset(6, 1)
::
>>> timespan_collection.get_start_offset_before(6)
Offset(2, 1)
::
>>> timespan_collection.get_start_offset_before(2)
Offset(1, 1)
::
>>> timespan_collection.get_start_offset_before(1)
Offset(0, 1)
::
>>> timespan_collection.get_start_offset_before(0) is None
True
'''
def recurse(node, offset):
if node is None:
return None
result = None
if node.start_offset < offset:
result = recurse(node.right_child, offset) or node
elif offset <= node.start_offset and node.left_child:
result = recurse(node.left_child, offset)
return result
result = recurse(self._root_node, offset)
if result is None:
return None
return result.start_offset
def index(self, timespan):
assert self._is_timespan(timespan)
node = self._search(self._root_node, timespan.start_offset)
if node is None or timespan not in node.payload:
raise ValueError('{} not in timespan collection.'.format(timespan))
index = node.payload.index(timespan) + node.node_start_index
return index
def insert(self, timespans):
r'''Inserts `timespans` into this timespan collection.
::
>>> timespan_collection = consort.TimespanCollection()
>>> timespan_collection.insert(abjad.Timespan(1, 3))
>>> timespan_collection.insert((
... abjad.Timespan(0, 4),
... abjad.Timespan(2, 6),
... ))
::
>>> for x in timespan_collection:
... x
...
Timespan(start_offset=Offset(0, 1), stop_offset=Offset(4, 1))
Timespan(start_offset=Offset(1, 1), stop_offset=Offset(3, 1))
Timespan(start_offset=Offset(2, 1), stop_offset=Offset(6, 1))
`timespans` may be a single timespan or an iterable of timespans.
Returns none.
'''
if self._is_timespan(timespans):
timespans = [timespans]
for timespan in timespans:
if not self._is_timespan(timespan):
continue
self._insert_timespan(timespan)
self._update_indices(self._root_node)
self._update_offsets(self._root_node)
def iterate_simultaneities(
self,
reverse=False,
):
r'''Iterates simultaneities in this timespan collection.
::
>>> timespans = (
... abjad.Timespan(0, 3),
... abjad.Timespan(1, 3),
... abjad.Timespan(1, 2),
... abjad.Timespan(2, 5),
... abjad.Timespan(6, 9),
... )
>>> timespan_collection = consort.TimespanCollection(timespans)
::
>>> for x in timespan_collection.iterate_simultaneities():
... x
...
<TimespanSimultaneity(0 <<1>>)>
<TimespanSimultaneity(1 <<3>>)>
<TimespanSimultaneity(2 <<3>>)>
<TimespanSimultaneity(6 <<1>>)>
::
>>> for x in timespan_collection.iterate_simultaneities(
... reverse=True):
... x
...
<TimespanSimultaneity(6 <<1>>)>
<TimespanSimultaneity(2 <<3>>)>
<TimespanSimultaneity(1 <<3>>)>
<TimespanSimultaneity(0 <<1>>)>
Returns generator.
'''
if reverse:
start_offset = self.latest_start_offset
simultaneity = self.get_simultaneity_at(start_offset)
yield simultaneity
simultaneity = simultaneity.previous_simultaneity
while simultaneity is not None:
yield simultaneity
simultaneity = simultaneity.previous_simultaneity
else:
start_offset = self.earliest_start_offset
simultaneity = self.get_simultaneity_at(start_offset)
yield simultaneity
simultaneity = simultaneity.next_simultaneity
while simultaneity is not None:
yield simultaneity
simultaneity = simultaneity.next_simultaneity
def iterate_simultaneities_nwise(
self,
n=3,
reverse=False,
):
r'''Iterates simultaneities in this timespan collection in groups of
`n`.
::
>>> timespans = (
... abjad.Timespan(0, 3),
... abjad.Timespan(1, 3),
... abjad.Timespan(1, 2),
... abjad.Timespan(2, 5),
... abjad.Timespan(6, 9),
... )
>>> timespan_collection = consort.TimespanCollection(timespans)
::
>>> for x in timespan_collection.iterate_simultaneities_nwise(n=2):
... x
...
(<TimespanSimultaneity(0 <<1>>)>, <TimespanSimultaneity(1 <<3>>)>)
(<TimespanSimultaneity(1 <<3>>)>, <TimespanSimultaneity(2 <<3>>)>)
(<TimespanSimultaneity(2 <<3>>)>, <TimespanSimultaneity(6 <<1>>)>)
::
>>> for x in timespan_collection.iterate_simultaneities_nwise(
... n=2, reverse=True):
... x
...
(<TimespanSimultaneity(2 <<3>>)>, <TimespanSimultaneity(6 <<1>>)>)
(<TimespanSimultaneity(1 <<3>>)>, <TimespanSimultaneity(2 <<3>>)>)
(<TimespanSimultaneity(0 <<1>>)>, <TimespanSimultaneity(1 <<3>>)>)
Returns generator.
'''
n = int(n)
assert 0 < n
if reverse:
for simultaneity in self.iterate_simultaneities(reverse=True):
simultaneities = [simultaneity]
while len(simultaneities) < n:
next_simultaneity = simultaneities[-1].next_simultaneity
if next_simultaneity is None:
break
simultaneities.append(next_simultaneity)
if len(simultaneities) == n:
yield tuple(simultaneities)
else:
for simultaneity in self.iterate_simultaneities():
simultaneities = [simultaneity]
while len(simultaneities) < n:
previous_simultaneity = simultaneities[-1].previous_simultaneity
if previous_simultaneity is None:
break
simultaneities.append(previous_simultaneity)
if len(simultaneities) == n:
yield tuple(reversed(simultaneities))
def remove(self, timespans):
r'''Removes timespans from this timespan collection.
::
>>> timespans = (
... abjad.Timespan(0, 3),
... abjad.Timespan(1, 3),
... abjad.Timespan(1, 2),
... abjad.Timespan(2, 5),
... abjad.Timespan(6, 9),
... )
>>> timespan_collection = consort.TimespanCollection(timespans)
::
>>> timespan_collection.remove(timespans[1:-1])
::
>>> for timespan in timespan_collection:
... timespan
...
Timespan(start_offset=Offset(0, 1), stop_offset=Offset(3, 1))
Timespan(start_offset=Offset(6, 1), stop_offset=Offset(9, 1))
'''
if self._is_timespan(timespans):
timespans = [timespans]
for timespan in timespans:
if not self._is_timespan(timespan):
continue
self._remove_timespan(timespan)
self._update_indices(self._root_node)
self._update_offsets(self._root_node)
### PUBLIC PROPERTIES ###
@property
def all_offsets(self):
offsets = set()
for timespan in self:
offsets.add(timespan.start_offset)
offsets.add(timespan.stop_offset)
return tuple(sorted(offsets))
@property
def all_start_offsets(self):
start_offsets = set()
for timespan in self:
start_offsets.add(timespan.start_offset)
return tuple(sorted(start_offsets))
@property
def all_stop_offsets(self):
stop_offsets = set()
for timespan in self:
stop_offsets.add(timespan.stop_offset)
return tuple(sorted(stop_offsets))
@property
def earliest_start_offset(self):
def recurse(node):
if node.left_child is not None:
return recurse(node.left_child)
return node.start_offset
if self._root_node is not None:
return recurse(self._root_node)
return float('-inf')
@property
def earliest_stop_offset(self):
if self._root_node is not None:
return self._root_node.stop_offset_low
return float('inf')
@property
def latest_start_offset(self):
def recurse(node):
if node.right_child is not None:
return recurse(node.right_child)
return node.start_offset
if self._root_node is not None:
return recurse(self._root_node)
return float('-inf')
@property
def latest_stop_offset(self):
if self._root_node is not None:
return self._root_node.stop_offset_high
return float('inf')
@property
def start_offset(self):
return self.earliest_start_offset
@property
def stop_offset(self):
return self.latest_stop_offset
```
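A straightforward way to sanity-check the tree-backed queries is to compare them against a naive linear scan over the same timespans. A sketch of that comparison, assuming `abjad` and `consort` are importable as in the doctests above; `naive_overlapping` is a hypothetical helper written here only for the comparison:
```python
import abjad
import consort

timespans = [
    abjad.Timespan(0, 3),
    abjad.Timespan(1, 3),
    abjad.Timespan(1, 2),
    abjad.Timespan(2, 5),
    abjad.Timespan(6, 9),
]
collection = consort.TimespanCollection(timespans)

def naive_overlapping(timespans, offset):
    # Brute-force equivalent of find_timespans_overlapping_offset():
    # keep timespans that strictly contain the offset, sorted the same way.
    return sorted(
        (t for t in timespans if t.start_offset < offset < t.stop_offset),
        key=lambda t: (t.start_offset, t.stop_offset),
    )

for offset in [0, 1, 1.5, 2, 4, 6, 7]:
    expected = naive_overlapping(timespans, offset)
    assert list(collection.find_timespans_overlapping_offset(offset)) == expected
```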
#### File: consort/tools/TimespanInventoryMapping.py
```python
import abjad
class TimespanListMapping(dict):
### SPECIAL METHODS ###
def __illustrate__(self, range_=None, scale=None):
timespan_inventory = abjad.TimespanList()
for key, value in self.items():
timespan_inventory.extend(value)
return timespan_inventory.__illustrate__(
key='voice_name',
range_=range_,
scale=scale,
)
```
#### File: consort/tools/TimespanSpecifier.py
```python
import abjad
from abjad.tools import abctools
class TimespanSpecifier(abctools.AbjadValueObject):
### CLASS VARIABLES ###
__slots__ = (
'_forbid_fusing',
'_forbid_splitting',
'_minimum_duration',
)
### INITIALIZER ###
def __init__(
self,
forbid_fusing=None,
forbid_splitting=None,
minimum_duration=None,
):
if forbid_fusing is not None:
forbid_fusing = bool(forbid_fusing)
self._forbid_fusing = forbid_fusing
if forbid_splitting is not None:
forbid_splitting = bool(forbid_splitting)
self._forbid_splitting = forbid_splitting
if minimum_duration is not None:
minimum_duration = abjad.Duration(minimum_duration)
self._minimum_duration = minimum_duration
### PUBLIC PROPERTIES ###
@property
def forbid_fusing(self):
return self._forbid_fusing
@property
def forbid_splitting(self):
return self._forbid_splitting
@property
def minimum_duration(self):
return self._minimum_duration
```
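Construction only coerces the two forbid flags through `bool()` and the minimum duration through `abjad.Duration()`. A small sketch, assuming `consort` exposes `TimespanSpecifier` at the package level like the other tools classes:
```python
import abjad
import consort

specifier = consort.TimespanSpecifier(
    forbid_fusing=1,            # coerced to bool -> True
    minimum_duration=(1, 8),    # coerced to abjad.Duration(1, 8)
)
assert specifier.forbid_fusing is True
assert specifier.forbid_splitting is None
assert specifier.minimum_duration == abjad.Duration(1, 8)
```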
#### File: consort/test/test_consort___copy__.py
```python
import abjad
import copy
import inspect
import pytest
from abjad.tools import documentationtools
from abjad.tools import scoretools
classes = documentationtools.list_all_classes('consort')
@pytest.mark.parametrize('class_', classes)
def test_consort___copy___01(class_):
r'''All concrete classes with a storage format can copy.
'''
if ('_storage_format_specification' not in dir(class_) or
'_get_format_specification' not in dir(class_)):
return
if inspect.isabstract(class_):
return
instance_one = class_()
instance_two = copy.copy(instance_one)
instance_one_format = format(instance_one, 'storage')
instance_two_format = format(instance_two, 'storage')
if not issubclass(class_, abjad.Container):
assert instance_one_format == instance_two_format
# TODO: eventually this second assert should also pass
#assert instance_one == instance_two
```
#### File: consort/test/test_SegmentMaker_rehearsal_mark.py
```python
import abjad
import collections
import consort
from abjad.tools import systemtools
from abjad.tools import templatetools
segment_metadata = collections.OrderedDict(
segment_count=3,
segment_number=2,
)
def test_SegmentMaker_rehearsal_mark_01():
segment_maker = consort.SegmentMaker(
discard_final_silence=True,
desired_duration_in_seconds=4,
omit_stylesheets=True,
score_template=templatetools.GroupedRhythmicStavesScoreTemplate(
staff_count=1,
),
settings=None,
tempo=abjad.MetronomeMark((1, 4), 60),
permitted_time_signatures=((4, 4),),
)
lilypond_file, metadata = segment_maker(
segment_metadata=segment_metadata,
)
assert format(lilypond_file) == abjad.String.normalize(
r'''
\version "2.19.65"
\language "english"
#(ly:set-option 'relative-includes #t)
\score {
\context Score = "Grouped Rhythmic Staves Score" <<
\tag #'time
\context TimeSignatureContext = "Time Signature Context" {
{
\tempo 4=60
\time 4/4
\mark \markup {
\box
\pad-around
#0.5
\caps
A
}
s1 * 1
}
}
\context StaffGroup = "Grouped Rhythmic Staves Staff Group" <<
\context RhythmicStaff = "Staff 1" {
\bar "||"
\context Voice = "Voice 1" {
{
% [Voice 1] Measure 1
{
\stopStaff
\once \override Staff.StaffSymbol.line-positions = #'(0)
\startStaff
R1 * 1
\stopStaff
\startStaff
}
}
}
}
>>
>>
}
''')
def test_SegmentMaker_rehearsal_mark_02():
segment_maker = consort.SegmentMaker(
discard_final_silence=True,
desired_duration_in_seconds=4,
name='A transitional segment',
omit_stylesheets=True,
score_template=templatetools.GroupedRhythmicStavesScoreTemplate(
staff_count=1,
),
settings=None,
tempo=abjad.MetronomeMark((1, 4), 60),
permitted_time_signatures=((4, 4),),
)
lilypond_file, metadata = segment_maker(
segment_metadata=segment_metadata,
)
assert format(lilypond_file) == abjad.String.normalize(
r'''
\version "2.19.65"
\language "english"
#(ly:set-option 'relative-includes #t)
\score {
\context Score = "Grouped Rhythmic Staves Score" <<
\tag #'time
\context TimeSignatureContext = "Time Signature Context" {
{
\tempo 4=60
\time 4/4
\mark \markup {
\concat
{
\box
\pad-around
#0.5
\caps
A
" "
\fontsize
#-3
"A transitional segment"
}
}
s1 * 1
}
}
\context StaffGroup = "Grouped Rhythmic Staves Staff Group" <<
\context RhythmicStaff = "Staff 1" {
\bar "||"
\context Voice = "Voice 1" {
{
% [Voice 1] Measure 1
{
\stopStaff
\once \override Staff.StaffSymbol.line-positions = #'(0)
\startStaff
R1 * 1
\stopStaff
\startStaff
}
}
}
}
>>
>>
}
''')
``` |
{
"source": "josiah-wolf-oberholtzer/discograph",
"score": 2
} |
#### File: discograph/discograph/app.py
```python
import os
import traceback
from flask import Flask
from flask import g
from flask import jsonify
from flask import make_response
from flask import render_template
from flask import request
from flask.ext.compress import Compress
from flask.ext.mobility import Mobility
from werkzeug.contrib.cache import FileSystemCache
from werkzeug.contrib.cache import RedisCache
from werkzeug.contrib.fixers import ProxyFix
from discograph import api
from discograph import ui
from discograph import exceptions
app = Flask(__name__)
app.config.from_object('discograph.config.DevelopmentConfiguration')
app.config.from_object('discograph.locals')
app.fcache = FileSystemCache(
app.config['FILE_CACHE_PATH'],
default_timeout=app.config['FILE_CACHE_TIMEOUT'],
threshold=app.config['FILE_CACHE_THRESHOLD'],
)
if not os.path.exists(app.config['FILE_CACHE_PATH']):
os.makedirs(app.config['FILE_CACHE_PATH'])
app.rcache = RedisCache()
app.register_blueprint(api.blueprint, url_prefix='/api')
app.register_blueprint(ui.blueprint)
app.wsgi_app = ProxyFix(app.wsgi_app)
Mobility(app)
Compress(app)
@app.after_request
def inject_rate_limit_headers(response):
try:
requests, remaining, reset = map(int, g.view_limits)
except (AttributeError, ValueError):
return response
else:
h = response.headers
h.add('X-RateLimit-Remaining', remaining)
h.add('X-RateLimit-Limit', requests)
h.add('X-RateLimit-Reset', reset)
return response
@app.errorhandler(Exception)
def handle_error(error):
if app.debug:
traceback.print_exc()
status_code = getattr(error, 'status_code', 400)
if request.endpoint.startswith('api'):
response = jsonify({
'success': False,
'status': status_code,
'message': getattr(error, 'message', 'Error')
})
else:
rendered_template = render_template('error.html', error=error)
response = make_response(rendered_template)
response.status_code = status_code
return response
@app.errorhandler(404)
def handle_error_404(error):
status_code = 404
error = exceptions.APIError(
message='Not Found',
status_code=status_code,
)
rendered_template = render_template('error.html', error=error)
response = make_response(rendered_template)
response.status_code = status_code
return response
@app.errorhandler(500)
def handle_error_500(error):
status_code = 500
error = exceptions.APIError(
message='Something Broke',
status_code=status_code,
)
rendered_template = render_template('error.html', error=error)
response = make_response(rendered_template)
response.status_code = status_code
return response
if __name__ == '__main__':
app.run(debug=True)
```
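The `inject_rate_limit_headers` hook above only adds headers when a view has stored its limits on `flask.g`. A standalone sketch of the same after-request pattern, independent of discograph's blueprints and caches (the route and limit values here are made up for illustration):
```python
from flask import Flask, g, jsonify

demo = Flask(__name__)

@demo.route('/api/ping')
def ping():
    # A rate-limiting decorator would normally populate this.
    g.view_limits = (60, 59, 1500000000)
    return jsonify(success=True)

@demo.after_request
def add_rate_limit_headers(response):
    try:
        requests, remaining, reset = map(int, g.view_limits)
    except (AttributeError, ValueError):
        return response
    response.headers.add('X-RateLimit-Limit', str(requests))
    response.headers.add('X-RateLimit-Remaining', str(remaining))
    response.headers.add('X-RateLimit-Reset', str(reset))
    return response

if __name__ == '__main__':
    with demo.test_client() as client:
        print(client.get('/api/ping').headers.get('X-RateLimit-Remaining'))
```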
#### File: discograph/library/PostgresRelation.py
```python
import itertools
import peewee
import random
import multiprocessing
import re
import traceback
from abjad.tools import datastructuretools
from discograph.library.PostgresModel import PostgresModel
from playhouse import postgres_ext
class PostgresRelation(PostgresModel):
### CLASS VARIABLES ###
class EntityType(datastructuretools.Enumeration):
ARTIST = 1
LABEL = 2
class BootstrapWorker(multiprocessing.Process):
corpus = {}
def __init__(self, indices):
multiprocessing.Process.__init__(self)
self.indices = indices
def run(self):
proc_name = self.name
for release_id in self.indices:
try:
PostgresRelation.bootstrap_pass_one_inner(
release_id,
self.corpus,
annotation=proc_name,
)
except:
traceback.print_exc()
aggregate_roles = (
'Compiled By',
'Curated By',
'DJ Mix',
'Hosted By',
'Presenter',
)
word_pattern = re.compile(r'\s+')
### PEEWEE FIELDS ###
entity_one_type = peewee.IntegerField(index=False)
entity_one_id = peewee.IntegerField(index=False)
entity_two_type = peewee.IntegerField(index=False)
entity_two_id = peewee.IntegerField(index=False)
role = peewee.CharField(index=False)
releases = postgres_ext.BinaryJSONField(null=True, index=False)
### PEEWEE META ###
class Meta:
db_table = 'relations'
primary_key = peewee.CompositeKey(
'entity_one_type',
'entity_one_id',
'entity_two_type',
'entity_two_id',
'role',
)
indexes = (
((
'entity_one_type', 'entity_one_id',
'entity_two_type', 'entity_two_id',
'role'), True),
((
'entity_two_type', 'entity_two_id',
'entity_one_type', 'entity_one_id',
'role'), True),
)
### PRIVATE METHODS ###
@classmethod
def _as_artist_credits(cls, companies):
artists = []
for company in companies:
artist = {
'name': company['name'],
'id': company['id'],
'roles': [{'name': company['entity_type_name']}],
}
artists.append(artist)
return artists
### PUBLIC METHODS ###
def as_json(self):
data = {
'key': self.link_key,
'role': self.role,
'source': self.json_entity_one_key,
'target': self.json_entity_two_key,
}
if hasattr(self, 'distance'):
data['distance'] = self.distance
if hasattr(self, 'pages'):
data['pages'] = tuple(sorted(self.pages))
return data
@classmethod
def bootstrap(cls):
cls.drop_table(True)
cls.create_table()
cls.bootstrap_pass_one()
@classmethod
def bootstrap_pass_one(cls, pessimistic=False):
import discograph
indices = discograph.PostgresRelease.get_indices(pessimistic)
workers = [cls.BootstrapWorker(_) for _ in indices]
for worker in workers:
worker.start()
for worker in workers:
worker.join()
for worker in workers:
worker.terminate()
@classmethod
def bootstrap_pass_one_inner(cls, release_id, corpus, annotation=''):
import discograph
database = cls._meta.database
with database.execution_context(with_transaction=False):
release_cls = discograph.PostgresRelease
query = release_cls.select().where(release_cls.id == release_id)
if not query.count():
return
document = query.get()
relations = cls.from_release(document)
print('{} (Pass 1) [{}]\t(id:{})\t[{}] {}'.format(
cls.__name__.upper(),
annotation,
document.id,
len(relations),
document.title,
))
for relation in relations:
instance, created = cls.create_or_get(
entity_one_type=relation['entity_one_type'],
entity_one_id=relation['entity_one_id'],
entity_two_type=relation['entity_two_type'],
entity_two_id=relation['entity_two_id'],
role=relation['role'],
)
if created:
instance.releases = {}
instance.random = random.random()
if 'release_id' in relation:
release_id = relation['release_id']
year = relation.get('year')
if not instance.releases:
instance.releases = {}
instance.releases[release_id] = year
instance.save()
@classmethod
def from_release(cls, release):
import discograph
triples = set()
artists, labels, is_compilation = cls.get_release_setup(release)
triples.update(cls.get_artist_label_relations(
artists,
labels,
is_compilation,
))
aggregate_roles = {}
if is_compilation:
iterator = itertools.product(labels, release.extra_artists)
else:
iterator = itertools.product(artists, release.extra_artists)
for entity_two, credit in iterator:
for role in credit['roles']:
role = role['name']
if role not in discograph.CreditRole.all_credit_roles:
continue
elif role in cls.aggregate_roles:
if role not in aggregate_roles:
aggregate_roles[role] = []
aggregate_credit = (cls.EntityType.ARTIST, credit['id'])
aggregate_roles[role].append(aggregate_credit)
continue
entity_one = (cls.EntityType.ARTIST, credit['id'])
triples.add((entity_one, role, entity_two))
if is_compilation:
iterator = itertools.product(labels, release.companies)
else:
iterator = itertools.product(artists, release.companies)
for entity_one, company in iterator:
role = company['entity_type_name']
if role not in discograph.CreditRole.all_credit_roles:
continue
entity_two = (cls.EntityType.LABEL, company['id'])
triples.add((entity_one, role, entity_two))
all_track_artists = set()
for track in release.tracklist:
track_artists = set(
(cls.EntityType.ARTIST, _['id'])
for _ in track.get('artists', ())
)
all_track_artists.update(track_artists)
if not track.get('extra_artists'):
continue
track_artists = track_artists or artists or labels
iterator = itertools.product(track_artists, track['extra_artists'])
for entity_two, credit in iterator:
for role in credit.get('roles', ()):
role = role['name']
if role not in discograph.CreditRole.all_credit_roles:
continue
entity_one = (cls.EntityType.ARTIST, credit['id'])
triples.add((entity_one, role, entity_two))
for role, aggregate_artists in aggregate_roles.items():
iterator = itertools.product(all_track_artists, aggregate_artists)
for track_artist, aggregate_artist in iterator:
entity_one = aggregate_artist
entity_two = track_artist
triples.add((entity_one, role, entity_two))
triples = sorted(triples)
relations = cls.from_triples(triples, release=release)
return relations
@classmethod
def get_artist_label_relations(cls, artists, labels, is_compilation):
triples = set()
iterator = itertools.product(artists, labels)
if is_compilation:
role = 'Compiled On'
else:
role = 'Released On'
for artist, label in iterator:
triples.add((artist, role, label))
return triples
@classmethod
def get_random(cls, roles=None):
n = random.random()
where_clause = (cls.random > n)
if roles:
where_clause &= (cls.role.in_(roles))
query = cls.select().where(where_clause).order_by(
cls.random, cls.role).limit(1)
print('Query:', query)
while not query.count():
n = random.random()
where_clause = (cls.random > n)
if roles:
where_clause &= (cls.role.in_(roles))
query = cls.select()
query = query.where(where_clause)
query = query.order_by(cls.random)
query = query.limit(1)
print('Query:', query)
return query.get()
@classmethod
def get_release_setup(cls, release):
is_compilation = False
artists = set(
(cls.EntityType.ARTIST, _['id'])
for _ in release.artists
)
labels = set(
(cls.EntityType.LABEL, _.get('id'))
for _ in release.labels
if _.get('id')
)
if len(artists) == 1 and release.artists[0]['name'] == 'Various':
is_compilation = True
artists.clear()
for track in release.tracklist:
artists.update(
(cls.EntityType.ARTIST, _['id'])
for _ in track.get('artists', ())
)
#for format_ in release.formats:
# for description in format_.get('descriptions', ()):
# if description == 'Compilation':
# is_compilation = True
# break
return artists, labels, is_compilation
@classmethod
def from_triples(cls, triples, release=None):
relations = []
for entity_one, role, entity_two in triples:
entity_one_type, entity_one_id = entity_one
entity_two_type, entity_two_id = entity_two
relation = dict(
entity_one_id=entity_one_id,
entity_one_type=entity_one_type,
entity_two_id=entity_two_id,
entity_two_type=entity_two_type,
role=role,
)
if release is not None:
relation['release_id'] = release.id
if release.release_date is not None:
relation['year'] = release.release_date.year
relations.append(relation)
return relations
@classmethod
def search(
cls,
entity_id,
entity_type=1,
roles=None,
year=None,
query_only=False,
):
where_clause = (
(cls.entity_one_id == entity_id) &
(cls.entity_one_type == entity_type)
)
where_clause |= (
(cls.entity_two_id == entity_id) &
(cls.entity_two_type == entity_type)
)
if roles:
where_clause &= (cls.role.in_(roles))
if year is not None:
year_clause = cls.year.is_null(True)
if isinstance(year, int):
year_clause |= cls.year == year
else:
year_clause |= cls.year.between(year[0], year[1])
where_clause &= year_clause
query = cls.select().where(where_clause)
if query_only:
return query
return list(query)
@classmethod
def search_multi(cls, entity_keys, roles=None):
assert entity_keys
artist_ids, label_ids = [], []
for entity_type, entity_id in entity_keys:
if entity_type == 1:
artist_ids.append(entity_id)
elif entity_type == 2:
label_ids.append(entity_id)
if artist_ids:
artist_where_clause = (
((cls.entity_one_type == 1) &
(cls.entity_one_id.in_(artist_ids))) |
((cls.entity_two_type == 1) &
(cls.entity_two_id.in_(artist_ids)))
)
if label_ids:
label_where_clause = (
((cls.entity_one_type == 2) &
(cls.entity_one_id.in_(label_ids))) |
((cls.entity_two_type == 2) &
(cls.entity_two_id.in_(label_ids)))
)
if artist_ids and label_ids:
where_clause = artist_where_clause | label_where_clause
elif artist_ids:
where_clause = artist_where_clause
elif label_ids:
where_clause = label_where_clause
if roles:
where_clause &= (cls.role.in_(roles))
query = cls.select().where(where_clause)
relations = {}
for relation in query:
relations[relation.link_key] = relation
return relations
@classmethod
def search_bimulti(
cls,
lh_entities,
rh_entities,
roles=None,
year=None,
verbose=True,
):
def build_query(lh_type, lh_ids, rh_type, rh_ids):
where_clause = cls.entity_one_type == lh_type
where_clause &= cls.entity_two_type == rh_type
where_clause &= cls.entity_one_id.in_(lh_ids)
where_clause &= cls.entity_two_id.in_(rh_ids)
if roles:
where_clause &= cls.role.in_(roles)
if year is not None:
year_clause = cls.year.is_null(True)
if isinstance(year, int):
year_clause |= cls.year == year
else:
year_clause |= cls.year.between(year[0], year[1])
where_clause &= year_clause
query = cls.select().where(where_clause)
return query
lh_artist_ids = []
lh_label_ids = []
rh_artist_ids = []
rh_label_ids = []
for entity_type, entity_id in lh_entities:
if entity_type == 1:
lh_artist_ids.append(entity_id)
else:
lh_label_ids.append(entity_id)
for entity_type, entity_id in rh_entities:
if entity_type == 1:
rh_artist_ids.append(entity_id)
else:
rh_label_ids.append(entity_id)
queries = []
if lh_artist_ids:
lh_type, lh_ids = 1, lh_artist_ids
if rh_artist_ids:
rh_type, rh_ids = 1, rh_artist_ids
query = build_query(lh_type, lh_ids, rh_type, rh_ids)
queries.append(query)
if rh_label_ids:
rh_type, rh_ids = 2, rh_label_ids
query = build_query(lh_type, lh_ids, rh_type, rh_ids)
queries.append(query)
if lh_label_ids:
lh_type, lh_ids = 2, lh_label_ids
if rh_artist_ids:
rh_type, rh_ids = 1, rh_artist_ids
query = build_query(lh_type, lh_ids, rh_type, rh_ids)
queries.append(query)
if rh_label_ids:
rh_type, rh_ids = 2, rh_label_ids
query = build_query(lh_type, lh_ids, rh_type, rh_ids)
queries.append(query)
relations = []
for query in queries:
#print(query)
relations.extend(query)
relations = {
relation.link_key: relation
for relation in relations
}
return relations
### PUBLIC PROPERTIES ###
@property
def entity_one_key(self):
return (self.entity_one_type, self.entity_one_id)
@property
def entity_two_key(self):
return (self.entity_two_type, self.entity_two_id)
@property
def json_entity_one_key(self):
if self.entity_one_type == 1:
return 'artist-{}'.format(self.entity_one_id)
elif self.entity_one_type == 2:
return 'label-{}'.format(self.entity_one_id)
raise ValueError(self.entity_one_key)
@property
def json_entity_two_key(self):
if self.entity_two_type == 1:
return 'artist-{}'.format(self.entity_two_id)
elif self.entity_two_type == 2:
return 'label-{}'.format(self.entity_two_id)
raise ValueError(self.entity_two_key)
@property
def link_key(self):
source = self.json_entity_one_key
target = self.json_entity_two_key
role = self.word_pattern.sub('-', self.role).lower()
pieces = [
source,
role,
target,
]
return '-'.join(str(_) for _ in pieces)
```
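The `link_key` property above produces the identifiers that the RelationGrapher test below compares against. A tiny standalone reproduction of the key scheme (pure Python, just the same string layout, not the discograph code path):
```python
import re

WORD_PATTERN = re.compile(r'\s+')

def link_key(entity_one_type, entity_one_id, role, entity_two_type, entity_two_id):
    # Entity type 1 -> 'artist', 2 -> 'label', as in json_entity_one_key above.
    names = {1: 'artist', 2: 'label'}
    source = '{}-{}'.format(names[entity_one_type], entity_one_id)
    target = '{}-{}'.format(names[entity_two_type], entity_two_id)
    slug = WORD_PATTERN.sub('-', role).lower()
    return '-'.join([source, slug, target])

# Matches keys seen in the test fixtures below, e.g.:
assert link_key(1, 32550, 'Alias', 1, 2561672) == 'artist-32550-alias-artist-2561672'
```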
#### File: library/test/test_RelationGrapher.py
```python
import discograph
import json
from abjad import stringtools
from playhouse import test_utils
class Test(discograph.DiscographTestCase):
"""
Problematic networks:
- 296570: 306 nodes, 13688 links, 5 pages: 149, 4, 4, 4, 158
- 1946151: unbalanced paging
- 491160: bifurcated dense alias networks
"""
json_kwargs = {
'indent': 4,
'separators': (',', ': '),
'sort_keys': True,
}
@classmethod
def setUpClass(cls):
cls.setUpTestDB()
def test___call___01(self):
print(discograph.PostgresEntity._meta.database)
artist = discograph.PostgresEntity.get(entity_type=1, name='Seefeel')
roles = ['Alias', 'Member Of']
grapher = discograph.RelationGrapher(
artist,
degree=1,
roles=roles,
)
network = grapher.__call__()
actual = json.dumps(network, **self.json_kwargs)
expected = stringtools.normalize('''
{
"center": {
"key": "artist-152882",
"name": "<NAME>"
},
"links": [
{
"key": "artist-152882-member-of-artist-2561672",
"pages": [
1
],
"role": "Member Of",
"source": "artist-152882",
"target": "artist-2561672"
},
{
"key": "artist-152882-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-152882",
"target": "artist-32550"
},
{
"key": "artist-32550-alias-artist-2561672",
"pages": [
1
],
"role": "Alias",
"source": "artist-32550",
"target": "artist-2561672"
}
],
"nodes": [
{
"cluster": 1,
"distance": 1,
"id": 32550,
"key": "artist-32550",
"links": [
"artist-152882-member-of-artist-32550",
"artist-32550-alias-artist-2561672"
],
"missing": 10,
"name": "The Time",
"pages": [
1
],
"size": 11,
"type": "artist"
},
{
"distance": 0,
"id": 152882,
"key": "artist-152882",
"links": [
"artist-152882-member-of-artist-2561672",
"artist-152882-member-of-artist-32550"
],
"missing": 0,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"cluster": 1,
"distance": 1,
"id": 2561672,
"key": "artist-2561672",
"links": [
"artist-152882-member-of-artist-2561672",
"artist-32550-alias-artist-2561672"
],
"missing": 6,
"name": "The Original 7ven",
"pages": [
1
],
"size": 7,
"type": "artist"
}
],
"pages": 1
}
''')
assert actual == expected
def test___call___02(self):
artist = discograph.PostgresEntity.get(entity_type=1, name='<NAME>')
roles = ['Alias', 'Member Of']
grapher = discograph.RelationGrapher(
artist,
degree=2,
max_nodes=5,
roles=roles,
)
network = grapher.__call__()
actual = json.dumps(network, **self.json_kwargs)
expected = stringtools.normalize('''
{
"center": {
"key": "artist-152882",
"name": "<NAME>"
},
"links": [
{
"key": "artist-100600-member-of-artist-2561672",
"pages": [
1
],
"role": "Member Of",
"source": "artist-100600",
"target": "artist-2561672"
},
{
"key": "artist-100600-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-100600",
"target": "artist-32550"
},
{
"key": "artist-113965-member-of-artist-2561672",
"pages": [
2
],
"role": "Member Of",
"source": "artist-113965",
"target": "artist-2561672"
},
{
"key": "artist-113965-member-of-artist-32550",
"pages": [
2
],
"role": "Member Of",
"source": "artist-113965",
"target": "artist-32550"
},
{
"key": "artist-152882-member-of-artist-2561672",
"pages": [
1,
2,
3
],
"role": "Member Of",
"source": "artist-152882",
"target": "artist-2561672"
},
{
"key": "artist-152882-member-of-artist-32550",
"pages": [
1,
2,
3
],
"role": "Member Of",
"source": "artist-152882",
"target": "artist-32550"
},
{
"key": "artist-23446-member-of-artist-32550",
"pages": [
3
],
"role": "Member Of",
"source": "artist-23446",
"target": "artist-32550"
},
{
"key": "artist-241356-member-of-artist-2561672",
"pages": [
3
],
"role": "Member Of",
"source": "artist-241356",
"target": "artist-2561672"
},
{
"key": "artist-241356-member-of-artist-32550",
"pages": [
3
],
"role": "Member Of",
"source": "artist-241356",
"target": "artist-32550"
},
{
"key": "artist-242702-member-of-artist-32550",
"pages": [
3
],
"role": "Member Of",
"source": "artist-242702",
"target": "artist-32550"
},
{
"key": "artist-32550-alias-artist-2561672",
"pages": [
1,
2,
3
],
"role": "Alias",
"source": "artist-32550",
"target": "artist-2561672"
},
{
"key": "artist-354129-member-of-artist-2561672",
"pages": [
2
],
"role": "Member Of",
"source": "artist-354129",
"target": "artist-2561672"
},
{
"key": "artist-354129-member-of-artist-32550",
"pages": [
2
],
"role": "Member Of",
"source": "artist-354129",
"target": "artist-32550"
},
{
"key": "artist-37806-member-of-artist-2561672",
"pages": [
2
],
"role": "Member Of",
"source": "artist-37806",
"target": "artist-2561672"
},
{
"key": "artist-37806-member-of-artist-32550",
"pages": [
2
],
"role": "Member Of",
"source": "artist-37806",
"target": "artist-32550"
},
{
"key": "artist-409502-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-409502",
"target": "artist-32550"
},
{
"key": "artist-453969-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-453969",
"target": "artist-32550"
},
{
"key": "artist-55449-member-of-artist-2561672",
"pages": [
1
],
"role": "Member Of",
"source": "artist-55449",
"target": "artist-2561672"
},
{
"key": "artist-55449-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-55449",
"target": "artist-32550"
}
],
"nodes": [
{
"distance": 2,
"id": 23446,
"key": "artist-23446",
"links": [
"artist-23446-member-of-artist-32550"
],
"missing": 1,
"name": "<NAME>",
"pages": [
3
],
"size": 0,
"type": "artist"
},
{
"cluster": 1,
"distance": 1,
"id": 32550,
"key": "artist-32550",
"links": [
"artist-100600-member-of-artist-32550",
"artist-113965-member-of-artist-32550",
"artist-152882-member-of-artist-32550",
"artist-23446-member-of-artist-32550",
"artist-241356-member-of-artist-32550",
"artist-242702-member-of-artist-32550",
"artist-32550-alias-artist-2561672",
"artist-354129-member-of-artist-32550",
"artist-37806-member-of-artist-32550",
"artist-409502-member-of-artist-32550",
"artist-453969-member-of-artist-32550",
"artist-55449-member-of-artist-32550"
],
"missing": 0,
"missingByPage": {
"1": 6,
"2": 7,
"3": 7
},
"name": "<NAME>",
"pages": [
1,
2,
3
],
"size": 11,
"type": "artist"
},
{
"distance": 2,
"id": 37806,
"key": "artist-37806",
"links": [
"artist-37806-member-of-artist-2561672",
"artist-37806-member-of-artist-32550"
],
"missing": 2,
"name": "<NAME>",
"pages": [
2
],
"size": 0,
"type": "artist"
},
{
"distance": 2,
"id": 55449,
"key": "artist-55449",
"links": [
"artist-55449-member-of-artist-2561672",
"artist-55449-member-of-artist-32550"
],
"missing": 3,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"distance": 2,
"id": 100600,
"key": "artist-100600",
"links": [
"artist-100600-member-of-artist-2561672",
"artist-100600-member-of-artist-32550"
],
"missing": 1,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"distance": 2,
"id": 113965,
"key": "artist-113965",
"links": [
"artist-113965-member-of-artist-2561672",
"artist-113965-member-of-artist-32550"
],
"missing": 4,
"name": "<NAME>",
"pages": [
2
],
"size": 0,
"type": "artist"
},
{
"distance": 0,
"id": 152882,
"key": "artist-152882",
"links": [
"artist-152882-member-of-artist-2561672",
"artist-152882-member-of-artist-32550"
],
"missing": 0,
"name": "<NAME>",
"pages": [
1,
2,
3
],
"size": 0,
"type": "artist"
},
{
"cluster": 2,
"distance": 2,
"id": 241356,
"key": "artist-241356",
"links": [
"artist-241356-member-of-artist-2561672",
"artist-241356-member-of-artist-32550"
],
"missing": 4,
"name": "<NAME>",
"pages": [
3
],
"size": 0,
"type": "artist"
},
{
"cluster": 3,
"distance": 2,
"id": 242702,
"key": "artist-242702",
"links": [
"artist-242702-member-of-artist-32550"
],
"missing": 5,
"name": "<NAME>",
"pages": [
3
],
"size": 0,
"type": "artist"
},
{
"distance": 2,
"id": 354129,
"key": "artist-354129",
"links": [
"artist-354129-member-of-artist-2561672",
"artist-354129-member-of-artist-32550"
],
"missing": 1,
"name": "<NAME>",
"pages": [
2
],
"size": 0,
"type": "artist"
},
{
"distance": 2,
"id": 409502,
"key": "artist-409502",
"links": [
"artist-409502-member-of-artist-32550"
],
"missing": 1,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"distance": 2,
"id": 453969,
"key": "artist-453969",
"links": [
"artist-453969-member-of-artist-32550"
],
"missing": 2,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"cluster": 1,
"distance": 1,
"id": 2561672,
"key": "artist-2561672",
"links": [
"artist-100600-member-of-artist-2561672",
"artist-113965-member-of-artist-2561672",
"artist-152882-member-of-artist-2561672",
"artist-241356-member-of-artist-2561672",
"artist-32550-alias-artist-2561672",
"artist-354129-member-of-artist-2561672",
"artist-37806-member-of-artist-2561672",
"artist-55449-member-of-artist-2561672"
],
"missing": 0,
"missingByPage": {
"1": 4,
"2": 3,
"3": 5
},
"name": "The Original 7ven",
"pages": [
1,
2,
3
],
"size": 7,
"type": "artist"
}
],
"pages": 3
}
''')
assert actual == expected
def test___call___03(self):
artist = discograph.PostgresEntity.get(entity_type=1, name='<NAME>')
roles = ['Alias', 'Member Of']
grapher = discograph.RelationGrapher(
artist,
degree=2,
link_ratio=2,
roles=roles,
)
network = grapher.__call__()
actual = json.dumps(network, **self.json_kwargs)
expected = stringtools.normalize('''
{
"center": {
"key": "artist-152882",
"name": "<NAME>"
},
"links": [
{
"key": "artist-100600-member-of-artist-2561672",
"pages": [
1
],
"role": "Member Of",
"source": "artist-100600",
"target": "artist-2561672"
},
{
"key": "artist-100600-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-100600",
"target": "artist-32550"
},
{
"key": "artist-113965-member-of-artist-2561672",
"pages": [
1
],
"role": "Member Of",
"source": "artist-113965",
"target": "artist-2561672"
},
{
"key": "artist-113965-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-113965",
"target": "artist-32550"
},
{
"key": "artist-152882-member-of-artist-2561672",
"pages": [
1
],
"role": "Member Of",
"source": "artist-152882",
"target": "artist-2561672"
},
{
"key": "artist-152882-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-152882",
"target": "artist-32550"
},
{
"key": "artist-23446-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-23446",
"target": "artist-32550"
},
{
"key": "artist-241356-member-of-artist-2561672",
"pages": [
1
],
"role": "Member Of",
"source": "artist-241356",
"target": "artist-2561672"
},
{
"key": "artist-241356-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-241356",
"target": "artist-32550"
},
{
"key": "artist-242702-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-242702",
"target": "artist-32550"
},
{
"key": "artist-32550-alias-artist-2561672",
"pages": [
1
],
"role": "Alias",
"source": "artist-32550",
"target": "artist-2561672"
},
{
"key": "artist-354129-member-of-artist-2561672",
"pages": [
1
],
"role": "Member Of",
"source": "artist-354129",
"target": "artist-2561672"
},
{
"key": "artist-354129-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-354129",
"target": "artist-32550"
},
{
"key": "artist-37806-member-of-artist-2561672",
"pages": [
1
],
"role": "Member Of",
"source": "artist-37806",
"target": "artist-2561672"
},
{
"key": "artist-37806-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-37806",
"target": "artist-32550"
},
{
"key": "artist-409502-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-409502",
"target": "artist-32550"
},
{
"key": "artist-453969-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-453969",
"target": "artist-32550"
},
{
"key": "artist-55449-member-of-artist-2561672",
"pages": [
1
],
"role": "Member Of",
"source": "artist-55449",
"target": "artist-2561672"
},
{
"key": "artist-55449-member-of-artist-32550",
"pages": [
1
],
"role": "Member Of",
"source": "artist-55449",
"target": "artist-32550"
}
],
"nodes": [
{
"distance": 2,
"id": 23446,
"key": "artist-23446",
"links": [
"artist-23446-member-of-artist-32550"
],
"missing": 1,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"cluster": 1,
"distance": 1,
"id": 32550,
"key": "artist-32550",
"links": [
"artist-100600-member-of-artist-32550",
"artist-113965-member-of-artist-32550",
"artist-152882-member-of-artist-32550",
"artist-23446-member-of-artist-32550",
"artist-241356-member-of-artist-32550",
"artist-242702-member-of-artist-32550",
"artist-32550-alias-artist-2561672",
"artist-354129-member-of-artist-32550",
"artist-37806-member-of-artist-32550",
"artist-409502-member-of-artist-32550",
"artist-453969-member-of-artist-32550",
"artist-55449-member-of-artist-32550"
],
"missing": 0,
"name": "<NAME>",
"pages": [
1
],
"size": 11,
"type": "artist"
},
{
"distance": 2,
"id": 37806,
"key": "artist-37806",
"links": [
"artist-37806-member-of-artist-2561672",
"artist-37806-member-of-artist-32550"
],
"missing": 2,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"distance": 2,
"id": 55449,
"key": "artist-55449",
"links": [
"artist-55449-member-of-artist-2561672",
"artist-55449-member-of-artist-32550"
],
"missing": 3,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"distance": 2,
"id": 100600,
"key": "artist-100600",
"links": [
"artist-100600-member-of-artist-2561672",
"artist-100600-member-of-artist-32550"
],
"missing": 1,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"distance": 2,
"id": 113965,
"key": "artist-113965",
"links": [
"artist-113965-member-of-artist-2561672",
"artist-113965-member-of-artist-32550"
],
"missing": 4,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"distance": 0,
"id": 152882,
"key": "artist-152882",
"links": [
"artist-152882-member-of-artist-2561672",
"artist-152882-member-of-artist-32550"
],
"missing": 0,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"cluster": 2,
"distance": 2,
"id": 241356,
"key": "artist-241356",
"links": [
"artist-241356-member-of-artist-2561672",
"artist-241356-member-of-artist-32550"
],
"missing": 4,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"cluster": 3,
"distance": 2,
"id": 242702,
"key": "artist-242702",
"links": [
"artist-242702-member-of-artist-32550"
],
"missing": 5,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"distance": 2,
"id": 354129,
"key": "artist-354129",
"links": [
"artist-354129-member-of-artist-2561672",
"artist-354129-member-of-artist-32550"
],
"missing": 1,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"distance": 2,
"id": 409502,
"key": "artist-409502",
"links": [
"artist-409502-member-of-artist-32550"
],
"missing": 1,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"distance": 2,
"id": 453969,
"key": "artist-453969",
"links": [
"artist-453969-member-of-artist-32550"
],
"missing": 2,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"cluster": 1,
"distance": 1,
"id": 2561672,
"key": "artist-2561672",
"links": [
"artist-100600-member-of-artist-2561672",
"artist-113965-member-of-artist-2561672",
"artist-152882-member-of-artist-2561672",
"artist-241356-member-of-artist-2561672",
"artist-32550-alias-artist-2561672",
"artist-354129-member-of-artist-2561672",
"artist-37806-member-of-artist-2561672",
"artist-55449-member-of-artist-2561672"
],
"missing": 0,
"name": "The Original 7ven",
"pages": [
1
],
"size": 7,
"type": "artist"
}
],
"pages": 1
}
''')
assert actual == expected
def test___call___04(self):
r'''Missing count takes into account structural roles: members,
aliases, groups, sublabels, parent labels, etc.
'''
artist = discograph.PostgresEntity.get(
entity_type=1,
entity_id=1362698,
)
roles = ['Alias', 'Member Of']
grapher = discograph.RelationGrapher(
artist,
degree=12,
roles=roles,
)
network = grapher.__call__()
actual = json.dumps(network, **self.json_kwargs)
expected = stringtools.normalize('''
{
"center": {
"key": "artist-1362698",
"name": "Revolution (13)"
},
"links": [
{
"key": "artist-144943-alias-artist-535046",
"pages": [
1
],
"role": "Alias",
"source": "artist-144943",
"target": "artist-535046"
},
{
"key": "artist-144943-member-of-artist-1362698",
"pages": [
1
],
"role": "Member Of",
"source": "artist-144943",
"target": "artist-1362698"
},
{
"key": "artist-271585-member-of-artist-1362698",
"pages": [
1
],
"role": "Member Of",
"source": "artist-271585",
"target": "artist-1362698"
}
],
"nodes": [
{
"cluster": 1,
"distance": 1,
"id": 144943,
"key": "artist-144943",
"links": [
"artist-144943-alias-artist-535046",
"artist-144943-member-of-artist-1362698"
],
"missing": 0,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"distance": 1,
"id": 271585,
"key": "artist-271585",
"links": [
"artist-271585-member-of-artist-1362698"
],
"missing": 0,
"name": "Neuroti-k",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"cluster": 1,
"distance": 2,
"id": 535046,
"key": "artist-535046",
"links": [
"artist-144943-alias-artist-535046"
],
"missing": 0,
"name": "<NAME>",
"pages": [
1
],
"size": 0,
"type": "artist"
},
{
"distance": 0,
"id": 1362698,
"key": "artist-1362698",
"links": [
"artist-144943-member-of-artist-1362698",
"artist-271585-member-of-artist-1362698"
],
"missing": 0,
"name": "Revolution (13)",
"pages": [
1
],
"size": 2,
"type": "artist"
}
],
"pages": 1
}
''')
assert actual == expected
def test___call___05(self):
artist = discograph.PostgresEntity.get(
entity_type=2,
name='<NAME>',
)
roles = ['Recorded At']
grapher = discograph.RelationGrapher(
artist,
degree=2,
roles=roles,
)
network = grapher.__call__() # Should not error.
```
#### File: discograph/test/test_ui.py
```python
import discograph
import unittest
class TestCase(unittest.TestCase):
def setUp(self):
self.app = discograph.app.test_client()
def test_index(self):
response = self.app.get('/')
assert response.status == '200 OK'
def test_artist_200(self):
response = self.app.get('/artist/32550')
assert response.status == '200 OK'
def test_label_404(self):
response = self.app.get('/label/2')
        assert response.status == '404 NOT FOUND'
def test_error(self):
response = self.app.get('/malformed')
assert response.status == '404 NOT FOUND'
``` |
{
"source": "josiah-wolf-oberholtzer/on-exactitude-in-science",
"score": 2
} |
#### File: api/maps/cli.py
```python
import asyncio
import logging
from contextlib import ExitStack
from pathlib import Path
import click
from uqbar.io import Profiler
from maps import goblin, loader
@click.group()
@click.pass_context
def cli(ctx):
ctx.ensure_object(dict)
logging.basicConfig(
format="%(asctime)s %(name)s %(levelname)-8s %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
)
logging.getLogger("maps").setLevel(logging.INFO)
@cli.group()
@click.option("-g", "--graph", default="graph")
@click.pass_context
def schema(ctx, graph):
ctx.obj["graph"] = graph
@cli.group()
@click.option("-g", "--graph", default="graph")
@click.pass_context
def data(ctx, graph):
ctx.obj["graph"] = graph
@schema.command("load")
@click.pass_context
def schema_load(ctx):
async def run():
graph_name = ctx.obj["graph"]
manager = goblin.GoblinManager()
async with manager as goblin_app:
await goblin.install_schema(goblin_app, graph_name=graph_name)
asyncio.run(run())
@data.command("load")
@click.option(
"-p",
"--path",
default="/data",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
)
@click.option("-l", "--limit", default=None, type=int)
@click.option("-w", "--workers", default=8)
@click.option("--profile/--no-profile", default=False)
@click.pass_context
def data_load(ctx, path: str, limit, workers, profile):
async def run():
aliases = {"graph": "g", "testgraph": "tg"}
manager = goblin.GoblinManager(aliases={"g": aliases[ctx.obj["graph"]]})
with ExitStack() as stack:
if profile:
stack.enter_context(Profiler())
async with manager as goblin_app:
await loader.load(
goblin_app, Path(path), consumer_count=workers, limit=limit
)
asyncio.run(run())
```
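For a quick smoke test of the click group above, `click.testing.CliRunner` can drive it in-process. This is a minimal sketch under a few assumptions: the module is importable as `maps.cli`, the `/data` directory exists (the option is declared with `exists=True`), and a Gremlin/Goblin backend is reachable for the actual load. All literal paths and numbers here are illustrative.

```python
# A minimal sketch, assuming the click group above is importable as ``maps.cli``.
# CliRunner invokes the command in-process, so no console-script entry point is needed.
from click.testing import CliRunner

from maps.cli import cli

runner = CliRunner()
# ``--path`` must point at an existing directory; the load itself still needs
# the graph backend configured in GoblinManager to be reachable.
result = runner.invoke(
    cli, ["data", "load", "--path", "/data", "--limit", "100", "--workers", "4"]
)
print(result.exit_code)
print(result.output)
```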
#### File: api/tests/test_loader.py
```python
import asyncio
import logging
import random
from pathlib import Path
import pytest
from aiogremlin.process.graph_traversal import __
from maps import loader, xml
@pytest.fixture
async def session(goblin_app):
yield await goblin_app.session()
@pytest.mark.asyncio
@pytest.mark.parametrize("consumer_count", [1, 8])
async def test_loader_run(goblin_app, session, consumer_count, caplog):
caplog.set_level(logging.INFO, logger="maps")
limit = 50
expected_vertex_counts = {
"artist": 50,
"company": 50,
"master": 50,
"release": 50,
"track": 246,
}
expected_edge_counts = {
"Alias Of": 5,
"Arranged By": 15,
"Co-producer": 1,
"Copyright (c)": 5,
"DJ Mix": 1,
"Distributed By": 2,
"Includes": 246,
"Keyboards": 1,
"Manufactured By": 2,
"Member Of": 7,
"Mixed By": 1,
"Performer": 4,
"Phonographic Copyright (p)": 5,
"Presenter": 1,
"Producer": 20,
"Recorded By": 4,
"Released On": 50,
"Released": 34,
"Remix": 15,
"Strings": 1,
"Subsidiary Of": 1,
"Written-By": 13,
}
for attempt in range(2):
await loader.load(
goblin_app,
Path(__file__).parent,
consumer_count=consumer_count,
limit=limit,
)
await asyncio.sleep(2)
actual_vertex_counts = await (
session.traversal().V().groupCount().by(__.label())
).next()
actual_edge_counts = await (
session.traversal().E().groupCount().by(__.values("name"))
).next()
print(f"Attempt {attempt}")
assert actual_vertex_counts == expected_vertex_counts
assert actual_edge_counts == expected_edge_counts
@pytest.mark.asyncio
async def test_load_artist_vertex(session):
entity_id = random.randint(0, 1000)
xml_artist = xml.Artist(entity_id=entity_id, name="Foo")
assert (
await session.g.V().has("artist", "artist_id", entity_id).count().next()
) == 0
await loader.load_artist_vertex(session, xml_artist)
values_a = (
await session.g.V().has("artist", "artist_id", entity_id).valueMap().next()
)
last_modified_a = values_a.pop("last_modified")[0]
random_a = values_a.pop("random")[0]
assert values_a == {"artist_id": [entity_id], "name": ["Foo"]}
xml_artist.name = "Foo 2"
await loader.load_artist_vertex(session, xml_artist)
values_b = (
await session.g.V().has("artist", "artist_id", entity_id).valueMap().next()
)
last_modified_b = values_b.pop("last_modified")[0]
random_b = values_b.pop("random")[0]
assert values_b == {"artist_id": [entity_id], "name": ["Foo 2"]}
assert random_b != random_a
assert last_modified_b > last_modified_a
@pytest.mark.asyncio
async def test_load_company_vertex(session):
entity_id = random.randint(0, 1000)
xml_company = xml.Company(entity_id=entity_id, name="Bar")
assert (
await session.g.V().has("company", "company_id", entity_id).count().next()
) == 0
await loader.load_company_vertex(session, xml_company)
values_a = (
await session.g.V().has("company", "company_id", entity_id).valueMap().next()
)
last_modified_a = values_a.pop("last_modified")[0]
random_a = values_a.pop("random")[0]
assert values_a == {"company_id": [entity_id], "name": ["Bar"]}
xml_company.name = "Bar 2"
await loader.load_company_vertex(session, xml_company)
values_b = (
await session.g.V().has("company", "company_id", entity_id).valueMap().next()
)
last_modified_b = values_b.pop("last_modified")[0]
random_b = values_b.pop("random")[0]
assert values_b == {"company_id": [entity_id], "name": ["Bar 2"]}
assert random_b != random_a
assert last_modified_b > last_modified_a
@pytest.mark.asyncio
async def test_load_release_vertex_properties(session):
entity_id = random.randint(0, 1000)
xml_release = xml.Release(
entity_id=entity_id,
country="US",
formats=['12"', "EP", "33\xe2\x85\x93"],
name="Baz",
tracks=[
xml.Track(entity_id=f"{entity_id}-A", name="Track A", position="A"),
xml.Track(entity_id=f"{entity_id}-B", name="Track B", position="B"),
],
)
assert (
await session.g.V().has("release", "release_id", entity_id).count().next()
) == 0
await loader.load_release_vertex_and_edges(session, xml_release, 0)
values_a = (
await session.g.V().has("release", "release_id", entity_id).valueMap().next()
)
values_aa = (
await session.g.V().has("track", "track_id", f"{entity_id}-A").valueMap().next()
)
values_ab = (
await session.g.V().has("track", "track_id", f"{entity_id}-B").valueMap().next()
)
last_modified_a = values_a.pop("last_modified")[0]
random_a = values_a.pop("random")[0]
values_aa.pop("last_modified")
values_ab.pop("last_modified")
values_aa.pop("random")
values_ab.pop("random")
assert values_a == {
"country": ["US"],
"formats": ['12"', "EP", "33\xe2\x85\x93"],
"name": ["Baz"],
"primacy": [1],
"release_id": [entity_id],
}
assert values_aa == {
"country": ["US"],
"formats": ['12"', "EP", "33\xe2\x85\x93"],
"name": ["Track A"],
"position": ["A"],
"primacy": [1],
"track_id": [f"{entity_id}-A"],
}
assert values_ab == {
"country": ["US"],
"formats": ['12"', "EP", "33\xe2\x85\x93"],
"name": ["Track B"],
"position": ["B"],
"primacy": [1],
"track_id": [f"{entity_id}-B"],
}
xml_release.name = "Baz 2"
xml_release.formats = ["EP", "33⅓", "Vinyl"]
await loader.load_release_vertex_and_edges(session, xml_release, 0)
values_b = (
await session.g.V().has("release", "release_id", entity_id).valueMap().next()
)
values_ba = (
await session.g.V().has("track", "track_id", f"{entity_id}-A").valueMap().next()
)
values_bb = (
await session.g.V().has("track", "track_id", f"{entity_id}-B").valueMap().next()
)
last_modified_b = values_b.pop("last_modified")[0]
random_b = values_b.pop("random")[0]
values_ba.pop("last_modified")
values_bb.pop("last_modified")
values_ba.pop("random")
values_bb.pop("random")
assert values_b == {
"country": ["US"],
"formats": ["EP", "Vinyl", "33⅓"],
"name": ["<NAME>"],
"primacy": [1],
"release_id": [entity_id],
}
assert random_b != random_a
assert last_modified_b > last_modified_a
assert values_ba == {
"country": ["US"],
"formats": ["EP", "Vinyl", "33⅓"],
"name": ["Track A"],
"position": ["A"],
"primacy": [1],
"track_id": [f"{entity_id}-A"],
}
assert values_bb == {
"country": ["US"],
"formats": ["EP", "Vinyl", "33⅓"],
"name": ["Track B"],
"position": ["B"],
"primacy": [1],
"track_id": [f"{entity_id}-B"],
}
``` |
{
"source": "josiah-wolf-oberholtzer/supriya",
"score": 2
} |
#### File: supriya/nonrealtime/bases.py
```python
import abc
import functools
from uqbar.objects import get_repr
from supriya.system import SupriyaObject
class SessionObject(SupriyaObject):
"""
A non-realtime session object, analogous to ServerObject.
"""
### CLASS VARIABLES ###
__documentation_section__ = "Session Internals"
__slots__ = ()
### INITIALIZER ###
@abc.abstractmethod
def __init__(self, session):
import supriya.nonrealtime
prototype = (supriya.nonrealtime.Session, type(None))
assert isinstance(session, prototype)
self._session = session
### SPECIAL METHODS ###
def __repr__(self):
return get_repr(self, multiline=False)
### PUBLIC METHODS ###
@staticmethod
def require_offset(function):
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
import supriya.nonrealtime
if isinstance(self, supriya.nonrealtime.Session):
session = self
else:
session = self.session
if "offset" not in kwargs or kwargs["offset"] is None:
if not session._active_moments:
raise ValueError("No active moment.")
offset = session._active_moments[-1].offset
kwargs["offset"] = offset
if isinstance(self, SessionObject):
if not (self.start_offset <= kwargs["offset"] <= self.stop_offset):
raise ValueError(
"Offset {} must intersect [{}, {}]".format(
                            float(kwargs["offset"]), self.start_offset, self.stop_offset
)
)
with session.at(kwargs["offset"]):
return function(self, *args, **kwargs)
return wrapper
### PUBLIC PROPERTIES ###
@property
def session(self):
return self._session
```
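The `require_offset` decorator above injects the active moment's offset, range-checks it against the object's start and stop offsets, and runs the wrapped method inside `session.at(offset)`. The sketch below illustrates that contract with a hypothetical subclass; `Widget`, its offsets, and `do_something` are invented for illustration and are not part of supriya's API.

```python
# A hypothetical SessionObject subclass; every name here is illustrative.
import supriya.nonrealtime
from supriya.nonrealtime.bases import SessionObject


class Widget(SessionObject):
    def __init__(self, session, start_offset=0.0, stop_offset=10.0):
        SessionObject.__init__(self, session)
        self.start_offset = start_offset
        self.stop_offset = stop_offset

    @SessionObject.require_offset
    def do_something(self, offset=None):
        # By the time the body runs, ``offset`` is set, validated against
        # [start_offset, stop_offset], and the call executes inside
        # ``session.at(offset)``.
        return offset


session = supriya.nonrealtime.Session()
widget = Widget(session)
with session.at(2.5):
    # No offset passed, so the decorator picks up 2.5 from the active moment.
    assert widget.do_something() == 2.5
```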
#### File: supriya/supriya/querytree.py
```python
from collections.abc import Sequence
from supriya import ParameterRate
from supriya.system import SupriyaValueObject
class QueryTreeControl(SupriyaValueObject):
### CLASS VARIABLES ###
__slots__ = ("_control_value", "_control_name_or_index")
### INITIALIZER ###
def __init__(self, control_name_or_index=None, control_value=None):
self._control_value = control_value
self._control_name_or_index = control_name_or_index
### SPECIAL METHODS ###
def __str__(self):
key = self._control_name_or_index
value = self._control_value
try:
value = round(value, 6)
except Exception:
pass
string = "{}: {!s}".format(key, value)
return string
### PUBLIC METHODS ###
@classmethod
def from_control(cls, control):
import supriya.realtime
control_name = control.name
if isinstance(control.value, supriya.realtime.Bus):
control_value = str(control.value)
else:
control_value = float(control.value)
return cls(control_value=control_value, control_name_or_index=control_name)
### PUBLIC PROPERTIES ###
@property
def control_name_or_index(self):
return self._control_name_or_index
@property
def control_value(self):
return self._control_value
class QueryTreeSynth(SupriyaValueObject, Sequence):
### CLASS VARIABLES ###
__slots__ = ("_controls", "_extra", "_name", "_node_id", "_synthdef_name")
### INITIALIZER ###
def __init__(
self, node_id=None, synthdef_name=None, controls=None, name=None, **extra
):
self._controls = controls
self._extra = tuple(sorted(extra.items()))
self._node_id = node_id
self._synthdef_name = synthdef_name
self._name = name
### SPECIAL METHODS ###
def __format__(self, format_spec):
result = self._get_str_format_pieces(unindexed=format_spec == "unindexed")
result = "\n".join(result)
return result
def __getitem__(self, item):
return self._controls[item]
def __len__(self):
return len(self._controls)
def __str__(self):
result = self._get_str_format_pieces()
result = "\n".join(result)
return result
### PRIVATE METHODS ###
@classmethod
def _from_nrt_synth(
cls,
state,
node,
include_controls=False,
include_timespans=False,
id_mapping=None,
):
from supriya.nonrealtime import Bus, BusGroup, Synth
from supriya.synthdefs import SynthDef
assert isinstance(node, Synth)
node_id = node.session_id
synthdef_name = node.synthdef
if isinstance(synthdef_name, SynthDef):
synthdef_name = synthdef_name.actual_name
controls = []
if include_controls:
settings = node._collect_settings(
state.offset, persistent=True, id_mapping=id_mapping
)
synthdef, synth_kwargs = node.synthdef, node.synth_kwargs
for name, parameter in sorted(synthdef.parameters.items()):
value = parameter.value
if name in synth_kwargs:
value = synth_kwargs[name]
if name in settings:
value = settings[name]
if (
parameter.parameter_rate == ParameterRate.SCALAR
or parameter.name in ("in_", "out")
):
if value in id_mapping:
value = id_mapping[value]
value = float(value)
elif isinstance(value, (Bus, BusGroup)) and value in id_mapping:
value = value.get_map_symbol(id_mapping[value])
else:
value = float(value)
control = QueryTreeControl(
control_name_or_index=name, control_value=value
)
controls.append(control)
extra = {}
if include_timespans:
extra.update(timespan=[node.start_offset, node.stop_offset])
query_tree_synth = QueryTreeSynth(
node_id=node_id, synthdef_name=synthdef_name, controls=controls, **extra
)
return query_tree_synth
def _get_str_format_pieces(self, unindexed=False):
result = []
node_id = self.node_id
if unindexed:
node_id = "..."
string = f"{node_id} {self.synthdef_name}"
if self.name:
string += f" ({self.name})"
if self.extra:
string += (
" ("
+ ", ".join("{}: {}".format(key, value) for key, value in self.extra)
+ ")"
)
result.append(string)
if self.controls:
control_string = ", ".join(str(control) for control in self.controls)
control_string = " " + control_string
result.append(control_string)
return result
### PUBLIC METHODS ###
def annotate(self, annotation_map):
return type(self)(
node_id=self.node_id,
controls=self.controls,
synthdef_name=self.synthdef_name,
name=annotation_map.get(self.node_id),
**dict(self.extra),
)
@classmethod
def from_response(cls, response):
return cls(
node_id=response.node_id,
controls=[
QueryTreeControl(control_name_or_index=name, control_value=value)
                for name, value in (response.synthdef_controls or {}).items()
],
synthdef_name=response.synthdef_name,
)
@classmethod
def from_synth(cls, synth, include_controls=False):
import supriya.commands
import supriya.realtime
import supriya.synthdefs
assert isinstance(synth, supriya.realtime.Synth)
node_id = synth.node_id
synthdef_name = synth.synthdef
if isinstance(synthdef_name, supriya.synthdefs.SynthDef):
synthdef_name = synthdef_name.actual_name
controls = []
if include_controls:
for control_name in synth:
control = QueryTreeControl.from_control(synth.controls[control_name])
controls.append(control)
query_tree_synth = QueryTreeSynth(
node_id=node_id, synthdef_name=synthdef_name, controls=tuple(controls)
)
return query_tree_synth
def to_dict(self):
"""
Convert QueryTreeSynth to JSON-serializable dictionary.
::
>>> query_tree_synth = supriya.querytree.QueryTreeSynth(
... node_id=1001,
... synthdef_name="c1aa521afab5b0c0ce3d744690951649",
... controls=(
... supriya.querytree.QueryTreeControl(
... control_name_or_index="level", control_value=1.0,
... ),
... supriya.querytree.QueryTreeControl(
... control_name_or_index="out", control_value=0.0,
... ),
... ),
... )
::
>>> import json
>>> result = query_tree_synth.to_dict()
>>> result = json.dumps(result, indent=4, separators=(",", ": "), sort_keys=True,)
>>> print(result)
{
"controls": {
"level": 1.0,
"out": 0.0
},
"node_id": 1001,
"synthdef": "c1aa521afab5b0c0ce3d744690951649"
}
"""
result = {
"node_id": self.node_id,
"synthdef": self.synthdef_name,
"controls": {},
}
for control in self.controls:
name = control.control_name_or_index
result["controls"][name] = control.control_value
return result
### PUBLIC PROPERTIES ###
@property
def controls(self):
return self._controls
@property
def extra(self):
return self._extra
@property
def name(self):
return self._name
@property
def node_id(self):
return self._node_id
@property
def synthdef_name(self):
return self._synthdef_name
class QueryTreeGroup(SupriyaValueObject, Sequence):
### CLASS VARIABLES ###
__slots__ = ("_children", "_extra", "_name", "_node_id")
### INITIALIZER ###
def __init__(self, node_id=None, children=None, name=None, **extra):
self._children = tuple(children or ())
self._extra = tuple(sorted(extra.items()))
self._node_id = node_id
self._name = name
### SPECIAL METHODS ###
def __format__(self, format_spec):
result = self._get_str_format_pieces(unindexed=format_spec == "unindexed")
result = "\n".join(result)
result = "NODE TREE {}".format(result)
return result
def __getitem__(self, item):
return self._children[item]
def __len__(self):
return len(self._children)
def __str__(self):
result = self._get_str_format_pieces()
result = "\n".join(result)
result = "NODE TREE {}".format(result)
return result
### PRIVATE METHODS ###
@classmethod
def _from_nrt_group(
cls,
state,
node,
include_controls=False,
include_timespans=False,
id_mapping=None,
):
import supriya.commands
import supriya.nonrealtime
assert isinstance(node, supriya.nonrealtime.Group)
node_id = node.session_id
children = []
for child in state.nodes_to_children.get(node) or ():
if isinstance(child, supriya.nonrealtime.Group):
child = QueryTreeGroup._from_nrt_group(
state,
child,
include_controls=include_controls,
include_timespans=include_timespans,
id_mapping=id_mapping,
)
elif isinstance(child, supriya.nonrealtime.Synth):
child = QueryTreeSynth._from_nrt_synth(
state,
child,
include_controls=include_controls,
include_timespans=include_timespans,
id_mapping=id_mapping,
)
else:
raise ValueError(child)
children.append(child)
children = tuple(children)
extra = {}
if include_timespans:
extra.update(timespan=[node.start_offset, node.stop_offset])
query_tree_group = QueryTreeGroup(node_id=node_id, children=children, **extra)
return query_tree_group
def _get_str_format_pieces(self, unindexed=False):
result = []
node_id = self.node_id
if unindexed:
node_id = "..."
string = f"{node_id} group"
if self.name:
string += f" ({self.name})"
if self.extra:
string += (
" ("
+ ", ".join("{}: {}".format(key, value) for key, value in self.extra)
+ ")"
)
result.append(string)
for child in self.children:
for line in child._get_str_format_pieces(unindexed=unindexed):
result.append(" {}".format(line))
return result
### PUBLIC METHODS ###
def annotate(self, annotation_map):
return type(self)(
node_id=self.node_id,
children=[child.annotate(annotation_map) for child in self.children],
name=annotation_map.get(self.node_id),
**dict(self.extra),
)
@classmethod
def from_group(cls, group, include_controls=False):
import supriya.commands
import supriya.realtime
assert isinstance(group, supriya.realtime.Group)
node_id = group.node_id
children = []
for child in group.children:
if isinstance(child, supriya.realtime.Group):
child = QueryTreeGroup.from_group(
child, include_controls=include_controls
)
elif isinstance(child, supriya.realtime.Synth):
child = QueryTreeSynth.from_synth(
child, include_controls=include_controls
)
else:
raise ValueError(child)
children.append(child)
children = tuple(children)
query_tree_group = QueryTreeGroup(node_id=node_id, children=children)
return query_tree_group
@classmethod
def from_response(cls, response):
return cls(node_id=response.node_id)
@classmethod
def from_state(cls, state, include_controls=False, include_timespans=False):
id_mapping = state.session._build_id_mapping()
root_node = state.session.root_node
query_tree_group = cls._from_nrt_group(
state,
root_node,
include_controls=include_controls,
include_timespans=include_timespans,
id_mapping=id_mapping,
)
return query_tree_group
def to_dict(self):
"""
Convert QueryTreeGroup to JSON-serializable dictionary.
::
>>> query_tree_group = supriya.querytree.QueryTreeGroup(
... node_id=1002,
... children=(
... supriya.querytree.QueryTreeSynth(
... node_id=1105,
... synthdef_name="dca557070c6b57164557041ac746fb3f",
... controls=(
... supriya.querytree.QueryTreeControl(
... control_name_or_index="damping",
... control_value=0.06623425334692,
... ),
... supriya.querytree.QueryTreeControl(
... control_name_or_index="duration",
... control_value=3.652155876159668,
... ),
... supriya.querytree.QueryTreeControl(
... control_name_or_index="level",
... control_value=0.894767701625824,
... ),
... supriya.querytree.QueryTreeControl(
... control_name_or_index="out", control_value=16.0,
... ),
... supriya.querytree.QueryTreeControl(
... control_name_or_index="room_size",
... control_value=0.918643176555634,
... ),
... ),
... ),
... supriya.querytree.QueryTreeSynth(
... node_id=1098,
... synthdef_name="cc754c63533fdcf412a44ef6adb1a8f0",
... controls=(
... supriya.querytree.QueryTreeControl(
... control_name_or_index="duration",
... control_value=5.701356887817383,
... ),
... supriya.querytree.QueryTreeControl(
... control_name_or_index="level",
... control_value=0.959683060646057,
... ),
... supriya.querytree.QueryTreeControl(
... control_name_or_index="out", control_value=16.0,
... ),
... supriya.querytree.QueryTreeControl(
... control_name_or_index="pitch_dispersion",
... control_value=0.040342573076487,
... ),
... supriya.querytree.QueryTreeControl(
... control_name_or_index="pitch_shift",
... control_value=10.517594337463379,
... ),
... supriya.querytree.QueryTreeControl(
... control_name_or_index="time_dispersion",
... control_value=0.666014134883881,
... ),
... supriya.querytree.QueryTreeControl(
... control_name_or_index="window_size",
... control_value=1.014111995697021,
... ),
... ),
... ),
... ),
... )
::
>>> import json
>>> result = query_tree_group.to_dict()
>>> result = json.dumps(result, indent=4, separators=(",", ": "), sort_keys=True,)
>>> print(result)
{
"children": [
{
"controls": {
"damping": 0.06623425334692,
"duration": 3.652155876159668,
"level": 0.894767701625824,
"out": 16.0,
"room_size": 0.918643176555634
},
"node_id": 1105,
"synthdef": "dca557070c6b57164557041ac746fb3f"
},
{
"controls": {
"duration": 5.701356887817383,
"level": 0.959683060646057,
"out": 16.0,
"pitch_dispersion": 0.040342573076487,
"pitch_shift": 10.517594337463379,
"time_dispersion": 0.666014134883881,
"window_size": 1.014111995697021
},
"node_id": 1098,
"synthdef": "cc754c63533fdcf412a44ef6adb1a8f0"
}
],
"node_id": 1002
}
"""
result = {
"node_id": self.node_id,
"children": [x.to_dict() for x in self.children],
}
return result
### PUBLIC PROPERTIES ###
@property
def children(self):
return self._children
@property
def extra(self):
return self._extra
@property
def name(self):
return self._name
@property
def node_id(self):
return self._node_id
```
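To make the string and dictionary renderings above concrete, here is a small hand-built tree; the node IDs and the synthdef name are made up for illustration.

```python
# A small hand-built tree exercising the helpers above; IDs and the synthdef
# name are illustrative.
from supriya.querytree import QueryTreeGroup, QueryTreeSynth

tree = QueryTreeGroup(
    node_id=0,
    children=[
        QueryTreeGroup(
            node_id=1,
            children=[
                QueryTreeSynth(node_id=1000, synthdef_name="default", controls=()),
            ],
        ),
    ],
)
print(str(tree))                    # "NODE TREE 0 group" with indented children
print(format(tree, "unindexed"))    # same tree with node ids replaced by "..."
annotated = tree.annotate({1000: "lead"})  # names attached wherever the map has an id
print(annotated.to_dict())          # nested dicts suitable for json.dumps
```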
#### File: supriya/realtime/servers.py
```python
import asyncio
import logging
import re
import threading
from typing import Optional, Set
from uqbar.objects import new
import supriya.exceptions
from supriya.commands import ( # type: ignore
FailResponse,
GroupNewRequest,
GroupQueryTreeRequest,
NotifyRequest,
QuitRequest,
SyncRequest,
)
from supriya.enums import CalculationRate, NodeAction
from supriya.osc.protocols import (
AsyncOscProtocol,
HealthCheck,
OscProtocolOffline,
ThreadedOscProtocol,
)
from supriya.querytree import QueryTreeGroup, QueryTreeSynth
from supriya.scsynth import Options, find
from .allocators import BlockAllocator, NodeIdAllocator
from .buffers import Buffer, BufferGroup
from .buses import Bus, BusGroup
from .meters import Meters
from .nodes import Group, Synth
from .protocols import AsyncProcessProtocol, SyncProcessProtocol
from .recorder import Recorder
try:
from .shm import ServerSHM
except (ImportError, ModuleNotFoundError):
ServerSHM = None
logger = logging.getLogger("supriya.server")
DEFAULT_IP_ADDRESS = "127.0.0.1"
DEFAULT_PORT = 57110
class BaseServer:
### INITIALIZER ###
def __init__(self):
# address
self._ip_address = None
self._port = None
# process
self._client_id = None
self._is_owner = False
self._is_running = False
self._latency = 0.1
self._maximum_logins = None
self._options = Options()
self._osc_protocol = None
self._process_protocol = None
self._status = None
self._shm = None
# allocators
self._audio_bus_allocator = None
self._buffer_allocator = None
self._control_bus_allocator = None
self._node_id_allocator = None
self._sync_id = 0
# proxy mappings
self._synthdefs = {}
### SPECIAL METHODS ###
def __repr__(self):
if not self.is_running:
return f"<{type(self).__name__}: offline>"
string = "<{name}: {protocol}://{ip}:{port}, "
string += "{inputs}i{outputs}o>"
return string.format(
name=type(self).__name__,
protocol=self.options.protocol,
ip=self.ip_address,
port=self.port,
inputs=self.options.input_bus_channel_count,
outputs=self.options.output_bus_channel_count,
)
### PRIVATE METHODS ###
def _handle_failed_response(self, message):
logger.warning("Fail: {}".format(message))
def _handle_status_reply_response(self, message):
from supriya.commands import Response
response = Response.from_osc_message(message)
self._status = response
def _handle_synthdef_removed_response(self, message):
from supriya.commands import Response
response = Response.from_osc_message(message)
synthdef_name = response.synthdef_name
self._synthdefs.pop(synthdef_name, None)
def _setup_allocators(self):
self._audio_bus_allocator = BlockAllocator(
heap_maximum=self._options.audio_bus_channel_count,
heap_minimum=self._options.first_private_bus_id,
)
self._buffer_allocator = BlockAllocator(heap_maximum=self._options.buffer_count)
self._control_bus_allocator = BlockAllocator(
heap_maximum=self._options.control_bus_channel_count
)
self._node_id_allocator = NodeIdAllocator(
initial_node_id=self._options.initial_node_id, client_id=self.client_id
)
self._sync_id = self.client_id << 26
def _setup_osc_callbacks(self):
self._osc_protocol.register(
pattern="/d_removed", procedure=self._handle_synthdef_removed_response
)
self._osc_protocol.register(
pattern="/status.reply", procedure=self._handle_status_reply_response
)
self._osc_protocol.register(
pattern="/fail", procedure=self._handle_failed_response
)
def _setup_shm(self):
if ServerSHM is None:
return
self._shm = ServerSHM(self.port, self.options.control_bus_channel_count)
def _teardown_allocators(self):
self._audio_bus_allocator = None
self._buffer_allocator = None
self._control_bus_allocator = None
self._node_id_allocator = None
self._sync_id = 0
def _teardown_shm(self):
self._shm = None
### PUBLIC METHODS ###
def send(self, message):
if not message:
raise ValueError
if not self.is_running:
raise supriya.exceptions.ServerOffline
self._osc_protocol.send(message)
return self
### PUBLIC PROPERTIES ###
@property
def audio_bus_allocator(self):
return self._audio_bus_allocator
@property
def buffer_allocator(self):
return self._buffer_allocator
@property
def client_id(self):
return self._client_id
@property
def control_bus_allocator(self):
return self._control_bus_allocator
@property
def ip_address(self):
return self._ip_address
@property
def is_owner(self):
return self._is_owner
@property
def is_running(self):
return self._is_running
@property
def latency(self):
return self._latency
@latency.setter
def latency(self, latency):
self._latency = float(latency)
@property
def maximum_logins(self):
return self._maximum_logins
@property
def next_sync_id(self):
sync_id = self._sync_id
self._sync_id += 1
return sync_id
@property
def node_id_allocator(self):
return self._node_id_allocator
@property
def osc_protocol(self):
return self._osc_protocol
@property
def options(self):
return self._options
@property
def port(self):
return self._port
@property
def process_protocol(self):
return self._process_protocol
@property
def status(self):
return self._status
class AsyncServer(BaseServer):
### CLASS VARIABLES ###
_servers: Set["AsyncServer"] = set()
    ### INITIALIZER ###
def __init__(self):
BaseServer.__init__(self)
self._boot_future = None
self._quit_future = None
### SPECIAL METHODS ###
def __contains__(self, expr):
if isinstance(expr, supriya.synthdefs.SynthDef):
name = expr.actual_name
if name in self._synthdefs and self._synthdefs[name] == expr:
return True
return False
### PRIVATE METHODS ###
async def _connect(self):
self._osc_protocol = AsyncOscProtocol()
await self._osc_protocol.connect(
ip_address=self._ip_address,
port=self._port,
healthcheck=HealthCheck(
request_pattern=["/status"],
response_pattern=["/status.reply"],
callback=self._shutdown,
max_attempts=5,
timeout=1.0,
backoff_factor=1.5,
),
)
self._is_running = True
self._setup_osc_callbacks()
await self._setup_notifications()
self._setup_allocators()
if self.client_id == 0:
await self._setup_default_groups()
await self._setup_system_synthdefs()
self.boot_future.set_result(True)
self._servers.add(self)
async def _disconnect(self):
self._is_running = False
self._is_owner = False
self._client_id = None
self._maximum_logins = None
self._teardown_shm()
await self._osc_protocol.disconnect()
await self._osc_protocol.exit_future
self._teardown_allocators()
if self in self._servers:
self._servers.remove(self)
self.quit_future.set_result(True)
if not self.boot_future.done():
self.boot_future.set_result(False)
async def _setup_default_groups(self):
request = GroupNewRequest(
items=[
GroupNewRequest.Item(1, i, 0) for i in range(1, self.maximum_logins + 1)
]
)
self.send(request.to_osc())
async def _setup_notifications(self):
request = NotifyRequest(True)
response = await request.communicate_async(server=self)
if isinstance(response, FailResponse):
await self._shutdown()
raise supriya.exceptions.TooManyClients
self._client_id, self._maximum_logins = response.action[1], response.action[2]
async def _setup_system_synthdefs(self):
pass
async def _shutdown(self):
if not self.is_running:
return
elif self.is_owner:
await self.quit()
else:
await self.disconnect()
### PUBLIC METHODS ###
async def boot(
self,
*,
ip_address: str = DEFAULT_IP_ADDRESS,
port: int = DEFAULT_PORT,
scsynth_path: Optional[str] = None,
options: Optional[Options] = None,
**kwargs,
) -> "AsyncServer":
if self._is_running:
raise supriya.exceptions.ServerOnline
port = port or DEFAULT_PORT
loop = asyncio.get_running_loop()
self._boot_future = loop.create_future()
self._quit_future = loop.create_future()
self._options = new(options or Options(), **kwargs)
scsynth_path = find(scsynth_path)
self._process_protocol = AsyncProcessProtocol()
await self._process_protocol.boot(self._options, scsynth_path, port)
if not await self._process_protocol.boot_future:
self._boot_future.set_result(False)
self._quit_future.set_result(True)
raise supriya.exceptions.ServerCannotBoot
self._ip_address = ip_address
self._is_owner = True
self._port = port
await self._connect()
return self
async def connect(
self, *, ip_address: str = DEFAULT_IP_ADDRESS, port: int = DEFAULT_PORT
) -> "AsyncServer":
if self._is_running:
raise supriya.exceptions.ServerOnline
loop = asyncio.get_running_loop()
self._boot_future = loop.create_future()
self._quit_future = loop.create_future()
self._ip_address = ip_address
self._is_owner = False
self._port = port
await self._connect()
return self
async def disconnect(self) -> "AsyncServer":
if not self._is_running:
raise supriya.exceptions.ServerOffline
if self._is_owner:
raise supriya.exceptions.OwnedServerShutdown(
"Cannot disconnect from owned server."
)
await self._disconnect()
return self
async def query(self, include_controls=True) -> QueryTreeGroup:
request = GroupQueryTreeRequest(node_id=0, include_controls=include_controls)
response = await request.communicate_async(server=self)
return response.query_tree_group
async def quit(self, force: bool = False) -> "AsyncServer":
if not self._is_running:
return self
if not self._is_owner and not force:
raise supriya.exceptions.UnownedServerShutdown(
"Cannot quit unowned server without force flag."
)
try:
await QuitRequest().communicate_async(server=self, sync=True, timeout=1)
except (OscProtocolOffline, asyncio.TimeoutError):
pass
if self._process_protocol is not None:
self._process_protocol.quit()
await self._disconnect()
return self
### PUBLIC PROPERTIES ###
@property
def boot_future(self):
return self._boot_future
@property
def default_group(self):
return self.client_id + 1
@property
def quit_future(self):
return self._quit_future
class Server(BaseServer):
"""
An scsynth server proxy.
::
>>> import supriya.realtime
>>> server = supriya.realtime.Server.default()
>>> server.boot()
<Server: udp://127.0.0.1:57110, 8i8o>
::
>>> server.quit()
<Server: offline>
"""
### CLASS VARIABLES ###
_default_server = None
_servers: Set["Server"] = set()
### INITIALIZER ###
def __init__(self):
BaseServer.__init__(self)
self._lock = threading.RLock()
# proxies
self._audio_input_bus_group = None
self._audio_output_bus_group = None
self._default_group = None
self._root_node = None
self._meters = Meters(self)
self._recorder = Recorder(self)
# proxy mappings
self._audio_buses = {}
self._buffer_proxies = {}
self._buffers = {}
self._control_bus_proxies = {}
self._control_buses = {}
self._nodes = {}
self._pending_synths = {}
### SPECIAL METHODS ###
def __contains__(self, expr):
import supriya.realtime
import supriya.synthdefs
if isinstance(expr, supriya.realtime.Node):
if expr.server is not self:
return False
node_id = expr.node_id
if node_id in self._nodes and self._nodes[node_id] is expr:
return True
elif isinstance(expr, supriya.synthdefs.SynthDef):
name = expr.actual_name
if name in self._synthdefs and self._synthdefs[name] == expr:
return True
elif isinstance(expr, supriya.realtime.ServerObject):
return expr.server is self
return False
def __getitem__(self, item):
"""
Get ``item`` from server.
::
>>> server = supriya.Server.default().boot()
>>> supriya.Synth(name="foo").allocate()
<+ Synth: 1000 default (foo)>
::
>>> server[1000]
<+ Synth: 1000 default (foo)>
::
>>> server["foo"]
<+ Synth: 1000 default (foo)>
::
>>> server["b10"]
<+ Buffer: 10>
::
>>> server["a0"]
<+ Bus: 0 (audio)>
::
>>> server["c16"]
<+ Bus: 16 (control)>
::
>>> server = server.quit()
>>> server["c16"]
Traceback (most recent call last):
...
supriya.exceptions.ServerOffline
"""
import supriya
if not self.is_running:
raise supriya.exceptions.ServerOffline
if isinstance(item, str):
match = re.match(r"b(?P<id>\d+)", item)
if match:
id_ = int(match.groupdict()["id"])
return supriya.realtime.Buffer(id_).allocate(server=self)
match = re.match(r"c(?P<id>\d+)", item)
if match:
id_ = int(match.groupdict()["id"])
return supriya.realtime.Bus(id_, "control").allocate(server=self)
match = re.match(r"a(?P<id>\d+)", item)
if match:
id_ = int(match.groupdict()["id"])
return supriya.realtime.Bus(id_, "audio").allocate(server=self)
result = self.root_node[item]
elif isinstance(item, int):
result = self._nodes.get(item)
else:
raise ValueError(item)
if isinstance(result, set) and len(result) == 1:
return tuple(result)[0]
return result
def __graph__(self):
"""
Graph server.
::
>>> import supriya
>>> server = supriya.Server.default().boot()
>>> group = supriya.Group(
... [supriya.Synth(), supriya.Group([supriya.Synth(), supriya.Synth(),]),]
... ).allocate()
::
>>> graph = server.__graph__()
>>> print(format(graph, "graphviz"))
digraph G {
graph [bgcolor=transparent,
color=lightslategrey,
dpi=72,
fontname=Arial,
outputorder=edgesfirst,
overlap=prism,
penwidth=2,
rankdir=TB,
ranksep=0.5,
splines=spline,
style="dotted, rounded"];
node [fontname=Arial,
fontsize=12,
penwidth=2,
shape=Mrecord,
style="filled, rounded"];
edge [penwidth=2];
"root-node-0" [fillcolor=lightsalmon2,
label="{ <f_0_0> RootNode | <f_0_1> id: 0 }"];
"group-1" [fillcolor=lightsteelblue2,
label="{ <f_0_0> Group | <f_0_1> id: 1 }"];
"group-1000" [fillcolor=lightsteelblue2,
label="{ <f_0_0> Group | <f_0_1> id: 1000 }"];
"synth-1001" [fillcolor=lightgoldenrod2,
label="{ <f_0_0> Synth | <f_0_1> id: 1001 }"];
"group-1002" [fillcolor=lightsteelblue2,
label="{ <f_0_0> Group | <f_0_1> id: 1002 }"];
"synth-1003" [fillcolor=lightgoldenrod2,
label="{ <f_0_0> Synth | <f_0_1> id: 1003 }"];
"synth-1004" [fillcolor=lightgoldenrod2,
label="{ <f_0_0> Synth | <f_0_1> id: 1004 }"];
"root-node-0" -> "group-1";
"group-1" -> "group-1000";
"group-1000" -> "synth-1001";
"group-1000" -> "group-1002";
"group-1002" -> "synth-1003";
"group-1002" -> "synth-1004";
}
::
>>> supriya.graph(server) # doctest: +SKIP
"""
return self.root_node.__graph__()
### PRIVATE METHODS ###
def _as_node_target(self):
return self.default_group
def _connect(self):
self._osc_protocol = ThreadedOscProtocol()
self._osc_protocol.connect(
ip_address=self.ip_address,
port=self.port,
healthcheck=HealthCheck(
request_pattern=["/status"],
response_pattern=["/status.reply"],
callback=self._shutdown,
max_attempts=5,
timeout=1.0,
backoff_factor=1.5,
),
)
self._is_running = True
self._setup_osc_callbacks()
self._setup_notifications()
self._setup_allocators()
self._setup_proxies()
if self.client_id == 0:
self._setup_default_groups()
self._setup_system_synthdefs()
self._servers.add(self)
def _disconnect(self):
logger.info("disconnecting")
self._is_running = False
self._is_owner = False
self._client_id = None
self._maximum_logins = None
self._teardown_shm()
self._osc_protocol.disconnect()
self._teardown_proxies()
self._teardown_allocators()
if self in self._servers:
self._servers.remove(self)
logger.info("disconnected")
def _get_buffer_proxy(self, buffer_id):
import supriya.realtime
buffer_proxy = self._buffer_proxies.get(buffer_id)
if not buffer_proxy:
buffer_proxy = supriya.realtime.BufferProxy(
buffer_id=buffer_id, server=self
)
self._buffer_proxies[buffer_id] = buffer_proxy
return buffer_proxy
def _get_control_bus_proxy(self, bus_id):
import supriya.realtime
import supriya.synthdefs
control_bus_proxy = self._control_bus_proxies.get(bus_id)
if not control_bus_proxy:
control_bus_proxy = supriya.realtime.BusProxy(
bus_id=bus_id,
calculation_rate=supriya.CalculationRate.CONTROL,
server=self,
)
self._control_bus_proxies[bus_id] = control_bus_proxy
return control_bus_proxy
def _handle_buffer_info_response(self, message):
from supriya.commands import Response
response = Response.from_osc_message(message)
for item in response.items:
buffer_proxy = self._get_buffer_proxy(item.buffer_id)
if buffer_proxy:
buffer_proxy._handle_response(item)
def _handle_control_bus_set_response(self, message):
from supriya.commands import Response
response = Response.from_osc_message(message)
for item in response:
bus_id = item.bus_id
bus_proxy = self._get_control_bus_proxy(bus_id)
bus_proxy._value = item.bus_value
def _handle_control_bus_setn_response(self, message):
from supriya.commands import Response
response = Response.from_osc_message(message)
for item in response:
starting_bus_id = item.starting_bus_id
for i, value in enumerate(item.bus_values):
bus_id = starting_bus_id + i
bus_proxy = self._get_control_bus_proxy(bus_id)
bus_proxy._value = value
def _handle_node_info_response(self, message):
from supriya.commands import Response
from supriya.realtime import Group, Synth
response = Response.from_osc_message(message)
with self._lock:
node_id = response.node_id
node = self._nodes.get(node_id)
if node is not None:
node._handle_response(response)
elif response.action == NodeAction.NODE_CREATED:
if response.is_group:
node = Group()
else:
node = self._pending_synths.pop(node_id, Synth())
node._register_with_local_server(server=self, node_id=response.node_id)
parent = self._nodes[response.parent_id]
node._set_parent(parent)
if response.previous_node_id:
previous_child = self._nodes[response.previous_node_id]
index = parent.index(previous_child)
parent._children.insert(index + 1, node)
else:
parent._children.append(node)
def _handle_synthdef_removed_response(self, message):
from supriya.commands import Response
response = Response.from_osc_message(message)
synthdef_name = response.synthdef_name
self._synthdefs.pop(synthdef_name, None)
def _rehydrate(self):
from supriya.realtime import Group, Synth
def recurse(query_tree_node, node):
for query_tree_child in query_tree_node.children:
if isinstance(query_tree_child, QueryTreeGroup):
group = Group()
group._register_with_local_server(
node_id=query_tree_child.node_id, server=self
)
node._children.append(group)
recurse(query_tree_child, group)
elif isinstance(query_tree_child, QueryTreeSynth):
synth = Synth()
synth._register_with_local_server(
node_id=query_tree_child.node_id, server=self
)
node._children.append(synth)
                    # Control rehydration is not implemented yet; QueryTreeSynth
                    # exposes ``controls`` rather than ``children``.
                    for query_tree_control in query_tree_child.controls:
                        pass
recurse(self.query(), self.root_node)
def _setup_notifications(self):
request = NotifyRequest(True)
response = request.communicate(server=self)
if isinstance(response, FailResponse):
self._shutdown()
raise supriya.exceptions.TooManyClients
if len(response.action) == 2: # supernova doesn't provide a max logins value
self._client_id, self._maximum_logins = response.action[1], 1
else:
self._client_id, self._maximum_logins = response.action[1:3]
def _setup_default_groups(self):
default_groups = [
supriya.Group(node_id_is_permanent=True) for _ in range(self.maximum_logins)
]
self.root_node.extend(default_groups)
self._default_group = default_groups[self.client_id]
def _setup_proxies(self):
import supriya.realtime
self._audio_input_bus_group = supriya.realtime.AudioInputBusGroup(self)
self._audio_output_bus_group = supriya.realtime.AudioOutputBusGroup(self)
self._root_node = supriya.realtime.RootNode(server=self)
self._nodes[0] = self._root_node
def _setup_osc_callbacks(self):
super()._setup_osc_callbacks()
self._osc_protocol.register(
pattern="/b_info", procedure=self._handle_buffer_info_response
)
self._osc_protocol.register(
pattern="/c_set", procedure=self._handle_control_bus_set_response
)
self._osc_protocol.register(
pattern="/c_setn", procedure=self._handle_control_bus_setn_response
)
for pattern in (
"/n_end",
"/n_go",
"/n_info",
"/n_move",
"/n_off",
"/n_on",
"/n_set",
"/n_setn",
):
self._osc_protocol.register(
pattern=pattern, procedure=self._handle_node_info_response
)
def _setup_system_synthdefs(self, local_only=False):
import supriya.assets.synthdefs
import supriya.synthdefs
system_synthdefs = []
for name in dir(supriya.assets.synthdefs):
if not name.startswith("system_"):
continue
system_synthdef = getattr(supriya.assets.synthdefs, name)
if not isinstance(system_synthdef, supriya.synthdefs.SynthDef):
continue
system_synthdefs.append(system_synthdef)
if local_only:
for synthdef in system_synthdefs:
synthdef._register_with_local_server(self)
else:
supriya.synthdefs.SynthDef._allocate_synthdefs(system_synthdefs, self)
def _teardown_proxies(self):
for set_ in tuple(self._audio_buses.values()):
for x in tuple(set_):
x.free()
for set_ in tuple(self._control_buses.values()):
for x in tuple(set_):
x.free()
for set_ in tuple(self._buffers.values()):
for x in tuple(set_):
x.free()
for x in tuple(self._nodes.values()):
x.free()
self._audio_buses.clear()
self._audio_input_bus_group = None
self._audio_output_bus_group = None
self._buffers.clear()
self._buffer_proxies.clear()
self._control_buses.clear()
self._control_bus_proxies.clear()
self._default_group = None
self._nodes.clear()
self._root_node = None
self._synthdefs.clear()
def _shutdown(self):
if not self.is_running:
return
logger.info("shutting down")
if self.is_owner:
self.quit()
else:
self.disconnect()
### PUBLIC METHODS ###
def add_buffer(
self,
channel_count: int = None,
frame_count: int = None,
starting_frame: int = None,
file_path: str = None,
) -> Buffer:
"""
Add a buffer.
::
>>> server = supriya.Server().boot()
>>> server.add_buffer(channel_count=2, frame_count=1024)
<+ Buffer: 0>
"""
buffer_ = Buffer()
if file_path:
channel_indices = None
if channel_count:
channel_indices = tuple(range(channel_count))
buffer_.allocate_from_file(
file_path,
channel_indices=channel_indices,
frame_count=frame_count,
server=self,
starting_frame=starting_frame,
)
else:
buffer_.allocate(
channel_count=channel_count, frame_count=frame_count, server=self
)
return buffer_
def add_buffer_group(
self, buffer_count: int = 1, channel_count: int = None, frame_count: int = None
) -> BufferGroup:
"""
Add a buffer group.
::
>>> server = supriya.Server().boot()
>>> server.add_buffer_group(buffer_count=8, channel_count=1, frame_count=1024)
<+ BufferGroup{8}: 0>
"""
buffer_group = BufferGroup(buffer_count)
buffer_group.allocate(
channel_count=channel_count, frame_count=frame_count, server=self
)
return buffer_group
def add_bus(self, calculation_rate: int = CalculationRate.CONTROL) -> Bus:
"""
Add a bus.
::
>>> server = supriya.Server().boot()
>>> server.add_bus()
<+ Bus: 0 (control)>
"""
bus = Bus(calculation_rate=calculation_rate)
bus.allocate(server=self)
return bus
def add_bus_group(
self, bus_count: int = 1, calculation_rate: int = CalculationRate.CONTROL
) -> BusGroup:
"""
Add a bus group.
::
>>> server = supriya.Server().boot()
>>> server.add_bus_group(4, "audio")
<+ BusGroup{4}: 16 (audio)>
"""
bus_group = BusGroup(bus_count=bus_count, calculation_rate=calculation_rate)
bus_group.allocate(server=self)
return bus_group
def add_group(self, add_action: int = None) -> Group:
"""
Add a group relative to the default group via ``add_action``.
::
>>> server = supriya.Server().boot()
>>> print(server.query())
NODE TREE 0 group
1 group
::
>>> group = server.add_group()
>>> print(server.query())
NODE TREE 0 group
1 group
1000 group
"""
return self.default_group.add_group(add_action=add_action)
def add_synth(self, synthdef=None, add_action: int = None, **kwargs) -> Synth:
"""
Add a synth relative to the default group via ``add_action``.
::
>>> server = supriya.Server().boot()
>>> print(server.query())
NODE TREE 0 group
1 group
::
>>> synth = server.add_synth()
>>> print(server.query())
NODE TREE 0 group
1 group
1000 default
out: 0.0, amplitude: 0.1, frequency: 440.0, gate: 1.0, pan: 0.5
"""
return self.default_group.add_synth(
synthdef=synthdef, add_action=add_action, **kwargs
)
def add_synthdef(self, synthdef) -> "Server":
synthdef.allocate(server=self)
return self
def boot(
self,
*,
ip_address: str = DEFAULT_IP_ADDRESS,
port: int = DEFAULT_PORT,
scsynth_path: Optional[str] = None,
options: Optional[Options] = None,
**kwargs,
):
if self.is_running:
raise supriya.exceptions.ServerOnline
port = port or DEFAULT_PORT
self._options = new(options or Options(), **kwargs)
scsynth_path = find(scsynth_path)
self._process_protocol = SyncProcessProtocol()
self._process_protocol.boot(self._options, scsynth_path, port)
self._ip_address = ip_address
self._is_owner = True
self._port = port
self._connect()
return self
def connect(
self, *, ip_address: str = DEFAULT_IP_ADDRESS, port: int = DEFAULT_PORT
) -> "Server":
if self.is_running:
raise supriya.exceptions.ServerOnline
self._ip_address = ip_address
self._is_owner = False
self._port = port
self._connect()
if self.client_id > 0:
self._setup_system_synthdefs(local_only=True)
self._rehydrate()
self._default_group = self._nodes[self.client_id + 1]
return self
def disconnect(self) -> "Server":
if not self.is_running:
raise supriya.exceptions.ServerOffline
if self._is_owner:
raise supriya.exceptions.OwnedServerShutdown(
"Cannot disconnect from owned server."
)
self._disconnect()
return self
def quit(self, force: bool = False) -> "Server":
if not self.is_running:
return self
if not self._is_owner and not force:
raise supriya.exceptions.UnownedServerShutdown(
"Cannot quit unowned server without force flag."
)
if self.recorder.is_recording:
self.recorder.stop()
try:
QuitRequest().communicate(server=self)
except OscProtocolOffline:
pass
if self._process_protocol is not None:
self._process_protocol.quit()
self._disconnect()
return self
@classmethod
def default(cls) -> "Server":
if cls._default_server is None:
cls._default_server = Server()
return cls._default_server
def query(self, include_controls=True) -> QueryTreeGroup:
request = GroupQueryTreeRequest(node_id=0, include_controls=include_controls)
response = request.communicate(server=self)
return response.query_tree_group
def reboot(self, options: Optional[Options] = None, **kwargs) -> "Server":
self.quit()
self.boot(options=options, **kwargs)
return self
def reset(self) -> "Server":
self.send(["/d_freeAll"])
self.send(["/g_freeAll", 0])
self.send(["/clearSched"])
self.sync()
self._teardown_proxies()
self._teardown_allocators()
self._setup_allocators()
self._setup_proxies()
self._setup_default_groups()
self._setup_system_synthdefs()
self.sync()
return self
def sync(self, sync_id: Optional[int] = None) -> "Server":
if not self.is_running:
return self
if sync_id is None:
sync_id = self.next_sync_id
request = SyncRequest(sync_id=sync_id)
request.communicate(server=self)
return self
### PUBLIC PROPERTIES ###
@property
def audio_input_bus_group(self):
return self._audio_input_bus_group
@property
def audio_output_bus_group(self):
return self._audio_output_bus_group
@property
def default_group(self):
return self._default_group
@property
def meters(self):
return self._meters
@property
def recorder(self):
return self._recorder
@property
def root_node(self):
return self._root_node
```
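Pulling the docstrings above together, a typical realtime session looks like the following. It needs a local scsynth binary on the path, so treat it as an illustrative sketch rather than something guaranteed to run everywhere; the control value is arbitrary.

```python
# An end-to-end sketch of the Server lifecycle documented above.
# Requires a local scsynth installation.
import supriya

server = supriya.Server.default().boot()
group = server.add_group()               # added relative to the default group
synth = server.add_synth(amplitude=0.1)  # the default synthdef, per the docstring
print(server.query())                    # prints the NODE TREE, controls included
print(server[group.node_id])             # nodes are addressable by id once created
server.quit()
```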
#### File: tests/patterns/test_ChainPattern.py
```python
import pytest
from supriya.patterns import ChainPattern, EventPattern, NoteEvent, SequencePattern
from supriya.patterns.testutils import MockUUID as M
from supriya.patterns.testutils import run_pattern_test
@pytest.mark.parametrize(
"stop_at, input_a, input_b1, input_b2, input_c, expected, is_infinite",
[
(
None,
SequencePattern([1, 2, 3]),
SequencePattern([4, 5]),
SequencePattern([7, 8, 9]),
SequencePattern([10, 11]),
[NoteEvent(M("A"), a=1, b=7, c=10), NoteEvent(M("B"), a=2, b=8, c=11)],
False,
),
(
None,
SequencePattern([1, 2, 3], None),
SequencePattern([4, 5], None),
SequencePattern([7, 8, 9]),
SequencePattern([10, 11]),
[NoteEvent(M("A"), a=1, b=7, c=10), NoteEvent(M("B"), a=2, b=8, c=11)],
False,
),
(
None,
SequencePattern([1, 2, 3], None),
SequencePattern([4, 5], None),
SequencePattern([7, 8, 9], None),
SequencePattern([10, 11], None),
[
NoteEvent(M("A"), a=1, b=7, c=10),
NoteEvent(M("B"), a=2, b=8, c=11),
NoteEvent(M("C"), a=3, b=9, c=10),
NoteEvent(M("D"), a=1, b=7, c=11),
NoteEvent(M("E"), a=2, b=8, c=10),
NoteEvent(M("F"), a=3, b=9, c=11),
],
True,
),
],
)
def test(stop_at, input_a, input_b1, input_b2, input_c, expected, is_infinite):
pattern = ChainPattern(
EventPattern(a=input_a, b=input_b1),
EventPattern(b=input_b2),
EventPattern(c=input_c),
)
run_pattern_test(pattern, expected, is_infinite, stop_at)
```
#### File: tests/patterns/test_CompositeEvent.py
```python
import uuid
import pytest
from supriya.patterns.events import CompositeEvent, NodeFreeEvent, NullEvent, Priority
id_ = uuid.uuid4()
@pytest.mark.parametrize(
"event, offset, expected",
[
(
CompositeEvent([NullEvent(delta=0.25), NodeFreeEvent(id_, delta=0.0)]),
0.0,
[(0.25, Priority.START, NodeFreeEvent(id_))],
),
(
CompositeEvent([NullEvent(delta=0.5), NodeFreeEvent(id_, delta=0.0)]),
2.5,
[(3.0, Priority.START, NodeFreeEvent(id_))],
),
],
)
def test_expand(event, offset, expected):
print(event)
actual = event.expand(offset)
assert actual == expected
```
#### File: tests/patterns/test_EventPattern.py
```python
import pytest
from supriya.patterns import EventPattern, NoteEvent, SequencePattern
from supriya.patterns.testutils import MockUUID as M
from supriya.patterns.testutils import run_pattern_test
@pytest.mark.parametrize(
"stop_at, input_a, input_b, expected, is_infinite",
[
(
None,
SequencePattern([1, 2, 3], None),
SequencePattern([4, 5], None),
[
NoteEvent(M("A"), a=1, b=4),
NoteEvent(M("B"), a=2, b=5),
NoteEvent(M("C"), a=3, b=4),
NoteEvent(M("D"), a=1, b=5),
NoteEvent(M("E"), a=2, b=4),
NoteEvent(M("F"), a=3, b=5),
],
True,
),
(
None,
SequencePattern([1, 2, 3], None),
SequencePattern([4, 5], 1),
[NoteEvent(M("A"), a=1, b=4), NoteEvent(M("B"), a=2, b=5)],
False,
),
(
None,
SequencePattern([1, 2, 3], None),
SequencePattern([4, 5], 2),
[
NoteEvent(M("A"), a=1, b=4),
NoteEvent(M("B"), a=2, b=5),
NoteEvent(M("C"), a=3, b=4),
NoteEvent(M("D"), a=1, b=5),
],
False,
),
(
None,
SequencePattern([1, 2, 3], 1),
SequencePattern([4, 5], 1),
[NoteEvent(M("A"), a=1, b=4), NoteEvent(M("B"), a=2, b=5)],
False,
),
(
None,
SequencePattern([1, 2, 3], 1),
SequencePattern([4, 5], None),
[
NoteEvent(M("A"), a=1, b=4),
NoteEvent(M("B"), a=2, b=5),
NoteEvent(M("C"), a=3, b=4),
],
False,
),
(
None,
SequencePattern([1, 2, 3], 1),
4,
[
NoteEvent(M("A"), a=1, b=4),
NoteEvent(M("B"), a=2, b=4),
NoteEvent(M("C"), a=3, b=4),
],
False,
),
],
)
def test(stop_at, input_a, input_b, expected, is_infinite):
pattern = EventPattern(a=input_a, b=input_b)
run_pattern_test(pattern, expected, is_infinite, stop_at)
```
#### File: tests/patterns/test_Event.py
```python
import uuid
from supriya.patterns import NoteEvent
def test___eq__():
event_one = NoteEvent(uuid.uuid4())
event_two = NoteEvent(uuid.uuid4())
non_event = 23
assert event_one == event_one
assert event_two == event_two
assert event_one != event_two
assert event_one != non_event
assert event_two != non_event
```
#### File: tests/patterns/test_RandomPattern.py
```python
import pytest
from supriya.patterns import RandomPattern
@pytest.mark.parametrize(
"minimum, maximum, iterations, is_infinite",
[
(0.0, 1.0, None, True),
(0.0, 1.0, 1, False),
(0.45, 0.55, None, True),
(0.0, (1.0, 2.0), None, True),
],
)
def test(minimum, maximum, iterations, is_infinite):
pattern = RandomPattern(minimum=minimum, maximum=maximum, iterations=iterations)
assert pattern.distribution == RandomPattern.Distribution.WHITE_NOISE
assert pattern.is_infinite == is_infinite
assert pattern.iterations == iterations
assert pattern.maximum == maximum
assert pattern.minimum == minimum
iterator = iter(pattern)
ceased = True
actual = []
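    # Draw up to 1000 values; the for-else clears ``ceased`` only if the loop
    # never breaks, so ``ceased`` stays True only when StopIteration was raised.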
for _ in range(1000):
try:
actual.append(next(iterator))
except StopIteration:
break
else:
ceased = False
if is_infinite:
assert not ceased
else:
assert len(actual) == iterations
# TODO: Verify minimum / maximum bounds
assert len(set(actual)) == len(actual)
```
#### File: tests/patterns/test_SeedPattern.py
```python
import pytest
from supriya.patterns import ChoicePattern, SeedPattern, SequencePattern
from supriya.patterns.testutils import run_pattern_test
@pytest.mark.parametrize(
"stop_at, pattern, expected, is_infinite",
[
(None, SequencePattern([1, 2, 3, 4]), [1, 2, 3, 4], False),
(1, SequencePattern([1, 2, 3, 4]), [1], False),
],
)
def test(stop_at, pattern, expected, is_infinite):
pattern = SeedPattern(pattern)
run_pattern_test(pattern, expected, is_infinite, stop_at)
def test_random():
pattern = ChoicePattern([1, 2, 3])
assert len(set(tuple(pattern) for _ in range(10))) > 1
pattern = SeedPattern(ChoicePattern([1, 2, 3]))
assert len(set(tuple(pattern) for _ in range(10))) == 1
```
#### File: tests/patterns/test_UnaryOpPattern.py
```python
import pytest
from supriya.patterns import SequencePattern, UnaryOpPattern
from supriya.patterns.testutils import run_pattern_test
@pytest.mark.parametrize(
"stop_at, operator, input_, expected, is_infinite",
[
(None, "-", 1, [-1], True),
(None, "-", [1], [(-1,)], True),
(None, "-", [[1]], [((-1,),)], True),
(None, "-", [[[1]]], [(((-1,),),)], True),
(None, "-", [1, 2], [(-1, -2)], True),
(None, "-", SequencePattern([1, 2, 3]), [-1, -2, -3], False),
(None, "-", SequencePattern([1, 2, 3], None), [-1, -2, -3], True),
],
)
def test(stop_at, operator, input_, expected, is_infinite):
pattern = UnaryOpPattern(operator, input_)
run_pattern_test(pattern, expected, is_infinite, stop_at)
```
#### File: tests/providers/test_RealtimeProvider.py
```python
import time
import pytest
from uqbar.strings import normalize
from supriya.assets.synthdefs import default
from supriya.enums import AddAction, CalculationRate
from supriya.providers import (
BufferProxy,
BusGroupProxy,
BusProxy,
GroupProxy,
Provider,
ProviderMoment,
RealtimeProvider,
SynthProxy,
)
from supriya.utils import locate
def test_RealtimeProvider_init_error():
with pytest.raises(ValueError):
RealtimeProvider(23)
def test_RealtimeProvider_add_buffer_1(server):
provider = Provider.from_context(server)
file_path = locate("supriya.assets:audio/pulse_44100sr_16bit_octo.wav")
with server.osc_protocol.capture() as transcript:
with provider.at(1.2345):
proxy = provider.add_buffer(file_path=file_path)
time.sleep(0.1)
assert isinstance(proxy, BufferProxy)
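    # The bundle timestamp is the moment's time plus the provider's latency
    # (1.2345 + 0.1 = 1.3345); the second entry is the /done reply from the server.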
assert [entry.message.to_list() for entry in transcript] == [
[1.3345, [["/b_allocRead", 0, str(file_path), 0, -1]]],
["/done", "/b_allocRead", 0],
]
def test_RealtimeProvider_add_bus_1(server):
provider = Provider.from_context(server)
with provider.at(1.2345):
bus_proxy_one = provider.add_bus(calculation_rate="audio")
bus_proxy_two = provider.add_bus()
assert bus_proxy_one == BusProxy(
calculation_rate=CalculationRate.AUDIO, identifier=16, provider=provider
)
assert bus_proxy_two == BusProxy(
calculation_rate=CalculationRate.CONTROL, identifier=0, provider=provider
)
def test_RealtimeProvider_add_bus_error(server):
"""
Must be control or audio rate.
"""
provider = Provider.from_context(server)
with pytest.raises(ValueError):
provider.add_bus()
with provider.at(0):
with pytest.raises(ValueError):
provider.add_bus(calculation_rate="scalar")
def test_RealtimeProvider_add_bus_group_1(server):
provider = Provider.from_context(server)
with server.osc_protocol.capture() as transcript:
with provider.at(None):
bus_group_proxy_one = provider.add_bus_group(channel_count=2)
bus_group_proxy_two = provider.add_bus_group(channel_count=4)
assert bus_group_proxy_one == BusGroupProxy(
calculation_rate=CalculationRate.CONTROL,
channel_count=2,
identifier=0,
provider=provider,
)
assert bus_group_proxy_two == BusGroupProxy(
calculation_rate=CalculationRate.CONTROL,
channel_count=4,
identifier=2,
provider=provider,
)
assert [entry.message for entry in transcript] == []
def test_RealtimeProvider_add_bus_group_error(server):
"""
Must be 1 or more channels and control or audio rate.
"""
provider = Provider.from_context(server)
with pytest.raises(ValueError):
provider.add_bus_group()
with provider.at(0):
with pytest.raises(ValueError):
provider.add_bus_group(channel_count=0)
with pytest.raises(ValueError):
provider.add_bus_group(calculation_rate="scalar")
def test_RealtimeProvider_add_group_1(server):
provider = Provider.from_context(server)
seconds = time.time()
with server.osc_protocol.capture() as transcript:
with provider.at(seconds) as provider_moment:
group_proxy = provider.add_group()
assert group_proxy == GroupProxy(identifier=1000, provider=provider)
assert provider_moment == ProviderMoment(
provider=provider,
seconds=seconds,
bus_settings=[],
node_additions=[(group_proxy, AddAction.ADD_TO_HEAD, server.default_group)],
node_removals=[],
node_reorderings=[],
node_settings=[],
)
assert [entry.message.to_list() for entry in transcript] == [
[seconds + provider.latency, [["/g_new", 1000, 0, 1]]]
]
time.sleep(0.1)
assert str(server.query()) == normalize(
"""
NODE TREE 0 group
1 group
1000 group
"""
)
def test_RealtimeProvider_add_group_2(server):
provider = Provider.from_context(server)
seconds = time.time()
with server.osc_protocol.capture() as transcript:
with provider.at(None):
group_proxy_one = provider.add_group()
with provider.at(seconds + 0.01) as provider_moment:
group_proxy_two = provider.add_group(target_node=group_proxy_one)
assert group_proxy_two == GroupProxy(identifier=1001, provider=provider)
assert provider_moment == ProviderMoment(
provider=provider,
seconds=seconds + 0.01,
bus_settings=[],
node_additions=[(group_proxy_two, AddAction.ADD_TO_HEAD, group_proxy_one)],
node_removals=[],
node_reorderings=[],
node_settings=[],
)
assert [entry.message.to_list() for entry in transcript] == [
[None, [["/g_new", 1000, 0, 1]]],
[seconds + 0.01 + provider.latency, [["/g_new", 1001, 0, 1000]]],
]
time.sleep(0.1)
assert str(server.query()) == normalize(
"""
NODE TREE 0 group
1 group
1000 group
1001 group
"""
)
def test_RealtimeProvider_add_group_error(server):
"""
Requires moment.
"""
provider = Provider.from_context(server)
with pytest.raises(ValueError):
provider.add_group()
def test_RealtimeProvider_add_synth_1(server):
provider = Provider.from_context(server)
seconds = time.time()
with server.osc_protocol.capture() as transcript:
with provider.at(seconds) as provider_moment:
synth_proxy = provider.add_synth(amplitude=0.3, frequency=333)
assert synth_proxy == SynthProxy(
identifier=1000,
provider=provider,
synthdef=default,
settings=dict(amplitude=0.3, frequency=333),
)
assert provider_moment == ProviderMoment(
provider=provider,
seconds=seconds,
bus_settings=[],
node_additions=[(synth_proxy, AddAction.ADD_TO_HEAD, server.default_group)],
node_removals=[],
node_reorderings=[],
node_settings=[],
)
assert [entry.message.to_list() for entry in transcript] == [
[
seconds + provider.latency,
[["/s_new", "default", 1000, 0, 1, "amplitude", 0.3, "frequency", 333]],
]
]
time.sleep(0.1)
assert str(server.query()) == normalize(
"""
NODE TREE 0 group
1 group
1000 default
out: 0.0, amplitude: 0.3, frequency: 333.0, gate: 1.0, pan: 0.5
"""
)
def test_RealtimeProvider_add_synth_2(server):
provider = Provider.from_context(server)
seconds = time.time()
with server.osc_protocol.capture() as transcript:
with provider.at(None):
group_proxy = provider.add_group()
with provider.at(seconds + 0.01) as provider_moment:
synth_proxy = provider.add_synth(
target_node=group_proxy, amplitude=0.5, frequency=666
)
assert synth_proxy == SynthProxy(
identifier=1001,
provider=provider,
synthdef=default,
settings=dict(amplitude=0.5, frequency=666),
)
assert provider_moment == ProviderMoment(
provider=provider,
seconds=seconds + 0.01,
bus_settings=[],
node_additions=[(synth_proxy, AddAction.ADD_TO_HEAD, group_proxy)],
node_removals=[],
node_reorderings=[],
node_settings=[],
)
assert [entry.message.to_list() for entry in transcript] == [
[None, [["/g_new", 1000, 0, 1]]],
[
seconds + 0.01 + provider.latency,
[["/s_new", "default", 1001, 0, 1000, "amplitude", 0.5, "frequency", 666]],
],
]
time.sleep(0.1)
assert str(server.query()) == normalize(
"""
NODE TREE 0 group
1 group
1000 group
1001 default
out: 0.0, amplitude: 0.5, frequency: 666.0, gate: 1.0, pan: 0.5
"""
)
def test_RealtimeProvider_add_synth_3(server):
provider = Provider.from_context(server)
with server.osc_protocol.capture() as transcript:
with provider.at(None):
audio_bus_proxy = provider.add_bus("audio")
control_bus_proxy = provider.add_bus("control")
synth_proxy = provider.add_synth(
amplitude=control_bus_proxy, out=audio_bus_proxy
)
assert synth_proxy == SynthProxy(
identifier=1000,
provider=provider,
synthdef=default,
settings=dict(amplitude=control_bus_proxy, out=audio_bus_proxy),
)
assert [entry.message.to_list() for entry in transcript] == [
[None, [["/s_new", "default", 1000, 0, 1, "amplitude", "c0", "out", 16.0]]]
]
assert str(server.query()) == normalize(
"""
NODE TREE 0 group
1 group
1000 default
out: 16.0, amplitude: c0, frequency: 440.0, gate: 1.0, pan: 0.5
"""
)
def test_RealtimeProvider_add_synth_error(server):
"""
Requires moment.
"""
provider = Provider.from_context(server)
with pytest.raises(ValueError):
provider.add_synth()
def test_RealtimeProvider_free_buffer(server):
provider = Provider.from_context(server)
file_path = locate("supriya.assets:audio/pulse_44100sr_16bit_octo.wav")
with provider.at(1.2345):
proxy = provider.add_buffer(file_path=file_path)
time.sleep(0.1)
with server.osc_protocol.capture() as transcript:
with provider.at(2.3456):
proxy.free()
assert [entry.message.to_list() for entry in transcript] == [
[2.4456, [["/b_free", 0]]]
]
def test_RealtimeProvider_free_bus_1(server):
provider = Provider.from_context(server)
seconds = time.time()
with server.osc_protocol.capture() as transcript:
with provider.at(seconds):
audio_bus = provider.add_bus(calculation_rate=CalculationRate.AUDIO)
control_bus_a = provider.add_bus()
control_bus_b = provider.add_bus()
control_bus_c = provider.add_bus()
with provider.at(seconds + 0.01):
audio_bus.free()
control_bus_a.free()
control_bus_d = provider.add_bus()
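    # Freed bus ids are recycled: control bus 0 is freed above, so the bus
    # added afterwards reuses id 0.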
assert audio_bus.identifier == 16
assert control_bus_a.identifier == 0
assert control_bus_b.identifier == 1
assert control_bus_c.identifier == 2
assert control_bus_d.identifier == 0
assert [entry.message for entry in transcript] == []
def test_RealtimeProvider_free_bus_error(server):
provider = Provider.from_context(server)
with provider.at(0):
bus_proxy = provider.add_bus()
with pytest.raises(ValueError):
provider.free_bus(bus_proxy)
def test_RealtimeProvider_free_bus_group_1(server):
provider = Provider.from_context(server)
seconds = time.time()
with server.osc_protocol.capture() as transcript:
with provider.at(seconds):
audio_bus_group = provider.add_bus_group(
channel_count=2, calculation_rate=CalculationRate.AUDIO
)
control_bus_group_a = provider.add_bus_group(channel_count=2)
control_bus_group_b = provider.add_bus_group(channel_count=3)
control_bus_group_c = provider.add_bus_group(channel_count=2)
with provider.at(seconds + 0.01):
audio_bus_group.free()
control_bus_group_a.free()
control_bus_group_d = provider.add_bus_group(channel_count=2)
assert audio_bus_group.identifier == 16
assert control_bus_group_a.identifier == 0
assert control_bus_group_b.identifier == 2
assert control_bus_group_c.identifier == 5
assert control_bus_group_d.identifier == 0
assert [entry.message for entry in transcript] == []
def test_RealtimeProvider_free_bus_group_error(server):
provider = Provider.from_context(server)
with provider.at(0):
bus_group_proxy = provider.add_bus_group()
with pytest.raises(ValueError):
provider.free_bus_group(bus_group_proxy)
def test_RealtimeProvider_free_node_error(server):
"""
Requires moment.
"""
provider = Provider.from_context(server)
with provider.at(0):
group_proxy = provider.add_group()
with pytest.raises(ValueError):
provider.free_node(group_proxy)
def test_RealtimeProvider_free_node_1(server):
provider = Provider.from_context(server)
with server.osc_protocol.capture() as transcript:
with provider.at(None):
group_proxy = provider.add_group()
synth_proxy = provider.add_synth()
with provider.at(None):
group_proxy.free()
synth_proxy.free()
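    # Groups are freed outright via /n_free, while gated synths are released
    # with "gate" 0, so synth 1001 still shows up in the tree below while fading.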
assert [entry.message.to_list() for entry in transcript] == [
[None, [["/g_new", 1000, 0, 1], ["/s_new", "default", 1001, 0, 1]]],
[None, [["/n_free", 1000], ["/n_set", 1001, "gate", 0]]],
]
time.sleep(0.1)
assert str(server.query()) == normalize(
"""
NODE TREE 0 group
1 group
1001 default
out: 0.0, amplitude: 0.1, frequency: 440.0, gate: 0.0, pan: 0.5
"""
)
def test_RealtimeProvider_move_node_1(server):
provider = Provider.from_context(server)
seconds = time.time()
with server.osc_protocol.capture() as transcript:
with provider.at(seconds):
group_proxy_one = provider.add_group()
group_proxy_two = provider.add_group()
provider.move_node(group_proxy_one, AddAction.ADD_TO_TAIL, group_proxy_two)
assert [entry.message.to_list() for entry in transcript] == [
[
seconds + provider.latency,
[["/g_new", 1000, 0, 1], ["/g_new", 1001, 0, 1], ["/g_tail", 1001, 1000]],
]
]
time.sleep(0.1)
assert str(server.query()) == normalize(
"""
NODE TREE 0 group
1 group
1001 group
1000 group
"""
)
def test_RealtimeProvider_move_node_error(server):
"""
Requires moment.
"""
provider = Provider.from_context(server)
with provider.at(0):
group_proxy_one = provider.add_group()
group_proxy_two = provider.add_group()
with pytest.raises(ValueError):
group_proxy_one.move(AddAction.ADD_TO_HEAD, group_proxy_two)
def test_RealtimeProvider_set_bus_1(server):
provider = Provider.from_context(server)
seconds = time.time()
with server.osc_protocol.capture() as transcript:
with provider.at(seconds):
bus_group_proxy = provider.add_bus_group(channel_count=4)
for i, bus_proxy in enumerate(bus_group_proxy):
bus_proxy.set_(pow(2, i))
assert [entry.message.to_list() for entry in transcript] == [
[seconds + provider.latency, [["/c_set", 0, 1.0, 1, 2.0, 2, 4.0, 3, 8.0]]]
]
def test_RealtimeProvider_set_bus_error(server):
provider = Provider.from_context(server)
with provider.at(1.2345):
audio_bus_proxy = provider.add_bus(calculation_rate=CalculationRate.AUDIO)
control_bus_proxy = provider.add_bus()
with pytest.raises(ValueError):
audio_bus_proxy.set_(0.1234)
with pytest.raises(ValueError):
control_bus_proxy.set_(0.1234)
def test_RealtimeProvider_set_node_1(server):
provider = Provider.from_context(server)
seconds = time.time()
with provider.at(seconds):
group_proxy = provider.add_group()
with server.osc_protocol.capture() as transcript:
with provider.at(seconds + 0.01):
group_proxy["foo"] = 23
assert [entry.message.to_list() for entry in transcript] == [
[seconds + 0.01 + provider.latency, [["/n_set", 1000, "foo", 23]]]
]
def test_RealtimeProvider_set_node_2(server):
provider = Provider.from_context(server)
with provider.at(None):
synth_proxy = provider.add_synth()
time.sleep(0.01)
assert str(server.query()) == normalize(
"""
NODE TREE 0 group
1 group
1000 default
out: 0.0, amplitude: 0.1, frequency: 440.0, gate: 1.0, pan: 0.5
"""
)
with server.osc_protocol.capture() as transcript:
with provider.at(None):
synth_proxy["frequency"] = 443
assert [entry.message.to_list() for entry in transcript] == [
[None, [["/n_set", 1000, "frequency", 443.0]]]
]
time.sleep(0.01)
assert str(server.query()) == normalize(
"""
NODE TREE 0 group
1 group
1000 default
out: 0.0, amplitude: 0.1, frequency: 443.0, gate: 1.0, pan: 0.5
"""
)
def test_RealtimeProvider_set_node_3(server):
provider = Provider.from_context(server)
with provider.at(None):
synth_proxy = provider.add_synth()
bus_proxy = provider.add_bus()
time.sleep(0.01)
with server.osc_protocol.capture() as transcript:
with provider.at(None):
synth_proxy["frequency"] = bus_proxy
assert [entry.message.to_list() for entry in transcript] == [
[None, [["/n_set", 1000, "frequency", "c0"]]]
]
time.sleep(0.01)
assert str(server.query()) == normalize(
"""
NODE TREE 0 group
1 group
1000 default
out: 0.0, amplitude: 0.1, frequency: c0, gate: 1.0, pan: 0.5
"""
)
time.sleep(0.01)
with server.osc_protocol.capture() as transcript:
with provider.at(None):
synth_proxy["frequency"] = 443
assert [entry.message.to_list() for entry in transcript] == [
[None, [["/n_set", 1000, "frequency", 443.0]]]
]
time.sleep(0.01)
assert str(server.query()) == normalize(
"""
NODE TREE 0 group
1 group
1000 default
out: 0.0, amplitude: 0.1, frequency: 443.0, gate: 1.0, pan: 0.5
"""
)
def test_RealtimeProvider_set_node_error(server):
provider = Provider.from_context(server)
with provider.at(None):
group_proxy = provider.add_group()
synth_proxy = provider.add_synth()
with pytest.raises(ValueError):
group_proxy["foo"] = 23
with pytest.raises(ValueError):
synth_proxy["foo"] = 23
```
#### File: tests/realtime/test_realtime_Group_insert.py
```python
import uqbar.strings
import supriya.assets.synthdefs
import supriya.realtime
def test_01(server):
group = supriya.realtime.Group().allocate()
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1000 group
"""
)
synth_a = supriya.realtime.Synth(supriya.assets.synthdefs.test)
group.insert(0, synth_a)
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1000 group
1001 test
"""
)
synth_b = supriya.realtime.Synth(supriya.assets.synthdefs.test)
group.insert(0, synth_b)
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1000 group
1002 test
1001 test
"""
)
synth_c = supriya.realtime.Synth(supriya.assets.synthdefs.test)
group.insert(1, synth_c)
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1000 group
1002 test
1003 test
1001 test
"""
)
synth_d = supriya.realtime.Synth(supriya.assets.synthdefs.test)
group.insert(3, synth_d)
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1000 group
1002 test
1003 test
1001 test
1004 test
"""
)
```
#### File: tests/realtime/test_realtime_Group_pop.py
```python
import uqbar.strings
import supriya.assets.synthdefs
import supriya.realtime
def test_01(server):
group_a = supriya.realtime.Group()
group_a.allocate()
synth_a = supriya.realtime.Synth(supriya.assets.synthdefs.test)
group_a.append(synth_a)
group_b = supriya.realtime.Group()
group_a.append(group_b)
synth_b = supriya.realtime.Synth(supriya.assets.synthdefs.test)
group_b.append(synth_b)
synth_c = supriya.realtime.Synth(supriya.assets.synthdefs.test)
group_b.append(synth_c)
group_c = supriya.realtime.Group()
group_b.append(group_c)
synth_d = supriya.realtime.Synth(supriya.assets.synthdefs.test)
group_a.append(synth_d)
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1000 group
1001 test
1002 group
1003 test
1004 test
1005 group
1006 test
"""
)
group_a.pop()
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1000 group
1001 test
1002 group
1003 test
1004 test
1005 group
"""
)
group_b.pop(1)
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1000 group
1001 test
1002 group
1003 test
1005 group
"""
)
group_a.pop(0)
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1000 group
1002 group
1003 test
1005 group
"""
)
group_b.pop()
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1000 group
1002 group
1003 test
"""
)
group_a.pop()
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1000 group
"""
)
assert not group_b.is_allocated
assert not group_c.is_allocated
assert not synth_a.is_allocated
assert not synth_b.is_allocated
assert not synth_c.is_allocated
assert not synth_d.is_allocated
```
#### File: tests/realtime/test_realtime_Node_replace_with.py
```python
import uqbar.strings
import supriya.assets.synthdefs
import supriya.realtime
def test_01(server):
synth_a = supriya.realtime.Synth(supriya.assets.synthdefs.test)
synth_b = supriya.realtime.Synth(supriya.assets.synthdefs.test)
synth_c = supriya.realtime.Synth(supriya.assets.synthdefs.test)
synth_d = supriya.realtime.Synth(supriya.assets.synthdefs.test)
synth_e = supriya.realtime.Synth(supriya.assets.synthdefs.test)
synth_a.allocate()
synth_b.allocate()
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1001 test
1000 test
"""
)
synth_a.replace_with(synth_c)
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1001 test
1002 test
"""
)
synth_b.replace_with([synth_d, synth_e])
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1003 test
1004 test
1002 test
"""
)
synth_c.replace_with([synth_a, synth_e])
server_state = str(server.query(False))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1003 test
1005 test
1004 test
"""
)
``` |
{
"source": "josiah-wolf-oberholtzer/tloen",
"score": 2
} |
#### File: tests/domain/test_Application_quit.py
```python
import pytest
from tloen.domain import Application
@pytest.mark.asyncio
async def test_1():
application = await Application.new()
await application.boot()
await application.quit()
assert application.status == Application.Status.OFFLINE
assert application.primary_context.provider is None
```
#### File: tests/domain/test_Application_render.py
```python
import pytest
from uqbar.strings import normalize
from tloen.domain import Application
@pytest.mark.asyncio
async def test_1():
application = Application()
context = await application.add_context()
await context.add_track()
session = await application.render()
assert application.status == Application.Status.OFFLINE
assert session.to_strings() == normalize(
"""
0.0:
NODE TREE 0 group
1000 group
1001 group
1002 group
1009 group
1010 group
1011 group
1012 group
1003 mixer/patch[fb,gain]/2x2
1008 group
1004 mixer/levels/2
1013 group
1005 mixer/levels/2
1014 group
1006 mixer/patch[gain,hard,replace]/2x2
1015 group
1029 mixer/patch[gain]/2x2
1007 mixer/levels/2
1016 group
1022 group
1023 group
1024 group
1017 mixer/patch[fb,gain]/2x2
1018 mixer/levels/2
1025 group
1019 mixer/levels/2
1026 group
1020 mixer/patch[gain,hard,replace]/2x2
1027 group
1028 mixer/patch/2x2
1021 mixer/levels/2
1030 group
1036 group
1037 group
1038 group
1039 group
1031 mixer/patch[fb,gain]/2x2
1032 mixer/levels/2
1040 group
1033 mixer/levels/2
1041 group
1034 mixer/patch[gain,hard,replace]/2x2
1042 group
1043 mixer/patch/2x2
1035 mixer/levels/2
inf:
NODE TREE 0 group
"""
)
```
#### File: tests/domain/test_Arpeggiator.py
```python
import asyncio
import logging
import pytest
from supriya.assets.synthdefs import default
from tloen.domain import Application, Arpeggiator, Instrument
from tloen.midi import NoteOffMessage, NoteOnMessage
@pytest.fixture(autouse=True)
def logger(caplog):
caplog.set_level(logging.DEBUG, logger="tloen.domain")
caplog.set_level(logging.DEBUG, logger="supriya.clocks")
@pytest.fixture
async def application():
application = Application()
context = await application.add_context(name="Context")
await context.add_track(name="Track")
await application.boot()
yield application
await application.quit()
@pytest.mark.timeout(10)
@pytest.mark.asyncio
async def test_timeout(application):
await application["Track"].add_device(Arpeggiator)
await application["Track"].add_device(Instrument, synthdef=default)
await asyncio.sleep(0.1)
await application.transport.perform([NoteOnMessage(pitch=60, velocity=100)])
await asyncio.sleep(1.0)
@pytest.mark.asyncio
async def test_query_1(application):
"""
Arpeggiator does not modify the server node tree.
"""
before = str(await application["Track"].query())
await application["Track"].add_device(Arpeggiator)
await asyncio.sleep(0.1)
after = str(await application["Track"].query())
assert before == after
@pytest.mark.asyncio
async def test_osc_transcript(application):
"""
Arpeggiator instantiation does not send any OSC messages.
"""
with application["Context"].provider.server.osc_protocol.capture() as transcript:
await application["Track"].add_device(Arpeggiator)
assert len(transcript.sent_messages) == 0
@pytest.mark.asyncio
async def test_midi_transcript_1(mocker, application):
time_mock = mocker.patch.object(application.transport._clock, "get_current_time")
time_mock.return_value = 0.0
arpeggiator = await application["Track"].add_device(Arpeggiator)
assert not application.transport.is_running
with arpeggiator.capture() as transcript:
await application.transport.perform([NoteOnMessage(pitch=60, velocity=100)])
assert application.transport.is_running
await asyncio.sleep(0.1)
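        # Advance the mocked clock so the arpeggiator's scheduled note on/off
        # pairs up to offset 0.25 are performed.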
time_mock.return_value = 0.5
await asyncio.sleep(0.1)
assert [(_.label, _.moment.offset, _.message) for _ in transcript] == [
("I", 0.0, NoteOnMessage(pitch=60, velocity=100)),
("O", 0.0, NoteOnMessage(pitch=60, velocity=100)),
("O", 0.0625, NoteOffMessage(pitch=60)),
("O", 0.0625, NoteOnMessage(pitch=60, velocity=100)),
("O", 0.125, NoteOffMessage(pitch=60)),
("O", 0.125, NoteOnMessage(pitch=60, velocity=100)),
("O", 0.1875, NoteOffMessage(pitch=60)),
("O", 0.1875, NoteOnMessage(pitch=60, velocity=100)),
("O", 0.25, NoteOffMessage(pitch=60)),
("O", 0.25, NoteOnMessage(pitch=60, velocity=100)),
]
@pytest.mark.asyncio
async def test_midi_transcript_2(mocker, application):
time_mock = mocker.patch.object(application.transport._clock, "get_current_time")
time_mock.return_value = 0.0
arpeggiator = await application["Track"].add_device(Arpeggiator)
assert not application.transport.is_running
with arpeggiator.capture() as transcript:
await application.transport.perform([NoteOnMessage(pitch=60, velocity=100)])
await application.transport.perform([NoteOnMessage(pitch=63, velocity=100)])
await application.transport.perform([NoteOnMessage(pitch=67, velocity=100)])
assert application.transport.is_running
await asyncio.sleep(0.1)
time_mock.return_value = 0.5
await asyncio.sleep(0.1)
assert [(_.label, _.moment.offset, _.message) for _ in transcript] == [
("I", 0.0, NoteOnMessage(pitch=60, velocity=100)),
("I", 0.0, NoteOnMessage(pitch=63, velocity=100)),
("I", 0.0, NoteOnMessage(pitch=67, velocity=100)),
("O", 0.0, NoteOnMessage(pitch=60, velocity=100)),
("O", 0.0625, NoteOffMessage(pitch=60)),
("O", 0.0625, NoteOnMessage(pitch=63, velocity=100)),
("O", 0.125, NoteOffMessage(pitch=63)),
("O", 0.125, NoteOnMessage(pitch=67, velocity=100)),
("O", 0.1875, NoteOffMessage(pitch=67)),
("O", 0.1875, NoteOnMessage(pitch=60, velocity=100)),
("O", 0.25, NoteOffMessage(pitch=60)),
("O", 0.25, NoteOnMessage(pitch=63, velocity=100)),
]
```
#### File: tests/domain/test_Chain_move.py
```python
import pytest
from tloen.domain import Application, RackDevice
@pytest.mark.asyncio
async def test_1():
"""
Unbooted, move one chain before another
"""
application = Application()
context = await application.add_context()
track = await context.add_track()
rack_device = await track.add_device(RackDevice)
chain_one = await rack_device.add_chain()
chain_two = await rack_device.add_chain()
await chain_two.move(rack_device, 0)
assert list(rack_device.chains) == [chain_two, chain_one]
@pytest.mark.asyncio
async def test_2():
"""
Booted, move one chain before another
"""
application = Application()
context = await application.add_context()
track = await context.add_track()
rack_device = await track.add_device(RackDevice)
chain_one = await rack_device.add_chain()
chain_two = await rack_device.add_chain()
await application.boot()
with context.provider.server.osc_protocol.capture() as transcript:
await chain_two.move(rack_device, 0)
assert list(rack_device.chains) == [chain_two, chain_one]
assert len(transcript.sent_messages) == 1
_, message = transcript.sent_messages[0]
assert message.to_list() == [None, [["/g_head", 1017, 1032]]]
@pytest.mark.asyncio
async def test_3():
"""
Booted, with cross-referencing sends, move one chain before another
"""
application = Application()
context = await application.add_context()
track = await context.add_track()
rack_device = await track.add_device(RackDevice)
chain_one = await rack_device.add_chain()
chain_two = await rack_device.add_chain()
await chain_one.add_send(chain_two)
await chain_two.add_send(chain_one)
await application.boot()
with context.provider.server.osc_protocol.capture() as transcript:
await chain_two.move(rack_device, 0)
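    # Reordering the chains forces the cross-referencing sends to be rebuilt:
    # two new patch synths are created and the old ones are gated off alongside
    # the /g_head reorder.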
assert len(transcript.sent_messages) == 1
_, message = transcript.sent_messages[0]
assert message.to_list() == [
None,
[
[
"/s_new",
"mixer/patch[gain]/2x2",
1078,
0,
1044,
"in_",
28.0,
"out",
24.0,
],
[
"/s_new",
"mixer/patch[fb,gain]/2x2",
1079,
0,
1030,
"in_",
24.0,
"out",
26.0,
],
["/g_head", 1017, 1032],
["/n_set", 1046, "gate", 0],
["/n_set", 1047, "gate", 0],
],
]
@pytest.mark.asyncio
async def test_4():
"""
Booted, move one chain from one rack device to another
"""
application = Application()
context = await application.add_context()
track = await context.add_track()
rack_device_one = await track.add_device(RackDevice)
rack_device_two = await track.add_device(RackDevice)
chain = await rack_device_one.add_chain()
await application.boot()
with context.provider.server.osc_protocol.capture() as transcript:
await chain.move(rack_device_two, 0)
assert len(transcript.sent_messages) == 1
_, message = transcript.sent_messages[0]
assert message.to_list() == [
None,
[
[
"/s_new",
"mixer/patch[gain]/2x2",
1066,
0,
1030,
"in_",
24.0,
"out",
26.0,
],
["/g_tail", 1035, 1018],
["/n_set", 1031, "gate", 0],
],
]
```
#### File: tests/domain/test_Chain_unmute.py
```python
import asyncio
import pytest
from tloen.domain import Application, AudioEffect, RackDevice
@pytest.mark.asyncio
async def test_repeat(dc_index_synthdef_factory):
"""
Unmuting more than once is a no-op
"""
application = Application(channel_count=1)
context = await application.add_context()
track = await context.add_track()
rack = await track.add_device(RackDevice)
chain = await rack.add_chain()
await chain.add_device(AudioEffect, synthdef=dc_index_synthdef_factory)
await application.boot()
await chain.mute()
await asyncio.sleep(0.2)
assert [int(_) for _ in context.master_track.rms_levels["input"]] == [0]
await chain.unmute()
await asyncio.sleep(0.2)
assert [int(_) for _ in context.master_track.rms_levels["input"]] == [1]
with context.provider.server.osc_protocol.capture() as transcript:
await chain.unmute()
assert not len(transcript.sent_messages)
```
#### File: tests/domain/test_CueTrack___init__.py
```python
from uuid import UUID
from tloen.domain import CueTrack, Target
def test_1():
cue_track = CueTrack()
assert cue_track.application is None
assert cue_track.channel_count == 2
assert cue_track.effective_channel_count == 2
assert cue_track.context is None
assert cue_track.graph_order == ()
assert cue_track.name is None
assert cue_track.parent is None
assert cue_track.provider is None
assert isinstance(cue_track.receive_target, Target)
assert isinstance(cue_track.send_target, Target)
assert isinstance(cue_track.uuid, UUID)
assert len(cue_track.devices) == 0
assert len(cue_track.postfader_sends) == 0
assert len(cue_track.prefader_sends) == 0
```
#### File: tests/domain/test_fixtures.py
```python
import pytest
from uqbar.strings import normalize
def test_dc_index_synthdef_factory(dc_index_synthdef_factory):
synthdef = dc_index_synthdef_factory.build(name="test")
assert normalize(str(synthdef)) == normalize(
"""
synthdef:
name: test
ugens:
- Control.ir: null
- Control.kr: null
- BinaryOpUGen(EQUAL).kr/0:
left: Control.kr[0:index]
right: 0.0
- BinaryOpUGen(EQUAL).kr/1:
left: Control.kr[0:index]
right: 1.0
- DC.ar:
source: 1.0
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: DC.ar[0]
right: BinaryOpUGen(EQUAL).kr/0[0]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: DC.ar[0]
right: BinaryOpUGen(EQUAL).kr/1[0]
- Out.ar:
bus: Control.ir[0:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
"""
)
@pytest.mark.asyncio
async def test_track_mute_solo_application(track_mute_solo_application):
await track_mute_solo_application.boot()
assert format(
await track_mute_solo_application.primary_context.query(), "unindexed"
) == normalize(
"""
NODE TREE ... group (Context)
... group (Tracks)
... group (a)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 16.0, lag: 0.01, out: 24.0
... group (SubTracks)
... mixer/levels/8 (InputLevels)
out: 24.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 24.0, lag: 0.01, out: 32.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 32.0, index: 0.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 32.0, lag: 0.01, mix: 1.0, out: 24.0
... mixer/levels/8 (PrefaderLevels)
out: 24.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c0, gate: 1.0, hard_gate: 1.0, in_: 24.0, lag: 0.01, out: 24.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 24.0, lag: 0.01, out: 216.0
... mixer/levels/8 (PostfaderLevels)
out: 24.0, gate: 1.0, lag: 0.01
... group (b)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 40.0, lag: 0.01, out: 48.0
... group (SubTracks)
... group (ba)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 56.0, lag: 0.01, out: 64.0
... group (SubTracks)
... mixer/levels/8 (InputLevels)
out: 64.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 64.0, lag: 0.01, out: 72.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 72.0, index: 2.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 72.0, lag: 0.01, mix: 1.0, out: 64.0
... mixer/levels/8 (PrefaderLevels)
out: 64.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c4, gate: 1.0, hard_gate: 1.0, in_: 64.0, lag: 0.01, out: 64.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 64.0, lag: 0.01, out: 48.0
... mixer/levels/8 (PostfaderLevels)
out: 64.0, gate: 1.0, lag: 0.01
... group (bb)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 80.0, lag: 0.01, out: 88.0
... group (SubTracks)
... mixer/levels/8 (InputLevels)
out: 88.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 88.0, lag: 0.01, out: 96.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 96.0, index: 3.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 96.0, lag: 0.01, mix: 1.0, out: 88.0
... mixer/levels/8 (PrefaderLevels)
out: 88.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c6, gate: 1.0, hard_gate: 1.0, in_: 88.0, lag: 0.01, out: 88.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 88.0, lag: 0.01, out: 48.0
... mixer/levels/8 (PostfaderLevels)
out: 88.0, gate: 1.0, lag: 0.01
... mixer/levels/8 (InputLevels)
out: 48.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 48.0, lag: 0.01, out: 104.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 104.0, index: 1.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 104.0, lag: 0.01, mix: 1.0, out: 48.0
... mixer/levels/8 (PrefaderLevels)
out: 48.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c2, gate: 1.0, hard_gate: 1.0, in_: 48.0, lag: 0.01, out: 48.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 48.0, lag: 0.01, out: 216.0
... mixer/levels/8 (PostfaderLevels)
out: 48.0, gate: 1.0, lag: 0.01
... group (c)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 112.0, lag: 0.01, out: 120.0
... group (SubTracks)
... group (ca)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 128.0, lag: 0.01, out: 136.0
... group (SubTracks)
... mixer/levels/8 (InputLevels)
out: 136.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 136.0, lag: 0.01, out: 144.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 144.0, index: 5.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 144.0, lag: 0.01, mix: 1.0, out: 136.0
... mixer/levels/8 (PrefaderLevels)
out: 136.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c10, gate: 1.0, hard_gate: 1.0, in_: 136.0, lag: 0.01, out: 136.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 136.0, lag: 0.01, out: 120.0
... mixer/levels/8 (PostfaderLevels)
out: 136.0, gate: 1.0, lag: 0.01
... group (cb)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 152.0, lag: 0.01, out: 160.0
... group (SubTracks)
... group (cba)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 168.0, lag: 0.01, out: 176.0
... group (SubTracks)
... mixer/levels/8 (InputLevels)
out: 176.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 176.0, lag: 0.01, out: 184.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 184.0, index: 7.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 184.0, lag: 0.01, mix: 1.0, out: 176.0
... mixer/levels/8 (PrefaderLevels)
out: 176.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c14, gate: 1.0, hard_gate: 1.0, in_: 176.0, lag: 0.01, out: 176.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 176.0, lag: 0.01, out: 160.0
... mixer/levels/8 (PostfaderLevels)
out: 176.0, gate: 1.0, lag: 0.01
... mixer/levels/8 (InputLevels)
out: 160.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 160.0, lag: 0.01, out: 192.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 192.0, index: 6.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 192.0, lag: 0.01, mix: 1.0, out: 160.0
... mixer/levels/8 (PrefaderLevels)
out: 160.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c12, gate: 1.0, hard_gate: 1.0, in_: 160.0, lag: 0.01, out: 160.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 160.0, lag: 0.01, out: 120.0
... mixer/levels/8 (PostfaderLevels)
out: 160.0, gate: 1.0, lag: 0.01
... mixer/levels/8 (InputLevels)
out: 120.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 120.0, lag: 0.01, out: 200.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 200.0, index: 4.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 200.0, lag: 0.01, mix: 1.0, out: 120.0
... mixer/levels/8 (PrefaderLevels)
out: 120.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c8, gate: 1.0, hard_gate: 1.0, in_: 120.0, lag: 0.01, out: 120.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 120.0, lag: 0.01, out: 216.0
... mixer/levels/8 (PostfaderLevels)
out: 120.0, gate: 1.0, lag: 0.01
... group (MasterTrack)
... group (Parameters)
... group (gain)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 208.0, lag: 0.01, out: 216.0
... mixer/levels/8 (InputLevels)
out: 216.0, gate: 1.0, lag: 0.01
... group (Devices)
... mixer/levels/8 (PrefaderLevels)
out: 216.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c16, gate: 1.0, hard_gate: 1.0, in_: 216.0, lag: 0.01, out: 216.0
... group (PostFaderSends)
... mixer/patch/8x2 (DirectOut)
active: 1.0, gate: 1.0, in_: 216.0, lag: 0.01, out: 0.0
... mixer/levels/8 (PostfaderLevels)
out: 216.0, gate: 1.0, lag: 0.01
... group (CueTrack)
... group (Parameters)
... group (gain)
... group (mix)
... group (Receives)
... mixer/patch[fb,gain]/2x2 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 224.0, lag: 0.01, out: 226.0
... mixer/levels/2 (InputLevels)
out: 226.0, gate: 1.0, lag: 0.01
... group (Devices)
... mixer/levels/2 (PrefaderLevels)
out: 226.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/2x2 (Output)
active: 1.0, gain: c17, gate: 1.0, hard_gate: 1.0, in_: 226.0, lag: 0.01, out: 226.0
... group (PostFaderSends)
... mixer/patch/2x2 (DirectOut)
active: 1.0, gate: 1.0, in_: 226.0, lag: 0.01, out: 2.0
... mixer/levels/2 (PostfaderLevels)
out: 226.0, gate: 1.0, lag: 0.01
"""
)
@pytest.mark.asyncio
async def test_channel_count_application(channel_count_application):
await channel_count_application.boot()
assert format(
await channel_count_application.primary_context.query(), "unindexed"
) == normalize(
"""
NODE TREE ... group (Context)
... group (Tracks)
... group (One)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/2x2 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 16.0, lag: 0.01, out: 18.0
... group (SubTracks)
... mixer/levels/2 (InputLevels)
out: 18.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (Rack)
... mixer/patch[gain]/2x2 (RackIn)
active: 1.0, gain: 0.0, gate: 1.0, in_: 18.0, lag: 0.01, out: 20.0
... group (ChainContainer)
... group (Chain)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/2x2 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 22.0, lag: 0.01, out: 24.0
... mixer/levels/2 (InputLevels)
out: 24.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (Device)
... mixer/patch[replace]/2x2 (DeviceIn)
active: 1.0, gate: 1.0, in_: 24.0, lag: 0.01, out: 26.0
... group (Body)
... ab5b942cf898e9d22891fff080fee99e
out: 26.0, index: 0.0
... mixer/patch[hard,mix]/2x2 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 26.0, lag: 0.01, mix: 1.0, out: 24.0
... mixer/levels/2 (PrefaderLevels)
out: 24.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/2x2 (Output)
active: 1.0, gain: c2, gate: 1.0, hard_gate: 1.0, in_: 24.0, lag: 0.01, out: 24.0
... group (PostFaderSends)
... mixer/patch[gain]/2x2 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 24.0, lag: 0.01, out: 20.0
... mixer/levels/2 (PostfaderLevels)
out: 24.0, gate: 1.0, lag: 0.01
... mixer/patch[hard,mix]/2x2 (RackOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 20.0, lag: 0.01, mix: 1.0, out: 18.0
... mixer/levels/2 (PrefaderLevels)
out: 18.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/2x2 (Output)
active: 1.0, gain: c0, gate: 1.0, hard_gate: 1.0, in_: 18.0, lag: 0.01, out: 18.0
... group (PostFaderSends)
... mixer/patch[gain]/2x2 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 18.0, lag: 0.01, out: 38.0
... mixer/levels/2 (PostfaderLevels)
out: 18.0, gate: 1.0, lag: 0.01
... group (Two)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/2x2 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 28.0, lag: 0.01, out: 30.0
... group (SubTracks)
... group (Three)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/2x2 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 32.0, lag: 0.01, out: 34.0
... group (SubTracks)
... mixer/levels/2 (InputLevels)
out: 34.0, gate: 1.0, lag: 0.01
... group (Devices)
... mixer/levels/2 (PrefaderLevels)
out: 34.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/2x2 (Output)
active: 1.0, gain: c6, gate: 1.0, hard_gate: 1.0, in_: 34.0, lag: 0.01, out: 34.0
... group (PostFaderSends)
... mixer/patch[gain]/2x2 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 34.0, lag: 0.01, out: 30.0
... mixer/levels/2 (PostfaderLevels)
out: 34.0, gate: 1.0, lag: 0.01
... mixer/levels/2 (InputLevels)
out: 30.0, gate: 1.0, lag: 0.01
... group (Devices)
... mixer/levels/2 (PrefaderLevels)
out: 30.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/2x2 (Output)
active: 1.0, gain: c4, gate: 1.0, hard_gate: 1.0, in_: 30.0, lag: 0.01, out: 30.0
... group (PostFaderSends)
... mixer/patch[gain]/2x2 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 30.0, lag: 0.01, out: 38.0
... mixer/levels/2 (PostfaderLevels)
out: 30.0, gate: 1.0, lag: 0.01
... group (MasterTrack)
... group (Parameters)
... group (gain)
... group (Receives)
... mixer/patch[fb,gain]/2x2 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 36.0, lag: 0.01, out: 38.0
... mixer/levels/2 (InputLevels)
out: 38.0, gate: 1.0, lag: 0.01
... group (Devices)
... mixer/levels/2 (PrefaderLevels)
out: 38.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/2x2 (Output)
active: 1.0, gain: c8, gate: 1.0, hard_gate: 1.0, in_: 38.0, lag: 0.01, out: 38.0
... group (PostFaderSends)
... mixer/patch/2x2 (DirectOut)
active: 1.0, gate: 1.0, in_: 38.0, lag: 0.01, out: 0.0
... mixer/levels/2 (PostfaderLevels)
out: 38.0, gate: 1.0, lag: 0.01
... group (CueTrack)
... group (Parameters)
... group (gain)
... group (mix)
... group (Receives)
... mixer/patch[fb,gain]/2x2 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 40.0, lag: 0.01, out: 42.0
... mixer/levels/2 (InputLevels)
out: 42.0, gate: 1.0, lag: 0.01
... group (Devices)
... mixer/levels/2 (PrefaderLevels)
out: 42.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/2x2 (Output)
active: 1.0, gain: c9, gate: 1.0, hard_gate: 1.0, in_: 42.0, lag: 0.01, out: 42.0
... group (PostFaderSends)
... mixer/patch/2x2 (DirectOut)
active: 1.0, gate: 1.0, in_: 42.0, lag: 0.01, out: 2.0
... mixer/levels/2 (PostfaderLevels)
out: 42.0, gate: 1.0, lag: 0.01
"""
)
@pytest.mark.asyncio
async def test_chain_mute_solo_application(chain_mute_solo_application):
await chain_mute_solo_application.boot()
assert format(
await chain_mute_solo_application.primary_context.query(), "unindexed"
) == normalize(
"""
NODE TREE ... group (Context)
... group (Tracks)
... group (Track)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 16.0, lag: 0.01, out: 24.0
... group (SubTracks)
... mixer/levels/8 (InputLevels)
out: 24.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (outer/a)
... mixer/patch[gain]/8x8 (RackIn)
active: 1.0, gain: 0.0, gate: 1.0, in_: 24.0, lag: 0.01, out: 32.0
... group (ChainContainer)
... group (outer/a/a)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 40.0, lag: 0.01, out: 48.0
... mixer/levels/8 (InputLevels)
out: 48.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 48.0, lag: 0.01, out: 56.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 56.0, index: 0.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 56.0, lag: 0.01, mix: 1.0, out: 48.0
... mixer/levels/8 (PrefaderLevels)
out: 48.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c2, gate: 1.0, hard_gate: 1.0, in_: 48.0, lag: 0.01, out: 48.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 48.0, lag: 0.01, out: 32.0
... mixer/levels/8 (PostfaderLevels)
out: 48.0, gate: 1.0, lag: 0.01
... group (outer/a/b)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 64.0, lag: 0.01, out: 72.0
... mixer/levels/8 (InputLevels)
out: 72.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (inner/a)
... mixer/patch[gain]/8x8 (RackIn)
active: 1.0, gain: 0.0, gate: 1.0, in_: 72.0, lag: 0.01, out: 80.0
... group (ChainContainer)
... group (inner/a/a)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 88.0, lag: 0.01, out: 96.0
... mixer/levels/8 (InputLevels)
out: 96.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 96.0, lag: 0.01, out: 104.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 104.0, index: 2.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 104.0, lag: 0.01, mix: 1.0, out: 96.0
... mixer/levels/8 (PrefaderLevels)
out: 96.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c6, gate: 1.0, hard_gate: 1.0, in_: 96.0, lag: 0.01, out: 96.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 96.0, lag: 0.01, out: 80.0
... mixer/levels/8 (PostfaderLevels)
out: 96.0, gate: 1.0, lag: 0.01
... group (inner/a/b)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 112.0, lag: 0.01, out: 120.0
... mixer/levels/8 (InputLevels)
out: 120.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 120.0, lag: 0.01, out: 128.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 128.0, index: 3.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 128.0, lag: 0.01, mix: 1.0, out: 120.0
... mixer/levels/8 (PrefaderLevels)
out: 120.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c8, gate: 1.0, hard_gate: 1.0, in_: 120.0, lag: 0.01, out: 120.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 120.0, lag: 0.01, out: 80.0
... mixer/levels/8 (PostfaderLevels)
out: 120.0, gate: 1.0, lag: 0.01
... mixer/patch[hard,mix]/8x8 (RackOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 80.0, lag: 0.01, mix: 1.0, out: 72.0
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 72.0, lag: 0.01, out: 136.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 136.0, index: 1.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 136.0, lag: 0.01, mix: 1.0, out: 72.0
... mixer/levels/8 (PrefaderLevels)
out: 72.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c4, gate: 1.0, hard_gate: 1.0, in_: 72.0, lag: 0.01, out: 72.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 72.0, lag: 0.01, out: 32.0
... mixer/levels/8 (PostfaderLevels)
out: 72.0, gate: 1.0, lag: 0.01
... mixer/patch[hard,mix]/8x8 (RackOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 32.0, lag: 0.01, mix: 1.0, out: 24.0
... group (outer/b)
... mixer/patch[gain]/8x8 (RackIn)
active: 1.0, gain: 0.0, gate: 1.0, in_: 24.0, lag: 0.01, out: 144.0
... group (ChainContainer)
... group (outer/b/a)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 152.0, lag: 0.01, out: 160.0
... mixer/levels/8 (InputLevels)
out: 160.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 160.0, lag: 0.01, out: 168.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 168.0, index: 4.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 168.0, lag: 0.01, mix: 1.0, out: 160.0
... mixer/levels/8 (PrefaderLevels)
out: 160.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c10, gate: 1.0, hard_gate: 1.0, in_: 160.0, lag: 0.01, out: 160.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 160.0, lag: 0.01, out: 144.0
... mixer/levels/8 (PostfaderLevels)
out: 160.0, gate: 1.0, lag: 0.01
... group (outer/b/b)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 176.0, lag: 0.01, out: 184.0
... mixer/levels/8 (InputLevels)
out: 184.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (inner/b)
... mixer/patch[gain]/8x8 (RackIn)
active: 1.0, gain: 0.0, gate: 1.0, in_: 184.0, lag: 0.01, out: 192.0
... group (ChainContainer)
... group (inner/b/a)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 200.0, lag: 0.01, out: 208.0
... mixer/levels/8 (InputLevels)
out: 208.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 208.0, lag: 0.01, out: 216.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 216.0, index: 6.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 216.0, lag: 0.01, mix: 1.0, out: 208.0
... mixer/levels/8 (PrefaderLevels)
out: 208.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c14, gate: 1.0, hard_gate: 1.0, in_: 208.0, lag: 0.01, out: 208.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 208.0, lag: 0.01, out: 192.0
... mixer/levels/8 (PostfaderLevels)
out: 208.0, gate: 1.0, lag: 0.01
... group (inner/b/b)
... group (Parameters)
... group (gain)
... group (panning)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 224.0, lag: 0.01, out: 232.0
... mixer/levels/8 (InputLevels)
out: 232.0, gate: 1.0, lag: 0.01
... group (Devices)
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 232.0, lag: 0.01, out: 240.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 240.0, index: 7.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 240.0, lag: 0.01, mix: 1.0, out: 232.0
... mixer/levels/8 (PrefaderLevels)
out: 232.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c16, gate: 1.0, hard_gate: 1.0, in_: 232.0, lag: 0.01, out: 232.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 232.0, lag: 0.01, out: 192.0
... mixer/levels/8 (PostfaderLevels)
out: 232.0, gate: 1.0, lag: 0.01
... mixer/patch[hard,mix]/8x8 (RackOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 192.0, lag: 0.01, mix: 1.0, out: 184.0
... group (AudioEffect)
... mixer/patch[replace]/8x8 (DeviceIn)
active: 1.0, gate: 1.0, in_: 184.0, lag: 0.01, out: 248.0
... group (Body)
... 7e3d216f841357d2a2e2ab2c3415df6f
out: 248.0, index: 5.0
... mixer/patch[hard,mix]/8x8 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 248.0, lag: 0.01, mix: 1.0, out: 184.0
... mixer/levels/8 (PrefaderLevels)
out: 184.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c12, gate: 1.0, hard_gate: 1.0, in_: 184.0, lag: 0.01, out: 184.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 184.0, lag: 0.01, out: 144.0
... mixer/levels/8 (PostfaderLevels)
out: 184.0, gate: 1.0, lag: 0.01
... mixer/patch[hard,mix]/8x8 (RackOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 144.0, lag: 0.01, mix: 1.0, out: 24.0
... mixer/levels/8 (PrefaderLevels)
out: 24.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c0, gate: 1.0, hard_gate: 1.0, in_: 24.0, lag: 0.01, out: 24.0
... group (PostFaderSends)
... mixer/patch[gain]/8x8 (Send)
active: 1.0, gain: 0.0, gate: 1.0, in_: 24.0, lag: 0.01, out: 264.0
... mixer/levels/8 (PostfaderLevels)
out: 24.0, gate: 1.0, lag: 0.01
... group (MasterTrack)
... group (Parameters)
... group (gain)
... group (Receives)
... mixer/patch[fb,gain]/8x8 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 256.0, lag: 0.01, out: 264.0
... mixer/levels/8 (InputLevels)
out: 264.0, gate: 1.0, lag: 0.01
... group (Devices)
... mixer/levels/8 (PrefaderLevels)
out: 264.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/8x8 (Output)
active: 1.0, gain: c18, gate: 1.0, hard_gate: 1.0, in_: 264.0, lag: 0.01, out: 264.0
... group (PostFaderSends)
... mixer/patch/8x2 (DirectOut)
active: 1.0, gate: 1.0, in_: 264.0, lag: 0.01, out: 0.0
... mixer/levels/8 (PostfaderLevels)
out: 264.0, gate: 1.0, lag: 0.01
... group (CueTrack)
... group (Parameters)
... group (gain)
... group (mix)
... group (Receives)
... mixer/patch[fb,gain]/2x2 (Input)
active: 1.0, gain: 0.0, gate: 1.0, in_: 272.0, lag: 0.01, out: 274.0
... mixer/levels/2 (InputLevels)
out: 274.0, gate: 1.0, lag: 0.01
... group (Devices)
... mixer/levels/2 (PrefaderLevels)
out: 274.0, gate: 1.0, lag: 0.01
... group (PreFaderSends)
... mixer/patch[gain,hard,replace]/2x2 (Output)
active: 1.0, gain: c19, gate: 1.0, hard_gate: 1.0, in_: 274.0, lag: 0.01, out: 274.0
... group (PostFaderSends)
... mixer/patch/2x2 (DirectOut)
active: 1.0, gate: 1.0, in_: 274.0, lag: 0.01, out: 2.0
... mixer/levels/2 (PostfaderLevels)
out: 274.0, gate: 1.0, lag: 0.01
"""
)
```
#### File: tests/domain/test_Instrument_perform.py
```python
import asyncio
import pytest
from supriya.assets.synthdefs import default
from uqbar.strings import normalize
from tloen.domain import Application, Instrument
from tloen.midi import NoteOnMessage
@pytest.fixture
async def application():
application = Application()
context = await application.add_context(name="Context")
await context.add_track(name="Track")
await application.boot()
yield application
await application.quit()
@pytest.mark.asyncio
async def test_1(application):
track = application.primary_context["Track"]
instrument = await track.add_device(Instrument, synthdef=default)
await asyncio.sleep(0.01)
async with instrument.lock(instrument, 0.0):
with instrument.capture() as transcript:
await instrument.perform([NoteOnMessage(pitch=57, velocity=100)])
await asyncio.sleep(0.01)
assert list(transcript) == [
instrument.CaptureEntry(
moment=None, label="I", message=NoteOnMessage(pitch=57, velocity=100)
)
]
assert format(await instrument.query(), "unindexed") == normalize(
"""
NODE TREE ... group (Instrument)
... mixer/patch[replace]/2x2 (DeviceIn)
active: 1.0, gate: 1.0, in_: 18.0, lag: 0.01, out: 28.0
... group (Body)
... default
out: 28.0, amplitude: 0.620001, frequency: 220.0, gate: 1.0, pan: 0.5
... mixer/patch[hard,mix]/2x2 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 28.0, lag: 0.01, mix: 1.0, out: 18.0
"""
)
async with instrument.lock(instrument, 0.0):
with instrument.capture() as transcript:
await instrument.perform([NoteOnMessage(pitch=57, velocity=127)])
await asyncio.sleep(0.01)
assert list(transcript) == [
instrument.CaptureEntry(
moment=None, label="I", message=NoteOnMessage(pitch=57, velocity=127)
)
]
assert format(await instrument.query(), "unindexed") == normalize(
"""
NODE TREE ... group (Instrument)
... mixer/patch[replace]/2x2 (DeviceIn)
active: 1.0, gate: 1.0, in_: 18.0, lag: 0.01, out: 28.0
... group (Body)
... default
out: 28.0, amplitude: 1.0, frequency: 220.0, gate: 1.0, pan: 0.5
... default
out: 28.0, amplitude: 0.620001, frequency: 220.0, gate: 0.0, pan: 0.5
... mixer/patch[hard,mix]/2x2 (DeviceOut)
active: 1.0, gate: 1.0, hard_gate: 1.0, in_: 28.0, lag: 0.01, mix: 1.0, out: 18.0
"""
)
```
#### File: tests/domain/test_Track_gain.py
```python
import asyncio
import pytest
from tloen.domain import Application, AudioEffect
@pytest.mark.asyncio
async def test_gain(dc_index_synthdef_factory):
application = Application(channel_count=1)
context = await application.add_context()
track = await context.add_track()
await track.add_device(AudioEffect, synthdef=dc_index_synthdef_factory)
await application.boot()
await asyncio.sleep(0.1)
assert track.rms_levels["prefader"] == (1.0,)
assert track.rms_levels["postfader"] == (1.0,)
assert context.master_track.rms_levels["input"] == (1.0,)
with context.provider.server.osc_protocol.capture() as transcript:
await track.parameters["gain"].set_(-6.0)
assert len(transcript.sent_messages) == 1
_, message = transcript.sent_messages[0]
assert message.to_list() == [None, [["/c_set", 0, -6.0]]]
await asyncio.sleep(0.2)
assert track.rms_levels["prefader"] == (1.0,)
assert round(track.rms_levels["postfader"][0], 4) == 0.5012
assert round(context.master_track.rms_levels["input"][0], 4) == 0.5012
```
#### File: tests/domain/test_Track_group.py
```python
import pytest
from tloen.domain import Application, Track
@pytest.mark.asyncio
async def test_1():
track_a = Track()
track_b = Track()
group_track = await Track.group([track_a, track_b])
assert isinstance(group_track, Track)
assert list(group_track.tracks) == [track_a, track_b]
assert group_track.application is track_a.application
assert group_track.channel_count is None
assert group_track.name is None
assert group_track.parent is None
assert group_track.provider is track_a.provider
assert not group_track.is_cued
assert not group_track.is_muted
assert not group_track.is_soloed
assert track_a.parent is group_track.tracks
assert track_b.parent is group_track.tracks
@pytest.mark.asyncio
async def test_2():
application = Application()
context = await application.add_context()
track_a = await context.add_track()
track_b = await context.add_track()
track_c = await context.add_track()
group_track = await Track.group([track_b, track_c])
assert list(context.tracks) == [track_a, group_track]
assert list(group_track.tracks) == [track_b, track_c]
assert group_track.application is application
assert group_track.parent is context.tracks
assert group_track.provider is context.provider
assert track_b.provider is context.provider
assert track_c.provider is context.provider
@pytest.mark.asyncio
async def test_3():
application = Application()
context = await application.add_context()
track_a = await context.add_track()
track_b = await context.add_track()
track_c = await context.add_track()
await application.boot()
group_track = await Track.group([track_b, track_c])
assert list(context.tracks) == [track_a, group_track]
assert list(group_track.tracks) == [track_b, track_c]
assert group_track.application is application
assert group_track.parent is context.tracks
assert group_track.provider is context.provider
assert track_b.provider is context.provider
assert track_c.provider is context.provider
```
#### File: tests/domain/test_Track_remove_devices.py
```python
import asyncio
import pytest
from supriya.synthdefs import SynthDefFactory
from tloen.domain import Application, AudioEffect
@pytest.fixture
def synthdef_factory():
return (
SynthDefFactory()
.with_channel_count(2)
.with_input()
.with_signal_block(lambda builder, source, state: (source * -2) + 0.25)
.with_gate(0.01, 0.01)
.with_output(replacing=True)
)
@pytest.mark.asyncio
async def test_1(synthdef_factory):
"""
Remove one device
"""
application = Application()
context = await application.add_context()
track = await context.add_track()
device = await track.add_device(AudioEffect, synthdef=synthdef_factory)
await track.remove_devices(device)
assert list(track.devices) == []
assert device.application is None
assert device.graph_order == ()
assert device.parent is None
assert device.provider is None
@pytest.mark.asyncio
async def test_2(synthdef_factory):
"""
Remove two devices
"""
application = Application()
context = await application.add_context()
track = await context.add_track()
device_one = await track.add_device(AudioEffect, synthdef=synthdef_factory)
device_two = await track.add_device(AudioEffect, synthdef=synthdef_factory)
await track.remove_devices(device_one, device_two)
assert list(track.devices) == []
assert device_one.application is None
assert device_one.graph_order == ()
assert device_one.parent is None
assert device_one.provider is None
assert device_two.application is None
assert device_two.graph_order == ()
assert device_two.parent is None
assert device_two.provider is None
@pytest.mark.asyncio
async def test_3(synthdef_factory):
"""
Remove first device, leaving second untouched
"""
application = Application()
context = await application.add_context()
track = await context.add_track()
device_one = await track.add_device(AudioEffect, synthdef=synthdef_factory)
device_two = await track.add_device(AudioEffect, synthdef=synthdef_factory)
await track.remove_devices(device_one)
assert list(track.devices) == [device_two]
assert device_one.application is None
assert device_one.graph_order == ()
assert device_one.parent is None
assert device_one.provider is None
assert device_two.application is context.application
assert device_two.graph_order == (3, 0, 0, 0, 5, 0)
assert device_two.parent is track.devices
assert device_two.provider is None
@pytest.mark.asyncio
async def test_4(synthdef_factory):
"""
Boot, remove first device, leaving second untouched
"""
application = Application()
context = await application.add_context()
track = await context.add_track()
device_one = await track.add_device(AudioEffect, synthdef=synthdef_factory)
device_two = await track.add_device(AudioEffect, synthdef=synthdef_factory)
await application.boot()
with context.provider.server.osc_protocol.capture() as transcript:
await track.remove_devices(device_one)
await asyncio.sleep(0.1)
assert list(track.devices) == [device_two]
assert device_one.application is None
assert device_one.graph_order == ()
assert device_one.parent is None
assert device_one.provider is None
assert device_two.application is context.application
assert device_two.graph_order == (3, 0, 0, 0, 5, 0)
assert device_two.parent is track.devices
assert device_two.provider is context.provider
assert len(transcript.sent_messages) == 1
_, message = transcript.sent_messages[0]
assert message.to_list() == [None, [["/n_set", 1014, "gate", 0]]]
assert track.peak_levels == dict(
input=(0.0, 0.0), postfader=(0.25, 0.25), prefader=(0.25, 0.25)
)
assert context.master_track.peak_levels == dict(
input=(0.25, 0.25), postfader=(0.25, 0.25), prefader=(0.25, 0.25)
)
```
#### File: tloen/tloen/bases.py
```python
import asyncio
import dataclasses
@dataclasses.dataclass
class Command:
future: asyncio.Future = dataclasses.field(init=False, compare=False, hash=False)
def __post_init__(self):
try:
self.future = asyncio.get_running_loop().create_future()
except Exception:
self.future = None
@dataclasses.dataclass
class Event:
pass
```
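`Command.__post_init__` only creates the future when an event loop is already running, which lets a producer enqueue a command and await its result while a consumer resolves it. A minimal round-trip sketch, assuming the module above is importable as `tloen.bases`; the `Ping` subclass and the queue wiring are illustrative only, not part of tloen.
```python
import asyncio

from tloen.bases import Command  # assumed import path, per the file above


class Ping(Command):
    """Hypothetical command used only for this sketch."""


async def consumer(queue: asyncio.Queue) -> None:
    # Pull one command off the queue, "handle" it, and resolve its future.
    command = await queue.get()
    command.future.set_result("pong")


async def main() -> None:
    queue: asyncio.Queue = asyncio.Queue()
    asyncio.create_task(consumer(queue))
    command = Ping()  # __post_init__ sees a running loop, so the future exists
    await queue.put(command)
    print(await command.future)  # -> "pong"


asyncio.run(main())
```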
#### File: tloen/domain/audioeffects.py
```python
from typing import Union
from supriya import ugens
from supriya.enums import AddAction
from supriya.synthdefs import SynthDef, SynthDefFactory
from .devices import AllocatableDevice
from .parameters import BusParameter, Float
class AudioEffect(AllocatableDevice):
### INITIALIZER ###
def __init__(
self,
*,
synthdef: Union[SynthDef, SynthDefFactory],
name=None,
synthdef_kwargs=None,
parameters=None,
parameter_map=None,
uuid=None,
):
AllocatableDevice.__init__(
self,
name=name,
parameters=parameters,
parameter_map=parameter_map,
synthdef=synthdef,
synthdef_kwargs=synthdef_kwargs,
uuid=uuid,
)
### PRIVATE METHODS ###
def _allocate_synths(self, provider, channel_count, *, synth_pair=None):
synthdef = self.synthdef
if isinstance(synthdef, SynthDefFactory):
synthdef = synthdef.build(channel_count=self.effective_channel_count)
synth_target, synth_action = synth_pair or (
self.node_proxies["body"],
AddAction.ADD_TO_HEAD,
)
self._node_proxies["synth"] = provider.add_synth(
add_action=synth_action,
synthdef=synthdef,
target_node=synth_target,
**self._build_kwargs(),
)
def _reallocate(self, difference):
channel_count = self.effective_channel_count
synth_synth = self._node_proxies.pop("synth")
self._free_audio_buses()
self._allocate_audio_buses(self.provider, channel_count)
self._allocate_synths(
self.provider,
self.effective_channel_count,
synth_pair=(synth_synth, AddAction.ADD_AFTER),
)
synth_synth.free()
class Limiter(AudioEffect):
### INITIALIZER ###
# TODO: This should support a multiple-mono approach
def __init__(self, *, name=None, uuid=None):
gain_spec = Float(default=0, minimum=-96, maximum=6)
frequency_1_spec = Float(default=200, minimum=20, maximum=22050)
frequency_2_spec = Float(default=2000, minimum=20, maximum=22050)
AudioEffect.__init__(
self,
name=name,
parameters={
parameter.name: parameter
for parameter in [
BusParameter("frequency_1", frequency_1_spec),
BusParameter("frequency_2", frequency_2_spec),
BusParameter("band_1_gain", gain_spec),
BusParameter("band_2_gain", gain_spec),
BusParameter("band_3_gain", gain_spec),
BusParameter("band_1_limit", gain_spec),
BusParameter("band_2_limit", gain_spec),
BusParameter("band_3_limit", gain_spec),
BusParameter("band_1_pregain", gain_spec),
BusParameter("band_2_pregain", gain_spec),
BusParameter("band_3_pregain", gain_spec),
]
},
parameter_map={
name: name
for name in [
"frequency_1",
"frequency_2",
"band_1_gain",
"band_2_gain",
"band_3_gain",
"band_1_limit",
"band_2_limit",
"band_3_limit",
"band_1_pregain",
"band_2_pregain",
"band_3_pregain",
]
},
synthdef=self.build_synthdef(),
uuid=uuid,
)
def build_synthdef(self):
def signal_block(builder, source, state):
frequency_1 = builder["frequency_1"].minimum(builder["frequency_2"])
frequency_2 = builder["frequency_1"].maximum(builder["frequency_2"])
band_1 = ugens.LPF.ar(frequency=frequency_1, source=source)
band_2 = ugens.LPF.ar(frequency=frequency_2, source=source - band_1)
band_3 = source - band_2 - band_1 # TODO: optimize this
bands = [band_1, band_2, band_3]
limiters = []
for i, band in enumerate(bands, 1):
limiter = ugens.Limiter.ar(
source=band * builder[f"band_{i}_pregain"].db_to_amplitude(),
level=builder[f"band_{i}_limit"].db_to_amplitude(),
)
limiters.append(limiter * builder[f"band_{i}_gain"].db_to_amplitude())
return ugens.Mix.multichannel(
sources=limiters, channel_count=state["channel_count"],
)
factory = (
SynthDefFactory(
frequency_1=200,
frequency_2=2000,
band_1_gain=0,
band_1_limit=0,
band_1_pregain=0,
band_2_gain=0,
band_2_limit=0,
band_2_pregain=0,
band_3_gain=0,
band_3_limit=0,
band_3_pregain=0,
)
.with_channel_count(2)
.with_gate()
.with_input()
.with_output(replacing=True)
.with_signal_block(signal_block)
)
return factory
class Reverb(AudioEffect):
### INITIALIZER ###
# TODO: This should support a multiple-mono approach
def __init__(self, *, name=None, uuid=None):
AudioEffect.__init__(
self, name=name, synthdef=self.build_synthdef(), uuid=uuid,
)
def build_synthdef(self):
def signal_block(builder, source, state):
return ugens.FreeVerb.ar(source=source, mix=1.0)
factory = (
SynthDefFactory()
.with_channel_count(2)
.with_gate()
.with_input()
.with_output(replacing=True)
.with_signal_block(signal_block)
)
return factory
```
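A rough end-to-end sketch assembled from the test files earlier in this corpus: build a `SynthDefFactory`, mount it on a track as an `AudioEffect`, and boot. It assumes a local SuperCollider `scsynth` is available for `application.boot()`; the factory itself mirrors the fixtures used in the tests above.
```python
import asyncio

from supriya.synthdefs import SynthDefFactory
from tloen.domain import Application, AudioEffect


async def main() -> None:
    # Simple gain-halving effect, in the style of the test fixtures above.
    factory = (
        SynthDefFactory()
        .with_channel_count(2)
        .with_input()
        .with_signal_block(lambda builder, source, state: source * 0.5)
        .with_gate(0.01, 0.01)
        .with_output(replacing=True)
    )
    application = Application()
    context = await application.add_context()
    track = await context.add_track()
    await track.add_device(AudioEffect, synthdef=factory)
    await application.boot()  # requires a running/installable scsynth
    await asyncio.sleep(1.0)
    await application.quit()


asyncio.run(main())
```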
#### File: tloen/domain/notes.py
```python
class NoteSelector:
_valid_operators = frozenset(("==", "!=", "<", ">", "<=", ">="))
def __init__(self, clip, filters=None):
self._clip = clip
self._filters = tuple(filters or ())
def __iter__(self):
source = iter(self._clip)
for filter_ in self._filters:
filter_name, filter_args = filter_[0], filter_[1:]
filter_func = getattr(self, "_" + filter_name)
source = filter_func(source, *filter_args)
yield from source
def __and__(self, selector):
pass
def __or__(self, selector):
pass
def __invert__(self):
pass
    def __xor__(self, selector):
pass
def _and_selector(self, source, selector):
pass
def _or_selector(self, source, selector):
pass
def _xor_selector(self, source, selector):
pass
def _invert(self, source):
pass
def _between_offsets(self, source, start_offset, stop_offset):
        if start_offset is None and stop_offset is None:
            yield from source
        elif start_offset is None and stop_offset is not None:
for note in source:
if note.stop_offset <= stop_offset:
yield note
elif start_offset is not None and stop_offset is None:
for note in source:
if start_offset <= note.start_offset:
yield note
elif start_offset < stop_offset:
for note in source:
if (
start_offset <= note.start_offset
and note.stop_offset <= stop_offset
):
yield note
elif stop_offset < start_offset:
for note in source:
if note.stop_offset <= stop_offset or start_offset <= note.start_offset:
yield note
else:
yield from source
def _between_pitches(self, source, start_pitch, stop_pitch):
        if start_pitch is None and stop_pitch is None:
            yield from source
        elif start_pitch is None and stop_pitch is not None:
for note in source:
if note.pitch <= stop_pitch:
yield note
elif start_pitch is not None and stop_pitch is None:
for note in source:
if start_pitch <= note.pitch:
yield note
elif start_pitch < stop_pitch:
for note in source:
if start_pitch <= note.pitch <= stop_pitch:
yield note
elif stop_pitch < start_pitch:
for note in source:
if note.pitch <= stop_pitch or start_pitch <= note.pitch:
yield note
else:
yield from source
def _with_durations(self, source, durations, operator):
pass
def _with_pitches(self, source, pitches, operator):
pass
def _with_pitch_classes(self, source, pitch_classes, operator):
pass
def between_offsets(self, start_offset=None, stop_offset=None):
if start_offset is not None:
start_offset = float(start_offset)
if stop_offset is not None:
stop_offset = float(stop_offset)
filter_ = ("between_offsets", start_offset, stop_offset)
filters = self._filters + (filter_,)
return type(self)(self._clip, filters)
def between_pitches(self, start_pitch=None, stop_pitch=None):
if start_pitch is not None:
start_pitch = float(start_pitch)
if stop_pitch is not None:
stop_pitch = float(stop_pitch)
filter_ = ("between_pitches", start_pitch, stop_pitch)
filters = self._filters + (filter_,)
return type(self)(self._clip, filters)
def with_durations(self, durations, operator="=="):
pass
def with_pitches(self, pitches, operator="=="):
pass
def with_pitch_classes(self, pitch_classes, operator="=="):
pass
def delete(self):
self._clip.remove_notes(self)
def replace(self, notes):
self._clip.remove_notes(self)
self._clip.add_notes(notes)
def transpose(self, transposition):
notes = list(self)
self._clip.remove_notes(self)
self._clip.add_notes([note.transpose(transposition) for note in notes])
def translate(self, translation):
notes = list(self)
self._clip.remove_notes(self)
self._clip.add_notes(
[note.translate(translation, translation) for note in notes]
)
def translate_offsets(self, start_translation=None, stop_translation=None):
notes = list(self)
self._clip.remove_notes(self)
self._clip.add_notes(
[note.translate(start_translation, stop_translation) for note in notes]
)
```
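`NoteSelector` builds its queries lazily: each `between_*` call returns a new selector carrying one more filter tuple, and nothing is evaluated until iteration. A small standalone sketch of that pattern follows; the `Note` and `Clip` classes are simplified stand-ins for whatever clip and note types tloen actually uses, and the import path is inferred from the file header above.
```python
import dataclasses

from tloen.domain.notes import NoteSelector  # assumed import path


@dataclasses.dataclass(frozen=True)
class Note:
    # Stand-in note: only the attributes the offset/pitch filters inspect.
    pitch: float
    start_offset: float
    stop_offset: float


class Clip:
    # Stand-in clip providing the iterate / add / remove hooks the selector uses.
    def __init__(self, notes):
        self._notes = list(notes)

    def __iter__(self):
        return iter(self._notes)

    def add_notes(self, notes):
        self._notes.extend(notes)

    def remove_notes(self, notes):
        for note in list(notes):
            self._notes.remove(note)


clip = Clip([Note(60, 0.0, 1.0), Note(67, 2.0, 3.0), Note(72, 5.0, 6.0)])
selector = NoteSelector(clip).between_pitches(60, 71).between_offsets(0, 4)
print(list(selector))  # the two notes at pitches 60 and 67
selector.delete()      # removes exactly those notes from the clip
print(list(clip))      # only the pitch-72 note remains
```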
#### File: tloen/domain/sends.py
```python
import abc
from typing import Optional, Set, Union
from uuid import UUID, uuid4
from supriya.enums import AddAction, CalculationRate
from supriya.providers import NodeProxy
from supriya.typing import Default
from .bases import Allocatable, ApplicationObject
from .synthdefs import build_patch_synthdef
# Send SynthDef creation is universal
# Let's generalize it to support all inter-object comms
class SendObject(Allocatable):
### INITIALIZER ###
def __init__(self, name=None, uuid=None):
self._gain = 0.0
self._uuid = uuid or uuid4()
Allocatable.__init__(self, name=name)
### SPECIAL METHODS ###
def __str__(self):
node_proxy_id = int(self.node_proxy) if self.node_proxy is not None else "?"
obj_name = type(self).__name__
return "\n".join(
[
f"<{obj_name} [{node_proxy_id}] {self.uuid}>",
*(f" {line}" for child in self for line in str(child).splitlines()),
]
)
### PRIVATE METHODS ###
def _reallocate(self, difference):
Allocatable._reallocate(self, difference)
node_proxy = self._node_proxies.pop("node")
self._allocate(
self.provider, target_node=node_proxy, add_action=AddAction.ADD_AFTER
)
node_proxy.free()
### PUBLIC METHODS ###
@classmethod
def build_synthdef(
cls,
source_channel_count,
target_channel_count,
*,
feedback=False,
calculation_rate=CalculationRate.AUDIO,
):
return build_patch_synthdef(
source_channel_count,
target_channel_count,
feedback=feedback,
calculation_rate=calculation_rate,
)
def set_gain(self, gain):
pass
### PUBLIC PROPERTIES ###
@property
def gain(self):
return self._gain
@property
def uuid(self) -> UUID:
return self._uuid
class Patch(SendObject):
### INITIALIZER ###
def __init__(self, *, name=None, uuid=None):
SendObject.__init__(self, name=name, uuid=uuid)
### PRIVATE METHODS ###
def _allocate(self, provider, target_node, add_action):
# TODO: Primary node proxy should be a dedicated group
Allocatable._allocate(self, provider, target_node, add_action)
self._allocate_synths(self.parent.node_proxy, AddAction.ADD_TO_HEAD)
def _allocate_synths(self, target_node, add_action):
self._node_proxies["node"] = self.provider.add_synth(
active=self.is_active,
add_action=add_action,
in_=self.cached_state["source_bus"],
out=self.cached_state["target_bus"],
name=self.label,
synthdef=self.build_synthdef(
self.cached_state["source_channel_count"],
self.cached_state["target_channel_count"],
feedback=self.cached_state["feedback"],
),
target_node=target_node,
)
def _get_state(self):
return dict(
application=self.application,
feedback=self.feedback,
index=self.parent.index(self) if self.parent else None,
parent=self.parent,
source=self.effective_source,
source_anchor=self.source_anchor,
source_bus=self.source_bus,
source_channel_count=self.source_channel_count,
source_provider=self.source_provider,
target=self.effective_target,
target_anchor=self.target_anchor,
target_bus=self.target_bus,
target_channel_count=self.target_channel_count,
target_provider=self.target_provider,
)
def _reconcile(
self,
target_node: Optional[NodeProxy] = None,
add_action: Optional[int] = None,
dispose_only: bool = False,
**kwargs,
):
difference = self._get_state_difference()
if "application" in difference:
old_application, new_application = difference.pop("application")
if old_application:
self._deapplicate(old_application)
if new_application:
self._applicate(new_application)
if "source_anchor" in difference:
old_anchor, new_anchor = difference.pop("source_anchor")
if old_anchor is not None and hasattr(old_anchor, "_dependencies"):
old_anchor._dependencies.remove(self)
if new_anchor is not None and hasattr(new_anchor, "_dependencies"):
new_anchor._dependencies.add(self)
if "target_anchor" in difference:
old_anchor, new_anchor = difference.pop("target_anchor")
if old_anchor is not None and hasattr(old_anchor, "_dependencies"):
old_anchor._dependencies.remove(self)
if new_anchor is not None and hasattr(new_anchor, "_dependencies"):
new_anchor._dependencies.add(self)
if "source_provider" in difference or "target_provider" in difference:
source, target = self.effective_source, self.effective_target
old_source_provider, new_source_provider = difference.pop(
"source_provider",
(source.provider, source.provider) if source else (None, None),
)
old_target_provider, new_target_provider = difference.pop(
"target_provider",
(target.provider, target.provider) if target else (None, None),
)
if old_source_provider and (old_source_provider is old_target_provider):
self._deallocate(old_source_provider, dispose_only=dispose_only)
if new_source_provider and (new_source_provider is new_target_provider):
self._allocate(new_source_provider, target_node, add_action)
elif self.provider and any(
[
"feedback" in difference,
"source_bus" in difference,
"source_channel_count" in difference,
"target_bus" in difference,
"target_channel_count" in difference,
]
):
self._reallocate(difference)
def _serialize(self):
serialized, auxiliary_entities = super()._serialize()
serialized["spec"]["target"] = (
str(self.effective_target.uuid)
if not isinstance(self.target, Default)
else "default"
)
return serialized, auxiliary_entities
### PUBLIC METHODS ###
### PUBLIC PROPERTIES ###
@property
def effective_source(self):
return self.source
@property
def effective_target(self):
return self.target
@property
def feedback(self):
source_anchor = self.source_anchor
target_anchor = self.target_anchor
if source_anchor is not None and target_anchor is not None:
return target_anchor.graph_order < source_anchor.graph_order
return None
@property
@abc.abstractmethod
def source(self):
raise NotImplementedError
@property
@abc.abstractmethod
def source_anchor(self):
raise NotImplementedError
@property
@abc.abstractmethod
def source_bus(self):
raise NotImplementedError
@property
def source_channel_count(self):
effective_source = self.effective_source
if effective_source:
return effective_source.effective_channel_count
return None
@property
def source_provider(self):
effective_source = self.effective_source
if effective_source:
return effective_source.provider
return None
@property
@abc.abstractmethod
def target(self):
raise NotImplementedError
@property
@abc.abstractmethod
def target_anchor(self):
raise NotImplementedError
@property
@abc.abstractmethod
def target_bus(self):
raise NotImplementedError
@property
def target_channel_count(self):
effective_target = self.effective_target
if effective_target:
return effective_target.effective_channel_count
return None
@property
def target_provider(self):
effective_target = self.effective_target
if effective_target:
return effective_target.provider
return None
class Send(Patch):
### INITIALIZER ###
def __init__(self, target: Union[Allocatable, Default], *, name=None, uuid=None):
if not isinstance(target, Default):
if not hasattr(target, "send_target"):
raise ValueError
self._target = target
Patch.__init__(self, name=name, uuid=uuid)
### PRIVATE METHODS ###
@classmethod
async def _deserialize(cls, data, application) -> bool:
parent_uuid = UUID(data["meta"]["parent"])
parent = application.registry.get(parent_uuid)
if parent is None:
return True
if data["spec"]["target"] == "default":
target = Default()
else:
target_uuid = UUID(data["spec"]["target"])
target = application.registry.get(target_uuid)
if target is None:
return True
send = cls(
name=data["meta"].get("name"),
uuid=UUID(data["meta"]["uuid"]),
target=target,
)
if data["spec"]["position"] == "prefader":
parent.prefader_sends._append(send)
elif data["spec"]["position"] == "postfader":
parent.postfader_sends._append(send)
else:
raise ValueError(f"Unknown position: {data['position']}")
return False
### PUBLIC METHODS ###
@classmethod
def build_synthdef(
cls,
source_channel_count,
target_channel_count,
*,
feedback=False,
calculation_rate=CalculationRate.AUDIO,
):
return build_patch_synthdef(
source_channel_count,
target_channel_count,
feedback=feedback,
gain=True,
calculation_rate=calculation_rate,
)
### PUBLIC PROPERTIES ###
@property
def effective_source(self):
return self.source
@property
def effective_target(self):
if not isinstance(self.target, Default):
return self.target
for parent in self.parentage[1:]:
if hasattr(parent, "default_send_target"):
return parent.default_send_target
return None
@property
def source(self):
if self.parent and self.parent.parent:
return self.parent.parent
return None
@property
def source_anchor(self):
return self
@property
def source_bus(self):
source = self.effective_source
if source:
return source.audio_bus_proxies.get("output")
return None
@property
def target(self):
return self._target
@property
def target_anchor(self):
effective_target = self.effective_target
return effective_target.send_target if effective_target else None
@property
def target_bus(self):
target = self.effective_target
if not target:
return None
if self.feedback:
return target.audio_bus_proxies.get("input")
return target.audio_bus_proxies.get("output")
class Receive(Patch):
### INITIALIZER ###
def __init__(self, source: Union[Allocatable, Default], *, name=None, uuid=None):
if not isinstance(source, Default):
if not hasattr(source, "receive_target"):
raise ValueError
self._source = source
Patch.__init__(self, name=name, uuid=uuid)
### PRIVATE METHODS ###
@classmethod
async def _deserialize(cls, data, application) -> bool:
parent_uuid = UUID(data["meta"]["parent"])
parent = application.registry.get(parent_uuid)
if parent is None:
return True
if data["spec"]["source"] == "default":
source = Default()
else:
source_uuid = UUID(data["spec"]["source"])
source = application.registry.get(source_uuid)
if source is None:
return True
send = cls(
name=data["meta"].get("name"),
uuid=UUID(data["meta"]["uuid"]),
source=source,
)
if data["spec"]["position"] == "prefader":
parent.prefader_sends._append(send)
elif data["spec"]["position"] == "postfader":
parent.postfader_sends._append(send)
else:
raise ValueError(f"Unknown position: {data['position']}")
return False
### PUBLIC PROPERTIES ###
@property
def effective_source(self):
if not isinstance(self.source, Default):
return self.source
for parent in self.parentage[1:]:
if hasattr(parent, "default_receive_target"):
return parent.default_receive_target
return None
@property
def effective_target(self):
return self.target
@property
def source(self):
return self._source
@property
def source_anchor(self):
effective_source = self.effective_source
return effective_source.receive_target if effective_source else None
@property
def source_bus(self):
source = self.effective_source
if not source:
return None
return source.audio_bus_proxies.get("output")
@property
def target(self):
if self.parent and self.parent.parent:
return self.parent.parent
return None
@property
def target_anchor(self):
return self
@property
def target_bus(self):
target = self.effective_target
if target:
return target.audio_bus_proxies.get("output")
return None
class DirectIn(SendObject):
### INITIALIZER ###
def __init__(
self, source_bus_id: int, source_channel_count: int, *, name=None, uuid=None
):
self._source_bus_id = int(source_bus_id)
self._source_channel_count = int(source_channel_count)
SendObject.__init__(self, name=name, uuid=uuid)
### PRIVATE METHODS ###
def _allocate(self, provider, target_node, add_action):
Allocatable._allocate(self, provider, target_node, add_action)
self._node_proxies["node"] = provider.add_synth(
in_=self.source_bus_id,
out=self.parent.parent.audio_bus_proxies["output"],
synthdef=self.build_synthdef(
self.source_channel_count, self.effective_channel_count
),
name=self.label,
target_node=self.parent.node_proxy,
)
### PUBLIC PROPERTIES ###
@property
def source_bus_id(self):
return self._source_bus_id
@property
def source_channel_count(self):
return self._source_channel_count
class DirectOut(SendObject):
### INITIALIZER ###
def __init__(
self, target_bus_id: int, target_channel_count: int, *, name=None, uuid=None
):
self._target_bus_id = int(target_bus_id)
self._target_channel_count = int(target_channel_count)
SendObject.__init__(self, name=name, uuid=uuid)
### PRIVATE METHODS ###
def _allocate(self, provider, target_node, add_action):
Allocatable._allocate(self, provider, target_node, add_action)
self._node_proxies["node"] = provider.add_synth(
active=self.is_active,
in_=self.parent.parent.audio_bus_proxies["output"],
out=self.target_bus_id,
synthdef=self.build_synthdef(
self.effective_channel_count, self.target_channel_count
),
name=self.label,
target_node=self.parent.node_proxy,
)
@classmethod
async def _deserialize(cls, data, application):
parent_uuid = UUID(data["meta"]["parent"])
parent = application.registry.get(parent_uuid)
if parent is None:
return True
send = cls(
name=data["meta"].get("name"),
uuid=UUID(data["meta"]["uuid"]),
target_bus_id=data["spec"]["target_bus_id"],
target_channel_count=data["spec"]["target_channel_count"],
)
if data["spec"]["position"] == "prefader":
parent.prefader_sends._append(send)
elif data["spec"]["position"] == "postfader":
parent.postfader_sends._append(send)
else:
raise ValueError(f"Unknown position: {data['position']}")
def _reallocate(self, difference):
Allocatable._reallocate(self, difference)
node_proxy = self._node_proxies.pop("node")
self._allocate(
self.provider, target_node=node_proxy, add_action=AddAction.ADD_AFTER
)
node_proxy.free()
def _serialize(self):
serialized, auxiliary_entities = super()._serialize()
serialized["spec"].update(
target_bus_id=self.target_bus_id,
target_channel_count=self.target_channel_count,
)
return serialized, auxiliary_entities
### PUBLIC PROPERTIES ###
@property
def target_bus_id(self):
return self._target_bus_id
@property
def target_channel_count(self):
return self._target_channel_count
class Target(ApplicationObject):
### INITIALIZER ###
def __init__(self, *, label=None):
ApplicationObject.__init__(self)
self._dependencies: Set[Send] = set()
self._label = label
### SPECIAL METHODS ###
def __str__(self):
return f"<{self.label} ({len(self._dependencies)})>"
### PUBLIC PROPERTIES ###
@property
def label(self):
return self._label or type(self).__name__
```
#### File: tloen/textui/__init__.py
```python
import asyncio
import urwid
from ..commands.applications import (
BootApplication,
ExitToTerminal,
QuitApplication,
)
from ..commands.slots import FireSlot
from ..commands.transports import ToggleTransport
from ..pubsub import PubSub
from .transport import TransportWidget
from .tree import ContextTree, DeviceTree, ParameterTree
class Application:
def __init__(self, command_queue, pubsub=None, registry=None):
self.command_queue = command_queue
self.pubsub = pubsub or PubSub()
self.registry = registry if registry is not None else {}
transport_widget = TransportWidget(self.pubsub)
context_tree = ContextTree(
self.command_queue, pubsub=self.pubsub, registry=self.registry,
)
device_tree = DeviceTree(
self.command_queue, pubsub=self.pubsub, registry=self.registry,
)
parameter_tree = ParameterTree(
self.command_queue, pubsub=self.pubsub, registry=self.registry,
)
header = urwid.Columns([urwid.Text("\nt / l / ö / n"), transport_widget])
body = urwid.Columns(
[
urwid.LineBox(context_tree, title="tracks", title_align="left"),
urwid.LineBox(device_tree, title="devices", title_align="left"),
urwid.LineBox(parameter_tree, title="parameters", title_align="left"),
],
dividechars=1,
)
footer = urwid.LineBox(urwid.Text("..."))
self.widget = urwid.Frame(body, header=header, footer=footer)
self.handlers = {
"ctrl q": QuitApplication(),
"ctrl b": BootApplication(),
"ctrl c": ExitToTerminal(),
"ctrl g": FireSlot(),
" ": ToggleTransport(),
}
def unhandled_input(self, key):
command = self.handlers.get(key)
if command is None:
return
self.command_queue.put_nowait(command)
async def run_async(self):
loop = asyncio.get_running_loop()
self.exit_future = loop.create_future()
self.main_loop = urwid.MainLoop(
self.widget,
event_loop=urwid.AsyncioEventLoop(loop=loop),
unhandled_input=self.unhandled_input,
)
try:
self.main_loop.start()
self.main_loop.screen.tty_signal_keys(
"undefined", "undefined", "undefined", "undefined", "undefined"
)
await self.exit_future
finally:
self.main_loop.stop()
def exit(self):
self.exit_future.set_result(True)
``` |
{
"source": "josiah-wolf-oberholtzer/uqbar",
"score": 2
} |
#### File: josiah-wolf-oberholtzer/uqbar/setup.py
```python
import pathlib
import sys
from distutils.version import LooseVersion
import setuptools
package_name = "uqbar"
def read_version():
root_path = pathlib.Path(__file__).parent
version_path = root_path / package_name / "_version.py"
with version_path.open() as file_pointer:
file_contents = file_pointer.read()
local_dict = {}
exec(file_contents, None, local_dict)
return local_dict["__version__"]
description = "Tools for building documentation with Sphinx, Graphviz and LaTeX"
classifiers = [
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: MacOS",
"Operating System :: POSIX",
"Programming Language :: Python :: 3.6",
]
install_requires = [
"Sphinx >= 3.2.0",
"Unidecode >= 1.0.0",
"black",
"sphinx-rtd-theme >= 0.5.0",
]
if LooseVersion(sys.version.split()[0]) < LooseVersion("3.7.0"):
install_requires.append("dataclasses")
extras_require = {
"test": [
"flake8 >= 3.9.0",
"isort >= 5.9.0",
"mypy >= 0.900",
"pytest >= 6.2.0",
"pytest-cov >= 2.12.0",
"types-PyYAML",
"types-docutils",
]
}
keywords = ["sphinx", "graphviz", "latex", "documentation"]
with open("README.md", "r") as file_pointer:
long_description = file_pointer.read()
version = read_version()
if __name__ == "__main__":
setuptools.setup(
author="<NAME>",
author_email="<EMAIL>",
classifiers=classifiers,
description=description,
extras_require=extras_require,
include_package_data=True,
install_requires=install_requires,
keywords=keywords,
        long_description=long_description,
name=package_name,
packages=[package_name],
url="https://github.com/josiah-wolf-oberholtzer/uqbar",
version=version,
zip_safe=False,
)
```
#### File: uqbar/tests/test_book_sphinx.py
```python
import sys
from distutils.version import LooseVersion
import pytest
from docutils.parsers.rst import directives
import uqbar.book.sphinx
from uqbar.book.console import ConsoleError, ConsoleInput, ConsoleOutput
from uqbar.book.extensions import GraphExtension
from uqbar.book.sphinx import UqbarBookDirective
from uqbar.strings import normalize
@pytest.fixture(scope="module", autouse=True)
def register_directives():
directives.register_directive("book", UqbarBookDirective)
yield
directives._directives.pop("book")
source_a = """
::
>>> string = 'Hello, world!'
::
>>> for i in range(3):
... string += " {}".format(i)
...
::
>>> print(string)
Hello, world!
"""
source_b = """
::
>>> import uqbar.graphs
>>> g = uqbar.graphs.Graph()
>>> n1 = uqbar.graphs.Node()
>>> n2 = uqbar.graphs.Node()
>>> g.extend([n1, n2])
>>> e = n1.attach(n2)
::
>>> print(format(g, "graphviz"))
::
>>> uqbar.graphs.Grapher(g)()
"""
source_c = """
::
>>> import uqbar.graphs
>>> g = uqbar.graphs.Graph()
>>> n1 = uqbar.graphs.Node()
>>> g.append(n1)
>>> for i in range(3):
... n2 = uqbar.graphs.Node()
... g.append(n2)
... e = n1.attach(n2)
... uqbar.graphs.Grapher(g)()
... print(i)
... n1 = n2
...
"""
source_d = """
.. book::
:hide:
>>> import uqbar.graphs
>>> g = uqbar.graphs.Graph()
>>> n1 = uqbar.graphs.Node()
>>> n2 = uqbar.graphs.Node()
>>> g.extend([n1, n2])
>>> e = n1.attach(n2)
>>> uqbar.graphs.Grapher(g)()
"""
def test_parse_rst_01():
document = uqbar.book.sphinx.parse_rst(source_a)
assert normalize(document.pformat()) == normalize(
"""
<document source="test">
<literal_block xml:space="preserve">
>>> string = 'Hello, world!'
<literal_block xml:space="preserve">
>>> for i in range(3):
... string += " {}".format(i)
...
<literal_block xml:space="preserve">
>>> print(string)
Hello, world!
"""
)
def test_parse_rst_02():
document = uqbar.book.sphinx.parse_rst(source_b)
assert normalize(document.pformat()) == normalize(
"""
<document source="test">
<literal_block xml:space="preserve">
>>> import uqbar.graphs
>>> g = uqbar.graphs.Graph()
>>> n1 = uqbar.graphs.Node()
>>> n2 = uqbar.graphs.Node()
>>> g.extend([n1, n2])
>>> e = n1.attach(n2)
<literal_block xml:space="preserve">
>>> print(format(g, "graphviz"))
<literal_block xml:space="preserve">
>>> uqbar.graphs.Grapher(g)()
"""
)
def test_collect_literal_blocks_01():
document = uqbar.book.sphinx.parse_rst(source_a)
blocks = uqbar.book.sphinx.collect_literal_blocks(document)
expected = [
"""
<literal_block xml:space="preserve">
>>> string = 'Hello, world!'
""",
"""
<literal_block xml:space="preserve">
>>> for i in range(3):
... string += " {}".format(i)
...
""",
"""
<literal_block xml:space="preserve">
>>> print(string)
Hello, world!
""",
]
actual = [normalize(block.pformat()) for block in blocks]
expected = [normalize(text) for text in expected]
assert actual == expected
def test_collect_literal_blocks_02():
document = uqbar.book.sphinx.parse_rst(source_b)
blocks = uqbar.book.sphinx.collect_literal_blocks(document)
expected = [
"""
<literal_block xml:space="preserve">
>>> import uqbar.graphs
>>> g = uqbar.graphs.Graph()
>>> n1 = uqbar.graphs.Node()
>>> n2 = uqbar.graphs.Node()
>>> g.extend([n1, n2])
>>> e = n1.attach(n2)
""",
"""
<literal_block xml:space="preserve">
>>> print(format(g, "graphviz"))
""",
"""
<literal_block xml:space="preserve">
>>> uqbar.graphs.Grapher(g)()
""",
]
actual = [normalize(block.pformat()) for block in blocks]
expected = [normalize(text) for text in expected]
assert actual == expected
def test_interpret_code_blocks_01():
document = uqbar.book.sphinx.parse_rst(source_a)
blocks = uqbar.book.sphinx.collect_literal_blocks(document)
node_mapping = uqbar.book.sphinx.interpret_code_blocks(blocks)
assert list(node_mapping.values()) == [
[ConsoleInput(string=">>> string = 'Hello, world!'\n")],
[
ConsoleInput(
string='>>> for i in range(3):\n... string += " {}".format(i)\n... \n'
)
],
[
ConsoleInput(string=">>> print(string)\n"),
ConsoleOutput(string="Hello, world! 0 1 2\n"),
],
]
def test_interpret_code_blocks_02():
def logger_func(message):
messages.append(message)
error_message = (
"Traceback (most recent call last):\n" ' File "<stdin>", line 1, in <module>\n'
)
if LooseVersion(sys.version.split()[0]) < LooseVersion("3.7"):
error_message += "TypeError: must be str, not int\n"
else:
error_message += 'TypeError: can only concatenate str (not "int") to str\n'
messages = []
source = normalize(
"""
This will interpret happily.
::
>>> "1" + 2
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
{error_message}
""".format(
error_message=error_message
)
)
document = uqbar.book.sphinx.parse_rst(source)
blocks = uqbar.book.sphinx.collect_literal_blocks(document)
# This has a traceback, so it passes.
uqbar.book.sphinx.interpret_code_blocks(blocks, logger_func=logger_func)
assert messages == [error_message]
messages[:] = []
source = normalize(
"""
This will not interpret happily.
::
>>> for i in range(1, 4):
... i / 0
...
"This is fine"
"""
)
document = uqbar.book.sphinx.parse_rst(source)
blocks = uqbar.book.sphinx.collect_literal_blocks(document)
with pytest.raises(ConsoleError):
# This does not have a traceback, so it fails.
uqbar.book.sphinx.interpret_code_blocks(blocks, logger_func=logger_func)
assert messages == [
(
"Traceback (most recent call last):\n"
' File "<stdin>", line 2, in <module>\n'
"ZeroDivisionError: division by zero\n"
)
]
messages[:] = []
# This passes because we force it to.
uqbar.book.sphinx.interpret_code_blocks(
blocks, allow_exceptions=True, logger_func=logger_func
)
assert messages == [
(
"Traceback (most recent call last):\n"
' File "<stdin>", line 2, in <module>\n'
"ZeroDivisionError: division by zero\n"
)
]
def test_rebuild_document_01():
document = uqbar.book.sphinx.parse_rst(source_a)
blocks = uqbar.book.sphinx.collect_literal_blocks(document)
node_mapping = uqbar.book.sphinx.interpret_code_blocks(blocks)
uqbar.book.sphinx.rebuild_document(document, node_mapping)
assert normalize(document.pformat()) == normalize(
"""
<document source="test">
<literal_block xml:space="preserve">
>>> string = 'Hello, world!'
<literal_block xml:space="preserve">
>>> for i in range(3):
... string += " {}".format(i)
...
<literal_block xml:space="preserve">
>>> print(string)
Hello, world! 0 1 2
"""
)
def test_rebuild_document_02():
document = uqbar.book.sphinx.parse_rst(source_b)
blocks = uqbar.book.sphinx.collect_literal_blocks(document)
extensions = [GraphExtension]
node_mapping = uqbar.book.sphinx.interpret_code_blocks(
blocks, extensions=extensions
)
uqbar.book.sphinx.rebuild_document(document, node_mapping)
assert normalize(document.pformat()) == normalize(
"""
<document source="test">
<literal_block xml:space="preserve">
>>> import uqbar.graphs
>>> g = uqbar.graphs.Graph()
>>> n1 = uqbar.graphs.Node()
>>> n2 = uqbar.graphs.Node()
>>> g.extend([n1, n2])
>>> e = n1.attach(n2)
<literal_block xml:space="preserve">
>>> print(format(g, "graphviz"))
digraph G {
node_0;
node_1;
node_0 -> node_1;
}
<literal_block xml:space="preserve">
>>> uqbar.graphs.Grapher(g)()
<graphviz_block layout="dot" xml:space="preserve">
digraph G {
node_0;
node_1;
node_0 -> node_1;
}
"""
)
def test_rebuild_document_03():
document = uqbar.book.sphinx.parse_rst(source_c)
blocks = uqbar.book.sphinx.collect_literal_blocks(document)
extensions = [GraphExtension]
node_mapping = uqbar.book.sphinx.interpret_code_blocks(
blocks, extensions=extensions
)
uqbar.book.sphinx.rebuild_document(document, node_mapping)
assert normalize(document.pformat()) == normalize(
"""
<document source="test">
<literal_block xml:space="preserve">
>>> import uqbar.graphs
>>> g = uqbar.graphs.Graph()
>>> n1 = uqbar.graphs.Node()
>>> g.append(n1)
>>> for i in range(3):
... n2 = uqbar.graphs.Node()
... g.append(n2)
... e = n1.attach(n2)
... uqbar.graphs.Grapher(g)()
... print(i)
... n1 = n2
...
<graphviz_block layout="dot" xml:space="preserve">
digraph G {
node_0;
node_1;
node_0 -> node_1;
}
<literal_block xml:space="preserve">
0
<graphviz_block layout="dot" xml:space="preserve">
digraph G {
node_0;
node_1;
node_2;
node_0 -> node_1;
node_1 -> node_2;
}
<literal_block xml:space="preserve">
1
<graphviz_block layout="dot" xml:space="preserve">
digraph G {
node_0;
node_1;
node_2;
node_3;
node_0 -> node_1;
node_1 -> node_2;
node_2 -> node_3;
}
<literal_block xml:space="preserve">
2
"""
)
def test_rebuild_document_04():
document = uqbar.book.sphinx.parse_rst(source_d)
blocks = uqbar.book.sphinx.collect_literal_blocks(document)
extensions = [GraphExtension]
node_mapping = uqbar.book.sphinx.interpret_code_blocks(
blocks, extensions=extensions
)
uqbar.book.sphinx.rebuild_document(document, node_mapping)
assert normalize(document.pformat()) == normalize(
"""
<document source="test">
<graphviz_block layout="dot" xml:space="preserve">
digraph G {
node_0;
node_1;
node_0 -> node_1;
}
"""
)
```
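The tests above all exercise the same four-step pipeline. A condensed sketch using only the calls that appear in those tests (parse, collect, interpret, rebuild); the exact printed tree is omitted here since it depends on docutils' rendering.
```python
import uqbar.book.sphinx
from uqbar.strings import normalize

source = """
A tiny example.

::

    >>> 1 + 1
"""

document = uqbar.book.sphinx.parse_rst(source)
blocks = uqbar.book.sphinx.collect_literal_blocks(document)
node_mapping = uqbar.book.sphinx.interpret_code_blocks(blocks)
uqbar.book.sphinx.rebuild_document(document, node_mapping)
print(normalize(document.pformat()))  # the literal block now carries the captured "2"
```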
#### File: uqbar/tests/test_cli.py
```python
import io
import pytest
import uqbar.cli
import uqbar.io
from uqbar.strings import normalize
class MeowCLI(uqbar.cli.CLI):
alias = "meow"
scripting_group = "mammals"
short_description = "speak like a cat"
def _process_args(self, arguments):
if arguments.loud:
print("MEOW!")
else:
print("Mew.")
def _setup_argument_parser(self, parser):
parser.add_argument("--loud", action="store_true", help="be adamant")
class SquawkCLI(uqbar.cli.CLI):
alias = "squawk"
scripting_group = "birds"
short_description = "speak like a bird"
def _process_args(self, arguments):
if arguments.loud:
print("CAW!")
else:
print("Cheap.")
def _setup_argument_parser(self, parser):
parser.add_argument("--loud", action="store_true", help="be adamant")
class WoofCLI(uqbar.cli.CLI):
alias = "woof"
scripting_group = "mammals"
short_description = "speak like a dog"
def _process_args(self, arguments):
if arguments.loud:
print("WOOF!")
else:
print("Wuf.")
def _setup_argument_parser(self, parser):
parser.add_argument("--loud", action="store_true", help="be adamant")
class VoxAggregator(uqbar.cli.CLIAggregator):
@property
def cli_classes(self):
return [MeowCLI, SquawkCLI, WoofCLI]
def test_call():
string_io = io.StringIO()
with uqbar.io.RedirectedStreams(string_io, string_io):
with pytest.raises(SystemExit):
VoxAggregator()("mammals meow --loud")
assert normalize(string_io.getvalue()) == normalize(
"""
MEOW!
"""
)
def test_call_help():
string_io = io.StringIO()
with uqbar.io.RedirectedStreams(string_io, string_io):
with pytest.raises(SystemExit):
VoxAggregator()("mammals meow --help")
assert normalize(string_io.getvalue()) == normalize(
"""
usage: meow-cli [-h] [--version] [--loud]
speak like a cat
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
--loud be adamant
"""
)
def test_help():
string_io = io.StringIO()
with uqbar.io.RedirectedStreams(string_io, string_io):
with pytest.raises(SystemExit):
VoxAggregator()("help")
assert normalize(string_io.getvalue()) == normalize(
"""
usage: vox-aggregator [-h] [--version] {help,list,birds,mammals} ...
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
subcommands:
{help,list,birds,mammals}
help print subcommand help
list list subcommands
birds {squawk} subcommand(s)
mammals {meow, woof} subcommand(s)
"""
)
def test_list():
string_io = io.StringIO()
with uqbar.io.RedirectedStreams(string_io, string_io):
with pytest.raises(SystemExit):
VoxAggregator()("list")
assert normalize(string_io.getvalue()) == normalize(
"""
[birds]
squawk: speak like a bird
[mammals]
meow: speak like a cat
woof: speak like a dog
"""
)
```
#### File: uqbar/tests/test_graphs_Node.py
```python
import unittest
import uqbar.graphs
from uqbar.strings import normalize
class TestCase(unittest.TestCase):
def test___init__(self):
node = uqbar.graphs.Node()
assert node.name is None
node = uqbar.graphs.Node(name="foo")
assert node.name == "foo"
assert not node.attributes
attributes = uqbar.graphs.Attributes(mode="node", color="blue")
node = uqbar.graphs.Node(name="foo", attributes=attributes)
assert node.name == "foo"
assert len(node.attributes) == 1
assert node.attributes["color"] == node.attributes.Color("blue")
def test__get_canonical_name(self):
node = uqbar.graphs.Node()
assert node._get_canonical_name() == "node_0"
node = uqbar.graphs.Node(name="foo")
assert node._get_canonical_name() == "foo"
node = uqbar.graphs.Node(name="Foo Bar Baz")
assert node._get_canonical_name() == "Foo Bar Baz"
graph = uqbar.graphs.Graph()
node = uqbar.graphs.Node()
graph.append(node)
assert node._get_canonical_name() == "node_0"
graph = uqbar.graphs.Graph()
subgraph = uqbar.graphs.Graph()
node = uqbar.graphs.Node()
graph.append(subgraph)
subgraph.append(node)
assert node._get_canonical_name() == "node_0_0"
graph = uqbar.graphs.Graph()
node_a = uqbar.graphs.Node(name="foo")
node_b = uqbar.graphs.Node(name="foo")
node_c = uqbar.graphs.Node(name="bar")
graph.extend([node_a, node_b, node_c])
assert node_a._get_canonical_name() == "foo_0"
assert node_b._get_canonical_name() == "foo_1"
assert node_c._get_canonical_name() == "bar"
def test___format___str(self):
node = uqbar.graphs.Node()
assert format(node) == repr(node)
node = uqbar.graphs.Node(name="foo")
assert format(node) == repr(node)
attributes = uqbar.graphs.Attributes(mode="node", color="blue")
node = uqbar.graphs.Node(name="foo", attributes=attributes)
assert format(node) == repr(node)
def test___format___graphviz(self):
node = uqbar.graphs.Node()
assert format(node, "graphviz") == "node_0;"
node = uqbar.graphs.Node(name="foo")
assert format(node, "graphviz") == "foo;"
attributes = uqbar.graphs.Attributes(
mode="node",
color="blue",
fontname="Times New Roman",
fontsize=11.5,
shape="oval",
)
node = uqbar.graphs.Node(name="foo", attributes=attributes)
assert format(node, "graphviz") == normalize(
"""
foo [color=blue,
fontname="Times New Roman",
fontsize=11.5,
shape=oval];
"""
)
```
#### File: uqbar/apis/ModuleDocumenter.py
```python
import importlib
import pathlib
import types
from typing import List, MutableMapping, Sequence, Tuple, Type # noqa
import uqbar # noqa
from uqbar.apis.ClassDocumenter import ClassDocumenter
from uqbar.apis.FunctionDocumenter import FunctionDocumenter
from uqbar.apis.MemberDocumenter import MemberDocumenter
class ModuleDocumenter:
"""
A basic module documenter.
::
>>> import uqbar.apis
>>> documenter = uqbar.apis.ModuleDocumenter(
... 'uqbar.io',
... module_documenters=[
... uqbar.apis.ModuleDocumenter('uqbar.io.Timer'),
... ],
... )
>>> print(str(documenter))
.. _uqbar--io:
<BLANKLINE>
io
==
<BLANKLINE>
.. automodule:: uqbar.io
<BLANKLINE>
.. currentmodule:: uqbar.io
<BLANKLINE>
.. toctree::
<BLANKLINE>
Timer
<BLANKLINE>
.. autofunction:: find_common_prefix
<BLANKLINE>
.. autofunction:: find_executable
<BLANKLINE>
.. autofunction:: relative_to
<BLANKLINE>
.. autofunction:: walk
<BLANKLINE>
.. autofunction:: write
.. tip::
Subclass :py:class:`~uqbar.apis.ModuleDocumenter` to implement your own
custom module documentation output.
You'll need to provide your desired reStructuredText output
via an overridden
:py:meth:`~uqbar.apis.ModuleDocumenter.ModuleDocumenter.__str__`
implementation.
See :py:class:`~uqbar.apis.SummarizingModuleDocumenter` for an example.
:param package_path: the module path of the module to document
    :param document_private_members: whether to document private module members
:param member_documenter_classes: a list of
:py:class:`~uqbar.apis.MemberDocumenter` subclasses, defining what classes
to use to identify and document module members
    :param module_documenters: a list of documenters for submodules and
subpackages of the documented module; these are generated by an
:py:class:`~uqbar.apis.APIBuilder` instance rather than the module
documenter directly
:param api_builder: an :py:class:`~uqbar.apis.APIBuilder` instance
"""
### CLASS VARIABLES ###
__documentation_section__ = "Documenters"
### INITIALIZER ###
def __init__(
self,
package_path: str,
document_private_members: bool = False,
member_documenter_classes: Sequence[Type[MemberDocumenter]] = None,
module_documenters: Sequence["ModuleDocumenter"] = None,
) -> None:
self._package_path = package_path
client = importlib.import_module(package_path)
assert isinstance(client, types.ModuleType)
self._client = client
self._document_private_members = bool(document_private_members)
if member_documenter_classes is None:
member_documenter_classes = [ClassDocumenter, FunctionDocumenter]
for _ in member_documenter_classes:
assert issubclass(_, MemberDocumenter), _
self._member_documenter_classes = tuple(member_documenter_classes)
if module_documenters is not None:
for submodule_documenter in module_documenters:
assert isinstance(submodule_documenter, ModuleDocumenter)
module_documenters = tuple(module_documenters)
self._module_documenters = module_documenters or ()
self._member_documenters = self._populate()
### SPECIAL METHODS ###
def __str__(self) -> str:
result = self._build_preamble()
result.extend(self._build_toc(self.module_documenters or []))
for documenter in self._member_documenters:
result.extend(["", str(documenter)])
return "\n".join(result)
### PRIVATE METHODS ###
def _populate(self) -> Sequence[MemberDocumenter]:
documenters = []
for name in sorted(dir(self.client)):
if name.startswith("_") and not self.document_private_members:
continue
client = getattr(self.client, name)
for class_ in self.member_documenter_classes:
if class_.validate_client(client, self.package_path):
path = "{}.{}".format(client.__module__, client.__name__)
documenter = class_(path)
documenters.append(documenter)
break
return tuple(documenters)
def _build_toc(self, documenters, **kwargs) -> List[str]:
result: List[str] = []
if not documenters:
return result
result.extend(["", ".. toctree::"])
result.append("")
module_documenters = [_ for _ in documenters if isinstance(_, type(self))]
for module_documenter in module_documenters:
path = self._build_toc_path(module_documenter)
if path:
result.append(" {}".format(path))
return result
def _build_toc_path(self, documenter):
path = documenter.package_path.partition(self.package_path + ".")[-1]
base, _, name = path.rpartition(".")
if name.lower() == "index":
path = base + "._" + name
if not isinstance(documenter, ModuleDocumenter):
path = path.rpartition(".")[0]
elif documenter.is_package:
path += "/index"
if path.lower() == "index":
path = "_" + path
return path
def _build_preamble(self) -> List[str]:
result: List[str] = [
".. _{}:".format(self.reference_name),
"",
self.package_name,
"=" * len(self.package_name),
"",
".. automodule:: {}".format(self.package_path),
"",
".. currentmodule:: {}".format(self.package_path),
]
return result
### PUBLIC PROPERTIES ###
@property
def client(self) -> object:
return self._client
@property
def is_package(self) -> bool:
return hasattr(self.client, "__path__")
@property
def document_private_members(self) -> bool:
return self._document_private_members
@property
def documentation_path(self) -> pathlib.Path:
path = pathlib.Path(".").joinpath(*self.package_path.split("."))
if self.is_package:
path = path.joinpath("index")
elif path.name.lower() == "index":
name = path.name
path = path.parent.joinpath("_" + name)
return path.with_suffix(".rst")
@property
def is_nominative(self) -> bool:
if self.is_package or len(self.member_documenters) != 1:
return False
parts = self.member_documenters[0].package_path.split(".")
return parts[-1] == parts[-2]
@property
def member_documenter_classes(self) -> Sequence[Type[MemberDocumenter]]:
return self._member_documenter_classes
@property
def member_documenters(self) -> Sequence[MemberDocumenter]:
return self._member_documenters
@property
def member_documenters_by_section(
self,
) -> Sequence[Tuple[str, Sequence[MemberDocumenter]]]:
result: MutableMapping[str, List[MemberDocumenter]] = {}
for documenter in self.member_documenters:
result.setdefault(documenter.documentation_section, []).append(documenter)
return sorted(result.items())
@property
def module_documenters(self) -> Sequence["ModuleDocumenter"]:
return self._module_documenters
@property
def package_name(self) -> str:
if "." in self.package_path:
return self._package_path.rpartition(".")[-1]
return self._package_path
@property
def package_path(self) -> str:
return self._package_path
@property
def reference_name(self) -> str:
return self.package_path.replace("_", "-").replace(".", "--")
```
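A minimal, hedged usage sketch for the documenter above (it assumes `uqbar` is importable; the output path is just a scratch location chosen for illustration):
```python
# Hedged usage sketch: build the reST page for a single module and save it.
import uqbar.apis
import uqbar.io
documenter = uqbar.apis.ModuleDocumenter("uqbar.io")
rest_source = str(documenter)
print(rest_source.splitlines()[0])  # ".. _uqbar--io:" per _build_preamble above
uqbar.io.write(rest_source, "api_preview/uqbar.io.rst", verbose=True)
```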
#### File: uqbar/apis/RootDocumenter.py
```python
import pathlib
class RootDocumenter:
"""
A basic root documenter.
This documenter generates only one reStructuredText document: the root API
index page, which contains information about all of the modules traversed
by an :py:class:`~uqbar.apis.APIBuilder`.
Output is a basic `toctree` directive.
::
>>> import uqbar.apis
>>> documenter = uqbar.apis.RootDocumenter(
... module_documenters=[
... uqbar.apis.ModuleDocumenter('uqbar.io'),
... uqbar.apis.ModuleDocumenter('uqbar.strings'),
... ],
... )
>>> print(str(documenter))
API
===
<BLANKLINE>
.. toctree::
<BLANKLINE>
uqbar/io/index
uqbar/strings
<BLANKLINE>
.. tip::
Subclass :py:class:`~uqbar.apis.RootDocumenter` to
implement your own custom module documentation output.
You'll need to provide your desired reStructuredText output
via an overridden
:py:meth:`~uqbar.apis.RootDocumenter.RootDocumenter.__str__`
implementation.
See :py:class:`~uqbar.apis.SummarizingRootDocumenter` for an example.
    :param module_documenters: a list of documenters for modules and
packages of the root documenter; these are generated by an
:py:class:`~uqbar.apis.APIBuilder` instance rather than the module
documenter directly
"""
### CLASS VARIABLES ###
__documentation_section__ = "Documenters"
### INITIALIZER ###
def __init__(self, module_documenters=None, title="API"):
import uqbar.apis
if module_documenters is not None:
assert all(
isinstance(_, uqbar.apis.ModuleDocumenter) for _ in module_documenters
), module_documenters
module_documenters = tuple(module_documenters)
self._module_documenters = module_documenters or ()
self._title = title
### SPECIAL METHODS ###
def __str__(self):
result = [self.title, "=" * len(self.title), ""]
if self.module_documenters:
result.extend([".. toctree::", ""])
for module_documenter in self.module_documenters:
path = module_documenter.package_path.replace(".", "/")
if module_documenter.is_package:
path = "{}/index".format(path)
result.append(" {}".format(path))
result.append("")
return "\n".join(result)
### PUBLIC PROPERTIES ###
@property
def documentation_path(self):
return pathlib.Path("index.rst")
@property
def module_documenters(self):
return self._module_documenters
@property
def title(self):
return self._title
```
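As the tip in the docstring suggests, customizing the root page is only a matter of overriding `__str__`. A small hypothetical sketch (the blurb text and class name are made up for illustration):
```python
# Hypothetical subclass sketch: add a one-line blurb under the "API" title.
import uqbar.apis
class BlurbRootDocumenter(uqbar.apis.RootDocumenter):
    def __str__(self):
        lines = super().__str__().splitlines()
        # Title and underline occupy the first two lines; insert after them.
        lines[2:2] = ["", "Automatically generated with uqbar."]
        return "\n".join(lines)
print(BlurbRootDocumenter())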
#### File: uqbar/apis/SummarizingClassDocumenter.py
```python
import enum
import inspect
from typing import List, Tuple
from uqbar.apis.ClassDocumenter import ClassDocumenter
class SummarizingClassDocumenter(ClassDocumenter):
"""
A summarizing class documenter.
Organizes class members by category, separated by category title and
horizontal rule.
Categories include:
- Special methods
- Methods
- Class and static methods
- Read/write properties
- Read-only properties
::
>>> import uqbar.apis
>>> path = 'uqbar.apis.SummarizingClassDocumenter.SummarizingClassDocumenter'
>>> documenter = uqbar.apis.SummarizingClassDocumenter(path)
>>> documentation = str(documenter)
>>> print(documentation)
.. autoclass:: SummarizingClassDocumenter
<BLANKLINE>
.. raw:: html
<BLANKLINE>
<hr/>
<BLANKLINE>
.. rubric:: Attributes Summary
:class: class-header
<BLANKLINE>
.. autosummary::
:nosignatures:
<BLANKLINE>
__str__
ignored_special_methods
<BLANKLINE>
.. raw:: html
<BLANKLINE>
<hr/>
<BLANKLINE>
.. rubric:: Special methods
:class: class-header
<BLANKLINE>
.. automethod:: SummarizingClassDocumenter.__str__
<BLANKLINE>
.. raw:: html
<BLANKLINE>
<hr/>
<BLANKLINE>
.. rubric:: Class & static methods
:class: class-header
<BLANKLINE>
.. container:: inherited
<BLANKLINE>
.. automethod:: SummarizingClassDocumenter.validate_client
<BLANKLINE>
.. raw:: html
<BLANKLINE>
<hr/>
<BLANKLINE>
.. rubric:: Read-only properties
:class: class-header
<BLANKLINE>
.. container:: inherited
<BLANKLINE>
.. autoattribute:: SummarizingClassDocumenter.client
<BLANKLINE>
.. container:: inherited
<BLANKLINE>
.. autoattribute:: SummarizingClassDocumenter.documentation_section
<BLANKLINE>
.. container:: inherited
<BLANKLINE>
.. autoattribute:: SummarizingClassDocumenter.package_path
:param package_path: the module path and name of the member to document
"""
### CLASS VARIABLES ###
__documentation_section__ = "Documenters"
ignored_special_methods: Tuple[str, ...] = (
"__dict__",
"__getattribute__",
"__getnewargs__",
"__getstate__",
"__init__",
"__reduce__",
"__reduce_ex__",
"__setstate__",
"__sizeof__",
"__subclasshook__",
"fromkeys",
"pipe_cloexec",
)
### SPECIAL METHODS ###
def __str__(self) -> str:
name = getattr(self.client, "__name__")
if issubclass(self.client, Exception): # type: ignore
return ".. autoexception:: {}".format(name)
attributes = self._classify_class_attributes()
(
class_methods,
data,
methods,
readonly_properties,
readwrite_properties,
special_methods,
static_methods,
) = attributes
result = [".. autoclass:: {}".format(name)]
if issubclass(self.client, enum.Enum): # type: ignore
result.extend([" :members:", " :undoc-members:"])
else:
result.extend(self._build_member_autosummary(attributes))
result.extend(
self._build_attribute_section(
special_methods, "automethod", "Special methods"
)
)
result.extend(self._build_attribute_section(methods, "automethod", "Methods"))
result.extend(
self._build_attribute_section(
sorted(class_methods + static_methods, key=lambda x: x.name),
"automethod",
"Class & static methods",
)
)
result.extend(
self._build_attribute_section(
readwrite_properties, "autoattribute", "Read/write properties"
)
)
result.extend(
self._build_attribute_section(
readonly_properties, "autoattribute", "Read-only properties"
)
)
return "\n".join(result)
### PRIVATE METHODS ###
def _build_attribute_section(
self, attributes, directive: str, title: str
) -> List[str]:
result: List[str] = []
if not attributes:
return result
result.extend(
[
"",
" .. raw:: html",
"",
" <hr/>",
"",
" .. rubric:: {}".format(title),
" :class: class-header",
]
)
for attribute in attributes:
result.append("")
autodoc_directive = " .. {}:: {}.{}".format(
directive, getattr(self.client, "__name__"), attribute.name
)
if attribute.defining_class is self.client:
result.append(autodoc_directive)
else:
result.append(" .. container:: inherited")
result.append("")
result.append(" {}".format(autodoc_directive))
return result
def _build_member_autosummary(self, attributes) -> List[str]:
result: List[str] = []
all_attributes: List[inspect.Attribute] = []
for attribute_section in attributes:
all_attributes.extend(
attribute
for attribute in attribute_section
if attribute.defining_class is self.client
)
all_attributes.sort(key=lambda x: x.name)
if not all_attributes:
return result
result.extend(
[
"",
" .. raw:: html",
"",
" <hr/>",
"",
" .. rubric:: {}".format("Attributes Summary"),
" :class: class-header",
"",
" .. autosummary::",
" :nosignatures:",
"",
]
)
for attribute in all_attributes:
result.append(" {}".format(attribute.name))
return result
def _classify_class_attributes(self):
class_methods = []
data = []
methods = []
readonly_properties = []
readwrite_properties = []
special_methods = []
static_methods = []
attrs = inspect.classify_class_attrs(self.client)
for attr in attrs:
if attr.defining_class is object:
continue
elif (
getattr(self.client, "__documentation_ignore_inherited__", None)
and attr.defining_class is not self.client
):
continue
# Handle un-gettable attrs like Flask-SQLAlchemy's Model's `query`
try:
getattr(self.client, attr.name)
except Exception:
continue
if attr.kind == "method":
if attr.name not in self.ignored_special_methods:
if attr.name.startswith("__"):
special_methods.append(attr)
elif not attr.name.startswith("_"):
methods.append(attr)
elif attr.kind == "class method":
if attr.name not in self.ignored_special_methods:
if attr.name.startswith("__"):
special_methods.append(attr)
elif not attr.name.startswith("_"):
class_methods.append(attr)
elif attr.kind == "static method":
if attr.name not in self.ignored_special_methods:
if attr.name.startswith("__"):
special_methods.append(attr)
elif not attr.name.startswith("_"):
static_methods.append(attr)
elif attr.kind == "property" and not attr.name.startswith("_"):
if attr.object.fset is None:
readonly_properties.append(attr)
else:
readwrite_properties.append(attr)
elif (
attr.kind == "data"
and not attr.name.startswith("_")
and attr.name not in getattr(self.client, "__slots__", ())
):
data.append(attr)
class_methods = tuple(sorted(class_methods))
data = tuple(sorted(data))
methods = tuple(sorted(methods))
readonly_properties = tuple(sorted(readonly_properties))
readwrite_properties = tuple(sorted(readwrite_properties))
special_methods = tuple(sorted(special_methods))
static_methods = tuple(sorted(static_methods))
result = (
class_methods,
data,
methods,
readonly_properties,
readwrite_properties,
special_methods,
static_methods,
)
return result
```
#### File: uqbar/containers/UniqueTreeList.py
```python
from .UniqueTreeContainer import UniqueTreeContainer
class UniqueTreeList(UniqueTreeContainer):
"""
A list-like node in a "unique" tree.
List nodes may contain zero or more other nodes.
Unique tree nodes may have at most one parent and may appear only once in
the tree.
"""
### INITIALIZER ###
def __init__(self, children=None, name=None):
super().__init__(name=name)
self._children = []
if children is not None:
self[:] = children
### SPECIAL METHODS ###
def __delitem__(self, i):
if isinstance(i, str):
children = tuple(self._named_children[i])
for child in children:
parent = child.parent
del parent[parent.index(child)]
return
if isinstance(i, int):
if i < 0:
i = len(self) + i
i = slice(i, i + 1)
self.__setitem__(i, [])
self._mark_entire_tree_for_later_update()
def __getitem__(self, expr):
if isinstance(expr, (int, slice)):
return self._children[expr]
elif isinstance(expr, str):
result = sorted(self._named_children[expr], key=lambda x: x.graph_order)
if len(result) == 1:
return result[0]
return result
raise ValueError(expr)
def __setitem__(self, i, new_items):
if isinstance(i, int):
new_items = self._prepare_setitem_single(new_items)
start_index, stop_index, _ = slice(i, i + 1).indices(len(self))
else:
new_items = self._prepare_setitem_multiple(new_items)
start_index, stop_index, _ = i.indices(len(self))
old_items = self[start_index:stop_index]
self._validate(new_items, old_items, start_index, stop_index)
self._set_items(new_items, old_items, start_index, stop_index)
self._mark_entire_tree_for_later_update()
### PRIVATE METHODS ###
def _prepare_setitem_multiple(self, expr):
return list(expr)
def _prepare_setitem_single(self, expr):
return [expr]
def _set_items(self, new_items, old_items, start_index, stop_index):
for old_item in old_items:
old_item._set_parent(None)
for new_item in new_items:
new_item._set_parent(self)
self._children.__setitem__(slice(start_index, start_index), new_items)
def _validate(self, new_nodes, old_nodes, start_index, stop_index):
parentage = self.parentage
for new_node in new_nodes:
if not isinstance(new_node, self._node_class):
raise ValueError(f"Expected {self._node_class}, got {type(new_node)}")
elif new_node in parentage:
raise ValueError("Cannot set parent node as child.")
### PUBLIC METHODS ###
def append(self, expr):
self.__setitem__(slice(len(self), len(self)), [expr])
def extend(self, expr):
self.__setitem__(slice(len(self), len(self)), expr)
def index(self, expr):
for i, child in enumerate(self._children):
if child is expr:
return i
message = "{!r} not in {!r}."
message = message.format(expr, self)
raise ValueError(message)
def insert(self, i, expr):
self.__setitem__(slice(i, i), [expr])
def pop(self, i=-1):
node = self[i]
del self[i]
return node
def remove(self, node):
i = self.index(node)
del self[i]
```
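The "unique" part of the contract above (at most one parent, one appearance in the tree) is easiest to see with the graph classes that build on this container. A sketch, assuming the usual re-parenting behaviour of `uqbar.containers`:
```python
# Sketch: appending a node that already lives in another container moves it
# rather than duplicating it, because a node may have at most one parent.
import uqbar.graphs
node = uqbar.graphs.Node(name="n")
first = uqbar.graphs.Graph(children=[node])
second = uqbar.graphs.Graph()
second.append(node)
print(node.parent is second)  # expected: True (re-parented)
print(node in first)          # expected: False (no longer under `first`)
```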
#### File: uqbar/ext/ipython.py
```python
from xml.dom import minidom # type: ignore
from uqbar.graphs import Grapher
def load_ipython_extension(ipython):
patch_grapher()
def patch_grapher():
def get_format(self):
return "svg"
def open_output_path(self, output_path):
from IPython.core.display import display_svg
with output_path.open() as file_pointer:
contents = file_pointer.read()
delete_attributes = True
document = minidom.parseString(contents)
svg_element = document.getElementsByTagName("svg")[0]
view_box = svg_element.getAttribute("viewBox")
view_box = [float(_) for _ in view_box.split()]
if delete_attributes:
if svg_element.attributes.get("height", None):
del svg_element.attributes["height"]
if svg_element.attributes.get("width", None):
del svg_element.attributes["width"]
else:
height = "{}pt".format(int(view_box[-1] * 0.6))
width = "{}pt".format(int(view_box[-2] * 0.6))
svg_element.setAttribute("height", height)
svg_element.setAttribute("width", width)
svg_element.setAttribute("preserveAspectRatio", "xMinYMin")
contents = document.toprettyxml()
display_svg(contents, raw=True)
Grapher.get_format = get_format
Grapher.open_output_path = open_output_path
```
#### File: uqbar/graphs/Edge.py
```python
from typing import Mapping, Optional, Tuple, Union
import uqbar.graphs # noqa
from uqbar.graphs.Attributes import Attributes
class Edge(object):
"""
A Graphviz edge.
"""
### CLASS VARIABLES ###
__documentation_section__ = "Core Classes"
### INITIALIZER ###
def __init__(
self,
attributes: Union[Mapping[str, object], Attributes] = None,
is_directed: bool = True,
head_port_position: str = None,
tail_port_position: str = None,
) -> None:
from .Attachable import Attachable
from .Node import Node
self._attributes = Attributes("edge", **(attributes or {}))
self._head: Optional[Union[Node, Attachable]] = None
self._head_port_position = head_port_position
self._is_directed = bool(is_directed)
self._tail: Optional[Union[Node, Attachable]] = None
self._tail_port_position = tail_port_position
### SPECIAL METHODS ###
def __format__(self, format_spec=None) -> str:
# TODO: make the format specification options machine-readable
if format_spec == "graphviz":
return self.__format_graphviz__()
return str(self)
def __format_graphviz__(self) -> str:
from .Attachable import Attachable
from .Node import Node
connection = "->"
if not self.is_directed:
connection = "--"
tail_parts = []
tail_node = None
tail_port_name = None
if isinstance(self.tail, Node):
tail_node = self.tail
elif isinstance(self.tail, Attachable):
tail_node = self.tail._get_node()
tail_port_name = self.tail._get_port_name()
if tail_node is None:
raise ValueError
tail_parts.append(tail_node._get_canonical_name())
if tail_port_name:
tail_parts.append(tail_port_name)
if self.tail_port_position:
tail_parts.append(self.tail_port_position)
tail_name = ":".join(Attributes._format_value(part) for part in tail_parts)
head_parts = []
head_node = None
head_port_name = None
if isinstance(self.head, Node):
head_node = self.head
elif isinstance(self.head, Attachable):
head_node = self.head._get_node()
head_port_name = self.head._get_port_name()
if head_node is None:
raise ValueError
head_parts.append(head_node._get_canonical_name())
if head_port_name:
head_parts.append(head_port_name)
if self.head_port_position:
head_parts.append(self.head_port_position)
head_name = ":".join(Attributes._format_value(part) for part in head_parts)
edge_definition = "{} {} {}".format(tail_name, connection, head_name)
result = [edge_definition]
if len(self.attributes):
attributes = format(self.attributes, "graphviz").split("\n")
result[0] = "{} {}".format(result[0], attributes[0])
result.extend(attributes[1:])
else:
result[-1] += ";"
return "\n".join(result)
### PRIVATE METHODS ###
def _get_highest_parent(self) -> "uqbar.graphs.Graph":
if self.tail is None:
raise ValueError(self.tail)
elif self.head is None:
raise ValueError(self.head)
highest_parent: Optional[uqbar.graphs.Graph] = None
tail_parentage = list(self.tail.parentage[1:])
head_parentage = list(self.head.parentage[1:])
while (
len(tail_parentage)
and len(head_parentage)
and tail_parentage[-1] is head_parentage[-1]
):
highest_parent = tail_parentage[-1]
tail_parentage.pop()
head_parentage.pop()
if highest_parent is None:
message = "highest parent can not be none."
raise Exception(message)
return highest_parent
### PUBLIC METHODS ###
def attach(
self,
tail: Union["uqbar.graphs.Node", "uqbar.graphs.Attachable"],
head: Union["uqbar.graphs.Node", "uqbar.graphs.Attachable"],
) -> "Edge":
from .Attachable import Attachable
from .Node import Node
prototype = (Node, Attachable)
assert isinstance(tail, prototype)
assert isinstance(head, prototype)
self.detach()
tail._edges.add(self)
head._edges.add(self)
self._tail = tail
self._head = head
return self
def detach(self) -> "Edge":
if self.tail is not None:
self.tail._edges.remove(self)
self._tail = None
if self.head is not None:
self.head._edges.remove(self)
self._head = None
return self
### PUBLIC PROPERTIES ###
@property
def attributes(self) -> Attributes:
return self._attributes
@property
def head(self) -> Optional[Union["uqbar.graphs.Node", "uqbar.graphs.Attachable"]]:
return self._head
@property
def head_graph_order(self) -> Tuple[int, ...]:
if self.head is None:
return ()
return self.head.graph_order
@property
def head_port_position(self) -> Optional[str]:
return self._head_port_position
@property
def is_directed(self) -> bool:
return self._is_directed
@property
def tail(self) -> Optional[Union["uqbar.graphs.Node", "uqbar.graphs.Attachable"]]:
return self._tail
@property
def tail_graph_order(self) -> Tuple[int, ...]:
if self.tail is None:
return ()
return self.tail.graph_order
@property
def tail_port_position(self) -> Optional[str]:
return self._tail_port_position
```
#### File: uqbar/graphs/Graph.py
```python
from typing import Dict, List, Mapping, Set, Tuple, Union # noqa
from uqbar.containers import UniqueTreeList
from .Attributes import Attributes
from .Edge import Edge # noqa
from .Node import Node # noqa
class Graph(UniqueTreeList):
"""
A Graphviz graph, subgraph or cluster.
"""
### CLASS VARIABLES ###
__documentation_section__ = "Core Classes"
### INITIALIZER ###
def __init__(
self,
children=None,
*,
attributes: Union[Mapping[str, object], Attributes] = None,
edge_attributes: Union[Mapping[str, object], Attributes] = None,
is_cluster: bool = False,
is_digraph: bool = True,
name: str = None,
node_attributes: Union[Mapping[str, object], Attributes] = None,
) -> None:
UniqueTreeList.__init__(self, name=name, children=children)
self._attributes = Attributes(
"cluster" if is_cluster else "graph", **(attributes or {})
)
self._edge_attributes = Attributes("edge", **(edge_attributes or {}))
self._node_attributes = Attributes("node", **(node_attributes or {}))
self._is_cluster = bool(is_cluster)
self._is_digraph = bool(is_digraph)
### SPECIAL METHODS ###
def __format__(self, format_spec: str = None) -> str:
# TODO: make the format specification options machine-readable
if format_spec == "graphviz":
return self.__format_graphviz__()
return str(self)
def __format_graphviz__(self) -> str:
def recurse(graph):
indent = " "
result = []
if not graph.parent:
name = graph.name or "G"
if graph.is_digraph:
string = "digraph {} {{".format(Attributes._format_value(name))
else:
string = "graph {} {{".format(Attributes._format_value(name))
else:
if graph.name is not None:
name = graph.name
if graph.is_cluster:
name = "cluster_{}".format(name)
else:
name = graph._get_canonical_name()
string = "subgraph {} {{".format(Attributes._format_value(name))
result.append(string)
if graph.attributes:
attributes = "graph {}".format(
format(graph.attributes, "graphviz")
).split("\n")
result.extend(indent + line for line in attributes)
if graph.node_attributes:
attributes = "node {}".format(
format(graph.node_attributes, "graphviz")
).split("\n")
result.extend(indent + line for line in attributes)
if graph.edge_attributes:
attributes = "edge {}".format(
format(graph.edge_attributes, "graphviz")
).split("\n")
result.extend(indent + line for line in attributes)
for child in graph:
if isinstance(child, type(self)):
lines = (indent + line for line in recurse(child))
else:
lines = (
indent + line for line in format(child, "graphviz").split("\n")
)
result.extend(lines)
if graph in edge_parents:
for edge in edge_parents[graph]:
lines = format(edge, "graphviz").split("\n")
result.extend(indent + line for line in lines)
result.append("}")
return result
all_edges: Set[Edge] = set()
for child in self.depth_first():
for edge in getattr(child, "edges", ()):
if edge.tail.root is not edge.head.root:
continue
all_edges.add(edge)
edge_parents: Dict[Graph, List[Edge]] = {}
for edge in sorted(
all_edges, key=lambda edge: (edge.tail_graph_order, edge.head_graph_order)
):
highest_parent = edge._get_highest_parent()
edge_parents.setdefault(highest_parent, []).append(edge)
return "\n".join(recurse(self))
### PRIVATE METHODS ###
def _get_canonical_name(self) -> str:
name_prefix = "graph"
if self.is_cluster:
name_prefix = "cluster"
if self.name is not None:
name = self.name
root = self.root
if root:
instances = root[self.name]
if not isinstance(instances, type(self)):
name = "{}_{}".format(name, instances.index(self))
suffix = name
elif self.graph_order:
suffix = "_".join(str(x) for x in self.graph_order)
else:
suffix = "0"
return "{}_{}".format(name_prefix, suffix)
### PRIVATE PROPERTIES ###
@property
def _node_class(self) -> Tuple[type, ...]:
import uqbar.graphs
return (uqbar.graphs.Graph, uqbar.graphs.Node)
### PUBLIC PROPERTIES ###
@property
def attributes(self) -> Attributes:
return self._attributes
@property
def edge_attributes(self) -> Attributes:
return self._edge_attributes
@property
def is_cluster(self) -> bool:
return self._is_cluster
@property
def is_digraph(self) -> bool:
return self._is_digraph
@property
def node_attributes(self) -> Attributes:
return self._node_attributes
```
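Putting `Edge` and `Graph` together, rendering a whole graph goes through the same `format(..., "graphviz")` protocol. A hedged end-to-end sketch (the exact DOT text depends on node naming, so the comment is only indicative):
```python
# Sketch: build a tiny digraph and print its DOT source.
import uqbar.graphs
graph = uqbar.graphs.Graph(name="G")
a = uqbar.graphs.Node(name="a")
b = uqbar.graphs.Node(name="b")
graph.extend([a, b])
uqbar.graphs.Edge().attach(a, b)
print(format(graph, "graphviz"))  # indicatively: digraph G { ... a -> b; ... }
```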
#### File: uqbar/graphs/RecordField.py
```python
from typing import Optional
import uqbar.graphs # noqa
from uqbar.containers import UniqueTreeNode
from .Attachable import Attachable
class RecordField(Attachable, UniqueTreeNode):
"""
A Graphviz record field.
::
>>> import uqbar.graphs
>>> field = uqbar.graphs.RecordField(label='My Label')
>>> print(format(field, 'graphviz'))
<f_0> My Label
Port names are generated based on the graph order of the field within its
parent node:
::
>>> other_field = uqbar.graphs.RecordField(label='Other Label')
>>> group = uqbar.graphs.RecordGroup()
>>> node = uqbar.graphs.Node(attributes=dict(shape='Mrecord'))
>>> cluster = uqbar.graphs.Graph(is_cluster=True)
>>> graph = uqbar.graphs.Graph()
::
>>> graph.append(cluster)
>>> cluster.append(node)
>>> node.append(group)
>>> group.extend([other_field, field])
>>> print(format(field, 'graphviz'))
<f_0_1> My Label
If a node contains record fields or groups, their format contributions
override any string label in the node's attributes:
::
>>> print(format(node, 'graphviz'))
node_0_0 [label="{ <f_0_0> Other Label | <f_0_1> My Label }",
shape=Mrecord];
::
>>> node.attributes['label'] = 'Foo'
>>> print(format(node, 'graphviz'))
node_0_0 [label="{ <f_0_0> Other Label | <f_0_1> My Label }",
shape=Mrecord];
::
>>> node[:] = []
>>> print(format(node, 'graphviz'))
node_0_0 [label=Foo,
shape=Mrecord];
"""
### CLASS VARIABLES ###
__documentation_section__ = "Record Field Classes"
### INITIALIZER ###
def __init__(self, label: str = None, *, name: str = None) -> None:
UniqueTreeNode.__init__(self, name=name)
Attachable.__init__(self)
if label is not None:
label = str(label)
self._label = label
### SPECIAL METHODS ###
def __format__(self, format_spec: str = None) -> str:
# TODO: make the format specification options machine-readable
if format_spec == "graphviz":
return self.__format_graphviz__()
return str(self)
def __format_graphviz__(self) -> str:
result = "<{}>".format(self._get_port_name())
if self.label:
result = "{} {}".format(result, self.label)
return result
### PUBLIC PROPERTIES ###
@property
def label(self) -> Optional[str]:
return self._label
```
#### File: uqbar/graphs/RecordGroup.py
```python
from typing import Tuple # noqa
from uqbar.containers import UniqueTreeList
class RecordGroup(UniqueTreeList):
"""
A Graphviz record field group.
::
>>> import uqbar.graphs
>>> group = uqbar.graphs.RecordGroup()
>>> group.extend([
... uqbar.graphs.RecordField(),
... uqbar.graphs.RecordGroup([
... uqbar.graphs.RecordField(),
... uqbar.graphs.RecordField(),
... ]),
... uqbar.graphs.RecordField(),
... ])
>>> print(format(group, 'graphviz'))
{ <f_0> | { <f_0> | <f_0> } | <f_0> }
"""
### CLASS VARIABLES ###
__documentation_section__ = "Record Field Classes"
### INITIALIZER ###
def __init__(self, children=None, *, name: str = None) -> None:
UniqueTreeList.__init__(self, name=name, children=children)
### SPECIAL METHODS ###
def __format__(self, format_spec=None) -> str:
# TODO: make the format specification options machine-readable
if format_spec == "graphviz":
return self.__format_graphviz__()
return str(self)
def __format_graphviz__(self) -> str:
result = " | ".join(_ for _ in (format(_, "graphviz") for _ in self) if _)
if result:
result = "{{ {} }}".format(result)
return result
### PRIVATE PROPERTIES ###
@property
def _node_class(self) -> Tuple[type, ...]:
import uqbar.graphs
return (uqbar.graphs.RecordField, uqbar.graphs.RecordGroup)
```
#### File: uqbar/io/__init__.py
```python
import collections
import os
import pathlib
from typing import Generator, List, Optional, Sequence, Tuple, Union
from .DirectoryChange import DirectoryChange # noqa
from .Profiler import Profiler # noqa
from .RedirectedStreams import RedirectedStreams # noqa
from .Timer import Timer # noqa
def find_common_prefix(
paths: Sequence[Union[str, pathlib.Path]]
) -> Optional[pathlib.Path]:
"""
Find the common prefix of two or more paths.
::
>>> import pathlib
>>> one = pathlib.Path('foo/bar/baz')
>>> two = pathlib.Path('foo/quux/biz')
>>> three = pathlib.Path('foo/quux/wuux')
::
>>> import uqbar.io
>>> str(uqbar.io.find_common_prefix([one, two, three]))
'foo'
:param paths: paths to inspect
"""
counter: collections.Counter = collections.Counter()
for path in paths:
path = pathlib.Path(path)
counter.update([path])
counter.update(path.parents)
valid_paths = sorted(
[path for path, count in counter.items() if count >= len(paths)],
key=lambda x: len(x.parts),
)
if valid_paths:
return valid_paths[-1]
return None
def find_executable(name: str, flags=os.X_OK) -> List[str]:
"""
Finds executable `name`.
Similar to Unix ``which`` command.
Returns list of zero or more full paths to `name`.
"""
result = []
extensions = [x for x in os.environ.get("PATHEXT", "").split(os.pathsep) if x]
path = os.environ.get("PATH", None)
if path is None:
return []
for path in os.environ.get("PATH", "").split(os.pathsep):
path = os.path.join(path, name)
if os.access(path, flags):
result.append(path)
for extension in extensions:
path_extension = path + extension
if os.access(path_extension, flags):
result.append(path_extension)
return result
def relative_to(
source_path: Union[str, pathlib.Path], target_path: Union[str, pathlib.Path]
) -> pathlib.Path:
"""
Generates relative path from ``source_path`` to ``target_path``.
Handles the case of paths without a common prefix.
::
>>> import pathlib
>>> source = pathlib.Path('foo/bar/baz')
>>> target = pathlib.Path('foo/quux/biz')
::
>>> target.relative_to(source)
Traceback (most recent call last):
...
ValueError: 'foo/quux/biz' does not start with 'foo/bar/baz'
::
>>> import uqbar.io
>>> str(uqbar.io.relative_to(source, target))
'../../quux/biz'
:param source_path: the source path
:param target_path: the target path
"""
source_path = pathlib.Path(source_path).absolute()
if source_path.is_file():
source_path = source_path.parent
target_path = pathlib.Path(target_path).absolute()
common_prefix = find_common_prefix([source_path, target_path])
if not common_prefix:
raise ValueError("No common prefix")
source_path = source_path.relative_to(common_prefix)
target_path = target_path.relative_to(common_prefix)
result = pathlib.Path(*[".."] * len(source_path.parts))
return result / target_path
def walk(
root_path: Union[str, pathlib.Path], top_down: bool = True
) -> Generator[
Tuple[pathlib.Path, Sequence[pathlib.Path], Sequence[pathlib.Path]], None, None
]:
"""
Walks a directory tree.
Like :py:func:`os.walk` but yielding instances of :py:class:`pathlib.Path`
instead of strings.
:param root_path: foo
:param top_down: bar
"""
root_path = pathlib.Path(root_path)
directory_paths, file_paths = [], []
for path in sorted(root_path.iterdir()):
if path.is_dir():
directory_paths.append(path)
else:
file_paths.append(path)
if top_down:
yield root_path, directory_paths, file_paths
for directory_path in directory_paths:
yield from walk(directory_path, top_down=top_down)
if not top_down:
yield root_path, directory_paths, file_paths
def write(
contents: str,
path: Union[str, pathlib.Path],
verbose: bool = False,
logger_func=None,
) -> bool:
"""
Writes ``contents`` to ``path``.
    Checks if ``path`` already exists and only writes out new contents if the
old contents do not match.
Creates any intermediate missing directories.
:param contents: the file contents to write
:param path: the path to write to
:param verbose: whether to print output
"""
print_func = logger_func or print
path = pathlib.Path(path)
if path.exists():
with path.open("r") as file_pointer:
old_contents = file_pointer.read()
if old_contents == contents:
if verbose:
print_func("preserved {}".format(path))
return False
else:
with path.open("w") as file_pointer:
file_pointer.write(contents)
if verbose:
print_func("rewrote {}".format(path))
return True
elif not path.exists():
if not path.parent.exists():
path.parent.mkdir(parents=True)
with path.open("w") as file_pointer:
file_pointer.write(contents)
if verbose:
print_func("wrote {}".format(path))
return True
```
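`find_executable`, `walk` and `write` above carry no doctests; a short, hedged usage sketch (results are naturally machine-dependent, and the file path is just an illustrative scratch location):
```python
# Usage sketch for the uqbar.io helpers above; output depends on the machine.
import uqbar.io
print(uqbar.io.find_executable("python3"))  # zero or more absolute paths
for directory, directory_paths, file_paths in uqbar.io.walk("."):
    print(directory, len(directory_paths), len(file_paths))
# write() only touches the file when the contents actually change.
changed = uqbar.io.write("hello\n", "build/example.txt", verbose=True)
print(changed)
```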
#### File: uqbar/uqbar/objects.py
```python
import collections
import inspect
def _dispatch_formatting(expr):
if isinstance(expr, (list, tuple)):
return _get_sequence_repr(expr)
return repr(expr)
def _get_object_signature(expr):
expr = type(expr)
# print('E-I-ID', id(expr.__init__))
# print('E-N-ID', id(expr.__new__))
# print('o-I-ID', id(object.__init__))
# print('o-N-ID', id(object.__new__))
# print('IEQ?', expr.__init__ == object.__init__)
# print('NEQ?', expr.__new__ == object.__new__)
# attrs = {_.name: _ for _ in inspect.classify_class_attrs(expr)}
# print('I?', attrs['__init__'])
# print('N?', attrs['__new__'])
if expr.__new__ is not object.__new__:
return inspect.signature(expr.__new__)
if expr.__init__ is not object.__init__:
return inspect.signature(expr.__init__)
return None
def _get_sequence_repr(expr):
prototype = (bool, int, float, str, type(None))
if all(isinstance(x, prototype) for x in expr):
result = repr(expr)
if len(result) < 50:
return result
if isinstance(expr, list):
braces = "[", "]"
else:
braces = "(", ")"
result = [braces[0]]
for x in expr:
for line in repr(x).splitlines():
result.append(" " + line)
result[-1] += ","
result.append(braces[-1])
return "\n".join(result)
def compare_objects(object_one, object_two, coerce=False):
if coerce:
try:
object_two = type(object_one)(object_two)
except (ValueError, TypeError):
return False
object_one_values = type(object_one), get_vars(object_one)
try:
object_two_values = type(object_two), get_vars(object_two)
except AttributeError:
object_two_values = type(object_two), object_two
return object_one_values == object_two_values
def get_hash(expr):
args, var_args, kwargs = get_vars(expr)
hash_values = [type(expr)]
for key, value in args.items():
if isinstance(value, list):
value = tuple(value)
elif isinstance(value, set):
value = frozenset(value)
elif isinstance(value, dict):
value = tuple(sorted(value.items()))
args[key] = value
hash_values.append(tuple(args.items()))
hash_values.append(tuple(var_args))
for key, value in kwargs.items():
if isinstance(value, list):
value = tuple(value)
elif isinstance(value, set):
value = frozenset(value)
elif isinstance(value, dict):
value = tuple(sorted(value.items()))
kwargs[key] = value
hash_values.append(tuple(sorted(kwargs.items())))
return hash(tuple(hash_values))
def get_repr(expr, multiline=None, suppress_defaults=True):
"""
Build a repr string for ``expr`` from its vars and signature.
::
>>> class MyObject:
... def __init__(self, arg1, arg2, *var_args, foo=None, bar=None, **kwargs):
... self.arg1 = arg1
... self.arg2 = arg2
... self.var_args = var_args
... self.foo = foo
... self.bar = bar
... self.kwargs = kwargs
...
>>> my_object = MyObject('a', 'b', 'c', 'd', foo='x', quux=['y', 'z'])
::
>>> import uqbar
>>> print(uqbar.objects.get_repr(my_object))
MyObject(
'a',
'b',
'c',
'd',
foo='x',
quux=['y', 'z'],
)
"""
signature = _get_object_signature(expr)
if signature is None:
return "{}()".format(type(expr).__name__)
defaults = {}
for name, parameter in signature.parameters.items():
if parameter.default is not inspect._empty:
defaults[name] = parameter.default
args, var_args, kwargs = get_vars(expr)
args_parts = collections.OrderedDict()
var_args_parts = []
kwargs_parts = {}
has_lines = bool(multiline)
parts = []
# Format keyword-optional arguments.
# print(type(expr), args)
for i, (key, value) in enumerate(args.items()):
arg_repr = _dispatch_formatting(value)
if "\n" in arg_repr:
has_lines = True
args_parts[key] = arg_repr
# Format *args
for arg in var_args:
arg_repr = _dispatch_formatting(arg)
if "\n" in arg_repr:
has_lines = True
var_args_parts.append(arg_repr)
# Format **kwargs
for key, value in sorted(kwargs.items()):
if suppress_defaults and key in defaults and value == defaults[key]:
continue
value = _dispatch_formatting(value)
arg_repr = "{}={}".format(key, value)
has_lines = True
kwargs_parts[key] = arg_repr
for _, part in args_parts.items():
parts.append(part)
parts.extend(var_args_parts)
for _, part in sorted(kwargs_parts.items()):
parts.append(part)
# If we should format on multiple lines, add the appropriate formatting.
if has_lines and parts and multiline is not False:
for i, part in enumerate(parts):
parts[i] = "\n".join(" " + line for line in part.split("\n"))
parts.append(")")
parts = ",\n".join(parts)
return "{}(\n{}".format(type(expr).__name__, parts)
parts = ", ".join(parts)
return "{}({})".format(type(expr).__name__, parts)
def get_vars(expr):
"""
Get ``args``, ``var args`` and ``kwargs`` for an object ``expr``.
::
>>> class MyObject:
... def __init__(self, arg1, arg2, *var_args, foo=None, bar=None, **kwargs):
... self.arg1 = arg1
... self.arg2 = arg2
... self.var_args = var_args
... self.foo = foo
... self.bar = bar
... self.kwargs = kwargs
...
>>> my_object = MyObject('a', 'b', 'c', 'd', foo='x', quux=['y', 'z'])
::
>>> import uqbar
>>> args, var_args, kwargs = uqbar.objects.get_vars(my_object)
::
>>> args
OrderedDict([('arg1', 'a'), ('arg2', 'b')])
::
>>> var_args
['c', 'd']
::
>>> kwargs
{'foo': 'x', 'quux': ['y', 'z']}
"""
# print('TYPE?', type(expr))
signature = _get_object_signature(expr)
if signature is None:
return ({}, [], {})
# print('SIG?', signature)
args = collections.OrderedDict()
var_args = []
kwargs = {}
if expr is None:
return args, var_args, kwargs
for i, (name, parameter) in enumerate(signature.parameters.items()):
# print(' ', parameter)
if i == 0 and name in ("self", "cls", "class_", "klass"):
continue
if parameter.kind is inspect._POSITIONAL_ONLY:
try:
args[name] = getattr(expr, name)
except AttributeError:
args[name] = expr[name]
elif (
parameter.kind is inspect._POSITIONAL_OR_KEYWORD
or parameter.kind is inspect._KEYWORD_ONLY
):
found = False
for x in (name, "_" + name):
try:
value = getattr(expr, x)
found = True
break
except AttributeError:
try:
value = expr[x]
found = True
break
except (KeyError, TypeError):
pass
if not found:
raise ValueError("Cannot find value for {!r}".format(name))
if parameter.default is inspect._empty:
args[name] = value
elif parameter.default != value:
kwargs[name] = value
else:
kwargs[name] = parameter.default
elif parameter.kind is inspect._VAR_POSITIONAL:
value = None
try:
value = expr[:]
except TypeError:
value = getattr(expr, name)
if value:
var_args.extend(value)
elif parameter.kind is inspect._VAR_KEYWORD:
items = {}
if hasattr(expr, "items"):
items = expr.items()
elif hasattr(expr, name):
mapping = getattr(expr, name)
if not isinstance(mapping, dict):
mapping = dict(mapping)
items = mapping.items()
elif hasattr(expr, "_" + name):
mapping = getattr(expr, "_" + name)
if not isinstance(mapping, dict):
mapping = dict(mapping)
items = mapping.items()
for key, value in items:
if key not in args:
kwargs[key] = value
return args, var_args, kwargs
def new(expr, *args, **kwargs):
"""
Template an object.
::
>>> class MyObject:
... def __init__(self, arg1, arg2, *var_args, foo=None, bar=None, **kwargs):
... self.arg1 = arg1
... self.arg2 = arg2
... self.var_args = var_args
... self.foo = foo
... self.bar = bar
... self.kwargs = kwargs
...
>>> my_object = MyObject('a', 'b', 'c', 'd', foo='x', quux=['y', 'z'])
::
>>> import uqbar
>>> new_object = uqbar.objects.new(my_object, foo=666, bar=1234)
>>> print(uqbar.objects.get_repr(new_object))
MyObject(
'a',
'b',
'c',
'd',
bar=1234,
foo=666,
quux=['y', 'z'],
)
Original object is unchanged:
::
>>> print(uqbar.objects.get_repr(my_object))
MyObject(
'a',
'b',
'c',
'd',
foo='x',
quux=['y', 'z'],
)
"""
# TODO: Clarify old vs. new variable naming here.
current_args, current_var_args, current_kwargs = get_vars(expr)
new_kwargs = current_kwargs.copy()
recursive_arguments = {}
for key in tuple(kwargs):
if "__" in key:
value = kwargs.pop(key)
key, _, subkey = key.partition("__")
recursive_arguments.setdefault(key, []).append((subkey, value))
for key, pairs in recursive_arguments.items():
recursed_object = current_args.get(key, current_kwargs.get(key))
if recursed_object is None:
continue
kwargs[key] = new(recursed_object, **dict(pairs))
if args:
current_var_args = args
for key, value in kwargs.items():
if key in current_args:
current_args[key] = value
else:
new_kwargs[key] = value
new_args = list(current_args.values()) + list(current_var_args)
return type(expr)(*new_args, **new_kwargs)
```
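`compare_objects` and `get_hash` above have no examples of their own. Since both are driven by `get_vars`, two instances built from the same constructor arguments compare and hash equal; a sketch using a hypothetical `Point` class:
```python
# Sketch: value-style equality and hashing derived from constructor arguments.
import uqbar.objects
class Point:
    def __init__(self, x, y=0):
        self.x = x
        self.y = y
a = Point(1, y=2)
b = Point(1, y=2)
print(uqbar.objects.compare_objects(a, b))                     # True
print(uqbar.objects.get_hash(a) == uqbar.objects.get_hash(b))  # True
```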
#### File: uqbar/sphinx/api.py
```python
import importlib
import pathlib
import types
from typing import Any, Dict, List
from sphinx.util import logging
from sphinx.util.console import bold, darkgreen, darkred, purple # type: ignore
import uqbar.apis
logger = logging.getLogger(__name__)
def logger_func(string):
if string.startswith("preserved"):
return
elif string.startswith("rewrote"):
string = purple(string)
elif string.startswith("pruned"):
string = darkred(string)
elif string.startswith("wrote"):
string = darkgreen(string)
logger.info("{} {}".format(bold("[uqbar-api]"), string))
def on_builder_inited(app):
"""
Hooks into Sphinx's ``builder-inited`` event.
Builds out the ReST API source.
"""
config = app.builder.config
target_directory = (
pathlib.Path(app.builder.env.srcdir) / config.uqbar_api_directory_name
)
initial_source_paths: List[str] = []
source_paths = config.uqbar_api_source_paths
for source_path in source_paths:
if isinstance(source_path, types.ModuleType):
if hasattr(source_path, "__path__"):
initial_source_paths.extend(getattr(source_path, "__path__"))
else:
                initial_source_paths.append(source_path.__file__)
continue
try:
module = importlib.import_module(source_path)
if hasattr(module, "__path__"):
initial_source_paths.extend(getattr(module, "__path__"))
else:
initial_source_paths.append(module.__file__)
except ImportError:
initial_source_paths.append(source_path)
root_documenter_class = config.uqbar_api_root_documenter_class
if isinstance(root_documenter_class, str):
module_name, _, class_name = root_documenter_class.rpartition(".")
module = importlib.import_module(module_name)
root_documenter_class = getattr(module, class_name)
module_documenter_class = config.uqbar_api_module_documenter_class
if isinstance(module_documenter_class, str):
module_name, _, class_name = module_documenter_class.rpartition(".")
module = importlib.import_module(module_name)
module_documenter_class = getattr(module, class_name)
# Don't modify the list in Sphinx's config. Sphinx won't pickle class
# references, and strips them from the saved config. That leads to Sphinx
# believing that the config has changed on every run.
member_documenter_classes = list(config.uqbar_api_member_documenter_classes or [])
for i, member_documenter_class in enumerate(member_documenter_classes):
if isinstance(member_documenter_class, str):
module_name, _, class_name = member_documenter_class.rpartition(".")
module = importlib.import_module(module_name)
member_documenter_classes[i] = getattr(module, class_name)
api_builder = uqbar.apis.APIBuilder(
initial_source_paths=initial_source_paths,
target_directory=target_directory,
document_empty_modules=config.uqbar_api_document_empty_modules,
document_private_members=config.uqbar_api_document_private_members,
document_private_modules=config.uqbar_api_document_private_modules,
member_documenter_classes=member_documenter_classes or None,
module_documenter_class=module_documenter_class,
omit_root=config.uqbar_api_omit_root,
root_documenter_class=root_documenter_class,
title=config.uqbar_api_title,
logger_func=logger_func,
)
api_builder()
def setup(app) -> Dict[str, Any]:
"""
Sets up Sphinx extension.
"""
app.add_config_value("uqbar_api_directory_name", "api", "env")
app.add_config_value("uqbar_api_document_empty_modules", False, "env")
app.add_config_value("uqbar_api_document_private_members", False, "env")
app.add_config_value("uqbar_api_document_private_modules", False, "env")
app.add_config_value("uqbar_api_member_documenter_classes", None, "env")
app.add_config_value("uqbar_api_module_documenter_class", None, "env")
app.add_config_value("uqbar_api_omit_root", False, "env")
app.add_config_value("uqbar_api_root_documenter_class", None, "env")
app.add_config_value("uqbar_api_source_paths", None, "env")
app.add_config_value("uqbar_api_title", "API", "html")
app.connect("builder-inited", on_builder_inited)
return {
"version": uqbar.__version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
}
``` |
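The extension above is wired up purely through `conf.py` values registered in `setup()`. A hypothetical minimal configuration (the package name is a placeholder, not part of the source):
```python
# Hypothetical Sphinx conf.py snippet for the extension above.
# "my_package" is a placeholder for the package to document.
extensions = ["uqbar.sphinx.api"]
uqbar_api_source_paths = ["my_package"]
uqbar_api_title = "my_package API"
uqbar_api_document_private_members = False
uqbar_api_document_private_modules = False
```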
{
"source": "josiahw/SimpleSVM",
"score": 3
} |
#### File: josiahw/SimpleSVM/SimpleSVM.py
```python
import numpy
import numpy.linalg
def polyKernel(a,b,pwr):
return numpy.dot(a,b)**pwr #numpy.dot(a,a) - numpy.dot(b,b) # -1 #
def rbfKernel(a,b,gamma):
return numpy.exp(-gamma * numpy.linalg.norm(a - b))
class SimpleSVM:
w = None
a = None
b = None
C = None
sv = None
kernel = None
kargs = ()
tolerance = None
verbose = True
def __init__(self,
C,
tolerance = 0.001,
kernel = numpy.dot,
kargs = ()
):
"""
The parameters are:
- C: SVC cost
- tolerance: gradient descent solution accuracy
        - kernel: the kernel function to use as k(a, b, *kargs)
- kargs: extra parameters for the kernel
"""
self.C = C
self.kernel = kernel
self.tolerance = tolerance
self.kargs = kargs
def fit(self, X, y):
"""
Fit to data X with labels y.
"""
"""
Construct the Q matrix for solving
"""
ysigned = y * 2 - 1
        Q = numpy.zeros((len(X), len(X)))
        for i in range(len(X)):
            for j in range(i, len(X)):
                Qval = ysigned[i] * ysigned[j]
                Qval *= self.kernel(*(
                    (X[i,:], X[j,:])
+ self.kargs
))
Q[i,j] = Q[j,i] = Qval
"""
Solve for a and w simultaneously by coordinate descent.
This means no quadratic solver is needed!
The support vectors correspond to non-zero values in a.
"""
self.w = numpy.zeros(X.shape[1])
self.a = numpy.zeros(X.shape[0])
delta = 10000000000.0
while delta > self.tolerance:
delta = 0.
            for i in range(len(X)):
g = numpy.dot(Q[i,:], self.a) - 1.0
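                # Exact coordinate-wise minimizer of the dual objective,
                # clipped to the box [0, C]; adelta is how far a[i] moves.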
adelta = self.a[i] - min(max(self.a[i] - g/Q[i,i], 0.0), self.C)
self.w += adelta * X[i,:]
delta += abs(adelta)
self.a[i] -= adelta
if self.verbose:
print "Descent step magnitude:", delta
#print Q #self.a
self.sv = X[self.a > 0.0, :]
self.a = (self.a * ysigned)[self.a > 0.0]
if self.verbose:
print "Number of support vectors:", len(self.a)
"""
Select support vectors and solve for b to get the final classifier
"""
self.b = self._predict(self.sv[0,:])[0]
if self.a[0] > 0:
self.b *= -1
if self.verbose:
print "Bias value:", self.b
def _predict(self, X):
if (len(X.shape) < 2):
X = X.reshape((1,-1))
clss = numpy.zeros(len(X))
        for i in range(len(X)):
            for j in range(len(self.sv)):
clss[i] += self.a[j] * self.kernel(* ((self.sv[j,:],X[i,:]) + self.kargs))
return clss
def predict(self, X):
"""
Predict classes for data X.
"""
return self._predict(X) > self.b
if __name__ == '__main__':
import sklearn.datasets
    digits = sklearn.datasets.load_digits(n_class=2)
    data = digits.data
    labels = digits.target
C = 100.0
clss = SimpleSVM(C,0.001,rbfKernel,(0.5,))
clss.fit(data,labels)
t = clss.predict(data)
print "Error", numpy.sum((labels-t)**2) / float(len(data))
#print sum(a > 0)
#print w
``` |
{
"source": "josiasal/tweetinvi",
"score": 2
} |
#### File: tweetinvi/tools/tweetinvi-builder.py
```python
import os
import re
import glob
import argparse
import sys
from shutil import copyfile, rmtree
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument('--version', default='5.0.4')
parser.add_argument('--pre', default='')
parser.add_argument('--build-version', action='store_true')
parser.add_argument('--nuget-push', action='store_true')
args = parser.parse_args()
version = args.version
nugetVersion = version + args.pre
if args.build_version:
print(nugetVersion)
sys.exit(0)
if args.nuget_push:
    print('This is going to publish 3 packages on nuget.org as version ' + nugetVersion + '. Please confirm by typing "continue"')
answer = input()
if answer == 'continue':
print('publishing on nuget...')
os.system('cd TweetinviAPI && nuget push TweetinviAPI.' + nugetVersion + '.nupkg -Source nuget.org')
os.system('cd TweetinviAPI-Symbols && nuget push TweetinviAPI.' + nugetVersion + '.snupkg -Source nuget.org')
os.system('cd TweetinviAspNet && nuget push TweetinviAPI.AspNetPlugin.' + nugetVersion + '.nupkg -Source nuget.org')
else:
print('nuget push aborted')
sys.exit(0)
srcFolders = os.listdir('../src/')
tweetinviProjects = list(filter(lambda folder: "Tweetinvi" in folder, srcFolders))
def replace(filepath, regex, with_content):
with open(filepath, 'r') as file:
filedata = file.read()
filedata = re.sub(regex, with_content, filedata)
with open(filepath, 'w') as file:
file.write(filedata)
def remove_files_in(path):
files = glob.glob(path + '/*')
for f in files:
os.remove(f)
def createPath(path):
try:
os.makedirs(path)
except FileExistsError:
print("Directory ", path, " already exists")
def update_version():
nuspecVersionRegex = re.compile(r'<version>.*</version>')
nuspecNewVersion = '<version>' + nugetVersion + '</version>'
replace('./TweetinviAPI/TweetinviAPI.nuspec', nuspecVersionRegex, nuspecNewVersion)
replace('./TweetinviAPI-Symbols/TweetinviAPI-Symbols.nuspec', nuspecVersionRegex, nuspecNewVersion)
replace('./TweetinviAspNet/TweetinviAPI.AspNetPlugin.nuspec', nuspecVersionRegex, nuspecNewVersion)
nugetDependency = '<dependency id="TweetinviAPI" version="' + nugetVersion + '" />'
aspnetPluginTweetinviDependencyVersionRegex = re.compile(r'<dependency id="TweetinviAPI" version=".*" \/>')
replace('./TweetinviAspNet/TweetinviAPI.AspNetPlugin.nuspec', aspnetPluginTweetinviDependencyVersionRegex, nugetDependency)
userAgentRegex = re.compile(r'"User-Agent",\s"Tweetinvi/.+"')
replace('../src/Tweetinvi.WebLogic/TwitterClientHandler.cs', userAgentRegex, '"User-Agent", "Tweetinvi/' + nugetVersion + '"')
print('updated nuspec versions to ' + version)
csprojVersionRegex = re.compile(r'<VersionPrefix>.*</VersionPrefix>')
csprojNewVersion = '<VersionPrefix>' + version + '</VersionPrefix>'
for projectPath in tweetinviProjects:
filePath = '../src/' + projectPath + '/' + projectPath + '.csproj'
print('updating ' + projectPath + ' version to ' + version)
replace(filePath, csprojVersionRegex, csprojNewVersion)
def clean_build():
print('Cleaning build folders')
releasePath = '../src/Tweetinvi/bin/'
if os.path.exists(releasePath):
rmtree(releasePath)
releasePath = '../src/Tweetinvi.AspNet/bin/'
if os.path.exists(releasePath):
rmtree(releasePath)
def compile_tweetinvi():
print('Compiling Tweetinvi...')
os.system('dotnet build -c release ../src/Tweetinvi')
os.system('dotnet build -c release ../src/Tweetinvi.AspNet')
def clean_nuget_folder():
print('Cleaning nuget build...')
cachePackagePath = 'C:/Users/linvi/.nuget/packages/tweetinviapi/' + version
if os.path.exists(cachePackagePath):
rmtree(cachePackagePath)
cachePackagePath = 'C:/Users/linvi/.nuget/packages/tweetinviapi.aspnetplugin/' + version
if os.path.exists(cachePackagePath):
rmtree(cachePackagePath)
files = glob.glob('./TweetinviAPI/*.nupkg')
for f in files:
os.remove(f)
remove_files_in('./TweetinviAPI/lib/netstandard1.4')
remove_files_in('./TweetinviAPI/lib/netstandard2.0')
createPath('./TweetinviAPI/lib/netstandard1.4')
createPath('./TweetinviAPI/lib/netstandard2.0')
files = glob.glob('./TweetinviAPI-Symbols/*.snupkg')
for f in files:
os.remove(f)
remove_files_in('./TweetinviAPI-Symbols/lib/netstandard1.4')
remove_files_in('./TweetinviAPI-Symbols/lib/netstandard2.0')
createPath('./TweetinviAPI-Symbols/lib/netstandard1.4')
createPath('./TweetinviAPI-Symbols/lib/netstandard2.0')
files = glob.glob('./TweetinviAspNet/*.nupkg')
for f in files:
os.remove(f)
createPath('./TweetinviAspNet/lib/netstandard2.0')
remove_files_in('./TweetinviAspNet/lib/netcoreapp2.1')
remove_files_in('./TweetinviAspNet/lib/netcoreapp3.1')
createPath('./TweetinviAspNet/lib/netstandard2.0')
createPath('./TweetinviAspNet/lib/netcoreapp2.1')
createPath('./TweetinviAspNet/lib/netcoreapp3.1')
def build_tweetinvi_nuget_package():
print('Building nuget package...')
tweetinviBuildFiles = os.listdir('../src/Tweetinvi/bin/release/netstandard2.0')
tweetinviDllFiles = list(filter(re.compile(r'.*\.dll').search, tweetinviBuildFiles))
tweetinviXmlFiles = list(filter(re.compile(r'.*\.xml').search, tweetinviBuildFiles))
for dll in tweetinviDllFiles:
copyfile('../src/Tweetinvi/bin/release/netstandard1.4/' + dll, './TweetinviAPI/lib/netstandard1.4/' + dll)
copyfile('../src/Tweetinvi/bin/release/netstandard2.0/' + dll, './TweetinviAPI/lib/netstandard2.0/' + dll)
for xml in tweetinviXmlFiles:
copyfile('../src/Tweetinvi/bin/release/netstandard1.4/' + xml, './TweetinviAPI/lib/netstandard1.4/' + xml)
copyfile('../src/Tweetinvi/bin/release/netstandard2.0/' + xml, './TweetinviAPI/lib/netstandard2.0/' + xml)
os.system('cd TweetinviAPI && nuget pack')
def build_tweetinvi_nuget_symbols():
print('building symbols package')
tweetinviBuildFiles = os.listdir('../src/Tweetinvi/bin/release/netstandard2.0')
symbolsFilter = re.compile(r'.*\.pdb')
tweetinviSymbolFiles = list(filter(symbolsFilter.search, tweetinviBuildFiles))
for pdb in tweetinviSymbolFiles:
copyfile('../src/Tweetinvi/bin/release/netstandard1.4/' + pdb, './TweetinviAPI-Symbols/lib/netstandard1.4/' + pdb)
copyfile('../src/Tweetinvi/bin/release/netstandard2.0/' + pdb, './TweetinviAPI-Symbols/lib/netstandard2.0/' + pdb)
os.system('cd TweetinviAPI-Symbols && nuget pack')
os.rename('./TweetinviAPI-Symbols/TweetinviAPI.' + nugetVersion + '.nupkg', './TweetinviAPI-Symbols/TweetinviAPI.' + nugetVersion + '.snupkg')
def build_aspNet_nuget_package():
def copy_aspnet_file(filepath):
copyfile('../src/Tweetinvi.AspNet/bin/release/' + filepath, './TweetinviAspNet/lib/' + filepath)
Path("./TweetinviAspNet/lib/netstandard2.0").mkdir(parents=True, exist_ok=True)
Path("./TweetinviAspNet/lib/netcoreapp2.1").mkdir(parents=True, exist_ok=True)
Path("./TweetinviAspNet/lib/netcoreapp3.1").mkdir(parents=True, exist_ok=True)
copy_aspnet_file('netstandard2.0/Tweetinvi.AspNet.dll')
copy_aspnet_file('netstandard2.0/Tweetinvi.AspNet.xml')
copy_aspnet_file('netcoreapp2.1/Tweetinvi.AspNet.dll')
copy_aspnet_file('netcoreapp2.1/Tweetinvi.AspNet.xml')
copy_aspnet_file('netcoreapp3.1/Tweetinvi.AspNet.dll')
copy_aspnet_file('netcoreapp3.1/Tweetinvi.AspNet.xml')
os.system('cd TweetinviAspNet && nuget pack')
update_version()
clean_build()
compile_tweetinvi()
clean_nuget_folder()
build_tweetinvi_nuget_package()
build_tweetinvi_nuget_symbols()
build_aspNet_nuget_package()
``` |
{
"source": "JosiasAurel/python-hashnode",
"score": 3
} |
#### File: python-hashnode/hashnode/hashnode.py
```python
from gql import Client, gql
from gql.transport.aiohttp import AIOHTTPTransport
class Hashnode(object):
def __init__(self, api_token: str):
self.api_token = api_token
TRANSPORT = AIOHTTPTransport(
url="https://api.hashnode.com/", headers={"Authorization": api_token})
# create graphql client to interact with the hashnode api
client = Client(
transport=TRANSPORT, fetch_schema_from_transport=True)
self.client = client # set the class client as the hashnode graphql api client
# the below are queries
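    # Hedged usage sketch (the token and username below are placeholders):
    #   client = Hashnode("<your-hashnode-api-token>")
    #   user = client.get_user_info("someone")
    #   feed = client.get_feed("NEW", page=0)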
def get_user_info(self, username: str):
query = gql("""
query($username: String!) {
user(username: $username) {
_id,
name,
username,
blogHandle,
followers,
publicationDomain,
tagline,
isEvangelist,
dateJoined,
socialMedia,
numFollowing,
numFollowers,
coverImage,
location,
photo,
numPosts,
numReactions,
publication
}
}
""")
params = {
"username": username
}
res = self.client.execute(query, variable_values=params)
return res
def get_feed(self, feed_type: str, page=0):
# accepted feed type :
# BEST
# FEATURED
# NEW
# COMMUNITY
query = gql("""
query($type:FeedType!, $page: Int) {
storiesFeed(type: $type, page: $page) {
_id,
title,
author {
name,
username,
blogHandle,
photo
},
tags {
name,
logo,
slug,
wiki,
managers {
role,
user { name, blogHandle
}
}
},
slug,
cuid,
type,
coverImage,
brief,
dateUpdated,
followersCount,
popularity,
totalReactions,
dateAdded,
responseCount,
dateFeatured,
responseCount,
reactionsByCurrentUser {image, name},
bookmarkedIn,
isAnonymous,
poll {totalVotes},
replyCount,
contentMarkdown
}
}
""")
params = {
"type": feed_type,
"page": page
}
res = self.client.execute(query, variable_values=params)
return res
def get_amas(self, page=0):
query = gql(
"""
query($page: Int) {
amas(page: $page) {
_id,
title,
author {
name,
username,
blogHandle,
photo
},
tags {
name,
logo,
slug,
wiki,
managers {
role,
user {
name, blogHandle
}
                            },
                        },
slug,
cuid,
type,
coverImage,
brief,
dateUpdated,
followersCount,
popularity,
totalReactions,
dateAdded,
responseCount,
dateFeatured,
responseCount,
reactionsByCurrentUser {image, name},
bookmarkedIn,
isAnonymous,
poll {totalVotes},
replyCount,
contentMarkdown
}
}
"""
)
params = {
"page": page
}
res = self.client.execute(query, variable_values=params)
return res
def get_post(self, slug: str, hostname: str):
query = gql(
"""
query($slug: String!, $hostname: String) {
post(slug: $slug, hostname:$hostname) {
_id,
cuid,
slug,
title,
type,
author {
name,
username,
blogHandle,
photo
},
dateAdded,
tags {
name,
logo,
slug,
wiki,
managers {
role,
user {name, blogHandle}
}
},
contributors,
coverImage,
brief,
dateUpdated,
isFeatured,
reactions,
replyCount,
responseCount,
sourceFromGithub,
isRepublished,
followersCount,
untaggedFrom,
reactionsByCurrentUser {image, name},
poll {totalVotes},
popularity,
content,
contentMarkdown
}
}
"""
)
params = {
"slug": slug,
"hostname": hostname
}
res = self.client.execute(query, variable_values=params)
return res
def get_tag_categories(self):
query = gql(
"""
{
tagCategories {
_id,
name,
isActive,
priority,
slug,
tags {
name,
logo,
slug,
wiki,
managers {role, user{name, blogHandle}}
}
}
}
"""
)
result = self.client.execute(query)
return result
# here marks the end of queries and the beginning of mutations
def follow_user(self, user_id: str):
mutation = gql(
"""
mutation($userId: String!) {
followUser(userId: $userId) {
code,
success,
message
}
}
"""
)
params = {
"userId": user_id
}
result = self.client.execute(mutation, variable_values=params)
return result
def create_story(self, story: str):
mutation = gql(
"""
mutation($input: CreateStoryInput!) {
createStory(input:$input) {
code,
success,
message,
post
}
}
"""
)
params = {
"input": story
}
result = self.client.execute(mutation, variable_values=params)
return result
def create_publication_story(self, story_input: str, publication_id: str, hide_from_hashnode_feed: bool = False):
mutation = gql(
"""
mutation($input: CreateStoryInput!, $publicationId: String!, $hideFromHashnodeFeed: Boolean) {
createPublicationStory(input: $input, publicationId: $publicationId, hideFromHashnodeFeed:$hideFromHashnodeFeed) {
code,
success,
message,
post
}
}
"""
)
params = {
"input": story_input,
"publicationId": publication_id,
"hideFromHashnodeFeed": hide_from_hashnode_feed
}
result = self.client.execute(mutation, variable_values=params)
return result
def update_story(self, post_id: str, story: str):
mutation = gql(
"""
mutation($postId: String!, $input: UpdateStoryInput!) {
updateStory(postId:$postId, input:$input) {
code,
success,
message,
post
}
}
""")
params = {
"postId": post_id,
"input": story
}
result = self.client.execute(mutation, variable_values=params)
return result
def react_to_story(self, reaction):
mutation = gql(
"""
mutation($input: ReactToPostInput!) {
reactToStory(input:$input) {
code,
success,
message
}
}
"""
)
params = {
"input": reaction
}
result = self.client.execute(mutation, variable_values=params)
return result
def delete_post(self, post_id: str):
mutation = gql(
"""
mutation($id: String!) {
deletePost(id:$id) {
code,
success,
message
}
}
"""
)
params = {
"id": post_id
}
result = self.client.execute(mutation, variable_values=params)
return result
def create_response(self, response: str):
mutation = gql(
"""
mutation($input: CreateResponseInput!) {
createResponse(input:$input) {
code,
success,
message,
response
}
}
"""
)
params = {
"input": response
}
result = self.client.execute(mutation, variable_values=params)
return result
def update_reponse(self, response_id: str, post_id: str, content: str):
mutation = gql(
"""
mutation($responseId: String!, $postId: String, $contentInMarkdown: String!) {
updateResponse(responseId:$responseId, postId:$postId, contentInMarkdown:$contentInMarkdown) {
code,
success,
message,
response
}
}
"""
)
params = {
"responseId": response_id,
"postId": post_id,
"contentInMarkdown": content
}
result = self.client.execute(mutation, variable_values=params)
return result
def react_to_response(self, response: str):
mutation = gql(
"""
mutation($input: ReactToResponseInput!) {
reactToResponse(input: $input) {
code,
success,
message
}
}
""")
params = {
"input": response
}
result = self.client.execute(mutation, variable_values=params)
return result
def delete_response(self, response_id: str, post_id: str):
mutation = gql(
"""
mutation($responseId: String!, $postId: String!) {
deleteResponse(responseId:$responseId, postId:$postId) {
code,
success,
message
}
}
"""
)
params = {
"responseId": response_id,
"postId": post_id
}
result = self.client.execute(mutation, variable_values=params)
return result
def create_reply(self, reply: str):
mutation = gql(
"""
mutation($input: CreateReplyInput!) {
createReply(input: $input) {
code,
success,
message,
reply
}
}
"""
)
params = {
"input": reply
}
result = self.client.execute(mutation, variable_values=params)
return result
def update_reply(self, reply_id: str, response_id: str, post_id: str, new_reply: str):
mutation = gql(
"""
mutation($replyId: String!, $responseId: String!, $postId: String!, $contentInMarkdown: String!) {
updateReply(replyId:$replyId, responseId:$responseId, postId:$postId, contentInMarkdown:$contentInMarkdown) {
code,
success,
message,
reply
}
}
"""
)
params = {
"replyId": reply_id,
"responseId": response_id,
"postId": post_id,
"contentInMarkdown": new_reply
}
result = self.client.execute(mutation, variable_values=params)
return result
def react_to_reply(self, reply: str):
mutation = gql(
"""
mutation($input: ReactToReplyInput!) {
reactToReply(input:$input) {
code,
success,
message,
reply
}
}
"""
)
params = {
"input": reply
}
result = self.client.execute(mutation, variable_values=params)
return result
def delete_reply(self, reply_id: str, response_id: str, post_id: str):
mutation = gql(
"""
mutation($replyId: String!, $responseId: String!, $postId: String!) {
deleteReply(replyId:$replyId, responseId:$responseId, postId:$postId) {
code,
success,
message
}
}
"""
)
params = {
"replyId": reply_id,
"responseId": response_id,
"postId": post_id
}
result = self.client.execute(mutation, variable_values=params)
return result
"""
Executong results with the gql
result = client.execute(query:str)
"""
``` |
{
"source": "josiasMO/raspi_integrations",
"score": 2
} |
#### File: josiasMO/raspi_integrations/Gprs.py
```python
import subprocess
import threading
import socket
import os, sys
import time
from time import sleep
import logging
import ctypes
import binascii
import serial
from datetime import datetime
def crc16(datos, offset, length):
crctab16 = (
0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78,
)
fcs = 0xFFFF
i = offset
while i < length + offset:
fcs = ((fcs >> 8) ^ crctab16[(fcs ^ datos[i]) & 0xFF])
i += 1
return ctypes.c_ushort(~fcs).value
def encoder(data):
"""encode sysjourney data
0x20, 0x20, data_length, packet_id, data0, data1, ..., datan, check_h, check_l, 0x0D, 0x0A
"""
strpackage = '2020' + '%002x' % len(data) + '19'
for elem in data:
strpackage = strpackage + '%002x' % int(elem)
send_data = binascii.unhexlify(strpackage)
chk_p = ' '.join(strpackage[i: i + 2] for i in range(0, len(strpackage), 2))
chk_p = chk_p.split()
chk_p = [int(p, 16) for p in chk_p]
send_chk = crc16(chk_p[2:], 0, len(chk_p) - 2)
send_chk = bytearray([(send_chk & 0xFF00) >> 8, send_chk & 0x00FF])
return (send_data + send_chk + b"\r\n")
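# Minimal sketch of how a frame is assembled: encoder() produces
# 0x20 0x20 <len> 0x19 <data bytes> <crc_hi> <crc_lo> 0x0D 0x0A, with the CRC-16
# from crc16() taken over everything after the two leading 0x20 bytes.
# The payload below is an arbitrary example, not a real journey record.
def _example_packet():
    payload = ['0', '15', '0', '35', '0', '43', '0']
    packet = encoder(payload)
    # packet is a bytes object beginning with b'\x20\x20' and ending with b'\r\n'
    return packet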
class GPRS(object):
def __init__(self, host='rasp.dalmago.xyz', port=5555):
self.proc = None
self.host = host
self.port = port
def __ppp0status(self):
"""Check if the ppp0 connection is ready"""
try:
f = open('/sys/class/net/ppp0/operstate','r')
if (f.readline() == 'unknown\n'):
print('[PPPD]: ppp0 interface is UP')
return 1
except:
return 0
def __conn(self, data):
"""Open tcp socket and send data to the specified host"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(10) #long timeout for gprs connections
s.connect((self.host, self.port))
s.sendall(data)
recv = s.recv(1024)
s.close()
return recv
def __procout(self, proc):
"""handle pppd subprocess output"""
for line in iter(proc.stdout.readline, b''):
print('[PPPD]: {0}'.format(line.decode('utf-8')), end='')
def get_time(self):
"""
Get time from gprs tracker, and clean the serial buffer
return a list with the date and time
[y_high, y_low, month, day, hour, minute, second ]
"""
try:
conn = serial.Serial("/dev/ttyS0", 115200, timeout=1)
conn.flushInput()
#get gprs time
conn.write('AT+CCLK?'.encode('utf-8')+b'\r\n')
conn.flush()
except serial.SerialException:
return ['0']*7
received = conn.read(56).decode('utf-8').split('+CCLK: ')
for line in received:
if(line.startswith('"')):
time = line[:19]
else:
time = None
#clean buffer and close pyserial
conn.flushInput()
conn.flushOutput()
conn.close()
#check if the gprs tracker answered with the time
try:
if (len(time) > 1):
#convert string to datetime
time = datetime.strptime(time, '"%y/%m/%d,%H:%M:%S"')
date_list = [time.year - 2000, \
time.month, time.day, time.hour, time.minute, time.second]
return [str(a) for a in date_list]
else: return ['0']*7
except (IndexError,UnboundLocalError,TypeError): return ['0']*7
def get_terminalID(self):
"""
Request product serial number identification(IMEI)
return a list with 8 integer numbers
"""
try:
conn = serial.Serial("/dev/ttyS0", 115200, timeout=1)
conn.flushInput()
conn.write('ATE1'.encode('utf-8')+b'\r\n')
time.sleep(0.1)
#get device imei
conn.write('AT+CGSN'.encode('utf-8')+b'\r\n')
conn.flush()
received = conn.read(56).decode('utf-8').split('AT+')
conn.write('ATE0'.encode('utf-8')+b'\r\n')
conn.flushInput()
conn.flushOutput()
except serial.SerialException:
print("Serial exeption during get imei")
return ['0']*8
#close serial connection
conn.close()
for line in received:
if(line.startswith('CGSN')):
received = line[10:25]
received = '0'+received
else:
received = None
if(received != None):
received = [received[i:i+2] for i in range(0, len(received), 2)]
else:
return ['0']*8
return received
def send(self,data):
"""start pppd subprocess, handle the data form user"""
proc = subprocess.Popen(['pppd','call','gprs'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
t = threading.Thread(target=self.__procout, args=(proc,))
t.start()
recv = None
try:
while proc.poll() is None:
if(self.__ppp0status()):
r = encoder(data)
recv = self.__conn (r)
if recv is not None:
#the server should send back:
# //0x00 = all OK
# //0x08 = identifier not registered
# //0x09 = vehicle occupied by another driver
# //0x0A = driver not registered
# //0x0B = driver in another vehicle
# //0x0C = route (line) not registered
# //0x0D = vehicle on another route
# //0x0E = vehicle/driver combination already registered
# //0x0F = journey not created before finishing
# //0xFF = unknown error
if ord(recv) not in [250, 8 ,9, 10, 11, 12 ,13 , 14, 15 ,255]:
recv = None
proc.terminate()
time.sleep(0.5)
except socket.error as exc:
proc.terminate()
print ("socket.error : %s" % exc)
finally:
print('[PPPD]: End pppd process')
try:
proc.wait(timeout=2)
print('== subprocess exited with rc =', proc.returncode)
except subprocess.TimeoutExpired:
print('subprocess did not terminate in time')
t.join()
print ('==Received from server %s bytes==' % recv)
if recv is not None:
return ord(recv)
else:
return None
# if __name__ == "__main__":
# g = GPRS()
# print (g.get_time())
#
# # g.send(['0', '15','0', '35','0', '43', '0'] + g.get_time())
# # time.sleep(2)
``` |
{
"source": "Josicamats/rpachallenge",
"score": 3
} |
#### File: Josicamats/rpachallenge/WebNavigation.py
```python
import logging
from RPA.Browser.Selenium import Selenium
from RPA.Tables import Tables
from RPA.FileSystem import FileSystem
browser = Selenium()
tb = Tables()
fs = FileSystem()
class WebNavigation:
def __init__(self) -> None:
self.logger = logging.getLogger(__name__)
#set download directory
def set_download_directory(self, directory):
try:
browser.set_download_directory(directory, True)
except Exception as err:
self.logger.error("Set download directory fails: " + str(err))
raise SystemError("Set download directory fails: " + str(err))
# Open specified website
def open_website(self, url: str):
try:
browser.open_available_browser(url)
browser.maximize_browser_window()
browser.set_browser_implicit_wait(30)
#self.browser.wait_until_page_contains_element()
except Exception as err:
self.logger.error("Login website fails bc: " + str(err))
raise SystemError("Login website fails bc: " + str(err))
# Click specified button
def click_button(self, button: str):
try:
browser.wait_until_element_is_visible(button)
browser.click_element_when_visible(button)
#browser.set_browser_implicit_wait(30)
except Exception as err:
self.logger.error("Click button failes bc: " + str(err))
raise SystemError("Click button failes bc: " + str(err))
# Close browsers:
def close_browser(self):
try:
browser.close_all_browsers()
except Exception as err:
self.logger.error("Close all browsers fails bc: " + str(err))
raise SystemError("Close all browsers fails bc: " + str(err))
# Get Agencies Data:
def get_agencies(self):
try:
total = 0
count_agencies = len(browser.find_elements("(//*[contains(@class, 'col-sm-4 text-center noUnderline')])"))
agencies = []
for i in range(1,10):
for j in range (1,4):
total = total + 1
if total <= count_agencies:
agency = browser.find_element("xpath://*[@id='agency-tiles-widget']/div/div["+str(i)+"]/div["+str(j)+"]/div/div/div/div[1]/a/span[1]").text
agencies.append(agency)
#dt_agencies = tb.create_table(agencies)
return agencies
except Exception as err:
self.logger.error("Unable to get Agencies names bc: " + str(err))
raise SystemError("Unable to get Agencies names bc: " + str(err))
# Get Amounts Data:
def get_amounts(self):
try:
total = 0
count_agencies = len(browser.find_elements("(//*[contains(@class, 'col-sm-4 text-center noUnderline')])"))
amounts = []
for i in range(1,10):
for j in range (1,4):
total = total + 1
if total <= count_agencies:
amount = browser.find_element("xpath://*[@id='agency-tiles-widget']/div/div["+str(i)+"]/div["+str(j)+"]/div/div/div/div[1]/a/span[2]").text
amounts.append(amount)
#dt_amounts = tb.create_table(amounts)
return amounts
except Exception as err:
self.logger.error("Unable to get amounts of each agency bc: " + str(err))
raise SystemError("Unable to get amounts of each agency bc: " + str(err))
#Scraping data from table
def get_table_data(self):
try:
browser.select_from_list_by_value("//*[@id='investments-table-object_length']/label/select","-1")
browser.set_browser_implicit_wait(5)
row_count = len(browser.find_elements("//*[@id='investments-table-object']/tbody/tr"))
col_count = len(browser.find_elements("//*[@id='investments-table-object']/tbody/tr[1]/td"))
data = tb.create_table()
columns = ["A","B","C","D","E","F","G"]
for col in columns:
tb.add_table_column(data, col)
for n in range(1, row_count+1):
browser.select_from_list_by_value("//*[@id='investments-table-object_length']/label/select","-1")
rows = []
row = 0
for m in range(1, col_count+1):
browser.select_from_list_by_value("//*[@id='investments-table-object_length']/label/select","-1")
path = "//*[@id='investments-table-object']/tbody/tr["+str(n)+"]/td["+str(m)+"]"
table_data = browser.find_element(path).text
rows.append(table_data)
if(columns[row] == 'A'):
directory = "C:\\ROBOTS\\RPAChallenge\\Challenge\\output"
download_pdf(table_data, directory)
row = row + 1
tb.add_table_row(data, rows)
return data
except Exception as err:
self.logger.error("Scraping data from table fails: " + str(err))
raise SystemError("Scraping data from table fails: " + str(err))
# Download Specified Business Case if Exists Link
def download_pdf(file: str, directory):
try:
tableURL = "/drupal/summary/393/" + file
exist = browser.does_page_contain_link(tableURL)
if(exist):
link = browser.find_element('//a[@href="'+tableURL+'"]')
browser.click_link(link)
browser.set_browser_implicit_wait(30)
pdfPath = browser.find_element("//*[@id='business-case-pdf']/a")
browser.click_link(pdfPath)
while(fs.does_file_not_exist(directory+"\\"+file+".pdf")):
browser.set_browser_implicit_wait(10)
browser.go_back()
browser.go_back()
except:
pass
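# Minimal usage sketch: the URL and locator strings are assumptions for illustration
# only; the real ones live in the task file that drives this class.
def _example_run():
    nav = WebNavigation()
    nav.set_download_directory("C:\\ROBOTS\\RPAChallenge\\Challenge\\output")
    nav.open_website("https://itdashboard.gov/")   # assumed target site
    nav.click_button("partial link:DIVE IN")       # assumed locator for the landing-page button
    agencies = nav.get_agencies()
    amounts = nav.get_amounts()
    nav.close_browser()
    return list(zip(agencies, amounts))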
``` |
{
"source": "josiearcuri/Cutoffs",
"score": 3
} |
#### File: Cutoffs/HKplus/HKplus.py
```python
import numpy as np
import pandas as pd
import time, sys
import numba
import math
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.colors as mcolors
import matplotlib.gridspec as gridspec
from matplotlib import cm
import matplotlib as mpl
from scipy.stats import norm
import scipy.interpolate
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.spatial import distance
def update_progress(progress, start_time):
"""progress bar from https://stackoverflow.com/questions/3160699/python-progress-bar
update_progress() : Displays or updates a console progress bar
Accepts a float between 0 and 1. Any int will be converted to a float.
A value under 0 represents a 'halt'.
A value at 1 or bigger represents 100%"""
barLength = 20 # Modify this to change the length of the progress bar
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\rPercent: [{0}] {1}% {2}".format( "#"*block + "-"*int(round(barLength-block)), int(progress*100), status)
sys.stdout.write(text + "-- %s minute(s) --" % int(round((time.time() - start_time)/60)))
sys.stdout.flush()
class Channel:
"""class for Channel objects"""
def __init__(self,x,y,W,D,MR):
"""initialize Channel object
x, y - coordinates of centerline
W - channel width
D - channel depth
MR - migration rate from last centerline"""
self.x = x
self.y = y
self.W = W
self.D = D
self.MR = MR
class Cutoff:
"""class for Cutoff objects"""
def __init__(self,x,y,W,dist, time, cut_len, rad, cllen):
"""initialize Cutoff object
x, y, - coordinates of centerline
W - channel width
dist - distance along centerline of upstream most node of cutoff bend.
time - year of occurrence
cut_len - distance removed from centerline by cutoff
radius - radius of curvature
cllen - centerline length after cutoff
"""
self.x = x
self.y = y
self.W = W
self.dist = dist
self.time = time
self.cut_len = cut_len
self.radius = rad
self.cllen = cllen
class ChannelBelt:
"""class for ChannelBelt objects"""
def __init__(self, channels, cutoffs, cl_times, cutoff_times, cutoff_dists, decay_rate, bump_scale, cut_thresh, sinuosity):
"""initialize ChannelBelt object
channels - list of Channel objects
cutoffs - list of Cutoff objects
cl_times - list of ages of Channel objects
cutoff_times - list of ages of Cutoff objects
cutoff_dists - list of cutoff distances downstream
decay_rate - rate at which nonlocal effects dissipate
bump_scale - amplitude of nonlocal effects bump, scalar
cut_thresh - how many cutoffs to simulate"""
self.channels = channels
self.cutoffs = cutoffs
self.cl_times = cl_times
self.cutoff_times = cutoff_times
self.cutoff_dists = cutoff_dists
self.decay_rate = decay_rate
self.bump_scale = bump_scale
self.cut_thresh = cut_thresh
self.sinuosity = sinuosity
def migrate_years(self,nit,saved_ts,deltas,pad, crdist,Cf,kl,dt,dens=1000):
"""function for computing migration rates along channel centerlines and moving them, limited by number of iterations
inputs:
nit - number of iterations
saved_ts - which time steps will be saved
deltas - distance between nodes on centerline
pad - padding for upstream bc (number of nodepoints along centerline)
crdist - threshold distance at which cutoffs occur
Cf - dimensionless Chezy friction factor
kl - migration rate constant (m/s)
dt - time step (s)"""
start_time = time.time()
channel = self.channels[-1] # first channel is the same as last channel of input
x = channel.x; y = channel.y; W = channel.W; D = channel.D;
k = 1.0 # constant in HK equation
xc = [] # initialize cutoff coordinates
yc = []
cut_dist = []# initialize cutoff distance ds array
cut_len = []# initialize cutoff length removal array
# determine age of last channel:
if len(self.cl_times)>0:
last_cl_time = self.cl_times[-1]
else:
last_cl_time = 0
dx, dy, ds, s = compute_derivatives(x,y)
omega = -1.0 # constant in curvature calculation (Howard and Knutson, 1984)
gamma = 2.5 # from Ikeda et al., 1981 and Howard and Knutson, 1984
ne = np.zeros_like(x) #array to keep track of nonlocal effects
MR = 0
DS = 0
for itn in range(nit): # main loop
update_progress(itn/nit, start_time)
ne = update_nonlocal_effects(ne, s, self.decay_rate, self.bump_scale, cut_dist, cut_len) #update array of ne with last itn's cutoff(s) and decay old ne
klarray = nominal_rate(kl, ne)## compute array of nominal migration rate in m/s with nonlocal effects accounted for
curv = compute_curvature(x,y)#get curvature of bends before cutoffs happen
x, y, R1 = migrate_one_step(x,y,W,klarray,dt,k,Cf,D,pad,omega,gamma)
x,y,xc,yc,cut_dist, cut_len, ind1, ind2 = cut_off_cutoffs(x,y,s,crdist,deltas) # find and execute cutoffs
x,y,dx,dy,ds,s = resample_centerline(x,y,deltas) # resample centerline
Sin = get_sinuosity(x,y,s)
if len(xc)>0: # save cutoff data
rad = get_radii(curv, ind1, ind2, W)
cutoff = Cutoff(xc,yc,W,cut_dist[0],last_cl_time+(itn)*dt/(365*24*60*60.0), cut_len[0], rad, s[-1]) # create cutoff object
#keep track of year cutoff occurs, where it occurs, and save an object.
self.cutoff_times.append(last_cl_time+(itn)*dt/(365*24*60*60.0))
self.cutoff_dists.append(cut_dist)
self.cutoffs.append(cutoff)
# saving centerlines:
if np.mod(itn,saved_ts)==0 or itn == nit-1:
channel = Channel(x,y,W,D, MR) # create channel object, save year
self.cl_times.append(last_cl_time+(itn)*dt/(365*24*60*60.0))
self.channels.append(channel)
self.sinuosity.append(Sin)
def migrate_cuts(self,saved_ts,deltas,pad, crdist,Cf,kl,dt,dens=1000):
"""function for computing migration rates along channel centerlines and moving them, limited by number of cutoffs the channel experiences
inputs:
saved_ts - which time steps will be saved
deltas - distance between nodes on centerline
pad - padding for upstream bc (number of nodepoints along centerline)
crdist - threshold distance at which cutoffs occur
Cf - dimensionless Chezy friction factor
kl - migration rate constant (m/s)
dt - time step (s)"""
start_time = time.time()
channel = self.channels[-1] # first channel is the same as last channel of input
x = channel.x; y = channel.y; W = channel.W; D = channel.D;
k = 1.0 # constant in HK equation
xc = [] # initialize cutoff coordinates
yc = []
cut_dist = []# initialize cutoff distance ds array
cut_len = []# initialize cutoff length removal array
# determine age of last channel:
if len(self.cl_times)>0:
last_cl_time = self.cl_times[-1]
else:
last_cl_time = 0
dx, dy, ds, s = compute_derivatives(x,y)
omega = -1.0 # constant in curvature calculation (Howard and Knutson, 1984)
gamma = 2.5 # from Ikeda et al., 1981 and Howard and Knutson, 1984
ne = np.zeros_like(x) #array to keep track of nonlocal effects
ymax = self.bump_scale*kl*2
itn = 0
while len(self.cutoffs)<self.cut_thresh: # main loop
itn = itn+1
update_progress(len(self.cutoffs)/self.cut_thresh, start_time)
ne = update_nonlocal_effects(ne, s, self.decay_rate, self.bump_scale, cut_dist, cut_len) #update array of ne with last itn's cutoff(s) and decay old ne
curv = compute_curvature(x,y)
klarray = nominal_rate(kl, ne)## compute array of nominal migration rate in m/s with nonlocal effects accounted for
x, y, MR = migrate_one_step(x,y,W,klarray,dt,k,Cf,D,pad,omega,gamma)
x,y,xc,yc,cut_dist, cut_len,ind1, ind2 = cut_off_cutoffs(x,y,s,crdist,deltas) # find and execute cutoffs
x,y,dx,dy,ds,s = resample_centerline(x,y,deltas) # resample centerline
Sin = get_sinuosity(x,y,s)
if len(xc)>0: # save cutoff data
rad = get_radii(curv, ind1, ind2, W)
cutoff = Cutoff(xc,yc,W,cut_dist,last_cl_time+(itn)*dt/(365*24*60*60.0), cut_len, rad, s[-1]) # create cutoff object
#keep track of year cutoff occurs, where it occurs, and save an object.
self.cutoff_times.append(last_cl_time+(itn)*dt/(365*24*60*60.0))
self.cutoff_dists.append(cut_dist)
self.cutoffs.append(cutoff)
# saving centerlines:
if np.mod(itn,saved_ts)==0 or len(self.cutoffs)>=self.cut_thresh:
channel = Channel(x,y,W,D,MR) # create channel object, save year
self.cl_times.append(last_cl_time+(itn)*dt/(365*24*60*60.0))
self.channels.append(channel)
self.sinuosity.append(Sin)
def migrate_bendtracking(self,saved_ts,deltas,pad, crdist,Cf,kl,dt,dens=1000):
"""function for computing migration rates along channel centerlines and moving them, limited by number of cutoffs the channel experiences
inputs:
saved_ts - which time steps will be saved
deltas - distance between nodes on centerline
pad - padding for upstream bc (number of nodepoints along centerline)
crdist - threshold distance at which cutoffs occur
Cf - dimensionless Chezy friction factor
kl - migration rate constant (m/s)
dt - time step (s)"""
start_time = time.time()
channel = self.channels[-1] # first channel is the same as last channel of input
x = channel.x; y = channel.y; W = channel.W; D = channel.D;
k = 1.0 # constant in HK equation
xc = [] # initialize cutoff coordinates
yc = []
cut_dist = []# initialize cutoff distance ds array
cut_len = []# initialize cutoff length removal array
# determine age of last channel:
if len(self.cl_times)>0:
last_cl_time = self.cl_times[-1]
else:
last_cl_time = 0
dx, dy, ds, s = compute_derivatives(x,y)
omega = -1.0 # constant in curvature calculation (Howard and Knutson, 1984)
gamma = 2.5 # from Ikeda et al., 1981 and Howard and Knutson, 1984
ne = np.zeros_like(x) #array to keep track of nonlocal effects
ymax = self.bump_scale*kl*2
itn = 0
while len(self.cutoffs)<self.cut_thresh: # main loop
itn = itn+1
update_progress(len(self.cutoffs)/self.cut_thresh, start_time)
ne = update_nonlocal_effects(ne, s, self.decay_rate, self.bump_scale, cut_dist, cut_len) #update array of ne with last itn's cutoff(s) and decay old ne
curv = compute_curvature(x,y)
klarray = nominal_rate(kl, ne)## compute array of nominal migration rate in m/s with nonlocal effects accounted for
x, y, MR = migrate_one_step(x,y,W,klarray,dt,k,Cf,D,pad,omega,gamma)
x,y,xc,yc,cut_dist, cut_len,ind1, ind2 = cut_off_cutoffs(x,y,s,crdist,deltas) # find and execute cutoffs
x,y,dx,dy,ds,s = resample_centerline(x,y,deltas) # resample centerline
if len(xc)>0: # save cutoff data
rad = get_radii(curv, ind1, ind2, W)
cutoff = Cutoff(xc,yc,W,cut_dist,last_cl_time+(itn)*dt/(365*24*60*60.0), cut_len, rad, s[-1]) # create cutoff object
#keep track of year cutoff occurs, where it occurs, and save an object.
self.cutoff_times.append(last_cl_time+(itn)*dt/(365*24*60*60.0))
self.cutoff_dists.append(cut_dist)
self.cutoffs.append(cutoff)
# saving centerlines:
if np.mod(itn,saved_ts)==0 or len(self.cutoffs)>=self.cut_thresh:
channel = Channel(x,y,W,D,MR) # create channel object, save year
self.cl_times.append(last_cl_time+(itn)*dt/(365*24*60*60.0))
self.channels.append(channel)
def plot_channels(self):
cot = np.array(self.cutoff_times)
sclt = np.array(self.cl_times)
times = np.unique(np.sort(np.hstack((cot,sclt))))
# set up min and max x and y coordinates of the plot:
xmin = np.min(self.channels[0].x)
xmax = 1
ymax = 1
for i in range(len(self.channels)):
ymax = max(ymax, np.max(np.abs(self.channels[i].y)))
xmax = max(xmax, np.max(np.abs(self.channels[i].x)))
ymax = ymax+1000# add a bit of space on top and bottom
ymin = -1*ymax
# size figure so that its size matches the size of the model:
fig, ax = plt.subplots(figsize=(10,(ymax-ymin)*10/(xmax-xmin)))
cmap = cm.get_cmap('gray_r',len(sclt))
ax.set_xlim([xmin-1000,xmax+1000])
ax.set_ylim([ymin,ymax])
plt.axis('equal')
ax.plot([xmin, xmin+5000],[ymin, ymin], 'k', linewidth=2)
ax.text(xmin+1500, ymin+200+100, '5 km', fontsize=8)
order = 0
for i in range(0,len(times)):
if times[i] in cot:
ind = np.where(cot==times[i])[0][0]
for j in range(0,len(self.cutoffs[ind].x)):
x1 = self.cutoffs[ind].x[j]
y1 = self.cutoffs[ind].y[j]
W = self.channels[-1].W
xm, ym = get_channel_banks(x1,y1,W)
order += 1
if times[i]==cot[-1] and j==(len(self.cutoffs[ind].x)-1):
plt.fill(xm,ym,facecolor='r',edgecolor = 'none', alpha = .5,zorder=order, label= 'cutoff')
else:
plt.fill(xm,ym,facecolor='r',edgecolor = 'none', alpha = .5,zorder=order, label= '_nolegend_')
if times[i] in sclt:
ind = np.where(sclt==times[i])[0][0]
x1 = self.channels[ind].x
y1 = self.channels[ind].y
W = self.channels[ind].W
xm, ym = get_channel_banks(x1,y1,W)
order += 1
plt.fill(xm,ym,facecolor=cmap(ind/len(sclt)),edgecolor = 'k', linewidth=0.1,zorder=order, label= '_nolegend_')
ax.legend(frameon = False, loc = 'lower left',bbox_to_anchor=(7000/(xmax+2000), 0, .1, .1), markerscale = .1)
ax.axis('off')
return fig
def plot_channels2(self,ncutoff, directory):
cot = np.array(self.cutoff_times)
sclt = np.array(self.cl_times)
# set up min and max x and y coordinates of the plot:
xmin = np.min(self.channels[0].x)
xmax = 1
ymax = 1
for i in range(len(self.channels)):
ymax = max(ymax, np.max(np.abs(self.channels[i].y)))
xmax = max(xmax, np.max(np.abs(self.channels[i].x)))
ymax = ymax+1000# add a bit of space on top and bottom
ymin = -1*ymax
# size figure so that its size matches the size of the model:
cmap = cm.get_cmap('gray_r',self.bump_scale)
order = 0
figs=[]
cmap = LinearSegmentedColormap.from_list('custom red', ['#ffffff','#de2d26'], N=256)
ind = np.where(sclt>=cot[ncutoff])[0][0]
times = np.arange(0, len(sclt))[(ind-1):1+(ind+(4*4))]
fig, axs = plt.subplots(6,1, figsize = (2.5, 5), constrained_layout = True)
cx1 = self.cutoffs[ncutoff].x[0]
cy1 = self.cutoffs[ncutoff].y[0]
dist = self.cutoffs[ncutoff].cut_len
loc = self.cutoffs[ncutoff].dist
bump = self.bump_scale
decay = self.decay_rate
steps = np.arange(1, 18, 4)
ne = np.zeros_like(self.channels[times[0]].x)
s = np.zeros_like(self.channels[times[0]].x)
for t in range(0,len(times)):
W = self.channels[-1].W
x1 = self.channels[times[t]].x
y1 = self.channels[times[t]].y
dx, dy, ds, s = compute_derivatives(x1,y1)
ne = update_nonlocal_effects(ne, s, decay, bump, [], [])
xm, ym = get_channel_banks(x1,y1,W)
order += 1
central_node = np.where(x1>=cx1[0])[0][0]
upstream_node = np.where(s>=s[central_node]-dist)[0][0]
downstream_node = np.where(s>=s[central_node]+dist)[0][0]
ne_x = x1[upstream_node-1:downstream_node+1]
ne_y = y1[upstream_node-1:downstream_node+1]
ne_mag = ne[upstream_node-1:downstream_node+1]
if t==0:
ne = update_nonlocal_effects(ne, s, decay, bump,np.array(loc), np.array(dist))
central_node = np.where(x1>=cx1[0])[0][0]
upstream_node = np.where(s>=s[central_node]-dist)[0][0]
downstream_node = np.where(s>=s[central_node]+dist)[0][0]
ne_x = x1[upstream_node-1:downstream_node+1]
ne_y = y1[upstream_node-1:downstream_node+1]
ne_mag = ne[upstream_node-1:downstream_node+1]
axs[0].set_xlim([cx1[0]-dist[0]/1.85,cx1[0]+dist[0]/1.85])
axs[0].fill(xm,ym,facecolor='k',edgecolor = 'k', alpha = 1, linewidth=0.1,zorder=order)
#axs[i].fill(xm[idx,y1,facecolor='r',edgecolor = 'none', alpha = .5,zorder=order, label= '_nolegend_')
#plt.plot(chx1[central], chy1[chx1<=x1[0]], color = 'k', linewidth = 1, zorder = order)
axs[0].annotate(text ='year '+str(int(t)) , xy = (.9,.9), xycoords='axes fraction', fontsize = 10)
axs[0].axis('off')
if t in steps:
nextind = np.where(steps==t)[0][0]+1
axs[nextind].fill(xm,ym,facecolor='k',edgecolor = 'k', alpha = 1, linewidth=0.1,zorder=order, label= 'year '+str(2+int((t-1)/4)))
#axs[i].fill(xm[idx,y1,facecolor='r',edgecolor = 'none', alpha = .5,zorder=order, label= '_nolegend_')
#plt.plot(chx1[central], chy1[chx1<=x1[0]], color = 'k', linewidth = 1, zorder = order)
axs[nextind].annotate(text ='year '+str(1+int((t-1)/4)) , xy = (.9,.9), xycoords='axes fraction', fontsize = 10)
axs[nextind].axis('off')
nemap = axs[nextind].scatter(ne_x[ne_mag>0], ne_y[ne_mag>0], s= 40, c = ne_mag[ne_mag>0], vmin = 0, vmax = bump, cmap = cmap)
axs[nextind].set_xlim([cx1[0]-dist[0]/1.85,cx1[0]+dist[0]/1.85])
#axs[t].set_ylim([ymin,ymax])
norm = mpl.colors.Normalize(vmin=0, vmax=bump)
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),orientation='horizontal', ax = axs, label = 'nonlocal effect [*]', shrink = 0.6)
#axs[-1].set_xlim([cx1[0]-dist[0]/4,cx1[0]+dist[0]/4])
return fig
def cutoff_distributions(self, year, filepath, mode):
"""pull cutoff data from channel belt object and export csv, return dataframe for plotting
year - last centerline year
filepath - where cutoff info csv and distribution plot are saved
mode - for plotting - "OnlyCurvature" or "NonlocalEffects"
"""
#pull cutoff locations, downstrem distance, time from channel belt as a pandas dataframe
distances = [i.dist[0] for i in self.cutoffs]
times = [i.time for i in self.cutoffs]
radius = [i.radius for i in self.cutoffs]
cutlen = [i.cut_len[0] for i in self.cutoffs]
cllen = [i.cllen for i in self.cutoffs]
cuts = pd.DataFrame({'downstream_distance': distances, 'time': times, 'radius':radius, 'cutlen': cutlen, 'cllen': cllen })
#save distribution to csv
newcuts = cuts.to_csv(filepath+mode+str(len(cuts['time']))+"_cutoffs_distribution.csv", index_label = "Cutoff")
plot_cuts(cuts,self.channels[-1].W, filepath)
return cuts
def MR_time(self, filepath):
MR = [[bend for bend in i.MR] for i in self.channels[1:]]
clt = np.array(self.cl_times[1:])
MRdf= pd.DataFrame(MR).dropna(axis=1, how = 'all').dropna(axis=0, how = 'all')
MRdf.to_csv(filepath, index=False, header=False)
MRdf = pd.read_csv(filepath, sep = ',', header=None).to_numpy()
fig, axs = plt.subplots(1,1, figsize=(6,8))
plot_segmented_MR(MRdf,np.nanmean(MRdf), fig, axs, 'k', filepath[-16:-18])
def plot_cuts(cuts,W, filepath):
fig = plt.figure(figsize = (5,5))
plt.rcParams.update({'font.size': 10})
plt.scatter(cuts['downstream_distance']/W,cuts['time'], c = 'black', s = 1.5, edgecolor = 'black')
ncuts = len(cuts['time'])
plt.ylabel("time (years)")
plt.xlim(left=0)
plt.ylim(bottom=0)
plt.xlabel("distance downstream (ch-w)")
return fig
def resample_centerline(x,y,deltas):
dx, dy, ds, s = compute_derivatives(x,y) # compute derivatives
# resample centerline so that 'deltas' is roughly constant
# [parametric spline representation of curve]
tck, u = scipy.interpolate.splprep([x,y],s=0)
unew = np.linspace(0,1,1+int(s[-1]/deltas)) # vector for resampling
out = scipy.interpolate.splev(unew,tck) # resampling
x, y = out[0], out[1] # assign new coordinate values
dx, dy, ds, s = compute_derivatives(x,y) # recompute derivatives
return x,y,dx,dy,ds,s
def nominal_rate(kl, ne):
"""update nominal migration rate with nonlocal effects array"""
new_kl = kl*(1+ne)
return new_kl
def migrate_one_step(x,y,W,klarray,dt,k,Cf,D,pad,omega,gamma):
dx, dy, ds, s = compute_derivatives(x,y)
curv = W*compute_curvature(x,y)# dimensionless curvature
R0 = klarray*curv #nominal migration rate with local curvature
alpha = k*2*Cf/D # exponent for convolution function G
R1 = compute_migration_rate(pad,len(x),ds,alpha,omega,gamma,R0)
# calculate bend-by-bend migration rate
MR = segmented_MR(curv, R1, s)
# calculate new centerline coordinates:
dy_ds = dy/ds
dx_ds = dx/ds
# move x and y coordinates:
x = x + R1*dy_ds*dt
y = y - R1*dx_ds*dt
return x,y, MR
def generate_initial_channel(W,D,deltas,pad):
"""generate straight Channel object with some noise added that can serve
as input for initializing a ChannelBelt object
from MeanderPy
W - channel width
D - channel depth
deltas - distance between nodes on centerline
pad - padding (number of nodepoints along centerline)"""
cl_length = ((50*W)*10)# length of noisy part of initial centerline
pad1 = pad//10
#padding at upstream end can be shorter than padding on downstream end
if pad1<5:
pad1 = 5
x = np.linspace(0, cl_length+(2*pad1)*deltas, int(cl_length/deltas+(2*pad1))+1) # x coordinate
y = 10.0 * (2*np.random.random_sample(int(cl_length/deltas)+1,)-1)
y = np.hstack((np.zeros((pad1),),y,np.zeros((pad1),))) # y coordinate
MR = np.zeros_like(x)
return Channel(x,y,W,D, MR)
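# Minimal simulation sketch: parameter values are illustrative assumptions only,
# chosen to show the call sequence (initial channel -> ChannelBelt -> migration).
def _example_simulation():
    W, D, deltas, pad = 100.0, 5.0, 50.0, 100
    ch = generate_initial_channel(W, D, deltas, pad)
    chb = ChannelBelt(channels=[ch], cutoffs=[], cl_times=[0.0], cutoff_times=[],
                      cutoff_dists=[], decay_rate=0.1, bump_scale=2, cut_thresh=1,
                      sinuosity=[1.0])
    dt = 0.05 * 365 * 24 * 60 * 60.0       # ~18-day time step, in seconds
    kl = 20.0 / (365 * 24 * 60 * 60.0)     # 20 m/yr expressed in m/s
    chb.migrate_years(nit=500, saved_ts=50, deltas=deltas, pad=pad, crdist=2 * W,
                      Cf=0.02, kl=kl, dt=dt)
    return chb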
def load_initial_channel(filepath, W, D, deltas):
"""generate initial channel from centerline csv that can serve
as input for initializing a ChannelBelt object. must be fine enough resolution to not warrant smoothing
filepath - csv with x, y coordinates and no headers
W - channel width
D - channel depth
deltas - distance between nodes on centerline"""
df = pd.read_csv(filepath, sep = ',', header=None).values
x = df[:,0]
y = df[:,1]
MR = np.zeros(int(len(x)/30))
return Channel(x,y,W,D,MR)
def generate_channel_from_file(filelist, deltas, matlab_corr= -1):
"""function for creating a MeanderPy Channel object from an externally-sourced centerline in .csv file format.
inputs:
filelist - a list of filepaths. The first should be a csv containing x and y values for each point on a centerline. The second should be the widths of each point along the centerline
slope - average channel slope (default = .01)
D_in - channel depth (m, default = 10)
smooth_factor - fraction of centerline points to sample from spline (default = 1/4)
matlab_corr - 1 if y-axis need not be flipped, -1 for centerlines exported from matlab and need flipping
outputs:
ch - MeanderPy object of channel centerline
x - uninterpolated array of x coordinates
y - uninterpolated array of y coordinates
cl_len - length of centerline(m) """
#use pandas to load x,y coordinates and widths of centerline from csv
varlist = [pd.read_csv(file, sep = ',', header=None).values for file in filelist]
x = varlist[0][:,0]*30 ## x-dim array in m
y = varlist[0][:,1]*matlab_corr*30 ##southern hemisphere y-dim array in m
# shift coordinates so all are positive
if sum(n < 0 for n in y) > 0:
y = y-min(y)## y-dim array in m
if sum(n < 0 for n in x) > 0:
x = x-min(x)
#average over widths to get a reach-constant width scalar
W = np.mean(varlist[1][:,0])*30
## water depth scalar#
D = np.exp(np.log(W/18.8)/1.41)
# Linear length along the line, add a zero for first point:
points = np.vstack([x, y]).T
distance = np.cumsum( np.sqrt(np.sum( np.diff(points, axis=0)**2, axis=1 )) )
distance = np.insert(distance, 0, 0)
# Build a list of the spline function, one for each dimension:
splines = [InterpolatedUnivariateSpline(distance, coords) for coords in points.T]
# Compute the spline for the smoothed(sampled) distances:
points_fitted = np.vstack([spl(np.linspace(0, distance[-1],int(distance[-1]/(W//2)))) for spl in splines])
## z-dim array, interpolated with constant slope along points of centerline. assumes centerline points are equidistantly placed along original centerline.
#deltas = round(distance[-1]/(len(points_fitted[0])-1))
MR = np.zeros_like(points_fitted[0])
return Channel(points_fitted[0],points_fitted[1],W,D, MR), D, W
@numba.jit(nopython=True)
def compute_migration_rate(pad,ns,ds,alpha,omega,gamma,R0):
"""compute migration rate as weighted sum of upstream curvatures
pad - padding (number of nodepoints along centerline)
ns - number of points in centerline
ds - distances between points in centerline
omega - constant in HK model
gamma - constant in HK model
R0 - nominal migration rate (dimensionless curvature * migration rate constant)"""
R1 = np.zeros(ns) # preallocate adjusted channel migration rate
pad_up = ns-pad
#########Periodic Boundary#########################
for i in range(2,pad):
si2 = np.hstack((np.array([0]),np.cumsum(np.hstack((ds[i-1::-1], ds[ns-1:pad_up:-1])))))
G = np.exp(-alpha*si2) # convolution vector for downstream boundary to wrap around
R1[i] = omega*R0[i] + gamma*np.sum(np.hstack((R0[i::-1], R0[ns-1:pad_up:-1]))*G)/np.sum(G) # main equation, weighted sum of curvatures upstream from downstream boundary - periodic boundary condition
#####################################################
for i in range(pad,ns):
si2 = np.hstack((np.array([0]),np.cumsum(ds[i-1::-1]))) # distance along centerline, backwards from current point
G = np.exp(-alpha*si2) # convolution vector
R1[i] = omega*R0[i] + gamma*np.sum(R0[i::-1]*G)/np.sum(G) # main equation
return R1
def compute_derivatives(x,y):
"""function for computing first derivatives of a curve (centerline)
x,y are cartesian coodinates of the curve
outputs:
dx - first derivative of x coordinate
dy - first derivative of y coordinate
ds - distances between consecutive points along the curve
s - cumulative distance along the curve"""
if len(x) < 2:
dx = [0]; dy = [0]; ds = [0]; s = [0]
else:
dx = np.gradient(x) # first derivatives
dy = np.gradient(y)
ds = np.sqrt(dx**2+dy**2)
s = np.hstack((0,np.cumsum(ds[1:])))
return dx, dy, ds, s
def compute_curvature(x,y):
"""function for computing first derivatives and curvature of a curve (centerline)
x,y are cartesian coodinates of the curve
outputs:
dx - first derivative of x coordinate
dy - first derivative of y coordinate
ds - distances between consecutive points along the curve
s - cumulative distance along the curve
curvature - curvature of the curve (in 1/units of x and y)"""
dx = np.gradient(x) # first derivatives
dy = np.gradient(y)
ddx = np.gradient(dx) # second derivatives
ddy = np.gradient(dy)
curvature = (dx*ddy-dy*ddx)/((dx**2+dy**2)**1.5)
return curvature
def kth_diag_indices(a,k):
"""function for finding diagonal indices with k offset
[from https://stackoverflow.com/questions/10925671/numpy-k-th-diagonal-indices]"""
rows, cols = np.diag_indices_from(a)
if k<0:
return rows[:k], cols[-k:]
elif k>0:
return rows[k:], cols[:-k]
else:
return rows, cols
def find_cutoffs(x,y,crdist,deltas):
"""function for identifying locations of cutoffs along a centerline
and the indices of the segments that will become part of the oxbows
from MeanderPy
x,y - coordinates of centerline
crdist - critical cutoff distance
deltas - distance between neighboring points along the centerline"""
diag_blank_width = int((crdist+20*deltas)/deltas)
# distance matrix for centerline points:
dist = distance.cdist(np.array([x,y]).T,np.array([x,y]).T)
dist[dist>crdist] = np.NaN # set all values that are larger than the cutoff threshold to NaN
# set matrix to NaN along the diagonal zone:
for k in range(-diag_blank_width,diag_blank_width+1):
rows, cols = kth_diag_indices(dist,k)
dist[rows,cols] = np.NaN
i1, i2 = np.where(~np.isnan(dist))
ind1 = i1[np.where(i1<i2)[0]] # get rid of unnecessary indices
ind2 = i2[np.where(i1<i2)[0]] # get rid of unnecessary indices
return ind1, ind2 # return indices of cutoff points and cutoff coordinates
def cut_off_cutoffs(x,y,s,crdist,deltas):
"""function for executing cutoffs - removing oxbows from centerline and storing cutoff coordinates
from meanderpy
x,y - coordinates of centerline
crdist - critical cutoff distance
deltas - distance between neighboring points along the centerline
outputs:
x,y - updated coordinates of centerline
xc, yc - lists with coordinates of cutoff segments
cl_dist - distance cutoff occurs down centerline"""
xc = []
yc = []
cl_dist = []
cut_len = []
max_curv = []
ind1, ind2 = find_cutoffs(x,y,crdist,deltas) # initial check for cutoffs
ind1_save = []
ind2_save = []
if len(ind1)>0:
ind1_save = ind1[0]
ind2_save = ind2[0]
while len(ind1)>0:
xc.append(x[ind1[0]:ind2[0]+1]) # x coordinates of cutoff
yc.append(y[ind1[0]:ind2[0]+1]) # y coordinates of cutoff
dx, dy, ds, s_little = compute_derivatives(x[:ind1[0]+1],y[:ind1[0]+1])#compute derivatives upstream of cutoff
cl_dist.append(s_little[-1]) #cutoff distance downstream
dx, dy, ds, s_between = compute_derivatives(xc[-1],yc[-1])#compute derivatives along cutoff bend
cut_len.append(s_between[-1]) #length removed by cutoff
x = np.hstack((x[:ind1[0]+1],x[ind2[0]:])) # x coordinates after cutoff
y = np.hstack((y[:ind1[0]+1],y[ind2[0]:])) # y coordinates after cutoff
ind1, ind2 = find_cutoffs(x,y,crdist,deltas)
return x,y,xc,yc, cl_dist, cut_len, ind1_save, ind2_save
def get_channel_banks(x,y,W):
"""function for finding coordinates of channel banks, given a centerline and a channel width
from meanderpy
x,y - coordinates of centerline
W - channel width
outputs:
xm, ym - coordinates of channel banks (both left and right banks)"""
x1 = x.copy()
y1 = y.copy()
x2 = x.copy()
y2 = y.copy()
ns = len(x)
dx = np.diff(x); dy = np.diff(y)
ds = np.sqrt(dx**2+dy**2)
x1[:-1] = x[:-1] + 0.5*W*np.diff(y)/ds
y1[:-1] = y[:-1] - 0.5*W*np.diff(x)/ds
x2[:-1] = x[:-1] - 0.5*W*np.diff(y)/ds
y2[:-1] = y[:-1] + 0.5*W*np.diff(x)/ds
x1[ns-1] = x[ns-1] + 0.5*W*(y[ns-1]-y[ns-2])/ds[ns-2]
y1[ns-1] = y[ns-1] - 0.5*W*(x[ns-1]-x[ns-2])/ds[ns-2]
x2[ns-1] = x[ns-1] - 0.5*W*(y[ns-1]-y[ns-2])/ds[ns-2]
y2[ns-1] = y[ns-1] + 0.5*W*(x[ns-1]-x[ns-2])/ds[ns-2]
xm = np.hstack((x1,x2[::-1]))
ym = np.hstack((y1,y2[::-1]))
return xm, ym
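# Quick sketch of turning a centerline into a bank polygon for plotting;
# the sine centerline below is synthetic example data.
def _example_banks():
    x_demo = np.linspace(0, 1000, 101)
    y_demo = 50.0 * np.sin(x_demo / 100.0)
    xm_demo, ym_demo = get_channel_banks(x_demo, y_demo, 40.0)
    # xm_demo/ym_demo trace one bank downstream and the other back upstream,
    # ready to be passed to matplotlib's fill()
    return xm_demo, ym_demo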
def update_nonlocal_effects(ne, s, decay, scale, cut_dist, cut_len, thresh = .05):
#reshape array to fit new centerline
ne = np.interp(np.arange(len(s)),np.arange(len(ne)), ne)
###decay old NE
ne_new = ne*np.exp(-decay)
### remove ne that are less than some threshold, default = .05 (1/20 of background rate)
ne_new[np.where(ne_new<(thresh))[0]] = 0
for k in range(len(cut_dist)): #for each cutoff, add new NE
#gaussian bump
mu = cut_dist[k]
print(cut_len[k])
sigma = (cut_len[k]*1.19)/2 # want 88% of the bump within 1.19*cut_len, rest is negligible
y_bump = norm.pdf(s, mu, sigma)
ne_new = ne_new + ((scale)*y_bump/np.max(y_bump))
return ne_new
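# Small sketch of the nonlocal-effect bump: a cutoff adds a Gaussian bump of
# amplitude `scale` centred at its downstream distance; later calls with no new
# cutoffs decay it by exp(-decay) per step and zero it below the 0.05 threshold.
# All numbers below are example values, not calibrated parameters.
def _example_bump_decay():
    s_demo = np.linspace(0, 10000, 501)                  # toy centerline distances (m)
    ne_demo = np.zeros_like(s_demo)
    ne_demo = update_nonlocal_effects(ne_demo, s_demo, decay=0.1, scale=2.0,
                                      cut_dist=[5000.0], cut_len=[1000.0])
    peak_after_cutoff = ne_demo.max()                    # roughly equal to scale
    for _ in range(10):                                  # ten steps of pure decay
        ne_demo = update_nonlocal_effects(ne_demo, s_demo, 0.1, 2.0, [], [])
    return peak_after_cutoff, ne_demo.max()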
def get_sinuosity(x,y,s):
dist = np.hypot(abs(x[-1]-x[0]), abs(y[-1]-y[0]))
Sin = s[-1]/dist
return Sin
def plot_sinuosity(time, sin):
fig, ax = plt.subplots(1,1)
ax.plot(range(0,len(sin)), sin, 'k', label = "mean S = "+ str(np.mean(sin)))
plt.legend()
ax.set_title("Sinuosity")
return fig
def get_radii(c, ind1, ind2, W):
#unsigned curvature
radii = abs(c)
# Width divided by maximum dimensionless curvature of cutoff bend
max_rad = W/(np.max(radii[ind1:ind2])*W)
return max_rad
def plot_distribution(cuts,W, filepath):
x = cuts['downstream_distance']/W
y = cuts['time']- min(cuts['time'])
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
spacing = 0.005
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom + height + spacing, width, 0.2]
rect_histy = [left + width + spacing, bottom, 0.2, height]
# start with a square Figure
fig, ax = plt.subplots(1,1,figsize=(3, 3))
plt.rcParams.update({'font.size': 10})
#ax = fig.add_axes([0,0,1,1])
#ax_histx = fig.add_axes(rect_histx, sharex=ax)
#ax_histy = fig.add_axes(rect_histy, sharey=ax)
ax.scatter(x, y, c = 'black', s = 10, alpha = .8)
# use the previously defined function
# scatter_hist(x, y, ax, ax_histx, ax_histy)
ax.set_ylabel("time (years)")
#plt.xlim(left=0)
#plt.ylim(bottom=0)
ax.set_xlabel("distance downstream (ch-w)")
ax.set_xlim([0,1500])
ax.set_ylim([0,250])
return fig
def scatter_hist(x, y, ax, ax_histx, ax_histy):
# no labels
ax_histx.tick_params(axis="x", labelbottom=False)
ax_histy.tick_params(axis="y", labelleft=False)
# the scatter plot:
ax.scatter(x, y, c = 'black', s = 2)
# now determine nice limits by hand:
xbinwidth = 25
ybinwidth = 10
xmax = np.max(np.abs(x))
ymax = np.max(np.abs(y))
xlim = (int(xmax/xbinwidth) + 1) * xbinwidth
ylim = (int(ymax/ybinwidth) + 1) * ybinwidth
xbins = np.arange(0, xlim + xbinwidth, xbinwidth)
ybins = np.arange(0, ylim + ybinwidth, ybinwidth)
ax_histx.hist(x, bins=xbins)
ax_histy.hist(y, bins=ybins, orientation='horizontal')
def segmented_MR(curv, R1, s, n=90):
"""
approximate a bend-by-bend nth percentile lateral migration rate.
Inputs:
curve: array of curvature for every node along the centerline.
R1: array of already computed migration distances for every node along the centerline.
ds: array of cumulative distance downstream between nodes.
n: percentile
Output:
MR: array of nth percentile migration rate for each segment.
upstream: distance downstream of each segment start
downstream: distance downstream of each segment end
"""
R1 = np.array(R1)*365*24*60*60.0 #m/yr instead of m/sec
infs1 = np.where(np.logical_and(curv[1:] > 0, curv[:-1] < 0))[0] + 1
infs2 = np.where(np.logical_and(curv[1:] < 0, curv[:-1] > 0))[0] + 1
idx = np.sort(np.concatenate((infs1, infs2)))
#nodes = np.array([(curv[i-2]*curv[i])< 0 and (curv[i+2]*curv[i])> 0 for i in range(2,len(curv)-2)])
#idx = np.where(nodes==1)[0]
idx = [idx[i] for i in range(len(idx)-1) if idx[i+1]-idx[i] >5]
MR = [np.percentile(np.abs(R1[idx[i]:idx[i+1]]), 100) for i in range(len(idx)-1)]
return MR
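# Toy sketch of segmented_MR: curvature sign changes split the centerline into
# bends, and each bend gets a percentile of |R1| converted from m/s to m/yr.
# The sine curvature and constant migration rate below are synthetic inputs.
def _example_segmented_MR():
    s_demo = np.linspace(0, 5000, 1000)
    curv_demo = np.sin(2 * np.pi * s_demo / 1000.0 + 0.3) / 100.0
    R1_demo = np.full_like(s_demo, 1e-7)                 # ~3.2 m/yr everywhere
    return segmented_MR(curv_demo, R1_demo, s_demo)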
def moving_average(matrix, window):
"""
averages migration rates on each bend over a set time window upwind.
inputs
matrix: n years by m bend migration rate numpy array
window: how many indices to average over, as years in the past
output
mid: moving average migration rate over window years for each bend
"""
years = len(matrix)
bends = np.min([np.count_nonzero(~np.isnan(matrix[i])) for i in range(years)])
mid = np.zeros(shape = (years, bends))
mid[0,:bends] = matrix[0][:bends]
for year in range(1,years):
if year < window:
mid[year, :] = np.nanmean(matrix[:year][:bends])
else:
mid[year, :] = np.nanmean(matrix[year-window:(year+1)][:bends])
return mid
def plot_segmented_MR(MR,obmr, fig, axs, c, name, dt = 1):
mean_all = round(np.nanmean(MR[:, 2:-2]),3)
max_all = round(np.nanmax(MR[:, 2:-2]), 3)
#heatmap = ax1.imshow(MR, cmap = 'gist_heat')
#heatmap = axs[1].imshow(MR/100, cmap = 'cividis', vmin = 0, vmax =np.nanmax(MR/100), aspect = 'auto', origin = 'lower')
#axs[1].set_xlabel('distance downstream (bend #)')
#cb = fig.colorbar(heatmap, ax=axs[1])
#cb.set_label("maximum migration rate (ch-w/yr)")
#mid = moving_average(MR, 2)/100
#min
#axs[0].plot(MR/100, range(len(MR)), alpha=.2, c = 'k')
#mean
# MR_ddt = np.nanpercentile((MR[1:, :] - MR[:-1, :])/MR[1:, :], 50, axis = 1)
#f_spec, Pxx_mid = periodogram(np.nanpercentile(MR[:, 10:], 50, axis = 1),dt, scaling = 'spectrum')
#f_spec, Pxx_mid = welch(np.nanpercentile(MR[:, :], 50, axis = 1), dt, nperseg=500, scaling = 'spectrum')
#f, t, Zxx = stft(np.nanpercentile(MR[:, :], 50, axis = 1), nperseg=100)
#line =axs.pcolormesh(t, f, np.abs(Zxx), vmin=0,vmax=int(np.nanpercentile(Zxx, 90)), shading='gouraud')
#filter
#Pxx_mid[1:-1]= np.convolve(Pxx_mid, np.ones(3)/3, mode='valid')
#cumu = np.cumsum(Pxx_mid)
#midfreq = f_spec[cumu>=(cumu[-1]/2)][0]
#freqscale = (f_spec[1:])/(500/len(MR[:, 0]))
#Pxx_mid = savgol_filter(Pxx_mid,5,3)
# Pxx_den = medfilt(Pxx_den)
#upper = np.percentile(Pxx_mid, 99)
#upper_noise = np.percentile(Pxx_mid[int(len(Pxx_mid)/2):], 95)
#NP = np.mean(Pxx_mid[-int((len(Pxx_mid)/4)):])
#peaks, _ = find_peaks(Pxx_mid)
#[proms, leftbases, rightbases] = peak_prominences(Pxx_mid, peaks)
#mids = ((rightbases-leftbases)/2) +leftbases
#newmids = mids[proms>=np.percentile(proms, 90)]
#print(newmids)
#input()
#proms = proms[proms>=np.percentile(proms, 90)]
#idx = proms.argsort()[:]
#proms = proms[idx]
#newmids = newmids[idx]
#lower_noise = np.percentile(Pxx_mid[-int(len(Pxx_mid)/5):], 1)
#amp = Pxx_mid.max()
#stn = amp/NP
#maxfreq = f_spec[Pxx_mid == Pxx_mid.max()]
line = axs.hist(MR.flatten(), range=(0,500), bins= range(0, 50),color= c, linewidth = 1.5, histtype='step')
#line = axs.semilogy(f[1:], NP/(np.power(f, 2))[1:],alpha=.8, linewidth = 1, ls = "--", c = 'brown', label = '_nolegend_')
#line = axs.semilogy(f_spec[1:], 1/(np.power(f_spec, .8))[1:],alpha=.8, linewidth = 1, ls = "--", c = 'pink', label = '_nolegend_')
#line = axs.semilogy(f[1:], NP+(f[1:]*0),alpha=.8, linewidth = 1, ls = "--", c = 'purple', label = '_nolegend_')
#line = axs.semilogy(f_spec[1:]/(500/len(MR[:, 0])), 1/(np.ones(len(f_spec)-1)*(upper_noise/NP)),alpha=.5, linewidth = .5, ls = "--", c = 'grey', label = '_nolegend_')
#line = axs.semilogy(f_spec[1:]/(500/len(MR[:, 0])), 1/(np.ones(len(f_spec)-1)*(lower_noise/NP)),alpha=.5, linewidth = .5, ls = "--", c = 'grey', label = '_nolegend_')
#line = axs.loglog((f_spec[1:]), 1/(f_spec[1:]**2),alpha=.8, linewidth = 1, ls = "--", c = 'brown', label = '_nolegend_')
#line = axs.loglog((f_spec[1:]), 1/(f_spec[1:]**1),alpha=.8, linewidth = 1, ls = "--", c = 'pink', label = '_nolegend_')
#line = axs.loglog((f_spec[1:]), (np.ones(len(f_spec)-1)),alpha=.8, linewidth = 1, ls = "--", c = 'grey', label = '_nolegend_')
#line = axs.semilogy((f_spec[1:]), Pxx_mid[1:]/NP,alpha=.8, linewidth = 1, c = c)#, label = name + " SNR = "+ str(round(stn, 3)))
#axs.scatter(newmids, np.ones(len(newmids)),marker = 'o', color = c,alpha = .8, label = '_nolegend_')
#axs.set_xlim([0, 100])
#xtickz = axs.get_xticks()
#axs.set_xticklabels(1/xtickz)
#axs.plot(, range(len(MR[:,0])), alpha=1, c = 'r', label = 'max = '+str(max_all))
axs.legend(loc = 'upper right', fontsize=8, frameon = False)
print("mean obmr = "+str(round(mean_all,5)))
return fig, axs, line
```
#### File: Cutoffs/SpaceTime/SpaceTime.py
```python
import numpy as np
import math
import matplotlib.pyplot as plt
import itertools as it
"""
class of functions to statistically compare cutoff 2-d spatiotemporal point processes to simulated complete spatial randomness. This is an implementation of Ripley's K function for 2-D space time.
"""
class RipleysKEstimator_spacetime:
def __init__(self,t_max, d_max, t_min, d_min, width, dt):
"""initialize estimator
t_max - int, last year of model iteration
d_max - int, longest centerline length
t_min - int, 0
d_min - int, 0
width - constant channel width in m"""
self.t_max = t_max # last year
self.d_max = d_max # max centerline length
self.t_min = t_min # 0
self.d_min = d_min # 0
self.width = width
self.dt = dt
def __call__(self, cutoffs, mode, max_search_d, max_search_t, plotornot):
"""
run the Monte Carlo envelope comparison (mc_env) for the requested statistic
"""
return self.mc_env(cutoffs = cutoffs, nit=99, mode = mode, max_search_d=max_search_d, max_search_t=max_search_t, plotornot=plotornot)
def _pairwise_diffs(self, data):
"""
compute array of distances between every point in 1D
data - 1-D array of deltas, space or time
"""
npts = len(data)
diff = np.zeros(shape=(npts * (npts - 1) // 2), dtype=np.double)
datai = np.zeros(shape=(npts * (npts - 1) // 2), dtype=np.double)
k = 0
for i in range(npts - 1):
for j in range(i+1, npts):
diff[k] = abs(data[i] - data[j])
datai[k] = data[i]
k += 1
return datai, diff
def _near_neigh(self,data):
"""
compute array of distance between every point and its nearest neighbor in 1D
data - 1-D array of deltas, space or time
"""
npts = len(data)
diff = np.zeros(shape=npts, dtype=np.double)
for i in range(npts):
others= np.hstack((data[i::-1], data[i+1:]))
mask = np.ones(len(data), dtype=bool)
mask[i] = False
others = others[mask]
diff[i] = np.min(abs(data[i] - others))
return diff
def _weights(self, xi, yi, diff_d, diff_t):
"""
compute weights for edge-effect correction: point pairs whose separation
reaches past a domain edge in space or time are double-counted (weight 2)
"""
# size the weight array to match the number of point pairs passed in
weights = np.ones_like(diff_d, dtype=np.double)
# top edge (late times)
weights[(((self.t_max - yi) - diff_t) <= 0)] = 2
# bottom edge (early times)
weights[((yi - diff_t) <= 0)] = 2
# downstream spatial edge
weights[(((self.d_max - xi) - diff_d) <= 0)] = 2
# upstream spatial edge
weights[((xi - diff_d) <= 0)] = 2
return weights
def _evaluate(self, data, dist_space, dist_time, mode):
"""
INPUTS
data - 2-D array of N by 2 size where N is number of cutoffs in dataset, column [:,0] records distance downstream and [:,1] is time.
dist_space - 1-D array of radii to search in for intensity estimates
dist_time - 1-D array of durations to search in for intensity estimates
mode - statistical measurement to be made, str, either 'K_st, 'K', 'G', or 'H'
OUTPUTS
stat_d - 1-D Ripleys K at distances dist_space
stat_t - 1-D temporal Ripleys K at durations dist_time
stat_dt - 2-D spatiotemporal Ripleys K of size len(dist_space) by len(dist_time), where each entry is the statistic of the sampled point process at that search distance and search duration.
"""
data = np.asarray(data)
npts = len(data)
stat_d = np.zeros(len(dist_space)) #1-D spatial statistic
stat_t = np.zeros(len(dist_time))#1-D temporal statistic
stat_dt = np.zeros((len(dist_space), len(dist_time))) #2D space-time statistic
null = stat_dt.copy()
if mode == "H":
"""
H , the probability of finding neighbors in search dist
"""
deltaspace = self._pairwise_diffs(data[:,0])
deltatime = self._pairwise_diffs(data[:,1])
for i in range(len(dist_space)):
d_indicator = (deltaspace <=dist_space[i])
stat_d[i] = (d_indicator).sum()
for i in range(len(dist_time)):
t_indicator = (deltatime<=dist_time[i])
stat_t[i] = (t_indicator).sum()
stat_t = 2*stat_t/(npts*(npts-1))
stat_d = 2*stat_d/(npts*(npts-1))
return (stat_d, stat_t)
if mode == "G":
"""
G, probability the nearest neighbor is within search dist
"""
deltaspace = self._near_neigh(data[:,0])
deltatime = self._near_neigh(data[:,1])
for i in range(len(dist_space)):
d_indicator = (deltaspace <=dist_space[i])
stat_d[i] = (d_indicator).sum()
for i in range(len(dist_time)):
t_indicator = (deltatime<=dist_time[i])
stat_t[i] = (t_indicator).sum()
stat_t = stat_t/(npts)
stat_d = stat_d/(npts)
return (stat_d, stat_t)
if mode == "K":
"""
number of additional events near other events on time scales of dist_time and spatial scales of dist_space, 2 1-d plots
"""
xi, deltaspace = self._pairwise_diffs(data[:,0])
yi, deltatime = self._pairwise_diffs(data[:,1])
for i in range(len(dist_space)):
d_indicator = (deltaspace <=dist_space[i])
stat_d[i] = (d_indicator*xi).sum()
for i in range(len(dist_time)):
t_indicator = (deltatime<=dist_time[i])
stat_t[i] = (t_indicator*yi).sum()
stat_t = 2*(self.t_max*stat_t/(npts*(npts-1)))
stat_d = 2*(self.d_max*stat_d/((npts-1)*npts))
return (stat_d, stat_t)
if mode == "K_st":
"""
            number of additional events near other events given specific search distances and durations. 2D heatmap
"""
xi, deltaspace = self._pairwise_diffs(data[:,0])
yi, deltatime = self._pairwise_diffs(data[:,1])
weights = self._weights(xi, yi, deltaspace, deltatime)
for x in range(len(dist_space)):
for t in range(len(dist_time)):
dt_indicator = (deltatime<=dist_time[t])&(deltaspace <=dist_space[x])
stat_dt[x,t] = (dt_indicator*weights).sum()
stat_dt = (self.d_max*self.t_max*stat_dt)/(npts*(npts-1))
return(stat_dt)
def mc_env(self,cutoffs, nit, mode, max_search_d, max_search_t, plotornot):
"""
        generate random distributions in the same space + time ranges as the data
"""
rng = np.random.default_rng(seed = 80)
data = cutoffs[['downstream_distance', 'time']].to_numpy()
num_samples = len(cutoffs.time)
r_time = np.linspace(self.dt,self.dt*max_search_t, max_search_t)
r_space = np.linspace(self.width,self.width*max_search_d, max_search_d)
mc_d = np.zeros((len(r_space), nit))
mc_t = np.zeros((len(r_time), nit))
mc_dt = np.zeros((len(r_space), len(r_time), nit))
z = np.zeros((num_samples, 2))
for i in range(nit):
z[:,0] = rng.random(size = num_samples)*self.d_max
z[:,1] = rng.random(size = num_samples)*self.t_max
k_dt = self._evaluate(data=z, dist_time=r_time, dist_space=r_space, mode='K_st')
mc_dt[:, :, i]= k_dt
k_d, k_t = self._evaluate(data=z, dist_time=r_time, dist_space=r_space, mode='K')
mc_d[:,i] = k_d
mc_t[:,i] = k_t
if mode == 'K_st':
## monte carlo envelope - limits on probable randomness
upper_dt = np.percentile(mc_dt, 97.5, axis = 2)
lower_dt = np.percentile(mc_dt, 2.5, axis = 2)
middle_dt = np.ma.mean(mc_dt, axis = 2)
upper_d = np.ma.max(mc_d, axis = 1)
lower_d = np.ma.min(mc_d, axis = 1)
upper_t = np.ma.max(mc_t, axis = 1)
lower_t = np.ma.min(mc_t, axis = 1)
#K values
stat_d, stat_t = self._evaluate(data=data, dist_time=r_time, dist_space=r_space, mode='K')
#space-time K
stat_dt = self._evaluate(data=data, dist_time=r_time, dist_space=r_space, mode=mode)
#dependent_clustered = (stat_dt>np.multiply(upper_d.reshape(len(upper_d),1),upper_t))
#dependent_regular = (stat_dt<np.multiply(lower_d.reshape(len(lower_d),1),lower_t))
            # locations of statistically nonrandom, dependent K values:
            # significantly more clustered than the upper MC envelope or more regular than the lower
clustered = (stat_dt>upper_dt)
regular = (stat_dt<lower_dt)
sig_mask = (clustered+regular)
#stat_d_times_stat_t = np.multiply(stat_d.reshape(len(stat_d),1),stat_t.reshape(1,len(stat_t)))
normalized = stat_dt-middle_dt
if plotornot == 1:
self.plot_st(r_space, r_time, normalized, sig_mask, np.sum(normalized), lower_dt-middle_dt, upper_dt-middle_dt)#
return [normalized,middle_dt, upper_dt-middle_dt, lower_dt-middle_dt]
else:
#monte carlo envelope
upper_d = np.ma.max(mc_d, axis = 1)/(r_space*2)-1
upper_t = np.ma.max(mc_t, axis = 1)/(r_time*2)-1
lower_d = np.ma.min(mc_d, axis = 1)/(r_space*2)-1
lower_t = np.ma.min(mc_t, axis = 1)/(r_time*2)-1
            #Simulated Poisson distribution
middle_d = np.ma.mean(mc_d, axis = 1)/(r_space*2)-1
middle_t = np.ma.mean(mc_t, axis = 1)/(r_time*2)-1
#K values
stat_d, stat_t = self._evaluate(data=data, dist_time=r_time, dist_space=r_space, mode=mode)
#normalize to what's expected under poisson
stat_d = (stat_d)/(r_space*2) -1
stat_t = (stat_t)/(r_time*2) -1
self.plot(upper_d,upper_t, lower_d, lower_t, middle_d, middle_t, r_space, r_time, stat_d, stat_t, num_samples)
def plot_st(self, r_space, r_time, normalized, sig_mask, D, lowerlim, upperlim):
plt.rcParams.update({'font.size': 10})
fig,ax = plt.subplots(figsize = (8,4))
cmap = plt.get_cmap('PiYG')
        # symmetric color limits taken from the Monte Carlo envelope
        vscale = max(np.max(np.abs(lowerlim)), np.max(np.abs(upperlim)))
        #im = ax.imshow(np.ma.masked_values(normalized, 0), origin='lower', cmap=cmap)
        im = ax.pcolormesh(np.swapaxes(normalized, 0, 1), cmap=cmap, vmin=-vscale, vmax=vscale)
#plt.pcolor(np.ma.masked_values(np.swapaxes(normalized*sig_mask,0,1),0), edgecolors='k', linewidths=4, alpha=0.)
im2 =ax.pcolormesh(np.ma.masked_values(np.swapaxes(normalized*sig_mask,0,1)/np.swapaxes(normalized*sig_mask,0,1),0), zorder=2, linewidths = .01,facecolor='none', edgecolors='k',cmap='gray')
plt.title('D ='+str(D), pad = 10)
cbar = ax.figure.colorbar(im, ax=ax)#, ticks = [-2,-1,0,1,2])
cbar.ax.set_ylabel("D(d,t) = K_hat(d,t)-K(d,t)", va="bottom", rotation=-90)
#cbar.ax.set_yticklabels(['<-2', '-1', '0','1','>2'])
ax.set_ylim(bottom=0, top=max(r_time/self.dt))
ax.set_xlim(left=0, right=max(r_space/self.width))
ax.set_ylabel('time window (years)')
ax.set_xlabel('search distance (ch-w)')
ax.set_xticks(r_space/(self.width), minor=True)
ax.set_yticks(r_time/self.dt, minor=True)
ax.set_xticks(r_space/self.width-.5)
ax.set_yticks(r_time/self.dt-.5)
ax.set_yticklabels((r_time).astype(int))
ax.set_xticklabels((r_space/100).astype(int))#, rotation='vertical')
ax.tick_params(axis = 'both', which = 'major', top =False, bottom = False, left = False, right = False)
#ax.grid(True, which='minor', color='k', linewidth=.1)
        return fig, ax
def plot(self,upper_d,upper_t, lower_d, lower_t, middle_d, middle_t, r_space, r_time, stat_d, stat_t, num_samples):
#1-d spatial Ripley's K
fig = plt.figure()
#plot CSR envelope
plt.plot(r_space/self.width, upper_d, color='red', ls=':', label='_nolegend_', linewidth = .5)
plt.plot(r_space/self.width, lower_d, color='red', ls=':', label='_nolegend_', linewidth = .5)
plt.plot(r_space/self.width, middle_d, color='red', ls=':', label='CSR', linewidth = .5)
plt.plot(r_space/self.width, stat_d, color = "black", linewidth = .5,label = str(num_samples)+ ' cutoffs')
plt.legend(loc = 'lower right')
plt.xlabel("d along centerline (ch-w)")
plt.ylabel('K/2-d')
plt.title("Homegrown 1D space EDF")
#plt.savefig(resultdir + str(year)+"yrs_Space_Ripley_"+mode+".jpg", dpi = 500)
plt.show()
#1-D Temporal Ripley's K
fig2 = plt.figure()
#plot CSR envelope
plt.plot(r_time, upper_t, color='red', ls=':',linewidth = .5, label='_nolegend_')
plt.plot(r_time, lower_t, color='red', ls=':',linewidth = .5, label='_nolegend_')
plt.plot(r_time, middle_t, color='red', ls=':',linewidth = .5, label='CSR')
plt.plot(r_time, stat_t, color = "black", linewidth = .5, label =str(num_samples)+ ' cutoffs')
plt.legend(loc = 'lower right')
plt.xlabel("t in years")
plt.ylabel('K/2 -t')
plt.title("Homegrown 1D time EDF")
plt.show()
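# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hedged example of how the estimator might be driven. The DataFrame
# columns 'downstream_distance' and 'time' follow mc_env above; all numbers here
# (domain sizes, channel width, sample count) are made-up assumptions.
if __name__ == '__main__':
    import pandas as pd
    demo_rng = np.random.default_rng(1)
    demo_cutoffs = pd.DataFrame({'downstream_distance': demo_rng.random(30) * 20000,
                                 'time': demo_rng.random(30) * 200})
    estimator = RipleysKEstimator_spacetime(t_max=200, d_max=20000, t_min=0,
                                            d_min=0, width=100, dt=1)
    # 2-D space-time K against a 99-iteration Monte Carlo envelope; plotornot=0 skips the figure
    normalized, middle, upper, lower = estimator(demo_cutoffs, mode='K_st',
                                                 max_search_d=10, max_search_t=10,
                                                 plotornot=0)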
``` |
{
"source": "josieheartthrob/guescii",
"score": 3
} |
#### File: guescii/source/game.py
```python
import string, subprocess, random
from source.gamedata import Data, EXACT, SIMILAR
from shellpages import Page, ParseError
# Global aliases
EXACT_CHAR, SIMILAR_CHAR = EXACT, SIMILAR
class Game(object):
"""The main class that runs an actual game."""
def __init__(self, settings, options, order):
"""Assumes settings is a settings dictionary;
Options is a dictionary of options;
        Order is a sequence of characters that represents the option order."""
options = options.copy()
options['/g'] = self._guess
self.__place = 0
self._settings = settings.copy()
self._answer = self._build_answer()
self._data = Data(settings)
self._page = Page(self._data.header, self._data.__str__(),
options, order, self._parse)
#-----Public properties-----
@property
def page(self):
return self._page
#-----Private properties-----
@property
def _place(self):
return self.__place
@_place.setter
def _place(self, other):
if other-self.__place != 1:
raise ValueError('place can only increment by 1')
elif self.__place+1 == self._settings['attempts']:
self._data.answer = self._answer
self.__place = other
#-----Private methods-----
def _build_answer(self):
"""Create a randomized answer combination."""
# Helper Variables
types = string.lowercase[:self._settings['types']]
# Main algorithm
answer = ''
for i in xrange(self._settings['length']):
answer += random.choice(types)
return answer
def _build_hint(self, guess):
"""Return a string that gives the user info about their guess.
Arguments:
guess ----- a string entered by the user
"""
# Here's a funny thing: In Tatham's source code they reference a
# wolfram alpha page that has the formula for this. But I actually
# figured this out before I even knew Tatham's code was open-source -
# because I'm a fucking math-genius.
# Helper variables
guess_map = {c: guess.count(c) for c in set(guess)}
answer_map = {c: self._answer.count(c) for c in set(self._answer)}
# Main algorithm
exact = sum([1 for i, c in enumerate(guess) if c == self._answer[i]])
similar = (sum([min(guess_map[c], answer_map[c]) for
c in answer_map if c in guess_map]) - exact)
return EXACT_CHAR*exact + SIMILAR_CHAR*similar
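    # Added illustrative note: with answer 'aabb' and guess 'abab', exact = 2
    # (positions 0 and 3 match); the shared-letter overlap is min(2,2) + min(2,2) = 4,
    # so similar = 4 - 2 = 2 and the hint is EXACT_CHAR*2 + SIMILAR_CHAR*2.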
def _parse(self, data):
"""Parse data to call an option or evaluate a guess.
Arguments:
data ----- a string entered by the user
"""
# Helper Variables
types = string.lowercase[:self._settings['types']]
guess = data.replace(' ', '')
# Main algorithm
if data in self._page.options.keys():
return data, (), {}
elif self._data.answer.replace(' ', '') == self._answer:
raise ParseError('Game over. Please choose an option.')
elif len(guess) != self._settings['length']:
raise ParseError('Guess must be exactly ' +
'{} letters long'.format(self._settings['length']))
elif not set(guess).issubset(set(types)):
raise ParseError('Guess must be composed of ' +
'[{}]'.format(types.replace('', ' ')[1:-1]))
else:
return '/g', [guess], {}
def _guess(self, combo):
"""Display accuracy of the given combination to the user.
Arguments:
combo ----- A combination string
Side Effects:
Modifies the private data property's answer, guesses,
and hints properties.
Modifies the private page property's body property.
"""
self._data.add_guess(combo, self._place)
hint = self._build_hint(combo)
self._data.add_hint(hint, self._place)
if combo == self._answer:
self._data.answer = self._answer
self._place += 1
self._page.body = self._data.__str__()
#------------------Testing--------------------
def test():
from shellpages import Option
from sys import exit
def close():
raw_input('> ')
subprocess.call('cls', shell=True)
exit()
settings = {'types': 6, 'length': 4, 'attempts': 5}
options = {'q': Option('q', 'quit', close)}
game = Game(settings, options, ['q'])
while True:
game._page()
if __name__ == '__main__':
test()
``` |
{
"source": "josieheartthrob/testtools",
"score": 4
} |
#### File: josieheartthrob/testtools/testtools.py
```python
function_message = 'expected: %s\nactual: %s'
method_message = '"%s" attribute\n' + function_message
def run_function_tests(function, cases, expected_values):
"""Run tests on a function
Assumes function returns a value
Assumes cases is an ordered collection of dictionaries as defined by
arguments in the module documentation
    Assumes expected_values is an ordered collection of objects as the expected
values of the function called with the arguments from cases at the same
index
    Yields tuples of a bool (result) and a string (message) as defined by
results in the module documentation
"""
message = function_message
for i, testcase in enumerate(cases):
expected = expected_values[i]
actual = function(*testcase.get('args', ()), **testcase.get('kwargs', {}))
yield actual == expected, message % (expected, actual)
def run_void_method_tests(instances, method_name, cases, targets):
"""Run tests on a void mutator method
Assumes instances is a list of object instances of a class. Each instance
has the method 'method_name'
Assumes method_name is a string as the name of the method being tested.
Assumes cases is an ordered collection of dictionaries as defined by
    arguments in the module documentation. must be same length as instances.
Assumes targets is a list of dictionaries that map strings (attribute)
to objects to objects (expected) where each attribute is the name of an
attribute mutated by this method and each expected is the expected value.
must be same length as instances.
    Yields tuples of a bool (result) and a string (message) as defined by
results in the module documentation. Each individual attribute test has no
local return order, but each attribute group returns in aggregate in the
same order as targets.
"""
message = method_message
for i, testcase in enumerate(cases):
instance = instances[i]
method = getattr(instance, method_name)
method(*testcase.get('args', ()), **testcase.get('kwargs', {}))
for attribute, expected in targets[i].items():
actual = getattr(instance, attribute)
yield expected == actual, message % (attribute, expected, actual)
def get_instances(ClassName, arguments, attributes):
"""Get instances of a class with specific attributes
Assumes ClassName is a class type
Assumes arguments is an ordered collection of dictionaries as defined
in the module documentation
Assumes attributes is a list of dictionaries that maps strings (name)
to objects (value). name is the name of an attribute in ClassName, and
value is what the value of the attribute should be. must be same length
as arguments.
Returns a list of instances of ClassName such that each relevant attribute
is as specified in attributes
"""
instances = []
for instance in arguments:
args = instance.get('args', ())
kwargs = instance.get('kwargs', {})
instances.append(ClassName(*args, **kwargs))
for i, instance in enumerate(instances):
for name, value in attributes[i].items():
setattr(instance, name, value)
return instances
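# --- Illustrative usage sketch (added; not part of the original module) ---
# A toy run of run_function_tests, assuming the cases/expected_values structure
# described in the docstrings above; the function and values are made up.
if __name__ == '__main__':
    def add(a, b=0):
        return a + b
    cases = [{'args': (1, 2)}, {'args': (3,), 'kwargs': {'b': 4}}]
    expected_values = [3, 7]
    for result, message in run_function_tests(add, cases, expected_values):
        print('PASS' if result else message)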
``` |
{
"source": "JosieHong/Handwritten-digit-recognition-based-on-CNN",
"score": 3
} |
#### File: JosieHong/Handwritten-digit-recognition-based-on-CNN/CIFARHelper.py
```python
import numpy as np
def one_hot_encode(vec, vals=10):
n = len(vec)
out = np.zeros((n, vals))
out[range(n), vec] = 1
return out
class CifarHelper(object):
def __init__(self, all_data):
self.i = 0
batch_meta = all_data[0]
data_batch1 = all_data[1]
data_batch2 = all_data[2]
data_batch3 = all_data[3]
data_batch4 = all_data[4]
data_batch5 = all_data[5]
test_batch = all_data[6]
# Grabs a list of all the data batches for training
self.all_train_batches = [data_batch1,data_batch2,data_batch3,data_batch4,data_batch5]
# Grabs a list of all the test batches (really just one batch)
self.test_batch = [test_batch]
# Intialize some empty variables for later on
self.training_images = None
self.training_labels = None
self.test_images = None
self.test_labels = None
def set_up_images(self):
print("Setting Up Training Images and Labels")
# Vertically stacks the training images
self.training_images = np.vstack([d[b"data"] for d in self.all_train_batches])
train_len = len(self.training_images)
# Reshapes and normalizes training images
self.training_images = self.training_images.reshape(train_len,3,32,32).transpose(0,2,3,1)/255
# One hot Encodes the training labels (e.g. [0,0,0,1,0,0,0,0,0,0])
self.training_labels = one_hot_encode(np.hstack([d[b"labels"] for d in self.all_train_batches]), 10)
print("Setting Up Test Images and Labels")
# Vertically stacks the test images
self.test_images = np.vstack([d[b"data"] for d in self.test_batch])
test_len = len(self.test_images)
# Reshapes and normalizes test images
self.test_images = self.test_images.reshape(test_len,3,32,32).transpose(0,2,3,1)/255
# One hot Encodes the test labels (e.g. [0,0,0,1,0,0,0,0,0,0])
self.test_labels = one_hot_encode(np.hstack([d[b"labels"] for d in self.test_batch]), 10)
def next_batch(self, batch_size):
        # Reshape to NHWC; -1 keeps the reshape valid for any batch size
        x = self.training_images[self.i:self.i+batch_size].reshape(-1, 32, 32, 3)
y = self.training_labels[self.i:self.i+batch_size]
self.i = (self.i + batch_size) % len(self.training_images)
return x, y
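# --- Illustrative usage sketch (added; not part of the original module) ---
# Assumes the standard CIFAR-10 pickle files have been extracted to
# 'cifar-10-batches-py/'; the path and batch size are assumptions.
if __name__ == '__main__':
    import pickle
    def unpickle(path):
        with open(path, 'rb') as f:
            return pickle.load(f, encoding='bytes')
    names = ['batches.meta'] + ['data_batch_%d' % i for i in range(1, 6)] + ['test_batch']
    all_data = [unpickle('cifar-10-batches-py/' + name) for name in names]
    helper = CifarHelper(all_data)
    helper.set_up_images()
    batch_x, batch_y = helper.next_batch(100)  # batch_x: (100, 32, 32, 3), batch_y: (100, 10)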
```
#### File: JosieHong/Handwritten-digit-recognition-based-on-CNN/ResNet_model.py
```python
import tensorflow as tf
from tensorflow.python.training import moving_averages
def variable_weight(name, shape, initializer, trainable=True):
return tf.get_variable(name, shape=shape, dtype=tf.float32,
initializer=initializer, trainable=trainable)
def conv_layer(x, num_outputs, kernel_size, stride=1, scope="conv2d"):
input_channels = x.get_shape()[-1]
with tf.variable_scope(scope):
kernel = variable_weight("kernel", [kernel_size, kernel_size, input_channels, num_outputs],
tf.contrib.layers.xavier_initializer_conv2d())
return tf.nn.conv2d(x, kernel, strides=[1, stride, stride, 1], padding="SAME")
def fc_layer(x, num_outputs, scope="fc"):
input_channels = x.get_shape()[-1]
with tf.variable_scope(scope):
W = variable_weight("weight", [input_channels, num_outputs],
tf.contrib.layers.xavier_initializer())
b = variable_weight("bias", [num_outputs,],
tf.zeros_initializer())
return tf.nn.xw_plus_b(x, W, b)
# batch norm layer
def batch_norm(x, decay=0.999, epsilon=1e-03, scope="scope"):
x_shape = x.get_shape()
input_channels = x_shape[-1]
reduce_dims = list(range(len(x_shape) - 1))
with tf.variable_scope(scope):
beta = variable_weight("beta", [input_channels,],
initializer=tf.zeros_initializer())
gamma = variable_weight("gamma", [input_channels,],
initializer=tf.ones_initializer())
# for inference
moving_mean = variable_weight("moving_mean", [input_channels,],
initializer=tf.zeros_initializer(), trainable=False)
moving_variance = variable_weight("moving_variance", [input_channels],
initializer=tf.ones_initializer(), trainable=False)
mean, variance = tf.nn.moments(x, axes=reduce_dims)
update_move_mean = moving_averages.assign_moving_average(moving_mean, mean, decay=decay)
update_move_variance = moving_averages.assign_moving_average(moving_variance, variance, decay=decay)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_mean)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_variance)
return tf.nn.batch_normalization(x, mean, variance, beta, gamma, epsilon)
def pool_layer(x, pool_size, pool_stride, name, padding='SAME', pooling_Mode='Max_Pool'):
if pooling_Mode=='Max_Pool':
return tf.nn.max_pool(x, [1, pool_size, pool_size, 1], [1, pool_stride, pool_stride, 1], padding = padding, name = name)
if pooling_Mode=='Avg_Pool':
return tf.nn.avg_pool(x, [1, pool_size, pool_size, 1], [1, pool_stride, pool_stride, 1], padding = padding, name = name)
class ResNet50(object):
def __init__(self, x, keep_prob, num_classes):
self.X =x
self.KEEP_PROB = keep_prob
self.NUM_CLASSES = num_classes
self._build_model()
def _build_model(self):
conv1 = conv_layer(self.X, 64, 7, 2, scope="conv1") # -> [batch, 112, 112, 64]
bn1 = tf.nn.relu(batch_norm(conv1, scope="bn1"))
maxpool1 = pool_layer(bn1, 3, 2, name="maxpool1", pooling_Mode = 'Max_Pool') # -> [batch, 56, 56, 64]
block2 = self._block(maxpool1, 256, 3, init_stride=1, scope="block2") # -> [batch, 56, 56, 256]
block3 = self._block(block2, 512, 4, scope="block3") # -> [batch, 28, 28, 512]
block4 = self._block(block3, 1024, 6, scope="block4") # -> [batch, 14, 14, 1024]
block5 = self._block(block4, 2048, 3, scope="block5") # -> [batch, 7, 7, 2048]
avgpool5 = pool_layer(block5, 7, 7, name="avgpool5", pooling_Mode = 'Avg_Pool') # -> [batch, 1, 1, 2048]
spatialsqueeze = tf.squeeze(avgpool5, [1, 2], name="SpatialSqueeze") # -> [batch, 2048]
self.logits = fc_layer(spatialsqueeze, self.NUM_CLASSES, "fc6") # -> [batch, num_classes]
self.output = tf.nn.softmax(self.logits)
def _block(self, x, n_out, n, init_stride=2, scope="block"):
with tf.variable_scope(scope):
h_out = n_out // 4
out = self._bottleneck(x, h_out, n_out, stride=init_stride, scope="bottlencek1")
for i in range(1, n):
out = self._bottleneck(out, h_out, n_out, scope=("bottlencek%s" % (i + 1)))
return out
def _bottleneck(self, x, h_out, n_out, stride=None, scope="bottleneck"):
input_channels = x.get_shape()[-1]
if stride is None:
stride = 1 if input_channels == n_out else 2
with tf.variable_scope(scope):
h = conv_layer(x, h_out, 1, stride=stride, scope="conv_1")
h = batch_norm(h, scope="bn_1")
h = tf.nn.relu(h)
h = conv_layer(h, h_out, 3, stride=1, scope="conv_2")
h = batch_norm(h, scope="bn_2")
h = tf.nn.relu(h)
h = conv_layer(h, n_out, 1, stride=1, scope="conv_3")
h = batch_norm(h, scope="bn_3")
if input_channels != n_out:
shortcut = conv_layer(x, n_out, 1, stride=stride, scope="conv_4")
shortcut = batch_norm(shortcut, scope="bn_4")
else:
shortcut = x
return tf.nn.relu(shortcut + h)
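# --- Illustrative graph-construction sketch (added; not part of the original module) ---
# Shows one way this TF1-style model is typically wired for training; the input
# resolution (224x224, matching the shape comments above), class count, and
# learning rate are assumptions.
if __name__ == '__main__':
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
    y_true = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)
    model = ResNet50(x, keep_prob, num_classes=10)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true, logits=model.logits))
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)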
```
#### File: JosieHong/Handwritten-digit-recognition-based-on-CNN/VGG16_model.py
```python
import tensorflow as tf
def conv_layer(x, num_filters, name, filter_height = 3, filter_width = 3, stride = 1, padding = 'SAME'):
input_channels = int(x.get_shape()[-1])
with tf.variable_scope(name) as scope:
W = tf.get_variable('weights', shape = [filter_height, filter_width, input_channels, num_filters],
initializer = tf.random_normal_initializer(mean = 0, stddev = 0.01))
b = tf.get_variable('biases', shape = [num_filters],
initializer = tf.constant_initializer(0.0))
conv = tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = padding)
z = tf.nn.bias_add(conv, b)
return tf.nn.relu(z, name = scope.name)
def fc_layer(x, input_size, output_size, name, activation = 'relu'):
with tf.variable_scope(name) as scope:
W = tf.get_variable('weights', shape = [input_size, output_size],
initializer = tf.random_normal_initializer(mean = 0, stddev = 0.01))
b = tf.get_variable('biases', shape = [output_size],
initializer = tf.constant_initializer(1.0))
z = tf.nn.bias_add(tf.matmul(x, W), b, name = scope.name)
if activation == 'relu':
            # Apply ReLU non-linearity.
return tf.nn.relu(z, name = scope.name)
elif activation == 'softmax':
return tf.nn.softmax(z, name = scope.name)
else:
return z
def max_pool(x, name, filter_height = 2, filter_width = 2, stride = 2, padding = 'SAME'):
return tf.nn.max_pool(x, ksize = [1, filter_height, filter_width, 1],
strides = [1, stride, stride, 1], padding = padding,
name = name)
def dropout(x, keep_prob):
return tf.nn.dropout(x, keep_prob = keep_prob)
class VGG16(object):
def __init__(self, x, keep_prob, num_classes):
self.X = x
self.KEEP_PROB = keep_prob
self.NUM_CLASSES = num_classes
self._build_model()
def _build_model(self):
# Block 1
block1_conv1 = conv_layer(self.X, 64, name = 'block1_conv1')
block1_conv2 = conv_layer(block1_conv1, 64, name='block1_conv2')
block1_pool = max_pool(block1_conv2, name='block1_pool')
# Block 2
block2_conv1 = conv_layer(block1_pool, 128, name = 'block2_conv1')
block2_conv2 = conv_layer(block2_conv1, 128, name = 'block2_conv2')
block2_pool = max_pool(block2_conv2, name = 'block2_pool')
# Block 3
block3_conv1 = conv_layer(block2_pool, 256, name = 'block3_conv1')
block3_conv2 = conv_layer(block3_conv1, 256, name = 'block3_conv2')
block3_conv3 = conv_layer(block3_conv2, 256, name = 'block3_conv3')
block3_pool = max_pool(block3_conv3, name = 'block3_pool')
# Block 4
block4_conv1 = conv_layer(block3_pool, 512, name = 'block4_conv1')
block4_conv2 = conv_layer(block4_conv1, 512, name = 'block4_conv2')
block4_conv3 = conv_layer(block4_conv2, 512, name = 'block4_conv3')
block4_pool = max_pool(block4_conv3, name = 'block4_pool')
# Block 5
block5_conv1 = conv_layer(block4_pool, 512, name = 'block5_conv1')
block5_conv2 = conv_layer(block5_conv1, 512, name = 'block5_conv2')
block5_conv3 = conv_layer(block5_conv2, 512, name = 'block5_conv3')
block5_pool = max_pool(block5_conv3, name = 'block5_pool')
# Full connection layers
        # In the original paper implementation this will be:
#flattened = tf.reshape(block5_pool, [-1, 7*7*512])
#fc1 = fc_layer(flattened, 7*7*512, 7*7*512, name = 'fc1')
flattened = tf.reshape(block5_pool, [-1, 1*1*512])
fc1 = fc_layer(flattened, 1*1*512, 1*1*512, name = 'fc1', activation = 'relu')
dropout1 = dropout(fc1, self.KEEP_PROB)
        # In the original paper implementation this will be:
#fc2 = fc_layer(dropout1, 7*7*512, 7*7*512, name = 'fc1')
fc2 = fc_layer(dropout1, 1*1*512, 1*1*512, name = 'fc2', activation = 'relu')
dropout2 = dropout(fc2, self.KEEP_PROB)
        # In the original paper implementation this will be:
#self.fc3 = fc_layer(dropout2, 7*7*512, self.NUM_CLASSES, name = 'fc3', relu = False)
fc3 = fc_layer(dropout2, 1*1*512, self.NUM_CLASSES, name = 'fc3', activation = 'softmax')
self.output = fc3
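# --- Illustrative graph-construction sketch (added; not part of the original module) ---
# The 1*1*512 flatten above implies a 32x32 input after five 2x poolings, so the
# placeholder shape below is an assumption consistent with that; the class count is made up.
if __name__ == '__main__':
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    keep_prob = tf.placeholder(tf.float32)
    model = VGG16(x, keep_prob, num_classes=10)
    # model.output is already softmax-normalized with shape [batch, 10]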
``` |
{
"source": "JosieHong/VOS_with_Seg-Siam",
"score": 2
} |
#### File: mmdet/datasets/davis.py
```python
import os.path as osp
import warnings
import math
import cv2
import mmcv
import numpy as np
from imagecorruptions import corrupt
from mmcv.parallel import DataContainer as DC
import torch
from .utils import random_scale, to_tensor
from .registry import DATASETS
from .coco_seg import Coco_Seg_Dataset, INF
@DATASETS.register_module
class DAVIS_Seg_Dataset(Coco_Seg_Dataset):
# davis 2016
# CLASSES = ('aerobatics', 'bear', 'bike-packing', 'blackswan', 'bmx-bumps',
# 'bmx-trees', 'boat', 'boxing-fisheye', 'breakdance', 'breakdance-flare',
# 'bus', 'camel', 'car-race', 'car-roundabout', 'car-shadow',
# 'car-turn', 'carousel', 'cat-girl', 'cats-car', 'chamaleon',
# 'classic-car', 'color-run', 'cows', 'crossing', 'dance-jump',
# 'dance-twirl', 'dancing', 'deer', 'disc-jockey', 'dog',
# 'dog-agility', 'dog-gooses', 'dogs-jump', 'dogs-scale', 'drift-chicane',
# 'drift-straight', 'drift-turn', 'drone', 'elephant', 'flamingo',
# 'giant-slalom', 'girl-dog', 'goat', 'gold-fish', 'golf',
# 'guitar-violin', 'gym', 'helicopter', 'hike', 'hockey',
# 'horsejump-high', 'horsejump-low', 'horsejump-stick', 'hoverboard', 'india',
# 'judo', 'kid-football', 'kite-surf', 'kite-walk', 'koala',
# 'lab-coat', 'lady-running', 'libby', 'lindy-hop', 'loading',
# 'lock', 'longboard', 'lucia', 'mallard-fly', 'mallard-water',
# 'man-bike', 'mbike-trick', 'miami-surf', 'monkeys-trees', 'motocross-bumps',
# 'motocross-jump', 'motorbike', 'mtb-race', 'night-race', 'orchid',
# 'paragliding', 'paragliding-launch', 'parkour', 'people-sunset', 'pigs',
# 'planes-crossing', 'planes-water', 'rallye', 'rhino', 'rollerblade',
# 'rollercoaster', 'salsa', 'schoolgirls', 'scooter-black', 'scooter-board',
# 'scooter-gray', 'seasnake', 'sheep', 'shooting', 'skate-jump',
# 'skate-park', 'slackline', 'snowboard', 'soapbox', 'soccerball',
# 'stroller', 'stunt', 'subway', 'surf', 'swing',
# 'tandem', 'tennis', 'tennis-vest', 'tractor', 'tractor-sand',
# 'train', 'tuk-tuk', 'upside-down', 'varanus-cage', 'walking')
# davis 2017
CLASSES = ('bear', 'bike-packing', 'blackswan', 'bmx-bumps',
'bmx-trees', 'boat', 'boxing-fisheye', 'breakdance', 'breakdance-flare',
'bus', 'camel', 'car-roundabout', 'car-shadow',
'car-turn', 'cat-girl', 'classic-car', 'color-run', 'cows', 'crossing', 'dance-jump',
'dance-twirl', 'dancing', 'deer', 'disc-jockey', 'dog',
'dog-agility', 'dog-gooses', 'dogs-jump', 'dogs-scale', 'drift-chicane',
'drift-straight', 'drift-turn', 'drone', 'elephant', 'flamingo',
'goat', 'gold-fish',
'hike', 'hockey',
'horsejump-high', 'horsejump-low', 'india',
'judo', 'kid-football', 'kite-surf', 'kite-walk', 'koala',
'lab-coat', 'lady-running', 'libby', 'lindy-hop', 'loading',
'longboard', 'lucia', 'mallard-fly', 'mallard-water',
'mbike-trick', 'miami-surf', 'motocross-bumps',
'motocross-jump', 'motorbike', 'night-race',
'paragliding', 'paragliding-launch', 'parkour', 'pigs',
'planes-water', 'rallye', 'rhino', 'rollerblade',
'schoolgirls', 'scooter-black', 'scooter-board',
'scooter-gray', 'sheep', 'shooting',
'skate-park', 'snowboard', 'soapbox', 'soccerball',
'stroller', 'stunt', 'surf', 'swing',
'tennis', 'tractor-sand',
'train', 'tuk-tuk', 'upside-down', 'varanus-cage', 'walking')
def __init__(self,
ann_file,
img_prefix,
img_scale,
img_norm_cfg,
refer_scale=(127,127),
num_polar=36,
multiscale_mode='value',
size_divisor=None,
proposal_file=None,
num_max_proposals=1000,
flip_ratio=0,
with_mask=True,
with_crowd=True,
with_label=True,
with_semantic_seg=False,
seg_prefix=None,
seg_scale_factor=1,
extra_aug=None,
resize_keep_ratio=True,
corruption=None,
corruption_severity=1,
skip_img_without_anno=True,
test_mode=False,
strides=[8, 16, 32, 64, 128],
regress_ranges=[(-1, 64), (64, 128),
(128, 256), (256, 512), (512, 1e8)]):
super(DAVIS_Seg_Dataset, self).__init__(ann_file,
img_prefix,
img_scale,
img_norm_cfg,
multiscale_mode,
size_divisor,
proposal_file,
num_max_proposals,
flip_ratio,
with_mask,
with_crowd,
with_label,
with_semantic_seg,
seg_prefix,
seg_scale_factor,
extra_aug,
resize_keep_ratio,
corruption,
corruption_severity,
skip_img_without_anno,
test_mode)
self.refer_scale = refer_scale
self.strides = strides
self.regress_ranges = regress_ranges
assert num_polar in [36, 72, 180]
self.num_polar = num_polar
def prepare_train_img(self, idx):
img_info = self.img_infos[idx]
img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
# corruption
if self.corruption is not None:
img = corrupt(
img,
severity=self.corruption_severity,
corruption_name=self.corruption)
# load proposals if necessary
if self.proposals is not None:
proposals = self.proposals[idx][:self.num_max_proposals]
# TODO: Handle empty proposals properly. Currently images with
# no proposals are just ignored, but they can be used for
# training in concept.
if len(proposals) == 0:
return None
if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):
raise AssertionError(
'proposals should have shapes (n, 4) or (n, 5), '
'but found {}'.format(proposals.shape))
if proposals.shape[1] == 5:
scores = proposals[:, 4, None]
proposals = proposals[:, :4]
else:
scores = None
ann = self.get_ann_info(idx)
gt_bboxes = ann['bboxes']
gt_labels = ann['labels']
if self.with_crowd:
gt_bboxes_ignore = ann['bboxes_ignore']
# skip the image if there is no valid gt bbox
if len(gt_bboxes) == 0 and self.skip_img_without_anno:
warnings.warn('Skip the image "%s" that has no valid gt bbox' %
osp.join(self.img_prefix, img_info['filename']))
return None
# apply transforms
flip = True if np.random.rand() < self.flip_ratio else False
# randomly sample a scale
img_scale = random_scale(self.img_scales, self.multiscale_mode)
img, img_shape, pad_shape, scale_factor = self.img_transform(img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
img = img.copy()
# get img_refer from first frame
first_frame_idx = img_info["first_frame"]
refer_info = self.img_infos[first_frame_idx]
refer_ann = self.get_ann_info(first_frame_idx)
img_refer = mmcv.imread(osp.join(self.img_prefix, refer_info['filename']))
# crop the bbox
img_refer = torch.squeeze(torch.Tensor(mmcv.imcrop(img_refer, refer_ann["bboxes"])))
# resize to refer_scale
img_refer = torch.Tensor(mmcv.imresize(np.float32(img_refer), self.refer_scale, return_scale=False)).permute(2, 0, 1)
if self.with_seg:
gt_seg = mmcv.imread(
osp.join(self.seg_prefix,
img_info['filename'].replace('jpg', 'png')),
flag='unchanged')
gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
gt_seg = mmcv.imrescale(
gt_seg, self.seg_scale_factor, interpolation='nearest')
gt_seg = gt_seg[None, ...]
if self.proposals is not None:
proposals = self.bbox_transform(proposals, img_shape, scale_factor,
flip)
proposals = np.hstack([proposals, scores
]) if scores is not None else proposals
gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor,
flip)
if self.with_crowd:
gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
scale_factor, flip)
if self.with_mask:
gt_masks = self.mask_transform(ann['masks'], pad_shape,
scale_factor, flip)
ori_shape = (img_info['height'], img_info['width'], 3)
img_meta = dict(
ori_shape=ori_shape,
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=flip)
data = dict(
img=DC(to_tensor(img), stack=True),
img_meta=DC(img_meta, cpu_only=True),
gt_bboxes=DC(to_tensor(gt_bboxes)),
img_refer=DC(to_tensor(img_refer), stack=True))
if self.with_label:
data['gt_labels'] = DC(to_tensor(gt_labels))
if self.with_crowd:
data['gt_bboxes_ignore'] = DC(to_tensor(gt_bboxes_ignore))
if self.with_mask:
data['gt_masks'] = DC(gt_masks, cpu_only=True)
#--------------------offline ray label generation-----------------------------
self.center_sample = True
self.use_mask_center = True
self.radius = 1.5
featmap_sizes = self.get_featmap_size(pad_shape)
# featmap_sizes: [[32, 32], [16, 16], [8, 8]]
num_levels = len(self.strides)
all_level_points = self.get_points(featmap_sizes)
# level 0 points: torch.Size([1024, 2])
# level 1 points: torch.Size([256, 2])
# level 2 points: torch.Size([64, 2])
self.num_points_per_level = [i.size()[0] for i in all_level_points]
expanded_regress_ranges = [
all_level_points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
all_level_points[i]) for i in range(num_levels)
]
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(all_level_points, 0)
gt_masks = gt_masks[:len(gt_bboxes)]
gt_bboxes = torch.Tensor(gt_bboxes)
gt_labels = torch.Tensor(gt_labels)
_labels, _bbox_targets, _mask_targets = self.polar_target_single(
gt_bboxes,gt_masks,gt_labels,concat_points, concat_regress_ranges, self.num_polar)
data['_gt_labels'] = DC(_labels)
data['_gt_bboxes'] = DC(_bbox_targets)
data['_gt_masks'] = DC(_mask_targets)
#--------------------offline ray label generation-----------------------------
return data
def get_featmap_size(self, shape):
h,w = shape[:2]
featmap_sizes = []
for i in self.strides:
featmap_sizes.append([int(h / i)+1, int(w / i)+1])
return featmap_sizes
def prepare_test_img(self, idx):
"""Prepare an image for testing (multi-scale and flipping)"""
img_info = self.img_infos[idx]
img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
# corruption
if self.corruption is not None:
img = corrupt(
img,
severity=self.corruption_severity,
corruption_name=self.corruption)
# load proposals if necessary
if self.proposals is not None:
proposal = self.proposals[idx][:self.num_max_proposals]
if not (proposal.shape[1] == 4 or proposal.shape[1] == 5):
raise AssertionError(
'proposals should have shapes (n, 4) or (n, 5), '
'but found {}'.format(proposal.shape))
else:
proposal = None
# get img_refer from first frame
first_frame_idx = img_info["first_frame"]
refer_info = self.img_infos[first_frame_idx]
refer_ann = self.get_ann_info(first_frame_idx)
img_refer = mmcv.imread(osp.join(self.img_prefix, refer_info['filename']))
# crop the bbox
img_refer = torch.squeeze(torch.Tensor(mmcv.imcrop(img_refer, refer_ann["bboxes"])))
# resize to refer_scale
img_refer = torch.Tensor(mmcv.imresize(np.float32(img_refer), self.refer_scale, return_scale=False)).permute(2, 0, 1)
def prepare_single(img, scale, flip, proposal=None):
_img, img_shape, pad_shape, scale_factor = self.img_transform(
img, scale, flip, keep_ratio=self.resize_keep_ratio)
_img = to_tensor(_img)
_img_meta = dict(
ori_shape=(img_info['height'], img_info['width'], 3),
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=flip)
if proposal is not None:
if proposal.shape[1] == 5:
score = proposal[:, 4, None]
proposal = proposal[:, :4]
else:
score = None
_proposal = self.bbox_transform(proposal, img_shape,
scale_factor, flip)
_proposal = np.hstack([_proposal, score
]) if score is not None else _proposal
_proposal = to_tensor(_proposal)
else:
_proposal = None
return _img, _img_meta, _proposal
imgs = []
img_metas = []
img_refers = []
proposals = []
for scale in self.img_scales:
_img, _img_meta, _proposal = prepare_single(
img, scale, False, proposal)
imgs.append(_img)
img_metas.append(DC(_img_meta, cpu_only=True))
img_refers.append(DC(to_tensor(img_refer), stack=True))
proposals.append(_proposal)
if self.flip_ratio > 0:
_img, _img_meta, _proposal = prepare_single(
img, scale, True, proposal)
imgs.append(_img)
img_metas.append(DC(_img_meta, cpu_only=True))
img_refers.append(DC(to_tensor(img_refer), stack=True))
proposals.append(_proposal)
data = dict(img=imgs,
img_meta=img_metas,
img_refer=img_refers)
if self.proposals is not None:
data['proposals'] = proposals
return data
    # fit different polar numbers
def polar_target_single(self, gt_bboxes, gt_masks, gt_labels, points, regress_ranges, num_polar):
num_points = points.size(0)
num_gts = gt_labels.size(0)
if num_gts == 0:
return gt_labels.new_zeros(num_points), \
gt_bboxes.new_zeros((num_points, 4))
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
        # xs, ys are the x and y coordinates of the points
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
        bbox_targets = torch.stack((left, top, right, bottom), -1)  # left/top/right/bottom distances from every feature-map point to the gt box, [num_pix, num_gt, 4]
        # mask targets are built the same way; labels are assigned from the mask centroid rather than the bbox center
mask_centers = []
mask_contours = []
        # step 1: compute each mask's centroid, returns [num_gt, 2]
for mask in gt_masks:
cnt, contour = self.get_single_centerpoint(mask)
contour = contour[0]
contour = torch.Tensor(contour).float()
y, x = cnt
mask_centers.append([x,y])
mask_contours.append(contour)
mask_centers = torch.Tensor(mask_centers).float()
        # assign mask_centers to different pyramid levels according to regress_range and centroid location
mask_centers = mask_centers[None].expand(num_points, num_gts, 2)
#---------------------------------------------------------------------------
# condition1: inside a gt bbox
# add center sample
if self.center_sample:
if self.use_mask_center:
inside_gt_bbox_mask = self.get_mask_sample_region(gt_bboxes,
mask_centers,
self.strides,
self.num_points_per_level,
xs,
ys,
radius=self.radius)
else:
inside_gt_bbox_mask = self.get_sample_region(gt_bboxes,
self.strides,
self.num_points_per_level,
xs,
ys,
radius=self.radius)
else:
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
max_regress_distance >= regress_ranges[..., 0]) & (
max_regress_distance <= regress_ranges[..., 1])
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
        labels[min_area == INF] = 0  # 0 = background; otherwise the matched gt class index
bbox_targets = bbox_targets[range(num_points), min_area_inds]
pos_inds = labels.nonzero().reshape(-1)
mask_targets = torch.zeros(num_points, num_polar).float()
pos_mask_ids = min_area_inds[pos_inds]
for p,id in zip(pos_inds, pos_mask_ids):
x, y = points[p]
pos_mask_contour = mask_contours[id]
# SiamPolar: interpolate
new_contour = []
contour_length = len(pos_mask_contour)
for i in range(contour_length):
new_contour.append(pos_mask_contour[i])
# new_contour.append((3*pos_mask_contour[i]+pos_mask_contour[(i+1)%contour_length])/4)
new_contour.append((pos_mask_contour[i]+pos_mask_contour[(i+1)%contour_length])/2)
# new_contour.append((pos_mask_contour[i]+3*pos_mask_contour[(i+1)%contour_length])/4)
new_pos_mask_contour = torch.cat(new_contour, dim=0).unsqueeze(1)
# print(pos_mask_contour.size())
# print(new_pos_mask_contour.size())
# print(new_pos_mask_contour)
# exit()
dists, coords = self.get_coordinates(x, y, new_pos_mask_contour, num_polar)
mask_targets[p] = dists
return labels, bbox_targets, mask_targets
def get_coordinates(self, c_x, c_y, pos_mask_contour, num_polar):
ct = pos_mask_contour[:, 0, :]
x = ct[:, 0] - c_x
y = ct[:, 1] - c_y
# angle = np.arctan2(x, y)*180/np.pi
angle = torch.atan2(x, y) * 180 / np.pi
angle[angle < 0] += 360
angle = angle.int()
# dist = np.sqrt(x ** 2 + y ** 2)
dist = torch.sqrt(x ** 2 + y ** 2)
angle, idx = torch.sort(angle)
dist = dist[idx]
        # generate num_polar angles: for each target angle keep the farthest contour
        # point whose angle lies within +/-5 degrees, preferring the smallest offset
        # (offsets of 4 and 5 degrees were added for SiamPolar, per the josie.add note)
        new_coordinate = {}
        step_size = int(360/num_polar)
        offsets = [0, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5]
        for i in range(0, 360, step_size):
            for offset in offsets:
                if i + offset in angle:
                    new_coordinate[i] = dist[angle == i + offset].max()
                    break
distances = torch.zeros(num_polar)
for a in range(0, 360, step_size):
if not a in new_coordinate.keys():
new_coordinate[a] = torch.tensor(1e-6)
distances[a//step_size] = 1e-6
else:
distances[a//step_size] = new_coordinate[a]
return distances, new_coordinate
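    # Added note: with num_polar=36 the contour is sampled every 10 degrees, so
    # distances[k] is the ray length from (c_x, c_y) to the contour near angle 10*k;
    # angles with no contour point within +/-5 degrees fall back to 1e-6.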
def __getitem__(self, idx):
if self.test_mode:
return self.prepare_test_img(idx)
while True:
data = self.prepare_train_img(idx)
if data is None:
idx = self._rand_another(idx)
continue
return data
def merge_contours(self, contours):
alpha = 0.25
# init
b = contours[0][:, 0, :]
cx, cy = b.mean(axis=0)
# guarantee that the threshold is at the same level as the object size
# thrx = contours[0][:, 0, :][:, 0].max() - contours[0][:, 0, :][:, 0].min()
# thry = contours[0][:, 0, :][:, 1].max() - contours[0][:, 0, :][:, 1].min()
records = [0 for i in range(len(contours))]
new_contours = [contours[0]]
records[0] = 1
flag = True
while (flag == True):
flag = False
for i in range(1, len(contours)-1):
tmp = contours[i][:, 0, :]
tx, ty = tmp.mean(axis=0)
if records[i] == 0:
d = math.sqrt((cx - tx) ** 2 + (cy - ty) ** 2)
lx = b[:, 0].max() - b[:, 0].min() + tmp[:, 0].max() - tmp[:, 0].min()
ly = b[:, 1].max() - b[:, 1].min() + tmp[:, 1].max() - tmp[:, 1].min()
l = math.sqrt(lx ** 2 + ly ** 2)
# print("d: {}, l: {}".format(d, l))
if d <= alpha * l:
# print("Add a new contour!")
new_contours.append(contours[i])
records[i] = 1
flag = True
cx = (cx + tx) / 2
cy = (cy + ty) / 2
return new_contours
def get_single_centerpoint(self, mask):
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours.sort(key=lambda x: cv2.contourArea(x), reverse=True) # only save the biggest one
'''debug IndexError: list index out of range'''
if len(contours) == 0:
return None, None
count = contours[0][:, 0, :]
try:
center = self.get_centerpoint(count)
except:
x,y = count.mean(axis=0)
center = [int(x), int(y)]
if len(contours) > 1:
# keep the contours near the biggest contour
new_contours = self.merge_contours(contours)
else:
new_contours = [contours[0]] # the biggest contour
return center, new_contours
```
#### File: mmdet/datasets/tsd_max.py
```python
import os.path as osp
import warnings
import mmcv
import numpy as np
from imagecorruptions import corrupt
from mmcv.parallel import DataContainer as DC
import torch
from .utils import random_scale, to_tensor
from .registry import DATASETS
from .coco_seg import Coco_Seg_Dataset, INF
@DATASETS.register_module
class TSD_MAX_Seg_Dataset(Coco_Seg_Dataset):
CLASSES = ('Section8', 'Section6', 'Section63', 'Section33', 'Section11',
'Section2', 'Section48', 'Section13', 'Section64', 'Section4',
'Section75')
def __init__(self,
ann_file,
img_prefix,
img_scale,
img_norm_cfg,
refer_scale=(127,127),
num_polar=36,
multiscale_mode='value',
size_divisor=None,
proposal_file=None,
num_max_proposals=1000,
flip_ratio=0,
with_mask=True,
with_crowd=True,
with_label=True,
with_semantic_seg=False,
seg_prefix=None,
seg_scale_factor=1,
extra_aug=None,
resize_keep_ratio=True,
corruption=None,
corruption_severity=1,
skip_img_without_anno=True,
test_mode=False,
strides=[8, 16, 32, 64, 128],
regress_ranges=[(-1, 64), (64, 128),
(128, 256), (256, 512), (512, 1e8)]):
super(TSD_MAX_Seg_Dataset, self).__init__(ann_file,
img_prefix,
img_scale,
img_norm_cfg,
multiscale_mode,
size_divisor,
proposal_file,
num_max_proposals,
flip_ratio,
with_mask,
with_crowd,
with_label,
with_semantic_seg,
seg_prefix,
seg_scale_factor,
extra_aug,
resize_keep_ratio,
corruption,
corruption_severity,
skip_img_without_anno,
test_mode)
self.refer_scale = refer_scale
self.strides = strides
self.regress_ranges = regress_ranges
assert num_polar in [36, 72]
self.num_polar = num_polar
def prepare_train_img(self, idx):
img_info = self.img_infos[idx]
img = mmcv.imread(osp.join(self.img_prefix[:-11], img_info['filename']))
# corruption
if self.corruption is not None:
img = corrupt(
img,
severity=self.corruption_severity,
corruption_name=self.corruption)
# load proposals if necessary
if self.proposals is not None:
proposals = self.proposals[idx][:self.num_max_proposals]
# TODO: Handle empty proposals properly. Currently images with
# no proposals are just ignored, but they can be used for
# training in concept.
if len(proposals) == 0:
return None
if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):
raise AssertionError(
'proposals should have shapes (n, 4) or (n, 5), '
'but found {}'.format(proposals.shape))
if proposals.shape[1] == 5:
scores = proposals[:, 4, None]
proposals = proposals[:, :4]
else:
scores = None
ann = self.get_ann_info(idx)
gt_bboxes = ann['bboxes']
gt_labels = ann['labels']
if self.with_crowd:
gt_bboxes_ignore = ann['bboxes_ignore']
# skip the image if there is no valid gt bbox
if len(gt_bboxes) == 0 and self.skip_img_without_anno:
warnings.warn('Skip the image "%s" that has no valid gt bbox' %
osp.join(self.img_prefix, img_info['filename']))
return None
# apply transforms
flip = True if np.random.rand() < self.flip_ratio else False
# randomly sample a scale
img_scale = random_scale(self.img_scales, self.multiscale_mode)
img, img_shape, pad_shape, scale_factor = self.img_transform(img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
img = img.copy()
# get img_refer from first frame
first_frame_idx = img_info["first_frame"]
refer_info = self.img_infos[first_frame_idx]
refer_ann = self.get_ann_info(first_frame_idx)
img_refer = mmcv.imread(osp.join(self.img_prefix[:-11], refer_info['filename']))
# crop the bbox
img_refer = torch.squeeze(torch.Tensor(mmcv.imcrop(img_refer, refer_ann["bboxes"])))
# resize to refer_scale
img_refer = torch.Tensor(mmcv.imresize(np.float32(img_refer), self.refer_scale, return_scale=False)).permute(2, 0, 1)
if self.with_seg:
gt_seg = mmcv.imread(
osp.join(self.seg_prefix,
img_info['filename'].replace('jpg', 'png')),
flag='unchanged')
gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
gt_seg = mmcv.imrescale(
gt_seg, self.seg_scale_factor, interpolation='nearest')
gt_seg = gt_seg[None, ...]
if self.proposals is not None:
proposals = self.bbox_transform(proposals, img_shape, scale_factor,
flip)
proposals = np.hstack([proposals, scores
]) if scores is not None else proposals
gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor,
flip)
if self.with_crowd:
gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
scale_factor, flip)
if self.with_mask:
gt_masks = self.mask_transform(ann['masks'], pad_shape,
scale_factor, flip)
ori_shape = (img_info['height'], img_info['width'], 3)
img_meta = dict(
ori_shape=ori_shape,
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=flip)
data = dict(
img=DC(to_tensor(img), stack=True),
img_meta=DC(img_meta, cpu_only=True),
gt_bboxes=DC(to_tensor(gt_bboxes)),
img_refer=DC(to_tensor(img_refer), stack=True))
if self.with_label:
data['gt_labels'] = DC(to_tensor(gt_labels))
if self.with_crowd:
data['gt_bboxes_ignore'] = DC(to_tensor(gt_bboxes_ignore))
if self.with_mask:
data['gt_masks'] = DC(gt_masks, cpu_only=True)
#--------------------offline ray label generation-----------------------------
self.center_sample = True
self.use_mask_center = True
self.radius = 1.5
featmap_sizes = self.get_featmap_size(pad_shape)
# featmap_sizes: [[32, 32], [16, 16], [8, 8]]
num_levels = len(self.strides)
all_level_points = self.get_points(featmap_sizes)
# level 0 points: torch.Size([1024, 2])
# level 1 points: torch.Size([256, 2])
# level 2 points: torch.Size([64, 2])
self.num_points_per_level = [i.size()[0] for i in all_level_points]
expanded_regress_ranges = [
all_level_points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
all_level_points[i]) for i in range(num_levels)
]
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(all_level_points, 0)
gt_masks = gt_masks[:len(gt_bboxes)]
gt_bboxes = torch.Tensor(gt_bboxes)
gt_labels = torch.Tensor(gt_labels)
_labels, _bbox_targets, _mask_targets = self.polar_target_single(
gt_bboxes,gt_masks,gt_labels,concat_points, concat_regress_ranges, self.num_polar)
data['_gt_labels'] = DC(_labels)
data['_gt_bboxes'] = DC(_bbox_targets)
data['_gt_masks'] = DC(_mask_targets)
#--------------------offline ray label generation-----------------------------
return data
def get_featmap_size(self, shape):
h,w = shape[:2]
featmap_sizes = []
for i in self.strides:
featmap_sizes.append([int(h / i)+1, int(w / i)+1])
return featmap_sizes
def prepare_test_img(self, idx):
"""Prepare an image for testing (multi-scale and flipping)"""
img_info = self.img_infos[idx]
img = mmcv.imread(osp.join(self.img_prefix[:-11], img_info['filename']))
# corruption
if self.corruption is not None:
img = corrupt(
img,
severity=self.corruption_severity,
corruption_name=self.corruption)
# load proposals if necessary
if self.proposals is not None:
proposal = self.proposals[idx][:self.num_max_proposals]
if not (proposal.shape[1] == 4 or proposal.shape[1] == 5):
raise AssertionError(
'proposals should have shapes (n, 4) or (n, 5), '
'but found {}'.format(proposal.shape))
else:
proposal = None
# get img_refer from first frame
first_frame_idx = img_info["first_frame"]
refer_info = self.img_infos[first_frame_idx]
refer_ann = self.get_ann_info(first_frame_idx)
img_refer = mmcv.imread(osp.join(self.img_prefix[:-11], refer_info['filename']))
# crop the bbox
img_refer = torch.squeeze(torch.Tensor(mmcv.imcrop(img_refer, refer_ann["bboxes"])))
# resize to refer_scale
img_refer = torch.Tensor(mmcv.imresize(np.float32(img_refer), self.refer_scale, return_scale=False)).permute(2, 0, 1)
def prepare_single(img, scale, flip, proposal=None):
_img, img_shape, pad_shape, scale_factor = self.img_transform(
img, scale, flip, keep_ratio=self.resize_keep_ratio)
_img = to_tensor(_img)
_img_meta = dict(
ori_shape=(img_info['height'], img_info['width'], 3),
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=flip)
if proposal is not None:
if proposal.shape[1] == 5:
score = proposal[:, 4, None]
proposal = proposal[:, :4]
else:
score = None
_proposal = self.bbox_transform(proposal, img_shape,
scale_factor, flip)
_proposal = np.hstack([_proposal, score
]) if score is not None else _proposal
_proposal = to_tensor(_proposal)
else:
_proposal = None
return _img, _img_meta, _proposal
imgs = []
img_metas = []
img_refers = []
proposals = []
for scale in self.img_scales:
_img, _img_meta, _proposal = prepare_single(
img, scale, False, proposal)
imgs.append(_img)
img_metas.append(DC(_img_meta, cpu_only=True))
img_refers.append(DC(to_tensor(img_refer), stack=True))
proposals.append(_proposal)
if self.flip_ratio > 0:
_img, _img_meta, _proposal = prepare_single(
img, scale, True, proposal)
imgs.append(_img)
img_metas.append(DC(_img_meta, cpu_only=True))
img_refers.append(DC(to_tensor(img_refer), stack=True))
proposals.append(_proposal)
data = dict(img=imgs,
img_meta=img_metas,
img_refer=img_refers)
if self.proposals is not None:
data['proposals'] = proposals
return data
    # fit different polar numbers
def polar_target_single(self, gt_bboxes, gt_masks, gt_labels, points, regress_ranges, num_polar):
num_points = points.size(0)
num_gts = gt_labels.size(0)
if num_gts == 0:
return gt_labels.new_zeros(num_points), \
gt_bboxes.new_zeros((num_points, 4))
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
        # xs, ys are the x and y coordinates of the points
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
        bbox_targets = torch.stack((left, top, right, bottom), -1)  # left/top/right/bottom distances from every feature-map point to the gt box, [num_pix, num_gt, 4]
        # mask targets are built the same way; labels are assigned from the mask centroid rather than the bbox center
mask_centers = []
mask_contours = []
        # step 1: compute each mask's centroid, returns [num_gt, 2]
for mask in gt_masks:
cnt, contour = self.get_single_centerpoint(mask)
contour = contour[0]
contour = torch.Tensor(contour).float()
y, x = cnt
mask_centers.append([x,y])
mask_contours.append(contour)
mask_centers = torch.Tensor(mask_centers).float()
        # assign mask_centers to different pyramid levels according to regress_range and centroid location
mask_centers = mask_centers[None].expand(num_points, num_gts, 2)
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
# condition1: inside a gt bbox
# add center sample
if self.center_sample:
if self.use_mask_center:
inside_gt_bbox_mask = self.get_mask_sample_region(gt_bboxes,
mask_centers,
self.strides,
self.num_points_per_level,
xs,
ys,
radius=self.radius)
else:
inside_gt_bbox_mask = self.get_sample_region(gt_bboxes,
self.strides,
self.num_points_per_level,
xs,
ys,
radius=self.radius)
else:
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
max_regress_distance >= regress_ranges[..., 0]) & (
max_regress_distance <= regress_ranges[..., 1])
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
        labels[min_area == INF] = 0  # 0 = background; otherwise the matched gt class index
bbox_targets = bbox_targets[range(num_points), min_area_inds]
pos_inds = labels.nonzero().reshape(-1)
mask_targets = torch.zeros(num_points, num_polar).float()
pos_mask_ids = min_area_inds[pos_inds]
for p,id in zip(pos_inds, pos_mask_ids):
x, y = points[p]
pos_mask_contour = mask_contours[id]
# SiamPolar: interpolate
new_contour = []
contour_length = len(pos_mask_contour)
for i in range(contour_length):
new_contour.append(pos_mask_contour[i])
# new_contour.append((3*pos_mask_contour[i]+pos_mask_contour[(i+1)%contour_length])/4)
new_contour.append((pos_mask_contour[i]+pos_mask_contour[(i+1)%contour_length])/2)
# new_contour.append((pos_mask_contour[i]+3*pos_mask_contour[(i+1)%contour_length])/4)
new_pos_mask_contour = torch.cat(new_contour, dim=0).unsqueeze(1)
# print(pos_mask_contour.size())
# print(new_pos_mask_contour.size())
# print(new_pos_mask_contour)
# exit()
dists, coords = self.get_coordinates(x, y, new_pos_mask_contour, num_polar)
mask_targets[p] = dists
return labels, bbox_targets, mask_targets
def get_coordinates(self, c_x, c_y, pos_mask_contour, num_polar):
ct = pos_mask_contour[:, 0, :]
x = ct[:, 0] - c_x
y = ct[:, 1] - c_y
# angle = np.arctan2(x, y)*180/np.pi
angle = torch.atan2(x, y) * 180 / np.pi
angle[angle < 0] += 360
angle = angle.int()
# dist = np.sqrt(x ** 2 + y ** 2)
dist = torch.sqrt(x ** 2 + y ** 2)
angle, idx = torch.sort(angle)
dist = dist[idx]
        # generate num_polar angles: for each target angle keep the farthest contour
        # point whose angle lies within +/-5 degrees, preferring the smallest offset
        # (offsets of 4 and 5 degrees were added for SiamPolar, per the josie.add note)
        new_coordinate = {}
        step_size = int(360/num_polar)
        offsets = [0, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5]
        for i in range(0, 360, step_size):
            for offset in offsets:
                if i + offset in angle:
                    new_coordinate[i] = dist[angle == i + offset].max()
                    break
distances = torch.zeros(num_polar)
for a in range(0, 360, step_size):
if not a in new_coordinate.keys():
new_coordinate[a] = torch.tensor(1e-6)
distances[a//step_size] = 1e-6
else:
distances[a//step_size] = new_coordinate[a]
return distances, new_coordinate
def __getitem__(self, idx):
if self.test_mode:
return self.prepare_test_img(idx)
while True:
data = self.prepare_train_img(idx)
if data is None:
idx = self._rand_another(idx)
continue
return data
```
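The target-building code above reduces each instance mask to `num_polar` ray lengths measured from the mass centre, one per fixed angle step, with a small epsilon wherever no contour point falls near an angle. Below is a simplified, standalone NumPy sketch of that idea; it bins contour points by angle instead of matching exact degrees with the tolerance used above, and the circular toy contour and `num_polar=36` are illustrative assumptions, not values from the code.
```python
import numpy as np

def polar_targets(center, contour, num_polar=36, eps=1e-6):
    """Distance from `center` to the farthest contour point in each angle bin."""
    cx, cy = center
    dx = contour[:, 0] - cx
    dy = contour[:, 1] - cy
    angles = (np.degrees(np.arctan2(dx, dy)) + 360.0) % 360.0
    dists = np.hypot(dx, dy)
    step = 360.0 / num_polar
    targets = np.full(num_polar, eps)
    bins = (angles // step).astype(int) % num_polar
    for b, d in zip(bins, dists):
        targets[b] = max(targets[b], d)
    return targets

# Toy contour: 200 points on a circle of radius 10 around the origin, so every
# angle bin should come out close to 10.0.
t = np.linspace(0, 2 * np.pi, 200, endpoint=False)
circle = 10.0 * np.stack([np.cos(t), np.sin(t)], axis=1)
print(polar_targets((0.0, 0.0), circle).round(2))
```
On the circular contour every bin is close to the radius, which is a quick sanity check that the angle handling is consistent.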
#### File: models/backbones/siam_resnet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..registry import BACKBONES
from mmcv.cnn import constant_init, kaiming_init
from .resnet import ResNet
@BACKBONES.register_module
class SiamResNet(nn.Module):
""" This is a Siamese network using a ResNet backbone and returning every
block's feature map.
"""
def __init__(self,
depth,
template_depth,
template_pretrained,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
style='pytorch',
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
dcn=None,
stage_with_dcn=(False, False, False, False),
gcb=None,
stage_with_gcb=(False, False, False, False),
gen_attention=None,
stage_with_gen_attention=((), (), (), ()),
with_cp=False,
zero_init_residual=True,
correlation_blocks=[3, 4, 5],
attention_blocks=None):
super(SiamResNet, self).__init__()
self.template_backbone = ResNet(template_depth,
num_stages,
strides,
dilations,
out_indices,
style,
frozen_stages,
conv_cfg,
norm_cfg,
norm_eval,
dcn,
stage_with_dcn,
gcb,
stage_with_gcb,
gen_attention,
stage_with_gen_attention,
with_cp,
zero_init_residual)
self.template_pretrained = template_pretrained
self.search_backbone = ResNet(depth,
num_stages,
strides,
dilations,
out_indices,
style,
frozen_stages,
conv_cfg,
norm_cfg,
norm_eval,
dcn,
stage_with_dcn,
gcb,
stage_with_gcb,
gen_attention,
stage_with_gen_attention,
with_cp,
zero_init_residual)
# Cross Correlation
self.correlation_blocks = [correlation_block-2
for correlation_block in correlation_blocks] # start from block2
self.match_batchnorm = nn.BatchNorm2d(1)
self.softmax = nn.Softmax(dim=1)
self.gama = nn.Parameter(torch.zeros(1))
def forward(self, x1, x2):
"""
Args:
x1 (torch.Tensor): The search region image of dimensions
[B, C, H', W']. Usually the shape is [4, 3, 255, 255].
x2 (torch.Tensor): The reference patch of dimensions [B, C, H, W].
Usually the shape is [4, 3, 127, 127].
Returns:
block2, block3, block4, block5: The outputs of each block,
some are fused with response maps.
"""
# extract features
search_blocks = self.search_backbone(x1)
template_blocks = self.template_backbone(x2)
# init outs
outs = [search_block for search_block in search_blocks]
# re-cross correlation
for correlation_block in self.correlation_blocks:
embedding_search = search_blocks[correlation_block]
embedding_template = template_blocks[correlation_block]
# re-correlation
match_map = self.match_corr(embedding_search,
embedding_template,
embedding_search.shape[2:])
match_map = match_map.repeat(1, embedding_template.size()[1], 1, 1)
corr_value = self.softmax(match_map)*embedding_search
outs[correlation_block] = self.gama*corr_value + embedding_search
return tuple(outs)
def init_weights(self, pretrained=None):
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
self.search_backbone.init_weights(pretrained)
self.template_backbone.init_weights(self.template_pretrained)
def match_corr(self, embed_srch, embed_ref, upsc_size):
""" reference: https://github.com/rafellerc/Pytorch-SiamFC
Matches the two embeddings using the correlation layer. As per usual
it expects input tensors of the form [B, C, H, W].
Args:
embed_ref: (torch.Tensor) The embedding of the reference image, or
the template of reference (the average of many embeddings for
example).
embed_srch: (torch.Tensor) The embedding of the search image.
Returns:
match_map: (torch.Tensor) The correlation response map between the search and reference embeddings.
"""
# print('embed_srch: ', embed_srch.size(), "embed_ref: ", embed_ref.size())
b, c, h, w = embed_srch.shape
# Here the correlation layer is implemented using a trick with the
# conv2d function using groups in order to do the correlation with
# batch dimension. Basically we concatenate each element of the batch
# in the channel dimension for the search image (making it
# [1 x (B.C) x H' x W']) and setting the number of groups to the size of
# the batch. This grouped convolution/correlation is equivalent to a
# correlation between the two images, though it is not obvious.
match_map = F.conv2d(embed_srch.view(1, b * c, h, w), embed_ref, groups=b)
# Here we reorder the dimensions to get back the batch dimension.
match_map = match_map.permute(1, 0, 2, 3)
match_map = self.match_batchnorm(match_map)
match_map = F.interpolate(match_map, upsc_size, mode='bilinear', align_corners=False)
return match_map
``` |
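The long comment in `match_corr` above describes how folding the batch into the channel dimension lets a single grouped `conv2d` compute one correlation map per sample. A small self-contained check of that equivalence is sketched below; the tensor shapes are illustrative assumptions and do not come from the model above.
```python
import torch
import torch.nn.functional as F

b, c, H, W, h, w = 4, 8, 16, 16, 5, 5   # hypothetical shapes
srch = torch.randn(b, c, H, W)          # search embeddings
ref = torch.randn(b, c, h, w)           # template embeddings

# Grouped trick: concatenate the batch along channels, one group per sample.
grouped = F.conv2d(srch.view(1, b * c, H, W), ref, groups=b)
grouped = grouped.permute(1, 0, 2, 3)   # back to (B, 1, H', W')

# Reference: correlate each (search, template) pair independently.
looped = torch.cat([F.conv2d(srch[i:i + 1], ref[i:i + 1]) for i in range(b)])

print(torch.allclose(grouped, looped, atol=1e-5))  # True
```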
{
"source": "Josie-Li/ZazuML-easy_AutoML",
"score": 2
} |
#### File: ZazuML-easy_AutoML/dataloop_services/trial_module.py
```python
import logging
import os
import torch
import json
import dtlpy as dl
from importlib import import_module
from dataloop_services.plugin_utils import maybe_download_data
from logging_utils import init_logging
class ServiceRunner(dl.BaseServiceRunner):
"""
Plugin runner class
"""
def __init__(self, package_name):
logging.getLogger('dtlpy').setLevel(logging.WARN)
self.package_name = package_name
self.path_to_metrics = 'metrics.json'
self.path_to_tensorboard_dir = 'runs'
self.path_to_logs = 'logger.conf'
self.logger = init_logging(__name__, filename=self.path_to_logs)
self.logger.info(self.package_name + ' initialized')
def run(self, dataset, train_query, val_query, model_specs, hp_values, configs=None, progress=None):
maybe_download_data(dataset, train_query, val_query)
# get project
# project = dataset.project
assert isinstance(dataset, dl.entities.Dataset)
project = dl.projects.get(project_id=dataset.projects[0])
# start tune
cls = getattr(import_module('.adapter', 'object_detecter.' + model_specs['name']), 'AdapterModel')
#TODO: without roberto work with path / or github
inputs_dict = {'devices': {'gpu_index': 0}, 'model_specs': model_specs, 'hp_values': hp_values}
#json save
# TODO: make sure you don't run two trials concurrently and end up saving the same checkpoint twice
torch.save(inputs_dict, 'checkpoint.pt')
adapter = cls()
adapter.load()
if hasattr(adapter, 'reformat'):
adapter.reformat()
if hasattr(adapter, 'data_loader'):
adapter.data_loader()
if hasattr(adapter, 'preprocess'):
adapter.preprocess()
if hasattr(adapter, 'build'):
adapter.build()
self.logger.info('commencing training . . . ')
adapter.train()
self.logger.info('training finished')
save_info = {
'package_name': self.package_name,
'execution_id': progress.execution.id
}
checkpoint_path = adapter.save()
# upload metrics as artifact
self.logger.info('uploading metrics to dataloop')
project.artifacts.upload(filepath=self.path_to_logs,
package_name=save_info['package_name'],
execution_id=save_info['execution_id'])
# this is the same as uploading metrics because the map is saved under checkpoint['metrics']['val_accuracy']
project.artifacts.upload(filepath=checkpoint_path,
package_name=save_info['package_name'],
execution_id=save_info['execution_id'])
project.artifacts.upload(filepath=self.path_to_tensorboard_dir,
package_name=save_info['package_name'],
execution_id=save_info['execution_id'])
# change to clean up
adapter.delete_stuff()
self.logger.info('finished uploading checkpoint and logs')
self.logger.info('FINISHED SESSION')
```
#### File: ZazuML-easy_AutoML/spec/models_stuff.py
```python
from .spec_base import Spec
class ModelsSpec(Spec):
def __init__(self, spec_data=None):
if not spec_data:
pass
super().__init__(spec_data)
pass
def validate(self):
# if 'model_space' not in self.spec_data:
# raise Exception("Model spec must have a model_space field")
#
# if 'task' not in self.spec_data:
# raise Exception("Recipe must have a task field")
pass
@property
def models_space(self):
new_dic = {}
for model_name, model_dic in self.spec_data.items():
new_dic[model_name] = []
for rating in ['accuracy_rating', 'speed_rating', 'memory_rating']:
new_dic[model_name].append(model_dic['model_space'][rating])
return new_dic
# @property
# def task(self):
# return self.spec_data['task']
``` |
{
"source": "josieljunior/API-COF",
"score": 2
} |
#### File: API-COF/api/models.py
```python
from django.db import models
class Default(models.Model):
description = models.CharField(max_length=300, blank=False, null=False)
value = models.FloatField(blank=False, null=False)
date = models.DateField(blank=False, null=False)
def __str__(self):
return self.description
class Expense(Default):
pass
class Income(Default):
pass
``` |
{
"source": "JosielSantos/virtual-piano",
"score": 3
} |
#### File: virtual-piano/src/channel.py
```python
import os.path
from notes_manager import NotesManager
from util import app, validate
class Channel:
DIRECTION_LEFT = -1
DIRECTION_MIDDLE = 0
DIRECTION_RIGHT = 1
notes_on = []
__notes_manager = None
__start_note = 48
def __init__(self, instrument, volume, direction, keymap_file_path):
self.set_instrument(instrument)
self.set_volume(volume)
self.set_direction(direction)
self.keymap_file_path = keymap_file_path
self.__load_notes_manager()
def get_instrument(self):
return self.__instrument
def set_instrument(self, instrument):
validate.integer(instrument, 'Instrument')
validate.midi_range(instrument, 'Instrument')
self.__instrument = instrument
def get_volume(self):
return self.__volume
def set_volume(self, volume):
validate.integer(volume, 'Volume')
validate.midi_range(volume, 'Volume')
self.__volume = volume
def get_direction(self):
return self.__direction
def set_direction(self, direction):
if direction not in [self.DIRECTION_LEFT, self.DIRECTION_MIDDLE, self.DIRECTION_RIGHT]:
raise ValueError('Invalid direction')
self.__direction = direction
def __load_notes_manager(self):
self.__notes_manager = NotesManager()
self.__notes_manager.load_file(self.keymap_file_path)
self.__notes_manager.set_start_note(self.__start_note)
self.__notes_manager.organize_notes()
def note_on(self, note):
if note not in self.notes_on:
self.notes_on.append(note)
def note_off(self, note):
if note in self.notes_on:
self.notes_on.remove(note)
def next_instrument(self):
instrument = self.__instrument + 1
if instrument > 127:
instrument = 0
self.__instrument = instrument
def previous_instrument(self):
instrument = self.__instrument - 1
if instrument < 0 :
instrument = 127
self.__instrument = instrument
def volume_down(self, num = 10):
volume = self.__volume - num
if volume < 0:
volume = 0
self.__volume = volume
def volume_up(self, num = 10):
volume = self.__volume + num
if volume > 127:
volume = 127
self.__volume = volume
def get_note(self, key):
return self.__notes_manager[key] if key in self.__notes_manager else None
def octave_down(self):
self.__notes_manager.octave_down()
def octave_up(self):
self.__notes_manager.octave_up()
def semitone_down(self, total):
self.__notes_manager.semitone_down(total)
def semitone_up(self, total):
self.__notes_manager.semitone_up(total)
```
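The instrument and volume methods above wrap around and clamp values to the 0-127 MIDI range with explicit branches. The snippet below is a tiny standalone sketch of the same arithmetic using modulo and min/max; these helpers are not part of the `Channel` API, and the example avoids constructing a `Channel` because that needs a keymap file on disk.
```python
def next_program(program):
    """Wrap a MIDI program number forward within 0..127."""
    return (program + 1) % 128

def previous_program(program):
    """Wrap a MIDI program number backward within 0..127."""
    return (program - 1) % 128

def adjust_volume(volume, delta):
    """Apply a volume change, clamped to the 0..127 MIDI range."""
    return max(0, min(127, volume + delta))

assert next_program(127) == 0
assert previous_program(0) == 127
assert adjust_volume(5, -10) == 0
assert adjust_volume(120, 10) == 127
```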
#### File: virtual-piano/src/main.py
```python
import wx
from config import Config
import constants
from exceptions import UserError
from util.dialogs import edit_dialog, message_dialog
from util import app, char
from midi_factory import MidiOutputFactory
from piano import Piano
class PianoApp(wx.App):
config = None
functions_keymap = {}
multi_voice = False
active_channels = []
current_channel = 0
def OnInit(self):
try:
self.load_config()
self.init_piano()
self.init_ui()
return True
except UserError as user_error:
wx.MessageBox(str(user_error), 'Erro', wx.OK | wx.ICON_ERROR)
return False
def load_config(self):
self.config = Config(app.config_file_path())
def init_piano(self):
self.create_midi_driver()
self.piano = Piano(self.config.get_keymap_file_path('pianoeletronico.kmp'), self.midi_output)
self.active_channels.append(0)
self.piano.set_instrument(0, 0)
def create_midi_driver(self):
midi_output_factory = MidiOutputFactory()
midi_output_driver = self.config.get_midi_output_driver(constants.MIDI_OUTPUT_DEFAULT_DRIVER)
if midi_output_driver >= constants.MIDI_OUTPUT_DEFAULT_DRIVER:
self.midi_output = midi_output_factory.factory_pygame(midi_output_driver )
elif midi_output_driver == constants.MIDI_OUTPUT_FLUIDSYNTH_DRIVER:
self.midi_output = midi_output_factory.factory_fluidsynth(self.config.get_soundfont_file_path())
else:
raise UserError('Erro de configuração: MIDI driver inexistente')
def init_ui(self):
self.mainFrame = wx.Frame(parent = None, id = -1, title = 'Virtual Piano')
self.mainFrame.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
self.mainFrame.Bind(wx.EVT_KEY_UP, self.on_key_up)
self.functions_keymap = {
wx.WXK_RIGHT: lambda evt: self.piano.next_instrument(self.current_channel),
wx.WXK_LEFT: lambda evt: self.piano.previous_instrument(self.current_channel),
wx.WXK_UP: lambda evt: self.tone_change_up(evt),
wx.WXK_DOWN: lambda evt: self.tone_change_down(evt),
wx.WXK_PAGEUP: lambda evt: self.next_channel(),
wx.WXK_PAGEDOWN: lambda evt: self.previous_channel(),
wx.WXK_DELETE: lambda evt: self.delete_current_channel(),
wx.WXK_F2: lambda evt: self.select_instrument_by_number(self.current_channel),
wx.WXK_F8: lambda evt: self.piano.volume_down(self.current_channel),
wx.WXK_F9: lambda evt: self.piano.volume_up(self.current_channel),
wx.WXK_BACK: self.toggle_multi_voice,
wx.WXK_TAB: self.pan,
}
self.mainFrame.Show(True)
def on_key_down(self, evt):
key = evt.GetKeyCode()
if key in self.functions_keymap:
self.functions_keymap[key](evt)
if self.multi_voice:
for channel_number in self.active_channels:
note = self.get_note_from_key_event(evt, channel_number)
if note is not None:
self.piano.note_on(note, channel_number)
else:
note = self.get_note_from_key_event(evt, self.current_channel)
if note is not None:
self.piano.note_on(note, self.current_channel)
def on_key_up(self, evt):
if self.multi_voice:
for channel_number in self.active_channels:
note = self.get_note_from_key_event(evt, channel_number)
if note is not None:
self.piano.note_off(note, channel_number)
else:
note = self.get_note_from_key_event(evt, self.current_channel)
if note is not None:
self.piano.note_off(note, self.current_channel)
def get_note_from_key_event(self, evt, channel_number):
key = evt.GetUnicodeKey()
if key != wx.WXK_NONE:
if key > 127:
key = char.unicode_workaround(chr(key).encode('utf-8'))
return self.piano.get_note(key, channel_number)
def toggle_multi_voice(self, evt):
self.piano.all_notes_off()
self.multi_voice = not self.multi_voice
def pan(self, evt):
back = True if evt.GetModifiers() == wx.MOD_SHIFT else False
self.piano.pan(back, self.current_channel)
def next_channel(self):
if self.current_channel == 15:
return
self.piano.all_notes_off()
self.current_channel += 1
if not self.current_channel in self.active_channels:
self.active_channels.append(self.current_channel)
self.piano.get_channel(self.current_channel)
def previous_channel(self):
if self.current_channel == 0:
return
self.piano.all_notes_off()
self.current_channel -= 1
def delete_current_channel(self):
self.piano.delete_channel(self.current_channel)
self.active_channels.remove(self.current_channel)
self.current_channel -= 1
def tone_change_up(self, evt):
key_modifier = evt.GetModifiers()
if key_modifier == wx.MOD_SHIFT:
self.piano.semitone_up(1, self.current_channel)
else:
self.piano.octave_up(self.current_channel)
def tone_change_down(self, evt):
key_modifier = evt.GetModifiers()
if key_modifier == wx.MOD_SHIFT:
self.piano.semitone_down(1, self.current_channel)
else:
self.piano.octave_down(self.current_channel)
def select_instrument_by_number(self, target_channel):
current_instrument = str(self.piano.get_instrument_for_channel(target_channel) + 1)
pressed_ok, selected_instrument = edit_dialog(self.mainFrame, "Instrument", "Enter instrument number for channel %d (from 1 to 128):" % target_channel, current_instrument)
if pressed_ok:
try:
instrument_number = int(selected_instrument) - 1
except ValueError:
message_dialog(self.mainFrame, "Error", "Instrument not a number")
return
if instrument_number < 0 or instrument_number > 127:
message_dialog(self.mainFrame, "Error", "Instrument number not in range from 1 to 128")
return
self.piano.set_instrument(instrument_number, target_channel)
if __name__ == '__main__':
app = PianoApp()
app.MainLoop()
``` |
{
"source": "josiemundi/refarch-kc",
"score": 3
} |
#### File: itg-tests/es-it/CancelledOrderTests.py
```python
import unittest
import os
import json
import requests
import time
import OrderConsumer
try:
ORDER_CMD_MS = os.environ['ORDER_CMD_MS']
except KeyError:
ORDER_CMD_MS = "ordercmd:9080"
def orderCommand(order):
res = requests.post("http://" + ORDER_CMD_MS + "/orders",json=order)
return json.loads(res.text)
class TestEventSourcingHappyPath(unittest.TestCase):
def test_createCancellableOrder(self):
print("Create a cancellable order")
# 1- load the order request from json
f = open('../data/FreshProductCancellableOrder.json','r')
order = json.load(f)
f.close()
# 2- create order by doing a POST on /api/orders of the orders command service
orderRep=orderCommand(order)
orderID=orderRep['orderID']
self.assertIsNotNone(orderID)
print('@@@@ Post new order -> resp with ID:' + orderID)
# 3- Get order create event
print('@@@@ wait for OrderCreated event with ID:' + orderID)
orderEvent = OrderConsumer.pollNextOrder(orderID)
self.assertEqual(orderEvent['type'], "OrderCreated")
# 4- get next order event, should be order cancelled to a voyage
print('@@@@ wait for OrderCancelled event from the voyage service for ' + orderID)
orderEvent = OrderConsumer.pollNextOrder(orderID)
self.assertEqual(orderEvent['type'], "OrderCancelled")
if __name__ == '__main__':
unittest.main()
```
#### File: itg-tests/es-it/TraceContainers.py
```python
import sys,os
import time,json
import signal,asyncio
from confluent_kafka import KafkaError, Consumer
try:
KAFKA_BROKERS = os.environ['KAFKA_BROKERS']
except KeyError:
print("The KAFKA_BROKERS environment variable needs to be set.")
exit(1)
try:
KAFKA_APIKEY = os.environ['KAFKA_APIKEY']
except KeyError:
print("The KAFKA_APIKEY environment variable not set... assume local deployment")
TOPIC_NAME = "containers"
KEY_NAME = "containerID"
KEY_VALUE = "1"
def parseArguments():
global KEY_VALUE
if len(sys.argv) <= 1:
print("usage: provide the container ID to receive as the first argument")
exit(1)
KEY_VALUE = sys.argv[1]
print("The arguments are: " , str(sys.argv))
kafkaConsumer = Consumer({
'bootstrap.servers': KAFKA_BROKERS,
'group.id': 'python-orders-consumer',
'auto.offset.reset': 'earliest',
'enable.auto.commit': True
})
kafkaConsumer.subscribe([TOPIC_NAME])
def pollNextEvent(aKey):
print('Start to listen to events')
gotIt = False
while not gotIt:
msg = kafkaConsumer.poll(timeout=10.0)
if msg is None:
print("no message")
continue
if msg.error():
print("Consumer error: {}".format(msg.error()))
continue
print('@@@ pollNextEvent {} partition: [{}] at offset {} with key {}:\n'
.format(msg.topic(), msg.partition(), msg.offset(), str(msg.key())))
msgAsStr = msg.value().decode('utf-8')
print('@@@ pollNextEvent Received message: {}'.format(msgAsStr))
eventAsJson = json.loads(msgAsStr)
if (eventAsJson['payload'][KEY_NAME] == aKey):
print('@@@@ got the matching expected key ')
gotIt = True
return eventAsJson
if __name__ == '__main__':
parseArguments()
pollNextEvent(KEY_VALUE)
kafkaConsumer.close()
```
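The consumer above polls until it sees an event whose `payload.containerID` matches the requested key. The sketch below isolates just that matching step so it can run without a Kafka broker; the `matches` helper and the sample event body are illustrative assumptions, and only the `payload.containerID` lookup mirrors the script above.
```python
import json

KEY_NAME = "containerID"

def matches(raw_value, key):
    """Return True when the decoded event payload carries the expected container ID."""
    event = json.loads(raw_value.decode('utf-8'))
    return event.get('payload', {}).get(KEY_NAME) == key

sample = json.dumps({
    'type': 'ContainerAdded',
    'payload': {'containerID': '1', 'status': 'Empty'}
}).encode('utf-8')
print(matches(sample, '1'))   # True
print(matches(sample, '42'))  # False
```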
#### File: itg-tests/OrdersPython/OrderConsumer.py
```python
import os,sys
from kafka.KcConsumer import KafkaConsumer
print(" @@@ Executing script: OrderConsumer.py")
# Try to read the Kafka broker from the environment variables
try:
KAFKA_BROKERS = os.environ['KAFKA_BROKERS']
except KeyError:
print("[ERROR] - The KAFKA_BROKERS environment variable needs to be set.")
exit(1)
# Try to read the Kafka API key from the environment variables
try:
KAFKA_APIKEY = os.environ['KAFKA_APIKEY']
except KeyError:
print("The KAFKA_APIKEY environment variable not set... assume local deployment")
# Try to read the Kafka environment from the environment variables
try:
KAFKA_ENV = os.environ['KAFKA_ENV']
except KeyError:
KAFKA_ENV='LOCAL'
# Default values
TOPIC_NAME='orders'
OID = 'o_1'
def parseArguments():
print("The arguments for the script are: " , str(sys.argv))
if len(sys.argv) != 2:
print("[ERROR] - Need to have at least one argument order ID")
exit(1)
OID = sys.argv[1]
print("The Kafka environment is: " + KAFKA_ENV)
print("The Kafka brokers are: " + KAFKA_BROKERS)
print("The Kafka API key is: " + KAFKA_APIKEY)
return OID
if __name__ == '__main__':
OID = parseArguments()
orderConsumer = KafkaConsumer(KAFKA_ENV,KAFKA_BROKERS,KAFKA_APIKEY,TOPIC_NAME,False)
orderConsumer.prepareConsumer()
#orderConsumer.pollNextEvent(OID,'orderID')
orderConsumer.pollEvents()
orderConsumer.close()
``` |
{
"source": "josienb/project_euler",
"score": 4
} |
#### File: josienb/project_euler/euler007.py
```python
def calculate_nth_prime(n):
number = 1
prime_counter = 0
while prime_counter < n:
if isprime(number):
prime_counter = prime_counter + 1
if prime_counter == n:
break
number = number + 1
return number
def isprime(n):
'''check if integer n is a prime'''
# make sure n is a positive integer
n = abs(int(n))
# 0 and 1 are not primes
if n < 2:
return False
# 2 is the only even prime number
if n == 2:
return True
# all other even numbers are not primes
if not n & 1:
return False
# range starts with 3 and only needs to go up the squareroot of n
# for all odd numbers
for x in range(3, int(n**0.5)+1, 2):
if n % x == 0:
return False
return True
print calculate_nth_prime(10001)
``` |
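The `isprime` docstring explains why only odd divisors up to the square root of `n` need to be tested. As an independent cross-check, the sketch below lists the small primes with a plain sieve of Eratosthenes; the sieve is not part of the solution above, it just confirms the same sequence (the 6th prime, index 5, is 13).
```python
def primes_upto(limit):
    """Sieve of Eratosthenes, used only to sanity-check isprime()."""
    sieve = [True] * (limit + 1)
    sieve[0] = sieve[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if sieve[p]:
            for m in range(p * p, limit + 1, p):
                sieve[m] = False
    return [n for n in range(limit + 1) if sieve[n]]

print(primes_upto(30))     # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
print(primes_upto(30)[5])  # 13, the 6th prime
```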
{
"source": "josieoharrow/QAPics",
"score": 3
} |
#### File: QAPics/API/qapics.py
```python
import sys
from PIL import Image
import sys
import os
import json
import datetime
from scipy import misc
QAPicsDir = os.path.dirname(os.path.abspath(__file__)) + "/.."
sys.path.append(QAPicsDir + "/Analysis")
import analyze
baseline_image_path = QAPicsDir + "/Baselines/" + sys.argv[1] + ".png"
compare_image_path = QAPicsDir + "/" + sys.argv[1] + ".png"
mask_json = QAPicsDir + "/Masks/" + sys.argv[1] + ".json"
#Could store screenshot in output and adjust it with filters later.
print(mask_json)
baseline_image = Image.open(baseline_image_path, 'r')
compare_image = Image.open(compare_image_path, 'r')
mask_data = None
with open(mask_json) as f:
mask_data = json.load(f)
if len(sys.argv) > 2:
mask_data = Image.open(sys.argv[2], 'r')
def get_differences(baseline_image, compare_image, mask_data):
return analyze.compare(baseline_image, compare_image, mask_data)
def save_output_image_from_array(pixel_array):
if not os.path.isdir(QAPicsDir + "/Output"):
os.mkdir(QAPicsDir + "/Output")
currentDT = datetime.datetime.now()
image_name = QAPicsDir + '/Output/Output_' + str(currentDT) + '.png'
misc.imsave(image_name, pixel_array)
output_image = Image.open(image_name)
output_image = output_image.transpose(Image.FLIP_TOP_BOTTOM).transpose(Image.ROTATE_270)
output_image.save(image_name)
val = get_differences(baseline_image, compare_image, mask_data)
save_output_image_from_array(val[1])
print(val[0])
``` |
{
"source": "josiest/Flora-Data-Extraction",
"score": 3
} |
#### File: Flora-Data-Extraction/florana/extract.py
```python
import textract
import re
import json
import argparse
import os
import textwrap
import itertools
from pathlib import Path
from collections import OrderedDict
file_dir = Path(__file__).parent.absolute()
cwd = Path()
# TODO: Recursively iterate through a directory
# Data to extract:
# species name | states and provinces it appears in | classifier
def main():
# Build the command line argument parser
description = '''
Extract data from genus treatment pdfs of "Flora of North America".
The csv output files should have the following format:
<genus name>, <locations appeared in>, <classifier>
Example usage:
python -m florana.extract -A -o data.csv
'''
prog='python -m florana.extract'
fmt_class = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(formatter_class=fmt_class,
description=textwrap.dedent(description),
prog=prog)
parser.add_argument('-A', action='store_true',
help='parse all pdf files in the current directory')
parser.add_argument('filenames', metavar='F', nargs='*',
help='the treatment files to extract from')
parser.add_argument('-o', action='store',
help='specify a single output file (csv)')
success = True
args = parser.parse_args()
treatments = []
# The user specified to parse all pdf files in the directory
if args.A and not args.filenames:
treatments = [fn for fn in os.listdir() if '.pdf' in fn]
# The user specified the files manually
elif args.filenames:
treatments = args.filenames
else:
message = 'Please either specify filenames manually or use the '\
'"parse all" flag (-A).'
raise ValueError(message)
locations = ''
classifiers = ''
sep = ''
error = '' # Brief error message for program ouput to console
log_error = '' # Verbose error message for error.log
for treatment in treatments:
# name the csv file after the pdf input
match = re.match(r'([\w\.]+)\.pdf', treatment)
if not match:
print(f'"{treatment}" is not a pdf file!')
success = False
continue
fn = match[1]
# If the extracting algorithm couldn't find locations, keep track of
# the error messages
results = extract_from(treatment)
if results['error']:
success = False
error += sep+results['error']
log_error += sep+results['verbose-error']
# If the user specified a single output file, compile all the
# lines into a single string and write to a file later
if args.o:
locations += sep+results['locations']
classifiers += sep+results['classifiers']
# If the user didn't specify a single output file write the files
# for each treatment as we go
else:
with open(fn+'.csv', 'w') as f:
f.write(results['locations'])
with open(fn+'-classifiers.csv', 'w') as f:
f.write(results['classifiers'])
sep = '\n'
# if the user specified a single output file, now is when we write it
if args.o:
# locations file
fn = args.o
# classifiers file
idfn = ''
# The user may have already included the file extension
try:
i = fn.index('.csv')
idfn = fn[:i]+'-classifiers'+fn[i:]
# If the user didn't include the file extension, add it
except ValueError:
idfn = fn + '-classifiers.csv'
fn += '.csv'
with open(fn, 'w') as f:
f.write(locations)
with open(idfn, 'w') as f:
f.write(classifiers)
if success:
print('Data was extracted successfully')
else:
print(error)
with open('error.log', 'wb') as f:
f.write(log_error.encode('utf8'))
print('An error occurred when extracting the flora data. See ' \
'error.log for more details.')
def extract_from(treatment):
"""Extract the data from the genus treatment.
Parameters:
treatment - a pdf file name of the genus treatment.
data_type - "locations" or "classifiers"
Returns a dict of results with the following format
"locations" - a string of species names and locations they appear in
"classifiers" - a string of species names and their classifiers
"error" - a brief error message stating which species the algorithm
couldn't find locations for
"verbose-error" - an error message stating which species the algorithm
couldn't find locations for as well as the block of
text that the algorithm searched in for the locations
Raises a ValueError if the genus isn't found in the treatment.
"""
text = load_treatment(treatment)
genus = genus_in(text)
if not genus:
raise ValueError("No genus was found!")
data = {'locations': '', 'classifiers': '',
'error': '', 'verbose-error': ''}
locsep = ''
errsep = ''
idsep = ''
for block, name in partition(text, genus):
ids = ids_in(block)
data['classifiers'] += f'{idsep}{name}, {ids}'
idsep = '\n'
locs = '\n'.join(f'{name}, {loc}' for loc in locs_in(block))
if not locs:
data['error'] += f"{errsep}Couldn't find locations for {name}"
data['verbose-error'] += f"{errsep}Couldn't find locations for " \
f"{name} in:\n\n{block}\n"
errsep = '\n'
else:
data['locations'] += locsep+locs
locsep = '\n'
return data
def load_treatment(fn, encoding='utf-8'):
""" Load the treatment using textract
Parameters:
fn - the file name of the treatment
encoding - the encoding of the file (defaults to utf-8)
"""
path = Path.joinpath(Path.cwd(), fn)
return textract.process(str(path), encoding=encoding).decode(encoding)
# regex patterns
# --- Genus pattern ---
#
# Assumes that the file contains the genus name in the following format:
#
# n. GENUS
#
# Where n is an arbitrary natural and GENUS is all-caps. GENUS doesn't
# necessarily end the line
genus_pattern = re.compile(r'^[ ]*\d+[a-z]*\.[ ]*([A-Z]+)\s+',
flags=re.MULTILINE)
def genus_in(treatment):
"""Return the genus name in the given treatment string.
If the genus couldn't be found, an empty string is returned.
"""
genus_match = genus_pattern.search(treatment)
# If the genus name couldn't be found, return an empty string
if not genus_match:
return ""
# Else, get the first match and de-"caps-lock" it
genus = genus_match[1]
return genus[0]+(genus[1:].lower())
def partition(treatment, genus):
"""Yield the block and name in treatment associated with each species*.
*Note that this includes subspecies.
treatment - the treatment text (a string)
species - a list of species names
"""
# Find all the species names in the treatment and reorder them in the order
# they appear in the text
name_gens = [keys_in(subgroup, genus) for subgroup in subgroups(treatment)]
names = sorted(itertools.chain(*name_gens),
key=lambda s: int(s.split('.')[0]))
# We want to remove the number before each name and also remove any
# duplicates while preserving order. OrderedDict can achieve this
names = (' '.join(name.split(' ')[1:3]).strip() for name in names)
names = OrderedDict.fromkeys(names).keys()
for block, name in species_blocks(treatment, names):
# each species block may have subspecies
has_subspecies = False
for sub_block, sub_name in subspecies_blocks(block, name):
has_subspecies = True
yield sub_block, sub_name
if not has_subspecies:
yield block, name
def subgroups(treatment):
"""Generate each subgroup block in order."""
# Find all occurrences of genus headers
headers = list(genus_pattern.finditer(treatment))
i, j = 0, 0
# If there are subgroups, the first header is for the entire treatment and
# there's no species key before the header for the first subgroup, so take
# the first header out of the list
if len(headers) > 1:
headers = headers[1:]
for next_header in headers:
# Update j to the start of the current header: we're really yielding
# the previous match
j = next_header.start()
# If the block starts at index 0, then we haven't even reached the first
# subgroup block, so don't yield yet
if i > 0:
yield treatment[i:j]
# Update i to the start of the current header: on the next iteration
# it will become the start of the previous header and j will be the
# start of the current header.
i = j
# Once this is encountered, all info is irrelevant for this program
try:
k = treatment.lower().index("other reference")
except:
k = -1
if i > 0:
yield treatment[j:k]
# If there were no matches, then a genus couldn't be found
else:
raise ValueError("No genus was found!")
def keys_in(subgroup, genus):
"""Generate all species names from the species key in a subgroup block.
subgroup - the subgroup block containing the species
genus - of the species
"""
key_pattern = build_key_pattern(genus)
has_species_key = False
for match in key_pattern.finditer(subgroup):
has_species_key = True
yield match[0]
# it's possible that the text has no species key - this happens when
# there's only one species
if not has_species_key:
# Compile the intro pattern without knowing what the species is. Since
# there's only one species this is fine.
intro_pattern = build_intro_pattern(genus)
intro = intro_pattern.search(subgroup)
if not intro:
raise ValueError('No species found!')
else:
yield '1. '+' '.join(intro.groups())
def species_blocks(treatment, names):
"""Generate all species blocks* and names in treatment.
*Note that this includes all subspecies if any.
treatment - the treatment text
names - an ordered list of all species names that appear in the treatment
"""
error=''
i, j = 0, 0
# Split the whole text into blocks based on the introduction to each subsp.
for next_name in names:
# split the name up into its individual parts in order to pass once
# again into the intro_pattern builder, this time compiling to look
# for a specific species.
if len(next_name.split(' ')) > 2:
if error:
error += '\n'
error += f'"{next_name}" is too long: expected 2 words!'
continue
genus, species = next_name.split(' ')
intro_pattern = build_intro_pattern(genus, species=species)
intro = intro_pattern.search(treatment)
# Produce error message if species introduction couldn't be found
if not intro:
if error:
error += '\n'
error += f'Could not find species introduction for "{next_name}"'
continue
j = intro.start()
# If i > j, then something went wrong when we reordered the search
# results.
if i > j:
if error:
error += '\n'
error += f'When searching in {next_name}: Indices ({i}, {j}) are '\
'out of order!'
# If the block starts at index 0, then we haven't even reached the first
# species block, so don't yield yet
elif i > 0:
yield treatment[i:j], name
name = next_name
i = j
# Finally yield the "current" match (the last match).
try:
k = treatment.index("OTHER REFERENCES")
except ValueError:
k = -1
if i > 0:
yield treatment[j:k], name
if error:
error += "\nErrors occurred while partitioning species blocks!"
raise ValueError(error)
def subspecies_blocks(block, species):
"""Generate all subspecies blocks in a species block, if any.
block - the species block to look in
species - the species name of the form "<genus> <species>"
"""
if len(species.split(' ')) > 2:
raise ValueError(f'"{species}" is too long: expected 2 words!')
genus, species = species.split(' ')
# Build the intro pattern to specifically look for subspecies
intro_pattern = build_intro_pattern(genus, species=species,
subspecies=r'[a-z]+')
error = ''
i, j = 0, 0
name = ''
# go through each subspecies introduction match
for intro in intro_pattern.finditer(block):
# Start
j = intro.start()
# Only yield the previous match when we've actually found it
if i > 0:
if i > j:
if error:
error += '\n'
error += f'When searching in "{name}" block: Indices ({i}, {j}'\
') are out of order!'
yield block[i:j], name
# The name should include the entire species, including the subspecies
# The intro pattern should have matched all of these.
name = ' '.join(intro.groups())
i = j
# It's possible that there are no subspecies. The intro pattern wouldn't
# have found anything and i would have never been incremented. If this is
# the case we don't want to yield anything, otherwise yield the rest of
# subspecies block until the end of the species block
if i > 0:
yield block[j:-1], name
if error:
error += "\nErrors occurred when partitioning the treatment"
raise ValueError(error)
def build_key_pattern(genus):
"""Build a regex pattern for the genus key
Parameters:
genus - the genus of the file (a string)
The pattern has one subgroup: the genus and species name
"""
# --- Species name from index line ---
#
# Relies on the assumption that index lines have the following format
#
# n. <genus> <species> [(in part)]\n
#
# Where n is an arbitrary natural, genus is specified, species is a
# lowercase word and "(in part)" doesn't necessarily appear.
#
# The key pattern matches two subgroups:
# 1. The number that orders how the species appears in the text
# 2. The genus and species name
key_pattern = re.compile(r'(\d+)\.[ ]*('+genus+r' (?:x\\)?[a-z\-]+)'+
r'(?: \(in part\))?\s*\n', flags=re.MULTILINE)
return key_pattern
def build_intro_pattern(genus, species=r'(?:x\\)?[a-z\-]+', subspecies=''):
"""Build a regex pattern for a species introduction.
Paramters:
genus - of the species
species - specific species to look for (defaults to any)
subspecies - the subspecies to look for (defaults to empty string)
The regex pattern has three potential subgroups.
1 - the genus name
2 - the species name
3 - the subspecies name (if specified)
"""
# --- Species Introduction ---
#
# Relies on the assumption that a species introduction is formatted as:
#
# n[a]*. Species name {arbitrary text} [(subsp|var). name] {arbitrary text}
#
# Where n is an arbitrary natural and a is an arbitrary alphabetical
# character.
# This will match the "n[a]*" part of the introduction
pattern = r'^\d+'
# if the subspecies was specified, we know there must be alphabetical
# numbering on them
if subspecies:
pattern += r'[a-z]+'
if 'x\\' in species and '[a-z' not in species:
species = species.replace('x\\', 'x\\\\')
# This will now match the 'n[a]*. Species name' part of the introduction
pattern += r'\.[ ]*('+genus+') ('+species+')'
# if the subspecies was specified, we know there must be some descriptor
# followed by 'subsp.' and the subspecies name
#
# i.e. the '{arbitrary text} [(subsp|var). name] {arbitrary text}' part of
# the introduction is now matched
if subspecies:
pattern += r'.*?(?:subsp|var)\.\s*('+subspecies+')'
return re.compile(pattern, flags=re.MULTILINE|re.DOTALL)
# --- Finding classifiers ---
#
# Always terminates the line
# Always set off by spaces (never punctuation - before or after)
# If a common name (of the form "* Common name") appears, there will be
# text between the date and classifiers
# Otherwise it's possible to have a "(parenthetical statement)" between
# the date and the classifier, but usually not
# It's possible that there are no classifiers
id_pattern = re.compile(r'([CEFIW ]+)\s*$', re.MULTILINE)
def ids_in(block):
"""Finds the classifiers for a species.
Parameters:
block - a block of text (a string) with its scope limited to a single
species or subspecies
Returns an empty string if there are no classifiers for this species.
"""
error = ''
sep = ''
for line in block.split('\n'):
matches = id_pattern.findall(line)
# If matches were found return the last match (the pattern is meant to
# be searched from the end of the line)
if matches:
return matches[-1].strip()
# if no matches found, there are no classifiers; return an empty string
return ''
# --- Finding provinces ---
#
# abbreviations and full state names are listed in geography.txt and
# locations.txt so grab each of them
# I could just use a string, but I want to '|'.join(loc_names) so it'll be
# easier to '|' the two to gether
loc_names = []
for fn in ('geography.txt', 'locations.txt'):
path = Path.joinpath(file_dir, fn)
with open(path) as f:
s = f.read()
# these are special regex characters, so escape them wherever they
# appear
for r in ('.', '(', ')'):
s = s.replace(r, '\\'+r)
# I want to '|' each province name, but since they have non-alphabetic
# characters I need to group each name w/o capturing, hence the (?:)
#
# Also cut off the last blank line
loc_names.extend('(?:'+m+')' for m in s.split('\n')[:-1])
# If one string is a substring of another, regex will match with whatever
# comes first in the pattern. We want to match the longest substrings possible
# so sort the location names by length
#
# Also replace spaces in each name with arbitrary whitespace
loc_names = sorted((loc.replace(' ', r'\s*') for loc in loc_names),
key=len, reverse=True)
# Assumes locations have the following format:
#
# {<beginning of line>, <;> or <,>} {location name (may include newlines)}{<;>,
# <,> or <end of line>}
loc_pattern_str = r'[^;,]\s*('+'|'.join(loc_names)+r')(?:[;,]|\s*?$|\s*?\n)'
loc_pattern = re.compile(loc_pattern_str, re.MULTILINE)
# --- Location Paragraph Pattern ---
#
# Assumes that the locations a species appears in meet the following format:
#
# 0{arbitrary white space}m; {locations on an arbitrary number of lines where
# countries are separated by ';' and states/provinces are separated by ','}.\n
#
# The line doesn't necessarily begin at 0, but a line does end at '.\n'
loc_text_pattern = re.compile(r'0[\)\]]?\s+?m;.*?(?<!Nfld|Labr|..St)'+
r'\.\s*?(?:\n|$)', re.DOTALL|re.MULTILINE)
loc_exception_pattern = re.compile(r'(?:Flowering.*?;|introduced;)' \
r'.*?\.\s*?(?:\n|$)', re.DOTALL|re.MULTILINE)
# load the key which maps full state and province names to their abbreviations
key_fn = 'key.json'
key_path = Path.joinpath(file_dir, key_fn)
key = {}
with open(key_path) as f:
key = json.load(f)
def locs_in(block):
"""Generates the locations that a species appears in.
Parameters:
block - a block of text (a string) with its scope limited to a single
species or subspecies
"""
# First find the locations paragraph
loc_match = loc_text_pattern.search(block)
if not loc_match:
loc_match = loc_exception_pattern.search(block)
loc_text = ""
if loc_match:
loc_text = loc_match[0]
# find all states and provinces in the paragraph
locs = loc_pattern.findall(re.sub('[Bb]aja\s*[Cc]alifornia', '', loc_text))
# remove duplicates
#locs = {key[loc] if loc in key else loc for loc in matches}
for loc in locs:
# replace all whitespace with a single space
loc = ' '.join(loc.split())
# convert full state and province names to their abbreviations
if loc in key:
loc = key[loc]
# Handle Nfld/Labr differentiation
# if specified, yield the relevant one
if '(Labr.)' in loc:
yield 'Labr.'
elif '(Nfld.)' in loc:
yield 'Nfld.'
# otherwise yield both if both
elif 'Nfld' in loc and 'Labr' in loc:
yield 'Nfld.'
yield 'Labr.'
# now that these cases have been handled, yield as usual
elif loc:
yield loc
if __name__ == '__main__':
main()
``` |
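Most of the behaviour above hinges on regular expressions that recognise headings such as `n. GENUS` and species introductions of the form `n. Genus species ...`. The fragment below is a stripped-down, self-contained stand-in for `build_intro_pattern`, applied to an invented heading; the genus name and treatment text are made up for illustration and are not taken from any real input file.
```python
import re

def intro_pattern(genus):
    """Simplified introduction pattern: 'n. Genus species ...' at line start."""
    return re.compile(r'^\d+\.\s*(' + genus + r') ([a-z\-]+)', re.MULTILINE)

sample = (
    "1. Carex aquatilis Wahlenb.\n"
    "Plants cespitose ... 0-3000 m; Alta., B.C.; Alaska, Colo.\n"
)
match = intro_pattern('Carex').search(sample)
print(match.groups())  # ('Carex', 'aquatilis')
```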
{
"source": "josiest/hangman",
"score": 4
} |
#### File: josiest/hangman/hangman.py
```python
import subprocess
import sys
import prompter
import random
from copy import deepcopy
from collections import OrderedDict
from textwrap import wrap
_CLEAR_WORD = 'cls'
if len(sys.argv) > 1 and sys.argv[1] == '-u':
_CLEAR_WORD = 'clear'
title = r"""================================================================================
|| || ||=\\ ||\ || //====|| ||\ /|| ||=\\ ||\ ||
|| || || \\ ||\\ || /| ||\\ //|| || \\ ||\\ ||
||=====|| ||===\\ || \\ || || ==== || \\/ || ||===\\ || \\ ||
|| || || \\ || \\ || \| || || || || \\ || \\ ||
|| || || \\ || \\|| \\====|| || || || \\ || \\||
================================================================================"""
def load_words(filename, sep=' '):
"""Load the words from the given file
Preconditions:
filename is a string as the name of a text file located in the same
directory as this module
contains only unique words as defined in the
module documentation, separated by sep
Postcondition
returns the list of words contained in the file
"""
with open(filename) as word_file:
return word_file.read().split(sep)
words = load_words('words.txt', '\n')
def get_menu(options):
"""Get the string representation of a menu from the specified options
Assumes options is an ordered dictionary that maps strings (option)
to strings (name)
assumes option length + name length + 2 < 20
Return a string along these lines
each option must appear in order
[--------------------]
[1] option
... ...
[--------------------]
"""
border = '[%s]' % ('-'*20)
menu = border
for option, name in options.items():
space = ' ' * (20 - (len(option) + 2) - len(name))
menu += '\n\n [%s]%s%s' % (option, space, name)
menu += '\n\n%s' % border
return menu
saves = {}
def new_game():
saves['game'] = Game(random.choice(words))
return saves['game'].play()
def continue_game():
return saves['game'].play()
menus = {
'main': {
'text': '%s\n\n%s' % (title, get_menu(OrderedDict([
('1', 'new game'), ('2', 'exit')]))),
'options': {'1': new_game, '2': lambda: {}},
'message': 'Please enter "1" or "2"'},
'continue': {
'text': '%s\n\n%s' % (title, get_menu(OrderedDict([
('1', 'continue'), ('2', 'new game'), ('3', 'exit')]))),
'options': {'1': continue_game, '2': new_game, '3': lambda: {}},
'message': 'Please enter "1", "2", or "3"'
},
'gameover': {
'text': '{}\n\n%s' % get_menu(OrderedDict([
('1', 'new game'), ('2', 'exit')])),
'options': {'1': new_game, '2': lambda: {}},
'message': 'Please enter "1" or "2"'}
}
def main():
prompter.clear(_CLEAR_WORD)
menu = menus['main']
while menu:
choice = prompter.get_option(menu)
menu = menu['options'][choice]()
prompter.clear(_CLEAR_WORD)
sys.exit()
class Game(object):
screen = r"""----------------------------------
|__| /\ |\ | /** |\/| /\ |\ |
| | /--\ | \| \__| | | /--\ | \|
----------------------------------
o-----------------o
%s
o-----------------o
[ Already Guessed ]
o-----------------o
%s
o-----------------o"""
label = '| {:%s15} |'
gameover_text = {
'win': 'You guessed the word: %s',
'lose': 'You lose. The word was: %s'}
def __init__(self, word):
self.word = word
self.visible_letters = ['_' for letter in word]
self.guesses = []
self.strikes = 0
def play(self):
limit = strike_limit(self.word)
win_condition = 'lose'
while self.strikes < limit:
guess = self._get_valid_guess()
self.guesses.append(guess)
self.make_guess(guess)
if not ('_' in self.visible_letters):
win_condition = 'win'
break
gameover_menu = deepcopy(menus['gameover'])
text = gameover_menu['text']
screen = self.get_gameover_screen(win_condition)
gameover_menu['text'] = text.format(screen)
return gameover_menu
def make_guess(self, guess):
"""Make a guess for the hidden word
assumes guess is an alphabetical string 1 character long or as long as
word and it's not in visible_letters. If as long as word, it meets
precondition of word as defined in the module documentation
mutates self such that strikes has increased by 1 if the guess was
incorrect
mutates visible_letters such that if the guess was correct guess now
appears in visible_letters where it appears in word
"""
if guess == self.word:
self.visible_letters = [letter for letter in self.word]
elif guess in self.word:
self.show_letters(guess)
else:
self.strikes += 1
def show_letters(self, guess):
"""Reveal the places where guess appears in word
Preconditions
guess - is a string
* lowercase alphabetical
* one character long
* in self.word
Postconditions
mutates visible_letters
* at each index where guess appears in self.word,
guess now appears in self.visible_letters
"""
for i, letter in enumerate(self.word):
if letter == guess:
self.visible_letters[i] = letter
def get_screen(self):
"""Get the current screen as a string"""
vl = ' '.join(self.visible_letters)
vl = self._get_label(vl, '^')
guesses = ' '.join(self.guesses)
guesses = self._get_label(guesses, '<')
return Game.screen % (vl, guesses)
def get_gameover_screen(self, state):
"""Get the gameover screen as a string
assumes state is a string of either 'win' or 'lose'
"""
gameover_text = Game.gameover_text[state] % self.word
return '%s\n\n%s' % (self.get_screen(), gameover_text)
def _get_label(self, text, justification):
label = Game.label % justification
lines = ['']
if text:
lines = wrap(text, 15)
lines = [label.format(line) for line in lines]
return '\n'.join(lines)
def _get_valid_guess(self):
vl = self.visible_letters
conditions = [
{'function': lambda data, letters: len(data) in (1, len(letters)),
'args': [vl],
'message': (
'Your guess must be 1 character or as long as the word')},
{'function': lambda data: data.isalpha(),
'message': 'Your guess must only contain alphabetical characters'},
{'function': lambda data, guesses: data.lower() not in guesses,
'args': [self.guesses],
'message': 'You\'ve already guessed that'}]
prompt = 'Guess a letter or a word'
guess = prompter.ask_for(self.get_screen(), prompt, conditions,
clear_word=_CLEAR_WORD)
return guess.lower()
def strike_limit(word):
"""Get the strike limit for a specific word
Precondition
word - is a string
* contains only alphabetical characters
Postconditions
Returns an integer (limit)
* positive
* greater than the amount of unique letters in word
* greater than or equal to 6
* less than or equal to 26
"""
return 6
if __name__ == '__main__':
main()
``` |
{
"source": "josip8/EwilTwin-attack",
"score": 3
} |
#### File: josip8/EwilTwin-attack/login.py
```python
import argparse
import sys
import datetime
import json
import logging
import re
import random
import requests
import shutil
from pyquery import PyQuery as pq
def main(username, password):
logging.basicConfig(filename='logging.log', level=logging.DEBUG)
session = requests.session()
uid, dtsg = login(session, username, password)
def login(session, username, password):
# Navigate to the Facebook homepage
response = session.get('https://facebook.com')
# Construct the DOM
dom = pq(response.text)
# Get the lsd value from the HTML. This is required to make the login request
lsd = dom('[name="lsd"]').val()
# Perform the login request
response = session.post('https://www.facebook.com/login.php?login_attempt=1', data={
'lsd': lsd,
'email': username,
'pass': password,
'default_persistent': '0',
'timezone': '-60',
'lgndim': '',
'lgnrnd': '',
'lgnjs': '',
'locale':'en_GB',
'qsstamp': ''
})
print len(response.text)
sys.stdout.flush()
try:
main(username=sys.argv[1], password=sys.argv[2])
except Exception, e:
logging.exception(e)
``` |
{
"source": "JosipAleric/P2-Zadace",
"score": 4
} |
#### File: P2-Zadace/zadace/zad5.py
```python
def function(n):
for i in range(n):
if i % 2 == 0 or i % 2 != 0:
yield i
iterator = iter(function(20))
while 1:
try:
print(next(iterator))
except:
break
```
#### File: P2-Zadace/zadace/zad8.py
```python
def dobrodosao(ime):
print ("Dobrodosao " + ime)
#Druga funkcija
pozdrav = (lambda ime: ("Pozdrav " + ime))
#Treca funkcija
def dobrodoslica(funkcija):
return funkcija("Josip")
print(dobrodoslica(dobrodosao))
print(dobrodoslica(pozdrav))
``` |
{
"source": "josipbudzaki/backtrader-binance",
"score": 3
} |
#### File: backtrader-binance/backtrader_binance/binance_feed.py
```python
from collections import deque
import pandas as pd
from backtrader.feed import DataBase
from backtrader.utils import date2num
from backtrader.utils.py3 import with_metaclass
from .binance_store import BinanceStore
class MetaBinanceData(DataBase.__class__):
def __init__(cls, name, bases, dct):
"""Class has already been created ... register"""
# Initialize the class
super(MetaBinanceData, cls).__init__(name, bases, dct)
# Register with the store
BinanceStore.DataCls = cls
class BinanceData(with_metaclass(MetaBinanceData, DataBase)):
params = (
('drop_newest', True),
)
# States for the Finite State Machine in _load
_ST_LIVE, _ST_HISTORBACK, _ST_OVER = range(3)
def __init__(self, **kwargs):
self.store = BinanceStore(**kwargs)
self._data = deque()
def _load(self):
if self._state == self._ST_OVER:
return False
elif self._state == self._ST_LIVE:
return self._load_kline()
elif self._state == self._ST_HISTORBACK:
if self._load_kline():
return True
else:
self._start_live()
def _load_kline(self):
try:
kline = self._data.popleft()
except IndexError:
return None
timestamp, open_, high, low, close, volume = kline
self.lines.datetime[0] = date2num(timestamp)
self.lines.open[0] = open_
self.lines.high[0] = high
self.lines.low[0] = low
self.lines.close[0] = close
self.lines.volume[0] = volume
return True
def _parser_dataframe(self, data):
df = data.copy()
df.columns = ['timestamp', 'open', 'high', 'low', 'close', 'volume']
df['timestamp'] = df['timestamp'].values.astype(dtype='datetime64[ms]')
df['open'] = df['open'].values.astype(float)
df['high'] = df['high'].values.astype(float)
df['low'] = df['low'].values.astype(float)
df['close'] = df['close'].values.astype(float)
df['volume'] = df['volume'].values.astype(float)
# df.set_index('timestamp', inplace=True)
return df
def _parser_to_kline(self, timestamp, kline):
df = pd.DataFrame([[timestamp, kline['o'], kline['h'],
kline['l'], kline['c'], kline['v']]])
return self._parser_dataframe(df)
def _process_kline_msg(self, msg):
"""https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams"""
if msg['e'] == 'kline':
if msg['k']['x']: # Is closed
kline = self._parser_to_kline(msg['k']['t'], msg['k'])
self._data.extend(kline.values.tolist())
elif msg['e'] == 'error':
raise msg
def _start_live(self):
self._state = self._ST_LIVE
self.put_notification(self.LIVE)
self.store.binance_socket.start_kline_socket(
self.symbol_info['symbol'],
self._process_kline_msg,
self.interval)
self.store.start_socket()
def haslivedata(self):
return self._state == self._ST_LIVE and self._data
def islive(self):
return True
def start(self):
DataBase.start(self)
self.interval = self.store.get_interval(self.p.timeframe, self.p.compression)
if self.interval is None:
self._state = self._ST_OVER
self.put_notification(self.NOTSUPPORTED_TF)
return
self.symbol_info = self.store.get_symbol_info(self.p.dataname)
if self.symbol_info is None:
self._state = self._ST_OVER
self.put_notification(self.NOTSUBSCRIBED)
return
if self.p.fromdate:
self._state = self._ST_HISTORBACK
self.put_notification(self.DELAYED)
klines = self.store.binance.get_historical_klines(
self.symbol_info['symbol'],
self.interval,
self.p.fromdate.strftime('%d %b %Y %H:%M:%S'))
if self.p.drop_newest:
klines.pop()
df = pd.DataFrame(klines)
df.drop(df.columns[[6, 7, 8, 9, 10, 11]], axis=1, inplace=True) # Remove unnecessary columns
df = self._parser_dataframe(df)
self._data.extend(df.values.tolist())
else:
self._start_live()
``` |
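`_process_kline_msg` above keeps only closed candles and `_parser_to_kline` turns each one into a one-row OHLCV frame with millisecond timestamps and float prices. The sketch below replays that conversion on a hand-written kline message so it runs without a Binance socket; the field names follow the kline payload used above, but the numeric values are invented for illustration.
```python
import pandas as pd

msg = {'e': 'kline', 'k': {'x': True, 't': 1609459200000,
                           'o': '28923.6', 'h': '29000.0',
                           'l': '28900.1', 'c': '28995.3', 'v': '12.4'}}

k = msg['k']
row = pd.DataFrame([[k['t'], k['o'], k['h'], k['l'], k['c'], k['v']]],
                   columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
row['timestamp'] = row['timestamp'].values.astype('datetime64[ms]')
for col in ['open', 'high', 'low', 'close', 'volume']:
    row[col] = row[col].astype(float)
print(row)
```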
{
"source": "josipbudzaki/btoandav20",
"score": 3
} |
#### File: btoandav20/sizers/oandav20backtestsizer.py
```python
import backtrader as bt
from btoandav20.commissions import OandaV20BacktestCommInfo
class OandaV20BacktestSizer(bt.Sizer):
params = dict(
percents=0, # percents of cash
amount=0, # amount of cash
avail_reduce_perc=0,
)
def _getsizing(self, comminfo, cash, data, isbuy):
position = self.broker.getposition(data)
if position:
return position.size
price = data.close[0]
avail = comminfo.getsize(price, cash)
if self.p.avail_reduce_perc > 0:
avail -= avail/100 * self.p.avail_reduce_perc
if self.p.percents != 0:
size = avail * (self.p.percents / 100)
elif self.p.amount != 0:
size = (avail / cash) * self.p.amount
else:
size = 0
return int(size)
class OandaV20BacktestPercentSizer(OandaV20BacktestSizer):
params = dict(
percents=5,
)
class OandaV20BacktestCashSizer(OandaV20BacktestSizer):
params = dict(
amount=50,
)
class OandaV20BacktestRiskSizer(bt.Sizer):
params = dict(
percents=0, # risk percents
amount=0, # risk amount
pips=5, # stop loss in pips
avail_reduce_perc=0,
)
def getsizing(self, data, isbuy, pips=None, price=None,
exchange_rate=None):
comminfo = self.broker.getcommissioninfo(data)
return self._getsizing(
comminfo, self.broker.getvalue(),
data, isbuy, pips, price, exchange_rate)
def _getsizing(self, comminfo, cash, data, isbuy, pips=None,
price=None, exchange_rate=None):
position = self.broker.getposition(data)
if position:
return position.size
if not pips:
pips = self.p.pips
price = data.close[0]
avail = comminfo.getsize(price, cash)
if self.p.avail_reduce_perc > 0:
avail -= avail / 100 * self.p.avail_reduce_perc
if self.p.percents != 0:
cash_to_use = cash * (self.p.percents / 100)
elif self.p.amount != 0:
cash_to_use = self.p.amount
else:
raise Exception('Either percents or amount is needed')
if not isinstance(comminfo, OandaV20BacktestCommInfo):
raise Exception('OandaV20CommInfo required')
mult = float(1 / 10 ** comminfo.p.pip_location)
price_per_pip = cash_to_use / pips
if not comminfo.p.acc_counter_currency and price:
# Acc currency is same as base currency
pip = price_per_pip * price
size = pip * mult
elif exchange_rate:
# Acc currency is neither same as base or counter currency
pip = price_per_pip * exchange_rate
size = pip * mult
else:
# Acc currency and counter currency are the same
size = price_per_pip * mult
size = min(size, avail)
return int(size)
class OandaV20BacktestRiskPercentSizer(OandaV20BacktestRiskSizer):
params = dict(
percents=5,
)
class OandaV20BacktestRiskCashSizer(OandaV20BacktestRiskSizer):
params = dict(
amount=50,
)
``` |
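The risk sizers above derive the position size from `cash_to_use / pips` and the pip multiplier `1 / 10 ** pip_location`, with three branches for how the account currency relates to the traded pair. The helper below is not part of btoandav20; it is a standalone sketch of the same arithmetic with illustrative numbers rather than broker data.
```python
def units_for_risk(risk_amount, stop_pips, pip_location=-4,
                   price=None, exchange_rate=None, counter_is_account_ccy=True):
    """Position size such that `stop_pips` of adverse movement loses about `risk_amount`."""
    mult = 1 / 10 ** pip_location        # e.g. 10000 for a pip location of -4
    price_per_pip = risk_amount / stop_pips
    if counter_is_account_ccy:
        return int(price_per_pip * mult)
    if exchange_rate is not None:        # account currency differs from both legs
        return int(price_per_pip * exchange_rate * mult)
    return int(price_per_pip * price * mult)  # account currency == base currency

# Risking 50 account-currency units over a 5 pip stop on a 4-decimal pair:
print(units_for_risk(50, 5))  # 100000 units -> about 10 per pip, 50 at the stop
```
With a 5 pip stop, 100,000 units of a 4-decimal pair move roughly 10 account-currency units per pip, so the worked example loses about 50 at the stop, matching the requested risk.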
{
"source": "josipd/jax",
"score": 4
} |
#### File: jax/experimental/jambax.py
```python
import collections
import ctypes
from functools import partial # pylint:disable=g-importing-member
import jax
from jax.interpreters import batching
from jax.interpreters import xla
from jax.lib import xla_client
from jaxlib import xla_extension
import numba
from numba import types as nb_types
import numba.typed as nb_typed
import numpy as onp
def _xla_shape_to_abstract(xla_shape):
return jax.abstract_arrays.ShapedArray(xla_shape.dimensions(),
xla_shape.element_type())
def _create_xla_target_capsule(ptr):
xla_capsule_magic = b"xla._CUSTOM_CALL_TARGET"
ctypes.pythonapi.PyCapsule_New.argtypes = [
ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
ctypes.pythonapi.PyCapsule_New.restype = ctypes.py_object
return ctypes.pythonapi.PyCapsule_New(ptr, xla_capsule_magic, None)
def _np_evaluation_rule(call_fn, abstract_eval_fn, *args, **kwargs):
output_shapes = abstract_eval_fn(*args)
outputs = tuple(
onp.empty(shape.shape, dtype=shape.dtype) for shape in output_shapes)
inputs = tuple(onp.zeros_like(arg) for arg in args)
call_fn(outputs + inputs, **kwargs)
return tuple(outputs)
def _naive_batching(call_fn, args, batch_axes):
# TODO(josipd): Check that the axes are all zeros. Add support when only a
# subset of the arguments have to be batched.
# TODO(josipd): Do this smarter than n CustomCalls.
return tuple(jax.lax.map(lambda x: call_fn(*x), args)), batch_axes
def _xla_translation(numba_fn, abstract_eval_fn, xla_builder, *args):
"""Returns the XLA CustomCall for the given numba function.
Args:
numba_fn: A numba function. For its signature, see the module docstring.
abstract_eval_fn: The abstract shape evaluation function.
xla_builder: The XlaBuilder instance.
*args: The positional arguments to be passed to `numba_fn`.
Returns:
The XLA CustomCall operation calling into the numba function.
"""
input_shapes = [xla_builder.get_shape(arg) for arg in args]
# TODO(josipd): Check that the input layout is the numpy default.
output_abstract_arrays = abstract_eval_fn(*[
_xla_shape_to_abstract(shape) for shape in input_shapes])
output_shapes = tuple(array.shape for array in output_abstract_arrays)
output_dtypes = tuple(array.dtype for array in output_abstract_arrays)
layout_for_shape = lambda shape: range(len(shape)-1, -1, -1)
output_layouts = map(layout_for_shape, output_shapes)
xla_output_shapes = [
xla_client.Shape.array_shape(*arg)
for arg in zip(output_dtypes, output_shapes, output_layouts)]
xla_output_shape = xla_client.Shape.tuple_shape(xla_output_shapes)
input_dtypes = tuple(shape.element_type() for shape in input_shapes)
input_dimensions = tuple(shape.dimensions() for shape in input_shapes)
xla_call_sig = nb_types.void(nb_types.CPointer(nb_types.voidptr),
nb_types.CPointer(nb_types.voidptr))
@numba.cfunc(xla_call_sig)
def xla_custom_call_target(output_ptrs, input_ptrs):
args = nb_typed.List()
for i in range(len(output_shapes)):
args.append(numba.carray(
output_ptrs[i], output_shapes[i], dtype=output_dtypes[i]))
for i in range(len(input_dimensions)):
args.append(numba.carray(
input_ptrs[i], input_dimensions[i], dtype=input_dtypes[i]))
numba_fn(args)
target_name = xla_custom_call_target.native_name.encode("ascii")
capsule = _create_xla_target_capsule(xla_custom_call_target.address)
xla_extension.register_custom_call_target(target_name, capsule, "Host")
return xla_client.ops.CustomCallWithLayout(
xla_builder,
target_name,
operands=args,
shape_with_layout=xla_output_shape,
operand_shapes_with_layout=input_shapes,
)
def numba_to_jax(name: str, numba_fn, abstract_eval_fn, batching_fn=None):
"""Create a jittable JAX function for the given Numba function.
Args:
name: The name under which the primitive will be registered.
numba_fn: The function that can be compiled with Numba.
abstract_eval_fn: The abstract evaluation function.
batching_fn: If set, this function will be used when vmap-ing the returned
function.
Returns:
A jitable JAX function.
"""
primitive = jax.core.Primitive(name)
primitive.multiple_results = True
def abstract_eval_fn_always(*args, **kwargs):
# Special-casing when only a single tensor is returned.
shapes = abstract_eval_fn(*args, **kwargs)
if not isinstance(shapes, collections.abc.Collection):
return [shapes]
else:
return shapes
primitive.def_abstract_eval(abstract_eval_fn_always)
primitive.def_impl(
partial(_np_evaluation_rule, numba_fn, abstract_eval_fn_always))
def _primitive_bind(*args):
result = primitive.bind(*args)
output_shapes = abstract_eval_fn(*args)
# Special-casing when only a single tensor is returned.
if not isinstance(output_shapes, collections.abc.Collection):
assert len(result) == 1
return result[0]
else:
return result
if batching_fn is not None:
batching.primitive_batchers[primitive] = batching_fn
else:
batching.primitive_batchers[primitive] = partial(
_naive_batching, _primitive_bind)
xla.backend_specific_translations["cpu"][primitive] = partial(
_xla_translation, numba_fn, abstract_eval_fn_always)
return _primitive_bind
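# A minimal usage sketch, assuming a Numba kernel that receives a single
# sequence of (output arrays..., input arrays...) and fills the outputs in
# place. The names ``add_one``, ``abstract_add_one`` and ``add_one_jax`` are
# illustrative placeholders, not part of this module.
#
#   def add_one(args):
#       out, x = args
#       out[:] = x + 1
#
#   def abstract_add_one(x):
#       return jax.abstract_arrays.ShapedArray(x.shape, x.dtype)
#
#   add_one_jax = numba_to_jax("add_one", numba.njit(add_one), abstract_add_one)
#   # jax.jit(add_one_jax)(x) then dispatches to the Numba kernel on CPU.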
``` |
{
"source": "josipd/proxTV",
"score": 2
} |
#### File: proxTV/prox_tv/prox_tv_build.py
```python
import os
import os.path
from cffi import FFI
ffi = FFI()
ffi.cdef("""
typedef struct {
...;
} Workspace;
// Condat's implementation.
void TV1D_denoise(double* input, double* output, const int width,
const double lambda);
// Ryan's implementation of Johnson's algorithm
void dp(int n, double *y, double lam, double *beta);
/* TV-L1 solvers */
int tautString_TV1(double *y, double lambda, double *x,int n);
int PN_TV1(double *y, double lambda, double *x, double *info, int n,
double sigma, Workspace *ws);
/* Weighted TV-L1 solvers */
int PN_TV1_Weighted(double* Y, double* W, double* X, double* info, int n,
double sigma, Workspace* ws);
int tautString_TV1_Weighted(double *y, double* lambda, double *x, int n);
/* TV-L2 solvers */
int more_TV2(double *y,double lambda, double *x, double *info, int n);
int PG_TV2(double *y, double lambda, double *x,double *info, int n);
int morePG_TV2(double *y, double lambda, double *x, double *info, int n,
Workspace *ws);
/* Weighted TV-L2 solvers */
int DR2L1W_TV(size_t M, size_t N, double* unary, double*W1, double*W2,
double *s, int nThreads, int maxit, double* info);
/* 2-dimensional TV solvers */
int PD2_TV(double *y, double *lambdas, double *norms, double *dims,
double *x, double *info, int *ns, int nds, int npen, int ncores,
int maxIters);
int DR2_TV(size_t M, size_t N, double*unary, double W1, double W2,
double norm1, double norm2, double*s, int nThreads, int maxit,
double* info);
int CondatChambollePock2_TV(size_t M, size_t N, double*Y, double lambda,
double*X, short alg, int maxit, double* info);
int Yang2_TV(size_t M, size_t N, double*Y, double lambda, double*X,
int maxit, double* info);
/* General-dimension TV solvers */
int PD_TV(double *y, double *lambdas, double *norms, double *dims,
double *x, double *info, int *ns, int nds, int npen,
int ncores, int maxIters);
/* TV-Lp solvers */
int GP_TVp(double *y, double lambda, double *x, double *info, int n,
double p, Workspace *ws);
int OGP_TVp(double *y, double lambda, double *x, double *info, int n,
double p, Workspace *ws);
int FISTA_TVp(double *y, double lambda, double *x, double *info, int n,
double p, Workspace *ws);
int FW_TVp(double *y, double lambda, double *x, double *info, int n,
double p, Workspace *ws);
int GPFW_TVp(double *y, double lambda, double *x, double *info, int n,
double p, Workspace *ws);
""")
sources = [os.path.join('src', fname) for fname in (
'condat_fast_tv.cpp', 'johnsonRyanTV.cpp', 'LPopt.cpp', 'TV2Dopt.cpp',
'TV2DWopt.cpp', 'TVgenopt.cpp', 'TVL1opt.cpp', 'TVL1Wopt.cpp',
'TVL2opt.cpp', 'TVLPopt.cpp', 'TVNDopt.cpp', 'utils.cpp'
)]
ffi.set_source(
'_prox_tv',
"""
typedef struct {
/* Size of memory vectors */
int n;
/* Generic memory which can be used by 1D algorithms */
double **d;
int maxd, nd;
int **i;
int maxi, ni;
/* Memory for inputs and outputs */
double *in,*out;
/* Warm restart variables */
short warm;
double *warmDual;
double warmLambda;
} Workspace;
""",
sources=sources,
define_macros=[('NOMATLAB', 1)],
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'],
libraries=['lapack']
)
if __name__ == '__main__':
ffi.compile()
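# A rough usage sketch, assuming the cffi out-of-line API mode used above: once
# compiled, the generated ``_prox_tv`` extension exposes ``ffi`` and ``lib``.
# The sample signal below is a placeholder.
#
#   from _prox_tv import ffi, lib
#   y = ffi.new('double[]', [1.0, 2.0, 10.0, 2.0, 1.0])
#   x = ffi.new('double[]', 5)
#   lib.TV1D_denoise(y, x, 5, 1.0)   # Condat's 1D TV-L1 denoiser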
``` |
{
"source": "josipgrgurica/django-docstringviewer",
"score": 2
} |
#### File: django-docstringviewer/docstringviewer/views.py
```python
from django.views.generic import TemplateView
from .conf import PROJECT_ROOT
from .utils import build_tree, get_module_docs
class DocsView(TemplateView):
"""
Builds project tree and module docs for given module import path in request.
If module path is not provided in request, first module from first app in
project is fetched.
"""
template_name = "docs/main.html"
def get_context_data(self, **kwargs):
"""
Builds project tree and module docs for given module import path in request.
If module path is not provided in request, first module from first app in
project is fetched.
"""
context = super(DocsView, self).get_context_data(**kwargs)
context['module_docs'] = get_module_docs(self.request.GET.get('module'))
if self.request.is_ajax():
self.template_name = "docs/docs.html"
else:
context['tree'] = build_tree(PROJECT_ROOT, [])
return context
``` |
{
"source": "josipjukic/Adversarial-NLP",
"score": 2
} |
#### File: attacks/neighbour_attacks/utils.py
```python
import numpy as np
def softmax(x):
exp = np.exp(x)
return exp / exp.sum()
def prob_normalize(x):
s = np.sum(x)
if s == 0: return x
return x / s
```
#### File: attacks/word_bug/adversarial_transforms.py
```python
import numpy as np
def adversarial_text(raw, nlp, indices, transform):
adv_words = [token.text_with_ws for token in nlp(raw)]
for i in indices:
if i >= len(adv_words): continue
adv_words[i] = transform(adv_words[i])
return ''.join(adv_words)
homos = {'-':'˗','9':'৭','8':'Ȣ','7':'𝟕','6':'б','5':'Ƽ','4':'Ꮞ','3':'Ʒ','2':'ᒿ','1':'l','0':'O',
"'":'`','a': 'ɑ', 'b': 'Ь', 'c': 'ϲ', 'd': 'ԁ', 'e': 'е', 'f': '𝚏', 'g': 'ɡ', 'h': 'հ',
'i': 'і', 'j': 'ϳ', 'k': '𝒌', 'l': 'ⅼ', 'm': 'm', 'n': 'ո', 'o':'о', 'p': 'р', 'q': 'ԛ',
'r': 'ⲅ', 's': 'ѕ', 't': '𝚝', 'u': 'ս', 'v': 'ѵ', 'w': 'ԝ', 'x': '×', 'y': 'у', 'z': 'ᴢ'}
def homoglyph(word):
N = len(word)-1 if word[-1] == ' ' else len(word)
N = max(1, N)
s = np.random.randint(0, N)
if word[s] in homos:
adv_char = homos[word[s]]
else:
adv_char = word[s]
adv_word = word[:s] + adv_char + word[s+1:]
return adv_word
def remove_char(word):
N = len(word)-1 if word[-1] == ' ' else len(word)
N = max(1, N)
s = np.random.randint(0, N)
adv_word = word[:s] + word[s+1:]
return adv_word
def flip_char(word):
N = len(word)-1 if word[-1] == ' ' else len(word)
N = max(1, N)
s = np.random.randint(0, N)
letter = ord(word[s])
adv_char = np.random.randint(0,25) + 97
adv_word = word[:s] + chr(adv_char) + word[s+1:]
return adv_word
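# A small usage sketch, assuming a spaCy pipeline (any tokenizer exposing
# ``text_with_ws`` works) and token indices picked by a scoring function.
# The model name and sentence below are placeholders.
#
#   import spacy
#   nlp = spacy.load('en_core_web_sm')
#   adv = adversarial_text('the movie was great', nlp, indices=[3],
#                          transform=homoglyph)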
```
#### File: attacks/word_bug/scoring.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import argparse
import model
import math
import numpy as np
import sys
def word_drop(model, batch, y_preds, num_classes, device):
inputs = batch[0]
losses = torch.zeros(inputs.shape)
target = None
for i in range(inputs.shape[0]):
if target:
index, vals = target
inputs[i-1,:] = vals
target = (i, torch.clone(inputs[i,:]))
inputs[i,:] = 0
with torch.no_grad():
out = model.predict_proba(batch)
losses[i,:] = out.gather(1, y_preds).squeeze()
if target:
index, vals = target
inputs[-1,:] = vals
return 1.-losses
def temporal(model, batch, y_preds, num_classes, device):
inputs, lengths = batch
new_preds = torch.zeros(inputs.shape)
losses = torch.zeros(inputs.shape)
for i in range(inputs.shape[0]):
preinputs = inputs[:i+1,:]
with torch.no_grad():
new_lengths = torch.min(lengths, torch.tensor(i+1).to(device))
preout = model.predict_proba((preinputs, new_lengths))
new_preds[i,:] = preout.gather(1, y_preds).squeeze()
losses[0,:] = new_preds[0,:] - 1.0/num_classes
for i in range(1, inputs.shape[0]):
losses[i,:] = new_preds[i,:] - new_preds[i-1,:]
return losses
def temporal_tail(model, batch, y_preds, num_classes, device):
inputs, lengths = batch
new_preds = torch.zeros(inputs.shape)
losses = torch.zeros(inputs.shape)
for i in range(inputs.shape[0]):
postinputs = inputs[i:,:]
with torch.no_grad():
new_lengths = torch.max(lengths-i, torch.tensor(1).to(device))
postout = model.predict_proba((postinputs, new_lengths))
new_preds[i,:] = postout.gather(1, y_preds).squeeze()
losses[-1,:] = new_preds[-1,:] - 1.0/num_classes
for i in range(inputs.shape[0]-1):
losses[i,:] = new_preds[i,:] - new_preds[i+1,:]
return losses
def combined_temporal(model, batch, y_preds, num_classes, device, alpha=1.):
temporal_score = temporal(model, batch, y_preds, num_classes, device)
temporal_tail_score = temporal_tail(model, batch, y_preds, num_classes, device)
return temporal_score + alpha*temporal_tail_score
def random(inputs, *args, **kwargs):
losses = torch.rand(inputs.size()[0], inputs.size()[1])
return losses
```
#### File: Adversarial-NLP/src/metrics.py
```python
import numpy as np
import torch
from tqdm.notebook import tqdm
def accuracy(y_pred, y_gold):
if y_pred.shape[1] == 1:
# binary
preds = torch.round(torch.sigmoid(y_pred)).squeeze()
else:
# multi-class
preds = torch.argmax(y_pred, dim=1)
correct = (preds == y_gold).float()
acc = correct.sum() / len(correct)
return acc.item()
def is_pareto_efficient(costs):
is_efficient = np.ones(costs.shape[0], dtype = bool)
for i, c in enumerate(costs):
if is_efficient[i]:
is_efficient[is_efficient] = np.any(costs[is_efficient]<c, axis=1) # keep any point with a lower cost
is_efficient[i] = True # and keep self
return is_efficient
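# A short illustrative example, assuming costs are arranged as an
# (n_points, n_costs) array where lower is better; the numbers are made up.
#
#   costs = np.array([[1.0, 4.0], [2.0, 2.0], [3.0, 3.0]])
#   is_pareto_efficient(costs)   # -> array([ True,  True, False])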
def init_tqdms(args, iterator):
epoch_bar = tqdm(desc='Training routine',
total=args.num_epochs,
position=0)
train_bar = tqdm(desc='Train set',
total=len(iterator['train']),
position=1)
val_bar = tqdm(desc='Valid set',
total=len(iterator['valid']),
position=1)
tqdms = dict(main=epoch_bar, train=train_bar, valid=val_bar)
return tqdms
``` |
{
"source": "josipklaric/python-playground",
"score": 3
} |
#### File: python-playground/src/jkcalc.py
```python
from caster.caster import as_int
def add(x, y):
return as_int(x) + as_int(y)
def substract(x, y):
return x - y
def multiply(x, y):
return x * y
def devide(x, y):
return x / y
# Test function
def _test():
assert add(1,2) == 3
assert add('1','2') == 3
if __name__ == '__main__':
_test()
``` |
{
"source": "josip-milic/asc_qwerty_test",
"score": 2
} |
#### File: rest-framework-tutorial/services/models.py
```python
from django.db import models
class Event(models.Model):
title = models.CharField(max_length=255)
date = models.DateTimeField()
description = models.TextField()
location_lat = models.DecimalField(max_digits=9, decimal_places=6)
location_lng = models.DecimalField(max_digits=9, decimal_places=6)
marker_type = models.CharField(max_length=255)
created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.title
``` |
{
"source": "josivantarcio/Desafios-em-Python",
"score": 4
} |
#### File: Desafios-em-Python/Desafios/desafio098.py
```python
def contador(i, f, p):
if p < 0:
p *= -1
if p == 0:
p = 1
if i <= f:
for x in range(i, f+p, p):
print(x, end=" ", flush=True)
sleep(0.5)
print('Fim')
if f <= i:
for x in range(i, f-p, -p):
print(x, end=" ", flush=True)
sleep(0.5)
print('Fim')
from time import sleep
#main
contador(1, 10, 1)
contador(10, 0, 2)
inicio = int(input('Início: '))
fim = int(input('Fim: '))
passo = int(input('Passo: '))
contador(inicio, fim, passo)
print('FIM')
```
#### File: Desafios-em-Python/Desafios/desafio113.py
```python
def printErro():
print('\033[31mErro! Digite um numero valido\033[m')
def leiaInt(n=0):
while True:
try:
valor = int(input(n))
break
except (ValueError, TypeError):
printErro()
return valor
def leiaReal(r):
for i in range(2, -1, -1):
try:
valor = float(input(r))
return valor
break
except (ValueError, TypeError, EOFError):
printErro()
print(f'Voce tem {i} tentativas restantes')
except (KeyboardInterrupt, KeyError):
print('\nErro! Programa suspenso pelo usuario')
return 0
break
print('Fim das tentativas! Inicie novamente')
n = leiaInt('Digite um numero inteiro: ')
f = leiaReal('Digite um numero real: ')
print(f'O numero inteiro foi {n} e o real {f}')
```
#### File: utilidadesCeV/menu/__init__.py
```python
def linha(tam=29):
return f'=' * tam
def leiaInt(n=0):
while True:
try:
valor = int(input(n))
break
except (ValueError, TypeError):
print('\033[31mErro. Digite valor valido\033[m')
return valor
def cabecalho(msg):
print(linha())
print(msg.center(29).upper())
print(linha())
def menu(lista):
cabecalho('menu principal')
for k, v in enumerate(lista):
print(f'{k+1}: {v}')
print(linha())
opcao = leiaInt('Sua Opção: ')
return opcao
def final():
print(linha())
print('ATÉ LOGO | SEE YOU LATER!'.center(29))
print(linha())
``` |
{
"source": "josix/cookiecutter-python-template",
"score": 2
} |
#### File: cookiecutter-python-template/hooks/post_gen_project.py
```python
import os
def remove_pipfile():
os.remove("Pipfile")
def remove_publish_pypi_github_action():
os.remove(".github/workflows/python-publish.yaml")
def remove_docker_file():
os.remove("Dockerfile")
def main():
if "{{ cookiecutter.dependency_management_tool }}" != "pipenv":
remove_pipfile()
if "{{ cookiecutter.build_pypi_package }}" == "n":
remove_publish_pypi_github_action()
if "{{ cookiecutter.build_docker_image }}" == "n":
remove_docker_file()
if __name__ == "__main__":
main()
``` |
{
"source": "josix/pycon.tw",
"score": 2
} |
#### File: proposals/forms/proposals.py
```python
from django import forms
from django.conf import settings
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from crispy_forms.helper import FormHelper
from core.utils import form_has_instance
from proposals.models import TalkProposal, TutorialProposal
from .mixins import (
RequestUserSpeakerValidationMixin,
TalkProposalMixin, TutorialProposalMixin,
)
class ProposalCreateForm(RequestUserSpeakerValidationMixin, forms.ModelForm):
def save(self, commit=True):
"""Fill user field on save.
"""
proposal = super().save(commit=False)
proposal.submitter = self._request.user
if commit:
proposal.save()
return proposal
class TalkProposalCreateForm(TalkProposalMixin, ProposalCreateForm):
"""Form used to create a talk proposal.
    Fields in this form are intentionally reduced to allow people to submit
a proposal very quickly, and fill in the details later.
"""
duration = forms.ChoiceField(
label=_('duration'),
choices=settings.TALK_PROPOSAL_DURATION_CHOICES,
)
class Meta(TalkProposalMixin.Meta):
fields = [
'title',
'category',
'duration',
'language',
'python_level',
'recording_policy',
'remoting_policy',
'first_time_speaker',
'referring_policy',
]
class TutorialProposalCreateForm(TutorialProposalMixin, ProposalCreateForm):
"""Form used to create a tutorial proposal.
    Fields in this form are intentionally reduced to allow people to submit
a proposal very quickly, and fill in the details later.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['duration'].disabled = True
class Meta(TutorialProposalMixin.Meta):
fields = [
'title', 'category', 'duration', 'language', 'python_level',
'recording_policy', 'remoting_policy'
]
class ProposalUpdateForm(forms.ModelForm):
@cached_property
def helper(self):
helper = FormHelper()
helper.template = '_includes/proposal_update_form.html'
helper.form_tag = False
helper.include_media = False
return helper
class TalkProposalUpdateForm(TalkProposalMixin, ProposalUpdateForm):
"""Form used to update a talk proposal.
    This is the complete editing form for a proposal. It should contain all
user-editable fields.
"""
duration = forms.ChoiceField(
label=_('duration'),
choices=settings.TALK_PROPOSAL_DURATION_CHOICES,
)
class Meta(TalkProposalMixin.Meta):
fields = [
'title',
'category',
'duration',
'language',
'abstract',
'python_level',
'objective',
'detailed_description',
'outline',
'supplementary',
'recording_policy',
'remoting_policy',
'first_time_speaker',
'slide_link',
'referring_policy',
]
class TutorialProposalUpdateForm(ProposalUpdateForm):
"""Form used to update a tutorial proposal.
    This is the complete editing form for a proposal. It should contain all
user-editable fields.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['duration'].disabled = True
class Meta(TutorialProposalMixin.Meta):
fields = [
'title',
'category',
'duration',
'language',
'abstract',
'python_level',
'objective',
'detailed_description',
'outline',
'supplementary',
'recording_policy',
'remoting_policy',
'slide_link',
]
class ProposalCancelForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not form_has_instance(self):
raise ValueError(
'Proposal cancel form must be initialized with an instance.'
)
class TalkProposalCancelForm(ProposalCancelForm):
class Meta:
model = TalkProposal
fields = ['cancelled']
class TutorialProposalCancelForm(ProposalCancelForm):
class Meta:
model = TutorialProposal
fields = ['cancelled']
``` |
{
"source": "josjosephine/dsa",
"score": 4
} |
#### File: AlgorithmicToolbox/Divide-and-Conquer/organizing_lottery.py
```python
from sys import stdin
# from bisect import bisect_left, bisect_right
def points_cover_naive(starts, ends, points):
assert len(starts) == len(ends)
count = [0] * len(points)
for index, point in enumerate(points):
for start, end in zip(starts, ends):
if start <= point <= end:
count[index] += 1
return count
def BinarySearch(a, x, direction):
if len(a) == 0:
return -1
left, right = 0, len(a)-1
while left <= right:
ave = (left + right) // 2
if x == a[ave]:
while left - 1 < ave <= right:
if x == a[ave]:
if direction:
ave += 1
else:
ave -= 1
else:
break
if direction:
return ave
else:
return ave + 1
elif x < a[ave]:
right = ave - 1
else:
left = ave + 1
return right + 1
def points_cover(starts, ends, points):
cnt = [0] * len(points)
starts = sorted(starts)
ends = sorted(ends)
for i in range(len(points)):
st = BinarySearch(starts, points[i], 1)
end = BinarySearch(ends, points[i], 0)
cnt[i] = st - end
return cnt
if __name__ == '__main__':
data = list(map(int, stdin.read().split()))
n, m = data[0], data[1]
input_starts, input_ends = data[2:2 * n + 2:2], data[3:2 * n + 2:2]
input_points = data[2 * n + 2:]
output_count = points_cover(input_starts, input_ends, input_points)
print(*output_count)
```
#### File: AlgorithmicToolbox/DynamicProgramming/partition_souvenirs.py
```python
from itertools import product
from sys import stdin
def partition3(values):
assert 1 <= len(values) <= 20
assert all(1 <= v <= 30 for v in values)
total = sum(values)
if len(values) < 3 or total % 3:
return 0
third = total // 3
table = [[0] * (len(values) + 1) for _ in range(third + 1)]
for i in range(1, third + 1):
for j in range(1, len(values) + 1):
ii = i - values[j - 1]
if values[j - 1] == i or (ii > 0 and table[ii][j - 1]):
table[i][j] = 1 if table[i][j - 1] == 0 else 2
else:
table[i][j] = table[i][j - 1]
return 1 if table[-1][-1] == 2 else 0
if __name__ == '__main__':
input_n, *input_values = list(map(int, stdin.read().split()))
assert input_n == len(input_values)
print(partition3(input_values))
```
#### File: AlgorithmicToolbox/DynamicProgramming/primitive_calculator.py
```python
def compute_operations(n):
assert 1 <= n <= 10 ** 6
cnt = [0] * (n + 1)
cnt[1] = 1
for i in range(2, n + 1):
indices = [i - 1]
if i % 2 == 0:
indices.append(i // 2)
if i % 3 == 0:
indices.append(i // 3)
mins = min([cnt[x] for x in indices])
cnt[i] = mins + 1
ptr = n
optimal_seq = [ptr]
while ptr != 1:
candidates = [ptr - 1]
if ptr % 2 == 0:
candidates.append(ptr // 2)
if ptr % 3 == 0:
candidates.append(ptr // 3)
ptr = min(
[(c, cnt[c]) for c in candidates],
key=lambda x: x[1]
)[0]
optimal_seq.append(ptr)
return reversed(optimal_seq)
if __name__ == '__main__':
input_n = int(input())
output_sequence = list(compute_operations(input_n))
print(len(output_sequence) - 1)
print(*output_sequence)
```
#### File: AlgorithmicToolbox/GreedyAlgorithm/maximum_loot.py
```python
from sys import stdin
def maximum_loot_value(capacity, weights, prices):
assert 0 <= capacity <= 2 * 10 ** 6
assert len(weights) == len(prices)
assert 1 <= len(weights) <= 10 ** 3
assert all(0 < w <= 2 * 10 ** 6 for w in weights)
assert all(0 <= p <= 2 * 10 ** 6 for p in prices)
price = 0
vpw = []
for w, v, i in zip(weights, prices, list(range(n))):
vpw.append((float(v / w), i))
vpw = sorted(vpw, key=lambda t: t[0], reverse=True)
j = 0
while capacity > 0 and j < n:
weight = weights[vpw[j][1]]
if weight > capacity:
price += capacity * vpw[j][0]
break
else:
price += prices[vpw[j][1]]
capacity -= weight
j += 1
return price
if __name__ == "__main__":
data = list(map(int, stdin.read().split()))
n, input_capacity = data[0:2]
input_prices = data[2:(2 * n + 2):2]
input_weights = data[3:(2 * n + 2):2]
opt_value = maximum_loot_value(input_capacity, input_weights, input_prices)
print("{:.10f}".format(opt_value))
``` |
{
"source": "josjsjen/latent_ode",
"score": 2
} |
#### File: josjsjen/latent_ode/camh.py
```python
import torch
import os
import pandas as pd
import numpy as np
class Simulated(object):
def __init__(self, root,
quantization=0.1, n_samples=None, device=torch.device("cpu")):
self.root = root
self.reduce = "average"
self.quantization = quantization
self.csv = pd.read_csv(self.root)
self.device = device
def processing(self):
df = self.csv
df = df.drop(['time'], axis=1)
self.dataset_obj = []
max_timestamps = 0
for id in df.id.values:
if df.loc[df.id == id].iloc[:, 1].shape[0] > max_timestamps:
max_timestamps = df.loc[df.id == id].iloc[:, 1].shape[0]
total_unique_ids = df['id'].unique()
for i in total_unique_ids:
patient_id = i
tt = np.arange(max_timestamps)
vals = df.loc[df.id == i].iloc[:, 2:].values
if max_timestamps - vals.shape[0] >0:
dup = np.stack([vals[-1] for _ in range(max_timestamps - vals.shape[0])], axis=0)
vals = np.vstack((vals, dup))
masks = np.copy(vals)
masks[~np.isnan(masks)] = 1
masks[np.isnan(masks)] = 0
labels=np.zeros(1)
# todo: confirm if this is the right way to impute missingness
vals = np.nan_to_num(vals, nan=0)
tt = torch.tensor(tt).to("cpu").to(dtype=torch.float32)
vals = torch.tensor(vals).to("cpu").to(dtype=torch.float32)
masks = torch.tensor(masks).to("cpu").to(dtype=torch.float32)
labels = torch.tensor(labels).to("cpu").to(dtype=torch.float32)
self.dataset_obj.append((patient_id, tt, vals, masks, labels))
return self.dataset_obj
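# A minimal usage sketch, assuming a CSV with ``id``, ``time`` and feature
# columns; the path below is a placeholder. ``processing`` returns one
# (record_id, timestamps, values, masks, labels) tuple of tensors per id.
#
#   dataset = Simulated(root='data/simulated.csv').processing()
#   record_id, tt, vals, masks, labels = dataset[0]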
``` |
{
"source": "JosJuice/fifoci",
"score": 2
} |
#### File: fifoci/runner/runner.py
```python
from PIL import Image
import argparse
import hashlib
import json
import os
import os.path
import requests
import shutil
import subprocess
import sys
import tempfile
import zipfile
def recent_enough():
"""A bit ugly, but checks that the version being ran is more recent than
the one introducing features we need for fifoci to not hang.
20e82ec08c9ab811b04664a6a4f9859924f712f0 adds the configuration option to
stop a FIFO log playback after the last frame was rendered (instead of
looping back to the first frame).
b7916f965530b0369bf08ed6bc9ec3ef20f7cd2f fixes a WX assert error that
causes freezes on Windows FifoCI (and is a close descendent of another
commits that adds DumpFramesSilent to remove more interactivity).
"""
return os.system('git merge-base --is-ancestor '
'b7916f965530b0369bf08ed6bc9ec3ef20f7cd2f HEAD') == 0
def find_parents(rev_hash):
"""List a given number of parents commits from a Git hash. Required until
the Buildbot starts getting this information from Github.
"""
out = []
ref = rev_hash
for i in range(50):
ref += "^"
hash = subprocess.check_output('git rev-parse ' + ref, shell=True).strip()
out.append(hash.decode('ascii'))
return out
def download_dff(url, path):
"""Downloads a missing DFF from the specified URL to a given FS path."""
resp = requests.get(url, stream=True)
if resp.status_code != 200:
print('DFF %s not found' % url)
return False
with open(path, 'wb') as fp:
for chunk in resp.iter_content(chunk_size=4096):
if chunk:
fp.write(chunk)
return True
def generate_targets_list(dff_dir, url_base):
"""Generates a list of targets and their respective output directory from a
given spec URL. If the required input files are missing, they are
downloaded from the URL given in the spec.
"""
url_base = url_base.rstrip('/')
out = []
spec = requests.get(url_base + '/dff/').json()
for target in spec:
path = os.path.join(dff_dir, target['filename'])
success = True
if not os.path.exists(path):
print('DFF %s does not exist, downloading...' % path)
success = download_dff(url_base + target['url'], path)
if success:
out.append((target['shortname'], path,
tempfile.mkdtemp(suffix='.fifoci-out')))
return out
def get_existing_images(url_base):
"""Downloads the list of images already present on the server to reduce
upload footprint. Only new images will be present in the result zip.
"""
return requests.get(url_base.rstrip('/') + '/existing-images/').json()
def spawn_tests(args, targets):
"""Spawn the test runner, which will run each log and write output images
to a given path.
"""
base_path = os.path.dirname(__file__)
backend, system, driver = args.type.split('-', 2)
# HACK: Since we use : as field separator, C:\ paths on windows break.
# Assume that we are only ever going to run fifoci on the system drive and
# strip the drive.
def strip_drive(p):
return p[2:] if p[1] == ':' else p
if system == 'win':
targets = [(t[0], strip_drive(t[1]), strip_drive(t[2]))
for t in targets]
target_descr = ' '.join(':'.join(target[1:]) for target in targets)
if system == 'lin':
ret = os.system('%s/linux/run_fifo_test.sh %s %s %s %s'
% (base_path, backend, driver, args.dolphin,
target_descr))
elif system == 'win':
ret = subprocess.call('powershell %s/windows/run_fifo_test.ps1 %s %s %s %s'
% (base_path, backend, driver, args.dolphin,
target_descr))
else:
raise RuntimeError('unsupported system: %r' % system)
if ret:
raise RuntimeError('run_fifo_test.sh returned %d' % ret)
def compute_image_hash(fn):
"""From a given image file, generate a hash of the pixel data of that
image.
"""
im = Image.open(fn)
data = im.convert('RGB').tobytes('raw', 'RGB')
return hashlib.sha1(data).hexdigest()
def generate_results_data(args, targets):
"""Writes the results to a ZIP file. Metadata is contained in a JSON
object, and images are stored as individual files.
"""
parents = find_parents(args.rev_base_hash)
if args.rev_base_hash != args.rev_hash:
parents = [args.rev_base_hash] + parents
meta = {
'type': args.type,
'rev': {
'parents': parents,
'hash': args.rev_hash,
'name': args.rev_name,
'submitted': args.rev_submitted.lower() in ['true', 'yes', '1'],
},
'results': {}
}
zf = zipfile.ZipFile(args.output, 'w')
already_existing = get_existing_images(args.url_base)
for dff_short_name, dff_path, out_path in targets:
result = {'hashes': []}
meta['results'][dff_short_name] = result
if os.path.exists(os.path.join(out_path, 'failure')):
result['failure'] = True
else:
result['failure'] = False
for i in range(1, 1001):
fn = os.path.join(out_path, 'frame-%03d.png' % i)
if not os.path.exists(fn):
break
hash = compute_image_hash(fn)
# HACK: Currently D3D on Windows inserts a black frame as the
# first frame of all recordings.
if i == 1 and hash in ('f971f36357cc45414090cecece55a91ee19aab29',
'44bba96a45cf665df718f81ea48f867e174999da'):
continue
result['hashes'].append(hash)
if hash not in already_existing:
zf.writestr('fifoci-result/%s.png' % hash,
open(fn, 'rb').read())
zf.writestr('fifoci-result/meta.json', json.dumps(meta).encode('utf-8'))
zf.close()
def remove_output_directories(targets):
"""Deletes the temporary directory created to store image data."""
for dff_short_name, dff_path, out_path in targets:
shutil.rmtree(out_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Run fifoCI tests on a given Dolphin build')
parser.add_argument('--type', required=True)
parser.add_argument('--dolphin', required=True)
parser.add_argument('--rev_base_hash', required=True)
parser.add_argument('--rev_hash', required=True)
parser.add_argument('--rev_name', required=True)
parser.add_argument('--rev_submitted', required=True)
parser.add_argument('--output', required=True)
parser.add_argument('--url_base', required=True)
parser.add_argument('--dff_dir', required=True)
args = parser.parse_args()
if not recent_enough():
print('The requested version is lacking features required for fifoci.')
print('Exiting early without providing results.')
sys.exit(1)
targets = generate_targets_list(args.dff_dir, args.url_base)
spawn_tests(args, targets)
generate_results_data(args, targets)
remove_output_directories(targets)
``` |
{
"source": "joskid/tiote",
"score": 2
} |
#### File: tiote/tiote/forms.py
```python
from django import forms
from django.core import validators
from django.utils.datastructures import SortedDict
from itertools import chain
from django.utils.encoding import StrAndUnicode, force_unicode
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.forms import widgets
from tiote import utils
mysql_types = ('varchar', 'char', 'text', 'tinytext', 'mediumtext', 'longtext', 'tinyint',
'smallint', 'mediumint', 'int', 'bigint', 'real', 'double', 'float', 'decimal', 'numeric',
'date', 'time', 'datetime', 'timestamp', 'tinyblob', 'blob', 'mediumblob', 'longblob', 'binary',
'varbinary', 'bit', 'enum', 'set')
pgsql_types = ('bigint', 'bigserial', 'bit', 'bit varying', 'boolean', 'bytea',
'character varying', 'character', 'cidr', 'date', 'double precision', 'inet', 'integer',
'lseg', 'macaddr', 'money', 'real', 'smallint', 'serial', 'text', 'time',
'time with time zone', 'timestamp', 'timestamp with time zone', 'uuid', 'xml')
pgsql_encoding = ('UTF8', 'SQL_ASCII', 'BIG5', 'EUC_CN', 'EUC_JP', 'EUC_KR', 'EUC_TW',
'GB18030', 'GBK', 'ISO_8859_5', 'ISO_8859_6', 'ISO_8859_7', 'ISO_8859_8', 'JOHAB',
'KOI8R', 'KOI8U', 'LATIN1', 'LATIN2', 'LATIN3', 'LATIN4', 'LATIN5', 'LATIN6', 'LATIN7',
'LATIN8', 'LATIN9', 'LATIN10', 'MULE_INTERNAL', 'WIN866', 'WIN874', 'WIN1250', 'WIN1251',
'WIN1252', 'WIN1253', 'WIN1254', 'WIN1255', 'WIN1256', 'WIN1257', 'WIN1258')
mysql_key_choices = ('primary','unique','index')
pgsql_key_choices = ('unique', 'primary', 'foreign')
mysql_other_choices = ('unsigned','binary','not null','auto increment' )
user_privilege_choices = ['SELECT', 'INSERT', 'UPDATE', 'DELETE', 'CREATE', 'DROP',
'ALTER', 'INDEX', 'CREATE TEMPORARY TABLES']
admin_privilege_choices = ('FILE','PROCESS','RELOAD', 'SHUTDOWN','SUPER')
pgsql_privileges_choices = ('INHERIT','CREATEDB','CREATEROLE','REPLICATION','SUPERUSER')
format_choices = ( ('SQL', 'sql'),('CSV', 'csv') )
export_choices = ( ('structure', 'structure'),('data', 'data') )
foreign_key_action_choices = ['no action', 'restrict', 'cascade', 'set null', 'set default']
class tt_CheckboxSelectMultiple(widgets.CheckboxSelectMultiple):
"""
    Copy of that found in stock django but added here in order to change its rendering (
addition of a class to part of its rendered html)
"""
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
has_id = attrs and 'id' in attrs
final_attrs = self.build_attrs(attrs, name=name)
output = [u'<ul class="inputs-list">']
# Normalize to strings
str_values = set([force_unicode(v) for v in value])
for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
# If an ID attribute was given, add a numeric index as a suffix,
# so that the checkboxes don't all have the same ID attribute.
if has_id:
final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
label_for = u' for="%s"' % final_attrs['id']
else:
label_for = ''
cb = forms.CheckboxInput(final_attrs, check_test=lambda value: value in str_values)
option_value = force_unicode(option_value)
rendered_cb = cb.render(name, option_value)
option_label = conditional_escape(force_unicode(option_label))
output.append(u'<li><label%s>%s <span>%s</span></label></li>' % (label_for, rendered_cb, option_label))
output.append(u'</ul>')
return mark_safe(u'\n'.join(output))
class tt_RadioFieldRenderer(widgets.RadioFieldRenderer):
"""
    Copy of that found in stock django but added here in order to change its rendering (
addition of a class to part of its rendered html)
"""
def render(self):
"""Outputs a <ul> for this set of radio fields."""
return mark_safe(u'<ul class="inputs-list">\n%s\n</ul>' % u'\n'.join([u'<li>%s</li>'
% force_unicode(w) for w in self]))
class InsertForm(forms.BaseForm):
'''
Dynamically created form which generates its fields along with the fields' options
from its parameters.
Does not make use of metaclasses so it subclasses forms.BaseForm directly.
    It loops through the parameter ``tbl_struct`` (the structure of a table) and then generates
    fields which would fit the description of respective columns
It also treats some fields specially as defined in ``tbl_indexes`` and the form's ``dialect``
'''
def __init__(self, dialect, tbl_struct, tbl_indexes=(), **kwargs):
# keys = ['column','type','null','default','character_maximum_length','numeric_precision', 'extra','column_type']
f = SortedDict()
# dict to increase performance
indexed_cols = utils.fns.parse_indexes_query(tbl_indexes)
# determing type of form fields for each column
for row in tbl_struct['rows']:
_classes = []
if row[1] in ('character varying', 'varchar','character', 'char'):
f[row[0]] = forms.CharField()
# if row[4]: f[row[0]].max_length = row[4] #max_length
elif row[1] in ('varbinary', 'bit', 'bit varying',):
f[row[0]] = forms.CharField()
# if row[4]: f[row[0]].max_length = row[4] #max_length
elif row[1] in ('text', 'tinytext', 'mediumtext', 'longtext', ):
f[row[0]] = forms.CharField(widget=forms.Textarea(attrs={'cols':'', 'rows':''}))
# if row[4]: f[row[0]].max_length = row[4] #max_length
elif row[1] in ('boolean', ): f[row[0]] = forms.BooleanField()
elif row[1] in ('tinyint', 'smallint', 'mediumint', 'int', 'bigint','integer',):
f[row[0]] = forms.IntegerField()
# if row[5]: f[row[0]].validators.append(validators.MaxLengthValidator(row[5]))
_classes.append('validate-integer')
elif row[1] in ('real', 'double', 'float', 'decimal', 'numeric', 'double precision'):
f[row[0]] = forms.FloatField()
# if row[5]: f[row[0]].validators.append(validators.MaxLengthValidator(row[5]))
_classes.append('validate-numeric')
elif row[1] in ('decimal', 'numeric', 'money',):
f[row[0]] = forms.DecimalField()
# if row[5]: f[row[0]].validators.append(validators.MaxLengthValidator(row[5]))
_classes.append('validate-numeric')
elif row[1] in ('date',):
f[row[0]] = forms.DateField()
_classes.append('validate-date')
elif row[1] in ('datetime', 'time','time with time zone','timestamp', 'timestamp with time zone',):
# no longer used a python field (date, datetime) because
# - of error generated when submitting fields which
# - are populated from the database
f[row[0]] = forms.CharField()
elif row[1] == 'set':
f[row[0]] = forms.MultipleChoiceField(widget=tt_CheckboxSelectMultiple())
                # parse the field description to a list with all the unnecessary quotes removed
choices = row[len(row)-1].replace("set(", "").replace(")","")
choices = choices.replace("'", "").split(",")
f[row[0]].choices = utils.fns.make_choices(choices, True)
elif row[1] == 'enum':
f[row[0]] = forms.ChoiceField()
                # parse the field description to a list with all the unnecessary quotes removed
choices = row[len(row)-1].replace("enum(", "").replace("\"","").replace(")","")
choices = choices.replace("'", "").split(",")
f[row[0]].choices = utils.fns.make_choices(choices, False)
# any field not currently understood (PostgreSQL makes use of a lot of user defined fields
# which is difficult to keep track of)
else: f[row[0]] = forms.CharField(widget=forms.Textarea(attrs={'cols':'', 'rows':''}))
#required fields
if row[2].lower() == 'no' or row[2] == False:
# the field row[2] is required
_classes.append("required")
                # the option above must be the last assignment to _classes because its index
# - must be the last one for the next lines of logic to work
else:
f[row[0]].required = False
# options common to all fields
# help_text
_il = [ row[len(row) - 1], ]
if dialect == 'mysql': _il.append(row[len(row) -2 ])
f[row[0]].help_text = " ".join(_il)
if row[3]: f[row[0]].default = row[3] #default
# work with indexes
if indexed_cols.has_key( row[0] ):
if dialect == 'mysql' and indexed_cols[ row[0] ].count("PRIMARY KEY"):
# make an indexed column with auto_increment flag not required (MySQL)
if row[len(row) - 2].count('auto_increment') > 0:
if _classes.count('required') > 0: _classes.pop()
f[ row[0] ].required = False
# width of the fields
if type(f[row[0]].widget) not in (forms.CheckboxSelectMultiple, tt_CheckboxSelectMultiple,):
_classes.append("span6")
# add the attribute classes
if f[row[0]].widget.attrs.has_key('class'):
f[row[0]].widget.attrs['class'] += " ".join(_classes)
else:
f[row[0]].widget.attrs.update({'class':" ".join(_classes)})
self.base_fields = f
forms.BaseForm.__init__(self, **kwargs)
class EditForm(InsertForm):
'''
Subclasses InsertForm to include the dynamic property of InsertForm as well as to
    add an option that specifies if the request would be for a new row or would be
an update for that row
'''
def __init__(self, dialect, tbl_struct, tbl_indexes=(), **kwargs):
InsertForm.__init__(self, dialect, tbl_struct, tbl_indexes, **kwargs)
# working with self.fields attribute because this is an instance of InsertForm
# - and not a whole form class definition
self.fields['save_changes_to'] = forms.ChoiceField(
label = 'save changes to',
            choices = (('update_row', 'Same row (UPDATE statement)',),
('insert_row', 'Another row (INSERT statement)')
),
initial = 'update_row',
widget = forms.RadioSelect(attrs={'class':'inputs-list'}, renderer = tt_RadioFieldRenderer)
)
# New Database Form
class mysqlDbForm(forms.Form):
def __init__(self, templates=None, users=None, charsets=None, **kwargs):
f = SortedDict()
f['name'] = forms.CharField(widget=forms.TextInput(attrs={'class':'required'}))
f['charset'] = forms.ChoiceField(
choices = utils.fns.make_choices(charsets),
initial = 'latin1'
)
self.base_fields = f
forms.BaseForm.__init__(self, **kwargs)
class pgsqlDbForm(forms.BaseForm):
def __init__(self, templates=None, users=None, charsets=None, **kwargs):
f = SortedDict()
f['name'] = forms.CharField(widget=forms.TextInput(attrs={'class':'required'}))
f['encoding'] = forms.ChoiceField(
choices = utils.fns.make_choices(pgsql_encoding),
initial = 'UTF8',
)
f['template'] = forms.ChoiceField(
choices = utils.fns.make_choices(templates),
required = False,
)
f['owner'] = forms.ChoiceField( choices = utils.fns.make_choices(users) ,
required = False, )
self.base_fields = f
forms.BaseForm.__init__(self, **kwargs)
#New Role/User Form
class mysqlUserForm(forms.BaseForm):
def __init__(self, dbs = None, groups=None, **kwargs):
f = SortedDict()
f['host'] = forms.CharField(
widget=forms.TextInput(attrs={'class':'required '}),
initial='localhost',
)
f['username'] = forms.CharField(
widget=forms.TextInput(attrs={'class':'required '})
)
f['password'] = forms.CharField(
widget=forms.PasswordInput(attrs={'class':''}),
required = False
)
f['access'] = forms.ChoiceField(
choices = (('all', 'All Databases'),('select', 'Selected Databases'),),
widget = forms.RadioSelect(attrs={'class':'addevnt hide_1'}),
label = 'Allow access to ',
)
f['select_databases'] = forms.MultipleChoiceField(
required = False,
widget = forms.CheckboxSelectMultiple(attrs={'class':'retouch'}),
choices = utils.fns.make_choices(dbs, True),
)
f['privileges'] = forms.ChoiceField(
            choices = (('all', 'All Privileges'),('select','Selected Privileges'),),
widget = forms.RadioSelect(attrs={'class':'addevnt hide_2'})
)
f['user_privileges'] = forms.MultipleChoiceField(
required = False,
widget = forms.CheckboxSelectMultiple(attrs={'class':'privileges'}),
choices = utils.fns.make_choices(user_privilege_choices, True),
)
f['administrator_privileges'] = forms.MultipleChoiceField(
required = False,
choices = utils.fns.make_choices(admin_privilege_choices, True) ,
widget = forms.CheckboxSelectMultiple(attrs={'class':'privileges'}),
)
f['options'] = forms.MultipleChoiceField(
choices = (('GRANT OPTION','Grant Option'),),
widget = forms.CheckboxSelectMultiple,
required = False,
)
self.base_fields = f
forms.BaseForm.__init__(self, **kwargs)
class pgsqlUserForm(forms.BaseForm):
def __init__(self, groups=None, dbs=None, **kwargs):
f = SortedDict()
f['role_name'] = forms.CharField(
widget = forms.TextInput(attrs={'class':'required'})
)
f['can_login'] = forms.CharField(
widget = forms.CheckboxInput
)
f['password'] = forms.CharField(
widget = forms.PasswordInput,
required = False
)
f['valid_until'] = forms.DateTimeField(
widget = forms.TextInput(attrs={}),
required = False)
f['connection_limit'] = forms.IntegerField(
widget=forms.TextInput(attrs={'class':'validate-integer'}),
required = False)
# f['comment'] = forms.CharField(
# widget = forms.Textarea(attrs={'cols':'', 'rows':''}),
# required = False)
f['role_privileges'] = forms.MultipleChoiceField(
required = False, widget = forms.CheckboxSelectMultiple,
choices = utils.fns.make_choices(pgsql_privileges_choices, True)
)
if groups:
f['group_membership'] = forms.MultipleChoiceField(
choices = utils.fns.make_choices(groups, True), required = False,
widget = forms.CheckboxSelectMultiple,)
self.base_fields = f
forms.BaseForm.__init__(self, **kwargs)
# table and columns creation form
class pgsqlTableForm(forms.BaseForm):
def __init__(self, engines=None, charsets=None, edit=False, column_count=1, column_form=False,
existing_tables = None, existing_columns = None, **kwargs):
f = SortedDict()
wdg = forms.Select(attrs={}) if existing_tables else forms.Select
if edit is False:
f['name'] = forms.CharField(widget=forms.TextInput(attrs={'class':'required'}))
f['of_type'] = forms.ChoiceField(
choices = utils.fns.make_choices(existing_tables),
required = False, widget = wdg
)
f['inherit'] = forms.ChoiceField(
choices = utils.fns.make_choices(existing_tables),
required = False, widget = wdg
)
# variable number of columns
for i in range( column_count ):
fi = str(i)
f['name_'+fi] = forms.CharField(
widget=forms.TextInput(attrs={'class':'required'}),
label = 'name')
f['type_'+fi] = forms.ChoiceField(
label = 'type',
choices = utils.fns.make_choices(pgsql_types),
widget = forms.Select(attrs={'class':'required'}),
)
f['length_'+fi] = forms.IntegerField(
widget=forms.TextInput(attrs={'class':'validate-integer'}),
label = 'length', required=False, )
f['key_'+fi] = forms.ChoiceField(
required = False,
widget = forms.Select(attrs={'class':'even needs:foreign-fields:foreign'
+' select_requires:references_'+fi+'|column_'+fi+':foreign'}),
choices = utils.fns.make_choices(pgsql_key_choices),
label = 'key',
)
f['references_'+fi] = forms.ChoiceField(
required= False, label = 'references',
choices = utils.fns.make_choices(existing_tables),
widget = forms.Select()
)
f['column_'+fi] = forms.ChoiceField(
required = False, label = 'column',
)
f['on_update_'+fi] = forms.ChoiceField(
required= False, label = 'on update',
choices = utils.fns.make_choices(foreign_key_action_choices, True)
)
f['on_delete_'+fi] = forms.ChoiceField(
required = False, label = 'on delete',
choices = utils.fns.make_choices(foreign_key_action_choices, True)
)
f['default_'+fi] = forms.CharField(
required = False,
label = 'default',
widget=forms.TextInput
)
f['other_'+fi] = forms.MultipleChoiceField(
label = 'other', required = False,
widget = forms.CheckboxSelectMultiple(),
choices = utils.fns.make_choices(['not null'], True))
if column_form:
f['insert_position'] = forms.ChoiceField(
choices = utils.fns.make_choices(['at the end of the table', 'at the beginning'], True)
+ utils.fns.make_choices(existing_columns,False,'--------','after'),
label = 'insert this column',
initial = 'at the end of the table',
widget = forms.Select(attrs={'class':'required'}),
)
self.base_fields = f
forms.BaseForm.__init__(self, **kwargs)
class mysqlTableForm(forms.BaseForm):
def __init__(self, engines=None, charsets=None, edit=False, column_count=1, column_form=False,
existing_tables = None, existing_columns = None, **kwargs):
f = SortedDict()
engine_list = []
default_engine = ''
for tup in engines:
engine_list.append((tup[0],))
if tup[1] == 'DEFAULT':
default_engine = tup[0]
if edit is False:
f['name'] = forms.CharField(widget=forms.TextInput(attrs={'class':'required'}))
f['charset'] = forms.ChoiceField(
choices = utils.fns.make_choices(charsets),
initial='latin1'
)
f['engine'] = forms.ChoiceField(
required = False,
choices = utils.fns.make_choices( engine_list ),
initial = default_engine
)
# variable amount of column_count
# field label's are directly tied to the corresponding template
for i in range( column_count ):
sfx = '_' + str(i)
f['name'+sfx] = forms.CharField(
widget=forms.TextInput(attrs={'class':'required'}),
label = 'name')
f['type'+sfx] = forms.ChoiceField(
choices = utils.fns.make_choices(mysql_types),
widget = forms.Select(attrs={'class':'required needs:values:set|enum select_requires:values'
+sfx+':set|enum select_requires:size'+sfx+':varchar'}),
initial = 'varchar',
label = 'type',
)
f['values'+sfx] = forms.CharField(
label = 'values', required = False,
help_text="Enter in the format: ('yes','false')",
)
f['size'+sfx] = forms.IntegerField(
widget=forms.TextInput(attrs={'class':'validate-integer'}),
label = 'size', required=False, )
f['key'+sfx] = forms.ChoiceField(
required = False,
widget = forms.Select(attrs={'class':'even'}),
choices = utils.fns.make_choices(mysql_key_choices),
label = 'key',
)
f['default'+sfx] = forms.CharField(
required = False,
label = 'default',
widget=forms.TextInput
)
f['charset'+sfx] = forms.ChoiceField(
choices = utils.fns.make_choices(charsets),
initial='latin1',
label = 'charset',
widget=forms.Select(attrs={'class':'required'})
)
f['other'+sfx] = forms.MultipleChoiceField(
choices = utils.fns.make_choices(mysql_other_choices, True),
widget = forms.CheckboxSelectMultiple(attrs={'class':'occupy'}),
required = False,
label = 'other',
)
if column_form:
f['insert_position'] = forms.ChoiceField(
choices = utils.fns.make_choices(['at the end of the table', 'at the beginning'], True)
+ utils.fns.make_choices(existing_columns,False,'--------','after'),
label = 'insert this column',
initial = 'at the end of the table',
widget = forms.Select(attrs={'class':'required'}),
)
# complete form creation process
self.base_fields = f
forms.BaseForm.__init__(self, **kwargs)
class LoginForm(forms.BaseForm):
def __init__(self, templates=None, choices="a", charsets=None, **kwargs):
f = SortedDict()
# choices = "a" || all choices
# choices = "m" || mysql dialect
# ,
# choices = "p" || postgresql dialect
database_choices = [ ('', 'select database driver'),]
if choices == "p" or choices == "a":
database_choices.append(('postgresql', 'PostgreSQL'))
if choices == "m" or choices == "a":
database_choices.append(('mysql', 'MySQL'))
f['host'] = forms.CharField(
initial = 'localhost', widget=forms.TextInput(attrs=({'class':'required'}))
)
f['username'] = forms.CharField(
widget=forms.TextInput(attrs=({'class':'required'}))
)
f['password'] = forms.CharField(
widget = forms.PasswordInput,
required = False,
)
f['database_driver'] = forms.ChoiceField(
choices = database_choices,
widget = forms.Select(attrs={
# 'class':'select_requires:connection_database:postgresql'
'class':'required'
}
),
)
f['connection_database'] = forms.CharField(
required=False,
help_text='Optional but needed if the PostgreSQL installation does not include the default `postgres` database'
)
self.base_fields = f
forms.BaseForm.__init__(self, **kwargs)
class pgsqlSequenceForm(forms.Form):
name = forms.CharField(
widget=forms.TextInput(attrs={'class':'required'})
)
incremented_by = forms.IntegerField(
required=False,
widget = forms.TextInput(attrs={'class':'validate-integer'})
)
min_value = forms.IntegerField(
required=False,
widget = forms.TextInput(attrs={'class':'validate-integer'})
)
max_value = forms.IntegerField(
required=False,
widget = forms.TextInput(attrs={'class':'validate-integer'})
)
start_value = forms.IntegerField(
required = False,
widget = forms.TextInput(attrs={'class':'validate-integer'})
)
cache_value = forms.IntegerField(
required =False,
widget = forms.TextInput(attrs={'class':'validate-integer'})
)
can_cycle = forms.ChoiceField(
label = 'Can cycle?', required = False,
widget = forms.CheckboxInput()
)
class QueryForm(forms.Form):
query = forms.CharField(label = u"Enter your query:",
        widget = forms.Textarea(attrs={'class':'required span10','rows':0, 'cols':0, 'style':'height:100px;resize:none;'},) )
def get_dialect_form(form_name, dialect):
'''
structure of dialect_forms:
{ 'form_name': [ postgresql version of form_name, mysql version of form_name] }
'''
dialect_forms = {
'DbForm': [pgsqlDbForm, mysqlDbForm],
'UserForm': [pgsqlUserForm, mysqlUserForm],
'TableForm': [pgsqlTableForm, mysqlTableForm],
# 'InsertForm': [pgsqlInsertForm, mysqlInsertForm]
}
return dialect_forms[form_name][0] if dialect == 'postgresql' else dialect_forms[form_name][1]
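# A brief usage sketch, assuming view code that already knows the dialect and
# the option lists; the keyword values below are placeholders that would be
# supplied by the surrounding utils/sql helpers in practice.
#
#   TableForm = get_dialect_form('TableForm', 'mysql')
#   form = TableForm(engines=engines, charsets=charsets, data=request.POST)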
```
#### File: tiote/tiote/sql.py
```python
from sqlalchemy import create_engine, text
from sqlalchemy.engine.url import URL
from sqlalchemy.exc import OperationalError, ProgrammingError, DatabaseError
import datetime
# sqlaclehemy modules
def stored_query(query, dialect):
# db of stored queries
stored_query_db = {
'postgresql': {
'variables':
"SHOW server_version",
'template_list':
"SELECT datname FROM pg_catalog.pg_database",
'group_list':
"SELECT rolname FROM pg_catalog.pg_roles WHERE rolcanlogin=False",
'db_list':
"SELECT datname FROM pg_catalog.pg_database WHERE datistemplate = 'f' ORDER BY datname ASC;",
'user_rpr':
"SELECT rolname, rolcanlogin, rolsuper, rolinherit, rolvaliduntil FROM pg_catalog.pg_roles",
'user_list':
"SELECT rolname FROM pg_catalog.pg_roles",
'table_list':
"SELECT schemaname, tablename FROM pg_catalog.pg_tables ORDER BY schemaname DESC",
'full_schema_list':
"SELECT schema_name, schema_owner FROM information_schema.schemata \
WHERE schema_name NOT LIKE '%pg_toast%' AND schema_name NOT LIKE '%pg_temp%'",
'user_schema_list':
"SELECT schema_name, schema_owner FROM information_schema.schemata \
WHERE schema_name NOT LIKE '%pg_toast%' AND schema_name NOT LIKE '%pg_temp%' \
AND schema_name NOT IN ('pg_catalog', 'information_schema')" # manually filled, might need to be adjusted if new
# - system catalogs are discovered
},
'mysql': {
'describe_databases':
"SELECT TABLE_SCHEMA, TABLE_NAME, TABLE_ROWS FROM `information_schema`.`tables`",
'db_list':
"SHOW databases",
'user_rpr':
"SELECT user.`Host`, user.`User` FROM user",
'user_list':
"SELECT user.`User` FROM user",
'supported_engines':
"SELECT engine, support FROM `information_schema`.`engines` \
WHERE support='yes' OR support='default'",
'charset_list':
"SELECT CHARACTER_SET_NAME FROM INFORMATION_SCHEMA.CHARACTER_SETS",
'variables':
'''SHOW SESSION VARIABLES WHERE `Variable_name`='version_compile_machine'
OR `Variable_name`='version_compile_os' OR `variable_name`='version'
'''
}
}
#
return stored_query_db[dialect][query]
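# For illustration, ``stored_query`` is a plain lookup into the mapping above:
#
#   stored_query('db_list', 'mysql') # -> "SHOW databases"
#   stored_query('user_list', 'postgresql') # -> "SELECT rolname FROM pg_catalog.pg_roles"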
def generate_query(query_type, dialect='postgresql', query_data=None):
'''
Generates queries of ``query_type`` with the given ``query_data``.
The generated queries are returned as a tuple of strings.
'''
# init
if query_data.has_key('schm'):
prfx = "{schm}.".format(**query_data) if dialect =='postgresql' else ""
else: prfx = ""
#queries
if query_type == 'get_single_row':
q0 = "SELECT * FROM {0}{tbl} WHERE {where} LIMIT 1".format(prfx, **query_data)
return (q0,)
elif query_type == 'browse_table':
q0 = "SELECT * FROM {0}{tbl}"
if query_data.has_key('sort_key') and query_data.has_key('sort_dir'):
q0 += " ORDER BY {sort_key} {sort_dir}"
q0 += " LIMIT {limit} OFFSET {offset}"
return (q0.format(prfx, **query_data),)
elif query_type == 'count_rows':
q0 = "SELECT count(*) FROM {0}{tbl}".format(prfx, **query_data)
return (q0,)
elif query_type == 'drop_table':
queries = []
for where in query_data['conditions']:
where['table'] = where['table'].replace("'", "")
queries.append( "DROP TABLE {0}{table}".format(prfx, **where))
return tuple(queries)
elif query_type == 'empty_table':
queries = []
for where in query_data['conditions']:
where['table'] = where['table'].replace("'", "")
queries.append( "TRUNCATE {0}{table}".format(prfx, **where) )
return tuple(queries)
elif query_type == 'delete_row':
queries = []
for whereCond in query_data['where_stmt'].split(';'):
q0 = "DELETE FROM {0}{tbl}".format(prfx, **query_data) + " WHERE "+whereCond
queries.append(q0)
return tuple(queries)
elif dialect == 'postgresql': #postgresql-only statements
if query_type == 'create_user':
# create role statement
q0 = "CREATE ROLE {role_name}".format(**query_data)
if query_data['can_login']:
q0 += " LOGIN"
if query_data['password']:
q0 += " ENCRYPTED PASSWORD '{password}'".format(**query_data)
if query_data['role_privileges']:
for option in query_data['role_privileges']:
q0 += " " + option
if query_data['connection_limit']:
q0 += " CONNECTION LIMIT {connection_limit}".format(**query_data)
if query_data['valid_until']:
q0 += " VALID UNTIL '{valid_until}'".format(**query_data)
if query_data['group_membership']:
q0 += " IN ROLE"
for grp_index in range( len(query_data['group_membership']) ):
if grp_index == len(query_data['group_membership']) - 1:
q0 += " " + query_data['group_membership'][grp_index]
else:
q0 += " " + query_data['group_membership'][grp_index] + ","
# if query_data['comment']:
# q1 = "COMMENT ON ROLE {role_name} IS \'{comment}\'".format(**query_data)
# queries.append(q1)
queries = (q0, )
return queries
elif query_type == 'drop_user':
queries = []
for cond in query_data:
q = "DROP ROLE {rolname}".format(**cond)
queries.append(q)
return tuple(queries)
elif query_type == 'create_db':
_l = []
_l.append("CREATE DATABASE {name}")
if query_data['encoding']: _l.append(" WITH ENCODING='{encoding}'")
if query_data['owner']: _l.append(" OWNER={owner}")
if query_data['template']: _l.append(" TEMPLATE={template}")
return ("".join(_l).format(**query_data), )
elif query_type == 'table_rpr':
q = "SELECT t2.tablename AS table, t2.tableowner AS owner, t2.tablespace, t1.reltuples::integer AS \"estimated row count\" \
FROM ( pg_catalog.pg_class as t1 INNER JOIN pg_catalog.pg_tables AS t2 ON t1.relname = t2.tablename) \
WHERE t2.schemaname='{schm}' ORDER BY t2.tablename ASC".format(**query_data)
return (q, )
elif query_type == 'indexes':
q0 = "SELECT kcu.column_name, kcu.constraint_name, tc.constraint_type \
FROM information_schema.key_column_usage AS kcu LEFT OUTER JOIN information_schema.table_constraints \
AS tc on (kcu.constraint_name = tc.constraint_name) WHERE kcu.table_name='{tbl}' \
AND kcu.table_schema='{schm}' AND kcu.table_catalog='{db}'".format(**query_data)
return (q0,)
elif query_type == 'primary_keys':
q0 = "SELECT kcu.column_name, kcu.constraint_name, tc.constraint_type \
FROM information_schema.key_column_usage AS kcu LEFT OUTER JOIN information_schema.table_constraints \
AS tc on (kcu.constraint_name = tc.constraint_name) WHERE kcu.table_name='{tbl}' \
AND kcu.table_schema='{schm}' AND kcu.table_catalog='{db}' AND \
(tc.constraint_type='PRIMARY KEY')".format(**query_data)
return (q0, )
elif query_type == 'table_structure':
q0 = "SELECT column_name as column, data_type as type, is_nullable as null, \
column_default as default, character_maximum_length, numeric_precision, numeric_scale, \
datetime_precision, interval_type, interval_precision FROM information_schema.columns \
WHERE table_catalog='{db}' AND table_schema='{schm}' AND table_name='{tbl}' \
ORDER BY ordinal_position ASC".format(**query_data)
return (q0, )
elif query_type == 'table_sequences':
q0 = 'SELECT sequence_name, nextval(sequence_name::regclass), \
setval(sequence_name::regclass, lastval() - 1, true) FROM information_schema.sequences'
return (q0, )
elif query_type == 'existing_tables':
# selects both tables and views
# q0 = "SELECT table_name FROM information_schema.tables WHERE table_schema='{schm}' \
#ORDER BY table_name ASC".format(**query_data)
# selects only tables
q0 = "SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname='{schm}' \
ORDER BY tablename ASC".format(**query_data)
return (q0, )
elif query_type == 'foreign_key_relation':
q0 = 'SELECT conname, confrelid::regclass AS "referenced_table", \
conkey AS array_local_columns, confkey AS array_foreign_columns \
FROM pg_constraint WHERE contype = \'f\' AND conrelid::regclass = \'{tbl}\'::regclass \
AND connamespace = (SELECT oid from pg_namespace WHERE nspname=\'{schm}\') \
'.format(**query_data)
return (q0, )
elif dialect == 'mysql': # mysql-only statements
if query_type == 'create_user':
# create user statement
queries = []
q1 = "CREATE USER '{username}'@'{host}'".format(**query_data)
if query_data['password']:
q1 += " IDENTIFIED BY '{password}'".format(**query_data)
queries.append(q1)
# grant privileges
q2 = "GRANT"
if query_data['privileges'] == 'all':
q2 += " ALL"
elif query_data['privileges'] == 'select':
priv_groups = ['user_privileges','administrator_privileges']
for priv_group in priv_groups:
for priv_in in range( len(query_data[priv_group])):
if priv_in == len(query_data[priv_group]) - 1:
q2 += ' ' + query_data[priv_group][priv_in]
else:
q2 += ' ' + query_data[priv_group][priv_in] + ','
if query_data['select_databases'] and len(query_data['select_databases']) > 1:
                for db in query_data['select_databases']: # multiple grant objects
                    q3 = q2 + ' ON {0}.*'.format(db)
# user specification
q3 += " TO '{username}'@'{host}'".format(**query_data)
# grant option
if query_data['options']:
q3 += " WITH {options[0]}".format(**query_data)
# append generated query to queries
queries.append(q3)
else:
# database access
if query_data['access'] == 'all':
q4 = q2 + ' ON *.*'
elif query_data['access'] == 'select':
q4 = q2 + ' ON {select_databases[0]}.*'.format(**query_data)
# user specification
q4 += " TO '{username}'@'{host}'".format(**query_data)
# grant option
if query_data['options']:
q4 += " WITH {options[0]}".format(**query_data)
queries.append(q4)
return tuple( queries )
elif query_type == 'create_db':
q = "CREATE DATABASE {name}".format(**query_data)
if query_data['charset']:
q += " CHARACTER SET {charset}".format(**query_data)
return (q, )
elif query_type == 'column_list':
return ("SELECT column_name FROM information_schema.columns WHERE table_schema='{db}' AND table_name='{tbl}'")
elif query_type == 'drop_user':
queries = []
for where in query_data:
q = "DROP USER '{user}'@'{host}'".format(**where)
queries.append(q)
return tuple(queries)
elif query_type == 'table_rpr':
q = "SELECT TABLE_NAME AS 'table', TABLE_ROWS AS 'rows', TABLE_TYPE AS 'type', ENGINE as 'engine' \
FROM `INFORMATION_SCHEMA`.`TABLES` WHERE TABLE_SCHEMA = '{db}'".format(**query_data)
return (q,)
elif query_type == 'indexes':
q0 = "SELECT DISTINCT kcu.column_name, kcu.constraint_name, tc.constraint_type \
from information_schema.key_column_usage as kcu, information_schema.table_constraints as tc WHERE \
kcu.constraint_name = tc.constraint_name AND kcu.table_schema='{db}' AND tc.table_schema='{db}' \
AND kcu.table_name='{tbl}'".format(**query_data)
return (q0, )
elif query_type == 'primary_keys':
q0 = "SELECT DISTINCT kcu.column_name, kcu.constraint_name, tc.constraint_type \
from information_schema.key_column_usage as kcu, information_schema.table_constraints as tc WHERE \
kcu.constraint_name = tc.constraint_name AND kcu.table_schema='{db}' AND tc.table_schema='{db}' \
AND kcu.table_name='{tbl}' AND tc.table_name='{tbl}' \
AND (tc.constraint_type='PRIMARY KEY')".format(**query_data)
return (q0, )
elif query_type == 'table_structure':
q0 = 'SELECT column_name AS "column", column_type AS "type", is_nullable AS "null", \
column_default AS "default", extra \
FROM information_schema.columns WHERE table_schema="{db}" AND table_name="{tbl}" \
ORDER BY ordinal_position ASC'.format(**query_data)
return (q0, )
elif query_type == 'raw_table_structure':
q0 = 'SELECT column_name AS "column", data_type AS "type", is_nullable AS "null", \
column_default AS "default", character_maximum_length, numeric_precision, numeric_scale, extra, column_type \
FROM information_schema.columns WHERE table_schema="{db}" AND table_name="{tbl}" \
ORDER BY ordinal_position ASC'.format(**query_data)
return (q0, )
elif query_type == 'existing_tables':
q0 = "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='{db}'".format(**query_data)
return (q0, )
def full_query(conn_params, query):
'''
executes and returns a query result
'''
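    # On success the return value looks like
    #   {'columns': [...], 'count': <rowcount>, 'rows': [(...), ...]}
    # while any database error is returned as its string representation.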
eng = create_engine(get_conn_link(conn_params))
    conn = eng.connect()
    try:
query_result = conn.execute(text(query))
d = {}
l = []
for row in query_result:
row = list(row)
for i in range(len(row)):
if row[i] == None:
row[i] = ""
elif type( row[i] ) == datetime.datetime:
row[i] = row[i].__str__()
l.append( tuple(row) )
d = {'columns': query_result.keys(),'count': query_result.rowcount,
'rows': l}
conn.close()
return d
except Exception as e:
conn.close()
return str(e)
def short_query(conn_params, queries):
"""
executes and returns the success state of the query
"""
eng = create_engine( get_conn_link(conn_params) )
conn = ''
try:
conn = eng.connect()
for query in queries:
query_result = conn.execute(text(query))
return {'status':'success', 'msg':''}
    except Exception as e:
        if conn:
            conn.close()
        return {'status':'fail', 'msg': str(e) }
def model_login(conn_params):
link = URL(conn_params['database_driver'], username = conn_params['username'],
password= conn_params['password'], host = conn_params['host'])
if conn_params['connection_database']:
link.database = conn_params['connection_database']
elif not conn_params['connection_database'] and conn_params['database_driver'] == 'postgresql':
link.database = 'postgres'
engine = create_engine(link)
conn = ''
dict_ret = {}
try:
conn = engine.connect()
except OperationalError as e:
dict_ret = {'login': False, 'msg': str(e)}
else:
# todo 'msg'
dict_ret = {'login': True, 'msg': ''}
conn.close()
return dict_ret
def get_conn_link(conn_params):
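    # e.g. (hypothetical credentials) {'dialect': 'mysql', 'username': 'root',
    # 'password': 'pw', 'host': 'localhost', 'db': 'test'} -> 'mysql://root:pw@localhost/test'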
return '{dialect}://{username}:{password}@{host}/{db}'.format(**conn_params)
```
#### File: tiote/utils/db.py
```python
import json
from django.http import HttpResponse
from django.conf import settings
from tiote import sql
import fns
def rpr_query(conn_params, query_type, get_data={}, post_data={}):
'''
    Run queries that have to be generated on the fly. Most queries depend on get_data,
    while a few depend on post_data.
    get_data and post_data come from request.GET and request.POST or form.cleaned_data.
'''
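    # query_type picks one of the branches below: the "no return" queries come back as a
    # JSON HttpResponse, most others as the dict produced by sql.full_query.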
    # common queries that return a success state as a dict only
no_return_queries = ('create_user', 'drop_user', 'create_db','create_table',
'drop_table', 'empty_table', 'delete_row', 'create_column', 'delete_column',)
if query_type in no_return_queries:
conn_params['db'] = get_data['db'] if get_data.has_key('db') else conn_params['db']
query_data = {}
query_data.update(get_data, **post_data)
q = sql.generate_query( query_type, conn_params['dialect'],query_data)
result = sql.short_query(conn_params, q)
return HttpResponse( json.dumps(result) )
# specific queries with implementations similar to both dialects
elif query_type == 'user_rpr':
if conn_params['dialect'] == 'mysql':
conn_params['db'] = 'mysql'
r = sql.full_query(conn_params,
sql.stored_query(get_data['query'],conn_params['dialect']) )
if type(r) == dict:
            return r
else:
return fns.http_500(r)
elif query_type in ('indexes', 'primary_keys', 'foreign_key_relation'):
if conn_params['dialect'] == 'postgresql': conn_params['db'] = get_data['db']
r = sql.full_query(conn_params,
sql.generate_query(query_type, conn_params['dialect'], get_data)[0])
return r
elif query_type in ('get_single_row',):
sub_q_data = {'tbl': get_data['tbl'],'db':get_data['db']}
if get_data.has_key('schm'):
sub_q_data['schm'] = get_data['schm']
# generate where statement
sub_q_data['where'] = ""
for ind in range(len(post_data)):
sub_q_data['where'] += post_data.keys()[ind].strip() + "="
sub_q_data['where'] += post_data.values()[ind].strip()
if ind != len(post_data) - 1: sub_q_data['where'] += ' AND '
# retrieve and run queries
conn_params['db'] = get_data['db']
# assert False
q = sql.generate_query(query_type, conn_params['dialect'], sub_q_data)
r = sql.full_query(conn_params, q[0])
return r
elif query_type in ('table_rpr', 'table_structure', 'raw_table_structure'):
conn_params['db'] = get_data['db']
sub_q_data = {'db': get_data['db'],}
if get_data.has_key('tbl'):
sub_q_data['tbl'] = get_data['tbl']
if get_data.has_key('schm'):
sub_q_data['schm'] = get_data['schm']
# make query
if conn_params['dialect'] == 'postgresql' and query_type == 'raw_table_structure':
q = 'table_structure'
else: q = query_type
r = sql.full_query(conn_params,
sql.generate_query(q, conn_params['dialect'], sub_q_data)[0] )
# further needed processing
if conn_params['dialect'] == 'postgresql' and query_type.count('table_structure'):
rwz = []
for tuple_row in r['rows']:
row = list(tuple_row)
_l = [ row[1] ]
if row[1] in ('bit', 'bit varying', 'character varying', 'character') and type(row[4]) is int:
_l.append( '({0})'.format(row[4]) )
elif row[1] in ('numeric', 'decimal') and type(row[5]) is int or type(row[6]) is int:
_l.append( '({0},{1})'.format(row[5], row[6]) )
elif row[1] in ('interval', 'time with time zone', 'time without time zone',
'timestamp with time zone', 'timestamp without time zone') and type(row[7]) is int:
_l.append( '({0})'.format(row[7]) )
# append the current row to rwz
if query_type == 'table_structure': rwz.append([row[0], "".join(_l), row[2], row[3] ])
elif query_type == 'raw_table_structure':
row.append("".join(_l))
rwz.append(row)
# change r['rows']
r['rows'] = rwz
# change r['columns']
if query_type == 'table_structure':
r['columns'] = [ r['columns'][0], r['columns'][1], r['columns'][2], r['columns'][3] ]
elif query_type == 'raw_table_structure': r['columns'].append('column_type')
return r
elif query_type == 'browse_table':
# initializations
sub_q_data = {'tbl': get_data['tbl'],'db':get_data['db']}
sub_q_data['offset'] = get_data['offset'] if get_data.has_key('offset') else 0
sub_q_data['limit'] = get_data['limit'] if get_data.has_key('limit') else getattr(settings, 'TT_MAX_ROW_COUNT', 100)
for item in ['schm', 'sort_key', 'sort_dir']:
if get_data.has_key(item): sub_q_data[item] = get_data[item]
# retrieve and run queries
conn_params['db'] = get_data['db']
keys = rpr_query(conn_params, 'primary_keys', sub_q_data)
count = sql.full_query(conn_params,
sql.generate_query('count_rows', conn_params['dialect'], sub_q_data)[0],
)['rows']
r = sql.full_query(conn_params,
sql.generate_query(query_type, conn_params['dialect'], sub_q_data)[0]
)
# format and return data
if type(r) == dict:
r.update({'total_count': count[0][0], 'offset': sub_q_data['offset'],
'limit':sub_q_data['limit'], 'keys': keys})
return r
else:
return fns.http_500(r)
# queries that just asks formats and return result
elif query_type in ('existing_tables',):
query_data = {'db':get_data['db'],}
if get_data.has_key('tbl'): query_data['tbl'] = get_data['tbl']
if conn_params['dialect'] == 'postgresql':
query_data['schm'] = get_data['schm']
conn_params['db'] = query_data['db']
q = sql.generate_query(query_type, conn_params['dialect'], query_data)
r = sql.full_query(conn_params,
q[0])
return r['rows']
# queries with dissimilar implementations
elif conn_params['dialect'] == 'postgresql':
return fns.http_500('query ({query_type}) not implemented!'.format(query_type=query_type))
elif conn_params['dialect'] == 'mysql':
if query_type == 'describe_databases':
            conn_params['db'] = 'INFORMATION_SCHEMA'
query = sql.stored_query(query_type, conn_params['dialect'])
return sql.full_query(conn_params, query)
else:
return fns.http_500('query not yet implemented!')
else:
return fns.http_500('dialect not supported!')
def fn_query(conn_params, query_name, get_data={}, post_data={}):
'''
    Keeps the rpr_query function above from growing further.
    It uses a mapping to know which function to call;
    all its queries are functions to be called, not sections of stored logic like rpr_query.
'''
query_map = {
'get_row': get_row
}
return query_map[query_name](conn_params, get_data, post_data)
def common_query(conn_params, query_name, get_data={}):
'''
    Run queries that need no dynamic generation. Queries here are already stored and would
only need to be executed on the database selected
get_data is a django QueryDict structure
'''
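    # Only the stored, dialect-specific queries listed below are run here; anything that
    # needs parameters goes through rpr_query/generate_query instead.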
pgsql_redundant_queries = ('template_list', 'group_list', 'user_list', 'db_list', 'schema_list')
mysql_redundant_queries = ('db_list','charset_list', 'supported_engines')
if conn_params['dialect'] == 'postgresql' and query_name in pgsql_redundant_queries :
# this kind of queries require no special attention
if query_name == 'schema_list':
if hasattr(settings, 'TT_SHOW_SYSTEM_CATALOGS'):
query_name = 'full_schema_list' if settings.TT_SHOW_SYSTEM_CATALOGS == True else "user_schema_list"
else: query_name = "user_schema_list" # default
        conn_params['db'] = get_data.get('db') if get_data.get('db') else conn_params['db']
r = sql.full_query(conn_params,
sql.stored_query(query_name, conn_params['dialect']))
return r['rows']
elif conn_params['dialect'] == 'mysql':
if query_name in mysql_redundant_queries :
# this kind of queries require no special attention
return sql.full_query(conn_params,
sql.stored_query(query_name, conn_params['dialect']))['rows']
def get_row(conn_params, get_data={}, post_data={}):
r = rpr_query(conn_params, 'get_single_row', get_data, post_data)
html = ""
if type(r) == str: return r
for ind in range(len(r['columns'])):
html += '<span class="column-entry">' + str(r['columns'][ind]) + '</span>'
html += '<br /><div class="data-entry"><code>' + str(r['rows'][0][ind]) + '</code></div>'
# replace all newlines with <br /> because html doesn't render newlines (\n) directly
html = html.replace('\n', '<br />')
return html
def insert_row(conn_params, get_data={}, form_data={}):
# set execution context
conn_params['db'] = get_data['db']
    # format form_data (from a form) according to the following rules:
    # * add single quotes to the values
    # * join list values into a comma-separated string
cols, values = [], []
for k in form_data:
if k in ('csrfmiddlewaretoken', 'save_changes_to'): continue
cols.append(k)
if type(form_data[k]) == list:
value = ",".join( form_data[k] )
values.append( fns.quote(value) )
else:
values.append( fns.quote( unicode(form_data[k]) ) )
# generate sql insert statement
q = "INSERT INTO {0}{tbl} ({1}) VALUES ({2})".format(
'{schm}.'.format(**get_data) if conn_params['dialect'] == 'postgresql' else '',
",".join(cols), ",".join(values), **get_data
)
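    # e.g. a hypothetical PostgreSQL call with get_data={'schm': 'public', 'tbl': 'users'}
    # and form_data={'name': 'ade'} yields (assuming fns.quote adds single quotes):
    #   INSERT INTO public.users (name) VALUES ('ade')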
# run query and return results
ret = sql.short_query(conn_params, (q, ))
if ret['status'] == 'success': ret['msg'] = 'Insertion succeeded'
# format status messages used in flow control (javascript side)
    # replace spaces and newlines with their HTML equivalents
ret['msg'] = '<div class="alert-message block-message {0} span8 data-entry"><code>\
{1}</code></div>'.format(
'success' if ret['status'] == 'success' else 'error',
        ret['msg'].replace(' ', '&nbsp;').replace('\n', '<br />')
)
return ret
def update_row(conn_params, indexed_cols={}, get_data={}, form_data={}):
# set execution context
conn_params['db'] = get_data['db']
    # format form_data (from a form) according to the following rules:
    # * add single quotes to the values
    # * join list values into a comma-separated string
cols, values = [], []
for k in form_data:
if k in ('csrfmiddlewaretoken', 'save_changes_to'): continue
cols.append(k)
if type(form_data[k]) == list:
value = ",".join( form_data[k] )
values.append( fns.quote(value) )
else:
values.append( fns.quote( unicode(form_data[k]) ) )
# generate SET sub statment
_l_set = []
for i in range(len(cols)):
short_stmt = "=".join([cols[i], values[i]])
_l_set.append(short_stmt)
# generate WHERE sub statement
_l_where = []
for key in indexed_cols:
short_stmt = "=".join([ key, fns.quote( unicode(form_data[key]) ) ])
_l_where.append(short_stmt)
# generate full query
q = "UPDATE {0}{tbl} SET {set_stmts} WHERE {where_stmts}".format(
'{schm}.'.format(**get_data) if conn_params['dialect'] == 'postgresql' else '',
        set_stmts = ", ".join(_l_set), where_stmts = " AND ".join(_l_where), **get_data
)
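    # e.g. a hypothetical call updating column "name" keyed on an indexed "id" column
    # yields roughly: UPDATE public.users SET name='ade' WHERE id='1'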
# run query and return results
ret = sql.short_query(conn_params, (q, ))
if ret['status'] == 'success': ret['msg'] = 'Row update succeeded'
# format status messages used in flow control (javascript side)
    # replace spaces and newlines with their HTML equivalents
ret['msg'] = '<div class="alert-message block-message {0} span12 data-entry"><code>\
{1}</code></div>'.format(
'success' if ret['status'] == 'success' else 'error',
        ret['msg'].replace(' ', '&nbsp;').replace('\n', '<br />')
)
return ret
def do_login(request, cleaned_data):
host = cleaned_data['host']
username = cleaned_data['username']
    password = cleaned_data['password']
database_driver = cleaned_data['database_driver']
dict_post = {'username':username,'password':password,'database_driver':database_driver, 'host':host}
if 'connection_database' in cleaned_data:
dict_post['connection_database'] = cleaned_data['connection_database']
dict_cd = sql.model_login(dict_post)
if not dict_cd['login']:
#authentication failed
return dict_cd
else:
# authentication succeeded
request.session['TT_LOGIN'] = 'true'
request.session['TT_USERNAME'] = username
request.session['TT_PASSWORD'] = password
request.session['TT_DIALECT'] = database_driver
request.session['TT_HOST'] = host
if 'connection_database' in dict_post:
request.session['TT_DATABASE'] = dict_post['connection_database']
return dict_cd
def get_home_variables(request):
p = fns.get_conn_params(request)
variables = {'user': p['username'], 'host': p['host']}
variables['dialect'] = 'PostgreSQL' if p['dialect'] == 'postgresql' else 'MySQL'
result = sql.full_query( p, sql.stored_query('variables', p['dialect']))
if p['dialect'] == 'postgresql':
variables['version'] = result['rows'][0]
return variables
elif p['dialect'] == 'mysql':
if type(result) == dict:
ll = result['rows']
d = {}
for i in range( len(ll) ):
d[ll[i][0]] = ll[i][1]
variables.update(d)
return variables
else:
return fns.http_500(result)
``` |
{
"source": "joskid/vardbg",
"score": 3
} |
#### File: vardbg/output/json_writer.py
```python
import copy
import time
import jsonpickle
from .writer import Writer
NEW_FRAME = "new_frame"
EXECUTE_FRAME = "exec_frame"
ADD_VARIABLE = "add_var"
CHANGE_VARIABLE = "change_var"
REMOVE_VARIABLE = "remove_var"
class JsonWriter(Writer):
def __init__(self, output_path):
self.output_path = output_path
self.data = {"events": []}
self._step = 0
def step(self):
self._step += 1
return self._step
def write_event(self, evt_name, **kwargs):
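        # Every event becomes a plain dict such as
        #   {"step": 3, "time": <ns timestamp>, "event": "add_var", ...extra kwargs}
        # and the accumulated list is serialized with jsonpickle in close().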
event = {"step": self.step(), "time": time.time_ns(), "event": evt_name}
event.update(kwargs)
self.data["events"].append(event)
def write_cur_frame(self, frame_info, output):
self.write_event(NEW_FRAME, frame_info=frame_info, output=output)
def write_frame_exec(self, frame_info, exec_time, exec_times):
# exec_times needs to be copied to preserve the *current* state
self.write_event(EXECUTE_FRAME, frame_info=frame_info, exec_time=exec_time, exec_times=exec_times.copy())
def write_add(self, var, val, history, *, action, plural):
self.write_event(
ADD_VARIABLE, var_name=var, value=val, history=copy.deepcopy(history), action=action, plural=plural,
)
def write_change(self, var, val_before, val_after, history, *, action):
self.write_event(
CHANGE_VARIABLE,
var_name=var,
value_before=val_before,
value_after=val_after,
history=copy.deepcopy(history),
action=action,
)
def write_remove(self, var, val, history, *, action):
self.write_event(REMOVE_VARIABLE, var_name=var, value=val, history=copy.deepcopy(history), action=action)
def write_summary(self, var_history, exec_start_time, exec_stop_time, frame_exec_times):
# frame_exec_times is skipped because it can be readily reconstructed during replay
self.data["var_history"] = list(var_history.items())
self.data["exec_start_time"] = exec_start_time
self.data["exec_stop_time"] = exec_stop_time
def close(self):
# Write all the collected data out together
with open(self.output_path, "w+") as f:
f.write(jsonpickle.dumps(self.data))
```
#### File: output/video_writer/renderer.py
```python
from pathlib import Path
from PIL import Image, ImageDraw, ImageFont
from .config import Config
from .gif_encoder import GIFEncoder
from .opencv_encoder import OpenCVEncoder
from .text_format import irepr
from .text_painter import TextPainter
from .webp_encoder import WebPEncoder
WATERMARK = "Generated by vardbg"
SAMPLE_CHARS = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ "
class FrameRenderer:
RED = 0
GREEN = 1
BLUE = 2
def __init__(self, path, config_path):
# Config
self.cfg = Config(config_path)
# Video encoder
ext = Path(path).suffix.lower()[1:]
if ext == "mp4":
self.encoder = OpenCVEncoder(path, "mp4v", self.cfg.fps, self.cfg.w, self.cfg.h)
elif ext == "gif":
self.encoder = GIFEncoder(path, self.cfg.fps)
elif ext == "webp":
self.encoder = WebPEncoder(path, self.cfg.fps)
else:
raise ValueError(f"Unrecognized file extension '{ext}'")
# Drawing context
self.draw = None
# Fonts
self.body_font = ImageFont.truetype(*self.cfg.font_body)
self.body_bold_font = ImageFont.truetype(*self.cfg.font_body_bold)
self.caption_font = ImageFont.truetype(*self.cfg.font_caption)
self.head_font = ImageFont.truetype(*self.cfg.font_heading)
self.intro_font = ImageFont.truetype(*self.cfg.font_intro)
# Whether the watermark has been drawn on this frame
self._watermark_drawn = False
# Sizes and positions to be calculated later
# Code body size
self.line_height = None
self.body_cols = None
self._body_rows = None
self.body_rows = None
# Output body start position
self.out_x = None
self.out_y = None
# Output body size
self.out_cols = None
self.out_rows = None
# Variable body start positions
self.vars_x = None
self.vars_y = None
self.ovars_x = None
self.ovars_y = None
# Variable body size
self.vars_cols = None
self.vars_rows = None
self.ovars_cols = None
self.ovars_rows = None
# Per-frame positions
self.last_var_x = None
self.last_var_y = None
self.ref_var_x = None
self.ref_var_y = None
# Current video frame (image)
self.frame = None
# Text size cache
self.text_size_cache = {}
# Prepare base frame
self.base_frame = None
self.prepare_base_frame()
# Write intro (if necessary)
if self.cfg.intro_text and self.cfg.intro_time:
self.write_intro()
def text_size(self, text, factor=10, **kwargs):
cache_key = (text, kwargs.get("font", None))
if cache_key in self.text_size_cache:
return self.text_size_cache[cache_key]
else:
# Multiply string and divide by the factor to get a more precise width
w, h = self.draw.textsize(text * factor, **kwargs)
w /= factor
# Save to cache and return
sizes = (w, h)
self.text_size_cache[cache_key] = sizes
return sizes
def calc_sizes(self):
# Calculate text sizes
w = self.text_size(SAMPLE_CHARS, font=self.body_font)[0] / len(SAMPLE_CHARS)
hw, hh = self.text_size("A", font=self.head_font)
_, mh = self.text_size("`^Ag", font=self.body_font)
_, ch = self.text_size("1p", font=self.caption_font)
# Code body size
self.line_height = mh * self.cfg.line_height
self.body_cols = int((self.cfg.var_x - self.cfg.sect_padding * 2) / w)
self._body_rows = (self.cfg.out_y - self.cfg.sect_padding * 2 - ch) / self.line_height
self.body_rows = int(self._body_rows)
# Output body start position
self.out_x = self.cfg.sect_padding
self.out_y = self.cfg.out_y + self.cfg.head_padding * 2 + hh
# Output body size
self.out_cols = self.body_cols
self.out_rows = round((self.cfg.h - self.out_y) / self.line_height)
# Variable body start positions
# Top-left X and Y for last variable section
self.vars_x = self.cfg.var_x + self.cfg.sect_padding
self.vars_y = self.cfg.head_padding * 2 + hh
# Columns and rows for last variable section
self.vars_cols = int((self.cfg.w - self.cfg.var_x - self.cfg.sect_padding * 2) / w)
self.vars_rows = int((self.cfg.ovar_y - self.cfg.head_padding * 2 - hh) / self.line_height)
# Top-left X and Y for other variables section
self.ovars_x = self.vars_x
self.ovars_y = self.cfg.ovar_y + self.vars_y
# Columns and rows for other variables section
self.ovars_cols = self.vars_cols
ovars_h = self.cfg.h - self.cfg.ovar_y
self.ovars_rows = int((ovars_h - self.cfg.sect_padding * 2) / self.line_height)
def get_color(self, col):
if col == self.RED:
return self.cfg.red
elif col == self.GREEN:
return self.cfg.green
else:
return self.cfg.blue
def draw_text_center(self, x, y, text, font, color):
w, h = self.text_size(text, font=font)
self.draw.text((x - w / 2, y - h / 2), text, font=font, fill=color)
def prepare_base_frame(self):
# Create new empty frame
self.new_frame(from_base=False)
# Draw output section
# Horizontal divider at 4/5 height
self.draw.line(((0, self.cfg.out_y), (self.cfg.var_x, self.cfg.out_y)), fill=self.cfg.fg_divider, width=1)
# Label horizontally centered and padded
out_center_x = self.cfg.var_x / 2
out_y = self.cfg.out_y + self.cfg.head_padding
self.draw_text_center(
out_center_x, out_y, "Output", self.head_font, self.cfg.fg_heading,
)
# Draw variable section
# Vertical divider at 2/3 width
self.draw.line(((self.cfg.var_x, 0), (self.cfg.var_x, self.cfg.h)), fill=self.cfg.fg_divider, width=1)
# Label horizontally centered in the variable section and vertically padded
var_center_x = self.cfg.var_x + ((self.cfg.w - self.cfg.var_x) / 2)
self.draw_text_center(var_center_x, self.cfg.head_padding, "Last Variable", self.head_font, self.cfg.fg_heading)
# Draw other variables section
# Horizontal divider at 1/3 height
self.draw.line(
((self.cfg.var_x, self.cfg.ovar_y), (self.cfg.w, self.cfg.ovar_y)), fill=self.cfg.fg_divider, width=1
)
# Label similar to the first, but in the others section instead
ovar_label_y = self.cfg.ovar_y + self.cfg.head_padding
self.draw_text_center(var_center_x, ovar_label_y, "Other Variables", self.head_font, self.cfg.fg_heading)
# Populate sizes and positions
self.calc_sizes()
# Save frame as base and reset current frame
self.base_frame = self.frame
self.frame = None
def new_frame(self, from_base=True):
# Create image
if from_base:
self.frame = self.base_frame.copy()
else:
self.frame = Image.new("RGB", (self.cfg.w, self.cfg.h), self.cfg.bg)
# Create drawing context
self.draw = ImageDraw.Draw(self.frame)
# Reset watermark drawn flag
self._watermark_drawn = False
def start_frame(self):
self.new_frame()
def finish_frame(self, var_state):
# Bail out if there's no frame to finish
if self.frame is None:
return
# Draw variable state (if available)
if var_state is not None:
self.draw_variables(var_state)
if self.cfg.watermark and not self._watermark_drawn:
self.draw_watermark()
self._watermark_drawn = True
self.encoder.write(self.frame)
def write_intro(self):
# Render frame
self.new_frame(from_base=False)
x = self.cfg.w / 2
y = self.cfg.h / 2
self.draw_text_center(x, y, self.cfg.intro_text, self.intro_font, self.cfg.fg_heading)
# Repeatedly write frame
frames = round(self.cfg.intro_time * self.cfg.fps)
for _ in range(frames):
self.finish_frame(None)
def draw_code(self, lines, cur_line):
cur_idx = cur_line - 1
# Construct list of (line, highlighted) tuples
hlines = [(line, i == cur_idx) for i, line in enumerate(lines)]
# Calculate start and end display indexes with an equivalent number of lines on both sides for context
ctx_side_lines = (self._body_rows - 1) / 2
start_idx = round(cur_idx - ctx_side_lines)
end_idx = round(cur_idx + ctx_side_lines)
        # Account for situations where not enough lines are available at the beginning
if start_idx < 0:
start_extra = abs(start_idx)
end_idx += start_extra
start_idx = 0
# Slice selected section
display_lines = hlines[start_idx:end_idx]
# Construct painter
x_start = self.cfg.sect_padding
y_start = self.cfg.sect_padding + self.line_height
x_end = self.cfg.var_x - self.cfg.sect_padding
painter = TextPainter(self, x_start, y_start, self.body_cols, self.body_rows, x_end=x_end, show_truncate=False)
# Render processed lines
for i, (line, highlighted) in enumerate(display_lines):
bg_color = self.cfg.highlight if highlighted else None
for token, text in line:
painter.write(text, bg_color=bg_color, **self.cfg.styles[token])
def draw_output(self, lines):
lines = lines[-self.out_rows :]
painter = TextPainter(self, self.out_x, self.out_y, self.out_cols, self.out_rows)
painter.write("\n".join(lines))
def draw_exec(self, nr_times, cur, avg, total):
plural = "" if nr_times == 1 else "s"
text = f"Line executed {nr_times} time{plural} — current time elapsed: {cur}, average: {avg}, total: {total}"
_, h = self.text_size(text, font=self.caption_font)
x = self.cfg.sect_padding
y = self.cfg.out_y - self.cfg.sect_padding - h
self.draw.text((x, y), text, font=self.caption_font)
def draw_last_var(self, state):
painter = TextPainter(self, self.vars_x, self.vars_y, self.vars_cols, self.vars_rows)
# Draw variable name
painter.write(state.name + " ")
# Draw action with color
self.last_var_x, self.last_var_y = painter.write(state.action + " ", bold=True, color=state.color)
painter.new_line()
# Draw remaining text
painter.write(state.text)
def draw_other_vars(self, state):
painter = TextPainter(self, self.ovars_x, self.ovars_y, self.ovars_cols, self.ovars_rows)
# Draw text
for idx, (var, values) in enumerate(state.other_history):
if values.ignored:
continue
if idx > 0:
painter.write("\n\n")
painter.write(var.name + ":")
for v_idx, value in enumerate(values): # sourcery off
painter.write("\n \u2022 ")
# Reference highlighting for latest value and matching variables only
if var.name == state.ref and v_idx == len(values) - 1:
v_pos = irepr(painter, value.value, state.value, bold=True, color=state.color, return_pos="H")
self.ref_var_x, self.ref_var_y = v_pos
else:
irepr(painter, value.value)
def draw_var_ref(self, state):
# Calculate X position to route the line on
# It should be as short as possible while not obscuring any variables or exceeding the scene width
right_line_x = min(
max(self.last_var_x, self.ref_var_x) + self.cfg.sect_padding, self.cfg.w - self.cfg.sect_padding / 2
)
sw, sh = self.text_size(" ", font=self.body_font)
# Draw the polyline
self.draw.line(
(
(self.last_var_x, self.last_var_y),
(right_line_x, self.last_var_y),
(right_line_x, self.ref_var_y - sh),
(self.ref_var_x, self.ref_var_y - sh),
(self.ref_var_x, self.ref_var_y),
),
fill=state.color,
width=2,
)
def draw_variables(self, state):
self.draw_other_vars(state)
self.draw_last_var(state)
if state.ref is not None:
self.draw_var_ref(state)
def draw_watermark(self):
# Get target bottom-right position
x = self.cfg.w - self.cfg.sect_padding
y = self.cfg.h - self.cfg.sect_padding
# Subtract text size to position it properly
w, h = self.text_size(WATERMARK, font=self.caption_font)
x -= w
y -= h
# Draw text
self.draw.text((x, y), WATERMARK, fill=self.cfg.fg_watermark, font=self.caption_font)
def close(self, var_state):
# Finish final frame
self.finish_frame(var_state)
# Close encoder
self.encoder.stop()
``` |
{
"source": "joskoanicic/carelle",
"score": 2
} |
#### File: carelle/arelle/FormulaEvaluator.py
```python
from arelle import (XPathContext, XbrlConst, XmlUtil, XbrlUtil, XmlValidate)
from arelle.FunctionXs import xsString
from arelle.ModelObject import ModelObject
from arelle.ModelFormulaObject import (aspectModels, Aspect, aspectModelAspect,
ModelFormula, ModelTuple, ModelExistenceAssertion,
ModelValueAssertion,
ModelFactVariable, ModelGeneralVariable, ModelVariable,
ModelParameter, ModelFilter, ModelAspectCover, ModelBooleanFilter)
from arelle.PrototypeInstanceObject import DimValuePrototype
from arelle.ModelValue import (QName)
import datetime, time, logging, re
from decimal import Decimal
from math import log10, isnan, isinf, fabs
from arelle.Locale import format_string
from collections import defaultdict
ModelDimensionValue = None
expressionVariablesPattern = re.compile(r"([^$]*)([$]\w[\w:.-]*)([^$]*)")
def evaluate(xpCtx, varSet, variablesInScope=False, uncoveredAspectFacts=None):
# for each dependent variable, find bindings
if variablesInScope:
stackedEvaluations = (xpCtx.evaluations, xpCtx.evaluationHashDicts)
else:
xpCtx.varBindings = {}
uncoveredAspectFacts = {}
xpCtx.evaluations = [] # list of evaluations
        xpCtx.evaluationHashDicts = [] # hash indexes of evaluations
try:
xpCtx.variableSet = varSet
if isinstance(varSet, ModelExistenceAssertion):
varSet.evaluationsCount = 0
if xpCtx.formulaOptions.timeVariableSetEvaluation:
varSet.timeEvaluationStarted = timeEvaluationsStarted = time.time()
varSet.evaluationNumber = 0
initialTraceCount = xpCtx.modelXbrl.logCount.get(logging.getLevelName('INFO'), 0)
evaluateVar(xpCtx, varSet, 0, {}, uncoveredAspectFacts)
if isinstance(varSet, ModelExistenceAssertion):
prog = varSet.testProg
if prog:
                assertionParamQnames = [] # set and then remove assertion variable qnames
for varRel in varSet.orderedVariableRelationships:
varQname = varRel.variableQname
var = varRel.toModelObject
if isinstance(var, ModelParameter) and varQname not in xpCtx.inScopeVars:
assertionParamQnames.append(varQname)
xpCtx.inScopeVars[varQname] = xpCtx.inScopeVars.get(var.parameterQname)
result = xpCtx.evaluateBooleanValue(prog, contextItem=varSet.evaluationsCount)
for varQname in assertionParamQnames:
xpCtx.inScopeVars.pop(varQname)
else:
result = varSet.evaluationsCount > 0
if result: varSet.countSatisfied += 1
else: varSet.countNotSatisfied += 1
if ((xpCtx.formulaOptions.traceSatisfiedAssertions and result) or
((xpCtx.formulaOptions.traceUnsatisfiedAssertions or
xpCtx.formulaOptions.errorUnsatisfiedAssertions ) and not result)):
xpCtx.modelXbrl.log(
"ERROR" if (xpCtx.formulaOptions.errorUnsatisfiedAssertions and not result) else "INFO",
"formula:assertionSatisfied" if result else "formula:assertionUnsatisfied",
_("%(label)s"),
modelObject=varSet, label=varSet.logLabel(),
messageCodes=("formula:assertionSatisfied", "formula:assertionUnsatisfied"))
if xpCtx.formulaOptions.traceVariableSetExpressionResult:
xpCtx.modelXbrl.info("formula:trace",
_("Existence Assertion %(xlinkLabel)s \nResult: %(result)s"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel, result=result)
msg = varSet.message(result)
if msg is not None:
xpCtx.inScopeVars[XbrlConst.qnEaTestExpression] = varSet.test
xpCtx.modelXbrl.info("message:" + (varSet.id or varSet.xlinkLabel or _("unlabeled variableSet")),
msg.evaluate(xpCtx),
modelObject=varSet,
messageCodes=("message:{variableSetID|xlinkLabel}",))
xpCtx.inScopeVars.pop(XbrlConst.qnEaTestExpression)
if xpCtx.formulaOptions.traceVariableSetExpressionResult and initialTraceCount == xpCtx.modelXbrl.logCount.get(logging._checkLevel('INFO'), 0):
xpCtx.modelXbrl.info("formula:trace",
_("Variable set %(xlinkLabel)s had no xpCtx.evaluations"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel)
if xpCtx.formulaOptions.timeVariableSetEvaluation:
xpCtx.modelXbrl.info("formula:time",
_("Variable set %(xlinkLabel)s time for %(count)s evaluations: %(time)s"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel, count=varSet.evaluationNumber,
time=format_string(xpCtx.modelXbrl.modelManager.locale, "%.3f", time.time() - timeEvaluationsStarted))
xpCtx.variableSet = None
except XPathContext.XPathException as err:
xpCtx.modelXbrl.error(err.code,
_("Variable set %(label)s \nException: %(error)s"),
modelObject=varSet, label=varSet.logLabel(), error=err.message)
xpCtx.variableSet = None
if xpCtx.formulaOptions.traceVariableSetExpressionResult:
xpCtx.modelXbrl.info("formula:trace",
_("Variable set %(xlinkLabel)s evaluations: %(evaluations)s x %(variables)s"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel,
evaluations=len(xpCtx.evaluations),
variables=max(len(e) for e in xpCtx.evaluations) if xpCtx.evaluations else 0)
del xpCtx.evaluations[:] # dereference
del xpCtx.evaluationHashDicts[:]
if variablesInScope:
xpCtx.evaluations, xpCtx.evaluationHashDicts = stackedEvaluations
else:
for vb in xpCtx.varBindings.values():
vb.close() # dereference
xpCtx.varBindings.clear() # dereference
uncoveredAspectFacts.clear() # dereference
pass
def evaluateVar(xpCtx, varSet, varIndex, cachedFilteredFacts, uncoveredAspectFacts):
if varIndex == len(varSet.orderedVariableRelationships):
# check if all fact vars are fallen back
anyFactVar = False; anyBoundFactVar = False
for vb in xpCtx.varBindings.values():
if vb.isFactVar:
anyFactVar = True
if not vb.isFallback: anyBoundFactVar = True
if xpCtx.varBindings and anyFactVar and not anyBoundFactVar:
if xpCtx.formulaOptions.traceVariableSetExpressionResult:
xpCtx.modelXbrl.info("formula:trace",
_("Variable set %(xlinkLabel)s skipped evaluation, all fact variables have fallen back"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel)
return
# record completed evaluation, for fallback blocking purposes
fbVars = set(vb.qname for vb in xpCtx.varBindings.values() if vb.isFallback)
thisEvaluation = tuple(vb.matchableBoundFact(fbVars) for vb in xpCtx.varBindings.values())
if evaluationIsUnnecessary(thisEvaluation, xpCtx.evaluationHashDicts, xpCtx.evaluations):
if xpCtx.formulaOptions.traceVariableSetExpressionResult:
xpCtx.modelXbrl.info("formula:trace",
_("Variable set %(xlinkLabel)s skipped non-different or fallback evaluation, duplicates another evaluation"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel)
varSet.evaluationNumber += 1
if xpCtx.formulaOptions.timeVariableSetEvaluation:
now = time.time()
xpCtx.modelXbrl.info("formula:time",
_("Variable set %(xlinkLabel)s skipped evaluation %(count)s: %(time)s sec"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel, count=varSet.evaluationNumber,
time=format_string(xpCtx.modelXbrl.modelManager.locale, "%.3f", now - varSet.timeEvaluationStarted))
varSet.timeEvaluationStarted = now
if xpCtx.isRunTimeExceeded: raise XPathContext.RunTimeExceededException()
xpCtx.modelXbrl.profileActivity("... evaluation {0} (skipped)".format(varSet.evaluationNumber), minTimeToShow=10.0)
return
xpCtx.modelXbrl.profileActivity("... evaluation {0}".format(varSet.evaluationNumber), minTimeToShow=10.0)
for i, fb in enumerate(thisEvaluation):
while i >= len(xpCtx.evaluationHashDicts): xpCtx.evaluationHashDicts.append(defaultdict(set))
xpCtx.evaluationHashDicts[i][hash(fb)].add(len(xpCtx.evaluations)) # hash and eval index
xpCtx.evaluations.append(thisEvaluation) # complete evaluations tuple
# evaluate preconditions
for precondition in varSet.preconditions:
result = precondition.evalTest(xpCtx)
if xpCtx.formulaOptions.traceVariableSetExpressionResult:
xpCtx.modelXbrl.info("formula:trace",
_("Variable set %(xlinkLabel)s \nPrecondition %(precondition)s \nResult: %(result)s"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel, precondition=precondition.xlinkLabel, result=result)
if not result: # precondition blocks evaluation
if xpCtx.formulaOptions.timeVariableSetEvaluation:
varSet.evaluationNumber += 1
now = time.time()
xpCtx.modelXbrl.info("formula:time",
_("Variable set %(xlinkLabel)s precondition blocked evaluation %(count)s: %(time)s sec"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel, count=varSet.evaluationNumber,
time=format_string(xpCtx.modelXbrl.modelManager.locale, "%.3f", now - varSet.timeEvaluationStarted))
varSet.timeEvaluationStarted = now
if xpCtx.isRunTimeExceeded: raise XPathContext.RunTimeExceededException()
return
# evaluate variable set
if isinstance(varSet, ModelExistenceAssertion):
varSet.evaluationsCount += 1
else:
if isinstance(varSet, ModelTuple):
result = "(tuple)"
traceOf = "Tuple"
elif isinstance(varSet, ModelFormula):
result = xpCtx.evaluate(varSet.valueProg)
traceOf = "Formula"
elif isinstance(varSet, ModelValueAssertion):
result = xpCtx.evaluateBooleanValue(varSet.testProg)
if result: varSet.countSatisfied += 1
else: varSet.countNotSatisfied += 1
msg = varSet.message(result)
if msg is not None:
xpCtx.inScopeVars[XbrlConst.qnVaTestExpression] = varSet.test
xpCtx.modelXbrl.info("message:" + (varSet.id or varSet.xlinkLabel or _("unlabeled variableSet")),
msg.evaluate(xpCtx),
modelObject=varSet,
label=varSet.logLabel(),
messageCodes=("message:{variableSetID|xlinkLabel}",))
xpCtx.inScopeVars.pop(XbrlConst.qnVaTestExpression)
if ((xpCtx.formulaOptions.traceSatisfiedAssertions and result) or
((xpCtx.formulaOptions.traceUnsatisfiedAssertions or
xpCtx.formulaOptions.errorUnsatisfiedAssertions ) and not result)):
_modelObjects = [varSet]
factVarBindings = []
for vb in sorted(xpCtx.varBindings.values(), key=lambda _vb: _vb.qname):
if vb.isFallback:
factVarBindings.append(", \n${}: fallback {}".format(vb.qname, xpCtx.flattenSequence(vb.values)))
else:
if vb.isBindAsSequence:
_modelObjects.extend(vb.yieldedEvaluation)
else:
_modelObjects.append(vb.yieldedFact)
factVarBindings.append(", \n${}: {} context {}".format(vb.qname, vb.yieldedFact.qname, vb.yieldedFactContext.id))
xpCtx.modelXbrl.log(
"ERROR" if (xpCtx.formulaOptions.errorUnsatisfiedAssertions and not result) else "INFO",
"formula:assertionSatisfied" if result else "formula:assertionUnsatisfied",
_("%(label)s%(factVarBindings)s"),
modelObject=_modelObjects, label=varSet.logLabel(),
factVarBindings="".join(factVarBindings) + ("\n" if factVarBindings else ""),
messageCodes=("formula:assertionSatisfied", "formula:assertionUnsatisfied"))
del _modelObjects[:]
traceOf = "Value Assertion"
if xpCtx.formulaOptions.traceVariableSetExpressionResult:
label = varSet.logLabel()
expression = varSet.expression
xpCtx.modelXbrl.info("formula:trace",
_("%(variableSetType)s %(xlinkLabel)s{0} \nExpression: %(expression)s \nEvaluated: %(evaluatedExpression)s \nResult: %(result)s")
.format(" \n%(label)s" if label else ""),
modelObject=varSet, variableSetType=traceOf, xlinkLabel=varSet.xlinkLabel,
label=label, result=result, expression=expression,
evaluatedExpression=''.join(xpCtx.traceEffectiveVariableValue(varSet,expr)
for grp in expressionVariablesPattern.findall(expression)
for expr in grp))
if isinstance(varSet, ModelFormula) and varSet.outputInstanceQname in xpCtx.inScopeVars:
newFact = produceOutputFact(xpCtx, varSet, result)
else:
newFact = None
if varSet.hasConsistencyAssertion:
from arelle import FormulaConsisAsser
FormulaConsisAsser.evaluate(xpCtx, varSet, newFact)
if xpCtx.formulaOptions.timeVariableSetEvaluation:
varSet.evaluationNumber += 1
now = time.time()
xpCtx.modelXbrl.info("formula:time",
_("Variable set %(xlinkLabel)s completed evaluation %(count)s: %(time)s sec"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel, count=varSet.evaluationNumber,
time=format_string(xpCtx.modelXbrl.modelManager.locale, "%.3f", now - varSet.timeEvaluationStarted))
varSet.timeEvaluationStarted = now
if xpCtx.isRunTimeExceeded: raise XPathContext.RunTimeExceededException()
# do dependent variable scope relationships
for varScopeRel in xpCtx.modelXbrl.relationshipSet(XbrlConst.variablesScope).fromModelObject(varSet):
try:
resultQname = varScopeRel.variableQname
if resultQname:
overriddenInScopeVar = xpCtx.inScopeVars.get(resultQname)
xpCtx.inScopeVars[resultQname] = result
vb = VariableBinding(xpCtx, varScopeRel)
vb.yieldedEvaluation = result
vb.yieldedFact = newFact
overriddenVarBinding = xpCtx.varBindings.get(resultQname)
xpCtx.varBindings[resultQname] = vb
evaluate(xpCtx, varScopeRel.toModelObject, True, uncoveredAspectFacts)
if resultQname:
xpCtx.inScopeVars.pop(resultQname)
if overriddenInScopeVar is not None: # restore overridden value if there was one
xpCtx.inScopeVars[resultQname] = overriddenInScopeVar
xpCtx.varBindings.pop(resultQname)
if overriddenVarBinding is not None:
xpCtx.varBindings[resultQname] = overriddenVarBinding
vb.close() # dereference
except XPathContext.XPathException as err:
xpCtx.modelXbrl.error(err.code,
_("Variable set chained in scope of variable set %(variableset)s \nException: \n%(error)s"),
modelObject=(varSet, varScopeRel.toModelObject), variableSet=varSet.logLabel(), error=err.message)
else:
# produce variable bindings
varRel = varSet.orderedVariableRelationships[varIndex]
varQname = varRel.variableQname
vb = VariableBinding(xpCtx, varRel)
var = vb.var
if vb.isFactVar:
vb.aspectsDefined = set(aspectModels[varSet.aspectModel]) # has to be a mutable set
vb.values = None
varHasNoVariableDependencies = var.hasNoVariableDependencies
varHasNilFacts = var.nils == "true"
if varHasNoVariableDependencies and varQname in cachedFilteredFacts:
facts, vb.aspectsDefined, vb.aspectsCovered = cachedFilteredFacts[varQname]
if xpCtx.formulaOptions.traceVariableFilterWinnowing:
xpCtx.modelXbrl.info("formula:trace",
_("Fact Variable %(variable)s: start with %(factCount)s facts previously cached after explicit filters"),
modelObject=var, variable=varQname, factCount=len(facts))
else:
if var.fromInstanceQnames:
groupFilteredFactsKey = "grp:" + str(varQname) # multi instance vars or non-var-dependent variables
elif varHasNilFacts:
groupFilteredFactsKey = "grp:stdInstWithNils"
else:
groupFilteredFactsKey = "grp:stdInstNonNil"
if groupFilteredFactsKey in cachedFilteredFacts:
facts = cachedFilteredFacts[groupFilteredFactsKey]
if xpCtx.formulaOptions.traceVariableFilterWinnowing:
xpCtx.modelXbrl.info("formula:trace",
_("Fact Variable %(variable)s: start with %(factCount)s facts previously cached before variable filters"),
modelObject=var, variable=varQname, factCount=len(facts))
else:
facts = set.union(*[(inst.factsInInstance if varHasNilFacts else inst.nonNilFactsInInstance)
for inst in vb.instances])
if xpCtx.formulaOptions.traceVariableFilterWinnowing:
xpCtx.modelXbrl.info("formula:trace",
_("Fact Variable %(variable)s filtering: start with %(factCount)s facts"),
modelObject=var, variable=varQname, factCount=len(facts))
facts = filterFacts(xpCtx, vb, facts, varSet.groupFilterRelationships, "group")
vb.aspectsCovered.clear() # group boolean sub-filters may have covered aspects
cachedFilteredFacts[groupFilteredFactsKey] = facts
facts = filterFacts(xpCtx, vb, facts, var.filterRelationships, None) # also finds covered aspects (except aspect cover filter dims, not known until after this complete pass)
            # adding dim aspects must be done after explicit filtering
for fact in facts:
if fact.isItem and fact.context is not None:
vb.aspectsDefined |= fact.context.dimAspects(xpCtx.defaultDimensionAspects)
coverAspectCoverFilterDims(xpCtx, vb, var.filterRelationships) # filters need to know what dims are covered
if varHasNoVariableDependencies:
cachedFilteredFacts[varQname] = (facts, vb.aspectsDefined, vb.aspectsCovered)
considerFallback = bool(var.fallbackValueProg)
if varSet.implicitFiltering == "true":
if any((_vb.isFactVar and not _vb.isFallback) for _vb in xpCtx.varBindings.values()):
factCount = len(facts)
# uncovered aspects of the prior variable bindings may include aspects not in current variable binding
uncoveredAspects = (vb.aspectsDefined | _DICT_SET(uncoveredAspectFacts.keys())) - vb.aspectsCovered - {Aspect.DIMENSIONS}
facts = implicitFilter(xpCtx, vb, facts, uncoveredAspects, uncoveredAspectFacts)
if (considerFallback and varHasNoVariableDependencies and
factCount and
factCount - len(facts) == 0 and
len(xpCtx.varBindings) > 1 and
all((len(_vb.aspectsDefined) == len(vb.aspectsDefined) for _vb in xpCtx.varBindings.values()))):
considerFallback = False
vb.facts = facts
if xpCtx.formulaOptions.traceVariableFiltersResult:
xpCtx.modelXbrl.info("formula:trace",
_("Fact Variable %(variable)s: filters result %(result)s"),
modelObject=var, variable=varQname, result=str(vb.facts))
if considerFallback:
vb.values = xpCtx.evaluate(var.fallbackValueProg)
if xpCtx.formulaOptions.traceVariableExpressionResult:
xpCtx.modelXbrl.info("formula:trace",
_("Fact Variable %(variable)s: fallbackValue result %(result)s"),
modelObject=var, variable=varQname, result=str(vb.values))
elif vb.isGeneralVar: # general variable
if var.fromInstanceQnames:
contextItem = [inst.modelDocument.xmlRootElement
for qn in var.fromInstanceQnames
for instSeq in (xpCtx.inScopeVars[qn],)
for inst in (instSeq if isinstance(instSeq,(list,tuple)) else (instSeq,))
]
else:
contextItem = xpCtx.modelXbrl.modelDocument.xmlRootElement # default is standard input instance
vb.values = xpCtx.flattenSequence( xpCtx.evaluate(var.selectProg, contextItem=contextItem) )
if xpCtx.formulaOptions.traceVariableExpressionResult:
xpCtx.modelXbrl.info("formula:trace",
_("General Variable %(variable)s: select result %(result)s"),
modelObject=var, variable=varQname, result=str(vb.values))
elif vb.isParameter:
vb.parameterValue = xpCtx.inScopeVars.get(var.parameterQname)
# recurse partitions, preserve overlaid var bindings and inScopeVars
overriddenVarBinding = xpCtx.varBindings.get(varQname)
xpCtx.varBindings[varQname] = vb
for evaluationResult in vb.evaluationResults:
overriddenInScopeVar = xpCtx.inScopeVars.get(varQname)
xpCtx.inScopeVars[varQname] = evaluationResult
evaluationContributedUncoveredAspects = {}
if vb.isFactVar and not vb.isFallback:
# cache uncoveredAspect facts for nested evaluations
for aspect in vb.aspectsDefined | vb.aspectsCovered: # covered aspects may not be defined e.g., test 12062 v11, undefined aspect is a complemented aspect
if uncoveredAspectFacts.get(aspect) is None:
evaluationContributedUncoveredAspects[aspect] = uncoveredAspectFacts.get(aspect,"none")
uncoveredAspectFacts[aspect] = None if vb.hasAspectValueCovered(aspect) else vb.yieldedFact
if xpCtx.formulaOptions.traceVariableFiltersResult:
xpCtx.modelXbrl.info("formula:trace",
_("%(variableType)s %(variable)s: bound value %(result)s"),
modelObject=var, variableType=vb.resourceElementName, variable=varQname, result=str(evaluationResult))
if xpCtx.isRunTimeExceeded: raise XPathContext.RunTimeExceededException()
evaluateVar(xpCtx, varSet, varIndex + 1, cachedFilteredFacts, uncoveredAspectFacts)
xpCtx.inScopeVars.pop(varQname)
if overriddenInScopeVar is not None: # restore overridden value if there was one
xpCtx.inScopeVars[varQname] = overriddenInScopeVar
for aspect, priorFact in evaluationContributedUncoveredAspects.items():
if priorFact == "none":
del uncoveredAspectFacts[aspect]
else:
uncoveredAspectFacts[aspect] = priorFact
xpCtx.varBindings.pop(varQname)
vb.close() # dereference
if overriddenVarBinding is not None:
xpCtx.varBindings[varQname] = overriddenVarBinding
def filterFacts(xpCtx, vb, facts, filterRelationships, filterType):
typeLbl = filterType + " " if filterType else ""
orFilter = filterType == "or"
groupFilter = filterType == "group"
if orFilter:
factSet = set()
for varFilterRel in filterRelationships:
_filter = varFilterRel.toModelObject
if isinstance(_filter,ModelFilter): # relationship not constrained to real filters
result = _filter.filter(xpCtx, vb, facts, varFilterRel.isComplemented)
if xpCtx.formulaOptions.traceVariableFilterWinnowing:
xpCtx.modelXbrl.info("formula:trace",
_("Fact Variable %(variable)s %(filterType)s %(filter)s filter %(xlinkLabel)s passes %(factCount)s facts"),
modelObject=vb.var, variable=vb.qname,
filterType=typeLbl, filter=_filter.localName, xlinkLabel=_filter.xlinkLabel, factCount=len(result)),
if orFilter:
factSet |= result
else:
facts = result
if not groupFilter and varFilterRel.isCovered: # block boolean group filters that have cover in subnetworks
vb.aspectsCovered |= _filter.aspectsCovered(vb)
if orFilter:
return factSet
else:
return facts
def coverAspectCoverFilterDims(xpCtx, vb, filterRelationships):
for varFilterRel in filterRelationships:
_filter = varFilterRel.toModelObject
if isinstance(_filter,ModelAspectCover): # relationship not constrained to real filters
if varFilterRel.isCovered:
vb.aspectsCovered |= _filter.dimAspectsCovered(vb)
elif isinstance(_filter,ModelBooleanFilter) and varFilterRel.isCovered:
coverAspectCoverFilterDims(xpCtx, vb, _filter.filterRelationships)
def implicitFilter(xpCtx, vb, facts, aspects, uncoveredAspectFacts):
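    # Keep only the facts whose uncovered aspects match the facts already bound by
    # earlier variables (the implicit filtering step of variable-set evaluation).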
if xpCtx.formulaOptions.traceVariableFilterWinnowing: # trace shows by aspect by bound variable match
for aspect in aspects:
if uncoveredAspectFacts.get(aspect, "none") is not None:
facts = [fact
for fact in facts
if aspectMatches(xpCtx, uncoveredAspectFacts.get(aspect), fact, aspect)]
a = str(aspect) if isinstance(aspect,QName) else Aspect.label[aspect]
xpCtx.modelXbrl.info("formula:trace",
_("Fact Variable %(variable)s implicit filter %(aspect)s passes %(factCount)s facts"),
modelObject=vb.var, variable=vb.qname, aspect=a, factCount=len(facts))
if len(facts) == 0: break
else:
testableAspectFacts = [(aspect, uncoveredAspectFacts.get(aspect))
for aspect in aspects
if uncoveredAspectFacts.get(aspect, "none") is not None]
#testableAspectFacts = [(aspect, fact)
# for aspect, fact in uncoveredAspectFacts.items()
# if not vb.hasAspectValueCovered(aspect)]
if testableAspectFacts:
# not tracing, do bulk aspect filtering
facts = [fact
for fact in facts
if all(aspectMatches(xpCtx, uncoveredAspectFact, fact, aspect)
for (aspect, uncoveredAspectFact) in testableAspectFacts)]
return facts
def aspectsMatch(xpCtx, fact1, fact2, aspects):
return all(aspectMatches(xpCtx, fact1, fact2, aspect) for aspect in aspects)
def aspectMatches(xpCtx, fact1, fact2, aspect):
if fact1 is None: # fallback (atomic) never matches any aspect
return False
if aspect == 1: # Aspect.LOCATION:
return (fact2 is not None and
fact1.modelXbrl != fact2.modelXbrl or # test deemed true for multi-instance comparisons
fact1.getparent() == fact2.getparent())
elif aspect == 2: # Aspect.CONCEPT:
return fact2 is not None and fact1.qname == fact2.qname
elif fact1.isTuple or fact2.isTuple:
return fact1.isTuple and fact2.isTuple # only match the aspects both facts have
elif aspect == 5: # Aspect.UNIT:
u1 = fact1.unit
u2 = fact2.unit if fact2 is not None else None
if u1 is not None:
return u1.isEqualTo(u2)
return u2 is None
else:
# rest of comparisons are for context
c1 = fact1.context
c2 = fact2.context if fact2 is not None else None
if c1 is None or (c2 is None and aspect != 10):
return False # something wrong, must be a context
if c1 is c2:
return True # same context
if aspect == 4: # Aspect.PERIOD:
return c1.isPeriodEqualTo(c2)
if aspect == 3: # Aspect.ENTITY_IDENTIFIER:
return c1.isEntityIdentifierEqualTo(c2)
if aspect == 6: # Aspect.COMPLETE_SEGMENT:
return XbrlUtil.nodesCorrespond(fact1.modelXbrl, c1.segment, c2.segment, dts2=fact2.modelXbrl)
elif aspect == 7: # Aspect.COMPLETE_SCENARIO:
return XbrlUtil.nodesCorrespond(fact1.modelXbrl, c1.scenario, c2.scenario, dts2=fact2.modelXbrl)
elif aspect == 8 or aspect == 9: # aspect in (Aspect.NON_XDT_SEGMENT, Aspect.NON_XDT_SCENARIO):
nXs1 = c1.nonDimValues(aspect)
nXs2 = c2.nonDimValues(aspect)
lXs1 = len(nXs1)
lXs2 = len(nXs2)
if lXs1 != lXs2:
return False
elif lXs1 > 0:
for i in range(lXs1):
if not XbrlUtil.nodesCorrespond(fact1.modelXbrl, nXs1[i], nXs2[i], dts2=fact2.modelXbrl):
return False
return True
elif aspect == 10: # Aspect.DIMENSIONS:
''' (no implicit filtering on ALL dimensions for now)
dimQnames1 = fact1.context.dimAspects
dimQnames2 = fact2.context.dimAspects
if len(dimQnames1 ^ dimQnames2): # dims not in both
matches = False
else:
for dimQname1 in dimQnames1:
if dimQname1 not in dimQnames2 or \
not aspectMatches(fact1, fact2, dimQname1):
matches = False
break
'''
elif isinstance(aspect, QName):
global ModelDimensionValue
if ModelDimensionValue is None:
from arelle.ModelInstanceObject import ModelDimensionValue
dimValue1 = c1.dimValue(aspect)
if c2 is None:
if dimValue1 is None: # neither fact nor matching facts have this dimension aspect
return True
return False
dimValue2 = c2.dimValue(aspect)
if isinstance(dimValue1, ModelDimensionValue):
if dimValue1.isExplicit:
if isinstance(dimValue2, QName):
if dimValue1.memberQname != dimValue2:
return False
elif isinstance(dimValue2, (ModelDimensionValue,DimValuePrototype)):
if dimValue2.isTyped:
return False
elif dimValue1.memberQname != dimValue2.memberQname:
return False
elif dimValue2 is None:
return False
elif dimValue1.isTyped:
if isinstance(dimValue2, QName):
return False
elif isinstance(dimValue2, (ModelDimensionValue,DimValuePrototype)):
if dimValue2.isExplicit:
return False
elif dimValue1.dimension.typedDomainElement in xpCtx.modelXbrl.modelFormulaEqualityDefinitions:
equalityDefinition = xpCtx.modelXbrl.modelFormulaEqualityDefinitions[dimValue1.dimension.typedDomainElement]
return equalityDefinition.evalTest(xpCtx, fact1, fact2)
elif not XbrlUtil.nodesCorrespond(fact1.modelXbrl, dimValue1.typedMember, dimValue2.typedMember, dts2=fact2.modelXbrl):
return False
elif dimValue2 is None:
return False
elif isinstance(dimValue1,QName): # first dim is default value of an explicit dim
if isinstance(dimValue2, QName): # second dim is default value of an explicit dim
# multi-instance does not consider member's qname here where it is a default
# only check if qnames match if the facts are from same instance
if fact1.modelXbrl == fact2.modelXbrl and dimValue1 != dimValue2:
return False
elif isinstance(dimValue2, (ModelDimensionValue,DimValuePrototype)):
if dimValue2.isTyped:
return False
elif dimValue1 != dimValue2.memberQname:
return False
elif dimValue2 is None: # no dim aspect for fact 2
if fact1.modelXbrl == fact2.modelXbrl: # only allowed for multi-instance
return False
elif dimValue1 is None:
# absent dim member from fact1 allowed if fact2 is default in different instance
if isinstance(dimValue2,QName):
if fact1.modelXbrl == fact2.modelXbrl:
return False
elif dimValue2 is not None:
return False
# else if both are None, matches True for single and multiple instance
return True
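# factsPartitions groups facts so that every fact in a partition matches the first
# fact of that partition on all of the given (uncovered) aspects. Each fact is
# compared against one representative per existing partition until a match is found,
# so the scan is quadratic in the worst case.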
def factsPartitions(xpCtx, facts, aspects):
factsPartitions = []
for fact in facts:
matched = False
for partition in factsPartitions:
if aspectsMatch(xpCtx, fact, partition[0], aspects):
partition.append(fact)
matched = True
break
if not matched:
factsPartitions.append([fact,])
return factsPartitions
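# evaluationIsUnnecessary detects duplicate evaluations. thisEval is the tuple of
# matchable bound values for the current evaluation; otherEvalHashDicts[i] maps
# hash(value) to the set of prior-evaluation indexes that bound the same value for
# variable i. An evaluation is skipped when every variable fell back, or when the
# intersection of those index sets yields a prior evaluation that is equal on all
# non-fallback positions.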
def evaluationIsUnnecessary(thisEval, otherEvalHashDicts, otherEvals):
if otherEvals:
if all(e is None for e in thisEval):
return True # evaluation not necessary, all fallen back
# hash check if any hashes merit further look for equality
otherEvalSets = [otherEvalHashDicts[i].get(hash(e), set())
for i, e in enumerate(thisEval)
if e is not None]
if otherEvalSets:
matchingEvals = [otherEvals[i] for i in set.intersection(*otherEvalSets)]
# detects evaluations which are not different (duplicate) and extra fallback evaluations
return any(all([e == matchingEval[i] for i, e in enumerate(thisEval) if e is not None])
for matchingEval in matchingEvals)
return False
'''
r = range(len(thisEval))
for otherEval in otherEvals:
if all([thisEval[i] is None or thisEval[i] == otherEval[i] for i in r]):
return True
return False
'''
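# produceOutputFact assembles one output fact for a formula evaluation:
#   1. resolve the concept rule, then (for items) the entity, period, unit, dimension
#      and OCC aspect rules via formulaAspectValue,
#   2. abort (return None) if resolving those rules raised new errors,
#   3. reuse or create a matching context and unit in the output instance, and
#   4. evaluate the formula value expression and serialize it using the decimals or
#      precision rule before creating the fact element.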
def produceOutputFact(xpCtx, formula, result):
priorErrorCount = len(xpCtx.modelXbrl.errors)
isTuple = isinstance(formula,ModelTuple)
# assemble context
conceptQname = formulaAspectValue(xpCtx, formula, Aspect.CONCEPT, "xbrlfe:missingConceptRule")
if isinstance(conceptQname, VariableBindingError):
xpCtx.modelXbrl.error(conceptQname.err,
_("Formula %(label)s concept: %(concept)s"),
modelObject=formula, label=formula.logLabel(), concept=conceptQname.msg)
modelConcept = None
else:
        modelConcept = xpCtx.modelXbrl.qnameConcepts.get(conceptQname) # .get: concept may be absent from the DTS
if modelConcept is None or (not modelConcept.isTuple if isTuple else not modelConcept.isItem):
xpCtx.modelXbrl.error("xbrlfe:missingConceptRule",
_("Formula %(label)s concept %(concept)s is not a %(element)s"),
modelObject=formula, label=formula.logLabel(), concept=conceptQname, element=formula.localName)
outputLocation = formulaAspectValue(xpCtx, formula, Aspect.LOCATION_RULE, None)
if not isTuple:
# entity
entityIdentScheme = formulaAspectValue(xpCtx, formula, Aspect.SCHEME, "xbrlfe:missingEntityIdentifierRule")
if isinstance(entityIdentScheme, VariableBindingError):
xpCtx.modelXbrl.error(str(entityIdentScheme),
_("Formula %(label)s entity identifier scheme: %(scheme)s"),
modelObject=formula, label=formula.logLabel(), scheme=entityIdentScheme.msg)
entityIdentValue = None
else:
entityIdentValue = formulaAspectValue(xpCtx, formula, Aspect.VALUE, "xbrlfe:missingEntityIdentifierRule")
if isinstance(entityIdentValue, VariableBindingError):
                xpCtx.modelXbrl.error(str(entityIdentValue),
_("Formula %(label)s entity identifier value: %(entityIdentifier)s"),
modelObject=formula, label=formula.logLabel(), entityIdentifier=entityIdentValue.msg)
# period
periodType = formulaAspectValue(xpCtx, formula, Aspect.PERIOD_TYPE, "xbrlfe:missingPeriodRule")
periodStart = None
periodEndInstant = None
if isinstance(periodType, VariableBindingError):
xpCtx.modelXbrl.error(str(periodType),
_("Formula %(label)s period type: %(periodType)s"),
modelObject=formula, label=formula.logLabel(), periodType=periodType.msg)
elif periodType == "instant":
periodEndInstant = formulaAspectValue(xpCtx, formula, Aspect.INSTANT, "xbrlfe:missingPeriodRule")
if isinstance(periodEndInstant, VariableBindingError):
xpCtx.modelXbrl.error(str(periodEndInstant),
_("Formula %(label)s period end: %(period)s"),
modelObject=formula, label=formula.logLabel(), period=periodEndInstant.msg)
elif periodType == "duration":
periodStart = formulaAspectValue(xpCtx, formula, Aspect.START, "xbrlfe:missingPeriodRule")
if isinstance(periodStart, VariableBindingError):
xpCtx.modelXbrl.error(str(periodStart),
_("Formula %(label)s period start: %(period)s"),
modelObject=formula, label=formula.logLabel(), period=periodStart.msg)
periodEndInstant = formulaAspectValue(xpCtx, formula, Aspect.END, "xbrlfe:missingPeriodRule")
if isinstance(periodEndInstant, VariableBindingError):
xpCtx.modelXbrl.error(str(periodEndInstant),
_("Formula %(label)s period end: %(period)s"),
modelObject=formula, label=formula.logLabel(), period=periodEndInstant.msg)
# unit
if modelConcept is not None and modelConcept.isNumeric:
unitSource = formulaAspectValue(xpCtx, formula, Aspect.UNIT_MEASURES, None)
multDivBy = formulaAspectValue(xpCtx, formula, Aspect.MULTIPLY_BY, "xbrlfe:missingUnitRule")
if isinstance(multDivBy, VariableBindingError):
xpCtx.modelXbrl.error(str(multDivBy) if isinstance(multDivBy, VariableBindingError) else "xbrlfe:missingUnitRule",
_("Formula %(label)s unit: %(unit)s"),
modelObject=formula, label=formula.logLabel(), unit=multDivBy.msg)
                multiplyBy = []; divideBy = [] # prevent errors later if bad (lists, so .append below still works)
else:
divMultBy = formulaAspectValue(xpCtx, formula, Aspect.DIVIDE_BY, "xbrlfe:missingUnitRule")
if isinstance(divMultBy, VariableBindingError):
                    xpCtx.modelXbrl.error(str(divMultBy) if isinstance(divMultBy, VariableBindingError) else "xbrlfe:missingUnitRule",
_("Formula %(label)s unit: %(unit)s"),
modelObject=formula, label=formula.logLabel(), unit=divMultBy.msg)
                    multiplyBy = []; divideBy = [] # prevent errors later if bad (lists, so .append below still works)
else:
multiplyBy = unitSource[0] + multDivBy[0] + divMultBy[1]
divideBy = unitSource[1] + multDivBy[1] + divMultBy[0]
# remove cancelling mult/div units
lookForCommonUnits = True
while lookForCommonUnits:
lookForCommonUnits = False
for commonUnit in multiplyBy:
if commonUnit in divideBy:
multiplyBy.remove(commonUnit)
divideBy.remove(commonUnit)
lookForCommonUnits = True
break
if len(multiplyBy) == 0: # if no units add pure
if (Aspect.MULTIPLY_BY not in formula.aspectValues and Aspect.MULTIPLY_BY not in formula.aspectProgs and
Aspect.DIVIDE_BY not in formula.aspectValues and Aspect.DIVIDE_BY not in formula.aspectProgs):
xpCtx.modelXbrl.error("xbrlfe:missingUnitRule",
_("Formula %(label)s"),
modelObject=formula, label=formula.logLabel())
multiplyBy.append(XbrlConst.qnXbrliPure)
# dimensions
segOCCs = []
scenOCCs = []
if formula.aspectModel == "dimensional":
dimAspects = {}
dimQnames = formulaAspectValue(xpCtx, formula, Aspect.DIMENSIONS, None)
if dimQnames:
for dimQname in dimQnames:
                    dimConcept = xpCtx.modelXbrl.qnameConcepts.get(dimQname) # .get: dimension may be absent from the DTS
dimErr = "xbrlfe:missing{0}DimensionRule".format("typed" if dimConcept is not None and dimConcept.isTypedDimension else "explicit")
dimValue = formulaAspectValue(xpCtx, formula, dimQname, dimErr)
if isinstance(dimValue, VariableBindingError):
xpCtx.modelXbrl.error(dimErr,
_("Formula %(label)s dimension %(dimension)s: %(value)s"),
modelObject=formula, label=formula.logLabel(),
dimension=dimQname, value=dimValue.msg)
elif dimConcept.isTypedDimension:
if isinstance(dimValue, list): # result of flatten, always a list
if len(dimValue) != 1 or not isinstance(dimValue[0], ModelObject):
xpCtx.modelXbrl.error("xbrlfe:wrongXpathResultForTypedDimensionRule",
_("Formula %(label)s dimension %(dimension)s value is not a node: %(value)s"),
modelObject=formula, label=formula.logLabel(),
dimension=dimQname, value=dimValue)
continue
dimValue = dimValue[0]
dimAspects[dimQname] = dimValue
elif dimValue is not None and xpCtx.modelXbrl.qnameDimensionDefaults.get(dimQname) != dimValue:
dimAspects[dimQname] = dimValue
segOCCs = formulaAspectValue(xpCtx, formula, Aspect.NON_XDT_SEGMENT, None)
scenOCCs = formulaAspectValue(xpCtx, formula, Aspect.NON_XDT_SCENARIO, None)
for occElt in xpCtx.flattenSequence((segOCCs, scenOCCs)):
if isinstance(occElt, ModelObject) and occElt.namespaceURI == XbrlConst.xbrldi:
xpCtx.modelXbrl.error("xbrlfe:badSubsequentOCCValue",
_("Formula %(label)s OCC element %(occ)s covers a dimensional aspect"),
modelObject=(formula,occElt), label=formula.logLabel(),
occ=occElt.elementQname)
else:
dimAspects = None # non-dimensional
segOCCs = formulaAspectValue(xpCtx, formula, Aspect.COMPLETE_SEGMENT, None)
scenOCCs = formulaAspectValue(xpCtx, formula, Aspect.COMPLETE_SCENARIO, None)
if priorErrorCount < len(xpCtx.modelXbrl.errors):
return None # had errors, don't produce output fact
    # does the context exist in the output instance document?
outputInstanceQname = formula.outputInstanceQname
outputXbrlInstance = xpCtx.inScopeVars[outputInstanceQname]
xbrlElt = outputXbrlInstance.modelDocument.xmlRootElement
# in source instance document
newFact = None
if isTuple:
newFact = outputXbrlInstance.createFact(conceptQname, parent=outputLocation,
afterSibling=xpCtx.outputLastFact.get(outputInstanceQname))
else:
# add context
prevCntx = outputXbrlInstance.matchContext(
entityIdentScheme, entityIdentValue, periodType, periodStart, periodEndInstant,
dimAspects, segOCCs, scenOCCs)
if prevCntx is not None:
cntxId = prevCntx.id
newCntxElt = prevCntx
else:
newCntxElt = outputXbrlInstance.createContext(entityIdentScheme, entityIdentValue,
periodType, periodStart, periodEndInstant, conceptQname, dimAspects, segOCCs, scenOCCs,
afterSibling=xpCtx.outputLastContext.get(outputInstanceQname),
beforeSibling=xpCtx.outputFirstFact.get(outputInstanceQname))
cntxId = newCntxElt.id
xpCtx.outputLastContext[outputInstanceQname] = newCntxElt
# does unit exist
# add unit
if modelConcept.isNumeric:
prevUnit = outputXbrlInstance.matchUnit(multiplyBy, divideBy)
if prevUnit is not None:
unitId = prevUnit.id
newUnitElt = prevUnit
else:
newUnitElt = outputXbrlInstance.createUnit(multiplyBy, divideBy,
afterSibling=xpCtx.outputLastUnit.get(outputInstanceQname),
beforeSibling=xpCtx.outputFirstFact.get(outputInstanceQname))
unitId = newUnitElt.id
xpCtx.outputLastUnit[outputInstanceQname] = newUnitElt
# add fact
attrs = [("contextRef", cntxId)]
precision = None
decimals = None
if modelConcept.isNumeric:
attrs.append(("unitRef", unitId))
value = formula.evaluate(xpCtx)
valueSeqLen = len(value)
if valueSeqLen > 1:
xpCtx.modelXbrl.error("xbrlfe:nonSingletonOutputValue",
_("Formula %(label)s value is a sequence of length %(valueSequenceLength)s"),
modelObject=formula, label=formula.logLabel(), valueSequenceLength=valueSeqLen)
else:
if valueSeqLen == 0: #xsi:nil if no value
attrs.append((XbrlConst.qnXsiNil, "true"))
v = None
else:
# add precision/decimals for non-fraction numerics
if modelConcept.isNumeric and not modelConcept.isFraction:
if formula.hasDecimals:
decimals = formula.evaluateRule(xpCtx, Aspect.DECIMALS)
attrs.append(("decimals", decimals))
else:
if formula.hasPrecision:
precision = formula.evaluateRule(xpCtx, Aspect.PRECISION)
else:
precision = 0
attrs.append(("precision", precision))
x = value[0]
if isinstance(x,float):
if (isnan(x) or
(precision and (isinf(precision) or precision == 0)) or
(decimals and isinf(decimals))):
v = xsString(xpCtx, None, x)
elif decimals is not None:
v = "%.*f" % ( int(decimals), x)
elif precision is not None and precision != 0:
a = fabs(x)
log = log10(a) if a != 0 else 0
v = "%.*f" % ( int(precision) - int(log) - (1 if a >= 1 else 0), x)
else: # no implicit precision yet
v = xsString(xpCtx, None, x)
elif isinstance(x,Decimal):
if (x.is_nan() or
(precision and (isinf(precision) or precision == 0)) or
(decimals and isinf(decimals))):
v = xsString(xpCtx, None, x)
elif decimals is not None:
v = "%.*f" % ( int(decimals), x)
elif precision is not None and precision != 0:
a = x.copy_abs()
log = a.log10() if a != 0 else 0
v = "%.*f" % ( int(precision) - int(log) - (1 if a >= 1 else 0), x)
else: # no implicit precision yet
v = xsString(xpCtx, None, x)
elif isinstance(x,QName):
v = XmlUtil.addQnameValue(xbrlElt, x)
elif isinstance(x,datetime.datetime):
v = XmlUtil.dateunionValue(x)
else:
v = xsString(xpCtx, None, x)
newFact = outputXbrlInstance.createFact(conceptQname, attributes=attrs, text=v,
parent=outputLocation,
afterSibling=xpCtx.outputLastFact.get(outputInstanceQname))
if newFact is not None:
xpCtx.outputLastFact[outputInstanceQname] = newFact
if outputInstanceQname not in xpCtx.outputFirstFact:
xpCtx.outputFirstFact[outputInstanceQname] = newFact
return newFact
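# formulaAspectValue resolves a single aspect for the output fact. An explicit aspect
# rule on the formula wins; otherwise the value comes from the source variable binding
# (or, for formula:uncovered, from whichever bound fact variable leaves the aspect
# uncovered — for DIMENSIONS the uncovered values are unioned across variables).
# Unit, dimension and OCC aspects get additional post-processing (augment,
# omitDimensions, occEmpty) before being returned.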
def formulaAspectValue(xpCtx, formula, aspect, srcMissingErr):
ruleValue = formula.evaluateRule(xpCtx, aspect)
if ruleValue is not None:
if aspect in (Aspect.CONCEPT,
Aspect.VALUE, Aspect.SCHEME,
Aspect.PERIOD_TYPE, Aspect.START, Aspect.END, Aspect.INSTANT,
):
return ruleValue
if isinstance(aspect,QName) and ruleValue != XbrlConst.qnFormulaDimensionSAV:
return ruleValue
sourceQname = formula.source(aspect)
formulaUncovered = sourceQname == XbrlConst.qnFormulaUncovered
if aspect == Aspect.LOCATION_RULE and sourceQname is None:
return xpCtx.inScopeVars[formula.outputInstanceQname].modelDocument.xmlRootElement
elif aspect == Aspect.DIMENSIONS and formulaUncovered:
aspectSourceValue = set() # union of uncovered dimensions, all variables
elif srcMissingErr is None:
aspectSourceValue = None # important for dimensions, missing is not an error
elif formulaUncovered:
if isinstance(aspect,QName): # absent uncovered dimension is ok, just not copied to output OCC
aspectSourceValue = None
else:
aspectSourceValue = xbrlfe_undefinedSAV # other then dimensions, absent is an error
else:
aspectSourceValue = VariableBindingError(srcMissingErr,
_("neither source {0}, nor an aspect rule, were found.")
.format(sourceQname if sourceQname else ''))
for vb in xpCtx.varBindings.values():
if vb.isFactVar and not vb.isFallback:
if aspect == Aspect.DIMENSIONS and formulaUncovered:
aspectSourceValue |= vb.aspectValue(aspect)
elif formulaUncovered and vb.hasAspectValueUncovered(aspect):
aspectSourceValue = vb.aspectValue(aspect)
break
elif sourceQname == vb.qname:
if not vb.isBindAsSequence or vb.hasAspectValueUncovered(aspect):
aspectSourceValue = vb.aspectValue(aspect)
else:
aspectSourceValue = VariableBindingError("xbrlfe:sequenceSAVConflicts",
_("source, {0}, contains the QName of a fact variable that binds as a sequence where that fact's aspect rule covers this filtered aspect")
.format(sourceQname))
break
elif aspect == Aspect.LOCATION_RULE and sourceQname == vb.qname:
aspectSourceValue = vb.aspectValue(aspect)
break
# modify by any specific rules
if aspect in (Aspect.CONCEPT, Aspect.LOCATION_RULE,
Aspect.VALUE, Aspect.SCHEME,
Aspect.PERIOD_TYPE, Aspect.START, Aspect.END, Aspect.INSTANT,
) or isinstance(aspect,QName):
return aspectSourceValue
elif aspect == Aspect.UNIT_MEASURES:
augment = formula.evaluateRule(xpCtx, Aspect.AUGMENT)
if aspectSourceValue and (not augment or augment == "true"): # true is the default behavior
return aspectSourceValue
else:
return ([],[])
elif aspect in (Aspect.MULTIPLY_BY, Aspect.DIVIDE_BY):
if sourceQname and aspectSourceValue:
return aspectSourceValue
else:
return (ruleValue,[])
elif aspect == Aspect.DIMENSIONS:
if aspectSourceValue is None: aspectSourceValue = set()
if ruleValue is None: ruleValueSet = set()
else: ruleValueSet = set(ruleValue)
omitDims = formula.evaluateRule(xpCtx, Aspect.OMIT_DIMENSIONS)
if omitDims is None: omitDimsSet = set()
else: omitDimsSet = set(omitDims)
return (aspectSourceValue | ruleValueSet) - omitDimsSet
elif isinstance(aspect, QName):
return aspectSourceValue
elif aspect in (Aspect.COMPLETE_SEGMENT, Aspect.COMPLETE_SCENARIO,
Aspect.NON_XDT_SEGMENT, Aspect.NON_XDT_SCENARIO):
occFragments = []
occEmpty = ruleValue and ruleValue[0] == XbrlConst.qnFormulaOccEmpty
if not occEmpty and aspectSourceValue:
occFragments.extend(aspectSourceValue)
if ruleValue:
occFragments.extend(ruleValue[1 if occEmpty else 0:])
return occFragments
return None
def uncoveredAspectValue(xpCtx, aspect):
for vb in xpCtx.varBindings.values():
if vb.isFactVar and not vb.isFallback and vb.hasAspectValueUncovered(aspect):
return vb.aspectValue(aspect)
return None
def variableBindingIsFallback(xpCtx, variableQname):
for vb in xpCtx.varBindings.values():
if vb.qname == variableQname:
return vb.isFactVar and vb.isFallback
return False
def uncoveredVariableSetAspects(xpCtx):
aspectsDefined = set()
aspectsCovered = set()
for vb in xpCtx.varBindings.values():
if vb.isFactVar and not vb.isFallback:
aspectsCovered |= vb.aspectsCovered
aspectsDefined |= vb.aspectsDefined
return (aspectsDefined - aspectsCovered)
class VariableBindingError:
def __init__(self, err, msg=None):
self.err = err
self.msg = msg
def __repr__(self):
return self.err
xbrlfe_undefinedSAV = VariableBindingError("xbrlfe:undefinedSAV")
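# VariableBinding holds the evaluation state of one variable-set variable (fact
# variable, general variable, parameter or formula result): the aspects it defines
# and covers, the fact or value currently yielded, and whether the current
# evaluation is a fallback.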
class VariableBinding:
def __init__(self, xpCtx, varRel=None, boundFact=None):
self.xpCtx = xpCtx
if varRel is not None:
self.qname = varRel.variableQname
self.var = varRel.toModelObject
else:
self.qname = self.var = None
self.aspectsDefined = set()
self.aspectsCovered = set()
self.isFactVar = isinstance(self.var, ModelFactVariable)
self.isGeneralVar = isinstance(self.var, ModelGeneralVariable)
self.isParameter = isinstance(self.var, ModelParameter)
self.isFormulaResult = isinstance(self.var, ModelFormula)
self.isBindAsSequence = self.var.bindAsSequence == "true" if isinstance(self.var,ModelVariable) else False
self.yieldedFact = boundFact
self.yieldedFactResult = None
self.isFallback = False
self.instances = ([inst
for qn in self.var.fromInstanceQnames
for inst in xpCtx.flattenSequence(xpCtx.inScopeVars[qn])]
if self.var is not None and self.var.fromInstanceQnames
else [xpCtx.modelXbrl])
def close(self):
self.__dict__.clear() # dereference
pass
@property
def resourceElementName(self):
if self.isFactVar: return _("Fact Variable")
elif self.isGeneralVar: return _("General Variable")
elif self.isParameter: return _("Parameter")
elif isinstance(self.var, ModelTuple): return _("Tuple")
elif isinstance(self.var, ModelFormula): return _("Formula")
elif isinstance(self.var, ModelValueAssertion): return _("ValueAssertion")
elif isinstance(self.var, ModelExistenceAssertion): return _("ExistenceAssertion")
def matchesSubPartitions(self, partition, aspects):
if self.var.matches == "true":
return [partition]
subPartitions = []
for fact in partition:
foundSubPartition = False
for subPartition in subPartitions:
matchedInSubPartition = False
for fact2 in subPartition:
if aspectsMatch(self.xpCtx, fact, fact2, aspects):
matchedInSubPartition = True
break
if not matchedInSubPartition:
subPartition.append(fact)
foundSubPartition = True
break
if not foundSubPartition:
subPartitions.append([fact,])
return subPartitions
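    # evaluationResults yields one binding per evaluation: for bind-as-sequence fact
    # variables each uncovered-aspect partition (further split by matchesSubPartitions
    # unless matches="true") is yielded as a list; otherwise each fact is yielded
    # individually, and the fallback value is yielded only when no fact bound.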
@property
def evaluationResults(self):
includeFallback = True
if self.isFactVar:
if self.isBindAsSequence and self.facts:
for factsPartition in factsPartitions(self.xpCtx, self.facts, self.aspectsDefined - self.aspectsCovered):
for matchesSubPartition in self.matchesSubPartitions(factsPartition, self.aspectsDefined):
self.yieldedFact = matchesSubPartition[0]
self.yieldedFactContext = self.yieldedFact.context
self.yieldedEvaluation = matchesSubPartition
self.isFallback = False
includeFallback = False
yield matchesSubPartition
else:
for fact in self.facts:
self.yieldedFact = fact
self.yieldedFactContext = self.yieldedFact.context
self.yieldedEvaluation = fact
self.isFallback = False
includeFallback = False
yield fact
if self.values:
if includeFallback:
self.yieldedFact = None
self.yieldedFactContext = None
self.yieldedEvaluation = "fallback"
self.isFallback = True
yield self.values
elif self.isGeneralVar:
self.yieldedFact = None
self.yieldedFactContext = None
self.isFallback = False
if self.isBindAsSequence:
self.yieldedEvaluation = self.values
yield self.values
else:
for value in self.values:
self.yieldedEvaluation = value
yield value
elif self.isParameter:
self.yieldedFact = None
self.yieldedEvaluation = None
self.isFallback = False
yield self.parameterValue
def matchableBoundFact(self, fbVars): # return from this function has to be hashable
if (self.isFallback or self.isParameter
# remove to allow different gen var evaluations: or self.isGeneralVar
or (self.isGeneralVar and not fbVars.isdisjoint(self.var.variableRefs()))):
return None
if self.isBindAsSequence:
return tuple(self.yieldedEvaluation)
if self.isFormulaResult:
return self.yieldedFact
return self.yieldedEvaluation
def hasDimension(self, dimension):
return dimension in self.definedDimensions
def hasDimensionValueDefined(self, dimension):
return dimension in self.definedDimensions
def definedDimensions(self, dimension):
return self.yieldedFact.context.dimAspects(self.xpCtx.defaultDimensionAspects) if self.yieldedFact.isItem and self.yieldedFact.context is not None else set()
def isDimensionalValid(self, dimension):
return False
def hasAspectValueUncovered(self, aspect):
if aspect in aspectModelAspect: aspect = aspectModelAspect[aspect]
return aspect in self.aspectsDefined and aspect not in self.aspectsCovered
def hasAspectValueCovered(self, aspect):
if aspect in aspectModelAspect: aspect = aspectModelAspect[aspect]
return aspect in self.aspectsCovered
def aspectsNotCovered(self, aspects):
return set(a for a in aspects if not self.hasAspectValueCovered(a))
def hasAspectValueDefined(self, aspect):
if aspect in aspectModelAspect: aspect = aspectModelAspect[aspect]
return aspect in self.aspectsDefined
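    # aspectValue reads the requested aspect directly off the currently yielded fact
    # (None, or an empty set for DIMENSIONS, when this binding fell back); dimension
    # QNames return the context's dimValue, and unit aspects are only reported for
    # facts that actually have a unit.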
def aspectValue(self, aspect):
fact = self.yieldedFact
if fact is None:
if aspect == Aspect.DIMENSIONS:
return set()
else:
return None
if aspect == Aspect.LOCATION:
return fact.getparent()
elif aspect == Aspect.LOCATION_RULE:
return fact
elif aspect == Aspect.CONCEPT:
return fact.qname
elif fact.isTuple or fact.context is None:
return None #subsequent aspects don't exist for tuples
# context is known to be not None after here
elif aspect == Aspect.PERIOD:
return fact.context.period
elif aspect == Aspect.PERIOD_TYPE:
if fact.context.isInstantPeriod: return "instant"
elif fact.context.isStartEndPeriod: return "duration"
elif fact.context.isForeverPeriod: return "forever"
return None
elif aspect == Aspect.INSTANT:
return fact.context.instantDatetime
elif aspect == Aspect.START:
return fact.context.startDatetime
elif aspect == Aspect.END:
return fact.context.endDatetime
elif aspect == Aspect.ENTITY_IDENTIFIER:
return fact.context.entityIdentifierElement
elif aspect == Aspect.SCHEME:
return fact.context.entityIdentifier[0]
elif aspect == Aspect.VALUE:
return fact.context.entityIdentifier[1]
elif aspect in (Aspect.COMPLETE_SEGMENT, Aspect.COMPLETE_SCENARIO,
Aspect.NON_XDT_SEGMENT, Aspect.NON_XDT_SCENARIO):
return fact.context.nonDimValues(aspect)
elif aspect == Aspect.DIMENSIONS:
return fact.context.dimAspects(self.xpCtx.defaultDimensionAspects)
elif isinstance(aspect, QName):
return fact.context.dimValue(aspect)
elif fact.unit is not None:
if aspect == Aspect.UNIT:
return fact.unit
elif aspect in (Aspect.UNIT_MEASURES, Aspect.MULTIPLY_BY, Aspect.DIVIDE_BY):
return fact.unit.measures
return None
```
#### File: carelle/arelle/ValidateFiling.py
```python
import xml.dom, xml.parsers
import os, re, collections, datetime
from decimal import Decimal
from collections import defaultdict
from arelle import (ModelDocument, ModelValue, ValidateXbrl,
ModelRelationshipSet, XmlUtil, XbrlConst, UrlUtil,
ValidateFilingDimensions, ValidateFilingDTS, ValidateFilingText)
from arelle.ValidateXbrlCalcs import insignificantDigits
from arelle.XmlValidate import UNVALIDATED, VALID
from arelle.ModelObject import ModelObject
from arelle.ModelInstanceObject import ModelFact
from arelle.ModelDtsObject import ModelConcept
from arelle.PluginManager import pluginClassMethods
from arelle.UrlUtil import isHttpUrl
datePattern = None
linkroleDefinitionStatementSheet = None
class ValidateFiling(ValidateXbrl.ValidateXbrl):
def __init__(self, modelXbrl):
super(ValidateFiling, self).__init__(modelXbrl)
global datePattern, GFMcontextDatePattern, signOrCurrencyPattern, instanceFileNamePattern, linkroleDefinitionStatementSheet, efmCIKpattern
if datePattern is None:
datePattern = re.compile(r"([12][0-9]{3})-([01][0-9])-([0-3][0-9])")
GFMcontextDatePattern = re.compile(r"^[12][0-9]{3}-[01][0-9]-[0-3][0-9]$")
            # note \u20ac = euro, \u00a3 = pound, \u00a5 = yen
            signOrCurrencyPattern = re.compile("^(-)[0-9]+|[^eE](-)[0-9]+|(\\()[0-9].*(\\))|([$\u20ac\u00a3\u00a5])")
            instanceFileNamePattern = re.compile(r"^(\w+)-([12][0-9]{3}[01][0-9][0-3][0-9])\.xml$")
linkroleDefinitionStatementSheet = re.compile(r"[^-]+-\s+Statement\s+-\s+.*", # no restriction to type of statement
re.IGNORECASE)
efmCIKpattern = re.compile(r"^[0-9]{10}$")
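            # Illustrative note (not in the original source): instanceFileNamePattern
            # expects instance names such as "abc-20121231.xml", capturing the base
            # mnemonic ("abc") and the date part ("20121231") used by the 6.3.3
            # filename check in validate() below.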
def validate(self, modelXbrl, parameters=None):
if not hasattr(modelXbrl.modelDocument, "xmlDocument"): # not parsed
return
self._isStandardUri = {}
modelXbrl.modelManager.disclosureSystem.loadStandardTaxonomiesDict()
# find typedDomainRefs before validateXBRL pass
if modelXbrl.modelManager.disclosureSystem.SBRNL:
for pluginXbrlMethod in pluginClassMethods("Validate.SBRNL.Start"):
pluginXbrlMethod(self, modelXbrl)
self.qnSbrLinkroleorder = ModelValue.qname("http://www.nltaxonomie.nl/5.0/basis/sbr/xbrl/xbrl-syntax-extension","linkroleOrder")
self.typedDomainQnames = set()
self.typedDomainElements = set()
for modelConcept in modelXbrl.qnameConcepts.values():
if modelConcept.isTypedDimension:
typedDomainElement = modelConcept.typedDomainElement
if isinstance(typedDomainElement, ModelConcept):
self.typedDomainQnames.add(typedDomainElement.qname)
self.typedDomainElements.add(typedDomainElement)
        # note that some XFM tests are done by ValidateXbrl to prevent multiple node walks
super(ValidateFiling,self).validate(modelXbrl, parameters)
xbrlInstDoc = modelXbrl.modelDocument.xmlDocument.getroot()
disclosureSystem = self.disclosureSystem
disclosureSystemVersion = disclosureSystem.version
modelXbrl.modelManager.showStatus(_("validating {0}").format(disclosureSystem.name))
self.modelXbrl.profileActivity()
conceptsUsed = {} # key=concept object value=True if has presentation label
labelsRelationshipSet = modelXbrl.relationshipSet(XbrlConst.conceptLabel)
if self.validateSBRNL: # include generic labels in a (new) set
genLabelsRelationshipSet = modelXbrl.relationshipSet(XbrlConst.elementLabel)
presentationRelationshipSet = modelXbrl.relationshipSet(XbrlConst.parentChild)
referencesRelationshipSetWithProhibits = modelXbrl.relationshipSet(XbrlConst.conceptReference, includeProhibits=True)
self.modelXbrl.profileActivity("... cache lbl, pre, ref relationships", minTimeToShow=1.0)
validateInlineXbrlGFM = (modelXbrl.modelDocument.type == ModelDocument.Type.INLINEXBRL and
self.validateGFM)
validateEFMpragmatic = disclosureSystem.names and "efm-pragmatic" in disclosureSystem.names
self.validateLoggingSemantic = validateLoggingSemantic = (
modelXbrl.isLoggingEffectiveFor(level="WARNING-SEMANTIC") or
modelXbrl.isLoggingEffectiveFor(level="ERROR-SEMANTIC"))
if self.validateEFM:
for pluginXbrlMethod in pluginClassMethods("Validate.EFM.Start"):
pluginXbrlMethod(self)
# instance checks
self.fileNameBasePart = None # prevent testing on fileNameParts if not instance or invalid
self.fileNameDate = None
self.entityRegistrantName = None
self.requiredContext = None
self.standardNamespaceConflicts = defaultdict(set)
self.exhibitType = None # e.g., EX-101, EX-201
if modelXbrl.modelDocument.type == ModelDocument.Type.INSTANCE or \
modelXbrl.modelDocument.type == ModelDocument.Type.INLINEXBRL:
instanceName = modelXbrl.modelDocument.basename
# parameter-provided CIKs and registrant names
paramFilerIdentifier = None
paramFilerIdentifiers = None
paramFilerNames = None
submissionType = None
if self.validateEFM and self.parameters:
p = self.parameters.get(ModelValue.qname("CIK",noPrefixIsNoNamespace=True))
if p and len(p) == 2 and p[1] not in ("null", "None"):
paramFilerIdentifier = p[1]
p = self.parameters.get(ModelValue.qname("cikList",noPrefixIsNoNamespace=True))
if p and len(p) == 2:
paramFilerIdentifiers = p[1].split(",")
p = self.parameters.get(ModelValue.qname("cikNameList",noPrefixIsNoNamespace=True))
if p and len(p) == 2:
paramFilerNames = p[1].split("|Edgar|")
if paramFilerIdentifiers and len(paramFilerIdentifiers) != len(paramFilerNames):
self.modelXbrl.error(("EFM.6.05.24.parameters", "GFM.3.02.02"),
_("parameters for cikList and cikNameList different list entry counts: %(cikList)s, %(cikNameList)s"),
modelXbrl=modelXbrl, cikList=paramFilerIdentifiers, cikNameList=paramFilerNames)
p = self.parameters.get(ModelValue.qname("submissionType",noPrefixIsNoNamespace=True))
if p and len(p) == 2:
submissionType = p[1]
p = self.parameters.get(ModelValue.qname("exhibitType",noPrefixIsNoNamespace=True))
if p and len(p) == 2:
self.exhibitType = p[1]
#6.3.3 filename check
m = instanceFileNamePattern.match(modelXbrl.modelDocument.basename)
if m:
self.fileNameBasePart = m.group(1)
self.fileNameDatePart = m.group(2)
if not self.fileNameBasePart:
modelXbrl.error((self.EFM60303, "GFM.1.01.01"),
_('Invalid instance document base name part (ticker or mnemonic name) in "{base}-{yyyymmdd}.xml": %(filename)s'),
modelObject=modelXbrl.modelDocument, filename=modelXbrl.modelDocument.basename,
messageCodes=("EFM.6.03.03", "EFM.6.23.01", "GFM.1.01.01"))
else:
try:
self.fileNameDate = datetime.datetime.strptime(self.fileNameDatePart,"%Y%m%d").date()
except ValueError:
modelXbrl.error((self.EFM60303, "GFM.1.01.01"),
_('Invalid instance document base name part (date) in "{base}-{yyyymmdd}.xml": %(filename)s'),
modelObject=modelXbrl.modelDocument, filename=modelXbrl.modelDocument.basename,
messageCodes=("EFM.6.03.03", "EFM.6.23.01", "GFM.1.01.01"))
else:
modelXbrl.error((self.EFM60303, "GFM.1.01.01"),
_('Invalid instance document name, must match "{base}-{yyyymmdd}.xml": %(filename)s'),
modelObject=modelXbrl.modelDocument, filename=modelXbrl.modelDocument.basename,
messageCodes=("EFM.6.03.03", "EFM.6.23.01", "GFM.1.01.01"))
#6.5.1 scheme, 6.5.2, 6.5.3 identifier
entityIdentifierValue = None
entityIdentifierValueElt = None
if disclosureSystem.identifierValueName: # omit if no checks
for entityIdentifierElt in xbrlInstDoc.iterdescendants("{http://www.xbrl.org/2003/instance}identifier"):
if isinstance(entityIdentifierElt,ModelObject):
schemeAttr = entityIdentifierElt.get("scheme")
entityIdentifier = XmlUtil.text(entityIdentifierElt)
if not disclosureSystem.identifierSchemePattern.match(schemeAttr):
try:
contextId = entityIdentifierElt.getparent().getparent().id
except AttributeError:
contextId = "not available"
modelXbrl.error(("EFM.6.05.01", "GFM.1.02.01"),
_("Invalid entity identifier scheme %(scheme)s in context %(context)s for identifier %(identifier)s"),
modelObject=entityIdentifierElt, scheme=schemeAttr,
context=contextId, identifier=entityIdentifier)
if not disclosureSystem.identifierValuePattern.match(entityIdentifier):
modelXbrl.error(("EFM.6.05.02", "GFM.1.02.02"),
_("Invalid entity identifier %(entityIdentifierName)s: %(entityIdentifer)s"),
modelObject=entityIdentifierElt,
entityIdentifierName=disclosureSystem.identifierValueName,
entityIdentifer=entityIdentifier)
if not entityIdentifierValue:
entityIdentifierValue = entityIdentifier
entityIdentifierValueElt = entityIdentifierElt
if self.validateEFM and not efmCIKpattern.match(entityIdentifierValue):
self.modelXbrl.error("EFM.6.05.23.cikValue",
_("EntityIdentifier %(entityIdentifer)s must be 10 digits."),
modelObject=entityIdentifierElt, entityIdentifer=entityIdentifierValue)
elif entityIdentifier != entityIdentifierValue:
modelXbrl.error(("EFM.6.05.03", "GFM.1.02.03"),
_("Multiple %(entityIdentifierName)ss: %(entityIdentifer)s, %(entityIdentifer2)s"),
modelObject=(entityIdentifierElt, entityIdentifierValueElt),
entityIdentifierName=disclosureSystem.identifierValueName,
entityIdentifer=entityIdentifierValue,
entityIdentifer2=entityIdentifier,
filerIdentifier=",".join(paramFilerIdentifiers or []))
self.modelXbrl.profileActivity("... filer identifier checks", minTimeToShow=1.0)
#6.5.7 duplicated contexts
contexts = modelXbrl.contexts.values()
contextIDs = set()
uniqueContextHashes = {}
contextsWithDisallowedOCEs = []
contextsWithDisallowedOCEcontent = []
for context in contexts:
contextID = context.id
contextIDs.add(contextID)
h = context.contextDimAwareHash
if h in uniqueContextHashes:
if context.isEqualTo(uniqueContextHashes[h]):
modelXbrl.error(("EFM.6.05.07", "GFM.1.02.07"),
_("Context ID %(context)s is equivalent to context ID %(context2)s"),
modelObject=(context, uniqueContextHashes[h]), context=contextID, context2=uniqueContextHashes[h].id)
else:
uniqueContextHashes[h] = context
#GFM no time in contexts
if self.validateGFM:
for dateElt in XmlUtil.children(context, XbrlConst.xbrli, ("startDate", "endDate", "instant")):
dateText = XmlUtil.text(dateElt)
if not GFMcontextDatePattern.match(dateText):
modelXbrl.error("GFM.1.02.25",
_("Context id %(context)s %(elementName)s invalid content %(value)s"),
modelObject=dateElt, context=contextID,
elementName=dateElt.prefixedName, value=dateText)
#6.5.4 scenario
hasSegment = XmlUtil.hasChild(context, XbrlConst.xbrli, "segment")
hasScenario = XmlUtil.hasChild(context, XbrlConst.xbrli, "scenario")
notAllowed = None
if disclosureSystem.contextElement == "segment" and hasScenario:
notAllowed = _("Scenario")
elif disclosureSystem.contextElement == "scenario" and hasSegment:
notAllowed = _("Segment")
elif disclosureSystem.contextElement == "either" and hasSegment and hasScenario:
notAllowed = _("Both segment and scenario")
elif disclosureSystem.contextElement == "none" and (hasSegment or hasScenario):
notAllowed = _("Neither segment nor scenario")
if notAllowed:
if validateEFMpragmatic:
contextsWithDisallowedOCEs.append(context)
else:
modelXbrl.error(("EFM.6.05.04", "GFM.1.02.04", "SBR.NL.2.3.5.06"),
_("%(elementName)s element not allowed in context Id: %(context)s"),
modelObject=context, elementName=notAllowed, context=contextID, count=1)
#6.5.5 segment only explicit dimensions
for contextName in {"segment": ("{http://www.xbrl.org/2003/instance}segment",),
"scenario": ("{http://www.xbrl.org/2003/instance}scenario",),
"either": ("{http://www.xbrl.org/2003/instance}segment","{http://www.xbrl.org/2003/instance}scenario"),
"both": ("{http://www.xbrl.org/2003/instance}segment","{http://www.xbrl.org/2003/instance}scenario"),
"none": [], None:[]
}[disclosureSystem.contextElement]:
for segScenElt in context.iterdescendants(contextName):
if isinstance(segScenElt,ModelObject):
childTags = ", ".join([child.prefixedName for child in segScenElt.iterchildren()
if isinstance(child,ModelObject) and
child.tag != "{http://xbrl.org/2006/xbrldi}explicitMember"])
if len(childTags) > 0:
if validateEFMpragmatic:
contextsWithDisallowedOCEcontent.append(context)
else:
modelXbrl.error(("EFM.6.05.05", "GFM.1.02.05"),
_("%(elementName)s of context Id %(context)s has disallowed content: %(content)s"),
modelObject=context, context=contextID, content=childTags,
elementName=contextName.partition("}")[2].title())
#6.5.38 period forever
if context.isForeverPeriod:
self.modelXbrl.error("EFM.6.05.38",
_("Context %(contextID)s has a forever period."),
modelObject=context, contextID=contextID)
if validateEFMpragmatic: # output combined count message
if contextsWithDisallowedOCEs:
modelXbrl.error(("EFM.6.05.04", "GFM.1.02.04"),
_("%(count)s contexts contain disallowed %(elementName)s: %(context)s"),
modelObject=contextsWithDisallowedOCEs, elementName=notAllowed,
count=len(contextsWithDisallowedOCEs), context=', '.join(c.id for c in contextsWithDisallowedOCEs))
if contextsWithDisallowedOCEcontent:
modelXbrl.error(("EFM.6.05.05", "GFM.1.02.05"),
_("%(count)s contexts contain disallowed %(elementName)s content: %(context)s"),
modelObject=contextsWithDisallowedOCEcontent, elementName=disclosureSystem.contextElement,
count=len(contextsWithDisallowedOCEcontent), context=', '.join(c.id for c in contextsWithDisallowedOCEcontent))
del uniqueContextHashes, contextsWithDisallowedOCEs, contextsWithDisallowedOCEcontent
self.modelXbrl.profileActivity("... filer context checks", minTimeToShow=1.0)
#fact items from standard context (no dimension)
amendmentDescription = None
amendmentDescriptionFact = None
amendmentFlag = None
amendmentFlagFact = None
documentPeriodEndDate = None
documentPeriodEndDateFact = None
documentType = None
documentTypeFact = None
deiItems = {}
deiFacts = {}
commonSharesItemsByStockClass = defaultdict(list)
commonSharesClassMembers = None
hasDefinedStockAxis = False
hasCommonSharesOutstandingDimensionedFactWithDefaultStockClass = False
commonSharesClassUndefinedMembers = None
commonStockMeasurementDatetime = None
deiCheckLocalNames = {
"EntityRegistrantName",
"EntityCommonStockSharesOutstanding",
"EntityCurrentReportingStatus",
"EntityVoluntaryFilers",
disclosureSystem.deiCurrentFiscalYearEndDateElement,
"EntityFilerCategory",
"EntityWellKnownSeasonedIssuer",
"EntityPublicFloat",
disclosureSystem.deiDocumentFiscalYearFocusElement,
"DocumentFiscalPeriodFocus",
"EntityReportingCurrencyISOCode", # for SD
}
#6.5.8 unused contexts
for f in modelXbrl.facts:
factContextID = f.contextID
contextIDs.discard(factContextID)
context = f.context
factQname = f.qname # works for both inline and plain instances
factElementName = factQname.localName
if disclosureSystem.deiNamespacePattern is not None:
factInDeiNamespace = disclosureSystem.deiNamespacePattern.match(factQname.namespaceURI)
else:
factInDeiNamespace = None
# standard dei items from required context
if context is not None: # tests do not apply to tuples
if not context.hasSegment and not context.hasScenario:
#default context
if factInDeiNamespace:
value = f.value
if factElementName == disclosureSystem.deiAmendmentFlagElement:
amendmentFlag = value
amendmentFlagFact = f
elif factElementName == "AmendmentDescription":
amendmentDescription = value
amendmentDescriptionFact = f
elif factElementName == disclosureSystem.deiDocumentPeriodEndDateElement:
documentPeriodEndDate = value
documentPeriodEndDateFact = f
commonStockMeasurementDatetime = context.endDatetime
elif factElementName == "DocumentType":
documentType = value
documentTypeFact = f
elif factElementName == disclosureSystem.deiFilerIdentifierElement:
deiItems[factElementName] = value
if entityIdentifierValue != value:
self.modelXbrl.error(("EFM.6.05.23", "GFM.3.02.02"),
_("dei:%(elementName)s %(value)s must match the context entity identifier %(entityIdentifier)s"),
modelObject=f, elementName=disclosureSystem.deiFilerIdentifierElement,
value=value, entityIdentifier=entityIdentifierValue)
if paramFilerIdentifiers and value not in paramFilerIdentifiers:
self.modelXbrl.error(("EFM.6.05.23.submissionIdentifier", "GFM.3.02.02"),
_("dei:%(elementName)s %(value)s must match submission: %(filerIdentifier)s"),
modelObject=f, elementName=disclosureSystem.deiFilerIdentifierElement,
value=value, filerIdentifier=",".join(paramFilerIdentifiers))
elif paramFilerIdentifier and value != paramFilerIdentifier:
self.modelXbrl.error(("EFM.6.05.23.submissionIdentifier", "GFM.3.02.02"),
_("dei:%(elementName)s %(value)s must match submission: %(filerIdentifier)s"),
modelObject=f, elementName=disclosureSystem.deiFilerIdentifierElement,
value=value, filerIdentifier=paramFilerIdentifier)
elif factElementName == disclosureSystem.deiFilerNameElement:
deiItems[factElementName] = value
if paramFilerIdentifiers and paramFilerNames and entityIdentifierValue in paramFilerIdentifiers:
prefix = paramFilerNames[paramFilerIdentifiers.index(entityIdentifierValue)]
if not value.lower().startswith(prefix.lower()):
self.modelXbrl.error(("EFM.6.05.24", "GFM.3.02.02"),
_("dei:%(elementName)s %(prefix)s should be a case-insensitive prefix of: %(value)s"),
modelObject=f, elementName=disclosureSystem.deiFilerNameElement,
prefix=prefix, value=value)
elif factElementName in deiCheckLocalNames:
deiItems[factElementName] = value
deiFacts[factElementName] = f
if (self.requiredContext is None and context.isStartEndPeriod and
context.startDatetime is not None and context.endDatetime is not None):
self.requiredContext = context
else:
# segment present
isEntityCommonStockSharesOutstanding = factElementName == "EntityCommonStockSharesOutstanding"
hasClassOfStockMember = False
# note all concepts used in explicit dimensions
for dimValue in context.qnameDims.values():
if dimValue.isExplicit:
dimConcept = dimValue.dimension
memConcept = dimValue.member
for dConcept in (dimConcept, memConcept):
if dConcept is not None:
conceptsUsed[dConcept] = False
if (isEntityCommonStockSharesOutstanding and
dimConcept is not None and
dimConcept.name == "StatementClassOfStockAxis"):
commonSharesItemsByStockClass[memConcept.qname].append(f)
''' per discussion with <NAME>, remove use of LB defined members from this test
if commonSharesClassMembers is None:
commonSharesClassMembers, hasDefinedStockAxis = self.getDimMembers(dimConcept)
if not hasDefinedStockAxis: # no def LB for stock axis, note observed members
commonSharesClassMembers.add(memConcept.qname)
#following is replacement:'''
if commonSharesClassMembers is None:
commonSharesClassMembers = set()
commonSharesClassMembers.add(memConcept.qname) # only note the actually used members, not any defined members
#end of replacement
hasClassOfStockMember = True
if isEntityCommonStockSharesOutstanding and not hasClassOfStockMember:
hasCommonSharesOutstandingDimensionedFactWithDefaultStockClass = True # absent dimension, may be no def LB
if self.validateEFM: # note that this is in the "if context is not None" region
for pluginXbrlMethod in pluginClassMethods("Validate.EFM.Fact"):
pluginXbrlMethod(self, f)
#6.5.17 facts with precision
concept = f.concept
if concept is None:
modelXbrl.error(("EFM.6.04.03", "GFM.2.01.01"),
_("Fact %(fact)s of context %(contextID)s has an XBRL error"),
modelObject=f, fact=f.qname, contextID=factContextID)
else:
                    # note fact concepts used
conceptsUsed[concept] = False
if concept.isNumeric:
if f.precision:
modelXbrl.error(("EFM.6.05.17", "GFM.1.02.16"),
_("Numeric fact %(fact)s of context %(contextID)s has a precision attribute '%(precision)s'"),
modelObject=f, fact=f.qname, contextID=factContextID, precision=f.precision)
#6.5.25 domain items as facts
if self.validateEFM and concept.type is not None and concept.type.isDomainItemType:
modelXbrl.error("EFM.6.05.25",
_("Domain item %(fact)s in context %(contextID)s may not appear as a fact"),
modelObject=f, fact=f.qname, contextID=factContextID)
if validateInlineXbrlGFM:
if f.localName == "nonFraction" or f.localName == "fraction":
syms = signOrCurrencyPattern.findall(f.text)
if syms:
modelXbrl.error(("EFM.N/A", "GFM.1.10.18"),
'ix-numeric Fact %(fact)s of context %(contextID)s has a sign or currency symbol "%(value)s" in "%(text)s"',
modelObject=f, fact=f.qname, contextID=factContextID,
value="".join(s for t in syms for s in t), text=f.text)
self.entityRegistrantName = deiItems.get("EntityRegistrantName") # used for name check in 6.8.6
self.modelXbrl.profileActivity("... filer fact checks", minTimeToShow=1.0)
if len(contextIDs) > 0: # check if contextID is on any undefined facts
for undefinedFact in modelXbrl.undefinedFacts:
contextIDs.discard(undefinedFact.get("contextRef"))
if len(contextIDs) > 0:
modelXbrl.error(("EFM.6.05.08", "GFM.1.02.08"),
_("The instance document contained a context(s) %(contextIDs)s that was(are) not used in any fact."),
modelXbrl=modelXbrl, contextIDs=", ".join(contextIDs))
#6.5.9, .10 start-end durations
if disclosureSystem.GFM or \
disclosureSystemVersion[0] >= 27 or \
documentType in {
'20-F', '40-F', '10-Q', '10-QT', '10-K', '10-KT', '10', 'N-CSR', 'N-CSRS', 'N-Q',
'20-F/A', '40-F/A', '10-Q/A', '10-QT/A', '10-K/A', '10-KT/A', '10/A', 'N-CSR/A', 'N-CSRS/A', 'N-Q/A'}:
'''
for c1 in contexts:
if c1.isStartEndPeriod:
end1 = c1.endDatetime
start1 = c1.startDatetime
for c2 in contexts:
if c1 != c2 and c2.isStartEndPeriod:
duration = end1 - c2.startDatetime
if duration > datetime.timedelta(0) and duration <= datetime.timedelta(1):
modelXbrl.error(("EFM.6.05.09", "GFM.1.2.9"),
_("Context {0} endDate and {1} startDate have a duration of one day; that is inconsistent with document type {2}."),
c1.id, c2.id, documentType),
"err", )
if self.validateEFM and c1 != c2 and c2.isInstantPeriod:
duration = c2.endDatetime - start1
if duration > datetime.timedelta(0) and duration <= datetime.timedelta(1):
modelXbrl.error(
_("Context {0} startDate and {1} end (instant) have a duration of one day; that is inconsistent with document type {2}."),
c1.id, c2.id, documentType),
"err", "EFM.6.05.10")
'''
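                # Replacement for the commented-out O(n^2) loop above: bucket start-end
                # contexts by their startDatetime, then for each context whose endDatetime
                # falls within one day after a bucketed startDatetime record the offending
                # context pairs and report them grouped by end date (EFM.6.05.09/GFM.1.2.9
                # for start-end contexts; EFM.6.05.10 for instants, reported only for
                # pre-v27 disclosure systems).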
durationCntxStartDatetimes = defaultdict(set)
for cntx in contexts:
if cntx.isStartEndPeriod and cntx.startDatetime is not None:
durationCntxStartDatetimes[cntx.startDatetime].add(cntx)
probStartEndCntxsByEnd = defaultdict(set)
startEndCntxsByEnd = defaultdict(set)
probInstantCntxsByEnd = defaultdict(set)
probCntxs = set()
for cntx in contexts:
end = cntx.endDatetime
if end is not None:
if cntx.isStartEndPeriod:
thisStart = cntx.startDatetime
for otherStart, otherCntxs in durationCntxStartDatetimes.items():
duration = end - otherStart
if duration > datetime.timedelta(0) and duration <= datetime.timedelta(1):
if disclosureSystemVersion[0] < 27:
probCntxs |= otherCntxs - {cntx}
elif thisStart is not None and end - thisStart > datetime.timedelta(1):
for otherCntx in otherCntxs:
if otherCntx is not cntx and otherCntx.endDatetime != end and otherStart != cntx.startDatetime:
probCntxs.add(otherCntx)
if probCntxs:
probStartEndCntxsByEnd[end] |= probCntxs
startEndCntxsByEnd[end] |= {cntx}
probCntxs.clear()
if self.validateEFM and cntx.isInstantPeriod:
for otherStart, otherCntxs in durationCntxStartDatetimes.items():
duration = end - otherStart
if duration > datetime.timedelta(0) and duration <= datetime.timedelta(1):
probCntxs |= otherCntxs
if probCntxs:
probInstantCntxsByEnd[end] |= ( probCntxs | {cntx} )
probCntxs.clear()
del probCntxs
for end, probCntxs in probStartEndCntxsByEnd.items():
endCntxs = startEndCntxsByEnd[end]
modelXbrl.error(("EFM.6.05.09", "GFM.1.2.9"),
_("Context endDate %(endDate)s, and startDate(s) have a duration of one day, for end context(s): %(endContexts)s and start context(s): %(startContexts)s; that is inconsistent with document type %(documentType)s."),
modelObject=probCntxs, endDate=XmlUtil.dateunionValue(end, subtractOneDay=True),
endContexts=', '.join(sorted(c.id for c in endCntxs)),
startContexts=', '.join(sorted(c.id for c in probCntxs)),
documentType=documentType)
if disclosureSystemVersion[0] < 27:
for end, probCntxs in probInstantCntxsByEnd.items():
modelXbrl.error("EFM.6.05.10",
_("Context instant date %(endDate)s startDate has a duration of one day,with end (instant) of context(s): %(contexts)s; that is inconsistent with document type %(documentType)s."),
modelObject=probCntxs, endDate=XmlUtil.dateunionValue(end, subtractOneDay=True),
contexts=', '.join(sorted(c.id for c in probCntxs)),
documentType=documentType)
del probStartEndCntxsByEnd, startEndCntxsByEnd, probInstantCntxsByEnd
del durationCntxStartDatetimes
self.modelXbrl.profileActivity("... filer instant-duration checks", minTimeToShow=1.0)
#6.5.19 required context
foundRequiredContext = False
for c in contexts:
if c.isStartEndPeriod:
if not c.hasSegment:
foundRequiredContext = True
break
if not foundRequiredContext:
modelXbrl.error(("EFM.6.05.19", "GFM.1.02.18"),
_("Required context (no segment) not found for document type %(documentType)s."),
modelObject=documentTypeFact, documentType=documentType)
#6.5.11 equivalent units
uniqueUnitHashes = {}
for unit in self.modelXbrl.units.values():
h = unit.hash
if h in uniqueUnitHashes:
if unit.isEqualTo(uniqueUnitHashes[h]):
modelXbrl.error(("EFM.6.05.11", "GFM.1.02.10"),
_("Units %(unitID)s and %(unitID2)s are equivalent."),
modelObject=(unit, uniqueUnitHashes[h]), unitID=unit.id, unitID2=uniqueUnitHashes[h].id)
else:
uniqueUnitHashes[h] = unit
if self.validateEFM: # 6.5.38
for measureElt in unit.iterdescendants(tag="{http://www.xbrl.org/2003/instance}measure"):
if isinstance(measureElt.xValue, ModelValue.QName) and len(measureElt.xValue.localName) > 65:
l = len(measureElt.xValue.localName.encode("utf-8"))
if l > 200:
modelXbrl.error("EFM.6.05.36",
_("Unit has a measure with localName length (%(length)s) over 200 bytes long in utf-8, %(measure)s."),
modelObject=measureElt, unitID=unit.id, measure=measureElt.xValue.localName, length=l)
del uniqueUnitHashes
self.modelXbrl.profileActivity("... filer unit checks", minTimeToShow=1.0)
# EFM.6.05.14, GFM.1.02.13 xml:lang tests, as of v-17, full default lang is compared
#if self.validateEFM:
# factLangStartsWith = disclosureSystem.defaultXmlLang[:2]
#else:
# factLangStartsWith = disclosureSystem.defaultXmlLang
requiredFactLang = disclosureSystem.defaultXmlLang
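            # The single pass over facts below builds three things at once: a
            # (qname, contextID, unitID) -> facts table for the 6.5.14 language check,
            # an insignificant-digits check (6.5.37) for validated numeric facts, and a
            # conceptContextUnitLangHash bucket used to detect equivalent facts (6.5.12).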
#6.5.12 equivalent facts
factsForLang = {}
factForConceptContextUnitLangHash = {}
keysNotDefaultLang = {}
iF1 = 1
for f1 in modelXbrl.facts:
# build keys table for 6.5.14
if not f1.isNil:
langTestKey = "{0},{1},{2}".format(f1.qname, f1.contextID, f1.unitID)
factsForLang.setdefault(langTestKey, []).append(f1)
lang = f1.xmlLang
if lang and lang != requiredFactLang: # not lang.startswith(factLangStartsWith):
keysNotDefaultLang[langTestKey] = f1
# 6.5.37 test (insignificant digits due to rounding)
if f1.isNumeric and f1.decimals and f1.decimals != "INF" and not f1.isNil and getattr(f1,"xValid", 0) == 4:
try:
insignificance = insignificantDigits(f1.xValue, decimals=f1.decimals)
                        if insignificance: # if not None, returns (truncatedDigits, insignificantDigits)
modelXbrl.error(("EFM.6.05.37", "GFM.1.02.26"),
_("Fact %(fact)s of context %(contextID)s decimals %(decimals)s value %(value)s has nonzero digits in insignificant portion %(insignificantDigits)s."),
modelObject=f1, fact=f1.qname, contextID=f1.contextID, decimals=f1.decimals,
value=f1.xValue, truncatedDigits=insignificance[0], insignificantDigits=insignificance[1])
except (ValueError,TypeError):
modelXbrl.error(("EFM.6.05.37", "GFM.1.02.26"),
_("Fact %(fact)s of context %(contextID)s decimals %(decimals)s value %(value)s causes Value Error exception."),
modelObject=f1, fact=f1.qname, contextID=f1.contextID, decimals=f1.decimals, value=f1.value)
# 6.5.12 test
h = f1.conceptContextUnitLangHash
if h in factForConceptContextUnitLangHash:
f2 = factForConceptContextUnitLangHash[h]
if f1.qname == f2.qname and \
f1.contextID == f2.contextID and \
f1.unitID == f2.unitID and \
f1.xmlLang == f2.xmlLang:
modelXbrl.error(("EFM.6.05.12", "GFM.1.02.11"),
"Facts %(fact)s of context %(contextID)s and %(contextID2)s are equivalent.",
modelObject=(f1, f2), fact=f1.qname, contextID=f1.contextID, contextID2=f2.contextID)
else:
factForConceptContextUnitLangHash[h] = f1
iF1 += 1
del factForConceptContextUnitLangHash
self.modelXbrl.profileActivity("... filer fact checks", minTimeToShow=1.0)
#6.5.14 facts without english text
for keyNotDefaultLang, factNotDefaultLang in keysNotDefaultLang.items():
anyDefaultLangFact = False
for fact in factsForLang[keyNotDefaultLang]:
if fact.xmlLang == requiredFactLang: #.startswith(factLangStartsWith):
anyDefaultLangFact = True
break
if not anyDefaultLangFact:
self.modelXbrl.error(("EFM.6.05.14", "GFM.1.02.13"),
_("Fact %(fact)s of context %(contextID)s has text of xml:lang '%(lang)s' without corresponding %(lang2)s text"),
modelObject=factNotDefaultLang, fact=factNotDefaultLang.qname, contextID=factNotDefaultLang.contextID,
lang=factNotDefaultLang.xmlLang, lang2=requiredFactLang) # factLangStartsWith)
#label validations
if not labelsRelationshipSet:
self.modelXbrl.error(("EFM.6.10.01.missingLabelLinkbase", "GFM.1.05.01"),
_("A label linkbase is required but was not found"),
modelXbrl=modelXbrl)
elif disclosureSystem.defaultXmlLang: # cannot check if no defaultXmlLang specified
for concept in conceptsUsed.keys():
self.checkConceptLabels(modelXbrl, labelsRelationshipSet, disclosureSystem, concept)
#6.5.15 facts with xml in text blocks
if self.validateEFMorGFM:
ValidateFilingText.validateTextBlockFacts(modelXbrl)
if amendmentFlag is None:
modelXbrl.log("WARNING" if validateEFMpragmatic else "ERROR",
("EFM.6.05.20.missingAmendmentFlag", "GFM.3.02.01"),
_("%(elementName)s is not found in the default context"),
modelXbrl=modelXbrl, elementName=disclosureSystem.deiAmendmentFlagElement)
if not documentPeriodEndDate:
modelXbrl.error(("EFM.6.05.20.missingDocumentPeriodEndDate", "GFM.3.02.01"),
_("%(elementName)s is required and was not found in the default context"),
modelXbrl=modelXbrl, elementName=disclosureSystem.deiDocumentPeriodEndDateElement)
else:
dateMatch = datePattern.match(documentPeriodEndDate)
if not dateMatch or dateMatch.lastindex != 3:
modelXbrl.error(("EFM.6.05.20", "GFM.3.02.01"),
_("%(elementName)s is in the default context is incorrect '%(date)s'"),
modelXbrl=modelXbrl, elementName=disclosureSystem.deiDocumentPeriodEndDateElement,
date=documentPeriodEndDate)
self.modelXbrl.profileActivity("... filer label and text checks", minTimeToShow=1.0)
if self.validateEFM:
if amendmentFlag == "true" and amendmentDescription is None:
modelXbrl.log("WARNING" if validateEFMpragmatic else "ERROR",
"EFM.6.05.20.missingAmendmentDescription",
_("AmendmentFlag is true in context %(contextID)s so AmendmentDescription is also required"),
modelObject=amendmentFlagFact, contextID=amendmentFlagFact.contextID if amendmentFlagFact is not None else "unknown")
if amendmentDescription is not None and amendmentFlag != "true":
modelXbrl.log("WARNING" if validateEFMpragmatic else "ERROR",
"EFM.6.05.20.extraneous",
_("AmendmentDescription can not be provided when AmendmentFlag is not true in context %(contextID)s"),
modelObject=amendmentDescriptionFact, contextID=amendmentDescriptionFact.contextID)
if documentType is None:
modelXbrl.error("EFM.6.05.20.missingDocumentType",
_("DocumentType is required and was not found in the default context"),
modelXbrl=modelXbrl)
elif documentType not in {
"497",
"10-12B",
"10-12B/A",
"10-12G",
"10-12G/A",
"10-K/A",
"10-KT",
"10-K",
"10-KT/A",
"10-Q/A",
"10-QT",
"10-Q",
"10-QT/A",
"20-F",
"20-F/A",
"20FR12B",
"20FR12B/A",
"20FR12G",
"20FR12G/A",
"40-F",
"40-F/A",
"40FR12B",
"40FR12B/A",
"40FR12G",
"40FR12G/A",
"485BPOS",
"6-K",
"6-K/A",
"8-K",
"8-K/A",
"8-K12B",
"8-K12B/A",
"8-K12G3",
"8-K12G3/A",
"8-K15D5",
"8-K15D5/A",
"F-1/A",
"F-10",
"F-10/A",
"F-10EF",
"F-10POS",
"F-3/A",
"F-3ASR",
"F-3D",
"F-3DPOS",
"F-4 POS",
"F-4/A",
"F-4EF",
"F-9 POS",
"F-9/A",
"F-9",
"F-9EF",
"N-1A",
"N-1A/A",
"N-CSR",
"N-CSR/A",
"N-CSRS",
"N-CSRS/A",
"N-Q",
"N-Q/A",
"F-1",
"F-6",
"POS AM",
"SD",
"SD/A",
"S-20",
"S-B",
"F-4",
"POS EX",
"F-1MEF",
"F-3MEF",
"F-4MEF",
"POS462B",
"POS462C",
"S-BMEF",
"F-3",
"Other",
"POSASR",
"S-1",
"S-1/A",
"S-11",
"S-11/A",
"S-11MEF",
"S-1MEF",
"S-3/A",
"S-3ASR",
"S-3D",
"S-3",
"S-3DPOS",
"S-3MEF",
"S-4 POS",
"S-4/A",
"S-4",
"S-4EF",
"S-4MEF",
"SD",
"SD/A",
"SP 15D2",
"SP 15D2/A"
}:
modelXbrl.error("EFM.6.05.20.documentTypeValue",
_("DocumentType '%(documentType)s' of context %(contextID)s was not recognized"),
modelObject=documentTypeFact, contextID=documentTypeFact.contextID, documentType=documentType)
elif submissionType:
expectedDocumentTypes = {
"10-12B": ("10-12B", "Other"),
"10-12B/A": ("10-12B/A", "Other"),
"10-12G": ("10-12G", "Other"),
"10-12G/A": ("10-12G/A", "Other"),
"10-K": ("10-K",),
"10-K/A": ("10-K", "10-K/A"),
"10-KT": ("10-K","10-KT","Other"),
"10-KT/A": ("10-K", "10-KT", "10-KT/A", "Other"),
"10-Q": ("10-Q",),
"10-Q/A": ("10-Q", "10-Q/A"),
"10-QT": ("10-Q", "10-QT", "Other"),
"10-QT/A": ("10-Q", "10-QT", "10-QT/A", "Other"),
"20-F": ("20-F",),
"20-F/A": ("20-F", "20-F/A"),
"20FR12B": ("20FR12B", "Other"),
"20FR12B/A": ("20FR12B/A", "Other"),
"20FR12G": ("20FR12G", "Other"),
"20FR12G/A": ("20FR12G/A", "Other"),
"40-F": ("40-F",),
"40-F/A": ("40-F", "40-F/A"),
"40FR12B": ("40FR12B", "Other"),
"40FR12B/A": ("40FR12B/A", "Other"),
"40FR12G": ("40FR12G", "Other"),
"40FR12G/A": ("40FR12G/A", "Other"),
"485BPOS": ("485BPOS",),
"497": ("497", "Other"),
"6-K": ("6-K",),
"6-K/A": ("6-K", "6-K/A"),
"8-K": ("8-K",),
"8-K/A": ("8-K", "8-K/A"),
"8-K12B": ("8-K12B", "Other"),
"8-K12B/A": ("8-K12B/A", "Other"),
"8-K12G3": ("8-K12G3", "Other"),
"8-K12G3/A": ("8-K12G3/A", "Other"),
"8-K15D5": ("8-K15D5", "Other"),
"8-K15D5/A": ("8-K15D5/A", "Other"),
"F-1": ("F-1",),
"F-1/A": ("F-1", "F-1/A"),
"F-10": ("F-10",),
"F-10/A": ("F-10", "F-10/A"),
"F-10EF": ("F-10EF", "Other"),
"F-10POS": ("F-10POS", "Other"),
"F-1MEF": ("F-1MEF",),
"F-3": ("F-3",),
"F-3/A": ("F-3", "F-3/A"),
"F-3ASR": ("F-3", "F-3ASR"),
"F-3D": ("F-3", "F-3D"),
"F-3DPOS": ("F-3", "F-3DPOS"),
"F-3MEF": ("F-3MEF",),
"F-4": ("F-4",),
"F-4 POS": ("F-4", "F-4 POS"),
"F-4/A": ("F-4", "F-4/A"),
"F-4EF": ("F-4", "F-4EF"),
"F-4MEF": ("F-4MEF",),
"F-9": ("F-9",),
"F-9 POS": ("F-9", "F-9 POS"),
"F-9/A": ("F-9", "F-9/A"),
"F-9EF": ("F-9", "F-9EF"),
"N-1A": ("N-1A",),
"N-1A/A": ("N-1A/A", "Other"),
"N-CSR": ("N-CSR",),
"N-CSR/A": ("N-CSR/A",),
"N-CSRS": ("N-CSRS",),
"N-CSRS/A": ("N-CSRS/A",),
"N-Q": ("N-Q",),
"N-Q/A": ("N-Q/A",),
"POS AM": ("F-1", "F-3", "F-4", "F-6", "Other",
"POS AM", "S-1", "S-11", "S-20", "S-3", "S-4", "S-B"),
"POS EX": ("F-3", "F-4", "Other",
"POS EX", "S-1", "S-3", "S-4"),
"POS462B": ("F-1MEF", "F-3MEF", "F-4MEF", "Other",
"POS462B", "POS462C", "S-11MEF", "S-1MEF", "S-3MEF", "S-BMEF"),
"POSASR": ("F-3", "Other", "POSASR", "S-3"),
"S-1": ("S-1",),
"S-1/A": ("S-1", "S-1/A"),
"S-11": ("S-11",),
"S-11/A": ("S-11/A",),
"S-11MEF": ("S-11MEF",),
"S-1MEF": ("S-1MEF",),
"S-3": ("S-3",),
"S-3/A": ("S-3", "S-3/A"),
"S-3ASR": ("S-3", "S-3ASR"),
"S-3D": ("S-3", "S-3D"),
"S-3DPOS": ("S-3", "S-3DPOS"),
"S-3MEF": ("S-3MEF",),
"S-4": ("S-4",),
"S-4 POS": ("S-4", "S-4 POS"),
"S-4/A": ("S-4", "S-4/A"),
"S-4EF": ("S-4", "S-4EF"),
"S-4MEF": ("S-4MEF",),
"SD": ("SD",),
"SD/A": ("SD/A",),
"SP 15D2": ("SP 15D2",),
"SP 15D2/A": ("SP 15D2/A",)
}.get(submissionType)
if expectedDocumentTypes and documentType not in expectedDocumentTypes:
modelXbrl.error("EFM.6.05.20.submissionDocumentType" if self.exhibitType != "EX-2.01" else "EFM.6.23.03",
_("DocumentType '%(documentType)s' of context %(contextID)s inapplicable to submission form %(submissionType)s"),
modelObject=documentTypeFact, contextID=documentTypeFact.contextID, documentType=documentType, submissionType=submissionType,
messageCodes=("EFM.6.05.20.submissionDocumentType", "EFM.6.23.03"))
if self.exhibitType:
if (documentType in ("SD", "SD/A")) != (self.exhibitType == "EX-2.01"):
modelXbrl.error({"EX-100":"EFM.6.23.04",
"EX-101":"EFM.6.23.04",
"EX-2.01":"EFM.6.23.05"}.get(self.exhibitType,"EX-101"),
_("The value for dei:DocumentType, %(documentType)s, is not allowed for %(exhibitType)s attachments."),
modelObject=documentTypeFact, contextID=documentTypeFact.contextID, documentType=documentType, exhibitType=self.exhibitType,
messageCodes=("EFM.6.23.04", "EFM.6.23.04", "EFM.6.23.05"))
# 6.5.21
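# each entry pairs a tuple of document types with the dei elements they require in the required context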
for doctypesRequired, deiItemsRequired in (
(("10-K", "10-KT", "10-Q", "10-QT", "20-F", "40-F",
"10-K/A", "10-KT/A", "10-Q/A", "10-QT/A", "20-F/A", "40-F/A",
"6-K", "NCSR", "N-CSR", "N-CSRS", "N-Q",
"6-K/A", "NCSR/A", "N-CSR/A", "N-CSRS/A", "N-Q/A",
"10", "S-1", "S-3", "S-4", "S-11", "POS AM",
"10/A", "S-1/A", "S-3/A", "S-4/A", "S-11/A",
"8-K", "F-1", "F-3", "F-10", "497", "485BPOS",
"8-K/A", "F-1/A", "F-3/A", "F-10/A",
"Other"),
("EntityRegistrantName", "EntityCentralIndexKey")),
(("10-K", "10-KT", "20-F", "40-F",
"10-K/A", "10-KT/A", "20-F/A", "40-F/A"),
("EntityCurrentReportingStatus",)),
(("10-K", "10-KT", "10-K/A", "10-KT/A",),
("EntityVoluntaryFilers", "EntityPublicFloat")),
(("10-K", "10-KT", "10-Q", "10-QT", "20-F", "40-F",
"10-K/A", "10-KT/A", "10-Q/A", "10-QT/A", "20-F/A", "40-F/A",
"6-K", "NCSR", "N-CSR", "N-CSRS", "N-Q",
"6-K/A", "NCSR/A", "N-CSR/A", "N-CSRS/A", "N-Q/A"),
("CurrentFiscalYearEndDate", "DocumentFiscalYearFocus", "DocumentFiscalPeriodFocus")),
(("10-K", "10-KT", "10-Q", "10-QT", "20-F",
"10-K/A", "10-KT/A", "10-Q/A", "10-QT/A", "20-F/A",
"10", "S-1", "S-3", "S-4", "S-11", "POS AM",
"10/A", "S-1/A", "S-3/A", "S-4/A", "S-11/A"),
("EntityFilerCategory",)),
(("10-K", "10-KT", "20-F", "10-K/A", "10-KT/A", "20-F/A"),
("EntityWellKnownSeasonedIssuer",)),
(("SD", "SD/A"),
("EntityReportingCurrencyISOCode", ))
):
if documentType in doctypesRequired:
for deiItem in deiItemsRequired:
if deiItem not in deiItems or not deiItems[deiItem]: #must exist and value must be non-empty (incl not nil)
modelXbrl.log(("WARNING" if validateEFMpragmatic and deiItem in {
"CurrentFiscalYearEndDate", "DocumentFiscalPeriodFocus", "DocumentFiscalYearFocus",
"EntityCurrentReportingStatus", "EntityFilerCategory", "EntityPublicFloat",
"EntityVoluntaryFilers", "EntityWellKnownSeasonedIssuer"
} else "ERROR"),
("EFM.6.05.21.{0}".format(deiItem) if validateEFMpragmatic and deiItem in {
"CurrentFiscalYearEndDate", "DocumentFiscalPeriodFocus", "DocumentFiscalYearFocus",
"EntityRegistrantName", "EntityCentralIndexKey",
"EntityCurrentReportingStatus", "EntityFilerCategory", "EntityPublicFloat",
"EntityVoluntaryFilers", "EntityWellKnownSeasonedIssuer"
} else "EFM.6.23.36" if deiItem == "EntityReportingCurrencyISOCode"
else "EFM.6.05.21"),
_("dei:%(elementName)s is required for DocumentType '%(documentType)s' of context %(contextID)s"),
modelObject=documentTypeFact, contextID=documentTypeFact.contextID, documentType=documentType,
elementName=deiItem,
messageCodes=("EFM.6.05.21.CurrentFiscalYearEndDate", "EFM.6.05.21.DocumentFiscalPeriodFocus", "EFM.6.05.21.DocumentFiscalYearFocus",
"EFM.6.05.21.EntityRegistrantName", "EFM.6.05.21.EntityCentralIndexKey",
"EFM.6.05.21.EntityCurrentReportingStatus", "EFM.6.05.21.EntityFilerCategory", "EFM.6.05.21.EntityPublicFloat",
"EFM.6.05.21.EntityVoluntaryFilers", "EFM.6.05.21.EntityWellKnownSeasonedIssuer",
"EFM.6.23.36", "EFM.6.05.21"))
if documentType in {"10-K", "10-KT", "10-Q", "10-QT", "20-F", "40-F",
"10-K/A", "10-KT/A", "10-Q/A", "10-QT/A", "20-F/A", "40-F/A"}:
defaultContextSharesOutstandingValue = deiItems.get("EntityCommonStockSharesOutstanding")
errLevel = "WARNING" if validateEFMpragmatic else "ERROR"
if commonSharesClassMembers:
if defaultContextSharesOutstandingValue: # checks that it exists and is not empty or nil
modelXbrl.log(errLevel, "EFM.6.05.26",
_("dei:EntityCommonStockSharesOutstanding is required for DocumentType '%(documentType)s' but not in the default context because there are multiple classes of common shares"),
modelObject=documentTypeFact, contextID=documentTypeFact.contextID, documentType=documentType)
elif len(commonSharesClassMembers) == 1: # and not hasDefinedStockAxis:
modelXbrl.log(errLevel, "EFM.6.05.26",
_("dei:EntityCommonStockSharesOutstanding is required for DocumentType '%(documentType)s' but but a default-context because only one class of stock"),
modelObject=documentTypeFact, documentType=documentType)
''' per Dean R, this test no longer makes sense because we don't check against def LB defined members
missingClasses = commonSharesClassMembers - _DICT_SET(commonSharesItemsByStockClass.keys())
if missingClasses:
modelXbrl.log(errLevel, "EFM.6.05.26",
_("dei:EntityCommonStockSharesOutstanding is required for DocumentType '%(documentType)s' but missing in these stock classes: %(stockClasses)s"),
modelObject=documentTypeFact, documentType=documentType, stockClasses=", ".join([str(c) for c in missingClasses]))
'''
for mem, facts in commonSharesItemsByStockClass.items():
if len(facts) != 1:
modelXbrl.log(errLevel, "EFM.6.05.26",
_("dei:EntityCommonStockSharesOutstanding is required for DocumentType '%(documentType)s' but only one per stock class %(stockClass)s"),
modelObject=documentTypeFact, documentType=documentType, stockClass=mem)
''' removed per ARELLE-124 (should check measurement date vs report date)
elif facts[0].context.instantDatetime != commonStockMeasurementDatetime:
modelXbrl.log(errLevel, "EFM.6.05.26",
_("dei:EntityCommonStockSharesOutstanding is required for DocumentType '%(documentType)s' in stock class %(stockClass)s with measurement date %(date)s"),
modelObject=documentTypeFact, documentType=documentType, stockClass=mem, date=commonStockMeasurementDatetime)
'''
elif hasCommonSharesOutstandingDimensionedFactWithDefaultStockClass and not defaultContextSharesOutstandingValue:
modelXbrl.log(errLevel, "EFM.6.05.26",
_("dei:EntityCommonStockSharesOutstanding is required for DocumentType '%(documentType)s' but missing for a non-default-context fact"),
modelObject=documentTypeFact, documentType=documentType)
elif not defaultContextSharesOutstandingValue: # missing, empty, or nil
modelXbrl.log(errLevel, "EFM.6.05.26",
_("dei:EntityCommonStockSharesOutstanding is required for DocumentType '%(documentType)s' in the default context because there are not multiple classes of common shares"),
modelObject=documentTypeFact, documentType=documentType)
if documentType in ("SD", "SD/A"): # SD documentType
self.modelXbrl.profileActivity("... filer required facts checks (other than SD)", minTimeToShow=1.0)
rxdNs = None # find RXD schema
rxdDoc = None
hasRxdPre = hasRxdDef = False
for rxdLoc in disclosureSystem.familyHrefs["RXD"]:
rxdUri = rxdLoc.href
if rxdUri in modelXbrl.urlDocs:
if rxdUri.endswith(".xsd") and rxdLoc.elements == "1":
if rxdNs is None:
rxdDoc = modelXbrl.urlDocs[rxdUri]
rxdNs = rxdDoc.targetNamespace
else:
modelXbrl.error("EFM.6.23.10",
_("The DTS of must use only one version of the RXD schema"),
modelObject=(rxdDoc, modelXbrl.urlDocs[rxdUri]), instance=instanceName)
elif "/rxd-pre-" in rxdUri:
hasRxdPre = True
elif "/rxd-def-" in rxdUri:
hasRxdDef = True
if not hasRxdPre:
modelXbrl.error("EFM.6.23.08",
_("The DTS must use a standard presentation linkbase from Family RXD in edgartaxonomies.xml."),
modelObject=modelXbrl, instance=instanceName)
if not hasRxdDef:
modelXbrl.error("EFM.6.23.09",
_("The DTS must use a standard definition linkbase from Family RXD in edgartaxonomies.xml."),
modelObject=modelXbrl, instance=instanceName)
countryNs = None
deiNS = None
for url, doc in modelXbrl.urlDocs.items():
if doc.type == ModelDocument.Type.SCHEMA:
if url.startswith("http://xbrl.sec.gov/country/"):
if countryNs is None:
countryNs = doc.targetNamespace
else:
modelXbrl.error("EFM.6.23.11",
_("The DTS must use must use only one version of the COUNTRY schema."),
modelObject=(doc
for url,doc in modelXbrl.urlDocs.items()
if url.startswith("http://xbrl.sec.gov/country/")), instance=instanceName)
if disclosureSystem.deiNamespacePattern.match(doc.targetNamespace):
deiNS = doc.targetNamespace
if rxdNs:
qn = ModelValue.qname(rxdNs, "AmendmentNumber")
if amendmentFlag == "true" and (
qn not in modelXbrl.factsByQname or not any(
f.context is not None and not f.context.hasSegment
for f in modelXbrl.factsByQname[qn])):
modelXbrl.error("EFM.6.23.06",
_("The value for dei:DocumentType, %(documentType)s, requires a value for rxd:AmendmentNumber in the Required Context."),
modelObject=modelXbrl, documentType=documentType)
else:
modelXbrl.error("EFM.6.23.07",
_("The DTS must use a standard schema from Family RXD in edgartaxonomies.xml."),
modelObject=modelXbrl, instance=instanceName)
class Rxd(): # fake class of rxd qnames based on discovered rxd namespace
def __init__(self):
for name in ("CountryAxis", "GovernmentAxis", "PaymentTypeAxis", "ProjectAxis","PmtAxis",
"AllGovernmentsMember", "AllProjectsMember","BusinessSegmentAxis", "EntityDomain",
"A", "Cm", "Co", "Cu", "D", "Gv", "E", "K", "Km", "P", "Payments", "Pr", "Sm"):
setattr(self, name, ModelValue.qname(rxdNs, "rxd:" + name))
rxd = Rxd()
f1 = deiFacts.get(disclosureSystem.deiCurrentFiscalYearEndDateElement)
if f1 is not None and documentPeriodEndDateFact is not None and f1.xValid and documentPeriodEndDateFact.xValid:
d = ModelValue.dateunionDate(documentPeriodEndDateFact.xValue)# is an end date, convert back to a start date without midnight part
if f1.xValue.month != d.month or f1.xValue.day != d.day:
modelXbrl.error("EFM.6.23.26",
_("The dei:CurrentFiscalYearEndDate, %(fyEndDate)s does not match the dei:DocumentReportingPeriod %(reportingPeriod)s"),
modelObject=(f1,documentPeriodEndDateFact), fyEndDate=f1.value, reportingPeriod=documentPeriodEndDateFact.value)
if (documentPeriodEndDateFact is not None and documentPeriodEndDateFact.xValid and
not any(f2.xValue == documentPeriodEndDateFact.xValue
for f2 in modelXbrl.factsByQname[rxd.D]
if f2.xValid)):
modelXbrl.error("EFM.6.23.27",
_("The dei:DocumentPeriodEndDate %(reportingPeriod)s has no corresponding rxd:D fact."),
modelObject=documentPeriodEndDateFact, reportingPeriod=documentPeriodEndDateFact.value)
for url,doc in modelXbrl.urlDocs.items():
if (url not in disclosureSystem.standardTaxonomiesDict and
doc.type == ModelDocument.Type.SCHEMA):
for concept in XmlUtil.children(doc.xmlRootElement, XbrlConst.xsd, "element"):
name = concept.name
if not concept.isAbstract and not concept.isTextBlock:
modelXbrl.error("EFM.6.23.12",
_("Extension concept %(concept)s is non-abstract and not a Text Block."),
modelObject=concept, schemaName=doc.basename, name=concept.name, concept=concept.qname)
elif name.endswith("Table") or name.endswith("Axis") or name.endswith("Domain"):
modelXbrl.error("EFM.6.23.13",
_("Extension concept %(concept)s is not allowed in an extension schema."),
modelObject=concept, schemaName=doc.basename, name=concept.name, concept=concept.qname)
self.modelXbrl.profileActivity("... SD checks 6-13, 26-27", minTimeToShow=1.0)
dimDefRelSet = modelXbrl.relationshipSet(XbrlConst.dimensionDefault)
dimDomRelSet = modelXbrl.relationshipSet(XbrlConst.dimensionDomain)
hypDimRelSet = modelXbrl.relationshipSet(XbrlConst.hypercubeDimension)
hasHypRelSet = modelXbrl.relationshipSet(XbrlConst.all)
for rel in dimDomRelSet.modelRelationships:
if (isinstance(rel.fromModelObject, ModelConcept) and isinstance(rel.toModelObject, ModelConcept) and
not dimDefRelSet.isRelated(rel.fromModelObject, "child", rel.toModelObject)):
modelXbrl.error("EFM.6.23.14",
_("The target of the dimension-domain relationship in role %(linkrole)s from %(source)s to %(target)s must be the default member of %(source)s."),
modelObject=(rel, rel.fromModelObject, rel.toModelObject),
linkbaseName=rel.modelDocument.basename, linkrole=rel.linkrole,
source=rel.fromModelObject.qname, target=rel.toModelObject.qname)
domMemRelSet = modelXbrl.relationshipSet(XbrlConst.domainMember)
memDim = {}
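# walk domain-member and dimension-domain ancestry so each member is traced back to its dimension(s);
# a member reachable from two different dimensions in the same linkrole raises EFM 6.23.16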
def checkMemMultDims(memRel, dimRel, elt, ELR, visited):
if elt not in visited:
visited.add(elt)
for rel in domMemRelSet.toModelObject(elt):
if rel.consecutiveLinkrole == ELR and isinstance(rel.fromModelObject, ModelConcept):
checkMemMultDims(memRel, None, rel.fromModelObject, rel.linkrole, visited)
for rel in dimDomRelSet.toModelObject(elt):
if rel.consecutiveLinkrole == ELR:
dim = rel.fromModelObject
mem = memRel.toModelObject
if isinstance(dim, ModelConcept) and isinstance(mem, ModelConcept):
if dim.qname == rxd.PaymentTypeAxis and not mem.modelDocument.targetNamespace.startswith("http://xbrl.sec.gov/rxd/"):
modelXbrl.error("EFM.6.23.17",
_("The member %(member)s in dimension rxd:PaymentTypeAxis in linkrole %(linkrole)s must be a QName with namespace that begins with \"http://xbrl.sec.gov/rxd/\". "),
modelObject=(rel, memRel, dim, mem), member=mem.qname, linkrole=rel.linkrole)
if dim.qname == rxd.CountryAxis and not mem.modelDocument.targetNamespace.startswith("http://xbrl.sec.gov/country/"):
modelXbrl.error("EFM.6.23.18",
_("The member %(member)s in dimension rxd:CountryAxis in linkrole %(linkrole)s must be a QName with namespace that begins with \"http://xbrl.sec.gov/country//\". "),
modelObject=(rel, memRel, dim, mem), member=mem.qname, linkrole=rel.linkrole)
checkMemMultDims(memRel, rel, rel.fromModelObject, rel.linkrole, visited)
for rel in hypDimRelSet.toModelObject(elt):
if rel.consecutiveLinkrole == ELR and isinstance(rel.fromModelObject, ModelConcept):
checkMemMultDims(memRel, dimRel, rel.fromModelObject, rel.linkrole, visited)
for rel in hasHypRelSet.toModelObject(elt):
if rel.consecutiveLinkrole == ELR and isinstance(rel.fromModelObject, ModelConcept):
linkrole = rel.linkrole
mem = memRel.toModelObject
if (mem,linkrole) not in memDim:
memDim[mem,linkrole] = (dimRel, memRel)
else:
otherDimRel, otherMemRel = memDim[mem,linkrole]
modelXbrl.error("EFM.6.23.16",
_("The member %(member)s has two dimensions, %(dimension1)s in linkrole %(linkrole1)s and %(dimension2)s in linkrole %(linkrole2)s. "),
modelObject=(dimRel, otherDimRel, memRel, otherMemRel, dimRel.fromModelObject, otherDimRel.fromModelObject),
member=mem.qname, dimension1=dimRel.fromModelObject.qname, linkrole1=linkrole,
dimension2=otherDimRel.fromModelObject.qname, linkrole2=otherDimRel.linkrole)
visited.discard(elt)
for rel in domMemRelSet.modelRelationships:
if isinstance(rel.fromModelObject, ModelConcept) and isinstance(rel.toModelObject, ModelConcept):
for rel2 in modelXbrl.relationshipSet(XbrlConst.domainMember, rel.consecutiveLinkrole).fromModelObject(rel.toModelObject):
if rel2.fromModelObject is not None and rel2.toModelObject is not None:
modelXbrl.error("EFM.6.23.15",
_("The domain-member relationship in %(linkrole)s from %(source)s to %(target)s is consecutive with domain-member relationship in %(linkrole2)s to %(target2)s. "),
modelObject=(rel, rel.fromModelObject, rel.toModelObject),
linkrole=rel.linkrole, linkrole2=rel2.linkrole,
source=rel.fromModelObject.qname, target=rel.toModelObject.qname, target2=rel2.toModelObject.qname)
checkMemMultDims(rel, None, rel.fromModelObject, rel.linkrole, set())
self.modelXbrl.profileActivity("... SD checks 14-18", minTimeToShow=1.0)
qnDeiEntityDomain = ModelValue.qname(deiNS, "dei:EntityDomain")
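# EFM 6.23.30/31/33/34: each primary item value must be a descendant of the corresponding domain (projects, governments, business segments, entities)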
for relSet, dom, priItem, errCode in ((domMemRelSet, rxd.AllProjectsMember, rxd.Pr, "EFM.6.23.30"),
(domMemRelSet, rxd.AllGovernmentsMember, rxd.Gv, "EFM.6.23.31"),
(dimDomRelSet, rxd.BusinessSegmentAxis, rxd.Sm, "EFM.6.23.33"),
(domMemRelSet, qnDeiEntityDomain, rxd.E, "EFM.6.23.34")):
for f in modelXbrl.factsByQname[priItem]:
if (not f.isNil and f.xValid and
not relSet.isRelated(dom, "descendant", f.xValue, isDRS=True)):
modelXbrl.error(errCode,
_("The %(fact)s %(value)s in context %(context)s is not a %(domain)s."),
modelObject=f, fact=priItem, value=f.xValue, context=f.context.id, domain=dom,
messageCodes=("EFM.6.23.30", "EFM.6.23.31", "EFM.6.23.33", "EFM.6.23.34"))
self.modelXbrl.profileActivity("... SD checks 30, 31, 33, 34", minTimeToShow=1.0)
cntxEqualFacts = defaultdict(list)
for f in modelXbrl.facts:
if f.context is not None:
cntxEqualFacts[f.context.contextDimAwareHash].append(f)
self.modelXbrl.profileActivity("... SD prepare facts by context", minTimeToShow=1.0)
qnCurrencyMeasure = XbrlConst.qnIsoCurrency(deiItems.get("EntityReportingCurrencyISOCode"))
currencyMeasures = ([qnCurrencyMeasure],[])
qnAllCountriesDomain = ModelValue.qname(countryNs, "country:AllCountriesDomain")
for cntxFacts in cntxEqualFacts.values():
qnameFacts = dict((f.qname,f) for f in cntxFacts)
context = cntxFacts[0].context
contextDims = cntxFacts[0].context.qnameDims
# required priItem values based on context dimension
for dim, priItem, errCode in ((rxd.PmtAxis, rxd.P, "EFM.6.23.20"),
(rxd.GovernmentAxis, rxd.Payments, "EFM.6.23.22")):
if context.hasDimension(dim) and (priItem not in qnameFacts or qnameFacts[priItem].isNil):
modelXbrl.error(errCode,
_("The Context %(context)s has dimension %(dimension)s member %(member)s but is missing required fact %(fact)s"),
modelObject=context, context=context.id, dimension=dim, member=context.dimMemberQname(dim), fact=priItem,
messageCodes=("EFM.6.23.20", "EFM.6.23.22"))
if (rxd.Co in qnameFacts and not qnameFacts[rxd.Co].isNil and
not domMemRelSet.isRelated(qnAllCountriesDomain, "descendant", qnameFacts[rxd.Co].xValue, isDRS=True)):
modelXbrl.error("EFM.6.23.44",
_("Fact rxd:Co value %(value)s in context %(context)s is not in the domain of country:AllCountriesDomain"),
modelObject=qnameFacts[rxd.Co], context=context.id, value=qnameFacts[rxd.Co].value)
# required present facts based on other present fact
for qnF, fNilOk, qnG, gNilOk, errCode in ((rxd.A, True, rxd.Cu, False, "EFM.6.23.24"),
(rxd.A, True, rxd.D, False, "EFM.6.23.25"),
(rxd.A, False, rxd.Gv, False, "EFM.6.23.28"),
(rxd.A, False, rxd.Co, False, "EFM.6.23.29"),
(rxd.Km, False, rxd.K, False, "EFM.6.23.35"),
(rxd.K, False, rxd.Km, False, "EFM.6.23.35"),
(rxd.Cm, False, rxd.Cu, False, "EFM.6.23.39"),
(rxd.K, False, rxd.A, False, "EFM.6.23.42"),
(rxd.Pr, False, rxd.A, False, "EFM.6.23.43")):
if (qnF in qnameFacts and (fNilOk or not qnameFacts[qnF].isNil) and
(qnG not in qnameFacts or (not gNilOk and qnameFacts[qnG].isNil))):
modelXbrl.error(errCode,
_("The Context %(context)s has a %(fact1)s and is missing required %(fact2NotNil)sfact %(fact2)s"),
modelObject=qnameFacts[qnF], context=context.id, fact1=qnF, fact2=qnG, fact2NotNil="" if gNilOk else "non-nil ",
messageCodes=("EFM.6.23.24", "EFM.6.23.25", "EFM.6.23.28", "EFM.6.23.29", "EFM.6.23.35",
"EFM.6.23.35", "EFM.6.23.39", "EFM.6.23.42", "EFM.6.23.43"))
for f in cntxFacts:
if (not context.hasDimension(rxd.PmtAxis) and f.isNumeric and
f.unit is not None and f.unit.measures != currencyMeasures):
modelXbrl.error("EFM.6.23.37",
_("Fact %(fact)s in context %(context)s has unit %(unit)s not matching dei:EntityReportingCurrencyISOCode %(currency)s"),
modelObject=f, fact=f.qname, context=context.id, unit=f.unit.value, currency=qnCurrencyMeasure)
if (rxd.A in qnameFacts and not qnameFacts[rxd.A].isNil and
rxd.Cm in qnameFacts and not qnameFacts[rxd.Cm].isNil and
qnameFacts[rxd.A].unit is not None and qnameFacts[rxd.A].unit.measures == currencyMeasures):
modelXbrl.error("EFM.6.23.38",
_("A value cannot be given for rxd:Cm in context %(context)s because the payment is in the reporting currency %(currency)s."),
modelObject=(qnameFacts[rxd.A],qnameFacts[rxd.Cm]), context=context.id, currency=qnCurrencyMeasure)
if (rxd.A in qnameFacts and
rxd.Cu in qnameFacts and not qnameFacts[rxd.Cu].isNil and
qnameFacts[rxd.A].unit is not None and qnameFacts[rxd.A].unit.measures != ([XbrlConst.qnIsoCurrency(qnameFacts[rxd.Cu].xValue)],[])):
modelXbrl.error("EFM.6.23.41",
_("The unit %(unit)s of rxd:A in context %(context)s is not consistent with the value %(currency)s of rxd:Cu."),
modelObject=(qnameFacts[rxd.A],qnameFacts[rxd.Cu]), context=context.id, unit=qnameFacts[rxd.A].unit.value, currency=qnameFacts[rxd.Cu].value)
if (context.hasDimension(rxd.ProjectAxis) and
not any(f.xValue == m
for m in (contextDims[rxd.ProjectAxis].memberQname,)
for f in modelXbrl.factsByQname[rxd.Pr]
if f.context is not None)):
modelXbrl.error("EFM.6.23.19",
_("The Context %(context)s has dimension %(dimension)s but is missing any payment."),
modelObject=context, context=context.id, dimension=rxd.ProjectAxis)
if (context.hasDimension(rxd.GovernmentAxis) and
not any(f.xValue == m and f.context.hasDimension(rxd.PmtAxis)
for m in (contextDims[rxd.GovernmentAxis].memberQname,)
for f in modelXbrl.factsByQname[rxd.Gv]
if f.context is not None)):
modelXbrl.error("EFM.6.23.21",
_("The Context %(context)s has dimension %(dimension)s member %(member)s but is missing any payment."),
modelObject=context, context=context.id, dimension=rxd.GovernmentAxis, member=context.dimMemberQname(rxd.GovernmentAxis))
if rxd.P in qnameFacts and not any(f.context is not None and not f.context.hasSegment
for f in modelXbrl.factsByQname.get(qnameFacts[rxd.P].xValue,())):
modelXbrl.error("EFM.6.23.23",
_("The Context %(context)s has payment type %(paymentType)s but is missing a corresponding fact in the required context."),
modelObject=context, context=context.id, paymentType=qnameFacts[rxd.P].xValue)
if not context.hasDimension(rxd.PmtAxis) and rxd.A in qnameFacts and not qnameFacts[rxd.A].isNil:
modelXbrl.error("EFM.6.23.40",
_("There is a non-nil rxd:A in context %(context)s but missing a dimension rxd:PmtAxis."),
modelObject=(context, qnameFacts[rxd.A]), context=context.id)
self.modelXbrl.profileActivity("... SD by context for 19-25, 28-29, 35, 37-39, 40-44", minTimeToShow=1.0)
for f in modelXbrl.factsByQname[rxd.D]:
if not f.isNil and f.xValid and f.xValue + datetime.timedelta(1) != f.context.endDatetime: # date needs to be midnite to compare to datetime
modelXbrl.error("EFM.6.23.32",
_("The rxd:D %(value)s in context %(context)s does not match the context end date %(endDate)s."),
modelObject=f, value=f.xValue, context=f.context.id, endDate=XmlUtil.dateunionValue(f.context.endDatetime, subtractOneDay=True))
self.modelXbrl.profileActivity("... SD checks 32 (last SD check)", minTimeToShow=1.0)
# dereference object references no longer needed
del rxdDoc, cntxEqualFacts
# dereference compatibly with 2.7 (as these may be used in nested contexts above)
hasHypRelSet = hypDimRelSet = dimDefRelSet = domMemRelSet = dimDomRelSet = None
memDim.clear()
else: # non-SD documentType
pass # no non-SD tests yet
elif disclosureSystem.GFM:
for deiItem in (
disclosureSystem.deiCurrentFiscalYearEndDateElement,
disclosureSystem.deiDocumentFiscalYearFocusElement,
disclosureSystem.deiFilerNameElement):
if deiItem not in deiItems or deiItems[deiItem] == "":
modelXbrl.error("GFM.3.02.01",
_("dei:%(elementName)s is required in the default context"),
modelXbrl=modelXbrl, elementName=deiItem)
if documentType not in ("SD", "SD/A"):
self.modelXbrl.profileActivity("... filer required facts checks", minTimeToShow=1.0)
#6.5.27 footnote elements, etc
footnoteLinkNbr = 0
for footnoteLinkElt in xbrlInstDoc.iterdescendants(tag="{http://www.xbrl.org/2003/linkbase}footnoteLink"):
if isinstance(footnoteLinkElt,ModelObject):
footnoteLinkNbr += 1
linkrole = footnoteLinkElt.get("{http://www.w3.org/1999/xlink}role")
if linkrole != XbrlConst.defaultLinkRole:
modelXbrl.error(("EFM.6.05.28.linkrole", "GFM.1.02.20"),
_("FootnoteLink %(footnoteLinkNumber)s has disallowed role %(linkrole)s"),
modelObject=footnoteLinkElt, footnoteLinkNumber=footnoteLinkNbr, linkrole=linkrole)
# find modelLink of this footnoteLink
# modelLink = modelXbrl.baseSetModelLink(footnoteLinkElt)
relationshipSet = modelXbrl.relationshipSet("XBRL-footnotes", linkrole)
#if (modelLink is None) or (not relationshipSet):
# continue # had no child elements to parse
locNbr = 0
arcNbr = 0
for child in footnoteLinkElt:
if isinstance(child,ModelObject):
xlinkType = child.get("{http://www.w3.org/1999/xlink}type")
if child.namespaceURI != XbrlConst.link or \
xlinkType not in ("locator", "resource", "arc") or \
child.localName not in ("loc", "footnote", "footnoteArc"):
modelXbrl.error(("EFM.6.05.27", "GFM.1.02.19"),
_("FootnoteLink %(footnoteLinkNumber)s has disallowed child element %(elementName)s"),
modelObject=child, footnoteLinkNumber=footnoteLinkNbr, elementName=child.prefixedName)
elif xlinkType == "locator":
locNbr += 1
locrole = child.get("{http://www.w3.org/1999/xlink}role")
if locrole is not None and (disclosureSystem.GFM or \
not disclosureSystem.uriAuthorityValid(locrole)):
modelXbrl.error(("EFM.6.05.29", "GFM.1.02.21"),
_("FootnoteLink %(footnoteLinkNumber)s loc %(locNumber)s has disallowed role %(role)s"),
modelObject=child, footnoteLinkNumber=footnoteLinkNbr,
xlinkLabel=child.xlinkLabel,
locNumber=locNbr, role=locrole)
href = child.get("{http://www.w3.org/1999/xlink}href")
if not href.startswith("#"):
modelXbrl.error(("EFM.6.05.32", "GFM.1.02.23"),
_("FootnoteLink %(footnoteLinkNumber)s loc %(locNumber)s has disallowed href %(locHref)s"),
modelObject=child, footnoteLinkNumber=footnoteLinkNbr, locNumber=locNbr, locHref=href,
locLabel=child.get("{http://www.w3.org/1999/xlink}label"))
else:
label = child.get("{http://www.w3.org/1999/xlink}label")
elif xlinkType == "arc":
arcNbr += 1
arcrole = child.get("{http://www.w3.org/1999/xlink}arcrole")
if (self.validateEFM and not disclosureSystem.uriAuthorityValid(arcrole)) or \
(disclosureSystem.GFM and arcrole != XbrlConst.factFootnote and arcrole != XbrlConst.factExplanatoryFact):
modelXbrl.error(("EFM.6.05.30", "GFM.1.02.22"),
_("FootnoteLink %(footnoteLinkNumber)s arc %(arcNumber)s has disallowed arcrole %(arcrole)s"),
modelObject=child, footnoteLinkNumber=footnoteLinkNbr, arcNumber=arcNbr,
arcToLabel=child.get("{http://www.w3.org/1999/xlink}to"),
arcrole=arcrole)
elif xlinkType == "resource": # footnote
footnoterole = child.get("{http://www.w3.org/1999/xlink}role")
if footnoterole == "":
modelXbrl.error(("EFM.6.05.28.missingRole", "GFM.1.2.20"),
_("Footnote %(xlinkLabel)s is missing a role"),
modelObject=child, xlinkLabel=child.get("{http://www.w3.org/1999/xlink}label"))
elif (self.validateEFM and not disclosureSystem.uriAuthorityValid(footnoterole)) or \
(disclosureSystem.GFM and footnoterole != XbrlConst.footnote):
modelXbrl.error(("EFM.6.05.28", "GFM.1.2.20"),
_("Footnote %(xlinkLabel)s has disallowed role %(role)s"),
modelObject=child, xlinkLabel=child.get("{http://www.w3.org/1999/xlink}label"),
role=footnoterole)
if self.validateEFM:
ValidateFilingText.validateFootnote(modelXbrl, child)
# find modelResource for this element
foundFact = False
if XmlUtil.text(child) != "":
if relationshipSet:
for relationship in relationshipSet.toModelObject(child):
if isinstance(relationship.fromModelObject, ModelFact):
foundFact = True
break
if not foundFact:
modelXbrl.error(("EFM.6.05.33", "GFM.1.02.24"),
_("FootnoteLink %(footnoteLinkNumber)s footnote %(footnoteLabel)s has no linked fact"),
modelObject=child, footnoteLinkNumber=footnoteLinkNbr,
footnoteLabel=child.get("{http://www.w3.org/1999/xlink}label"),
text=XmlUtil.text(child)[:100])
self.modelXbrl.profileActivity("... filer rfootnotes checks", minTimeToShow=1.0)
# entry point schema checks
elif modelXbrl.modelDocument.type == ModelDocument.Type.SCHEMA:
if self.validateSBRNL:
# entry must have a P-link
if not any(hrefElt.localName == "linkbaseRef" and hrefElt.get("{http://www.w3.org/1999/xlink}role") == "http://www.xbrl.org/2003/role/presentationLinkbaseRef"
for hrefElt, hrefDoc, hrefId in modelXbrl.modelDocument.hrefObjects):
modelXbrl.error("SBR.NL.2.2.10.01",
'Entrypoint schema must have a presentation linkbase', modelObject=modelXbrl.modelDocument)
# all-labels and references checks
defaultLangStandardLabels = {}
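# EFM 6.10.x / GFM 1.05.x label checks: documentation labels on standard concepts, duplicate standard labels, length, disallowed characters, and untrimmed whitespace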
for concept in modelXbrl.qnameConcepts.values():
conceptHasDefaultLangStandardLabel = False
for modelLabelRel in labelsRelationshipSet.fromModelObject(concept):
modelLabel = modelLabelRel.toModelObject
role = modelLabel.role
text = modelLabel.text
lang = modelLabel.xmlLang
if role == XbrlConst.documentationLabel:
if concept.modelDocument.targetNamespace in disclosureSystem.standardTaxonomiesDict:
modelXbrl.error(("EFM.6.10.05", "GFM.1.05.05", "SBR.NL.2.1.0.08"),
_("Concept %(concept)s of a standard taxonomy cannot have a documentation label: %(text)s"),
modelObject=modelLabel, concept=concept.qname, text=text)
elif text and lang and disclosureSystem.defaultXmlLang and lang.startswith(disclosureSystem.defaultXmlLang):
if role == XbrlConst.standardLabel:
if text in defaultLangStandardLabels:
concept2, modelLabel2 = defaultLangStandardLabels[text]
modelXbrl.error(("EFM.6.10.04", "GFM.1.05.04"),
_("Same labels for concepts %(concept)s and %(concept2)s for %(lang)s standard role: %(text)s."),
modelObject=(concept, modelLabel, concept2, modelLabel2),
concept=concept.qname,
concept2=concept2.qname,
lang=disclosureSystem.defaultLanguage, text=text[:80])
else:
defaultLangStandardLabels[text] = (concept, modelLabel)
conceptHasDefaultLangStandardLabel = True
if len(text) > 511:
modelXbrl.error(("EFM.6.10.06", "GFM.1.05.06"),
_("Label for concept %(concept)s role %(role)s length %(length)s must be shorter than 511 characters: %(text)s"),
modelObject=modelLabel, concept=concept.qname, role=role, length=len(text), text=text[:80])
match = modelXbrl.modelManager.disclosureSystem.labelCheckPattern.search(text)
if match:
modelXbrl.error(("EFM.6.10.06", "GFM.1.05.07", "SBR.NL.2.3.8.07"),
'Label for concept %(concept)s role %(role)s has disallowed characters: "%(text)s"',
modelObject=modelLabel, concept=concept.qname, role=role, text=match.group())
if (text is not None and len(text) > 0 and
modelXbrl.modelManager.disclosureSystem.labelTrimPattern and
(modelXbrl.modelManager.disclosureSystem.labelTrimPattern.match(text[0]) or \
modelXbrl.modelManager.disclosureSystem.labelTrimPattern.match(text[-1]))):
modelXbrl.error(("EFM.6.10.08", "GFM.1.05.08"),
_("Label for concept %(concept)s role %(role)s lang %(lang)s is not trimmed: %(text)s"),
modelObject=modelLabel, concept=concept.qname, role=role, lang=lang, text=text)
for modelRefRel in referencesRelationshipSetWithProhibits.fromModelObject(concept):
modelReference = modelRefRel.toModelObject
text = XmlUtil.innerText(modelReference)
#6.18.1 no reference to company extension concepts
if concept.modelDocument.targetNamespace not in disclosureSystem.standardTaxonomiesDict:
modelXbrl.error(("EFM.6.18.01", "GFM.1.9.1"),
_("References for extension concept %(concept)s are not allowed: %(text)s"),
modelObject=modelReference, concept=concept.qname, text=text, xml=XmlUtil.xmlstring(modelReference, stripXmlns=True, contentsOnly=True))
elif (self.validateEFM or self.validateSBRNL) and not self.isStandardUri(modelRefRel.modelDocument.uri):
#6.18.2 no extension to add or remove references to standard concepts
modelXbrl.error(("EFM.6.18.02", "SBR.NL.2.1.0.08"),
_("References for standard taxonomy concept %(concept)s are not allowed in an extension linkbase: %(text)s"),
modelObject=modelReference, concept=concept.qname, text=text, xml=XmlUtil.xmlstring(modelReference, stripXmlns=True, contentsOnly=True))
if self.validateSBRNL and (concept.isItem or concept.isTuple):
if concept.modelDocument.targetNamespace not in disclosureSystem.standardTaxonomiesDict:
if not conceptHasDefaultLangStandardLabel:
modelXbrl.error("SBR.NL.2.2.2.26",
_("Concept %(concept)s missing standard label in local language."),
modelObject=concept, concept=concept.qname)
subsGroup = concept.get("substitutionGroup")
if ((not concept.isAbstract or subsGroup == "sbr:presentationItem") and
not (presentationRelationshipSet.toModelObject(concept) or
presentationRelationshipSet.fromModelObject(concept))):
modelXbrl.error("SBR.NL.2.2.2.04",
_("Concept %(concept)s not referred to by presentation relationship."),
modelObject=concept, concept=concept.qname)
elif ((concept.isDimensionItem or
(subsGroup and (subsGroup.endswith(":domainItem") or subsGroup.endswith(":domainMemberItem")))) and
not (presentationRelationshipSet.toModelObject(concept) or
presentationRelationshipSet.fromModelObject(concept))):
modelXbrl.error("SBR.NL.2.2.10.03",
_("DTS concept %(concept)s not referred to by presentation relationship."),
modelObject=concept, concept=concept.qname)
if (concept.substitutionGroupQname and
concept.substitutionGroupQname.namespaceURI not in disclosureSystem.baseTaxonomyNamespaces):
modelXbrl.error("SBR.NL.2.2.2.05",
_("Concept %(concept)s has a substitutionGroup of a non-standard concept."),
modelObject=concept, concept=concept.qname)
if concept.isTuple: # verify same presentation linkbase nesting
for missingQname in set(concept.type.elements) ^ pLinkedNonAbstractDescendantQnames(modelXbrl, concept):
modelXbrl.error("SBR.NL.2.3.4.01",
_("Tuple %(concept)s has mismatch between content and presentation children: %(missingQname)s."),
modelObject=concept, concept=concept.qname, missingQname=missingQname)
self.checkConceptLabels(modelXbrl, labelsRelationshipSet, disclosureSystem, concept)
self.checkConceptLabels(modelXbrl, genLabelsRelationshipSet, disclosureSystem, concept)
# role types checks
# 6.7.10 only one role type declaration in DTS
for roleURI, modelRoleTypes in modelXbrl.roleTypes.items():
if len(modelRoleTypes) > 1:
modelXbrl.error(("EFM.6.07.10", "GFM.1.03.10"),
_("RoleType %(roleType)s is defined in multiple taxonomies"),
modelObject=modelRoleTypes, roleType=roleURI, numberOfDeclarations=len(modelRoleTypes))
# 6.7.14 only one arcrole type declaration in DTS
for arcroleURI, modelRoleTypes in modelXbrl.arcroleTypes.items():
if len(modelRoleTypes) > 1:
modelXbrl.error(("EFM.6.07.14", "GFM.1.03.16"),
_("ArcroleType %(arcroleType)s is defined in multiple taxonomies"),
modelObject=modelRoleTypes, arcroleType=arcroleURI, numberOfDeclarations=len(modelRoleTypes) )
self.modelXbrl.profileActivity("... filer concepts checks", minTimeToShow=1.0)
del defaultLangStandardLabels #dereference
# checks on all documents: instance, schema, instance
ValidateFilingDTS.checkDTS(self, modelXbrl.modelDocument, [])
''' removed RH 2011-12-23, corresponding use of nameWordsTable in ValidateFilingDTS
if self.validateSBRNL:
del self.nameWordsTable
'''
self.modelXbrl.profileActivity("... filer DTS checks", minTimeToShow=1.0)
# checks for namespace clashes
if self.validateEFM:
# check number of us-roles taxonomies referenced
for conflictClass, modelDocuments in self.standardNamespaceConflicts.items():
if len(modelDocuments) > 1:
modelXbrl.error("EFM.6.22.03",
_("References for conflicting standard %(conflictClass)s taxonomies %(namespaceConflicts)s are not allowed in same DTS"),
modelObject=modelXbrl, conflictClass=conflictClass,
namespaceConflicts=sorted((d.targetNamespace for d in modelDocuments),
key=lambda ns: ns.rpartition('/')[2]))
conceptRelsUsedWithPreferredLabels = defaultdict(list)
usedCalcsPresented = defaultdict(set) # pairs of concepts objectIds used in calc
usedCalcFromTosELR = {}
localPreferredLabels = defaultdict(set)
drsELRs = set()
# do calculation, then presentation, then other arcroles
self.summationItemRelsSetAllELRs = modelXbrl.relationshipSet(XbrlConst.summationItem)
for arcroleFilter in (XbrlConst.summationItem, XbrlConst.parentChild, "*"):
for baseSetKey, baseSetModelLinks in modelXbrl.baseSets.items():
arcrole, ELR, linkqname, arcqname = baseSetKey
if ELR and linkqname and arcqname and not arcrole.startswith("XBRL-"):
# assure summationItem, then parentChild, then others
if not (arcroleFilter == arcrole or
arcroleFilter == "*" and arcrole not in (XbrlConst.summationItem, XbrlConst.parentChild)):
continue
if self.validateEFMorGFM or (self.validateSBRNL and arcrole == XbrlConst.parentChild):
ineffectiveArcs = ModelRelationshipSet.ineffectiveArcs(baseSetModelLinks, arcrole)
#validate ineffective arcs
for modelRel in ineffectiveArcs:
if modelRel.fromModelObject is not None and modelRel.toModelObject is not None:
modelXbrl.error(("EFM.6.09.03", "GFM.1.04.03", "SBR.NL.2.3.4.06"),
_("Ineffective arc %(arc)s in \nlink role %(linkrole)s \narcrole %(arcrole)s \nfrom %(conceptFrom)s \nto %(conceptTo)s \n%(ineffectivity)s"),
modelObject=modelRel, arc=modelRel.qname, arcrole=modelRel.arcrole,
linkrole=modelRel.linkrole, linkroleDefinition=modelXbrl.roleTypeDefinition(modelRel.linkrole),
conceptFrom=modelRel.fromModelObject.qname, conceptTo=modelRel.toModelObject.qname,
ineffectivity=modelRel.ineffectivity)
if arcrole == XbrlConst.parentChild:
isStatementSheet = any(linkroleDefinitionStatementSheet.match(roleType.definition or '')
for roleType in self.modelXbrl.roleTypes.get(ELR,()))
conceptsPresented = set()
# 6.12.2 check for distinct order attributes
parentChildRels = modelXbrl.relationshipSet(arcrole, ELR)
for relFrom, siblingRels in parentChildRels.fromModelObjects().items():
targetConceptPreferredLabels = defaultdict(dict)
orderRels = {}
firstRel = True
relFromUsed = True
for rel in siblingRels:
if firstRel:
firstRel = False
if relFrom in conceptsUsed:
conceptsUsed[relFrom] = True # 6.12.3, has a pres relationship
relFromUsed = True
relTo = rel.toModelObject
preferredLabel = rel.preferredLabel
if relTo in conceptsUsed:
conceptsUsed[relTo] = True # 6.12.3, has a pres relationship
if preferredLabel and preferredLabel != "":
conceptRelsUsedWithPreferredLabels[relTo].append(rel)
if self.validateSBRNL and preferredLabel in ("periodStart","periodEnd"):
modelXbrl.error("SBR.NL.2.3.4.03",
_("Preferred label on presentation relationships not allowed"), modelObject=modelRel)
# 6.12.5 distinct preferred labels in base set
preferredLabels = targetConceptPreferredLabels[relTo]
if (preferredLabel in preferredLabels or
(self.validateSBRNL and not relFrom.isTuple and
(not preferredLabel or None in preferredLabels))):
if preferredLabel in preferredLabels:
rel2, relTo2 = preferredLabels[preferredLabel]
else:
rel2 = relTo2 = None
modelXbrl.error(("EFM.6.12.05", "GFM.1.06.05", "SBR.NL.2.3.4.06"),
_("Concept %(concept)s has duplicate preferred label %(preferredLabel)s in link role %(linkrole)s"),
modelObject=(rel, relTo, rel2, relTo2),
concept=relTo.qname, fromConcept=rel.fromModelObject.qname,
preferredLabel=preferredLabel, linkrole=rel.linkrole, linkroleDefinition=modelXbrl.roleTypeDefinition(rel.linkrole))
else:
preferredLabels[preferredLabel] = (rel, relTo)
if relFromUsed:
# 6.14.5
conceptsPresented.add(relFrom.objectIndex)
conceptsPresented.add(relTo.objectIndex)
order = rel.order
if order in orderRels:
modelXbrl.error(("EFM.6.12.02", "GFM.1.06.02", "SBR.NL.2.3.4.05"),
_("Duplicate presentation relations from concept %(conceptFrom)s for order %(order)s in base set role %(linkrole)s to concept %(conceptTo)s and to concept %(conceptTo2)s"),
modelObject=(rel, orderRels[order]), conceptFrom=relFrom.qname, order=rel.arcElement.get("order"), linkrole=rel.linkrole, linkroleDefinition=modelXbrl.roleTypeDefinition(rel.linkrole),
conceptTo=rel.toModelObject.qname, conceptTo2=orderRels[order].toModelObject.qname)
else:
orderRels[order] = rel
if self.validateSBRNL and not relFrom.isTuple:
if relTo in localPreferredLabels:
if {None, preferredLabel} & localPreferredLabels[relTo]:
self.modelXbrl.error("SBR.NL.2.3.4.06",
_("Non-distinguished preferredLabel presentation relations from concept %(conceptFrom)s in base set role %(linkrole)s"),
modelObject=rel, conceptFrom=relFrom.qname, linkrole=rel.linkrole, conceptTo=relTo.qname)
localPreferredLabels[relTo].add(preferredLabel)
targetConceptPreferredLabels.clear()
orderRels.clear()
localPreferredLabels.clear() # clear for next relationship
for conceptPresented in conceptsPresented:
if conceptPresented in usedCalcsPresented:
usedCalcPairingsOfConcept = usedCalcsPresented[conceptPresented]
if len(usedCalcPairingsOfConcept & conceptsPresented) > 0:
usedCalcPairingsOfConcept -= conceptsPresented
# 6.15.02, 6.15.03 semantics checks for totals and calc arcs (by tree walk)
if validateLoggingSemantic:
for rootConcept in parentChildRels.rootConcepts:
self.checkCalcsTreeWalk(parentChildRels, rootConcept, isStatementSheet, False, conceptsUsed, set())
elif arcrole == XbrlConst.summationItem:
if self.validateEFMorGFM:
# 6.14.3 check for relation concept periods
fromRelationships = modelXbrl.relationshipSet(arcrole,ELR).fromModelObjects()
allElrRelSet = modelXbrl.relationshipSet(arcrole)
for relFrom, rels in fromRelationships.items():
orderRels = {}
for rel in rels:
relTo = rel.toModelObject
# 6.14.03 must have matched period types across relationshp
if isinstance(relTo, ModelConcept) and relFrom.periodType != relTo.periodType:
self.modelXbrl.error(("EFM.6.14.03", "GFM.1.07.03"),
"Calculation relationship period types mismatched in base set role %(linkrole)s from %(conceptFrom)s to %(conceptTo)s",
modelObject=rel, linkrole=rel.linkrole, conceptFrom=relFrom.qname, conceptTo=relTo.qname, linkroleDefinition=self.modelXbrl.roleTypeDefinition(ELR))
# 6.14.5 concepts used must have pres in same ext link
if relFrom in conceptsUsed and relTo in conceptsUsed:
fromObjId = relFrom.objectIndex
toObjId = relTo.objectIndex
if fromObjId < toObjId:
usedCalcsPresented[fromObjId].add(toObjId)
else:
usedCalcsPresented[toObjId].add(fromObjId)
order = rel.order
if order in orderRels and disclosureSystem.GFM:
self.modelXbrl.error(("EFM.N/A", "GFM.1.07.06"),
_("Duplicate calculations relations from concept %(conceptFrom)s for order %(order)s in base set role %(linkrole)s to concept %(conceptTo)s and to concept %(conceptTo2)s"),
modelObject=(rel, orderRels[order]), linkrole=rel.linkrole, conceptFrom=relFrom.qname, order=order,
conceptTo=rel.toModelObject.qname, conceptTo2=orderRels[order].toModelObject.qname)
else:
orderRels[order] = rel
directedCycleRels = self.directedCycle(relFrom,relFrom,fromRelationships,{relFrom})
if directedCycleRels is not None:
self.modelXbrl.error(("EFM.6.14.04", "GFM.1.07.04"),
_("Calculation relationships have a directed cycle in base set role %(linkrole)s starting from %(concept)s"),
modelObject=[relFrom] + directedCycleRels, linkrole=ELR, concept=relFrom.qname, linkroleDefinition=self.modelXbrl.roleTypeDefinition(ELR))
orderRels.clear()
# if relFrom used by fact and multiple calc networks from relFrom, test 6.15.04
if rels and relFrom in conceptsUsed:
relFromAndTos = (relFrom.objectIndex,) + tuple(sorted((rel.toModelObject.objectIndex
for rel in rels if isinstance(rel.toModelObject, ModelConcept))))
if relFromAndTos in usedCalcFromTosELR:
otherRels = usedCalcFromTosELR[relFromAndTos]
otherELR = otherRels[0].linkrole
self.modelXbrl.log("WARNING-SEMANTIC", ("EFM.6.15.04", "GFM.2.06.04"),
_("Calculation relationships should have a same set of targets in %(linkrole)s and %(linkrole2)s starting from %(concept)s"),
modelObject=[relFrom] + rels + otherRels, linkrole=ELR, linkrole2=otherELR, concept=relFrom.qname)
else:
usedCalcFromTosELR[relFromAndTos] = rels
elif self.validateSBRNL:
# find a calc relationship to get the containing document name
for modelRel in self.modelXbrl.relationshipSet(arcrole, ELR).modelRelationships:
self.modelXbrl.error("SBR.NL.2.3.9.01",
_("Calculation linkbase linkrole %(linkrole)s"),
modelObject=modelRel, linkrole=ELR)
break
elif arcrole == XbrlConst.all or arcrole == XbrlConst.notAll:
drsELRs.add(ELR)
elif arcrole == XbrlConst.dimensionDomain or arcrole == XbrlConst.dimensionDefault and \
self.validateEFMorGFM:
# 6.16.3 check domain targets in extension linkbases are domain items
fromRelationships = modelXbrl.relationshipSet(arcrole,ELR).fromModelObjects()
for relFrom, rels in fromRelationships.items():
for rel in rels:
relTo = rel.toModelObject
if not (isinstance(relTo, ModelConcept) and relTo.type is not None and relTo.type.isDomainItemType) and not self.isStandardUri(rel.modelDocument.uri):
self.modelXbrl.error(("EFM.6.16.03", "GFM.1.08.03"),
_("Definition relationship from %(conceptFrom)s to %(conceptTo)s in role %(linkrole)s requires domain item target"),
modelObject=(rel, relFrom, relTo), conceptFrom=relFrom.qname, conceptTo=(relTo.qname if relTo is not None else None), linkrole=rel.linkrole)
elif self.validateSBRNL:
if arcrole == XbrlConst.dimensionDefault:
for modelRel in self.modelXbrl.relationshipSet(arcrole).modelRelationships:
self.modelXbrl.error("SBR.NL.2.3.6.05",
_("Dimension-default in from %(conceptFrom)s to %(conceptTo)s in role %(linkrole)s is not allowed"),
modelObject=modelRel, conceptFrom=modelRel.fromModelObject.qname, conceptTo=modelRel.toModelObject.qname,
linkrole=modelRel.linkrole)
''' removed per RH 2013-01-11
if not (XbrlConst.isStandardArcrole(arcrole) or XbrlConst.isDefinitionOrXdtArcrole(arcrole)):
for modelRel in self.modelXbrl.relationshipSet(arcrole).modelRelationships:
relTo = modelRel.toModelObject
relFrom = modelRel.fromModelObject
if not ((isinstance(relFrom,ModelConcept) and isinstance(relTo,ModelConcept)) or
(relFrom.modelDocument.inDTS and
(relTo.qname == XbrlConst.qnGenLabel and modelRel.arcrole == XbrlConst.elementLabel) or
(relTo.qname == XbrlConst.qnGenReference and modelRel.arcrole == XbrlConst.elementReference) or
(relTo.qname == self.qnSbrLinkroleorder))):
self.modelXbrl.error("SBR.NL.2.3.2.07",
_("The source and target of an arc must be in the DTS from %(elementFrom)s to %(elementTo)s, in linkrole %(linkrole)s, arcrole %(arcrole)s"),
modelObject=modelRel, elementFrom=relFrom.qname, elementTo=relTo.qname,
linkrole=modelRel.linkrole, arcrole=arcrole)
'''
# definition tests (GFM only, for now)
if XbrlConst.isDefinitionOrXdtArcrole(arcrole) and disclosureSystem.GFM:
fromRelationships = modelXbrl.relationshipSet(arcrole,ELR).fromModelObjects()
for relFrom, rels in fromRelationships.items():
orderRels = {}
for rel in rels:
relTo = rel.toModelObject
order = rel.order
if order in orderRels and disclosureSystem.GFM:
self.modelXbrl.error("GFM.1.08.10",
_("Duplicate definitions relations from concept %(conceptFrom)s for order %(order)s in base set role %(linkrole)s to concept %(conceptTo)s and to concept %(conceptTo2)s"),
modelObject=(rel, relFrom, relTo), conceptFrom=relFrom.qname, order=order, linkrole=rel.linkrole,
conceptTo=rel.toModelObject.qname, conceptTo2=orderRels[order].toModelObject.qname)
else:
orderRels[order] = rel
if (arcrole not in (XbrlConst.dimensionDomain, XbrlConst.domainMember) and
rel.get("{http://xbrl.org/2005/xbrldt}usable") == "false"):
self.modelXrl.error("GFM.1.08.11",
_("Disallowed xbrldt:usable='false' attribute on %(arc)s relationship from concept %(conceptFrom)s in base set role %(linkrole)s to concept %(conceptTo)s"),
modelObject=(rel, relFrom, relTo), arc=rel.qname, conceptFrom=relFrom.qname, linkrole=rel.linkrole, conceptTo=rel.toModelObject.qname)
del localPreferredLabels # dereference
del usedCalcFromTosELR
del self.summationItemRelsSetAllELRs
self.modelXbrl.profileActivity("... filer relationships checks", minTimeToShow=1.0)
# checks on dimensions
ValidateFilingDimensions.checkDimensions(self, drsELRs)
self.modelXbrl.profileActivity("... filer dimensions checks", minTimeToShow=1.0)
for concept, hasPresentationRelationship in conceptsUsed.items():
if not hasPresentationRelationship:
self.modelXbrl.error(("EFM.6.12.03", "GFM.1.6.3"),
_("Concept used in instance %(concept)s does not participate in an effective presentation relationship"),
modelObject=[concept] + list(modelXbrl.factsByQname[concept.qname]), concept=concept.qname)
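# EFM 6.14.05 / GFM 1.7.5: concept pairs linked by a used calculation relationship must also appear together in some presentation relationship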
for fromIndx, toIndxs in usedCalcsPresented.items():
for toIndx in toIndxs:
fromModelObject = self.modelXbrl.modelObject(fromIndx)
toModelObject = self.modelXbrl.modelObject(toIndx)
calcRels = modelXbrl.relationshipSet(XbrlConst.summationItem) \
.fromToModelObjects(fromModelObject, toModelObject, checkBothDirections=True)
fromFacts = self.modelXbrl.factsByQname[fromModelObject.qname]
toFacts = self.modelXbrl.factsByQname[toModelObject.qname]
fromFactContexts = set(f.context.contextNonDimAwareHash for f in fromFacts if f.context is not None)
contextId = backupId = None # for EFM message
for f in toFacts:
if f.context is not None:
if f.context.contextNonDimAwareHash in fromFactContexts:
contextId = f.context.id
break
backupId = f.context.id
if contextId is None:
contextId = backupId
self.modelXbrl.error(("EFM.6.14.05", "GFM.1.7.5"),
_("Used calculation relationship from %(conceptFrom)s to %(conceptTo)s does not participate in an effective presentation relationship"),
modelObject=calcRels + [fromModelObject, toModelObject],
linkroleDefinition=self.modelXbrl.roleTypeDefinition(calcRels[0].linkrole if calcRels else None),
conceptFrom=self.modelXbrl.modelObject(fromIndx).qname, conceptTo=self.modelXbrl.modelObject(toIndx).qname, contextId=contextId)
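# GFM 1.06.04: each preferred label role used on a presentation arc must have a label in the default language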
if disclosureSystem.defaultXmlLang:
for concept, preferredLabelRels in conceptRelsUsedWithPreferredLabels.items():
for preferredLabelRel in preferredLabelRels:
preferredLabel = preferredLabelRel.preferredLabel
hasDefaultLangPreferredLabel = False
for modelLabelRel in labelsRelationshipSet.fromModelObject(concept):
modelLabel = modelLabelRel.toModelObject
if modelLabel.xmlLang.startswith(disclosureSystem.defaultXmlLang) and \
modelLabel.role == preferredLabel:
hasDefaultLangPreferredLabel = True
break
if not hasDefaultLangPreferredLabel:
self.modelXbrl.error("GFM.1.06.04", # 6.12.04 now reserved: ("EFM.6.12.04", "GFM.1.06.04"),
_("Concept %(concept)s missing %(lang)s preferred labels for role %(preferredLabel)s"),
modelObject=(preferredLabelRel, concept), concept=concept.qname, fromConcept=preferredLabelRel.fromModelObject.qname,
lang=disclosureSystem.defaultLanguage, preferredLabel=preferredLabel)
del conceptRelsUsedWithPreferredLabels
# 6.16.4, 1.16.5 Base sets of Domain Relationship Sets testing
self.modelXbrl.profileActivity("... filer preferred label checks", minTimeToShow=1.0)
''' try moving to plug-in
if self.validateSBRNL:
# check presentation link roles for generic linkbase order number
ordersRelationshipSet = modelXbrl.relationshipSet("http://www.nltaxonomie.nl/2011/arcrole/linkrole-order")
presLinkroleNumberURI = {}
presLinkrolesCount = 0
for countLinkroles in (True, False):
for roleURI, modelRoleTypes in modelXbrl.roleTypes.items():
for modelRoleType in modelRoleTypes:
if XbrlConst.qnLinkPresentationLink in modelRoleType.usedOns:
if countLinkroles:
presLinkrolesCount += 1
else:
if not ordersRelationshipSet:
modelXbrl.error("SBR.NL.2.2.3.06",
_("Presentation linkrole %(linkrole)s missing order number relationship set"),
modelObject=modelRoleType, linkrole=modelRoleType.roleURI)
else:
order = None
for orderNumRel in ordersRelationshipSet.fromModelObject(modelRoleType):
order = getattr(orderNumRel.toModelObject, "xValue", "(noPSVIvalue)")
if order in presLinkroleNumberURI:
modelXbrl.error("SBR.NL.2.2.3.06",
_("Presentation linkrole order number %(order)s of %(linkrole)s also used in %(otherLinkrole)s"),
modelObject=modelRoleType, order=order, linkrole=modelRoleType.roleURI, otherLinkrole=presLinkroleNumberURI[order])
else:
presLinkroleNumberURI[order] = modelRoleType.roleURI
if not order:
modelXbrl.error("SBR.NL.2.2.3.06",
_("Presentation linkrole %(linkrole)s missing order number"),
modelObject=modelRoleType, linkrole=modelRoleType.roleURI)
if countLinkroles and presLinkrolesCount < 2:
break # don't check order numbers if only one presentation linkrole
# check arc role definitions for labels
for arcroleURI, modelRoleTypes in modelXbrl.arcroleTypes.items():
for modelRoleType in modelRoleTypes:
if (not arcroleURI.startswith("http://xbrl.org/") and
modelRoleType.modelDocument.targetNamespace not in disclosureSystem.baseTaxonomyNamespaces and
(not modelRoleType.genLabel(lang="nl") or not modelRoleType.genLabel(lang="en"))):
modelXbrl.error("SBR.NL.2.2.4.02",
_("ArcroleType missing nl or en generic label: %(arcrole)s"),
modelObject=modelRoleType, arcrole=arcroleURI)
for domainElt in typedDomainElements:
if domainElt.modelDocument.targetNamespace not in disclosureSystem.baseTaxonomyNamespaces:
if not domainElt.genLabel(fallbackToQname=False,lang="nl"):
modelXbrl.error("SBR.NL.2.2.8.01",
_("Typed dimension domain element %(concept)s must have a generic label"),
modelObject=domainElt, concept=domainElt.qname)
if domainElt.type is not None and domainElt.type.localName == "complexType":
modelXbrl.error("SBR.NL.2.2.8.02",
_("Typed dimension domain element %(concept)s has disallowed complex content"),
modelObject=domainElt, concept=domainElt.qname)
self.modelXbrl.profileActivity("... SBR role types and type facits checks", minTimeToShow=1.0)
'''
if self.validateEFM:
for pluginXbrlMethod in pluginClassMethods("Validate.EFM.Finally"):
pluginXbrlMethod(self, conceptsUsed)
elif self.validateSBRNL:
for pluginXbrlMethod in pluginClassMethods("Validate.SBRNL.Finally"):
pluginXbrlMethod(self, conceptsUsed)
self.modelXbrl.profileActivity("... plug in '.Finally' checks", minTimeToShow=1.0)
self.modelXbrl.profileStat(_("validate{0}").format(modelXbrl.modelManager.disclosureSystem.validationType))
modelXbrl.modelManager.showStatus(_("ready"), 2000)
def isStandardUri(self, uri):
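# cache lookups of whether a URI belongs to the standard (non-extension) taxonomy set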
try:
return self._isStandardUri[uri]
except KeyError:
isStd = (uri in self.disclosureSystem.standardTaxonomiesDict or
(not isHttpUrl(uri) and
# try 2011-12-23 RH: if works, remove the localHrefs
# any(u.endswith(e) for u in (uri.replace("\\","/"),) for e in disclosureSystem.standardLocalHrefs)
"/basis/sbr/" in uri.replace("\\","/")
))
self._isStandardUri[uri] = isStd
return isStd
def directedCycle(self, relFrom, origin, fromRelationships, path):
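# depth-first search for a cycle that returns to the origin concept; returns the relationship path forming the cycle, or None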
if relFrom in fromRelationships:
for rel in fromRelationships[relFrom]:
relTo = rel.toModelObject
if relTo == origin:
return [rel]
if relTo not in path: # report cycle only where origin causes the cycle
path.add(relTo)
foundCycle = self.directedCycle(relTo, origin, fromRelationships, path)
if foundCycle is not None:
foundCycle.insert(0, rel)
return foundCycle
path.discard(relTo)
return None
''' this may be unused now, if common stock class members are only taken from observed facts, not defined members, per Dean R
def getDimMembers(self, dim, default=None, rels=None, members=None, visited=None):
hasDefinedRelationship = False
if rels is None:
visited = set()
members = set()
for rel in self.modelXbrl.relationshipSet(XbrlConst.dimensionDefault).fromModelObject(dim):
default = rel.toModelObject
rels = self.modelXbrl.relationshipSet(XbrlConst.dimensionDomain).fromModelObject(dim)
for rel in rels:
hasDefinedRelationship = True
relTo = rel.toModelObject
if rel.isUsable and relTo != default:
# HF: bug, if not usable, then not usable in any other place in network, fix
members.add(relTo.qname)
if relTo not in visited:
visited.add(relTo)
domMbrRels = self.modelXbrl.relationshipSet(XbrlConst.domainMember, rel.consecutiveLinkrole).fromModelObject(relTo)
self.getDimMembers(dim, default, domMbrRels, members, visited)
visited.discard(relTo)
return (members,hasDefinedRelationship)
'''
def checkConceptLabels(self, modelXbrl, labelsRelationshipSet, disclosureSystem, concept):
hasDefaultLangStandardLabel = False
dupLabels = {}
for modelLabelRel in labelsRelationshipSet.fromModelObject(concept):
modelLabel = modelLabelRel.toModelObject
if modelLabel is not None and modelLabel.xmlLang:
if modelLabel.xmlLang.startswith(disclosureSystem.defaultXmlLang) and \
modelLabel.role == XbrlConst.standardLabel:
hasDefaultLangStandardLabel = True
dupDetectKey = ( (modelLabel.role or ''), modelLabel.xmlLang)
if dupDetectKey in dupLabels:
modelXbrl.error(("EFM.6.10.02", "GFM.1.5.2", "SBR.NL.2.2.1.05"),
_("Concept %(concept)s has duplicated labels for role %(role)s lang %(lang)s."),
modelObject=(modelLabel, dupLabels[dupDetectKey]), # removed concept from modelObjects
concept=concept.qname, role=dupDetectKey[0], lang=dupDetectKey[1])
else:
dupLabels[dupDetectKey] = modelLabel
if modelLabel.role in (XbrlConst.periodStartLabel, XbrlConst.periodEndLabel):
modelXbrl.error("SBR.NL.2.3.8.03",
_("Concept %(concept)s has label for semantical role %(role)s."),
modelObject=modelLabel, concept=concept.qname, role=modelLabel.role)
if self.validateSBRNL: # check for missing nl labels
for role, lang in dupLabels.keys():
if role and lang != disclosureSystem.defaultXmlLang and (role,disclosureSystem.defaultXmlLang) not in dupLabels:
modelXbrl.error("SBR.NL.2.3.8.05",
_("Concept %(concept)s has en but no nl label in role %(role)s."),
modelObject=(concept,dupLabels[(role,lang)]), concept=concept.qname, role=role)
#6 10.1 en-US standard label
if not hasDefaultLangStandardLabel:
modelXbrl.error(("EFM.6.10.01", "GFM.1.05.01"),
_("Concept used in facts %(concept)s is missing an %(lang)s standard label."),
# concept must be the first referenced modelObject
modelObject=[concept] + list(modelXbrl.factsByQname[concept.qname]), concept=concept.qname,
lang=disclosureSystem.defaultLanguage)
#6 10.3 default lang label for every role
try:
dupLabels[("zzzz",disclosureSystem.defaultXmlLang)] = None #to allow following loop
priorRole = None
priorLang = None
hasDefaultLang = True
for role, lang in sorted(dupLabels.keys()):
if role != priorRole:
if not hasDefaultLang:
modelXbrl.error(("EFM.6.10.03", "GFM.1.5.3"),
_("Concept %(concept)s is missing an %(lang)s label for role %(role)s."),
modelObject=list(modelXbrl.factsByQname[concept.qname]) + [dupLabels[(priorRole,priorLang)]],
concept=concept.qname,
lang=disclosureSystem.defaultLanguage, role=priorRole)
hasDefaultLang = False
priorLang = lang
priorRole = role
if lang is not None and lang.startswith(disclosureSystem.defaultXmlLang):
hasDefaultLang = True
except Exception as err:
pass
# check if concept is behaving as a total based on role, deed, or circumstances
def presumptionOfTotal(self, rel, siblingRels, iSibling, isStatementSheet, nestedInTotal, checkLabelRoleOnly):
"""
A numeric concept target of a parent-child relationship is presumed total if:
(i) its preferredLabel role is a total role (pre XbrlConst static function of
current such total roles) or
(ii) if not in a nested total (abstract child relationship to a known total's
contributing siblings):
the parent is not SupplementalCashFlowInformationAbstract and the preceding
sibling relationship is monetary and it's on a statement sheet and it's the
last of more than one monetary item
(a) Last monetary parented by an abstract or non-monetary and not in a nested
(breakdown) total, or
(b) effective label (en-US of preferred role) has "Total" in its wording.
(c) (commented out for now due to false positives: Concept name has "Total"
in its name)
(d) last monetary (may be sub level) whose immediate sibling is a calc LB child
"""
concept = rel.toModelObject
if isinstance(concept, ModelConcept) and concept.isNumeric:
preferredLabel = rel.preferredLabel
if XbrlConst.isTotalRole(preferredLabel):
return _("preferredLabel {0}").format(os.path.basename(preferredLabel))
if concept.isMonetary and not checkLabelRoleOnly:
effectiveLabel = concept.label(lang="en-US", fallbackToQname=False, preferredLabel=preferredLabel)
''' word total in label/name does not seem to be a good indicator,
e.g., Google Total in label for ShareBasedCompensationArrangementByShareBasedPaymentAwardGrantDateFairValueOfOptionsVested followed by
label with Aggregate but name has Total
... so only perform this test on last monetary in a Note
if 'Total' in effectiveLabel: # also check for Net ???
return _("word 'Total' in effective label {0}").format(effectiveLabel)
if 'Total' in concept.name: # also check for Net ???
return _("word 'Total' in concept name {0}").format(concept.name)
'''
parent = rel.fromModelObject
if (len(siblingRels) > 1 and
iSibling == len(siblingRels) - 1 and
parent is not None and
parent.name not in {
"SupplementalCashFlowInformationAbstract"
}):
preceedingSibling = siblingRels[iSibling - 1].toModelObject
if preceedingSibling is not None and preceedingSibling.isMonetary:
# last fact, may be total
if isStatementSheet:
# check if facts add up??
if (parent.isAbstract or not parent.isMonetary) and not nestedInTotal:
return _("last monetary item in statement sheet monetary line items parented by nonMonetary concept")
elif effectiveLabel and 'Total' in effectiveLabel:
return _("last monetary item in statement sheet monetary line items with word 'Total' in effective label {0}").format(effectiveLabel)
elif 'Total' in concept.name:
return _("last monetary item in statement sheet monetary line items with word 'Total' in concept name {0}").format(concept.name)
elif self.summationItemRelsSetAllELRs.isRelated(concept, "child", preceedingSibling):
return _("last monetary item in statement sheet monetary line items is calc sum of previous line item")
''' for now unreliable to use total words for notes
else:
if 'Total' in effectiveLabel: # also check for Net ???
return _("last monetary item in note with word 'Total' in effective label {0}").format(effectiveLabel)
if 'Total' in concept.name: # also check for Net ???
return _("last monetary item in note with word 'Total' in concept name {0}").format(concept.name)
'''
return None
# 6.15.02, 6.15.03
def checkCalcsTreeWalk(self, parentChildRels, concept, isStatementSheet, inNestedTotal, conceptsUsed, visited):
"""
- EFM-strict validation 6.15.2/3: finding presumed totals in presentation and inspecting for
equivalents in calculation (noted as error-semantic, in efm-strict mode).
- Best practice approach: inspecting for calculations in the UGT calculations that would hint
that like filing constructs should have presentation (noted as warning-semantic in best practices plug-in, when loaded and enabled)
EFM-strict missing-calcs
a. Presumption of total
The presentation linkbase is tree-walked to find items presumed to be totals and their contributing
items. (see description of presumptionOfTotal, above)
b. Finding calculation link roles with least mis-fit to presumed total and its contributing items
(presumptionOfTotal in ValidateFiling.py).
For each presumed total (checkForCalculations in ValidateFiling.py):
b.1 Contributing items are found for the presumed total as follows:
From the presumed total, walking back through its preceding siblings (with caution to avoid
looping on allowed direct cycles), a preceding sibling is a contributing item if it has facts,
same period type, and numeric. If a preceding sibling is abstract, the abstract's children are
likewise recursively checked (as they often represent a breakdown, and such children of an
abstract sibling to the total are also contributing items (except for such children preceding
a total at the child level).
If a preceding sibling is presumed total (on same level), it is a running subtotal (in subsequent
same-level total) unless it's independent in the calc LB (separate totaled stuff preceding these
siblings) or related to grandparent sum.
b.2 Finding the facts of these total/contributing item sets
Sets of total and compatible contributing facts that match the sets of total concept and
contributing concept must next be found, because each of these different sets (of total
and compatible contributing facts) may fit different calculation link roles (according to
which compatible contributing facts are present for each total). This is particularly
important when totals and contributing items exist both on face statements and notes, but
the contributing compatible fact population is different.
For each fact of the total concept, that has a specified end/instant datetime and unit, if
(i) it's not on a statement or
(ii) required context is absent or
(iii) the fact's end/instant is within the required context's duration, the contributing
item facts are those unit and context equivalent to such total fact.
b.3 Finding least-mis-matched calculation link role
Each link role in calculation produces a different set of summation-item arc-sets, and
each set of presumed-total facts and compatible contributing item facts is separately
considered to find the least-mismatched calculation summation-item arc-set.
The link roles are not intermixed or aggregated, each link role produces independent
summation-item arc-sets (XBRL 2.1 section 5.2.5.2).
For each total fact and compatible contributing item facts, the calculation link roles
are examined one-by-one for that link-role where the total has children missing the
least of the compatible contributing item fact children, and reported either as 6.15.02
(for statement sheet presentation link roles) or 6.15.03 (for non-statement link roles).
The determination of statement sheet is according to the presentation tree walk. The
search for least-misfit calculation link role does not care or consider the value of the
calculation link role, just the summation-item arc-set from the presumed-total concept.
"""
if concept not in visited:
visited.add(concept)
siblingRels = parentChildRels.fromModelObject(concept)
foundTotalAtThisLevel = False
for iSibling, rel in enumerate(siblingRels):
reasonPresumedTotal = self.presumptionOfTotal(rel, siblingRels, iSibling, isStatementSheet, False, inNestedTotal)
if reasonPresumedTotal:
foundTotalAtThisLevel = True
self.checkForCalculations(parentChildRels, siblingRels, iSibling, rel.toModelObject, rel, reasonPresumedTotal, isStatementSheet, conceptsUsed, False, set())
if foundTotalAtThisLevel: # try nested tree walk to look for lower totals
inNestedTotal = True
for rel in siblingRels:
self.checkCalcsTreeWalk(parentChildRels, rel.toModelObject, isStatementSheet, inNestedTotal, conceptsUsed, visited)
visited.remove(concept)
def checkForCalculations(self, parentChildRels, siblingRels, iSibling, totalConcept, totalRel, reasonPresumedTotal, isStatementSheet, conceptsUsed, nestedItems, contributingItems):
# compatible preceding sibling facts must have calc relationship to total
for iContributingRel in range(iSibling - 1, -1, -1):
contributingRel = siblingRels[iContributingRel]
siblingConcept = contributingRel.toModelObject
if siblingConcept is not None:
if siblingConcept is totalConcept: # direct cycle loop likely, possibly among children of abstract sibling
break
if self.summationItemRelsSetAllELRs.isRelated(totalConcept, 'ancestral-sibling', siblingConcept):
break # sibling independently contributes as sibling of totalConcept to grandfather total
if any(self.summationItemRelsSetAllELRs.isRelated(contributingItem, 'child', siblingConcept)
for contributingItem in contributingItems):
break # this subtotal is a breakdown of something already being considered
isContributingTotal = self.presumptionOfTotal(contributingRel, siblingRels, iContributingRel, isStatementSheet, True, False)
# contributing total may actually be separate non-running subtotal, if so don't include it here
if isContributingTotal:
if (self.summationItemRelsSetAllELRs.fromModelObject(siblingConcept) and not
self.summationItemRelsSetAllELRs.toModelObject(siblingConcept)):
break # sibling independently contributes as sibling of totalConcept as a root in another hierarchy
if siblingConcept.isAbstract:
childRels = parentChildRels.fromModelObject(siblingConcept)
self.checkForCalculations(parentChildRels, childRels, len(childRels), totalConcept, totalRel, reasonPresumedTotal, isStatementSheet, conceptsUsed, True, contributingItems)
elif (siblingConcept in conceptsUsed and
siblingConcept.isNumeric and
siblingConcept.periodType == totalConcept.periodType):
contributingItems.add(siblingConcept)
if isContributingTotal:
break
if not nestedItems and contributingItems:
# must check each totalFact and compatible items for a relationship set separately
# (because different sets of sums/items could, on edge case, be in different ELRs)
compatibleItemsFacts = defaultdict(set)
for totalFact in self.modelXbrl.factsByQname[totalConcept.qname]:
totalFactContext = totalFact.context
totalFactUnit = totalFact.unit
if (totalFactContext is not None and totalFactUnit is not None and totalFactContext.endDatetime is not None and
(not isStatementSheet or
(self.requiredContext is None or
self.requiredContext.startDatetime <= totalFactContext.endDatetime <= self.requiredContext.endDatetime))):
compatibleItemConcepts = set()
compatibleFacts = {totalFact}
for itemConcept in contributingItems:
for itemFact in self.modelXbrl.factsByQname[itemConcept.qname]:
if (totalFactContext.isEqualTo(itemFact.context) and
totalFactUnit.isEqualTo(itemFact.unit)):
compatibleItemConcepts.add(itemConcept)
compatibleFacts.add(itemFact)
if len(compatibleItemConcepts) >= 2: # 6.15.2 requires 2 or more line items along with their net or total
compatibleItemsFacts[frozenset(compatibleItemConcepts)].update(compatibleFacts)
for compatibleItemConcepts, compatibleFacts in compatibleItemsFacts.items():
foundSummationItemSet = False
leastMissingItemsSet = compatibleItemConcepts
for ELR in self.summationItemRelsSetAllELRs.linkRoleUris:
relSet = self.modelXbrl.relationshipSet(XbrlConst.summationItem,ELR)
missingItems = (compatibleItemConcepts -
frozenset(r.toModelObject
for r in relSet.fromModelObject(totalConcept)))
# may be slow, but must remove sibling or descendants to avoid annoying false positives
# such as in http://www.sec.gov/Archives/edgar/data/1341439/000119312512129918/orcl-20120229.xml
missingItems -= set(concept
for concept in missingItems
if relSet.isRelated(totalConcept, "sibling-or-descendant", concept))
# items not required in sum
unrequiredItems = set(concept
for concept in missingItems
if concept.name in ("CommitmentsAndContingencies",))
missingItems -= unrequiredItems
if missingItems:
if len(missingItems) < len(leastMissingItemsSet):
leastMissingItemsSet = missingItems
else:
foundSummationItemSet = True
'''
# testing with DH (merge all calc ELRs instead of isolating calc ELRs)...
relSet = self.modelXbrl.relationshipSet(XbrlConst.summationItem)
missingItems = (compatibleItemConcepts -
frozenset(r.toModelObject
for r in relSet.fromModelObject(totalConcept)))
foundSummationItemSet = len(missingItems) == 0
'''
if not foundSummationItemSet:
linkroleDefinition = self.modelXbrl.roleTypeDefinition(contributingRel.linkrole)
reasonIssueIsWarning = ""
msgCode = "ERROR-SEMANTIC"
if isStatementSheet:
errs = ("EFM.6.15.02,6.13.02,6.13.03", "GFM.2.06.02,2.05.02,2.05.03")
msg = _("Financial statement calculation relationship missing from total concept to item concepts, based on required presentation of line items and totals. "
"%(reasonIssueIsWarning)s"
"\n\nPresentation link role: \n%(linkrole)s \n%(linkroleDefinition)s. "
"\n\nTotal concept: \n%(conceptSum)s. "
"\n\nReason presumed total: \n%(reasonPresumedTotal)s. "
"\n\nSummation items missing: \n%(missingConcepts)s. "
"\n\nExpected item concepts: \n%(itemConcepts)s. "
"\n\nCorresponding facts in contexts: \n%(contextIDs)s\n")
else:
errs = ("EFM.6.15.03,6.13.02,6.13.03", "GFM.2.06.03,2.05.02,2.05.03")
msg = _("Notes calculation relationship missing from total concept to item concepts, based on required presentation of line items and totals. "
"%(reasonIssueIsWarning)s"
"\n\nPresentation link role: \n%(linkrole)s \n%(linkroleDefinition)s."
"\n\nTotal concept: \n%(conceptSum)s. "
"\n\nReason presumed total: \n%(reasonPresumedTotal)s. "
"\n\nSummation items missing \n%(missingConcepts)s. "
"\n\nExpected item concepts \n%(itemConcepts)s. "
"\n\nCorresponding facts in contexts: \n%(contextIDs)s\n")
# cases causing this issue to be a warning instead of an error
if all(f.isNil for f in compatibleFacts if f.concept in leastMissingItemsSet):
reasonIssueIsWarning = _("\n\nMissing items are nil, which doesn't affect validity but may impair analysis of concept semantics from calculation relationships. ")
msgCode = "WARNING-SEMANTIC"
errs = tuple(e + '.missingItemsNil' for e in errs)
if "parenthetical" in linkroleDefinition.lower():
reasonIssueIsWarning += _("\n\nLink role is parenthetical. ")
msgCode = "WARNING-SEMANTIC"
errs = tuple(e + '.parenthetical' for e in errs)
"""@messageCatalog=[
[["EFM.6.15.02,6.13.02,6.13.03", "GFM.2.06.02,2.05.02,2.05.03"],
"Financial statement calculation relationship missing from total concept to item concepts, based on required presentation of line items and totals.
%(reasonIssueIsWarning)s
Presentation link role:
%(linkrole)s
%(linkroleDefinition)s.
Total concept:
%(conceptSum)s.
Reason presumed total:
%(reasonPresumedTotal)s.
Summation items missing
%(missingConcepts)s.
Expected item concepts
%(itemConcepts)s.
Corresponding facts in contexts:
%(contextIDs)s
"],
[["EFM.6.15.03,6.13.02,6.13.03", "GFM.2.06.03,2.05.02,2.05.03"],
"Notes calculation relationship missing from total concept to item concepts, based on required presentation of line items and totals.
%(reasonIssueIsWarning)s
Presentation link role:
%(linkrole)s
%(linkroleDefinition)s.
Total concept:
%(conceptSum)s.
Reason presumed total:
%(reasonPresumedTotal)s.
Summation items missing
%(missingConcepts)s.
Expected item concepts
%(itemConcepts)s.
Corresponding facts in contexts:
%(contextIDs)s"]]"""
self.modelXbrl.log(msgCode, errs, msg,
modelObject=[totalConcept, totalRel, siblingConcept, contributingRel] + [f for f in compatibleFacts],
reasonIssueIsWarning=reasonIssueIsWarning,
conceptSum=totalConcept.qname, linkrole=contributingRel.linkrole, linkroleDefinition=linkroleDefinition,
reasonPresumedTotal=reasonPresumedTotal,
itemConcepts=', \n'.join(sorted(set(str(c.qname) for c in compatibleItemConcepts))),
missingConcepts = ', \n'.join(sorted(set(str(c.qname) for c in leastMissingItemsSet))),
contextIDs=', '.join(sorted(set(f.contextID for f in compatibleFacts))))
leastMissingItemsSet = None #dereference, can't delete with Python 3.1
del foundSummationItemSet
del compatibleItemsFacts # dereference object references
@property
def EFM60303(self):
if self.exhibitType == "EX-2.01": # only applicable for edgar production and parameterized testcases
return "EFM.6.23.01"
else:
return "EFM.6.03.03"
# for SBR 2.3.4.01
def pLinkedNonAbstractDescendantQnames(modelXbrl, concept, descendants=None):
if descendants is None: descendants = set()
for rel in modelXbrl.relationshipSet(XbrlConst.parentChild).fromModelObject(concept):
child = rel.toModelObject
if child is not None:
if child.isAbstract:
pLinkedNonAbstractDescendantQnames(modelXbrl, child, descendants)
else:
descendants.add(child.qname)
return descendants
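# Illustrative note (comment added, not part of the original source): this helper walks
# parent-child presentation arcs from the given concept and collects the qnames of every
# non-abstract descendant, e.g.
#   pLinkedNonAbstractDescendantQnames(modelXbrl, someAbstractConcept) -> {qname1, qname2, ...}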
``` |
{
"source": "josl/ASM_challenge",
"score": 2
} |
#### File: ASM_challenge/isolates/download_accession_list.py
```python
from __future__ import division, print_function, absolute_import
import os
import sys
import json
import argparse
from shutil import move
from progressbar import Bar, Percentage, ProgressBar, ETA
from isolates import __version__, TemporaryDirectory
from isolates.log import _logger
from isolates.metadata import (ExtractExperimentMetadata,
ExtractExperimentIDs_acc)
from isolates.sequence import Sequence
from isolates.source import acctypes
__author__ = "<NAME>"
__coauthor__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "none"
def parse_args_accessions(args):
"""
Parse command line parameters
:param args: command line parameters as list of strings
:return: command line parameters as :obj:`argparse.Namespace`
"""
parser = argparse.ArgumentParser(
description="Download script of isolates from" +
"ENA taxonomy or Accession list")
parser.add_argument(
'--version',
action='version',
version='isolates {ver}'.format(ver=__version__))
parser.add_argument(
'-a',
nargs=1,
metavar=('PATH'),
help='Format: [PATH]\n' +
'to file containing list of ACCESSION IDs, 1 per line\n' +
'Name of the file is used to identify the isolates downloaded.'
)
parser.add_argument(
'-m',
nargs=1,
type=argparse.FileType('r'),
metavar=('METADATA'),
default=None,
help='JSON file with seed attributes and mandatory fields\n'
)
parser.add_argument(
'-out',
nargs=1,
metavar=('OUTPUT'),
required=True,
help='Path to save isolates'
)
parser.add_argument(
'-p',
'--preserve',
action="store_true",
dest="preserve",
default=False,
help='preserve any existing SRA and fastq files\n'
)
parser.add_argument(
'--all_runs_as_samples',
action="store_true",
dest="all_runs_as_samples",
default=False,
help=('Treat all runs associated to a sample as separate samples. '
'Default is to combine them into one run.\n')
)
parser.add_argument(
'--skip_files',
action="store_true",
dest="skip_files",
default=False,
help='Skip downloading sequence files; only create the sample directory and metadata.\n'
)
return parser.parse_args(args)
def DownloadRunFiles(runid, tmpdir):
# Download run files
try:
s = Sequence(runid, tmpdir)
s.download_fastq()
if not s.error:
_logger.info("Downloaded files: %s", ','.join(s.files))
return s.files
else: return None
except ValueError as e:
_logger.error(e)
return None
def CreateSampleDir(sfiles, m, sample_dir, preserve=False, skip_files=False):
sample_dir = str(sample_dir)
if not skip_files and len(sfiles) == 0:
_logger.error("Error: No files were found! (%s)", sample_dir)
return False
if not os.path.exists(sample_dir):
_logger.info("Create sample dir: %s", sample_dir)
# Create 'sample' dir
os.mkdir(sample_dir)
# Move files from tmpdir to sample dir
for sf in sfiles: move(sf, sample_dir)
elif not preserve and not skip_files:
# Empty sample directory
for fn in os.listdir(sample_dir):
os.unlink("%s/%s"%(sample_dir, fn))
# Move files from tmpdir to sample dir
for sf in sfiles: move(sf, sample_dir)
# Update and create metadata file
try:
m.metadata["file_names"] = ' '.join(
[os.path.basename(sf).replace(' ','_')
for sf in sfiles
if not os.path.basename(sf) == 'meta.json']
)
m.save_metadata(sample_dir)
except ValueError as e:
_logger.error(e)
return False
else:
return True
def download_fastq_from_list(accession_list, output, json, preserve=False, all_runs_as_samples=False, skip_files=False):
"""
Get Fastq from list of IDs
:param accession_list: List of accessions
:param dir: Output folder
"""
metadata = []
cwd = os.getcwd()
with open(accession_list, 'r') as f:
# Setup batch dir
batch_dir = "%s/%s/"%(cwd, output)
if not os.path.exists(batch_dir): os.mkdir(batch_dir)
os.chdir(batch_dir)
# Set logging
_logger.Set(filename="%s/download-accession-list.log"%batch_dir)
# Count samples in accession_list
n_samples = sum(1 for l in f)
f.seek(0)
_logger.info("Number of samples to download: %s", n_samples)
# Start progress bar
pbar = ProgressBar(
widgets = [ETA(), ' - ', Percentage(), ' : ', Bar()],
maxval = n_samples
).start()
pbar.update(0)
failed_accession = []
sample_dir_id = 0
for i, l in enumerate(f):
accession = l.strip()
if accession == '': continue
# Determine accession type
if accession[:3] in acctypes:
accession_type = acctypes[accession[:3]]
else:
_logger.error("unknown accession type for '%s'!", accession)
failed_accession.append(accession)
continue
_logger.info("Acc Found: %s (%s)", accession, accession_type)
if accession_type in ['study', 'sample']:
for experiment_id in ExtractExperimentIDs_acc(accession):
sample_dir_id = ProcessExperiment(
experiment_id, json, batch_dir,sample_dir_id, preserve,
failed_accession, all_runs_as_samples, skip_files)
elif accession_type == 'experiment':
sample_dir_id = ProcessExperiment(
accession, json, batch_dir,sample_dir_id, preserve,
failed_accession, all_runs_as_samples, skip_files)
elif accession_type == 'run':
sample_dir_id = ProcessExperiment(
accession, json, batch_dir,sample_dir_id, preserve,
failed_accession, all_runs_as_samples, skip_files)
pbar.update(i)
pbar.finish()
if failed_accession:
_logger.info("The following accessions were not downloaded!")
_logger.info('\n'.join(failed_accession))
else:
_logger.info("All accessions downloaded succesfully!")
def ProcessExperiment(experiment_id, json, batch_dir, sample_dir_id, preserve, failed_accession, all_runs_as_samples, skip_files=False):
_logger.info("Processing %s...", experiment_id)
if all_runs_as_samples:
sample_dir_id = ProcessExperimentSeparate(
experiment_id, json, batch_dir, sample_dir_id,
preserve, failed_accession, skip_files)
else:
sample_dir_id = ProcessExperimentCombined(
experiment_id, json, batch_dir, sample_dir_id,
preserve, failed_accession, skip_files)
return sample_dir_id
def ProcessExperimentSeparate(experiment_id, json, batch_dir, sample_dir_id, preserve, failed_accession, skip_files=False):
m = ExtractExperimentMetadata(experiment_id, json)
if m.valid_metadata():
# Check if a run ID was submitted, and if so only process that
if experiment_id in m.runIDs: m.runIDs = [experiment_id]
# Process the runIDs as samples
_logger.info("Found Following Runs: %s", ', '.join(m.runIDs))
for runid in m.runIDs:
with TemporaryDirectory() as tmpdir:
os.chdir(batch_dir)
sample_dir = "%s/%s/"%(batch_dir, sample_dir_id)
if os.path.exists(sample_dir):
sfiles = [x for x in os.listdir(sample_dir) if any([y in x for y in ['fq','fastq']])]
else:
sfiles = []
if not preserve or not skip_files or len(sfiles) == 0:
sfiles = DownloadRunFiles(runid, tmpdir)
if sfiles is not None:
success = CreateSampleDir(sfiles, m, sample_dir, preserve, skip_files)
if success:
sample_dir_id += 1
else:
failed_accession.append(runid)
else:
_logger.error("Files could not be retrieved! (%s)", runid)
failed_accession.append(runid)
else:
_logger.error("Metadata Invalid! (%s) - %s", experiment_id, m.metadata.items())
failed_accession.append(experiment_id)
return sample_dir_id
def ProcessExperimentCombined(experiment_id, json, batch_dir, sample_dir_id, preserve, failed_accession, skip_files=False):
m = ExtractExperimentMetadata(experiment_id, json)
if m.valid_metadata():
# Check if a run ID was submitted, and if so only process that
if experiment_id in m.runIDs: m.runIDs = [experiment_id]
# Process the runs as one sample
_logger.info("Found Following Runs: %s", ', '.join(m.runIDs))
with TemporaryDirectory() as tmpdir:
os.chdir(batch_dir)
sample_dir = "%s/%s/"%(batch_dir, sample_dir_id)
csfiles = []
if preserve and os.path.exists(sample_dir):
csfiles = [x for x in os.listdir(sample_dir) if any([y in x for y in ['fq','fastq']])]
if csfiles == [] and not skip_files:
sfiles = []
for runid in m.runIDs:
sf = DownloadRunFiles(runid, tmpdir)
if sf is not None:
sfiles.append(sf)
else:
_logger.error("Run files could not be retrieved! (%s)",
runid)
_logger.info("Found Following files sets:\n%s\n",
'\n'.join([', '.join(sf) for sf in sfiles]))
# Combine sfiles into one entry
if len(sfiles) > 1:
for file_no, file_set in enumerate(zip(*sfiles)):
ext = '.'.join(file_set[0].split('/')[-1].split('.')[1:])
if len(sfiles[0]) > 1:
new_file = "%s_%s.combined.%s"%(experiment_id,file_no+1, ext)
else:
new_file = "%s.combined.%s"%(experiment_id, ext)
with open(new_file, 'w') as nf:
for fn in file_set:
with open(fn, 'rb') as f:
nf.write(f.read())
if os.path.exists(new_file):
csfiles.append(new_file)
else:
_logger.error("Combined file creation failed! (%s: %s)",
experiment_id, file_no)
break
elif isinstance(sfiles[0], list):
csfiles = sfiles[0]
if csfiles == []:
_logger.error("Files could not be combined! (%s)",
experiment_id)
failed_accession.append(experiment_id)
if csfiles != [] or skip_files:
success = CreateSampleDir(csfiles, m, sample_dir, preserve, skip_files)
if success:
sample_dir_id += 1
else:
failed_accession.append(experiment_id)
else:
_logger.error("Files could not be retrieved! (%s)",
experiment_id)
failed_accession.append(experiment_id)
else:
_logger.error("Metadata Invalid! (%s) - %s", experiment_id, m.metadata.items())
failed_accession.append(experiment_id)
return sample_dir_id
def download_accession_list():
args = parse_args_accessions(sys.argv[1:])
if args.a is not None:
if args.m is not None:
try:
default = json.load(args.m[0])
except ValueError as e:
print("ERROR: Json file has the wrong format!\n", e)
exit()
else:
default = None
download_fastq_from_list(args.a[0], args.out[0], default, args.preserve, args.all_runs_as_samples, args.skip_files)
else:
print('Usage: -a PATH -out PATH [-m JSON]')
if __name__ == "__main__":
download_accession_list()
```
#### File: ASM_challenge/isolates/__init__.py
```python
import pkg_resources
import socket
import urllib
from email.mime.text import MIMEText
from subprocess import Popen, PIPE
from shutil import rmtree
from tempfile import mkdtemp
# GENERAL MODULE CLASSES
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
>>> import os
>>> tmpfile = 'file.ext'
>>> with TemporaryDirectory() as tmpdir:
... print "Was tmpdir created? %s"%os.path.exists(tmpdir)
... os.chdir(tmpdir)
... with open(tmpfile, 'w') as f:
... f.write('Hello World!')
... print "Was tmpfile created? %s"%os.path.exists(tmpfile)
Was tmpdir created? True
Was tmpfile created? True
>>> print "Does tmpfile still exist? %s"%os.path.exists(tmpfile)
Does tmpfile still exist? False
>>> print "Does tmpdir still exist? %s"%os.path.exists(tmpdir)
Does tmpdir still exist? False
Upon exiting the context, the directory and everything contained
in it are removed.
This method is not implemented in python-2.7!
"""
def __init__(self, suffix="", prefix="tmp", dir=None):
self.name = None
self.name = mkdtemp(suffix, prefix, dir)
def __enter__(self):
return self.name
def cleanup(self, _warn=False):
if self.name:
try: rmtree(self.name)
except: print('Could not remove %s'%self.name)
else: self.name = None
def __exit__(self, exc, value, tb):
self.cleanup()
class openurl(object):
''' urllib library wrapper, to make it easier to use.
>>> import urllib
>>> with openurl('http://www.ncbi.nlm.nih.gov/sra/?term=ERX006651&format=text') as u:
... for l in u:
... print l.strip()
'''
def __init__(self, url):
self.url = url
def __enter__(self):
self.u = urllib.urlopen(self.url)
return self.u
def __exit__(self, type=None, value=None, traceback=None):
self.u.close()
self.u = None
def __iter__(self):
return iter(self.u)
def read(self):
return self.u.read()
def readline(self):
return self.u.readline()
def readlines(self):
return self.u.readlines()
class mail_obj():
'''
>>> mail = mail_obj(['<EMAIL>'], '<EMAIL>', '<EMAIL>')
>>> mail.send('Hello my subject!','Hello my body!')
'''
def __init__(self, recipients, sender, reply):
self.to = recipients
self.fr = sender
self.rt = reply
def send(self, subject, message):
''' '''
msg = MIMEText(message)
msg["To"] = ', '.join(self.to) if isinstance(self.to, list) else self.to
msg["From"] = self.fr
msg["Reply-To"] = self.rt
msg["Subject"] = subject
p = Popen(["sendmail -r %s %s"%(self.fr, ' '.join(self.to))],
shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = p.communicate(msg.as_string())
p.wait()
def test(self, subject, message):
''' '''
msg = MIMEText(message)
msg["To"] = ', '.join(self.to) if isinstance(self.to, list) else self.to
msg["From"] = self.fr
msg["Reply-To"] = self.rt
msg["Subject"] = subject
return "%s\n%s"%("sendmail -r %s %s"%(self.fr, ' '.join(self.to)),
msg.as_string())
# GENERAL MODULE FUNCTIONS
def flipdict(d):
''' switch keys and values, so that all values are keys in a new dict '''
return dict(zip(*list(reversed(zip(*[(k, v) for k in d for v in d[k]])))))
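# Illustrative example (comment added; Python 2 semantics, where zip returns a list):
#   flipdict({'a': [1, 2], 'b': [3]}) -> {1: 'a', 2: 'a', 3: 'b'}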
def ceil(n):
''' compute the closest upper integer of a float '''
return int(n) + (n%1 > 0)
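# Illustrative example (comment added): ceil(2.0) -> 2, ceil(2.1) -> 3;
# matches math.ceil for non-negative floats.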
# MAIN
# Set version
try:
__version__ = pkg_resources.get_distribution(__name__).version
except:
__version__ = 'unknown'
# Setup Mail Wrapper
fqdn = socket.getfqdn()
if 'cbs.dtu.dk' in fqdn:
mail = mail_obj(['<EMAIL>'],
'<EMAIL>',
'<EMAIL>')
elif any([x in fqdn for x in ['computerome', 'risoe']]):
mail = mail_obj(['<EMAIL>'],
'<EMAIL>',
'<EMAIL>')
else:
mail = None
```
#### File: ASM_challenge/isolates/log.py
```python
import logging
import sys
class log_obj():
''' Object for handling logging across several modules/scripts
USAGE:
>>> from log import _logger
>>> _logger.Set(filename='metadata.log')
>>> _logger.error("error msg %s, %s", 'arg1', 'arg2')
'''
def __init__(self):
pass
def Set(self, filename='metadata.log'):
# Configure the root logger to write to the given file
logging.basicConfig(
level=logging.INFO,
format='%(levelname)s:%(message)s',
filename=filename,
filemode='w'
)
self.logger = logging.getLogger(__name__)
def debug(self, *msg):
self.logger.debug(*msg)
def info(self, *msg):
self.logger.info(*msg)
def warning(self, *msg):
self.logger.warning(*msg)
def error(self, *msg):
self.logger.error(*msg)
def log(self, *msg):
self.logger.log(*msg)
# Init _logger
_logger = log_obj()
``` |
{
"source": "JoslaTech/casba-web-master",
"score": 3
} |
#### File: casba-web-master/test/switchIT.py
```python
import os
import time
from flask import Flask, request
import hashlib
import requests
from xml.etree.ElementTree import Element, SubElement, tostring
app = Flask(__name__)
@app.route('/')
def index():
destination = "2001220212"
endpoint = "A"
pin = "0012"
bankCode = "033"
amount = "1000"
currency = "NGN"
reference = ''.join(["JE", str(time.time()), "switchIT"])
fundTransferAccount = Element('fundTransferAccount')
destination = SubElement(fundTransferAccount, 'destination')
destination.text = "2001220212"
endpoint = SubElement(fundTransferAccount, 'endpoint')
endpoint.text = "A"
pin = SubElement(fundTransferAccount, 'pin')
pin.text = "0012"
bankCode = SubElement(fundTransferAccount, 'bankCode')
bankCode.text = "033"
amount = SubElement(fundTransferAccount, 'amount')
amount.text = "1000"
currency = SubElement(fundTransferAccount, 'currency')
currency.text = "NGN"
reference = SubElement(fundTransferAccount, 'reference')
reference.text = ''.join(["JE", str(time.time()), "switchIT"])
# print tostring(fundTransferAccount)
headers = {'Content-Type': 'application/xml'}  # the payload sent to the server is XML
response = requests.post("https://staging.etranzact.com/FundGatePlus/doc.wsdl", data=tostring(fundTransferAccount), headers=headers)
print(response.content)
return '<h1>Hello World!</h1>'
port = os.getenv('PORT', '5000')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=int(port))
``` |
{
"source": "josl/global-sewage-signatures",
"score": 2
} |
#### File: global-sewage-signatures/global_sewage_signatures/test.python.py
```python
from global_sewage_signatures import MinHash as MH
import redis
from pkg_resources import resource_filename
import numpy as np
import sys
import threading
import time
from bitarray import bitarray
from struct import *
import cython
# Hostname comes from docker-compose.yml "depends_on" directive
redis_db = redis.StrictRedis(db=3, host='redis', port=6379)
dist = 0.2
sparse_matrix = []
hash_funcs = 100
keys = len(redis_db.keys())
# size = 4**16
# a = bitarray(size)
# a.setall(True)
# a[45] = False
# redis_db.set('test5', a.tobytes())
def worker_dense(key):
key = 'test5'
print(key)
byte_string = redis_db.get(key)
bytes_read = 0
bytes_to_unpack = 1
# bytes_to_unpack = string.len(s)
# if bytes_to_unpack > 32 then
# bytes_to_unpack = 1
# end
# max = string.len(s) / bytes_to_unpack
max_bytes = len(byte_string) / bytes_to_unpack
tot_bits = max_bytes * 8
tot = len(byte_string) * 8
zeroes = 0
positions = []
total_bytes = 1
negation = 2 ** (bytes_to_unpack * 8) - 1
fmt = str(bytes_to_unpack) + 'B'
pop = redis_db.bitcount(key)
tot = len(byte_string) * 8
start = time.time()
print(bytes_read, max_bytes)
while bytes_read < max_bytes:
d = unpack_from(fmt, byte_string, bytes_read)
bytes_read += (1 * bytes_to_unpack)
d = negation - d[0]
while d:
tot -= 1
# zeroes = zeroes + 1
# Calculate position of 1
# d_ones = d
# inner_zeroes = 0
# while not d_ones & 1:
# inner_zeroes += 1
# d_ones >>= 1
# positions.append(((bytes_read - 1) * 8) - 1 - inner_zeroes)
# estimated = (((time.time() - start) / len(positions)) * ((tot_bits - pop) - len(positions))) / (60*60)
# sys.stdout.write('Thread: [%s] %s out of total (%s) %.2f hours\r' % (key, len(positions), tot_bits - pop, estimated))
d &= d - 1
print('\n',tot, pop)
# def worker_sparse(key):
threads = []
for key in range(0, 85):
tot = redis_db.strlen(key) * 8
pop = redis_db.bitcount(key)
if pop < (tot - pop) and False:
t = threading.Thread(target=worker_sparse, args=(key,))
else:
t = threading.Thread(target=worker_dense, args=(key,))
threads.append(t)
t.start()
break
``` |
{
"source": "josl/sequence-reverse-complement",
"score": 3
} |
#### File: josl/sequence-reverse-complement/test.py
```python
import timeit
import time
from threading import Timer
import random
from string import maketrans
from Bio.Seq import Seq
# --------------------------------------
# reverse complement sequence:
# --------------------------------------
def reversecomplement():
'''Reverse complement'''
comp = ''
for s in seq:
if s == 'A':
comp = comp + 'T'
elif s == 'T':
comp = comp + 'A'
elif s == 'C':
comp = comp + 'G'
elif s == 'G':
comp = comp + 'C'
else:
comp = comp + s
return comp[::-1]
def reversecomplement_faster_A():
'''Reverse complement'''
mapping = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C'
}
return ''.join([mapping.get(s, s) for s in seq])[::-1]
def reversecomplement_faster_A1():
'''Reverse complement'''
mapping = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C'
}
return ''.join(mapping.get(s, s) for s in seq)[::-1]
def reversecomplement_faster_B():
'''Reverse complement'''
mapping = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C'
}
return ''.join([mapping.get(s, s) for s in reversed(seq)])
def reversecomplement_faster_B1():
'''Reverse complement'''
mapping = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C'
}
return ''.join(mapping.get(s, s) for s in reversed(seq))
def reversecomplement_faster_C():
'''Reverse complement'''
reverse = []
mapping = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C'
}
for s in seq:
reverse.append(mapping.get(s, s))
return ''.join(reverse)[::-1]
def ReverseComplementSUPERFAST():
return seq.translate(maketrans("ATGC", "TACG"))[::-1]
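# Note (comment added): string.maketrans is Python 2 only; a Python 3 equivalent would be
# seq.translate(str.maketrans("ATGC", "TACG"))[::-1] (illustrative, not benchmarked here).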
def ReverseComplementBIOPython():
my_seq.reverse_complement()
if __name__ == '__main__':
import timeit
# Global to make it visible to all functions
seq = ''.join(random.choice('CGTA') for _ in xrange(10000))
print 'Old and slow string concat'
print (timeit.timeit(
"""reversecomplement()""",
number=1000,
setup="from __main__ import reversecomplement"))
print "''.join([mapping.get(s, s) for s in seq])[::-1]"
print (timeit.timeit(
"""reversecomplement_faster_A()""",
number=1000,
setup="from __main__ import reversecomplement_faster_A"))
print "''.join(mapping.get(s, s) for s in seq)[::-1]"
print (timeit.timeit(
"""reversecomplement_faster_A1()""",
number=1000,
setup="from __main__ import reversecomplement_faster_A1"))
print "''.join([mapping.get(s, s) for s in reversed(seq)])"
print (timeit.timeit(
"""reversecomplement_faster_B()""",
number=1000,
setup="from __main__ import reversecomplement_faster_B"))
print "''.join(mapping.get(s, s) for s in reversed(seq))"
print (timeit.timeit(
"""reversecomplement_faster_B1()""",
number=1000,
setup="from __main__ import reversecomplement_faster_B1"))
print """
for s in seq:
reverse.append(mapping.get(s, s))
return ''.join(reverse)[::-1] """
print (timeit.timeit(
"""reversecomplement_faster_C()""",
number=1000,
setup="from __main__ import reversecomplement_faster_C"))
print 'seq.translate(maketrans("ATGC","TACG"))[::-1]'
print (timeit.timeit(
"""ReverseComplementSUPERFAST()""",
number=1000,
setup="from __main__ import ReverseComplementSUPERFAST"))
print 'Reverse Bio PYthon'
my_seq = Seq(seq)
print (timeit.timeit(
"""ReverseComplementBIOPython()""",
number=1000,
setup="from __main__ import ReverseComplementBIOPython"))
``` |
{
"source": "JosmanPS/foe-gae",
"score": 2
} |
#### File: foe-gae/foe/views.py
```python
from django.shortcuts import redirect, render, render_to_response, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from .forms import *
from .models import *
from django.utils.text import slugify
#
# FOE, main site
#
def index(request):
args = dict()
return render_to_response('foe/main/index.html', args)
@login_required
def registro_oe(request):
args = dict()
usuario = request.user
oe_usuario = OrganizacionEstudiantil(usuario=usuario)
oe = OrganizacionEstudiantil.objects.filter(usuario=usuario)
args['completo'] = False
if request.method == 'POST':
print(usuario)
if oe:
form = OEForm(request.POST, request.FILES, instance=oe[0])
else:
form = OEForm(request.POST, request.FILES, instance=oe_usuario)
if form.is_valid():
f = form.save()
f.slug = slugify(f.nombre)
f.save()
return redirect(reverse('registro_bancario'))
else:
if oe:
form = OEForm(instance=oe[0])
else:
form = OEForm(instance=oe_usuario)
args['form'] = form
return render(request, "foe/forms/registroOE.html", args)
@login_required
def registro_comite(request):
args = dict()
usuario = request.user
cm_usuario = Comite(usuario=usuario)
cm = Comite.objects.filter(usuario=usuario)
if request.method == 'POST':
print(usuario)
if cm:
form = ComiteForm(request.POST, request.FILES, instance=cm[0])
else:
form = ComiteForm(request.POST, request.FILES, instance=cm_usuario)
print(request.FILES)
print(form.is_valid())
if form.is_valid():
form.save()
return redirect('/')
else:
if cm:
form = ComiteForm(instance=cm[0])
else:
form = ComiteForm(instance=cm_usuario)
args['form'] = form
return render(request, "foe/forms/comite.html", args)
@login_required
def miembros_oe(request):
args = dict()
usuario = request.user
oe = get_object_or_404(OrganizacionEstudiantil, usuario=usuario)
pres_oe = Miembro(organizacion_estudiantil=oe,
cargo='presidente')
pres = Miembro.objects.filter(organizacion_estudiantil=oe,
cargo='presidente')
sec_oe = Miembro(organizacion_estudiantil=oe,
cargo='secretario')
sec = Miembro.objects.filter(
organizacion_estudiantil=oe,
cargo='secretario')
tes_oe = Miembro(
organizacion_estudiantil=oe,
cargo='tesorero')
tes = Miembro.objects.filter(
organizacion_estudiantil=oe,
cargo='tesorero')
redes_oe = Miembro(
organizacion_estudiantil=oe,
cargo='redes')
redes = Miembro.objects.filter(
organizacion_estudiantil=oe,
cargo='redes')
if request.method == 'POST':
if pres:
form_presidente = MiembroForm(
request.POST,
request.FILES,
instance=pres[0],
prefix='presidente')
else:
form_presidente = MiembroForm(
request.POST,
request.FILES,
instance=pres_oe,
prefix='presidente')
if sec:
form_secretario = MiembroForm(
request.POST,
request.FILES,
instance=sec[0],
prefix='secretario')
else:
form_secretario = MiembroForm(
request.POST,
request.FILES,
instance=sec_oe,
prefix='secretario')
if tes:
form_tesorero = MiembroForm(
request.POST,
request.FILES,
instance=tes[0],
prefix='tesorero')
else:
form_tesorero = MiembroForm(
request.POST,
request.FILES,
instance=tes_oe,
prefix='tesorero')
if redes:
form_redes = MiembroForm(
request.POST,
request.FILES,
instance=redes[0],
prefix='redes')
else:
form_redes = MiembroForm(
request.POST,
request.FILES,
instance=redes_oe,
prefix='redes')
if form_presidente.is_valid():
form_presidente.save()
if form_secretario.is_valid():
form_secretario.save()
if form_tesorero.is_valid():
form_tesorero.save()
if form_redes.is_valid():
form_redes.save()
return redirect('/')
else:
if pres:
form_presidente = MiembroForm(
instance=pres[0],
prefix='presidente')
else:
form_presidente = MiembroForm(
instance=pres_oe,
prefix='presidente')
if sec:
form_secretario = MiembroForm(
instance=sec[0],
prefix='secretario')
else:
form_secretario = MiembroForm(
instance=sec_oe,
prefix='secretario')
if tes:
form_tesorero = MiembroForm(
instance=tes[0],
prefix='tesorero')
else:
form_tesorero = MiembroForm(
instance=tes_oe,
prefix='tesorero')
if redes:
form_redes = MiembroForm(
instance=redes[0],
prefix='redes')
else:
form_redes = MiembroForm(
instance=redes_oe,
prefix='redes')
args['form_presidente'] = form_presidente
args['form_secretario'] = form_secretario
args['form_tesorero'] = form_tesorero
args['form_redes'] = form_redes
return render(request, "foe/forms/miembro.html", args)
@login_required
def datos_bancarios(request):
args = dict()
usuario = request.user
oe = get_object_or_404(OrganizacionEstudiantil, usuario=usuario)
m_oe = DatosBancarios(organizacion_estudiantil=oe)
m = DatosBancarios.objects.filter(organizacion_estudiantil=oe)
if request.method == 'POST':
print(usuario)
if m:
form = BancarioForm(request.POST, request.FILES, instance=m[0])
else:
form = BancarioForm(request.POST, request.FILES, instance=m_oe)
if form.is_valid():
form.save()
return redirect(reverse('registro_miembro'))
else:
if m:
form = BancarioForm(instance=m[0])
else:
form = BancarioForm(instance=m_oe)
args['form'] = form
return render(request, "foe/forms/datos-bancarios.html", args)
def directorio(request):
args = dict()
oes = OrganizacionEstudiantil.objects.all()
oes = oes.order_by('clasificacion', 'nombre')
args['organizaciones'] = oes
return render(request, "foe/main/directorio.html", args)
def perfil_oe(request, oe_slug):
args = dict()
oe = get_object_or_404(
OrganizacionEstudiantil, slug=oe_slug)
args['oe'] = oe
args['logo_url'] = oe.logo._get_url()
args['plan_trabajo_url'] = oe.plan_trabajo._get_url()
args['presupuesto_url'] = oe.presupuesto._get_url()
return render(request, "foe/main/perfil.html", args)
``` |
{
"source": "JosmarSuarez/centermask2",
"score": 2
} |
#### File: modeling/centermask/proposal_utils.py
```python
import math
import torch
from detectron2.structures import Instances
def add_ground_truth_to_proposals(targets, proposals):
"""
Call `add_ground_truth_to_proposals_single_image` for all images.
Args:
targets(list[Instances]): list of N elements. Element i is a Boxes
representing the ground-truth for image i.
proposals (list[Instances]): list of N elements. Element i is a Instances
representing the proposals for image i.
Returns:
list[Instances]: list of N Instances. Each is the proposals for the image,
with field "proposal_boxes" and "objectness_logits".
"""
assert targets is not None
assert len(proposals) == len(targets)
if len(proposals) == 0:
return proposals
return [
add_ground_truth_to_proposals_single_image(targets_i, proposals_i)
for targets_i, proposals_i in zip(targets, proposals)
]
def add_ground_truth_to_proposals_single_image(targets_i, proposals):
"""
Augment `proposals` with ground-truth boxes from `gt_boxes`.
Args:
Same as `add_ground_truth_to_proposals`, but with targets and proposals
per image.
Returns:
Same as `add_ground_truth_to_proposals`, but for only one image.
"""
device = proposals.scores.device
proposals.proposal_boxes = proposals.pred_boxes
proposals.remove("pred_boxes")
# Concatenating gt_boxes with proposals requires them to have the same fields
# Assign all ground-truth boxes an objectness logit corresponding to P(object) \approx 1.
gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10)))
gt_logits = gt_logit_value * torch.ones(len(targets_i), device=device)
gt_proposal = Instances(proposals.image_size)
gt_proposal.proposal_boxes = targets_i.gt_boxes
# to have the same fields with proposals
gt_proposal.scores = gt_logits
gt_proposal.pred_classes = targets_i.gt_classes
gt_proposal.locations = torch.ones((len(targets_i), 2), device=device)
new_proposals = Instances.cat([proposals, gt_proposal])
return new_proposals
``` |
{
"source": "josmcg/Mask_RCNN",
"score": 3
} |
#### File: Mask_RCNN/training/train.py
```python
import mrcnn.model as modellib
from config import PageConfig, classes
import click
from dataset.dataset import PageDataset
from dataset.dataset import ICDAR_convert
def train(epochs, save_dir, data_dir, collapse=False):
collapse = bool(collapse)
# just in case
if data_dir[-1] != '/':
data_dir += '/'
data_train = PageDataset('train', data_dir, collapse)
data_train.load_page(classes=classes)
data_train.prepare()
data_val = PageDataset('val', data_dir, collapse)
data_val.load_page(classes=classes)
data_val.prepare()
config = PageConfig()
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=save_dir)
try:
model_path = model.find_last()
print("reloading wieghts from {}".format(model_path))
except Exception:
model_path = model.get_imagenet_weights()
model.load_weights(model_path, by_name=True, exclude=['mrcnn_bbox_fc', 'mrcnn_class_logits', 'mrcnn_mask'])
model.train(data_train, data_val,
learning_rate=config.LEARNING_RATE,
epochs=epochs,
layers='heads')
@click.command()
@click.argument("epochs",type=click.INT)
@click.argument("save_dir")
@click.argument("data_dir")
@click.option("--collapse", default=False)
def main(*inputs,**kwargs):
train(*inputs,**kwargs)
if __name__ == "__main__":
main()
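# Example invocation (comment added; paths are illustrative only):
#   python train.py 40 ./logs ./data/pages --collapse True
# i.e. EPOCHS SAVE_DIR DATA_DIR, with the optional --collapse flag forwarded to PageDataset.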
``` |
{
"source": "josmejia2401/angox-core-lib",
"score": 3
} |
#### File: angox-core-lib/utils/path.py
```python
from urllib.parse import urlparse
from config.config_dto import ConfigDTO
import random
class UtilidadPath(object):
def __init__(self, config: ConfigDTO = None):
self.config = config
def allows_path(self, original_path = None) -> str:
if not original_path:
return None, True
if original_path[0] != '/':
original_path = '/' + original_path
if len(self.config.routes.routes) > 0:
for route in self.config.routes.routes:
if not route.url or len(route.url) == 0:
continue
num_asterisks = str(route.path).count('*')
route_url = None
route_path = route.path
if len(route.url) > 0:
route_url = random.choice(route.url)
if num_asterisks == 0:
if original_path == route_path:
return f'{route_url}{original_path}', route.need_authentication
elif num_asterisks == 1:
new_path = route_path.replace('*', '')
new_path = original_path.replace(new_path, '')
num_asterisks_ = new_path.count('/')
if num_asterisks_ < 2:
return f'{route_url}{original_path}', route.need_authentication
else:
new_path = route_path.replace('*', '')
new_path = new_path.replace('//', '/')
if new_path in original_path:
return f'{route_url}{original_path}', route.need_authentication
raise Exception('The path does not match any route (3)', original_path)
else:
raise Exception('No routes defined')
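# Illustrative matching behaviour (comment added; derived from the branches above):
#   route path '/users'     -> exact match only
#   route path '/users/*'   -> matches '/users/1' and '/users/1/x', but not '/users/1/x/y'
#                              (the remainder after the prefix may contain at most one '/')
#   route path '/users/*/*' (two or more '*') -> substring match on the collapsed prefix '/users/'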
``` |
{
"source": "josmejia2401/angox-media-api-speech-to-text",
"score": 2
} |
#### File: josmejia2401/angox-media-api-speech-to-text/handler.py
```python
from src.main.controllers.controller import Controller1
from src.main.utils.core import config
from src.core_lib.logger2 import Logger2
import jsonpickle
controller = Controller1(config)
def lambda_handler(event, context):
try:
controller.speechToText({"advanced": {}, "fileName": "aa.mp3", "voice": '' })
#Logger2.setLog('media', config.application.name, 'INFO', jsonpickle.encode(event))
return "ok"
except Exception as e:
print(e)
#Logger2.setLog('media', config.application.name, 'ERROR', str(e))
raise e
if __name__ == '__main__':
try:
lambda_handler({"name": "jose"}, None)
except Exception as e:
Logger2.setLog('media', config.application.name, 'ERROR', str(e))
``` |
{
"source": "josmejia2401/angox-orchestrator-server",
"score": 3
} |
#### File: core_lib/dto/dto.py
```python
import json
import datetime
class DTO(object):
def __init__(self, dataAsJson={}, ignored_none=False):
self.ignored_none = ignored_none
self.id = None
if dataAsJson is None:
return
if 'id' in dataAsJson and dataAsJson['id']:
self.id = dataAsJson['id']
def validate(self, validateItems=[]):
for item in validateItems:
if any(item == element for element in self.__dict__):
if item not in self.__dict__ or self.__dict__[item] is None:
raise Exception(item + ' not found (1)')
else:
raise Exception(item + ' not found (2)')
def default(self, o):
if isinstance(o, (datetime.date, datetime.datetime)):
return o.isoformat()
return json.JSONEncoder.default(self, o)
def to_json(self):
to_json_value = {}
for k in self.__dict__:
if k == 'ignored_none':
continue
if self.ignored_none == True and self.__dict__[k] is None:
continue
v = self.__dict__[k]
if v and isinstance(v, (datetime.date, datetime.datetime)):
v = v.isoformat()
to_json_value[k] = v
#return json.dumps(self.__dict__, sort_keys=True, indent=1, default=self.default)
return to_json_value
def get_json(self):
return {
'id': self.id,
'updateAt': self.updateAt.isoformat(),
'createAt': self.createAt.isoformat()
}
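# Minimal usage sketch (comment added; UserDTO and its fields are hypothetical):
# class UserDTO(DTO):
#     def __init__(self, data={}):
#         DTO.__init__(self, data)
#         self.name = data.get('name')
# UserDTO({'id': '1', 'name': 'Ana'}).validate(['name'])  # raises if 'name' is missing or None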
```
#### File: angox-orchestrator-server/core_lib/service.py
```python
import requests
from requests.exceptions import HTTPError
from core_lib.config.config_dto import ConfigDTO
class ServiceExternal(object):
def __init__(self, config:ConfigDTO = None):
self.__config = config
def get(self, url, params=None, is_json=True):
try:
response = requests.get(url, params=params)
if response and response.status_code > 299:
raise Exception(response.content)
if is_json == True:
return response.json()
return response.content
except HTTPError as e:
raise e
except Exception as e:
raise e
def post(self, url, params=None, payload=None, data=None, is_json=True):
try:
response = requests.post(url, data=data, json=payload, params=params)
if response and response.status_code > 299:
raise Exception(response.content)
if is_json == True:
return response.json()
return response.content
except HTTPError as e:
raise e
except Exception as e:
raise e
def put(self, url, params=None, payload=None, data=None, is_json=True):
try:
response = requests.put(url, data=data, json=payload, params=params)
if response and response.status_code > 299:
raise Exception(response.content)
if is_json == True:
return response.json()
return response.content
except HTTPError as e:
raise e
except Exception as e:
raise e
def delete(self, url, params=None, payload=None, data=None, is_json=True):
try:
response = requests.delete(url, data=data, json=payload, params=params)
if response and response.status_code > 299:
raise Exception(response.content)
if is_json == True:
return response.json()
return response.content
except HTTPError as e:
raise e
except Exception as e:
raise e
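# Usage sketch (comment added; the URL is illustrative only):
# svc = ServiceExternal(config)
# data = svc.get('https://example.com/api/items', params={'page': 1})  # parsed JSON by default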
``` |
{
"source": "josmejia2401/angox-security-ms-account",
"score": 2
} |
#### File: main/controllers/controller.py
```python
from src.main.utils.core import ControllerDataBase
from src.main.utils.core import ConfigDTO
from src.main.dto.dto import AccountDTO
from src.main.controllers.token import TokenExternal
class Controller1(ControllerDataBase):
def __init__(self, config:ConfigDTO = None):
ControllerDataBase.__init__(self, config, 'sa_security_account')
self.token_external = TokenExternal(config)
def create(self, payload=None) -> any:
account_dto = AccountDTO(payload)
account_dto.validate(['fullName', 'username','password', 'email'])
response = self.get_service().create(account_dto.to_json())
if response:
del response['password']
return response
def update(self, id=None, payload=None, upsert=False) -> any:
account_dto = AccountDTO(payload, True, True)
account_dto.validate(['fullName', 'username','password', 'email'])
response = self.get_service().update(id, account_dto.to_json(), upsert)
if response:
del response['password']
return response
def delete(self, id=None) -> int:
raise NotImplementedError('delete is not supported')
def get(self, id=None) -> any:
if id is None or not id:
raise Exception('the ID is not found')
response = self.get_service().get(id)
if response:
del response['password']
return response
def sign_in(self, payload=None) -> any:
account_dto = AccountDTO(payload)
account_dto.validate(['username','password'])
response = self.get_service().get_one({'username': account_dto.username, 'password': account_dto.password})
print(1, response)
if response:
print(2)
del response['password']
print(3, response['id'])
token = self.token_external.create_token(response['id'])
print(4, token)
response['token'] = token['token']
print(5, response)
return response
``` |
{
"source": "josmejia2401/face_recognition_python",
"score": 2
} |
#### File: app/camera/alert.py
```python
import copy
import boto3
from app.dto.record import ConfigDTO, StreamDTO
from app.camera.q import Q
import time
class Alert(Q):
def __init__(self, config: ConfigDTO = None):
super().__init__()
self.__config = copy.deepcopy(config)
self.current_mov = 0
self.client = None
def process_item(self, item: StreamDTO = None) -> None:
try:
if item is None:
return
if item.is_mov == True:
self.current_mov += 1
if self.current_mov > self.__config.general.maxMovements and self.client:
phoneNumber = self.__config.alarm.indicator + self.__config.alarm.phoneNumber
self.client.publish(PhoneNumber=phoneNumber, Message=self.__config.alarm.defaultMessage)
self.current_mov = 0
except Exception as e:
print("Record_process_item", e)
def process_status(self)-> None:
try:
if self.__config.alarm.delay_status > 0:
time.sleep(self.__config.alarm.delay_status)
except: pass
def empty_queue_for_lock(self)-> None:
        if self.__config.alarm.delay > 0:
            time.sleep(self.__config.alarm.delay)
else:
self.apply_lock()
def initialize(self):
if self.__config.aws.accessKey and self.__config.aws.secretKey and self.__config.aws.regionName:
try:
self.client = boto3.client(self.__config.aws.client, aws_access_key_id=self.__config.aws.accessKey, aws_secret_access_key=self.__config.aws.secretKey, region_name=self.__config.aws.regionName)
self.run_queue()
except Exception as e:
print(e)
def stop(self) -> None:
self.stop_queue()
```
#### File: app/camera/process_frame.py
```python
from app.dto.record import FrameDTO, StreamDTO, ConfigDTO
from app.camera.frame import Frame
from app.camera.record import Record
from app.camera.alert import Alert
from app.camera.q import Q
import copy
import time
class ProcessFrame(Q):
def __init__(self, config: ConfigDTO = None):
super().__init__()
self.__config = copy.deepcopy(config)
self.__frame = Frame(self.__config)
self.__record = Record(self.__config)
self.__alert = Alert(self.__config)
def process_item(self, item: FrameDTO = None) -> None:
try:
if item is None:
return
is_mov = self.__frame.is_movement(item.frame1, item.frame2)
stream_dto = StreamDTO(item.source, item.frame2, self.__config.camera.cameraType, is_mov, item.image)
self.__record.put_item(stream_dto)
self.__alert.put_item(stream_dto)
except Exception as e:
print("ProcessFrame__process", e)
def process_status(self)-> None:
try:
if self.__config.general.delay_status > 0:
time.sleep(self.__config.general.delay_status)
except: pass
def empty_queue_for_lock(self)-> None:
if self.__config.general.delay > 0:
time.sleep(self.__config.general.delay)
else:
self.apply_lock()
def initialize(self):
self.__record.initialize()
self.__alert.initialize()
self.run_queue()
def stop(self) -> None:
self.stop_queue()
self.__record.stop()
self.__alert.stop()
```
#### File: app/dto/record.py
```python
class StreamDTO:
def __init__(self, source, frame, camera_type, is_mov, image):
self.source = source
self.frame = frame
self.camera_type = camera_type
self.is_mov = is_mov
self.image = image
class FrameDTO:
def __init__(self, source, frame1, frame2, image):
self.source = source
self.frame1 = frame1
self.frame2 = frame2
self.image = image
#config
class GeneralDTO:
def __init__(self, data_json = {}):
self.maxMovements = int(data_json["MAX_MOVEMENTS"])
self.onGuard = int(data_json["ON_GUARD"])
self.minAreaObject = int(data_json["MIN_AREA_OBJECT"])
self.delay = float(data_json["DELAY"])
self.delay_status = float(data_json["DELAY_STATUS"])
class AlarmDTO:
def __init__(self, data_json = {}):
self.phoneNumber = str(data_json["PHONE_NUMBER"])
self.indicator = str(data_json["INDICATOR"])
self.maxMessages = str(data_json["MAX_MESSAGES"])
self.defaultMessage = str(data_json["DEFAULT_MESSAGE"])
self.delay = float(data_json["DELAY"])
self.delay_status = float(data_json["DELAY_STATUS"])
class AwsDTO:
def __init__(self, data_json = {}):
self.accessKey = str(data_json["ACCESS_KEY"])
self.secretKey = str(data_json["SECRET_KEY"])
self.regionName = str(data_json["REGION_NAME"])
self.client = str(data_json["CLIENT"])
class CameraDTO:
def __init__(self, data_json = {}):
self.dimWidth = int(data_json["DIM_WIDTH"])
self.dimHeight = int(data_json["DIM_HEIGHT"])
self.cameraType = int(data_json["CAMERA_TYPE"])
self.applyResize = False
class RecordDTO:
def __init__(self, data_json = {}):
self.fps = float(data_json["FPS"])
self.maxTimeOutSeg = int(data_json["MAX_TIME_OUT_SEG"])
self.delay = float(data_json["DELAY"])
self.delay_status = float(data_json["DELAY_STATUS"])
class StreamingDTO:
def __init__(self, data_json = {}):
self.host = str(data_json["HOST"])
self.port = int(data_json["PORT"])
self.username = str(data_json["USERNAME"])
self.password = str(data_json["PASSWORD"])
self.delay = float(data_json["DELAY"])
self.delay_status = float(data_json["DELAY_STATUS"])
class ConfigDTO:
def __init__(self, data_json = {}):
self.general = GeneralDTO(data_json["GENERAL"])
self.alarm = AlarmDTO(data_json["ALARM"])
self.aws = AwsDTO(data_json["AWS"])
self.camera = CameraDTO(data_json["CAMERA"])
self.record = RecordDTO(data_json["RECORD"])
self.streaming = StreamingDTO(data_json["STREAMING"])
```
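To make the expected configuration shape concrete, here is an illustrative dictionary that `ConfigDTO` would accept; the key names mirror the constructors above, while every value is an invented placeholder.
```python
# Illustrative input for ConfigDTO; keys mirror the DTO constructors, values are made up.
sample_config = {
    "GENERAL": {"MAX_MOVEMENTS": 5, "ON_GUARD": 1, "MIN_AREA_OBJECT": 10000,
                "DELAY": 0.5, "DELAY_STATUS": 1.0},
    "ALARM": {"PHONE_NUMBER": "3001234567", "INDICATOR": "+57", "MAX_MESSAGES": "3",
              "DEFAULT_MESSAGE": "Movement detected", "DELAY": 0.5, "DELAY_STATUS": 1.0},
    "AWS": {"ACCESS_KEY": "<access-key>", "SECRET_KEY": "<secret-key>",
            "REGION_NAME": "us-east-1", "CLIENT": "sns"},
    "CAMERA": {"DIM_WIDTH": 640, "DIM_HEIGHT": 480, "CAMERA_TYPE": 0},
    "RECORD": {"FPS": 20.0, "MAX_TIME_OUT_SEG": 30, "DELAY": 0.5, "DELAY_STATUS": 1.0},
    "STREAMING": {"HOST": "localhost", "PORT": 5000, "USERNAME": "user", "PASSWORD": "<password>",
                  "DELAY": 0.5, "DELAY_STATUS": 1.0},
}
config = ConfigDTO(sample_config)  # populates all of the nested DTOs defined above
```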
#### File: trash/camera copy 2/process_frame.py
```python
from dto.record import FrameDTO, StreamDTO, ConfigDTO
from camera.frame import Frame
from camera.record import Record
from camera.alert import Alert
from camera.streaming import Streaming
from camera.q import Q
import copy
import time
class ProcessFrame(Q):
def __init__(self, config: ConfigDTO = None):
super().__init__()
self.__config = copy.deepcopy(config)
self.__frame = Frame(self.__config)
self.__record = Record(self.__config)
self.__alert = Alert(self.__config)
self.__streaming = Streaming(self.__config, False)
def process_item(self, item: FrameDTO = None) -> None:
try:
if item is None:
return
is_mov = self.__frame.is_movement(item.frame1, item.frame2)
if self.__config.camera.defaultDim == True:
new_frame = self.__frame.resize(item.frame1)
else:
new_frame = item.frame1
_, image = self.__frame.frame_to_image(new_frame)
stream_dto = StreamDTO(item.source, new_frame, self.__config.camera.cameraType, is_mov, image)
self.__record.put_item(stream_dto)
self.__alert.put_item(stream_dto)
self.__streaming.put_item(stream_dto)
except Exception as e:
print("ProcessFrame__process", e)
def process_status(self)-> None:
try:
if self.__config.general.delay_status > 0:
time.sleep(self.__config.general.delay_status)
except: pass
def empty_queue_for_lock(self)-> None:
if self.__config.general.delay > 0:
time.sleep(self.__config.general.delay)
else:
self.apply_lock()
def initialize(self):
self.__record.initialize()
self.__alert.initialize()
self.__streaming.initialize()
self.run_queue()
def stop(self) -> None:
self.stop_queue()
self.__record.stop()
self.__alert.stop()
self.__streaming.stop()
```
#### File: trash/camera copy 2/streaming2.py
```python
from socketIO_client import SocketIO, LoggingNamespace
from dto.record import StreamDTO, ConfigDTO
from threading import Thread, RLock, Condition
import queue
import base64
import time
import copy
from socketIO_client import SocketIO, BaseNamespace
start_broadcasting = 0
class Namespace(BaseNamespace):
def message(self, data):
try:
print("type", type(data))
if isinstance(data, dict):
if "operation" in data and data["operation"] == 'broadcasting':
global start_broadcasting
start_broadcasting = int(data["message"])
print("message", data)
except Exception as e:
pass
def on_message(self, data):
try:
print("type", type(data))
if isinstance(data, dict):
if "operation" in data and data["operation"] == 'broadcasting':
global start_broadcasting
start_broadcasting = int(data["message"])
print("on_message", data)
except Exception as e:
pass
def on_handle_message(self, data):
try:
print("type", type(data))
if isinstance(data, dict):
if "operation" in data and data["operation"] == 'broadcasting':
global start_broadcasting
start_broadcasting = int(data["message"])
print("on_handle_message", data)
except Exception as e:
pass
def handle_message(self, data):
try:
print("type", type(data))
if isinstance(data, dict):
if "operation" in data and data["operation"] == 'broadcasting':
global start_broadcasting
start_broadcasting = int(data["message"])
print("handle_message", data)
except Exception as e:
pass
def on_connect(self):
print('[Connected]')
def on_reconnect(self):
print('[Reconnected]')
def on_disconnect(self):
print('[Disconnected]')
#https://pypi.org/project/socketIO-client/
class Streaming:
def __init__(self, config: ConfigDTO = None, wait_for_connection = True):
super().__init__()
self.__config = copy.deepcopy(config)
self.wait_for_connection = wait_for_connection
self.q = queue.Queue(maxsize=1000)
self.condition = Condition(RLock())
self.started = False
self.waiting = False
self.socketIO = None
def __build(self):
#while True:
data_auth = {'username': self.__config.streaming.username, 'password': self.__config.streaming.password}
self.socketIO = SocketIO(host=self.__config.streaming.host, port=self.__config.streaming.port, Namespace=Namespace, wait_for_connection=self.wait_for_connection, params=data_auth)
self.socketIO.wait(seconds=1)
def __check_socket(self):
if self.socketIO and self.socketIO.connected == False:
self.socketIO.connect()
def initialize(self):
try:
self.__build()
self.__check_socket()
except Exception as e:
print(e)
def __on_bbb_response(self, *args):
print('on_bbb_response', args)
def __process(self, item: StreamDTO = None) -> None:
try:
if item is None:
return
jpeg = item.image.tobytes()
jpeg = base64.b64encode(jpeg).decode('utf-8')
image = "data:image/jpeg;base64,{}".format(jpeg)
item = {'image': True, 'source': item.source, 'buff': image, 'username': self.__config.streaming.username}
self.__check_socket()
if self.socketIO:
self.socketIO.emit('handle_frame', item, callback=self.__on_bbb_response)
except Exception as e:
print(e)
def __worker(self):
while self.started == True:
if self.q.empty() == True:
with self.condition:
self.waiting = True
self.condition.wait()
self.waiting = False
else:
item = self.q.get()
self.__process(item)
self.q.task_done()
def put_nowait(self, item: StreamDTO = None) -> None:
global start_broadcasting
if start_broadcasting > 1:
self.q.put_nowait(item)
self.run()
if self.waiting == True:
with self.condition:
self.condition.notify_all()
def put(self, item: StreamDTO = None, block=True, timeout=None) -> None:
global start_broadcasting
if start_broadcasting > 1:
self.q.put(item, block, timeout)
self.run()
if self.waiting == True:
with self.condition:
self.condition.notify_all()
def run(self) -> None:
if self.started == True:
return
self.started = True
self.thr = Thread(target=self.__worker, daemon=True)
self.thr.start()
def join(self) -> None:
self.q.join()
def __unsuscriber(self):
if self.socketIO:
#self.socketIO.emit('on_unsubscriber', callback=self.__on_bbb_response)
self.socketIO.emit('unsubscriber', callback=self.__on_bbb_response)
self.socketIO.wait_for_callbacks(seconds=1)
self.socketIO.disconnect()
def stop(self):
self.started = False
self.__unsuscriber()
def __del__(self):
self.__config = None
self.q = None
self.started = None
self.socketIO = None
self.condition = None
self.waiting = None
self.wait_for_connection = None
```
#### File: trash/camera copy/camera_async.py
```python
import cv2
from threading import Thread, Lock
import time
class CameraAsync:
def __init__(self, src=0, name='0', width=None, height=None):
super().__init__()
self.name = name
self.src = src
self.width=width
self.height = height
#others
self.started = False
self.read_lock = Lock()
self.stream = None
self.grabbed1, self.frame1 = None, None
self.grabbed2, self.frame2 = None, None
def __put_stream__(self) -> None:
if self.stream and self.stream.isOpened():
return
self.stream = cv2.VideoCapture(self.src)
if self.width is None or self.height is None:
self.height = int(self.stream.get(3))
self.width = int(self.stream.get(4))
self.stream.set(3, self.height)
self.stream.set(4, self.width)
def initialize(self) -> None:
self.__put_stream__()
def get_started(self) -> bool:
return self.started
def get_dimentions(self):
return self.width, self.height
def start(self) -> None:
if self.started == True:
return
self.started = True
self.thread = Thread(target=self.__update, args=(), name=self.name, daemon=True)
self.thread.start()
def __update(self) -> None:
while self.started == True and self.stream.isOpened() == True:
(grabbed, frame) = self.stream.read()
self.read_lock.acquire()
if self.frame1 is None or self.frame2 is None:
self.grabbed1, self.frame1 = grabbed, frame
else:
self.grabbed1, self.frame1 = self.grabbed2, self.frame2
self.grabbed2, self.frame2 = grabbed, frame
self.read_lock.release()
def read(self) -> any:
try:
self.read_lock.acquire()
frame1 = self.frame1.copy()
grabbed1 = self.grabbed1
frame2 = self.frame2.copy()
grabbed2 = self.grabbed2
except Exception as e:
grabbed1, frame1, grabbed2, frame2 = False, None, False, None
finally:
self.read_lock.release()
return grabbed1, frame1, grabbed2, frame2
def stop(self) -> any:
try:
self.started = False
time.sleep(0.9)
self.thread.join()
self.release()
self.grabbed1, self.frame1 = None, None
self.grabbed2, self.frame2 = None, None
self.stream = None
except Exception as e:
print(e)
def release(self):
if self.stream:
self.stream.release()
@staticmethod
def list_devices():
index = 0
arr = []
cap = None
while True:
try:
cap = cv2.VideoCapture(index)
if not cap.read()[0]:
break
else:
arr.append(index)
cap.release()
index += 1
except Exception as e:
print(e)
cap.release()
return arr
def __exit__(self, exc_type, exc_value, traceback):
print("exit")
self.stop()
def __del__(self):
print("del")
self.stop()
```
#### File: face_recognition_python/trash/circle.py
```python
import cv2
import numpy as np
class Cyrcle:
def __init__(self):
super().__init__()
def get_detector_with_params_circle(self):
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 0
params.maxThreshold = 255
# Set edge gradient
params.thresholdStep = 5
# Filter by Area.
params.filterByArea = True
params.minArea = 1000
# Set up the detector with default parameters.
detector = cv2.SimpleBlobDetector_create(params)
return detector
def _process_circle(self, detector, frame):
# Detect blobs.
keypoints = detector.detect(frame)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
return im_with_keypoints
```
#### File: face_recognition_python/trash/live2.py
```python
import cv2
import numpy as np
def get_detector_with_params_circle():
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 0
params.maxThreshold = 255
# Set edge gradient
params.thresholdStep = 5
# Filter by Area.
params.filterByArea = True
params.minArea = 1000
# Set up the detector with default parameters.
detector = cv2.SimpleBlobDetector_create(params)
return detector
def _process_circle(detector, frame):
# Detect blobs.
keypoints = detector.detect(frame)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
return im_with_keypoints
def _show(name, item):
# Show keypoints circle
cv2.imshow(name, item)
def _process_frame(frame1, frame2):
# Difference between frame1(image) and frame2(image)
diff = cv2.absdiff(frame1, frame2)
# Converting color image to gray_scale image
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
# Converting gray scale image to GaussianBlur, so that change can be find easily
blur = cv2.GaussianBlur(gray, (5, 5), 0)
# If pixel value is greater than 20, it is assigned white(255) otherwise black
_, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=4)
# finding contours of moving object
contours, hirarchy = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
return contours, hirarchy, thresh
def _draw_rectangle(frame, x, y, w, h, text):
    cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)
cv2.putText(frame, text, (x+5, y-5), font, 1, (255, 255, 255), 2)
def _is_object(contour):
referenceArea = cv2.contourArea(contour)
if referenceArea < 10000:
return None
return referenceArea
# capturing video
cap = cv2.VideoCapture(0)
# cap.set(3, 640) # set video width
# cap.set(4, 480) # set video height
# reading back-to-back frames(images) from video
cont = 0
font = cv2.FONT_HERSHEY_SIMPLEX
REFERENCE_AREA = 36
detector = get_detector_with_params_circle()
ret, frame1 = cap.read()
ret, frame2 = cap.read()
while cap.isOpened():
contours, hirarchy, thresh = _process_frame(frame1, frame2)
# making rectangle around moving object
for contour in contours:
(x, y, w, h) = cv2.boundingRect(contour)
referenceArea = _is_object(contour)
if referenceArea is None:
continue
text = "area {} px2".format(referenceArea)
_draw_rectangle(frame1, x, y, w, h, text)
#im_with_keypoints = _process_circle(detector, frame2)
print("hay movimiento", cont)
cont += 1
#_show("circle", im_with_keypoints)
# Display original frame
_show('Motion Detector', frame1)
    # Display difference frame - grayscale - shows the changes
#_show('Difference Frame', thresh)
# Displaying image in gray_scale
# cv2.imshow("Gray_Frame", gray)
# Assign frame2(image) to frame1(image)
frame1 = frame2
# Read new frame2
ret, frame2 = cap.read()
# Press 'esc' for quit
if cv2.waitKey(40) == 27:
break
# Release cap resource
cap.release()
# Destroy all windows
cv2.destroyAllWindows()
```
#### File: server/webserver/api.py
```python
from flask import Flask, request, Response, render_template
import server.webserver.controllers as c
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
@app.route('/')
def index():
return render_template('index.html')
@app.route('/video-feed', methods=["GET", "POST"])
def video_feed():
return Response(c._video_feed(), mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/video-stop', methods=["GET", "POST"])
def video_stop():
return c._video_stop()
@app.route('/video-reset', methods=["GET", "POST"])
def video_reset():
return c._video_reset()
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', '*')
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
return response
```
#### File: server/webserver/controllers.py
```python
from app.camera.kernel import Kernel
_k = Kernel()
_k.initialize()
def _video_feed() -> any:
try:
global _k
if _k:
return _k.get_frame()
return None
except ValueError as e:
return str(e), 400
except Exception as e:
print(e)
return str(e), 500
def _video_stop() -> any:
try:
global _k
if _k:
_k.stop_streaming()
return "OK", 200
return None
except ValueError as e:
return str(e), 400
except Exception as e:
print(e)
return str(e), 500
def _video_reset() -> any:
try:
global _k
if _k:
_k.stop()
_k = Kernel()
_k.initialize()
return "OK", 200
return None
except ValueError as e:
return str(e), 400
except Exception as e:
print(e)
return str(e), 500
``` |
{
"source": "josmejia2401/temp-notes",
"score": 3
} |
#### File: temp-notes/mc-web/app.py
```python
from main.util.config import Config
from flask import Flask, render_template, send_from_directory
import os
#
app = Flask(__name__, static_folder="build", template_folder="build")
@app.route("/")
def hello():
return render_template('index.html')
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def serve(path):
if path.startswith('notes/'):
path = path.replace("notes/", "")
if path != "" and os.path.exists(app.static_folder + '/' + path):
return send_from_directory(app.static_folder, path)
else:
return render_template('index.html')
if __name__ == '__main__':
serverConfig = Config().get_object('server')
app.run(host=serverConfig['host'], port=serverConfig['port'], debug=serverConfig['debug'])
```
#### File: main/controllers/note_controller.py
```python
from main.services.note_service import NoteService
from main.db.entity.note import NoteEntity
class NoteController(object):
def __init__(self, config = {}):
self.__noteService = NoteService(config)
def insert(self, username=None, payload=None) -> any:
try:
note = NoteEntity(payload)
note.validate(['title', 'description'])
return self.__noteService.insert(username=username, payload=note.get_json())
except Exception as e:
raise e
def get(self, username=None, noteId=None) -> any:
try:
if not username or not noteId:
raise Exception('username or noteId not found')
result = self.__noteService.get(username, noteId)
return result
except Exception as e:
raise e
def get_all(self, username=None) -> any:
try:
if not username:
raise Exception('username not found')
return self.__noteService.get_all(username)
except Exception as e:
raise e
def update(self, username=None, noteId=None, payload=None) -> any:
try:
if not payload or not username or not noteId:
raise Exception('payload or username or noteId not found')
payload['username'] = username
note = NoteEntity(payload)
note.validate(['title', 'description'])
return self.__noteService.update(username, noteId, note.get_json())
except Exception as e:
raise e
def delete(self, username=None, noteId=None) -> any:
try:
if not username or not noteId:
raise Exception('username or noteId not found')
return self.__noteService.delete(username, noteId)
except Exception as e:
raise e
```
#### File: main/db/db.py
```python
import abc
from pymongo import MongoClient
from pymongo.errors import AutoReconnect
from main.util.db_util import retry
retry_auto_reconnect = retry(3, (AutoReconnect,))
class Database(abc.ABC):
def __init__(self):
pass
@abc.abstractmethod
def connect(self):
pass
@abc.abstractmethod
def disconnect(self):
pass
@abc.abstractmethod
def reconnect(self):
pass
@abc.abstractmethod
def get_db(self, name):
pass
# @Database.register
# https://api.mongodb.com/python/3.3.0/api/pymongo/mongo_client.html
class MongoDatabase(Database):
def __init__(self, config = {}):
super().__init__()
self.__config = config
self.__client = None
def connect(self):
try:
            print('connecting to MongoDB!')
db_config = self.__config.get_object('db')
general_config = self.__config.get_object('general')
# variables
uri = db_config['uri2']
name = db_config['name']
maxPoolSize = None if db_config['maxPoolSize'] == 0 else db_config['maxPoolSize']
minPoolSize = 0 if db_config['minPoolSize'] == 0 else db_config['minPoolSize']
maxIdleTimeMS = None if db_config['maxIdleTimeMS'] == 0 else db_config['maxIdleTimeMS']
socketTimeoutMS = None if db_config['socketTimeoutMS'] == 0 else db_config['socketTimeoutMS']
connectTimeoutMS = None if db_config['connectTimeoutMS'] == 0 else db_config['connectTimeoutMS']
serverSelectionTimeoutMS = None if db_config['serverSelectionTimeoutMS'] == 0 else db_config['serverSelectionTimeoutMS']
appName = general_config['appName']
            # connection
            self.__client = MongoClient(host=uri,
                appname=appName,
                maxPoolSize=maxPoolSize, # maximum number of concurrent connections per server; requests block when maxPoolSize connections are pending (default 100, cannot be 0)
                minPoolSize=minPoolSize, # minimum number of concurrent connections the pool keeps open per server (default 0)
                maxIdleTimeMS=maxIdleTimeMS, # maximum time in ms a connection may stay idle in the pool before being removed and replaced (default None, no limit)
                socketTimeoutMS=socketTimeoutMS, # how long in ms the driver waits for a response to a normal (non-monitoring) operation before raising a network error (default None, no timeout)
                connectTimeoutMS=connectTimeoutMS, # how long in ms the driver waits while connecting a new socket to a server before declaring it unavailable (default 20000)
                serverSelectionTimeoutMS=serverSelectionTimeoutMS # how long in ms the driver waits to find a suitable server for a database operation (default 30000)
)
            print('connected to MongoDB!')
except Exception as e:
print('MongoDatabase.connect', e)
raise e
def disconnect(self):
if self.__client:
self.__client.close()
@retry_auto_reconnect
def reconnect(self):
        print('reconnecting to MongoDB!')
db_config = self.__config.get_object('db')
general_config = self.__config.get_object('general')
# variables
uri = db_config['uri2']
name = db_config['name']
maxPoolSize = None if db_config['maxPoolSize'] == 0 else db_config['maxPoolSize']
minPoolSize = 0 if db_config['minPoolSize'] == 0 else db_config['minPoolSize']
maxIdleTimeMS = None if db_config['maxIdleTimeMS'] == 0 else db_config['maxIdleTimeMS']
socketTimeoutMS = None if db_config['socketTimeoutMS'] == 0 else db_config['socketTimeoutMS']
connectTimeoutMS = None if db_config['connectTimeoutMS'] == 0 else db_config['connectTimeoutMS']
serverSelectionTimeoutMS = None if db_config['serverSelectionTimeoutMS'] == 0 else db_config['serverSelectionTimeoutMS']
appName = general_config['appName']
        # connection
        self.__client = MongoClient(host=uri,
            appname=appName,
            maxPoolSize=maxPoolSize, # maximum number of concurrent connections per server; requests block when maxPoolSize connections are pending (default 100, cannot be 0)
            minPoolSize=minPoolSize, # minimum number of concurrent connections the pool keeps open per server (default 0)
            maxIdleTimeMS=maxIdleTimeMS, # maximum time in ms a connection may stay idle in the pool before being removed and replaced (default None, no limit)
            socketTimeoutMS=socketTimeoutMS, # how long in ms the driver waits for a response to a normal (non-monitoring) operation before raising a network error (default None, no timeout)
            connectTimeoutMS=connectTimeoutMS, # how long in ms the driver waits while connecting a new socket to a server before declaring it unavailable (default 20000)
            serverSelectionTimeoutMS=serverSelectionTimeoutMS # how long in ms the driver waits to find a suitable server for a database operation (default 30000)
)
        print('reconnected to MongoDB!')
def get_db(self, name: str = None):
if name and self.__client:
return self.__client[name]
elif self.__client:
db_config = self.__config.get_object('db')
name = db_config['name']
return self.__client[name]
else:
            raise Exception('database client is not initialized')
```
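For reference, a sketch of the `db` and `general` sections of `config.json` that `connect()` and `reconnect()` read; the key names come from the code above, the values are placeholders, and `0` means "fall back to the pymongo default" for the pool and timeout settings.
```python
# Illustrative config sections consumed by MongoDatabase (all values are placeholders).
sample_config = {
    "general": {"appName": "temp-notes"},
    "db": {
        "uri2": "mongodb://localhost:27017",
        "name": "notes",
        "maxPoolSize": 0,
        "minPoolSize": 0,
        "maxIdleTimeMS": 0,
        "socketTimeoutMS": 0,
        "connectTimeoutMS": 0,
        "serverSelectionTimeoutMS": 0,
    },
}
```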
#### File: temp-notes/ms-reverse-proxy/app_requests.py
```python
from urllib.parse import urlparse, urljoin
from flask import Flask, render_template, request, Response, redirect
import requests
from main.util.config import Config
from main.log.log import logger, logging
app = Flask(__name__)
config = Config()
logging.getLogger('requests').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.ERROR)
logging.getLogger('werkzeug').setLevel(logging.ERROR)
logging.getLogger('requests.packages.urllib3').setLevel(logging.ERROR)
@app.after_request
def after_request(response):
    response.headers.add("Access-Control-Allow-Origin", "*")
response.headers.add('Access-Control-Allow-Headers', "*")
response.headers.add('Access-Control-Allow-Methods', "*")
return response
@app.route('/<path:url>', methods=["GET", "POST", "PUT", "DELETE"])
def proxy(url):
try:
targetHost = __allows_path(url, request.method)
print('url - targetHost', url, targetHost)
if not targetHost:
return {}, 403
r = make_request(url, targetHost, request.method, dict(request.headers))
headers = dict(r.raw.headers)
def generate():
for chunk in r.raw.stream(decode_content=False):
yield chunk
out = Response(generate(), headers=headers)
out.status_code = r.status_code
return out
except Exception as e:
logger.error(e)
return 'Error interno', 500
def make_request(url, targetHost, method, headers={}):
if targetHost['ignorePath'] == True:
targetPath = __normalize_url('', targetHost['path'])
url = __normalize_url('', url)
url = url.replace(targetPath, '')
url = __normalize_url(targetHost['url'], url)
else:
url = __normalize_url(targetHost['url'], url)
        logger.info('ignorePath is disabled, forwarding to %s', url)
host = request.headers.get('host')
if host:
headers.update({ "Host" : "%s" % (targetHost['url'])})
if method.upper() == 'GET' or method.upper() == 'DELETE':
return requests.request(method, url, params=request.args, stream=True, headers=headers, allow_redirects=False)
if request.is_json and request.json and len(request.json) > 0:
return requests.request(method, url, params=request.args, stream=True, headers=headers, allow_redirects=False, json=request.json)
elif request.form and len(request.form) > 0:
return requests.request(method, url, params=request.args, stream=True, headers=headers, allow_redirects=False, data=request.form)
elif request.data and len(request.data) > 0:
return requests.request(method, url, params=request.args, stream=True, headers=headers, allow_redirects=False, data=request.data)
else:
return requests.request(method, url, params=request.args, stream=True, headers=headers, allow_redirects=False)
def __normalize_url(url, path):
if not url.endswith('/'):
url = url + '/'
if path.endswith('/'):
path = path[:-1]
if path.startswith('/'):
path = path[1:]
return urljoin(url, path)
def __allows_path(url, command):
if not url:
return None
targetHost = config.get_object('targetHost')
new_url = __normalize_url('localhost', url)
for target in targetHost:
target_url = __normalize_url('localhost', target['path'])
if new_url in target_url:
return target
elif target_url in new_url:
return target
return None
if __name__ == "__main__":
serverConfig = config.get_object('server')
app.run(host=serverConfig['host'], port=serverConfig['port'], debug=serverConfig['debug'])
```
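A hypothetical `targetHost` section for this proxy, to show what `__allows_path` and `make_request` expect; the field names (`path`, `url`, `ignorePath`) come from the code above, and the routes are invented.
```python
# Invented 'targetHost' entries; field names match what the proxy code reads.
sample_target_hosts = [
    {"path": "/api/notes", "url": "http://localhost:5001", "ignorePath": True},
    {"path": "/api/users", "url": "http://localhost:5002", "ignorePath": False},
]
# With ignorePath=True a request to /api/notes/123 is forwarded as http://localhost:5001/123;
# with ignorePath=False the full incoming path is appended to the target url.
```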
#### File: main/util/config.py
```python
import json
from pathlib import Path
import os
class Config(object):
def __init__(self):
self.__config_json = None
self.__load_json()
def __load_json(self):
current_dir = Path(__file__).parent
file_path = os.path.join(current_dir, 'resources', 'config.json')
contBreak = 0
while os.path.exists(file_path) == False:
if contBreak > 3: break
current_dir = Path(current_dir).parent
file_path = os.path.join(current_dir, 'resources', 'config.json')
contBreak += 1
with open(file_path) as json_file:
self.__config_json = json.load(json_file)
def get_config(self) -> any:
return self.__config_json
def get_object(self, name: str) -> any:
return self.__config_json[name]
``` |
{
"source": "josmejia2401/xyz_voice",
"score": 3
} |
#### File: core/collections/activation.py
```python
import sys
import time
from datetime import datetime
from core.skill import AssistantSkill
class ActivationSkills(AssistantSkill):
@classmethod
def enable_assistant(cls, ext = None, template = None, values = None, history = []):
cls.set_activation(True)
r = template.format("Se activa el asistente")
cls.response(r)
@classmethod
def disable_assistant(cls, ext = None, template = None, values = None, history = []):
cls.set_activation(False)
r = template.format("Se desactiva el asistente")
cls.response(r)
@classmethod
def assistant_greeting(cls, ext = None, template = None, values = None, history = []) -> None:
try:
if cls.get_activation() == False:
return
r = ''
now = datetime.now()
day_time = int(now.strftime('%H'))
if day_time < 12:
r = template.format('Buenos días mi señor')
elif 12 <= day_time < 18:
r = template.format("Buenas tardes mi señor")
else:
r = template.format('Buenas noches mi señor')
cls.response(r)
except Exception as e:
print("ActivationSkills.assistant_greeting", e)
r = template.format("No se pudo procesar el comando")
cls.response(r)
```
#### File: core/collections/alarm.py
```python
import re
import time
import datetime
from utils.text_number import to_number
from threading import Thread
from apscheduler.schedulers.background import BackgroundScheduler
#from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.combining import OrTrigger
import uuid
from core.skill import AssistantSkill
time_intervals = {
'segundos': {'variations': ['segundos', 'segundo'],
'scheduler_interval': 'seconds'
},
'minutos': {'variations': ['minutos', 'minuto'],
'scheduler_interval': 'minutes'
},
'horas': {'variations': ['hora', 'horas'],
'scheduler_interval': 'hours'
},
'mes': {'variations': ['mes', 'meses'],
'scheduler_interval': 'months'
},
'año': {'variations': ['año', 'años'],
'scheduler_interval': 'years'
},
'lunes': {'variations': ['lunes', 'lune'],
'scheduler_interval': 'mon'
},
'martes': {'variations': ['martes', 'marte'],
'scheduler_interval': 'tue'
},
'miercoles': {'variations': ['miercoles', 'miercole'],
'scheduler_interval': 'wed'
},
'jueves': {'variations': ['jueves', 'jueve'],
'scheduler_interval': 'thu'
},
'viernes': {'variations': ['viernes', 'vierne'],
'scheduler_interval': 'fri'
},
'sabado': {'variations': ['sabado', 'sabados'],
'scheduler_interval': 'sat'
},
'domingo': {'variations': ['domingo', 'domingos'],
'scheduler_interval': 'sun'
},
}
def get_id():
uuid_x = str(uuid.uuid1())
return uuid_x
class AlarmSkills(AssistantSkill):
scheduler = BackgroundScheduler()
alarm_pending = []
@classmethod
def stop_all(cls, ext=None, template=None, values=None, history=[]):
try:
if cls.get_activation() == False:
return
cls.alarm_pending = []
cls.scheduler.remove_all_jobs()
cls.scheduler.shutdown()
r = template.format("Se detienen todas las alarmas")
cls.response(r)
except Exception as e:
r = template.format("No se pudo detener todas las alarmas")
cls.response(r)
@classmethod
def list_all(cls, ext=None, template=None, values=None, history=[]):
try:
if cls.get_activation() == False:
return
if cls.alarm_pending:
cls.response("Las alarmas son:")
for alarm in cls.alarm_pending:
day_of_week = alarm["day_of_week"]
hour = alarm["hour"]
minute = alarm["minute"]
r = "semana: {} , hora: {} , minuto: {}".format(day_of_week, hour, minute)
cls.response(r)
r = template.format("Se listaron todas las alarmas")
cls.response(r)
else:
r = template.format("No hay alarmas")
cls.response(r)
except Exception as e:
r = template.format("No se pudo listar las alarmas")
cls.response(r)
@classmethod
def _alarm_minutes(cls, idx, duration):
cls.scheduler.remove_job(job_id=idx)
for p in cls.alarm_pending:
if p["id"] == idx:
cls.alarm_pending.remove(p)
break
cls.response("<NAME>")
@classmethod
def create_alarm_time_minutes(cls, ext=None, template=None, values=None, history=[]):
try:
if cls.get_activation() == False:
return
#if isinstance(values, tuple):
values_x = values[0]
if isinstance(values_x, list):
duration = values_x[0]
else:
duration = values_x
duration = cls._replace_words_with_numbers(duration)
if duration is not None:
idx = get_id()
scheduler_interval = 'minutes'
interval = {scheduler_interval: int(duration)}
job = cls.scheduler.add_job(cls._alarm_minutes, 'interval', **interval, id=idx, args=[idx, duration])
cls.alarm_pending.append({"id": idx, "job": job, "day_of_week": None, "hour": None, "minute": duration})
if not cls.scheduler.running:
cls.scheduler.start()
r = template.format("He creado una alarma en {0} minutos".format(duration))
cls.response(r)
except Exception as e:
print(e)
r = template.format("No se pudo crear la alarma")
cls.response(r)
@classmethod
def _alarm_hours(cls, idx, duration):
cls.scheduler.remove_job(job_id=idx)
for p in cls.alarm_pending:
if p["id"] == idx:
cls.alarm_pending.remove(p)
break
cls.response("SONANDO ALARMA")
@classmethod
def create_alarm_time_hours(cls, ext=None, template=None, values=None, history=[]):
try:
if cls.get_activation() == False:
return
values_x = values[0]
if isinstance(values_x, list):
duration = values_x[0]
else:
duration = values_x
duration = cls._replace_words_with_numbers(duration)
if duration is not None:
idx = get_id()
scheduler_interval = 'hours'
interval = {scheduler_interval: int(duration)}
job = cls.scheduler.add_job(cls._alarm_hours, 'interval', **interval, id=idx, args=[idx, duration])
cls.alarm_pending.append({"id": idx, "job": job, "day_of_week": None, "hour": duration, "minute": None})
if not cls.scheduler.running:
cls.scheduler.start()
r = template.format("He creado una alarma en {0} horas".format(duration))
cls.response(r)
except Exception as e:
print(e)
r = template.format("No se pudo crear la alarma")
cls.response(r)
@classmethod
def _alarm_range_am(cls, idx):
cls.scheduler.remove_job(job_id=idx)
for p in cls.alarm_pending:
if p["id"] == idx:
cls.alarm_pending.remove(p)
break
cls.response("<NAME>")
@classmethod
def create_alarm_range_time_week_am(cls, ext=None, template=None, values=None, history=[]):
try:
if cls.get_activation() == False:
return
            #hour, minute, range start, range end
values_x = values[0]
if isinstance(values_x, list):
duration_h = values_x[0]
duration_m = values_x[1]
range_week_start = values_x[2]
range_week_end = values_x[3]
else:
duration_h = values_x
duration_m = values[1]
range_week_start = values[2]
range_week_end = values[3]
duration_h = cls._replace_words_with_numbers(duration_h)
duration_m = cls._replace_words_with_numbers(duration_m)
for time_interval in time_intervals.values():
for variation in time_interval['variations']:
if variation in range_week_start:
range_week_start = time_interval['scheduler_interval']
break
for time_interval in time_intervals.values():
for variation in time_interval['variations']:
if variation in range_week_end:
range_week_end = time_interval['scheduler_interval']
break
if duration_h is not None and duration_m is not None:
idx = get_id()
m2 = str(duration_h) + ":" + str(duration_m) + " AM"
m2 = cls._time_conversion(m2)
m2 = m2.split(":")
duration_h = m2[0].strip()
duration_m = m2[1].strip()
day_of_week = range_week_start + "-" + range_week_end
cron1 = CronTrigger(day_of_week=day_of_week, hour=duration_h, minute=duration_m, timezone='America/Bogota')
trigger = OrTrigger([cron1])
job = cls.scheduler.add_job(cls._alarm_range_am, trigger, id=idx, args=[idx])
cls.alarm_pending.append({"id": idx, "job": job, "day_of_week": day_of_week, "hour": duration_h, "minute": duration_m})
if not cls.scheduler.running:
cls.scheduler.start()
r = template.format("He creado la alarma {0} {1} {2} am".format(day_of_week, duration_h, duration_m))
cls.response(r)
except Exception as e:
print(e)
r = template.format("No se pudo crear la alarma")
cls.response(r)
@classmethod
def create_alarm_range_week_time_am(cls, ext=None, template=None, values=None, history=[]):
try:
if cls.get_activation() == False:
return
            #hour, minute, range start, range end
values_x = values[0]
if isinstance(values_x, list):
range_week_start = values_x[0]
range_week_end = values_x[1]
duration_h = values_x[2]
duration_m = values_x[3]
elif isinstance(values_x, tuple):
range_week_start = values_x[0]
range_week_end = values_x[1]
duration_h = values_x[2]
duration_m = values_x[3]
else:
range_week_start = values[0]
range_week_end = values[1]
duration_h = values[2]
duration_m = values[3]
duration_h = cls._replace_words_with_numbers(duration_h)
duration_m = cls._replace_words_with_numbers(duration_m)
for time_interval in time_intervals.values():
for variation in time_interval['variations']:
if variation in range_week_start:
range_week_start = time_interval['scheduler_interval']
break
for time_interval in time_intervals.values():
for variation in time_interval['variations']:
if variation in range_week_end:
range_week_end = time_interval['scheduler_interval']
break
if duration_h is not None and duration_m is not None:
idx = get_id()
m2 = str(duration_h) + ":" + str(duration_m) + " AM"
m2 = cls._time_conversion(m2)
m2 = m2.split(":")
duration_h = m2[0].strip()
duration_m = m2[1].strip()
day_of_week = range_week_start + "-" + range_week_end
cron1 = CronTrigger(day_of_week=day_of_week, hour=duration_h, minute=duration_m, timezone='America/Bogota')
trigger = OrTrigger([cron1])
job = cls.scheduler.add_job(cls._alarm_range_am, trigger, id=idx, args=[idx])
cls.alarm_pending.append({"id": idx, "job": job, "day_of_week": day_of_week, "hour": duration_h, "minute": duration_m})
if not cls.scheduler.running:
cls.scheduler.start()
r = template.format("He creado la alarma {0} {1} {2} am".format(day_of_week, duration_h, duration_m))
cls.response(r)
except Exception as e:
print(e)
r = template.format("No se pudo crear la alarma")
cls.response(r)
@classmethod
def _alarm_range_pm(cls, idx):
cls.scheduler.remove_job(job_id=idx)
for p in cls.alarm_pending:
if p["id"] == idx:
cls.alarm_pending.remove(p)
break
cls.response("SON<NAME>")
@classmethod
def create_alarm_range_time_week_pm(cls, ext=None, template=None, values=None, history=[]):
try:
if cls.get_activation() == False:
return
            #hour, minute, range start, range end
values_x = values[0]
if isinstance(values_x, list):
duration_h = values_x[0]
duration_m = values_x[1]
range_week_start = values_x[2]
range_week_end = values_x[3]
elif isinstance(values_x, tuple):
duration_h = values_x[0]
duration_m = values_x[1]
range_week_start = values_x[2]
range_week_end = values_x[3]
else:
duration_h = values[0]
duration_m = values[1]
range_week_start = values[2]
range_week_end = values[3]
duration_h = cls._replace_words_with_numbers(duration_h)
duration_m = cls._replace_words_with_numbers(duration_m)
for time_interval in time_intervals.values():
for variation in time_interval['variations']:
if variation in range_week_start:
range_week_start = time_interval['scheduler_interval']
break
for time_interval in time_intervals.values():
for variation in time_interval['variations']:
if variation in range_week_end:
range_week_end = time_interval['scheduler_interval']
break
if duration_h is not None and duration_m is not None:
idx = get_id()
m2 = str(duration_h) + ":" + str(duration_m) + " PM"
m2 = cls._time_conversion(m2)
m2 = m2.split(":")
duration_h = m2[0].strip()
duration_m = m2[1].strip()
day_of_week = range_week_start + "-" + range_week_end
cron1 = CronTrigger(day_of_week=day_of_week, hour=duration_h, minute=duration_m, timezone='America/Bogota')
trigger = OrTrigger([cron1])
job = cls.scheduler.add_job(cls._alarm_range_pm, trigger, id=idx, args=[idx])
cls.alarm_pending.append({"id": idx, "job": job, "day_of_week": day_of_week, "hour": duration_h, "minute": duration_m})
if not cls.scheduler.running:
cls.scheduler.start()
r = template.format("He creado la alarma {0} {1} {2} pm".format(day_of_week, duration_h, duration_m))
cls.response(r)
except Exception as e:
print(e)
r = template.format("No se pudo crear la alarma")
cls.response(r)
@classmethod
def create_alarm_range_week_time_pm(cls, ext=None, template=None, values=None, history=[]):
try:
if cls.get_activation() == False:
return
            #hour, minute, range start, range end
values_x = values[0]
if isinstance(values_x, list):
range_week_start = values_x[0]
range_week_end = values_x[1]
duration_h = values_x[2]
duration_m = values_x[3]
elif isinstance(values_x, tuple):
range_week_start = values_x[0]
range_week_end = values_x[1]
duration_h = values_x[2]
duration_m = values_x[3]
else:
range_week_start = values[0]
range_week_end = values[1]
duration_h = values[2]
duration_m = values[3]
duration_h = cls._replace_words_with_numbers(duration_h)
duration_m = cls._replace_words_with_numbers(duration_m)
for time_interval in time_intervals.values():
for variation in time_interval['variations']:
if variation in range_week_start:
range_week_start = time_interval['scheduler_interval']
break
for time_interval in time_intervals.values():
for variation in time_interval['variations']:
if variation in range_week_end:
range_week_end = time_interval['scheduler_interval']
break
if duration_h is not None and duration_m is not None:
idx = get_id()
m2 = str(duration_h) + ":" + str(duration_m) + " PM"
m2 = cls._time_conversion(m2)
m2 = m2.split(":")
duration_h = m2[0].strip()
duration_m = m2[1].strip()
day_of_week = range_week_start + "-" + range_week_end
cron1 = CronTrigger(day_of_week=day_of_week, hour=duration_h, minute=duration_m, timezone='America/Bogota')
trigger = OrTrigger([cron1])
job = cls.scheduler.add_job(cls._alarm_range_pm, trigger, id=idx, args=[idx])
cls.alarm_pending.append({"id": idx, "job": job, "day_of_week": day_of_week, "hour": duration_h, "minute": duration_m})
if not cls.scheduler.running:
cls.scheduler.start()
r = template.format("He creado la alarma {0} {1} {2} pm".format(day_of_week, duration_h, duration_m))
cls.response(r)
except Exception as e:
print(e)
r = template.format("No se pudo crear la alarma")
cls.response(r)
@classmethod
def _replace_words_with_numbers(cls, transcript):
transcript_with_numbers = ''
for word in transcript.split():
try:
number = to_number(word)
print(number)
transcript_with_numbers += ' ' + str(number)
except ValueError as e:
print(e)
transcript_with_numbers += ' ' + word
return transcript_with_numbers
@classmethod
def _time_conversion(cls, s):
if "PM" in s:
s=s.replace("PM"," ")
t= s.split(":")
if t[0] != '12':
t[0]=str(int(t[0])+12)
s= (":").join(t)
return s
else:
s = s.replace("AM"," ")
t= s.split(":")
if t[0] == '12':
t[0]='00'
s= (":").join(t)
return s
```
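As a quick illustration of the helper the alarm builders rely on, `_time_conversion` maps a 12-hour reading onto the 24-hour values fed to the cron trigger; the inputs below are invented.
```python
# Worked examples for AlarmSkills._time_conversion (inputs are invented).
# "07:30 PM" -> "PM" replaced by a space -> "07:30  " -> hour 07 != 12 -> hour 19
print(AlarmSkills._time_conversion("07:30 PM"))  # "19:30  " (callers then split on ":" and strip())
print(AlarmSkills._time_conversion("12:15 AM"))  # "00:15  "
```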
#### File: core/collections/internet.py
```python
from core.skill import AssistantSkill
import urllib.request as urllib2
class InternetSkills(AssistantSkill):
@classmethod
def _check_internet_connection(cls,**kwargs):
try:
urllib2.urlopen('http://www.google.com', timeout=1)
return True
except urllib2.URLError as err:
print(err)
return False
@classmethod
def internet_availability(cls, ext = None, template = None, values = None, history = []):
if cls.get_activation() == False:
return
if cls._check_internet_connection():
r = template.format("Hay conexión a internet")
cls.response(r)
else:
r = template.format("En este momento no hay conexión a internet")
cls.response(r)
```
#### File: core/collections/weather.py
```python
import requests
from utils.settings import WEATHER_API
from core.skill import AssistantSkill
from core.collections.geolocation import GeoLocationSkills
class WeatherSkills(AssistantSkill):
@classmethod
def tell_the_weather(cls, ext = None, template = None, values = None, history = []):
try:
if cls.get_activation() == False:
return
if WEATHER_API['KEY']:
city = cls._get_city()
if city:
temperature, temperature_min, temperature_max = cls._get_weather_status_and_temperature(city)
if temperature and temperature_min and temperature_max:
r = template.format("La temperatura actual para %s es %0.1f centigrados, temperatura mínima de %0.1f centigrados y temperatura máxima de %0.1f centigrados" % (city, temperature, temperature_min, temperature_max))
else:
r = template.format("Lo siento, en este momento no hay datos del tiempo.")
else:
r = template.format("Lo siento, en este momento no hay datos de tu localización.")
else:
r = template.format("Por favor define la llave de open weather map.")
cls.response(r)
except Exception as e:
r = template.format("En este momento no pude obtener datos del tiempo.")
cls.response(r)
@classmethod
def tell_the_weather_city(cls, ext = None, template = None, values = None, history = []):
try:
if cls.get_activation() == False:
return
if WEATHER_API['KEY']:
city = values[0]
if isinstance(city, list):
city = city[0]
elif isinstance(city, tuple):
city = city[0]
else:
city = city
if city:
temperature, temperature_min, temperature_max = cls._get_weather_status_and_temperature(city)
if temperature and temperature_min and temperature_max:
r = template.format("La temperatura actual para %s es %0.1f centigrados, temperatura mínima de %0.1f centigrados y temperatura máxima de %0.1f centigrados" % (city, temperature, temperature_min, temperature_max))
else:
r = template.format("Lo siento, en este momento no hay datos del tiempo.")
else:
r = template.format("Lo siento, en este momento no hay datos de tu localización.")
else:
r = template.format("Por favor define la llave de open weather map.")
cls.response(r)
except Exception as e:
r = template.format("En este momento no pude obtener datos del tiempo.")
cls.response(r)
"""
{
"coord":{
"lon":-74.08,
"lat":4.61
},
"weather":[
{
"id":803,
"main":"Clouds",
"description":"nubes rotas",
"icon":"04d"
}
],
"base":"stations",
"main":{
"temp":18,
"feels_like":12.72,
"temp_min":18,
"temp_max":18,
"pressure":1029,
"humidity":45
},
"visibility":10000,
"wind":{
"speed":6.2,
"deg":130
},
"clouds":{
"all":75
},
"dt":1601057296,
"sys":{
"type":1,
"id":8582,
"country":"CO",
"sunrise":1601030689,
"sunset":1601074246
},
"timezone":-18000,
"id":3688689,
"name":"Bogotá",
"cod":200
}
"""
@classmethod
def _get_weather_status_and_temperature(cls, city):
url = WEATHER_API["URL"].format(city, WEATHER_API["UNITS"], WEATHER_API["LANG"], WEATHER_API['KEY'])
r = requests.get(url=url)
if r.status_code == 200:
rx = r.json()
if rx["main"]:
return rx["main"]["temp"], rx["main"]["temp_min"], rx["main"]["temp_max"]
return None, None, None
@classmethod
def _get_city(cls):
return GeoLocationSkills._get_location()
```
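The skill above assumes a `WEATHER_API` settings dictionary; a plausible shape is sketched below. The URL pattern is a guess at the OpenWeatherMap current-weather endpoint (which matches the sample response embedded in the docstring), and every value is a placeholder.
```python
# Possible shape of utils.settings.WEATHER_API (URL pattern and values are assumptions).
WEATHER_API = {
    "URL": "https://api.openweathermap.org/data/2.5/weather?q={}&units={}&lang={}&appid={}",
    "UNITS": "metric",
    "LANG": "es",
    "KEY": "",  # OpenWeatherMap API key; the skill asks for it while this is empty
}
```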
#### File: xyz_voice/core/skill.py
```python
from engines.tts import TTSEngine
from engines.stt import STTEngine
import locale
#locale.setlocale(locale.LC_TIME, 'es_ES.UTF-8')
locale.setlocale(locale.LC_TIME, 'es_CO.utf8')
class AssistantSkill:
"""
This class is the parent of all skill classes.
"""
activation = True
@classmethod
def set_activation(cls, activation = True):
AssistantSkill.activation = activation
@classmethod
def get_activation(cls):
return AssistantSkill.activation
@classmethod
def set_stop_speaking(cls, stop_speaking = True):
TTSEngine.set_stop_speaking(stop_speaking)
if stop_speaking == True:
TTSEngine.stop_audio()
TTSEngine.stop_song()
@classmethod
def get_stop_speaking(cls):
return TTSEngine.stop_speaking
@classmethod
def response(cls, text):
"""
The mode of the response depends on the output engine:
- TTT Engine: The response is only in text
- TTS Engine: The response is in voice and text
"""
TTSEngine.play_text(text)
@classmethod
def play_sound(cls, filename, asyncx = False):
"""
The mode of the response depends on the output engine:
- TTT Engine: The response is only in text
- TTS Engine: The response is in voice and text
"""
TTSEngine.play_sound(filename, asyncx)
@classmethod
def user_input(cls):
response = STTEngine._recognize_speech_from_mic(already_activated=True)
return response
@classmethod
def new_history(cls, inputx, outputx):
        out = {
            "input": inputx,
            "output": outputx
        }
        return out
@classmethod
def extract_tags(cls, voice_transcript, tags):
"""
This method identifies the tags from the user transcript for a specific skill.
        e.g. suppose the user says "hi jarvis!".
        The skill analyzer will match it with the enable_assistant skill, which has tags 'hi, hello, ...'.
        This method identifies that the activating word was 'hi', not 'hello'.
:param voice_transcript: string
:param tags: string
:return: set
"""
try:
transcript_words = voice_transcript.split()
tags = tags.split(',')
return set(transcript_words).intersection(tags)
except Exception as e:
return set()
``` |
{
"source": "josmet52/amod",
"score": 3
} |
#### File: amod/lib/amod_interrupt_lib.py
```python
import time
import RPi.GPIO as GPIO
import math
import numpy as np
import scipy.stats as stat
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.patches as mpatches
import pdb
class Amod:
def __init__(self, from_who = ""):
# version infos
VERSION_NO = "0.01.01"
VERSION_DATE = "27.11.2020"
VERSION_DESCRIPTION = "prototype"
VERSION_STATUS = "initial version"
VERSION_AUTEUR = "josmet"
self.pin_cmd = 38 # control pin
self.pin_mes = 36 # measure pin
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(self.pin_cmd, GPIO.OUT) # initialize control pin
        GPIO.setup(self.pin_mes, GPIO.IN) # initialize measure pin (attention no pull-up or pull-down)
GPIO.add_event_detect(self.pin_mes, GPIO.RISING, callback=self.end_charge_reached)
GPIO.output(self.pin_cmd, GPIO.HIGH)
self.t_discharge = 250e-6 # time to discharge the capacitor
self.t_charge_stop = 0.0
self.t_charge_start = 0.0
self.stop_requierd = False
self.rep_int_time = 5.15e-3
self.v_timeout = 1
self.v_tol = 2.5 / 100 # 2.5 %
        self.filter = 1.5 # keep values within +/- n standard deviations
if from_who != "calibration": # if not in calibration read the ini data
with open('amod.ini', 'r') as ini_file:
data = ini_file.readlines()
params = data[0].split(",")
self.u_in_trig = float(params[0]) # the input trigger level (depend on the harware)
self.R1 = float(params[1]) # value of the resistor
self.C1 = float(params[2]) # value of the capacitor
self.rep_int_time = float(params[3]) # interrupt respons time
def get_tension(self, n_moyenne, show_histogram = False):
        GPIO.output(self.pin_cmd, GPIO.HIGH) # discharge the capacitor
        j = 0
        l_elapsed = []
        while j < n_moyenne:
            time.sleep(self.t_discharge) # give the capacitor time to discharge
            self.stop_requierd = False
            GPIO.output(self.pin_cmd, GPIO.LOW) # trigger the measurement
            self.t_charge_start = time.time() # start the timer
            #TODO: check whether the two operations above should be swapped
            while not self.stop_requierd:
                if time.time() - self.t_charge_start > self.v_timeout:
                    self.stop_requierd = True
                    print("missed interrupt")
            elapsed = (self.t_charge_stop - self.t_charge_start) - self.rep_int_time
            l_elapsed.append(elapsed)
            GPIO.output(self.pin_cmd, GPIO.HIGH) # trigger the capacitor discharge
            j += 1
        GPIO.output(self.pin_cmd, GPIO.LOW) # trigger the capacitor discharge
# get stats of data list
nx, mmx, mx, vx, skx, ktx = stat.describe(l_elapsed)
# filter the data list
df = pd.DataFrame(l_elapsed, columns=list('B'))
l_ref_filtered = df[((df.B - df.B.mean()) / df.B.std()).abs() < self.filter]
l_ref_filtered_mean = l_ref_filtered.B.mean()
        # create and show histogram
if show_histogram:
l_tension = []
for v in l_elapsed:
l_tension.append(self.u_in_trig / (1 - math.exp(- v / (self.R1 * self.C1))))
df1 = pd.DataFrame(l_tension, columns=list('B'))
l_tension_filtered = df1[((df1.B - df1.B.mean()) / df1.B.std()).abs() < self.filter]
l_tension_filtered_mean = l_tension_filtered.B.mean()
            # plot histogram
n, bins, patches = plt.hist(x=l_tension, bins=min(int(n_moyenne/4),50), color='#0504aa', alpha=0.7, rwidth=0.85)
plt.hist(x=l_tension_filtered, bins=bins, color='#ffff00', alpha=0.7, rwidth=0.85)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Avg = ' + '{:.3f}'.format(l_tension_filtered_mean))
plt.ylabel('Frequency')
plt.title("Filtered on " + str(self.filter) + " standard deviation")
plt.text(23, 45, r'$\mu=15, b=3$')
maxfreq = n.max()
# Set a clean upper y-axis limit.
plt.ylim(ymax=np.ceil(maxfreq/10) *10 if maxfreq % 10 else maxfreq + 10)
# insert a legend
blue_patch = mpatches.Patch(color='#0504aa', label='excluded')
yellow_patch = mpatches.Patch(color='#ffff00', label='used for avg')
plt.legend(handles=[blue_patch, yellow_patch])
plt.show()
u_average = self.u_in_trig / (1 - math.exp(- l_ref_filtered_mean / (self.R1 * self.C1)))
return u_average
def end_charge_reached(self, channel):
self.t_charge_stop = time.time()
self.stop_requierd = True
def set_param(self, u_in, xR1, xC1, n_moyenne, int_resp_time):
        GPIO.output(self.pin_cmd, GPIO.HIGH) # discharge the capacitor
        j = 0
        l_elapsed = []
        while j < n_moyenne:
            time.sleep(self.t_discharge) # give the capacitor time to discharge
            self.stop_requierd = False
            GPIO.output(self.pin_cmd, GPIO.LOW) # trigger the measurement
            self.t_charge_start = time.time() # start the timer
            #TODO: check whether the two operations above should be swapped
            while not self.stop_requierd:
                if time.time() - self.t_charge_start > self.v_timeout:
                    self.stop_requierd = True
                    print("missed interrupt")
            elapsed = (self.t_charge_stop - self.t_charge_start) - self.rep_int_time
            l_elapsed.append(elapsed)
            GPIO.output(self.pin_cmd, GPIO.HIGH) # trigger the capacitor discharge
            j += 1
        GPIO.output(self.pin_cmd, GPIO.LOW) # trigger the capacitor discharge
# get stats of data list
nx, mmx, mx, vx, skx, ktx = stat.describe(l_elapsed)
# filter the data list
df = pd.DataFrame(l_elapsed, columns=list('B'))
l_ref_filtered = df[((df.B - df.B.mean()) / df.B.std()).abs() < self.filter]
l_ref_filtered_mean = l_ref_filtered.B.mean()
u_trig_calc = u_in * (1 - math.exp(-l_ref_filtered_mean / (xR1 * xC1)))
with open('amod.ini', 'w') as ini_file:
ini_file.writelines(str(u_trig_calc) + "," + str(xR1) + "," + str(xC1) + "," + str(int_resp_time))
return u_trig_calc
if __name__ == '__main__':
#verify tension and filtering
amod = Amod()
a = amod.get_tension(50, show_histogram = True)
GPIO.cleanup()
``` |
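The `get_tension` and `set_param` routines above convert a measured RC charge time back into a voltage using the capacitor charging law u = u_trig / (1 - exp(-t / (R1 * C1))). Below is a minimal sketch of that conversion; the component values are illustrative placeholders, not the ones stored in `amod.ini`.
```python
import math

def time_to_voltage(t_charge, u_trig=2.5, r1=100e3, c1=1e-6):
    # Invert u_trig = u_in * (1 - exp(-t / (R1 * C1))) for u_in,
    # mirroring the expression used in get_tension above.
    return u_trig / (1 - math.exp(-t_charge / (r1 * c1)))

print(time_to_voltage(0.05))  # charge time in seconds -> estimated input voltage
```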
{
"source": "josmfred/fastMRI",
"score": 2
} |
#### File: fastmri/data/transforms.py
```python
import numpy as np
import torch
def to_tensor(data):
"""
Convert numpy array to PyTorch tensor.
For complex arrays, the real and imaginary parts are stacked along the last
dimension.
Args:
data (np.array): Input numpy array.
Returns:
torch.Tensor: PyTorch version of data.
"""
if np.iscomplexobj(data):
data = np.stack((data.real, data.imag), axis=-1)
return torch.from_numpy(data)
def tensor_to_complex_np(data):
"""
Converts a complex torch tensor to numpy array.
Args:
data (torch.Tensor): Input data to be converted to numpy.
Returns:
np.array: Complex numpy version of data.
"""
data = data.numpy()
return data[..., 0] + 1j * data[..., 1]
def apply_mask(data, mask_func, seed=None, padding=None):
"""
Subsample given k-space by multiplying with a mask.
Args:
data (torch.Tensor): The input k-space data. This should have at least 3 dimensions, where
dimensions -3 and -2 are the spatial dimensions, and the final dimension has size
2 (for complex values).
mask_func (callable): A function that takes a shape (tuple of ints) and a random
number seed and returns a mask.
seed (int or 1-d array_like, optional): Seed for the random number generator.
Returns:
(tuple): tuple containing:
masked data (torch.Tensor): Subsampled k-space data
mask (torch.Tensor): The generated mask
"""
shape = np.array(data.shape)
shape[:-3] = 1
mask = mask_func(shape, seed)
if padding is not None:
mask[:, :, : padding[0]] = 0
mask[:, :, padding[1] :] = 0 # padding value inclusive on right of zeros
masked_data = data * mask + 0.0 # the + 0.0 removes the sign of the zeros
return masked_data, mask
def mask_center(x, mask_from, mask_to):
mask = torch.zeros_like(x)
mask[:, :, :, mask_from:mask_to] = x[:, :, :, mask_from:mask_to]
return mask
def center_crop(data, shape):
"""
Apply a center crop to the input real image or batch of real images.
Args:
data (torch.Tensor): The input tensor to be center cropped. It should
have at least 2 dimensions and the cropping is applied along the
last two dimensions.
shape (int, int): The output shape. The shape should be smaller than
the corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image.
"""
assert 0 < shape[0] <= data.shape[-2]
assert 0 < shape[1] <= data.shape[-1]
w_from = (data.shape[-2] - shape[0]) // 2
h_from = (data.shape[-1] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to]
def complex_center_crop(data, shape):
"""
Apply a center crop to the input image or batch of complex images.
Args:
data (torch.Tensor): The complex input tensor to be center cropped. It
should have at least 3 dimensions and the cropping is applied along
dimensions -3 and -2 and the last dimensions should have a size of
2.
shape (int): The output shape. The shape should be smaller than
the corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-3]
assert 0 < shape[1] <= data.shape[-2]
w_from = (data.shape[-3] - shape[0]) // 2
h_from = (data.shape[-2] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to, :]
def center_crop_to_smallest(x, y):
"""
Apply a center crop on the larger image to the size of the smaller.
The minimum is taken over dim=-1 and dim=-2. If x is smaller than y at
dim=-1 and y is smaller than x at dim=-2, then the returned dimension will
be a mixture of the two.
Args:
x (torch.Tensor): The first image.
y (torch.Tensor): The second image
Returns:
        tuple: tuple of tensors x and y, each cropped to the minimum size.
"""
smallest_width = min(x.shape[-1], y.shape[-1])
smallest_height = min(x.shape[-2], y.shape[-2])
x = center_crop(x, (smallest_height, smallest_width))
y = center_crop(y, (smallest_height, smallest_width))
return x, y
def normalize(data, mean, stddev, eps=0.0):
"""
Normalize the given tensor.
Applies the formula (data - mean) / (stddev + eps).
Args:
data (torch.Tensor): Input data to be normalized.
mean (float): Mean value.
stddev (float): Standard deviation.
eps (float, default=0.0): Added to stddev to prevent dividing by zero.
Returns:
torch.Tensor: Normalized tensor
"""
return (data - mean) / (stddev + eps)
def normalize_instance(data, eps=0.0):
"""
    Normalize the given tensor with instance norm.
Applies the formula (data - mean) / (stddev + eps), where mean and stddev
are computed from the data itself.
Args:
data (torch.Tensor): Input data to be normalized
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
mean = data.mean()
std = data.std()
return normalize(data, mean, std, eps), mean, std
```
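A short sketch of how these helpers compose on a synthetic complex k-space slice, assuming the functions above are in scope; shapes and values are illustrative only.
```python
import numpy as np

# Synthetic single-coil k-space: 320 x 320 complex values.
kspace_np = (np.random.randn(320, 320) + 1j * np.random.randn(320, 320)).astype(np.complex64)

kspace = to_tensor(kspace_np)                      # torch.Size([320, 320, 2])
cropped = complex_center_crop(kspace, (128, 128))  # crop dims -3 and -2
image = (cropped ** 2).sum(dim=-1).sqrt()          # magnitude image, for illustration
normalized, mean, std = normalize_instance(image, eps=1e-11)
print(kspace.shape, cropped.shape, normalized.shape)
```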
#### File: models/varnet/varnet.py
```python
import math
import pathlib
import os
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from pytorch_lightning import Trainer
from torch import nn
from torch.nn import functional as F
from common.args import Args
from common.subsample import create_mask_for_mask_type
from data import transforms as T
from models.mri_model import MRIModel
from models.unet.unet_model import UnetModel
class DataTransform:
"""
Data Transformer for training Var Net models.
"""
def __init__(self, resolution, mask_func=None, use_seed=True):
"""
Args:
mask_func (common.subsample.MaskFunc): A function that can create a mask of
appropriate shape.
resolution (int): Resolution of the image.
use_seed (bool): If true, this class computes a pseudo random number generator seed
from the filename. This ensures that the same mask is used for all the slices of
a given volume every time.
"""
self.mask_func = mask_func
self.resolution = resolution
self.use_seed = use_seed
def __call__(self, kspace, mask, target, attrs, fname, slice):
"""
Args:
kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
data or (rows, cols, 2) for single coil data.
mask (numpy.array): Mask from the test dataset
target (numpy.array): Target image
attrs (dict): Acquisition related information stored in the HDF5 object.
fname (str): File name
slice (int): Serial number of the slice.
Returns:
(tuple): tuple containing:
masked_kspace (torch.Tensor): Masked k-space
mask (torch.Tensor): Mask
target (torch.Tensor): Target image converted to a torch Tensor.
fname (str): File name
slice (int): Serial number of the slice.
max_value (numpy.array): Maximum value in the image volume
"""
if target is not None:
target = T.to_tensor(target)
max_value = attrs['max']
else:
target = torch.tensor(0)
max_value = 0.0
kspace = T.to_tensor(kspace)
seed = None if not self.use_seed else tuple(map(ord, fname))
acq_start = attrs['padding_left']
acq_end = attrs['padding_right']
if self.mask_func:
masked_kspace, mask = T.apply_mask(
kspace, self.mask_func, seed, (acq_start, acq_end))
else:
masked_kspace = kspace
shape = np.array(kspace.shape)
num_cols = shape[-2]
shape[:-3] = 1
mask_shape = [1 for _ in shape]
mask_shape[-2] = num_cols
mask = torch.from_numpy(mask.reshape(
*mask_shape).astype(np.float32))
mask[:, :, :acq_start] = 0
mask[:, :, acq_end:] = 0
return masked_kspace, mask.byte(), target, fname, slice, max_value
class SSIM(nn.Module):
def __init__(self, win_size=7, k1=0.01, k2=0.03):
super().__init__()
self.win_size = win_size
self.k1, self.k2 = k1, k2
self.register_buffer('w', torch.ones(
1, 1, win_size, win_size) / win_size ** 2)
NP = win_size ** 2
self.cov_norm = NP / (NP - 1)
def forward(self, X, Y, data_range):
data_range = data_range[:, None, None, None]
C1 = (self.k1 * data_range) ** 2
C2 = (self.k2 * data_range) ** 2
ux = F.conv2d(X, self.w)
uy = F.conv2d(Y, self.w)
uxx = F.conv2d(X * X, self.w)
uyy = F.conv2d(Y * Y, self.w)
uxy = F.conv2d(X * Y, self.w)
vx = self.cov_norm * (uxx - ux * ux)
vy = self.cov_norm * (uyy - uy * uy)
vxy = self.cov_norm * (uxy - ux * uy)
A1, A2, B1, B2 = (2 * ux * uy + C1, 2 * vxy + C2,
ux ** 2 + uy ** 2 + C1, vx + vy + C2)
D = B1 * B2
S = (A1 * A2) / D
return 1 - S.mean()
class NormUnet(nn.Module):
def __init__(self, chans, num_pools):
super().__init__()
self.unet = UnetModel(
in_chans=2,
out_chans=2,
chans=chans,
num_pool_layers=num_pools,
drop_prob=0
)
def complex_to_chan_dim(self, x):
b, c, h, w, two = x.shape
assert two == 2
return x.permute(0, 4, 1, 2, 3).contiguous().view(b, 2 * c, h, w)
def chan_complex_to_last_dim(self, x):
b, c2, h, w = x.shape
assert c2 % 2 == 0
c = c2 // 2
return x.view(b, 2, c, h, w).permute(0, 2, 3, 4, 1)
def norm(self, x):
# Group norm
b, c, h, w = x.shape
x = x.contiguous().view(b, 2, c // 2 * h * w)
mean = x.mean(dim=2).view(b, 2, 1, 1, 1).expand(
b, 2, c // 2, 1, 1).contiguous().view(b, c, 1, 1)
std = x.std(dim=2).view(b, 2, 1, 1, 1).expand(
b, 2, c // 2, 1, 1).contiguous().view(b, c, 1, 1)
x = x.view(b, c, h, w)
return (x - mean) / std, mean, std
def unnorm(self, x, mean, std):
return x * std + mean
def pad(self, x):
def floor_ceil(n):
return math.floor(n), math.ceil(n)
b, c, h, w = x.shape
w_mult = ((w - 1) | 15) + 1
h_mult = ((h - 1) | 15) + 1
w_pad = floor_ceil((w_mult - w) / 2)
h_pad = floor_ceil((h_mult - h) / 2)
x = F.pad(x, w_pad + h_pad)
return x, (h_pad, w_pad, h_mult, w_mult)
def unpad(self, x, h_pad, w_pad, h_mult, w_mult):
return x[..., h_pad[0]:h_mult - h_pad[1], w_pad[0]:w_mult - w_pad[1]]
def forward(self, x):
x = self.complex_to_chan_dim(x)
x, mean, std = self.norm(x)
x, pad_sizes = self.pad(x)
x = self.unet(x)
x = self.unpad(x, *pad_sizes)
x = self.unnorm(x, mean, std)
x = self.chan_complex_to_last_dim(x)
return x
class VarNetBlock(nn.Module):
def __init__(self, model):
super(VarNetBlock, self).__init__()
self.model = model
self.dc_weight = nn.Parameter(torch.ones(1))
self.register_buffer('zero', torch.zeros(1, 1, 1, 1, 1))
def forward(self, current_kspace, ref_kspace, mask, sens_maps):
def sens_expand(x):
return T.fft2(T.complex_mul(x, sens_maps))
def sens_reduce(x):
x = T.ifft2(x)
return T.complex_mul(x, T.complex_conj(sens_maps)).sum(dim=1, keepdim=True)
def soft_dc(x):
return torch.where(mask, x - ref_kspace, self.zero) * self.dc_weight
return current_kspace - \
soft_dc(current_kspace) - \
sens_expand(self.model(sens_reduce(current_kspace)))
class SensitivityModel(nn.Module):
def __init__(self, chans, num_pools):
super().__init__()
self.norm_unet = NormUnet(chans, num_pools)
def chans_to_batch_dim(self, x):
b, c, *other = x.shape
return x.contiguous().view(b * c, 1, *other), b
def batch_chans_to_chan_dim(self, x, batch_size):
bc, one, *other = x.shape
c = bc // batch_size
return x.view(batch_size, c, *other)
def divide_root_sum_of_squares(self, x):
return x / T.root_sum_of_squares_complex(x, dim=1).unsqueeze(-1).unsqueeze(1)
def forward(self, masked_kspace, mask):
def get_low_frequency_lines(mask):
l = r = mask.shape[-2] // 2
while mask[..., r, :]:
r += 1
while mask[..., l, :]:
l -= 1
return l + 1, r
l, r = get_low_frequency_lines(mask)
num_low_freqs = r - l
pad = (mask.shape[-2] - num_low_freqs + 1) // 2
x = T.mask_center(masked_kspace, pad, pad + num_low_freqs)
x = T.ifft2(x)
x, b = self.chans_to_batch_dim(x)
x = self.norm_unet(x)
x = self.batch_chans_to_chan_dim(x, b)
x = self.divide_root_sum_of_squares(x)
return x
class VariationalNetworkModel(MRIModel):
def __init__(self, hparams):
super().__init__(hparams)
self.sens_net = SensitivityModel(
hparams.sens_chans, hparams.sens_pools)
self.cascades = nn.ModuleList([
VarNetBlock(NormUnet(hparams.chans, hparams.pools))
for _ in range(hparams.num_cascades)
])
self.ssim_loss = SSIM()
def forward(self, masked_kspace, mask):
sens_maps = self.sens_net(masked_kspace, mask)
kspace_pred = masked_kspace.clone()
for cascade in self.cascades:
kspace_pred = cascade(kspace_pred, masked_kspace, mask, sens_maps)
return T.root_sum_of_squares(T.complex_abs(T.ifft2(kspace_pred)), dim=1)
def training_step(self, batch, batch_idx):
masked_kspace, mask, target, fname, _, max_value = batch
output = self.forward(masked_kspace, mask)
target, output = T.center_crop_to_smallest(target, output)
ssim_loss = self.ssim_loss(output.unsqueeze(
1), target.unsqueeze(1), data_range=max_value)
return {'loss': ssim_loss, 'log': {'train_loss': ssim_loss.item()}}
def validation_step(self, batch, batch_idx):
masked_kspace, mask, target, fname, slice, max_value = batch
output = self.forward(masked_kspace, mask)
target, output = T.center_crop_to_smallest(target, output)
return {
'fname': fname,
'slice': slice,
'output': output.cpu().numpy(),
'target': target.cpu().numpy(),
'val_loss': self.ssim_loss(output.unsqueeze(1), target.unsqueeze(1), data_range=max_value),
}
def test_step(self, batch, batch_idx):
masked_kspace, mask, _, fname, slice, _ = batch
output = self.forward(masked_kspace, mask)
b, h, w = output.shape
crop_size = min(w, self.hparams.resolution)
output = T.center_crop(output, (crop_size, crop_size))
return {
'fname': fname,
'slice': slice,
'output': output.cpu().numpy(),
}
def configure_optimizers(self):
optim = torch.optim.Adam(
self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay)
scheduler = torch.optim.lr_scheduler.StepLR(
optim, self.hparams.lr_step_size, self.hparams.lr_gamma)
return [optim], [scheduler]
def train_data_transform(self):
mask = create_mask_for_mask_type(self.hparams.mask_type, self.hparams.center_fractions,
self.hparams.accelerations)
return DataTransform(self.hparams.resolution, mask, use_seed=False)
def val_data_transform(self):
mask = create_mask_for_mask_type(self.hparams.mask_type, self.hparams.center_fractions,
self.hparams.accelerations)
return DataTransform(self.hparams.resolution, mask)
def test_data_transform(self):
mask = create_mask_for_mask_type(self.hparams.mask_type, self.hparams.center_fractions,
self.hparams.accelerations)
return DataTransform(self.hparams.resolution, mask)
@staticmethod
def add_model_specific_args(parser):
        parser.add_argument('--num-cascades', type=int,
                            default=12, help='Number of cascades')
parser.add_argument('--pools', type=int, default=4,
help='Number of U-Net pooling layers')
parser.add_argument('--chans', type=int, default=18,
help='Number of U-Net channels')
parser.add_argument('--sens-pools', type=int, default=4,
help='Number of U-Net pooling layers')
parser.add_argument('--sens-chans', type=int,
default=8, help='Number of U-Net channels')
parser.add_argument('--batch-size', default=1,
type=int, help='Mini batch size')
parser.add_argument('--lr', type=float,
default=0.0003, help='Learning rate')
parser.add_argument('--lr-step-size', type=int, default=40,
help='Period of learning rate decay')
parser.add_argument('--lr-gamma', type=float, default=0.1,
help='Multiplicative factor of learning rate decay')
parser.add_argument('--weight-decay', type=float, default=0.,
help='Strength of weight decay regularization')
return parser
def create_trainer(args):
backend = 'ddp' if args.gpus > 0 else 'ddp_cpu'
return Trainer(
default_save_path=args.exp_dir,
max_epochs=args.num_epochs,
gpus=args.gpus,
num_nodes=args.nodes,
weights_summary=None,
distributed_backend=backend,
replace_sampler_ddp=False,
)
def run(args):
cudnn.benchmark = True
cudnn.enabled = True
if args.mode == 'train':
trainer = create_trainer(args)
model = VariationalNetworkModel(args)
trainer.fit(model)
else: # args.mode == 'test' or args.mode == 'challenge'
assert args.checkpoint is not None
model = VariationalNetworkModel.load_from_checkpoint(
str(args.checkpoint))
model.hparams = args
model.hparams.sample_rate = 1.
trainer = create_trainer(args)
trainer.test(model)
def main(args=None):
parser = Args()
parser.add_argument('--mode', choices=['train', 'test'], default='train')
parser.add_argument('--num-epochs', type=int, default=50,
help='Number of training epochs')
parser.add_argument('--gpus', type=int, default=1)
parser.add_argument('--nodes', type=int, default=1)
parser.add_argument('--exp-dir', type=pathlib.Path, default='experiments',
help='Path where model and results should be saved')
parser.add_argument('--exp', type=str,
help='Name of the experiment', default='default')
parser.add_argument('--checkpoint', type=pathlib.Path,
help='Path to pre-trained model. Use with --mode test')
parser = VariationalNetworkModel.add_model_specific_args(parser)
if args is not None:
parser.set_defaults(**args)
args, _ = parser.parse_known_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
run(args)
if __name__ == '__main__':
main()
```
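The `SSIM` module above is self-contained, so it can be exercised on random tensors to see how the loss behaves; this is an illustrative check only, not part of the training pipeline.
```python
import torch

ssim = SSIM(win_size=7)
x = torch.rand(2, 1, 64, 64)
noisy = (x + 0.05 * torch.randn_like(x)).clamp(0, 1)
data_range = torch.ones(2)          # per-sample dynamic range

print(ssim(x, x, data_range))       # close to 0: identical images
print(ssim(x, noisy, data_range))   # larger: loss grows with distortion
```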
#### File: fastMRI/tests/conftest.py
```python
import pathlib
import numpy as np
import pytest
import torch
import yaml
# these are really slow - skip by default
skip_module_test_flag = True
skip_data_test_flag = True
def create_input(shape):
x = np.arange(np.product(shape)).reshape(shape)
x = torch.from_numpy(x).float()
return x
@pytest.fixture
def skip_module_test():
return skip_module_test_flag
@pytest.fixture
def skip_data_test():
return skip_data_test_flag
@pytest.fixture
def knee_split_lens():
split_lens = {
"multicoil_train": 34742,
"multicoil_val": 7135,
"multicoil_test": 4092,
"singlecoil_train": 34742,
"singlecoil_val": 7135,
"singlecoil_test": 3903,
}
return split_lens
@pytest.fixture
def brain_split_lens():
split_lens = {
"multicoil_train": 70748,
"multicoil_val": 21842,
"multicoil_test": 8852,
}
return split_lens
```
#### File: fastMRI/tests/test_modules.py
```python
import pathlib
from argparse import ArgumentParser
import pytest
from pytorch_lightning import Trainer
from experimental.unet.unet_module import UnetModule
from experimental.varnet.varnet_module import VarNetModule
from fastmri.data.mri_data import fetch_dir
def build_unet_args():
knee_path = fetch_dir("knee_path")
logdir = fetch_dir("log_path") / "test_dir"
parent_parser = ArgumentParser(add_help=False)
parser = UnetModule.add_model_specific_args(parent_parser)
parser = Trainer.add_argparse_args(parser)
num_gpus = 1
backend = "dp"
batch_size = 1 if backend == "ddp" else num_gpus
config = dict(
in_chans=1,
out_chans=1,
chans=32,
num_pool_layers=4,
drop_prob=0.0,
mask_type="random",
center_fractions=[0.08],
accelerations=[4],
resolution=384,
lr=0.001,
lr_step_size=40,
lr_gamma=0.1,
weight_decay=0.0,
data_path=knee_path,
challenge="singlecoil",
exp_dir=logdir,
exp_name="unet_demo",
test_split="test",
batch_size=batch_size,
)
parser.set_defaults(**config)
parser.set_defaults(
gpus=num_gpus,
default_root_dir=logdir,
replace_sampler_ddp=(backend != "ddp"),
distributed_backend=backend,
)
parser.add_argument("--mode", default="train", type=str)
args = parser.parse_args([])
return args
def build_varnet_args():
knee_path = fetch_dir("knee_path")
logdir = fetch_dir("log_path") / "test_dir"
parent_parser = ArgumentParser(add_help=False)
parser = VarNetModule.add_model_specific_args(parent_parser)
parser = Trainer.add_argparse_args(parser)
backend = "dp"
num_gpus = 2 if backend == "ddp" else 1
batch_size = 1
config = dict(
num_cascades=8,
pools=4,
chans=18,
sens_pools=4,
sens_chans=8,
mask_type="equispaced",
center_fractions=[0.08],
accelerations=[4],
resolution=384,
lr=0.001,
lr_step_size=40,
lr_gamma=0.1,
weight_decay=0.0,
data_path=knee_path,
challenge="multicoil",
exp_dir=logdir,
exp_name="varnet_demo",
test_split="test",
batch_size=batch_size,
)
parser.set_defaults(**config)
parser.set_defaults(
gpus=num_gpus,
default_root_dir=logdir,
replace_sampler_ddp=(backend != "ddp"),
distributed_backend=backend,
)
parser.add_argument("--mode", default="train", type=str)
args = parser.parse_args([])
return args
@pytest.mark.parametrize("backend", [(None)])
def test_unet_trainer(backend, skip_module_test):
if skip_module_test:
pytest.skip("config set to skip")
args = build_unet_args()
args.fast_dev_run = True
args.backend = backend
model = UnetModule(**vars(args))
trainer = Trainer.from_argparse_args(args)
trainer.fit(model)
@pytest.mark.parametrize("backend", [(None)])
def test_varnet_trainer(backend, skip_module_test):
if skip_module_test:
pytest.skip("config set to skip")
args = build_varnet_args()
args.fast_dev_run = True
args.backend = backend
model = VarNetModule(**vars(args))
trainer = Trainer.from_argparse_args(args)
trainer.fit(model)
``` |
{
"source": "josmjimjim/NeuroscienceLab",
"score": 3
} |
#### File: NeuroscienceLab/main/background.py
```python
import sys, os
from PyQt5.QtWidgets import QApplication, QMdiArea
from PyQt5.QtGui import QFont, QPixmap, QPainter, QColor
from PyQt5.QtCore import Qt, pyqtSlot
class Background (QMdiArea):
def __init__(self, parent=None):
# Initialize QMdiArea and set up parent: MainWindow
super().__init__(parent)
self.tileSubWindows()
# Define paintEvent to display background image
@pyqtSlot()
def paintEvent(self, event):
# Paint event is a method from QMdiArea
super().paintEvent(event)
painter = QPainter(self.viewport())
file = os.getcwd()
pixmap, control = self.loadImage(file + '/main/assets/neuroscience.png')
if control:
painter.drawPixmap(self.rect(), pixmap)
else:
painter.setPen(QColor(168, 34, 3))
painter.setFont(QFont('Helvetica', 26))
painter.drawText(self.rect(), Qt.AlignCenter, pixmap)
@staticmethod
def loadImage(path2Image):
# Static Method to load background image
# and return it and control variable
try:
with open(path2Image):
pixmap = QPixmap(path2Image)
control = True
except FileNotFoundError:
pixmap = 'Error image not found !'
control = False
return pixmap, control
if __name__ == '__main__':
app = QApplication(sys.argv)
window = Background()
sys.exit(app.exec_())
```
#### File: NeuroscienceLab/main/plotoptions.py
```python
import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QVBoxLayout, QPushButton,
QRadioButton, QButtonGroup, QGroupBox, QMessageBox,
QCheckBox, QLabel, QSpinBox, QGridLayout, QLineEdit, QFileDialog)
from PyQt5.QtCore import Qt, pyqtSignal
from singleton import Singleton
from stylesheet import abstyle
class PlotKindsOptions(QWidget):
__slots__ = ['names', 'options', 'options_list', 'url_link',
'time', 'interval', 'file', 'directory']
accept_signal = pyqtSignal(list, bool, int, str)
def __init__(self, trigger, parent = None):
super().__init__(parent)
self.url_link = False
self.directory = None
self.interval = 0
if trigger:
self.names = ('Lineal Plot Time', 'Pie',
'Box Plot', 'Histogram', 'Gaussian Histogram',
'Bar Plot', 'Horizontal Bar Plot', 'Violin')
else:
self.names = ('Lineal Plot Time', 'Pie',
'Bar Plot', 'Horizontal Bar Plot')
self.displayOptions()
optionsLayout = QVBoxLayout()
optionsLayout.addWidget(self.options)
self.setGeometry(340, 200, 560, 420)
self.setLayout(optionsLayout)
self.setStyleSheet(abstyle)
self.show()
def displayOptions(self):
self.options = QGroupBox('Please, select plot types to display',self)
self.options_list = QButtonGroup(self)
layout_box = QVBoxLayout(self)
# Define check option for time int plot
self.file = QLineEdit(self)
dir_button = QPushButton('...')
dir_button.setToolTip("Select directory to save.")
dir_button.clicked.connect(self.setDirectory)
layout_dirbox = QVBoxLayout()
layout_box.addLayout(layout_dirbox)
grid = QGridLayout()
grid.addWidget(self.file, 0, 0)
grid.addWidget(dir_button, 0, 1)
layout_dirbox.addLayout(grid)
for opt in self.names:
nms = QRadioButton(opt)
layout_box.addWidget(nms)
self.options_list.addButton(nms)
# Define check option for time int plot
label = QLabel('Insert time interval to plot', self)
self.time = QSpinBox()
self.time.setRange(0,100)
grid_time = QGridLayout()
grid_time.addWidget(label,0,0)
grid_time.addWidget(self.time,0,1)
layout_box.addLayout(grid_time)
self.time.valueChanged.connect(self.recognizeState2)
# Define check option for video display
if Singleton().hasUrl():
check_button = QCheckBox('Do you want to display the video file?',self)
layout_box.addStretch()
layout_box.addWidget(check_button)
check_button.stateChanged.connect(self.recognizeState)
# Define accept button and add to layout
accept_button = QPushButton('&Accept',self)
accept_button.clicked.connect(self.buttonAccept)
layout_box.addWidget(accept_button)
# Define the user alert
        label_alert = QLabel("Please wait here until the window closes"
                             " after pressing the Accept button!", self)
layout_box.addWidget(label_alert)
self.options_list.setExclusive(False)
self.options.setLayout(layout_box)
def buttonAccept(self):
'''
Close the window and show results.
'''
options_selected = [button.text() for i, button in enumerate(self.
options_list.buttons()) if button.isChecked()]
self.directory = self.file.text()
if options_selected and self.directory:
self.accept_signal.emit(options_selected, self.url_link, self.interval, self.directory)
self.close() # close windows class
else:
QMessageBox().warning(self, "", "Error, options selections are empty!\n"
"Please select one at least! or select path",
QMessageBox.Ok, QMessageBox.Ok)
def recognizeState(self, state):
if state == Qt.Checked:
self.url_link = True
else:
self.url_link = False
def recognizeState2(self):
self.interval = self.time.value()
def setDirectory(self):
"""
Choose the directory.
"""
file_dialog = QFileDialog(self)
#file_dialog.setFileMode(QFileDialog.Directory)
self.directory = file_dialog.getExistingDirectory(self, "Select Directory")
if self.directory:
self.file.setText(self.directory)
else:
QMessageBox().warning(self, "",
"Error, please select a path folder",
QMessageBox.Ok, QMessageBox.Ok)
if __name__ == '__main__':
app = QApplication(sys.argv)
window = PlotKindsOptions()
sys.exit(app.exec_())
```
#### File: NeuroscienceLab/main/videoprocess.py
```python
import cv2, sys, os
from videobarplot import imageGenerator, overlayPlot
import pandas as pd
def VideoProcess(*args):
video = args[0]
path = args[1]
try:
        # Load csv file and delete the first column
data = pd.read_csv(path+'/emotion.csv')
if data.shape[1] == 1:
raise Exception
except:
        # Load csv file and delete the first column
data = pd.read_csv(path+'/emotion.csv', delimiter=';')
    # Process emotions, convert them to numpy, and get the time to check against fps
cols = data.shape[0]
time = data.at[cols-1,'time']
fps1 = cols/time
if args[2] == '1':
data = data.drop(['Unnamed: 0','trigger', 'time'], axis=1).to_numpy().astype(float)
else:
data = data.drop(['Unnamed: 0','time'], axis=1).to_numpy().astype(float)
emotions = data[:,0::1]
# Open the video and get dimensions and fps
cap = cv2.VideoCapture(video)
video_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
video_heigth = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
fps = cap.get(cv2.CAP_PROP_FPS)
# Check the video fps
if fps > fps1:
fps = fps1
size = (int(video_width), int(video_heigth))
size_plot = (int(video_width*0.35), int(video_heigth*0.35))
# Initialize the video writer
out = cv2.VideoWriter(path + '/output_processed.avi',
cv2.VideoWriter_fourcc(*'MJPG'), fps, size)
# Initialize the detector
file = os.getcwd()
face_cascade = cv2.CascadeClassifier(file + '/main/assets/haarcascade_frontalface_default.xml')
j = 0
jend = cols
# Do operations while video is opened
while (cap.isOpened()):
        # ret is True if the video opened and img is the current frame
ret, img = cap.read()
try:
# Convert frame to gray color and detect face
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
# draw rectangle to main image
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
if j < jend:
# Generate plot image
try:
image = imageGenerator(emotions[j], size_plot, size)
img = overlayPlot(img, image, size)
except Exception:
break
else:
break
except:
break
out.write(img)
j += 1
if j == int(0.2*jend):
sys.stderr.write("Total complete: 20%\n")
elif j == int(0.4 * jend):
sys.stderr.write("Total complete: 40%\n")
elif j == int(0.6 * jend):
sys.stderr.write("Total complete: 60%\n")
elif j == int(0.8 * jend):
sys.stderr.write("Total complete: 80%\n")
        # press 'q' to quit
        if ((cv2.waitKey(0) & 0xFF) == ord('q')) or not ret:
break
sys.stderr.write("Total complete: 100%\n")
# kill open cv things
cap.release()
out.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
a = sys.argv[1]
b = sys.argv[2]
c = sys.argv[3]
VideoProcess(a, b, c)
``` |
{
"source": "josmople/pytorch_utils",
"score": 3
} |
#### File: josmople/pytorch_utils/az.py
```python
import typing as _T
A = _T.TypeVar("A")
B = _T.TypeVar("B")
C = _T.TypeVar("C")
D = _T.TypeVar("D")
@_T.overload
def tuple_n(n: _T.Literal[2], vals: _T.Tuple[A, B], clss: _T.Tuple[_T.Type[A], _T.Type[B]] = None) -> _T.Tuple[A, B]: ...
@_T.overload
def tuple_n(n: _T.Literal[2], vals: _T.Tuple[A, A], clss: _T.Type[A] = None) -> _T.Tuple[A, A]: ...
@_T.overload
def tuple_n(n: _T.Literal[2], vals: _T.Iterable[A], clss: _T.Type[A] = None) -> _T.Tuple[A, A]: ...
@_T.overload
def tuple_n(n: _T.Literal[2], vals: A, clss: _T.Type[A] = None) -> _T.Tuple[A, A]: ...
@_T.overload
def tuple_n(n: _T.Literal[3], vals: _T.Tuple[A, B, C], clss: _T.Tuple[_T.Type[A], _T.Type[B], _T.Type[C]] = None) -> _T.Tuple[A, B, C]: ...
@_T.overload
def tuple_n(n: _T.Literal[3], vals: _T.Tuple[A, A, A], clss: _T.Type[A] = None) -> _T.Tuple[A, A, A]: ...
@_T.overload
def tuple_n(n: _T.Literal[3], vals: _T.Iterable[A], clss: _T.Type[A] = None) -> _T.Tuple[A, A, A]: ...
@_T.overload
def tuple_n(n: _T.Literal[3], vals: A, clss: _T.Type[A] = None) -> _T.Tuple[A, A, A]: ...
@_T.overload
def tuple_n(n: _T.Literal[4], vals: _T.Tuple[A, B, C, D], clss: _T.Tuple[_T.Type[A], _T.Type[B], _T.Type[C], _T.Type[D]] = None) -> _T.Tuple[A, B, C, D]: ...
@_T.overload
def tuple_n(n: _T.Literal[4], vals: _T.Tuple[A, A, A, A], clss: _T.Type[A] = None) -> _T.Tuple[A, A, A, A]: ...
@_T.overload
def tuple_n(n: _T.Literal[4], vals: _T.Iterable[A], clss: _T.Type[A] = None) -> _T.Tuple[A, A, A, A]: ...
@_T.overload
def tuple_n(n: _T.Literal[4], vals: A, clss: _T.Type[A] = None) -> _T.Tuple[A, A, A, A]: ...
@_T.overload
def tuple_n(n: int, vals: _T.Iterable[A], clss: _T.Type[A] = None) -> _T.Tuple[A, ...]: ...
@_T.overload
def tuple_n(n: int, vals: _T.Union[_T.Iterable, object], clss: _T.Union[type, _T.Iterable[type]] = None) -> tuple: ...
def tuple_n(n: int, vals: object, clss: type = None) -> tuple:
if isinstance(vals, _T.Iterable):
vals = list(vals)
assert len(vals) == n, f"Length of iterable must be {n} but got {len(vals)}"
else:
vals = [vals] * n
if clss is None:
clss = [None] * n
if callable(clss): # `type` or `Callable`
clss = [clss] * n
assert isinstance(clss, _T.Iterable), "Parameter clss must be a `type` or `Iterable[type | Callable]`"
nvals = list(vals)
for i, (val, cls) in enumerate(zip(vals, clss)):
if cls == None:
continue
assert callable(cls), f"Parameter clss[{i}] must be `type` or `Callable`"
nvals[i] = cls(val)
    return tuple(nvals)  # return the converted values, not the raw inputs
@_T.overload
def tuple_2(vals: _T.Tuple[A, B], clss: _T.Tuple[_T.Type[A], _T.Type[B]] = None) -> _T.Tuple[A, B]: ...
@_T.overload
def tuple_2(vals: _T.Tuple[A, A], clss: _T.Type[A] = None) -> _T.Tuple[A, A]: ...
@_T.overload
def tuple_2(vals: _T.Iterable[A], clss: _T.Type[A] = None) -> _T.Tuple[A, A]: ...
@_T.overload
def tuple_2(vals: A, clss: _T.Type[A] = None) -> _T.Tuple[A, A]: ...
def tuple_2(vals: object, clss: type = None) -> tuple:
return tuple_n(2, vals, clss)
@_T.overload
def tuple_3(vals: _T.Tuple[A, B, C], clss: _T.Tuple[_T.Type[A], _T.Type[B], _T.Type[C]] = None) -> _T.Tuple[A, B, C]: ...
@_T.overload
def tuple_3(vals: _T.Tuple[A, A, A], clss: _T.Type[A] = None) -> _T.Tuple[A, A, A]: ...
@_T.overload
def tuple_3(vals: _T.Iterable[A], clss: _T.Type[A] = None) -> _T.Tuple[A, A, A]: ...
@_T.overload
def tuple_3(vals: A, clss: _T.Type[A] = None) -> _T.Tuple[A, A, A]: ...
def tuple_3(vals: object, clss: type = None) -> tuple:
return tuple_n(3, vals, clss)
@_T.overload
def tuple_4(vals: _T.Tuple[A, B, C, D], clss: _T.Tuple[_T.Type[A], _T.Type[B], _T.Type[C], _T.Type[D]] = None) -> _T.Tuple[A, B, C, D]: ...
@_T.overload
def tuple_4(vals: _T.Tuple[A, A, A, A], clss: _T.Type[A] = None) -> _T.Tuple[A, A, A, A]: ...
@_T.overload
def tuple_4(vals: _T.Iterable[A], clss: _T.Type[A] = None) -> _T.Tuple[A, A, A, A]: ...
@_T.overload
def tuple_4(vals: A, clss: _T.Type[A] = None) -> _T.Tuple[A, A, A, A]: ...
def tuple_4(vals: object, clss: type = None) -> tuple:
return tuple_n(4, vals, clss)
```
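A few illustrative calls showing how `tuple_n` and its fixed-arity wrappers broadcast scalars and apply the optional per-element conversion:
```python
print(tuple_2(5))                  # (5, 5): scalars are broadcast
print(tuple_2([1, 2]))             # (1, 2): iterables must already have length n
print(tuple_3(7.9, int))           # (7, 7, 7): clss converts every element
print(tuple_n(4, (1, 2, 3, 4)))    # (1, 2, 3, 4)
```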
#### File: josmople/pytorch_utils/fn.py
```python
import typing as _T
from torch.nn import Module as _Module
_Fn = _T.TypeVar("_Fn")
class instanceclassmethod(object):
"""
The method should take two arguments, 'self' and 'cls'.
The value of 'self' is None if invoked as a classmethod.
"""
def __init__(self, func: _Fn):
self.func = func
def __get__(self, instance, clas=None) -> _Fn:
from functools import wraps
@wraps(self.func)
def func(*args, **kwds):
return self.func(instance, clas, *args, **kwds)
return func
def ignore_unmatched_kwargs(f):
"""Make function ignore unmatched kwargs.
If the function already has the catch all **kwargs, do nothing.
From: https://stackoverflow.com/a/63787701
"""
from inspect import Parameter, signature
import functools
if any(param.kind == Parameter.VAR_KEYWORD for param in signature(f).parameters.values()):
return f
#
@functools.wraps(f)
def inner(*args, **kwargs):
# For each keyword arguments recognised by f, take their binding from **kwargs received
filtered_kwargs = {
name: kwargs[name]
for name, param in signature(f).parameters.items()
if name in kwargs
and (
param.kind is Parameter.KEYWORD_ONLY or
param.kind is Parameter.POSITIONAL_OR_KEYWORD
)
}
return f(*args, **filtered_kwargs)
return inner
def as_pytorch_module(fn: _Fn = None, *, include_self=False) -> _T.Union[type, _T.Callable[[], _T.Union[_Fn, _Module]]]:
"""
Transforms functions to torch.nn.Module
"""
if fn is None:
from functools import partial
return partial(as_pytorch_module, include_self=include_self)
from torch.nn import Module
if include_self:
def forward(self, *args, **kwds):
return fn(self, *args, **kwds)
else:
def forward(self, *args, **kwds):
return fn(*args, **kwds)
return type(f"LambdaModule__{fn.__name__}", (Module, ), dict(forward=forward))
del _Module, _T, _Fn
``` |
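A quick sketch of the two decorators in use, assuming the definitions above are importable:
```python
import torch

@ignore_unmatched_kwargs
def scale(x, factor=2):
    return x * factor

# Unknown keyword arguments are dropped instead of raising TypeError.
print(scale(3, factor=4, unused="ignored"))  # 12

# Wrap a plain function into a torch.nn.Module subclass.
Doubler = as_pytorch_module(lambda t: t * 2)
print(Doubler()(torch.ones(3)))  # tensor([2., 2., 2.])
```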
{
"source": "jo-sm/SublimeLinter-contrib-stylelint_d",
"score": 3
} |
#### File: jo-sm/SublimeLinter-contrib-stylelint_d/linter.py
```python
from SublimeLinter.lint import NodeLinter, util
import json
class Stylelint_d(NodeLinter):
"""Provides an interface to stylelint_d."""
syntax = ('css', 'sass', 'scss', 'postcss', 'less')
cmd = ('stylelint_d', '--stdin', '--formatter=json', '--file', '@')
npm_name = 'stylelint_d'
version_args = '--version'
version_re = r'(?P<version>\d+\.\d+\.\d+)'
version_requirement = '>= 1.0.8'
line_col_base = (1, 1)
error_stream = util.STREAM_BOTH
comment_re = r'\s*/[/*]'
selectors = {
'html': 'source.css.embedded.html'
}
# https://github.com/kungfusheep/SublimeLinter-contrib-stylelint/blob/master/linter.py
# Adapted from SublimeLinter-contrib-stylelint
regex = (
r'^\s*(?P<line>[0-9]*)\:(?P<col>[0-9]*)\s*'
r'(?:(?P<error>error)|(?P<warning>warning)|(?P<deprecation>deprecation)|(?P<invalid>invalid))\s*'
r'(?P<message>.+)'
)
# https://github.com/SublimeLinter/SublimeLinter-csslint/blob/master/linter.py
# Taken from SublimeLinter-csslint
word_re = r'^(#?[-\w]+)'
def split_match(self, match):
"""Override `split_match` to support invalidOptionWarnings and deprecations."""
if match:
group = match.groupdict()
if group.get('deprecation') or group.get('invalid'):
# We have to show the error on some line or else it won't
# show in Sublime
return match, 0, 0, True, False, group.get('message'), None
else:
return super().split_match(match)
else:
return super().split_match(match)
def run(self, cmd, code):
"""Parse returned JSON into SublimeLinter friendly text."""
raw = super().run(cmd, code)
try:
parsed = json.loads(raw)
except ValueError:
return []
result = []
try:
file_errors = parsed[0] # Parsed is an array of each file data
except IndexError:
return []
for error in file_errors.get('deprecations', []):
result.append("0:0 deprecation {}".format(error.get('text')))
for error in file_errors.get('invalidOptionWarnings', []):
result.append('0:0 invalid {}'.format(error.get('text')))
for error in file_errors.get('warnings', []):
# Severity may not be present in the warning
result.append("{}:{} {} {}".format(
error.get('line', '0'),
error.get('column', '0'),
error.get('severity', 'error'),
error.get('text', ''))
)
return "\n".join(result)
``` |
{
"source": "josndan/octo-song-search",
"score": 3
} |
#### File: josndan/octo-song-search/Graph.py
```python
import sys
class Vertex:
def __init__(self, artist):
self.id = artist
self.songs = []
self.coArtists = {}
self.color = 'white'
self.dist = sys.maxsize
self.pred = None
self.disc = 0
self.fin = 0
"""resets a node to do search algorithms again"""
def reset(self):
self.color = 'white'
self.dist = sys.maxsize
self.pred = None
self.disc = 0
self.fin = 0
"""Sets the color"""
def setColor(self, color):
self.color = color
"""Sets the distance"""
def setDistance(self, d):
self.dist = d
"""Sets the the predecessor node"""
def setPred(self, p):
self.pred = p
"""Sets the discovery time"""
def setDiscovery(self, dtime):
self.disc = dtime
"""Sets the finish time"""
def setFinish(self, ftime):
self.fin = ftime
"""Returns the finish time"""
def getFinish(self):
return self.fin
"""returns the discovery time"""
def getDiscovery(self):
return self.disc
"""returns the predecessor node """
def getPred(self):
return self.pred
"""returns the distance"""
def getDistance(self):
return self.dist
"""returns the color of the node"""
def getColor(self):
return self.color
"""creates an edge"""
def addNeighbor(self, nbr, weight=0):
self.coArtists[nbr] = weight
def __str__(self):
return str(self.id) + ' connectedTo: ' + str([x for x in self.coArtists])
"""returns the connectes nodes list"""
def getConnections(self):
return self.coArtists.keys()
"""returns the node id"""
def getId(self):
return self.id
"""returns the weight of an edge"""
def getWeight(self, nbr):
return self.coArtists[nbr]
``` |
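A small sketch of wiring vertices together by hand; the adjacency lives on the `Vertex` objects themselves, so no separate graph container is needed here.
```python
a = Vertex("Artist A")
b = Vertex("Artist B")
a.addNeighbor(b, weight=3)      # weighted collaboration edge
a.songs.append("Duet Song")

print(a.getWeight(b))                            # 3
print([v.getId() for v in a.getConnections()])   # ['Artist B']
```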
{
"source": "josndan/twitter-moniter",
"score": 3
} |
#### File: josndan/twitter-moniter/twitter.py
```python
import twitter_credential
import tweepy
import time
from tweepy import OAuthHandler
from tweepy import API
from tweepy import Cursor
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk
nltk.download('vader_lexicon',quiet=True)
def authenticate():
auth = OAuthHandler(twitter_credential.API_KEY, twitter_credential.API_SECRET_KEY)
auth.set_access_token(twitter_credential.ACCESS_TOKEN, twitter_credential.ACCESS_TOKEN_SECRET)
return auth
class TwitterClient():
def __init__(self, username=None):
self.auth = authenticate()
self.twitterClient = API(self.auth,wait_on_rate_limit=True,wait_on_rate_limit_notify=True)
self.username = username
def get_tweets(self, numTweets):
# tweets = []
# for tweet in Cursor(self.twitterClient.user_timeline, id=self.username).items(numTweets):
# tweets.append(tweet)
count = 0
add = False
tweets = Cursor(self.twitterClient.user_timeline,q="-filter:retweets", id=self.username).items()
res=[]
while count < numTweets:
try:
tweet = tweets.next()
if hasattr(tweet, "in_reply_to_status_id_str"):
if tweet.in_reply_to_status_id_str is None:
add=True
else:
add = True
if add and (not tweet.retweeted) and ('RT @' not in tweet.text):
res.append(tweet)
count += 1
add = False
except tweepy.RateLimitError as e:
print(f'API rate limit reached {e}')
time.sleep(60)
continue
except tweepy.TweepError as e:
print(f'tweepy error occured {e}')
error = True
break
except StopIteration as e:
print(f"Loop ended {e}")
break
except Exception as e:
print(f'Error: {e}')
error = True
break
return res
def get_client(self):
return self.twitterClient
def get_username(self):
return self.username
class DataLoader():
def toDataFrame(self, tweets):
df =None
try:
df = pd.DataFrame(
data=[[tweet.id, tweet.text, tweet.favorite_count, tweet.retweet_count
, tweet.source, tweet.created_at] for tweet in tweets]
, columns=["id", "text", "favorite_count", "retweet_count", "source", "created_at"])
except AttributeError:
df = pd.DataFrame(
data=[[tweet.id, tweet.full_text, tweet.favorite_count, tweet.retweet_count
, tweet.source, tweet.created_at] for tweet in tweets]
, columns=["id", "text", "favorite_count", "retweet_count", "source", "created_at"])
return df
class Analyzer():
# def strip_tweet(self, tweet):
# return ''.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet)).strip()
def get_sentiment(self, tweet):
return SentimentIntensityAnalyzer().polarity_scores(tweet)
def add_sentiment(self,df):
df["sentiment"] = np.array([self.get_sentiment(tweet)["compound"] for tweet in df["text"]])
return df
def graph(self,df):
sns.set()
sns.relplot(data=df, x="created_at", y="sentiment", hue="source", kind="line", ci=None)
plt.show()
class Tweets():
def __init__(self,username,num_tweets=5):
self.client = TwitterClient(username)
self.tweets = DataLoader().toDataFrame(self.client.get_tweets(num_tweets))
self.analyzer = Analyzer()
self.num_tweets = num_tweets
# self.num_replies = num_replies
# self.replies = {}
# self.get_replies()
self.analyzer.add_sentiment(self.tweets)
def get_reply(self,id,num=5):
api = self.client.get_client()
print("\nSearching for tweets with")
print(f'Id : {id}\nto:{self.client.get_username()}')
print("\n")
        tweets = Cursor(api.search, q=f'to:@{self.client.get_username()}'
, since_id=id, tweet_mode='extended'
, result_type='recent'
).items()
count = 0
replies=[]
while count < num:
try:
tweet = tweets.next()
# print(tweet)
if hasattr(tweet, "in_reply_to_status_id_str"):
# print(tweet)
if tweet.in_reply_to_status_id_str == str(id):
# print(f'Reply:{tweet}')
replies.append(tweet)
count += 1
except tweepy.RateLimitError as e:
print(f'API rate limit reached {e}')
time.sleep(60)
continue
except tweepy.TweepError as e:
print(f'tweepy error occured {e}')
break
except StopIteration:
print()
print("Stop Iteration")
print(f'Only {count} items present in the result')
break
except Exception as e:
print(f'Error: {e}')
break
replies = DataLoader().toDataFrame(replies)
self.analyzer.add_sentiment(replies)
return replies
# for id in self.tweets["id"]:
# replies=[]
# # try:
# # for tweet in Cursor(api.search,q=f'to:{self.client.get_username()}'
# # ,since_id = id,tweet_mode='extended'
# # ,result_type='recent').items(2):
# # print(tweet)
# # if hasattr(tweet,"in_reply_to_status_id_str") and tweet.in_reply_to_status_id_str==id:
# # print(f'Reply:{tweet}')
# # replies.append(tweet)
#
# if error:
# break
# # except Exception as e:
# # print(f'Error: {e}')
# # break
#
# self.replies[id] = DataLoader().toDataFrame(replies)
# self.analyzer.add_sentiment(self.replies[id])
def display(self):
print(f"\n First {self.num_tweets} tweets\n")
pd.set_option('max_colwidth', 60)
pd.options.display.show_dimensions = False
print(self.tweets[['text','sentiment']])
print("\n")
# print()
# print("Replies ")
# for id in self.replies:
# print(f'For tweet {id} :')
# print(self.replies[id][['id','text','sentiment']])
username = input("Enter the username ")
user = Tweets(username)
user.display()
row=int(input("\nEnter the tweet number to analyse "))
id= user.tweets["id"].iloc[row]
nreplies=int(input("\nEnter the number of replies to analyse "))
user.analyzer.graph(user.get_reply(id,nreplies))
``` |
{
"source": "josndan/urabn-fortnight",
"score": 3
} |
#### File: urabn-fortnight/lib/Posting.py
```python
class Posting:
def __init__(self, id, data, positions):
self.docId = id
self.data = data
self.positions = positions
def getTermCount(self):
return len(self.positions)
def getDocId(self):
return self.docId
def getPositions(self):
return self.positions
def __repr__(self):
return f"{self.docId} : {self.data} : {self.positions}"
``` |
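A minimal illustration of the `Posting` container:
```python
p = Posting(42, "raw document text", [3, 17, 56])
print(p.getTermCount())  # 3: one count per recorded position
print(p)                 # 42 : raw document text : [3, 17, 56]
```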
{
"source": "josojo/plasma-mvp",
"score": 2
} |
#### File: plasma/client/client.py
```python
import json
import rlp
from web3.contract import ConciseContract
from web3 import HTTPProvider
from plasma.config import plasma_config
from plasma.root_chain.deployer import Deployer
from plasma.child_chain.transaction import Transaction, UnsignedTransaction
from .child_chain_service import ChildChainService
class Client(object):
def __init__(self, root_chain_provider=HTTPProvider('http://localhost:8545'), child_chain_url="http://localhost:8546/jsonrpc"):
deployer = Deployer(root_chain_provider)
abi = json.load(open("contract_data/RootChain.json"))
self.root_chain = deployer.w3.eth.contract(abi, plasma_config['ROOT_CHAIN_CONTRACT_ADDRESS'], ContractFactoryClass=ConciseContract)
self.child_chain = ChildChainService(child_chain_url)
def create_transaction(self, blknum1=0, txindex1=0, oindex1=0,
blknum2=0, txindex2=0, oindex2=0,
newowner1=b'\x00' * 20, amount1=0,
newowner2=b'\x00' * 20, amount2=0,
fee=0):
return Transaction(blknum1, txindex1, oindex1,
blknum2, txindex2, oindex2,
newowner1, amount1,
newowner2, amount2,
fee)
def sign_transaction(self, transaction, key1=b'', key2=b''):
if key1:
transaction.sign1(key1)
if key2:
            transaction.sign2(key2)  # sign the second input with the second key
return transaction
def deposit(self, transaction):
self.root_chain.deposit(transact={'from': '0x' + transaction.newowner1.hex(), 'value': transaction.amount1})
def apply_transaction(self, transaction):
self.child_chain.apply_transaction(transaction)
def submit_block(self, block):
self.child_chain.submit_block(block)
def withdraw(self, txPos, tx, proof, sigs):
utxoPos = txPos[0] * 1000000000 + txPos[1] * 10000 + txPos[2] * 1
self.root_chain.startExit(utxoPos, rlp.encode(tx, UnsignedTransaction), proof, sigs, transact={'from': '0x' + tx.newowner1.hex()})
def get_transaction(self, blknum, txindex):
return self.child_chain.get_transaction(blknum, txindex)
def get_current_block(self):
return self.child_chain.get_current_block()
def get_block(self, blknum):
return self.child_chain.get_block(blknum)
def get_current_block_num(self):
return self.child_chain.get_current_block_num()
```
#### File: plasma/root_chain/deployer.py
```python
import json
import os
from ethereum.tools import tester as t
from solc import compile_standard
from web3.contract import ConciseContract
from web3 import Web3, HTTPProvider
from plasma.config import plasma_config
OWN_DIR = os.path.dirname(os.path.realpath(__file__))
class Deployer(object):
def __init__(self, provider=HTTPProvider('http://localhost:8545')):
self.w3 = Web3(provider)
def get_dirs(self, path):
abs_contract_path = os.path.realpath(os.path.join(OWN_DIR, 'contracts'))
extra_args = [[file, [os.path.realpath(os.path.join(r, file))]] for r, d, f in os.walk(abs_contract_path) for file in f]
contracts = {}
for contract in extra_args:
contracts[contract[0]] = {'urls': contract[1]}
path = '{}/{}'.format(abs_contract_path, path)
return path, contracts
def compile_contract(self, path, args=()):
file_name = path.split('/')[1]
contract_name = file_name.split('.')[0]
path, contracts = self.get_dirs(path)
compiled_sol = compile_standard({'language': 'Solidity',
'sources': {**{path.split('/')[-1]: {'urls': [path]}}, **contracts}}, # Noqa E999
allow_paths=OWN_DIR + "/contracts")
abi = compiled_sol['contracts'][file_name][contract_name]['abi']
bytecode = compiled_sol['contracts'][file_name][contract_name]['evm']['bytecode']['object']
# Create the contract_data folder if it doesn't already exist
os.makedirs('contract_data', exist_ok=True)
contract_file = open('contract_data/%s.json' % (file_name.split('.')[0]), "w+")
json.dump(abi, contract_file)
contract_file.close()
return abi, bytecode, contract_name
def create_contract(self, path, args=(), gas=4410000, sender=t.k0):
abi, bytecode, contract_name = self.compile_contract(path, args)
contract = self.w3.eth.contract(abi=abi, bytecode=bytecode)
# Get transaction hash from deployed contract
tx_hash = contract.deploy(transaction={'from': self.w3.eth.accounts[0], 'gas': gas}, args=args)
# Get tx receipt to get contract address
tx_receipt = self.w3.eth.getTransactionReceipt(tx_hash)
contract_address = tx_receipt['contractAddress']
# Contract instance in concise mode
contract_instance = self.w3.eth.contract(abi, contract_address, ContractFactoryClass=ConciseContract)
print("Successfully deployed {} contract!".format(contract_name))
return contract_instance
def get_contract(self, path):
file_name = path.split('/')[1]
abi = json.load(open('contract_data/%s.json' % (file_name.split('.')[0])))
return self.w3.eth.contract(abi, plasma_config['ROOT_CHAIN_CONTRACT_ADDRESS'])
``` |
{
"source": "josola/project-euler",
"score": 4
} |
#### File: project-euler/python/p010.py
```python
def compute(LIMIT):
# - Uses Sieve of Eratosthenes to eliminate composite
# numbers up to the limit.
# - The sieve tracks previously marked primes without increasing
# the computation time unnecessarily. This allows the sieve to
# jump ahead to the square of the current prime and
# remove all the factors of the current prime.
prime = [True] * LIMIT
i = 2
while (i * i) <= LIMIT:
if prime[i] == True:
j = i * i
while j <= LIMIT - 1:
prime[j] = False
j += i
i += 1
sum = 0
for i in range(2, LIMIT):
if prime[i] == True:
sum += i
return sum
if __name__ == "__main__":
print(compute(2000000))
# Answer: 142'913'828'922
# Asymptotic complexity: O(N LogN)
# M1 (3.2 GHz CPU) ARMv8-A64 (64 bit): 1 loop, best of 5: 314 msec per loop
```
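A quick sanity check on small limits (the primes below 10 are 2, 3, 5 and 7):
```python
assert compute(10) == 17    # 2 + 3 + 5 + 7
assert compute(30) == 129   # sum of the ten primes below 30
```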
#### File: project-euler/python/p012.py
```python
import math
def compute(TARGET):
term = 1
triangular = 0
div_count = 0
while div_count < TARGET:
term += 1
div_count = 0
# - Triangular numbers and arithmetic progressions can be found
# using the same formula.
# - Triangular number (arithmetic progression): a = n * (n + 1) / 2
# Where "a" is the triangular number, and "n" is the last number
# in the arithmetic progression (term).
# - See p001, and p006 for more examples of
# the arithmetic progression formula.
triangular = term * (term + 1) // 2
# - We can take the same algorithm p003 used to find
# the largest prime factor and adapt it to find all the
# factors belonging to our triangular number.
root = int(math.sqrt(triangular))
for i in range(1, root + 1, 1):
if triangular % i == 0:
div_count += 2
return triangular
if __name__ == "__main__":
print(compute(500))
# Answer: 76576500
# Asymptotic complexity: O(N)
# M1 (3.2 GHz CPU) ARMv8-A64 (64 bit): 1 loop, best of 5: 1.79 sec per loop
```
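With a smaller target the result is easy to verify by hand: the first triangular number with at least five divisors is 28 (divisors 1, 2, 4, 7, 14, 28).
```python
assert compute(5) == 28
```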
#### File: project-euler/python/p017.py
```python
def compute(START, END):
ones_place = [ "", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen", "forteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen" ]
tens_place = [ "", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety" ]
hundreds_place = "hundred"
total = 0
for i in range(START, END + 1, 1):
word = ""
if i < 20:
word = ones_place[i]
elif i >= 20 and i < 100:
word = tens_place[i // 10] + ones_place[i % 10]
elif i >= 100 and (i % 100) // 10 == 0 and i % 10 == 0 and i < 1000:
word = ones_place[i // 100] + hundreds_place
elif i >= 100 and (i % 100) // 10 < 2 and i < 1000:
word = ones_place[i // 100] + hundreds_place + "and" + ones_place[i % 100]
elif i >= 120 and i < 1000:
word = ones_place[i // 100] + hundreds_place + "and" + tens_place[(i % 100) // 10] + ones_place[i % 10]
elif i == 1000:
word = "onethousand"
total += len(word)
return total
if __name__ == "__main__":
print(compute(1, 1000))
# Answer: 21124
# Asymptotic complexity: O(N)
# M1 (3.2 GHz CPU) ARMv8-A64 (64 bit): 1000 loops, best of 5: 315 usec per loop
``` |
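As a small check, the numbers one through five use 3 + 3 + 5 + 4 + 4 = 19 letters in total:
```python
assert compute(1, 5) == 19
```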
{
"source": "josonle/LearningDjango",
"score": 2
} |
#### File: mysite/blog/views.py
```python
from django.shortcuts import render,get_object_or_404
from .models import BlogArticles
# Create your views here.
def blog_title(request):
blogs=BlogArticles.objects.all()
return render(request,"blog/titles.html",{"blogs":blogs})
def article(request,a_id):
blog=get_object_or_404(BlogArticles,id=a_id)
publish_time=blog.publish
return render(request,"blog/article.html",{'blog':blog,'publish':publish_time})
``` |
{
"source": "josoriov/python3-projects",
"score": 4
} |
#### File: easy/02-simple-chatty-bot/simple_chatty_bot.py
```python
from src.functions import safe_cast
def introduction():
botname = "Talko"
bith_year = 2021
print(f"Hello! My name is {botname}")
print(f"I Was created in {bith_year}")
def presentation():
print("Please, remind me your name")
name = input()
print(f"What a great name you have, {name}!")
def age_guess():
print("Let me guess your age.")
remainder_3 = int(input("Enter the remainder of dividing you age by 3: "))
remainder_5 = int(input("Enter the remainder of dividing you age by 5: "))
remainder_7 = int(input("Enter the remainder of dividing you age by 7: "))
age = (remainder_3*70 + remainder_5*21 + remainder_7*15) % 105
print(f"Your age is {age}; that's a good time to start programming!")
def counting():
print("Now I will prove to you that I can count to any number you want.")
number = int(input("Enter the number you want me to count to: "))
for i in range(number):
print(i, end=" - ")
print(number)
def quiz():
question = """
Let's test your programming knowledge!
What type of language Python is?
1) Interpreted and statically typed
2) Interpreted and dynamically typed
3) Compiled and statically typed
    4) Compiled and dynamically typed"""
print(question)
correct = False
while not correct:
ans = input("Enter your anwser: ")
ans = safe_cast(ans, int, 0)
if ans == 2:
print("That's correct! Python is both interpreted and dynamically typed")
correct = True
else:
print("Please, try again.")
def exit_message():
print("Congratulations, have a nice day!")
def main():
introduction()
presentation()
age_guess()
counting()
quiz()
exit_message()
main()
``` |
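The `age_guess` step above is the classic Chinese remainder trick: 70, 21 and 15 are each congruent to 1 modulo one of 3, 5, 7 and to 0 modulo the other two, so the weighted sum of the remainders reproduces the age modulo 105. A quick numeric check:
```python
age = 25
r3, r5, r7 = age % 3, age % 5, age % 7        # 1, 0, 4
print((r3 * 70 + r5 * 21 + r7 * 15) % 105)    # 25
```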