ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 1a2ff66e092665412a561a2125c1879ca5d1d219 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util class for job-related operations.
"""
import contextlib
import os
from taskflow import engines
from taskflow.persistence import logbook
from oslo_utils import uuidutils
from pipeline.pipelines import pipeline_factory
from pipeline.utils import backend_helper
def post_remote_pipeline_job(pipeline):
ME = os.getpid()
print("Starting poster with pid: %s" % ME)
my_name = "poster-%s" % ME
persist_backend = backend_helper.default_persistence_backend()
with contextlib.closing(persist_backend):
with contextlib.closing(persist_backend.get_connection()) as conn:
conn.upgrade()
job_backend = backend_helper.default_jobboard_backend(my_name)
job_backend.connect()
with contextlib.closing(job_backend):
        # Create information in the persistence backend about the
        # unit of work we want to complete and the factory that
        # can be called to create the tasks needed to get that
        # work unit done.
lb = logbook.LogBook("post-from-%s" % my_name)
fd = logbook.FlowDetail("sample-from-%s" % my_name,
uuidutils.generate_uuid())
lb.add(fd)
with contextlib.closing(persist_backend.get_connection()) as conn:
conn.save_logbook(lb)
engines.save_factory_details(fd,
pipeline_factory.make_pipeline_flow,
[pipeline.name],
pipeline.kwargs,
backend=persist_backend)
# Post, and be done with it!
jb = job_backend.post("sample-job-from-%s" % my_name, book=lb)
print("Posted: %s" % jb)
return jb
|
py | 1a2ff7c55b8ed076fa46d310054279a8163972e9 | import csv
import random
from functools import partial
from typing import Callable, Optional
from pdb import set_trace as st
import os
import pandas as pd
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
import numpy as np
import tensorflow as tf
from foolbox.attacks import (
FGSM,
Attack,
DeepFoolAttack,
IterativeGradientSignAttack,
SaliencyMapAttack,
)
# from foolbox.criteria import TargetClass
# from foolbox.models import TensorFlowModel
from tensorflow.python.training import saver
from tensorflow.python.training.session_manager import SessionManager
import pickle
import sklearn.metrics as metrics
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
from model.config import LENET
from model import LeNet
import nninst_mode as mode
from dataset import mnist
from dataset.config import MNIST_TRAIN, MNIST_PATH
from dataset.mnist_transforms import *
from trace.lenet_mnist_class_trace_v2 import (
data_config,
)
from trace.common import (
class_trace,
)
from tf_utils import new_session_config
from nninst_statistics import calc_trace_side_overlap
from nninst_trace import TraceKey
from nninst_utils.numpy import arg_approx
from nninst_utils.ray import ray_init
from nninst_utils.fs import ensure_dir, IOAction, CsvIOAction, abspath
from eval.common import get_overlay_summary, clean_overlap_ratio, \
translation_overlap_ratio, attack_overlap_ratio, \
lenet_mnist_example
from eval.cw_attack import cw_generate_adversarial_example
from eval.eval_mnist import foolbox_generate_adversarial_example
from eval.cw_attacks import CarliniL2
from nninst_graph import AttrMap, Graph, GraphAttrKey
from nninst_utils.ray import ray_iter
from tf_graph import (
MaskWeightWithTraceHook,
model_fn_with_fetch_hook,
)
from trace.common import (
get_predicted_value,
get_rank,
predict,
reconstruct_class_trace_from_tf,
reconstruct_trace_from_tf,
reconstruct_trace_from_tf_brute_force,
)
from eval.eval_by_reduced_point import reconstruct_point
from nninst_op import *
from nninst_trace import calc_padding
threshold = 0.9
dilation_iter = 1
dilation_structure = ndimage.generate_binary_structure(2, 2)
# Model config
model_label = "augmentation"
model_dir = f"result/lenet/model_{model_label}"
# Trace config
trace_dir = f"{model_dir}/traces_{threshold}"
trace_name = "noop"
training_trace_dir = f"{model_dir}/per_image_trace_{threshold}/train"
# Result dir
result_name = "test"
result_dir = f"{model_dir}/birelation/{threshold}_{dilation_iter}"
# result_dir = f"result/lenet/test"
images_per_class = 1000
attack_name = "FGSM"
attacks = {
"FGSM": [FGSM],
"BIM": [IterativeGradientSignAttack],
"JSMA": [SaliencyMapAttack],
"DeepFool": [DeepFoolAttack],
# "DeepFool_full": [DeepFoolAttack, dict(subsample=None)],
# "CWL2": [CarliniL2],
}
# DeepFool will shut down when num_gpus < 0.2
num_gpus = 0.2
overlap_fn = calc_trace_side_overlap
per_channel = False
lenet_mnist_class_trace = class_trace(
trace_name,
model_config=LENET,
data_config=data_config,
)
graph = LENET.network_class.graph().load()
def reconstruct_edge_from_trace(
trace,
graph,
node_name,
key = TraceKey.EDGE,
):
attrs = trace.nodes[node_name]
op = graph.op(graph.id(node_name))
if key not in attrs:
return None
else:
attr = attrs[key]
edge = TraceKey.to_array(attr)
return edge
def reconstruct_point_from_trace_contrib(
trace,
graph,
node_name,
key = TraceKey.POINT,
):
attrs = trace.nodes[node_name]
def to_bitmap(shape, attr, contrib):
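        # Mark positively contributing points with 1 and negatively
        # contributing points with -1 in the bitmap.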
mask = np.zeros(np.prod(shape), dtype=np.int8)
pos_attr = attr[contrib > 0]
mask[TraceKey.to_array(pos_attr)] = 1
neg_attr = attr[contrib < 0]
mask[TraceKey.to_array(neg_attr)] = -1
return mask.reshape(shape)
if key in attrs:
return to_bitmap(attrs[key + "_shape"], attrs[key], attrs[TraceKey.POINT_CONTRIB])
else:
for attr_name, attr in attrs.items():
if attr_name.startswith(TraceKey.POINT + ".") and attr is not None:
return to_bitmap(attrs[TraceKey.POINT_SHAPE], attr)
RuntimeError(f"Point key not found in {node_name}")
def reconstruct_point_from_trace(
trace,
graph,
node_name,
key = TraceKey.POINT,
):
attrs = trace.nodes[node_name]
def to_bitmap(shape, attr):
mask = np.zeros(np.prod(shape), dtype=np.int8)
mask[TraceKey.to_array(attr)] = 1
return mask.reshape(shape)
if key in attrs:
return to_bitmap(attrs[key + "_shape"], attrs[key])
else:
for attr_name, attr in attrs.items():
if attr_name.startswith(TraceKey.POINT + ".") and attr is not None:
return to_bitmap(attrs[TraceKey.POINT_SHAPE], attr)
RuntimeError(f"Point key not found in {node_name}")
def reconstruct_weight_from_trace_contrib(
trace,
graph,
node_name,
key = TraceKey.WEIGHT,
):
attrs = trace.nodes[node_name]
def to_bitmap(shape, attr, contrib):
mask = np.zeros(np.prod(shape), dtype=np.int8)
mask[TraceKey.to_array(attr)] = contrib
return mask.reshape(shape)
if key in attrs:
return to_bitmap(attrs[key + "_shape"], attrs[key], attrs[TraceKey.WEIGHT_CONTRIB])
else:
RuntimeError(f"Weight key not found in {node_name}")
def reconstruct_weight_from_trace(
trace,
graph,
node_name,
key = TraceKey.WEIGHT,
):
attrs = trace.nodes[node_name]
def to_bitmap(shape, attr):
mask = np.zeros(np.prod(shape), dtype=np.int8)
mask[TraceKey.to_array(attr)] = 1
return mask.reshape(shape)
if key in attrs:
return to_bitmap(attrs[key + "_shape"], attrs[key])
else:
RuntimeError(f"Weight key not found in {node_name}")
def reconstruct_point_fn(
trace,
):
node_names = []
key = TraceKey.POINT
for attr_name, attr in trace.nodes.items():
if key in attr:
node_names.append(attr_name)
point_dict = {}
for node_name in [
"conv2d/Relu:0",
"conv2d_1/Relu:0",
"dense/BiasAdd:0",
"dense_1/BiasAdd:0",
]:
point_dict[node_name] = reconstruct_point_from_trace(
trace,
graph,
node_name,
)
# print(node_name, point_dict[node_name].shape)
return point_dict
def reconstruct_weight_fn(
trace,
):
weight_dict = {}
for node_name in [
"conv2d/Conv2D",
"conv2d_1/Conv2D",
]:
weight = reconstruct_weight_from_trace(
trace,
graph,
node_name,
)
weight = weight.reshape(-1, weight.shape[-2], weight.shape[-1])
weight_dict[node_name] = weight
return weight_dict
reconstruct_edge_fn = partial(
reconstruct_edge_from_trace,
graph = graph,
key = TraceKey.EDGE
)
|
py | 1a2ff7d6ee544f38724df5d47c67ff7c9424e404 | """
instabot example
Workflow:
Like the media of a user's followers, by user_id.
"""
import argparse
import os
import sys
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
parser.add_argument('users', type=str, nargs='+', help='users')
args = parser.parse_args()
bot = Bot()
bot.login(username=args.u, password=args.p,
proxy=args.proxy)
for username in args.users:
bot.like_followers(username, nlikes=3)
|
py | 1a2ff90b8b9115aeed04b0e1d7443a0af43ffa46 | from .flyweight import Flyweight
|
py | 1a2ff916014c9c622853ba156b2f225dc12f0362 | # -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import json
import urllib.parse
import aiohttp
from aiohttp import web
from foglamp.common import utils
from foglamp.common import logger
from foglamp.common.service_record import ServiceRecord
from foglamp.common.storage_client.exceptions import StorageServerError
from foglamp.common.configuration_manager import ConfigurationManager
from foglamp.services.core import connect
from foglamp.services.core.service_registry.service_registry import ServiceRegistry
from foglamp.services.core.service_registry import exceptions as service_registry_exceptions
from foglamp.common.audit_logger import AuditLogger
__author__ = "Amarendra K Sinha"
__copyright__ = "Copyright (c) 2018 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_help = """
-------------------------------------------------------------------------------
| GET | /foglamp/notification/plugin |
| GET POST PUT DELETE | /foglamp/notification |
-------------------------------------------------------------------------------
"""
_logger = logger.setup()
NOTIFICATION_TYPE = ["one shot", "retriggered", "toggled"]
async def get_plugin(request):
""" GET lists of rule plugins and delivery plugins
:Example:
curl -X GET http://localhost:8081/foglamp/notification/plugin
"""
try:
notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)
_address, _port = notification_service[0]._address, notification_service[0]._port
except service_registry_exceptions.DoesNotExist:
raise web.HTTPNotFound(reason="No Notification service available.")
try:
url = 'http://{}:{}/notification/rules'.format(_address, _port)
rule_plugins = json.loads(await _hit_get_url(url))
url = 'http://{}:{}/notification/delivery'.format(_address, _port)
delivery_plugins = json.loads(await _hit_get_url(url))
except Exception as ex:
raise web.HTTPInternalServerError(reason=ex)
else:
return web.json_response({'rules': rule_plugins, 'delivery': delivery_plugins})
async def get_type(request):
""" GET the list of available notification types
:Example:
curl -X GET http://localhost:8081/foglamp/notification/type
"""
return web.json_response({'notification_type': NOTIFICATION_TYPE})
async def get_notification(request):
""" GET an existing notification
:Example:
curl -X GET http://localhost:8081/foglamp/notification/<notification_name>
"""
try:
notif = request.match_info.get('notification_name', None)
if notif is None:
raise ValueError("Notification name is required.")
notification = {}
storage = connect.get_storage_async()
config_mgr = ConfigurationManager(storage)
notification_config = await config_mgr._read_category_val(notif)
if notification_config:
rule_config = await config_mgr._read_category_val("rule{}".format(notif))
delivery_config = await config_mgr._read_category_val("delivery{}".format(notif))
notification = {
"name": notification_config['name']['value'],
"description": notification_config['description']['value'],
"rule": notification_config['rule']['value'],
"ruleConfig": rule_config,
"channel": notification_config['channel']['value'],
"deliveryConfig": delivery_config,
"notificationType": notification_config['notification_type']['value'],
"enable": notification_config['enable']['value'],
}
else:
raise ValueError("The Notification: {} does not exist.".format(notif))
except ValueError as ex:
raise web.HTTPBadRequest(reason=str(ex))
except Exception as ex:
raise web.HTTPInternalServerError(reason=ex)
else:
return web.json_response({'notification': notification})
async def get_notifications(request):
""" GET list of notifications
:Example:
curl -X GET http://localhost:8081/foglamp/notification
"""
try:
storage = connect.get_storage_async()
config_mgr = ConfigurationManager(storage)
all_notifications = await config_mgr._read_all_child_category_names("Notifications")
notifications = []
for notification in all_notifications:
notification_config = await config_mgr._read_category_val(notification['child'])
notification = {
"name": notification_config['name']['value'],
"rule": notification_config['rule']['value'],
"channel": notification_config['channel']['value'],
"notificationType": notification_config['notification_type']['value'],
"enable": notification_config['enable']['value'],
}
notifications.append(notification)
except Exception as ex:
raise web.HTTPInternalServerError(reason=ex)
else:
return web.json_response({'notifications': notifications})
async def post_notification(request):
"""
Create a new notification to run a specific plugin
:Example:
curl -X POST http://localhost:8081/foglamp/notification -d '{"name": "Test Notification", "description":"Test Notification", "rule": "threshold", "channel": "email", "notification_type": "one shot", "enabled": false}'
curl -X POST http://localhost:8081/foglamp/notification -d '{"name": "Test Notification", "description":"Test Notification", "rule": "threshold", "channel": "email", "notification_type": "one shot", "enabled": false, "rule_config": {}, "delivery_config": {}}'
"""
try:
notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)
_address, _port = notification_service[0]._address, notification_service[0]._port
except service_registry_exceptions.DoesNotExist:
raise web.HTTPNotFound(reason="No Notification service available.")
try:
data = await request.json()
if not isinstance(data, dict):
raise ValueError('Data payload must be a valid JSON')
name = data.get('name', None)
description = data.get('description', None)
rule = data.get('rule', None)
channel = data.get('channel', None)
notification_type = data.get('notification_type', None)
enabled = data.get('enabled', None)
rule_config = data.get('rule_config', {})
delivery_config = data.get('delivery_config', {})
if name is None or name.strip() == "":
raise ValueError('Missing name property in payload.')
if description is None:
raise ValueError('Missing description property in payload.')
if rule is None:
raise ValueError('Missing rule property in payload.')
if channel is None:
raise ValueError('Missing channel property in payload.')
if notification_type is None:
raise ValueError('Missing notification_type property in payload.')
if utils.check_reserved(name) is False:
raise ValueError('Invalid name property in payload.')
if utils.check_reserved(rule) is False:
raise ValueError('Invalid rule property in payload.')
if utils.check_reserved(channel) is False:
raise ValueError('Invalid channel property in payload.')
if notification_type not in NOTIFICATION_TYPE:
raise ValueError('Invalid notification_type property in payload.')
if enabled is not None:
if enabled not in ['true', 'false', True, False]:
raise ValueError('Only "true", "false", true, false are allowed for value of enabled.')
is_enabled = "true" if ((type(enabled) is str and enabled.lower() in ['true']) or (
(type(enabled) is bool and enabled is True))) else "false"
storage = connect.get_storage_async()
config_mgr = ConfigurationManager(storage)
curr_config = await config_mgr.get_category_all_items(name)
if curr_config is not None:
raise ValueError("A Category with name {} already exists.".format(name))
try:
# Get default config for rule and channel plugins
url = '{}/plugin'.format(request.url)
try:
# When authentication is mandatory we need to pass token in request header
auth_token = request.token
except AttributeError:
auth_token = None
list_plugins = json.loads(await _hit_get_url(url, auth_token))
r = list(filter(lambda rules: rules['name'] == rule, list_plugins['rules']))
c = list(filter(lambda channels: channels['name'] == channel, list_plugins['delivery']))
if len(r) == 0 or len(c) == 0: raise KeyError
rule_plugin_config = r[0]['config']
delivery_plugin_config = c[0]['config']
except KeyError:
raise ValueError("Invalid rule plugin {} and/or delivery plugin {} supplied.".format(rule, channel))
# Verify if rule_config contains valid keys
if rule_config != {}:
for k, v in rule_config.items():
if k not in rule_plugin_config:
raise ValueError("Invalid key {} in rule_config {} supplied for plugin {}.".format(k, rule_config, rule))
# Verify if delivery_config contains valid keys
if delivery_config != {}:
for k, v in delivery_config.items():
if k not in delivery_plugin_config:
raise ValueError(
"Invalid key {} in delivery_config {} supplied for plugin {}.".format(k, delivery_config, channel))
# First create templates for notification and rule, channel plugins
post_url = 'http://{}:{}/notification/{}'.format(_address, _port, urllib.parse.quote(name))
await _hit_post_url(post_url) # Create Notification template
post_url = 'http://{}:{}/notification/{}/rule/{}'.format(_address, _port, urllib.parse.quote(name),
urllib.parse.quote(rule))
await _hit_post_url(post_url) # Create Notification rule template
post_url = 'http://{}:{}/notification/{}/delivery/{}'.format(_address, _port, urllib.parse.quote(name),
urllib.parse.quote(channel))
await _hit_post_url(post_url) # Create Notification delivery template
# Create configurations
notification_config = {
"description": description,
"rule": rule,
"channel": channel,
"notification_type": notification_type,
"enable": is_enabled,
}
await _update_configurations(config_mgr, name, notification_config, rule_config, delivery_config)
audit = AuditLogger(storage)
await audit.information('NTFAD', {"name": name})
except ValueError as ex:
raise web.HTTPBadRequest(reason=str(ex))
except Exception as e:
raise web.HTTPInternalServerError(reason=str(e))
else:
return web.json_response({'result': "Notification {} created successfully".format(name)})
class NotFoundError(Exception):
pass
async def put_notification(request):
"""
Update an existing notification
:Example:
curl -X PUT http://localhost:8081/foglamp/notification/<notification_name> -d '{"description":"Test Notification modified"}'
curl -X PUT http://localhost:8081/foglamp/notification/<notification_name> -d '{"rule": "threshold", "channel": "email"}'
curl -X PUT http://localhost:8081/foglamp/notification/<notification_name> -d '{"notification_type": "one shot", "enabled": false}'
curl -X PUT http://localhost:8081/foglamp/notification/<notification_name> -d '{"enabled": false}'
curl -X PUT http://localhost:8081/foglamp/notification/<notification_name> -d '{"description":"Test Notification", "rule": "threshold", "channel": "email", "notification_type": "one shot", "enabled": false, "rule_config": {}, "delivery_config": {}}'
"""
try:
notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)
_address, _port = notification_service[0]._address, notification_service[0]._port
except service_registry_exceptions.DoesNotExist:
raise web.HTTPNotFound(reason="No Notification service available.")
try:
notif = request.match_info.get('notification_name', None)
if notif is None:
raise ValueError("Notification name is required for updation.")
# TODO: Stop notification before update
data = await request.json()
if not isinstance(data, dict):
raise ValueError('Data payload must be a valid JSON')
description = data.get('description', None)
rule = data.get('rule', None)
channel = data.get('channel', None)
notification_type = data.get('notification_type', None)
enabled = data.get('enabled', None)
rule_config = data.get('rule_config', {})
delivery_config = data.get('delivery_config', {})
if utils.check_reserved(notif) is False:
raise ValueError('Invalid notification instance name.')
if rule is not None and utils.check_reserved(rule) is False:
raise ValueError('Invalid rule property in payload.')
if channel is not None and utils.check_reserved(channel) is False:
raise ValueError('Invalid channel property in payload.')
if notification_type is not None and notification_type not in NOTIFICATION_TYPE:
raise ValueError('Invalid notification_type property in payload.')
if enabled is not None:
if enabled not in ['true', 'false', True, False]:
raise ValueError('Only "true", "false", true, false are allowed for value of enabled.')
is_enabled = "true" if ((type(enabled) is str and enabled.lower() in ['true']) or (
(type(enabled) is bool and enabled is True))) else "false"
storage = connect.get_storage_async()
config_mgr = ConfigurationManager(storage)
current_config = await config_mgr._read_category_val(notif)
if current_config is None:
raise NotFoundError('No {} notification instance found'.format(notif))
rule_changed = True if rule is not None and rule != current_config['rule']['value'] else False
channel_changed = True if channel is not None and channel != current_config['channel']['value'] else False
try:
# Get default config for rule and channel plugins
url = str(request.url)
url_parts = url.split("/foglamp/notification")
url = '{}/foglamp/notification/plugin'.format(url_parts[0])
try:
# When authentication is mandatory we need to pass token in request header
auth_token = request.token
except AttributeError:
auth_token = None
list_plugins = json.loads(await _hit_get_url(url, auth_token))
search_rule = rule if rule_changed else current_config['rule']['value']
r = list(filter(lambda rules: rules['name'] == search_rule, list_plugins['rules']))
if len(r) == 0:
raise KeyError
rule_plugin_config = r[0]['config']
search_channel = channel if channel_changed else current_config['channel']['value']
c = list(filter(lambda channels: channels['name'] == search_channel, list_plugins['delivery']))
if len(c) == 0:
raise KeyError
delivery_plugin_config = c[0]['config']
except KeyError:
raise ValueError("Invalid rule plugin:{} and/or delivery plugin:{} supplied.".format(rule, channel))
# Verify if rule_config contains valid keys
if rule_config != {}:
for k, v in rule_config.items():
if k not in rule_plugin_config:
raise ValueError("Invalid key:{} in rule plugin:{}".format(k, rule_plugin_config))
# Verify if delivery_config contains valid keys
if delivery_config != {}:
for k, v in delivery_config.items():
if k not in delivery_plugin_config:
raise ValueError(
"Invalid key:{} in delivery plugin:{}".format(k, delivery_plugin_config))
if rule_changed: # A new rule has been supplied
category_desc = rule_plugin_config['plugin']['description']
category_name = "rule{}".format(notif)
await config_mgr.create_category(category_name=category_name,
category_description=category_desc,
category_value=rule_plugin_config,
keep_original_items=False)
if channel_changed: # A new delivery has been supplied
category_desc = delivery_plugin_config['plugin']['description']
category_name = "delivery{}".format(notif)
await config_mgr.create_category(category_name=category_name,
category_description=category_desc,
category_value=delivery_plugin_config,
keep_original_items=False)
notification_config = {}
if description is not None:
notification_config.update({"description": description})
if rule is not None:
notification_config.update({"rule": rule})
if channel is not None:
notification_config.update({"channel": channel})
if notification_type is not None:
notification_config.update({"notification_type": notification_type})
if enabled is not None:
notification_config.update({"enable": is_enabled})
await _update_configurations(config_mgr, notif, notification_config, rule_config, delivery_config)
except ValueError as e:
raise web.HTTPBadRequest(reason=str(e))
except NotFoundError as e:
raise web.HTTPNotFound(reason=str(e))
except Exception as ex:
raise web.HTTPInternalServerError(reason=str(ex))
else:
# TODO: Start notification after update
return web.json_response({'result': "Notification {} updated successfully".format(notif)})
async def delete_notification(request):
""" Delete an existing notification
:Example:
curl -X DELETE http://localhost:8081/foglamp/notification/<notification_name>
"""
try:
notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)
_address, _port = notification_service[0]._address, notification_service[0]._port
except service_registry_exceptions.DoesNotExist:
raise web.HTTPNotFound(reason="No Notification service available.")
try:
notif = request.match_info.get('notification_name', None)
if notif is None:
raise ValueError("Notification name is required for deletion.")
# Stop & remove notification
url = 'http://{}:{}/notification/{}'.format(_address, _port, urllib.parse.quote(notif))
notification = json.loads(await _hit_delete_url(url))
        # Remove the child categories for the rule and delivery plugins, then remove the category for the notification itself
storage = connect.get_storage_async()
config_mgr = ConfigurationManager(storage)
await config_mgr.delete_category_and_children_recursively(notif)
audit = AuditLogger(storage)
await audit.information('NTFDL', {"name": notif})
except ValueError as ex:
raise web.HTTPBadRequest(reason=str(ex))
except Exception as ex:
raise web.HTTPInternalServerError(reason=str(ex))
else:
return web.json_response({'result': 'Notification {} deleted successfully.'.format(notif)})
async def _hit_get_url(get_url, token=None):
headers = {"Authorization": token} if token else None
try:
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
async with session.get(get_url, headers=headers) as resp:
status_code = resp.status
jdoc = await resp.text()
if status_code not in range(200, 209):
_logger.error("Error code: %d, reason: %s, details: %s, url: %s", resp.status, resp.reason, jdoc,
get_url)
raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc)
except Exception:
raise
else:
return jdoc
async def _hit_post_url(post_url, data=None):
try:
async with aiohttp.ClientSession() as session:
async with session.post(post_url, data=data) as resp:
status_code = resp.status
jdoc = await resp.text()
if status_code not in range(200, 209):
_logger.error("Error code: %d, reason: %s, details: %s, url: %s", resp.status, resp.reason, jdoc,
post_url)
raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc)
except Exception:
raise
else:
return jdoc
async def _update_configurations(config_mgr, name, notification_config, rule_config, delivery_config):
try:
# Update main notification
if notification_config != {}:
await config_mgr.update_configuration_item_bulk(name, notification_config)
# Replace rule configuration
if rule_config != {}:
category_name = "rule{}".format(name)
await config_mgr.update_configuration_item_bulk(category_name, rule_config)
# Replace delivery configuration
if delivery_config != {}:
category_name = "delivery{}".format(name)
await config_mgr.update_configuration_item_bulk(category_name, delivery_config)
except Exception as ex:
_logger.exception("Failed to update notification configuration. %s", str(ex))
raise web.HTTPInternalServerError(reason='Failed to update notification configuration. {}'.format(ex))
async def _hit_delete_url(delete_url, data=None):
try:
async with aiohttp.ClientSession() as session:
async with session.delete(delete_url, data=data) as resp:
status_code = resp.status
jdoc = await resp.text()
if status_code not in range(200, 209):
_logger.error("Error code: %d, reason: %s, details: %s, url: %s",
resp.status,
resp.reason,
jdoc,
delete_url)
raise StorageServerError(code=resp.status,
reason=resp.reason,
error=jdoc)
except Exception:
raise
else:
return jdoc
|
py | 1a2ff9760b7949b5260393f7e1f6e78efb49f42a | import csv
import os
import copy
import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.http.cookies import CookieJar
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class AmazonSpider(BaseSpider):
name = 'bosch-german-diy-amazon.de'
allowed_domains = ['amazon.de']
user_agent = 'spd'
def start_requests(self):
with open(os.path.join(HERE, 'bosch_german_diy.csv')) as f:
reader = csv.DictReader(f)
for row in reader:
url = row['amazon']
if url:
yield Request(url, meta={'sku': row['sku']}, callback=self.parse_product)
def parse(self, response):
pass
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
loader = ProductLoader(item=Product(), selector=hxs)
loader.add_value('url', response.url)
loader.add_xpath('name', u'//div[@class="buying"]/h1[@class="parseasinTitle"]/span[@id="btAsinTitle"]/text()')
price = hxs.select(u'//div[@class="buying"]/table[@class="product"]//b[@class="priceLarge"]/text()').extract()[0]
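        # Amazon.de prices use a comma as the decimal separator; normalise it to a dot.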
loader.add_value('price', price.replace(',', '.'))
loader.add_value('sku', response.meta['sku'])
yield loader.load_item()
|
py | 1a2ff98e6dbfbe12703bc881c2266d7e41cc44ee | # dataset settings
ann_type = 'bast_eval' # * change accordingly
num_classes = 9 if ann_type == 'bast_base' else 42
# model settings
model = dict(
type='Recognizer3D',
backbone=dict(
type='ResNet3dSlowOnly',
depth=50,
pretrained=None,
in_channels=17,
base_channels=32,
num_stages=3,
out_indices=(2, ),
stage_blocks=(4, 6, 3),
conv1_stride_s=1,
pool1_stride_s=1,
inflate=(0, 1, 1),
spatial_strides=(2, 2, 2),
temporal_strides=(1, 1, 2),
dilations=(1, 1, 1)),
cls_head=dict(
type='I3DHead',
in_channels=512,
num_classes=num_classes,
spatial_type='avg',
dropout_ratio=0.5),
train_cfg=dict(),
test_cfg=dict(average_clips='prob'))
# dataset settings
dataset_type = 'PoseDataset'
ann_file_train = f'data/skeleton/{ann_type}/bast_train.pkl'
ann_file_val = f'data/skeleton/{ann_type}/bast_val.pkl'
ann_file_test = f'data/skeleton/{ann_type}/bast_test.pkl'
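# Keypoint indices for the left/right body joints (COCO-17 layout), used when flipping poses.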
left_kp = [1, 3, 5, 7, 9, 11, 13, 15]
right_kp = [2, 4, 6, 8, 10, 12, 14, 16]
train_pipeline = [
dict(type='UniformSampleFrames', clip_len=54),
dict(type='PoseDecode'),
dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),
dict(type='Resize', scale=(-1, 64)),
dict(type='RandomResizedCrop', area_range=(0.56, 1.0)),
dict(type='Resize', scale=(56, 56), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5, left_kp=left_kp, right_kp=right_kp),
dict(
type='GeneratePoseTarget',
sigma=0.6,
use_score=True,
with_kp=True,
with_limb=False),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='UniformSampleFrames', clip_len=54, num_clips=1, test_mode=True),
dict(type='PoseDecode'),
dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),
dict(type='Resize', scale=(-1, 64)),
dict(type='CenterCrop', crop_size=64),
dict(
type='GeneratePoseTarget',
sigma=0.6,
use_score=True,
with_kp=True,
with_limb=False),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='UniformSampleFrames', clip_len=54, num_clips=10, test_mode=True),
dict(type='PoseDecode'),
dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),
dict(type='Resize', scale=(-1, 64)),
dict(type='CenterCrop', crop_size=64),
dict(
type='GeneratePoseTarget',
sigma=0.6,
use_score=True,
with_kp=True,
with_limb=False,
double=True,
left_kp=left_kp,
right_kp=right_kp),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=6,
workers_per_gpu=1,
test_dataloader=dict(videos_per_gpu=1),
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix='',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix='',
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix='',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD', lr=0.0094, momentum=0.9,
weight_decay=0.0003) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', by_epoch=False, min_lr=0)
total_epochs = 280
checkpoint_config = dict(interval=10)
workflow = [('train', 10)]
evaluation = dict(
interval=5,
metrics=['top_k_accuracy', 'mean_class_accuracy'],
topk=(1, 2, 3, 4, 5))
eval_config = dict(
metric_options=dict(
top_k_accuracy=dict(topk=(1, 2, 3, 4, 5))),)
log_config = dict(
interval=20, hooks=[
dict(type='TextLoggerHook'),
])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = ('https://download.openmmlab.com/mmaction/skeleton/posec3d/'
'slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint/'
'slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth')
resume_from = None
find_unused_parameters = False
|
py | 1a2ffa3affc0e93e2021cfca989b2e8c5e1909ae | from lark import Transformer, v_args, Token
from synthesis.synthesizer.dplyr_to_pd.code_analysis.nodes import *
from abc import ABC, abstractmethod
class DplyrTransformer(Transformer):
""" Lark built in visitor for grammar construction rules """
@v_args(inline=True)
def identifier_node(self, arg):
return IdentifierNode(arg)
@v_args(inline=True)
def single_block_node(self, arg):
nodes = [arg]
return BlockNode(nodes)
@v_args(inline=True)
    def block_node(self, arg, other_block: BlockNode):
        nodes = [arg] + other_block.lines
return BlockNode(nodes)
@v_args(inline=True)
def single_sequence_node(self, arg):
nodes = [arg]
return SequenceNode(nodes)
@v_args(inline=True)
def sequence_node(self, arg, other_seq: SequenceNode):
nodes = [arg] + other_seq.arguments
return SequenceNode(nodes)
@v_args(inline=True)
def function_node(self, name: Token, args: SequenceNode):
return FunctionNode(str(name), args)
@v_args(inline=True)
def collapse_function_node(self, arg: Tree, fn: FunctionNode):
return FunctionNode(str(fn.name), self.sequence_node(arg, fn.arguments))
@v_args(inline=True)
def predicate_node(self, arg: Token, op: Token, expr: Node, lc: Token, rest: Node):
visitor = RWriter()
args = [str(arg), str(op), expr.accept(visitor), str(lc), rest.accept(visitor)]
return PredicateNode(' '.join(args))
@v_args(inline=True)
def single_predicate_node(self, arg: Token, op: Token, expr: Node):
visitor = RWriter()
args = [str(arg), str(op), expr.accept(visitor)]
return PredicateNode(' '.join(args))
@v_args(inline=True)
def empty_node(self):
return EmptyNode()
@v_args(inline=True)
def assignment_node(self, lvalue: IdentifierNode, expr: Node):
return AssignmentNode(lvalue, expr)
@v_args(inline=True)
def rvalue_node(self, lvalue: IdentifierNode):
return RValueNode(lvalue)
@v_args(inline=True)
def literal_node(self, lit: Token):
return LiteralNode(lit)
@v_args(inline=True)
def collapse(self, arg):
return arg
class Visitor(ABC):
"""Generic visitor used to the traverse the AST"""
@abstractmethod
def visit_block_node(self, sq: BlockNode):
raise NotImplementedError
@abstractmethod
def visit_function_node(self, fn: FunctionNode):
raise NotImplementedError
@abstractmethod
def visit_identifier_node(self, ide: IdentifierNode):
raise NotImplementedError
@abstractmethod
def visit_sequence_node(self, sq: SequenceNode):
raise NotImplementedError
@abstractmethod
def visit_predicate_node(self, pr: PredicateNode):
raise NotImplementedError
@abstractmethod
def visit_empty_node(self, pr: EmptyNode):
raise NotImplementedError
@abstractmethod
def visit_assignment_node(self, an: AssignmentNode):
raise NotImplementedError
@abstractmethod
def visit_right_value_node(self, rv: RValueNode):
raise NotImplementedError
@abstractmethod
def visit_literal_node(self, rv: LiteralNode):
raise NotImplementedError
class RWriter(Visitor):
"""Visitor used to write R"""
def visit_block_node(self, sq: BlockNode):
args = []
for arg in sq.lines:
args += [arg.accept(self)]
return '\n'.join(args)
def visit_function_node(self, fn: FunctionNode):
return f'{fn.name}({fn.arguments.accept(self)})'
def visit_identifier_node(self, ide: IdentifierNode):
return ide.name
def visit_sequence_node(self, sq: SequenceNode):
args = []
for arg in sq.arguments:
args += [arg.accept(self)]
return ', '.join(args)
def visit_predicate_node(self, pr: PredicateNode):
return pr.predicate
def visit_empty_node(self, pr: EmptyNode):
return ''
def visit_assignment_node(self, an: AssignmentNode):
return f'{an.left_value.accept(self)} <- {an.right_value.accept(self)}'
def visit_right_value_node(self, rv: RValueNode):
return rv.value.accept(self)
def visit_literal_node(self, lit: LiteralNode):
return lit.value
class DependencyFinder(Visitor):
""" For each line find its depedencies on inputs"""""
def __init__(self, n_inputs: int):
self.count = 0
self.left_values = {IdentifierNode(f'input{i+1}'): IdentifierNode(f'input{i+1}') for i in range(n_inputs)}
self.fn_dependencies = {}
self.new_assignments = {}
def visit_block_node(self, sq: BlockNode):
for line in sq.lines:
line.accept(self)
return self.fn_dependencies
def visit_function_node(self, fn: FunctionNode):
result = fn.arguments.accept(self)
return result
def visit_identifier_node(self, ide: IdentifierNode):
dep = next(filter(lambda x: x == ide, self.left_values), None)
if dep is not None:
return [self.left_values[dep]]
return []
def visit_sequence_node(self, sq: SequenceNode):
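        # Hoist nested function calls that depend on the inputs into fresh
        # temporary assignments (tmp_N) and record their dependencies.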
dependencies = []
for i in range(len(sq.arguments)):
if isinstance(sq.arguments[i], FunctionNode) and sq.arguments[i] not in self.new_assignments:
if sq.arguments[i].accept(self):
new_id = IdentifierNode(f'tmp_{self.count}')
an = AssignmentNode(new_id, sq.arguments[i])
self.count += 1
self.left_values[an.left_value] = an
self.new_assignments[sq.children[i]] = new_id
self.fn_dependencies[an] = an.right_value.accept(self)
sq.replace_arg(i, new_id)
dependencies += sq.arguments[i].accept(self)
return dependencies
def visit_predicate_node(self, pr: PredicateNode):
return []
def visit_empty_node(self, pr: EmptyNode):
return []
def visit_assignment_node(self, an: AssignmentNode):
self.left_values[an.left_value] = an
self.fn_dependencies[an] = an.right_value.accept(self)
def visit_right_value_node(self, rv: RValueNode):
return rv.value.accept(self)
def visit_literal_node(self, lit: LiteralNode):
return []
|
py | 1a2ffa6ea157d8f454156833627a835f6464f06e | import numpy as np
import pandas as pd
from napari.qt.threading import thread_worker
from skimage.measure import regionprops_table
from imlib.pandas.misc import initialise_df
from imlib.general.list import unique_elements_lists
from brainreg_segment.atlas.utils import lateralise_atlas_image
@thread_worker
def region_analysis(
label_layers,
atlas_layer_image,
atlas,
regions_directory,
output_csv_file=None,
volumes=True,
summarise=True,
):
regions_directory.mkdir(parents=True, exist_ok=True)
if volumes:
print("Calculating region volume distribution")
print(f"Saving summary volumes to: {regions_directory}")
for label_layer in label_layers:
analyse_region_brain_areas(
label_layer,
atlas_layer_image,
regions_directory,
atlas,
)
if summarise:
if output_csv_file is not None:
print("Summarising regions")
summarise_brain_regions(
label_layers, output_csv_file, atlas.resolution
)
print("Finished!\n")
def summarise_brain_regions(label_layers, filename, atlas_resolution):
summaries = []
for label_layer in label_layers:
summaries.append(summarise_single_brain_region(label_layer))
result = pd.concat(summaries)
# TODO: use atlas.space to make these more intuitive
volume_header = "volume_mm3"
length_columns = [
"axis_0_min_um",
"axis_1_min_um",
"axis_2_min_um",
"axis_0_max_um",
"axis_1_max_um",
"axis_2_max_um",
"axis_0_center_um",
"axis_1_center_um",
"axis_2_center_um",
]
result.columns = ["region"] + [volume_header] + length_columns
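    # Atlas resolution is in microns, so convert voxel counts to volumes in mm^3.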
voxel_volume_in_mm = np.prod(atlas_resolution) / (1000 ** 3)
result[volume_header] = result[volume_header] * voxel_volume_in_mm
for header in length_columns:
        for idx, dim in enumerate(atlas_resolution):
if header.startswith(f"axis_{idx}"):
scale = float(dim)
assert scale > 0
result[header] = result[header] * scale
result.to_csv(filename, index=False)
def summarise_single_brain_region(
label_layer,
ignore_empty=True,
properties_to_fetch=[
"area",
"bbox",
"centroid",
],
):
data = label_layer.data
if ignore_empty:
if data.sum() == 0:
return
regions_table = regionprops_table(data, properties=properties_to_fetch)
df = pd.DataFrame.from_dict(regions_table)
df.insert(0, "Region", label_layer.name)
return df
def analyse_region_brain_areas(
label_layer,
atlas_layer_data,
destination_directory,
atlas,
extension=".csv",
ignore_empty=True,
):
"""
:param label_layer: napari labels layer (with segmented regions)
:param ignore_empty: If True, don't analyse empty regions
"""
data = label_layer.data
if ignore_empty:
if data.sum() == 0:
return
name = label_layer.name
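    # Keep atlas structure IDs only for voxels inside the segmented region.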
masked_annotations = data.astype(bool) * atlas_layer_data
annotations_left, annotations_right = lateralise_atlas_image(
masked_annotations,
atlas.hemispheres,
left_hemisphere_value=atlas.left_hemisphere_value,
right_hemisphere_value=atlas.right_hemisphere_value,
)
unique_vals_left, counts_left = np.unique(
annotations_left, return_counts=True
)
unique_vals_right, counts_right = np.unique(
annotations_right, return_counts=True
)
voxel_volume_in_mm = np.prod(atlas.resolution) / (1000 ** 3)
df = initialise_df(
"structure_name",
"left_volume_mm3",
"left_percentage_of_total",
"right_volume_mm3",
"right_percentage_of_total",
"total_volume_mm3",
"percentage_of_total",
)
sampled_structures = unique_elements_lists(
list(unique_vals_left) + list(unique_vals_right)
)
total_volume_region = get_total_volume_regions(
unique_vals_left, unique_vals_right, counts_left, counts_right
)
for atlas_value in sampled_structures:
if atlas_value != 0:
try:
df = add_structure_volume_to_df(
df,
atlas_value,
atlas.structures,
unique_vals_left,
unique_vals_right,
counts_left,
counts_right,
voxel_volume_in_mm,
total_volume_voxels=total_volume_region,
)
except KeyError:
print(
f"Value: {atlas_value} is not in the atlas structure"
f" reference file. Not calculating the volume"
)
filename = destination_directory / (name + extension)
df.to_csv(filename, index=False)
def get_total_volume_regions(
unique_vals_left,
unique_vals_right,
counts_left,
counts_right,
):
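    # Drop the background label (0) from each hemisphere before summing voxel counts.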
zero_index_left = np.where(unique_vals_left == 0)[0][0]
counts_left = list(counts_left)
counts_left.pop(zero_index_left)
zero_index_right = np.where(unique_vals_right == 0)[0][0]
counts_right = list(counts_right)
counts_right.pop(zero_index_right)
return sum(counts_left + counts_right)
def add_structure_volume_to_df(
df,
atlas_value,
atlas_structures,
unique_vals_left,
unique_vals_right,
counts_left,
counts_right,
voxel_volume,
total_volume_voxels=None,
):
name = atlas_structures[atlas_value]["name"]
left_volume, left_percentage = get_volume_in_hemisphere(
atlas_value,
unique_vals_left,
counts_left,
total_volume_voxels,
voxel_volume,
)
right_volume, right_percentage = get_volume_in_hemisphere(
atlas_value,
unique_vals_right,
counts_right,
total_volume_voxels,
voxel_volume,
)
if total_volume_voxels is not None:
total_percentage = left_percentage + right_percentage
else:
total_percentage = 0
df = df.append(
{
"structure_name": name,
"left_volume_mm3": left_volume,
"left_percentage_of_total": left_percentage,
"right_volume_mm3": right_volume,
"right_percentage_of_total": right_percentage,
"total_volume_mm3": left_volume + right_volume,
"percentage_of_total": total_percentage,
},
ignore_index=True,
)
return df
def get_volume_in_hemisphere(
atlas_value, unique_vals, counts, total_volume_voxels, voxel_volume
):
try:
index = np.where(unique_vals == atlas_value)[0][0]
volume = counts[index] * voxel_volume
if total_volume_voxels is not None:
percentage = 100 * (counts[index] / total_volume_voxels)
else:
percentage = 0
except IndexError:
volume = 0
percentage = 0
return volume, percentage
|
py | 1a2ffb7fc92162f5f54956dbe55a174b83ff8a33 | import psycopg2
class Conn:
def __init__(self, connstr):
self.conn = psycopg2.connect(connstr)
self.setversion()
self.nexttmp = 0
def setversion(self):
cur = self.conn.cursor()
cur.execute("select version()")
verstr = cur.fetchone()
if "Greenplum Database 4" in verstr[0]:
self.ver = 4
elif "Greenplum Database 5" in verstr[0]:
self.ver = 5
else:
raise RuntimeError('Unknown Deepgreen Version')
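        # Cache a mapping from pg_type OID to type name for later lookups.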
self.typemap = {}
cur.execute("select oid, typname from pg_type")
rows = cur.fetchall()
for row in rows:
self.typemap[row[0]] = row[1]
cur.close()
self.conn.commit()
def close(self):
self.conn.close()
def next_tmpname(self):
self.nexttmp += 1
return "tmp_{0}".format(self.nexttmp)
def execute(self, sql):
cur = self.conn.cursor()
cur.execute(sql)
rows = cur.fetchall()
cur.close()
self.conn.commit()
return rows
def cursor(self, sql):
cur = self.conn.cursor()
cur.execute(sql)
return cur
if __name__ == '__main__':
conn = Conn("host=localhost user=ftian dbname=ftian")
print("Connected to deepgreen database, version is ", conn.ver)
|
py | 1a2ffbd7b7c07c33f20999ed51f3d64383f0f858 | import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="textsrc", parent_name="scattermapbox", **kwargs):
super(TextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
py | 1a2ffbd9964cf66f2b41f80870cc2a60b4cddbce | """
The most naive way of computing n^15 requires fourteen multiplications:
n × n × ... × n = n^15
But using a "binary" method you can compute it in six multiplications:
n × n = n^2
n^2 × n^2 = n^4
n^4 × n^4 = n^8
n^8 × n^4 = n^12
n^12 × n^2 = n^14
n^14 × n = n^15
However it is yet possible to compute it in only five multiplications:
n × n = n^2
n^2 × n = n^3
n^3 × n^3 = n^6
n^6 × n^6 = n^12
n^12 × n^3 = n^15
We shall define m(k) to be the minimum number of multiplications to compute n^k; for example m(15) = 5.
For 1 ≤ k ≤ 200, find ∑ m(k).
ans: 1582
"""
n = 200
class Done(Exception):
pass
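# Depth-limited DFS over addition chains: each new exponent is the sum of the
# chain's last exponent and some earlier exponent; steps[k] keeps the fewest
# multiplications seen for exponent k. The driver below deepens the limit
# until every exponent up to n has been reached.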
def combine(exponents, max_depth, steps):
if exponents[-1] > n or len(exponents) - 1 > max_depth:
return
try:
steps[exponents[-1]] = min(steps[exponents[-1]], len(exponents)-1)
except KeyError:
steps[exponents[-1]] = len(exponents)-1
if len(steps) == n:
raise Done
for i in range(len(exponents)):
exp = exponents[i]
exponents.append(exp + exponents[-1])
combine(exponents, max_depth, steps)
exponents.pop()
steps = {1:0}
try:
for depth in range(n): # need to iterate depths in order to stop early in combine()
combine([1], depth, steps)
#print(f"{depth} {len(steps)}")
except Done:
pass
#print(steps)
print(sum(( steps[k] for k in steps )))
|
py | 1a2ffc4dfbbfcd4c40a6f056c0aaac1ff112c9a9 | from typing import Optional
import logging
import boto3
from botocore.exceptions import ClientError
from kermes_infra.models import User
class UserAdapter:
def __init__(self, endpoint_url: str, table_name: str, logger: logging.Logger) -> None:
self.dynamodb = boto3.resource("dynamodb", endpoint_url=endpoint_url)
self.table = self.dynamodb.Table(table_name)
self.logger = logger
def get(self, user_id: str) -> Optional[User]:
try:
item = self.table.get_item(Key={"user_id": user_id})
return User.from_dynamo(item["Item"])
except ClientError:
self.logger.error(f"error while getting record from Dynamo: user_id {user_id}", exc_info=True)
return None
def put(self, user: User) -> bool:
try:
self.table.put_item(Item=user.to_dynamo())
return True
except ClientError:
self.logger.error(
f"error while writing record to Dynamo: user_id {user.user_id}",
exc_info=True,
)
return False
|
py | 1a2ffd14bee0c539d2c149abaa4f7937e9156b91 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class StorageAccountPaged(Paged):
"""
A paging container for iterating over a list of :class:`StorageAccount <azure.mgmt.storage.v2017_06_01.models.StorageAccount>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[StorageAccount]'}
}
def __init__(self, *args, **kwargs):
super(StorageAccountPaged, self).__init__(*args, **kwargs)
|
py | 1a2ffd6458b72da5981d8465dd861600cea84490 | ##
## File: utils.py
##
## Author: Schuyler Martin <[email protected]>
##
## Description: Python file that contains basic utility functions
##
from utils.macros import *
import sys
#### GLOBALS ####
#### FUNCTIONS ####
def printd(msg):
'''
Prints debugging messages if debugging is enabled
:param: msg Message to print
'''
if (DEBUG_MACRO):
print("DEBUG: " + msg)
def read_file(fd):
'''
Reads in the file, line by line
:param: fd Name of the file
:return: Contents of the file, as an array of line strings
'''
data = []
for line in open(fd):
data += [line]
return data
def write_file(data, fd):
'''
Writes to a file, line by line
:param: data Lines of the file to write
:param: fd Name of the file to write
'''
fptr = open(fd, 'w')
for line in data:
fptr.write(line)
fptr.close()
|
py | 1a2ffdce407417f63fd3d58bf33c86816e6173bc | """
Example script of constant use in Python. Constants should always be
capitalized to signify their significance. Because everything is an object
in Python and nothing can really be set as private, using visual
identifiers, such as capitalizing all letters in a name and starting function
names with an underscore, is important for script readability.
This script uses two constants, URL and STATE, and then prints out a string
using these constants for a state's download url (not a real url and doesn't
actually download anything).
"""
URL = 'https://www.statedata.com/'
STATE = 'MD'
print 'Downloading data from {}{}'.format(URL, STATE)
|
py | 1a2ffefd93504a899b3ed1bdacedc16dad3774b3 | #!/home/wecode/Documents/Django_Projects/NeighborhoodNews/virtual/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
py | 1a2fff6c7fe50c71c5d89283ce05e59433db0a38 | import argparse
import sys
import time
from typing import Optional, Union
from moonstreamdb.db import yield_db_session_ctx
from moonstreamdb.models import ESDEventSignature, ESDFunctionSignature
from sqlalchemy.orm import Session
import requests
CRAWL_URLS = {
"functions": "https://www.4byte.directory/api/v1/signatures/",
"events": "https://www.4byte.directory/api/v1/event-signatures/",
}
DB_MODELS = {
"functions": ESDFunctionSignature,
"events": ESDEventSignature,
}
def crawl_step(
db_session: Session,
crawl_url: str,
db_model: Union[ESDEventSignature, ESDFunctionSignature],
) -> Optional[str]:
attempt = 0
current_interval = 2
success = False
response: Optional[requests.Response] = None
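    # Retry the request up to 3 times, doubling the wait between attempts.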
while (not success) and attempt < 3:
attempt += 1
try:
response = requests.get(crawl_url)
response.raise_for_status()
success = True
except:
current_interval *= 2
time.sleep(current_interval)
if response is None:
print(f"Could not process URL: {crawl_url}", file=sys.stderr)
return None
page = response.json()
results = page.get("results", [])
rows = [
db_model(
id=row.get("id"),
text_signature=row.get("text_signature"),
hex_signature=row.get("hex_signature"),
created_at=row.get("created_at"),
)
for row in results
]
db_session.bulk_save_objects(rows)
db_session.commit()
return page.get("next")
def crawl(crawl_type: str, interval: float) -> None:
crawl_url: Optional[str] = CRAWL_URLS[crawl_type]
db_model = DB_MODELS[crawl_type]
with yield_db_session_ctx() as db_session:
while crawl_url is not None:
print(f"Crawling: {crawl_url}")
crawl_url = crawl_step(db_session, crawl_url, db_model)
time.sleep(interval)
def main():
parser = argparse.ArgumentParser(
description="Crawls function and event signatures from the Ethereum Signature Database (https://www.4byte.directory/)"
)
parser.add_argument(
"crawl_type",
choices=CRAWL_URLS,
help="Specifies whether to crawl function signatures or event signatures",
)
parser.add_argument(
"--interval",
type=float,
default=0.1,
help="Number of seconds to wait between requests to the Ethereum Signature Database API",
)
args = parser.parse_args()
crawl(args.crawl_type, args.interval)
if __name__ == "__main__":
main()
|
py | 1a2fffabca09fbc25104a88cc8bdbb8d1b43aded | import mock
import pytest
from os.path import abspath, dirname, join
import sys
from praw.models import (Button, ButtonWidget, Calendar, CommunityList,
CustomWidget, Menu, MenuLink, IDCard, Image,
ImageData, ImageWidget, ModeratorsWidget,
PostFlairWidget, Redditor, RulesWidget, Submenu,
Subreddit, TextArea, Widget)
from ... import IntegrationTest
if sys.version_info.major > 2:
basestring = str # pylint: disable=invalid-name
class TestButtonWidget(IntegrationTest):
@staticmethod
def image_path(name):
test_dir = abspath(dirname(sys.modules[__name__].__file__))
return join(test_dir, '..', '..', 'files', name)
def test_button_widget(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
button_widget = None
for widget in widgets.sidebar:
if isinstance(widget, ButtonWidget):
button_widget = widget
break
assert isinstance(button_widget, ButtonWidget)
assert len(button_widget) >= 1
assert all(isinstance(button, Button) for button in
button_widget.buttons)
assert button_widget == button_widget
assert button_widget.id == button_widget
assert button_widget in widgets.sidebar
assert button_widget[0].text
assert button_widget.shortName
assert hasattr(button_widget, 'description')
assert subreddit == button_widget.subreddit
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestButtonWidget.test_create_and_update_and_delete'):
styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}
my_image = widgets.mod.upload_image(self.image_path('test.png'))
buttons = [
{
'kind': 'text',
'text': 'View source',
'url': 'https://github.com/praw-dev/praw',
'color': '#FF0000',
'textColor': '#00FF00',
'fillColor': '#0000FF',
'hoverState': {
'kind': 'text',
'text': 'VIEW SOURCE!',
'color': '#FFFFFF',
'textColor': '#000000',
'fillColor': '#0000FF'
}
},
{
'kind': 'image',
'text': 'View documentation',
'linkUrl': 'https://praw.readthedocs.io',
'url': my_image,
'height': 200,
'width': 200,
'hoverState': {
'kind': 'image',
'url': my_image,
'height': 200,
'width': 200
}
},
{
'kind': 'text',
'text': '/r/redditdev',
'url': 'https://reddit.com/r/redditdev',
'color': '#000000',
'textColor': '#FF00FF',
'fillColor': '#005500'
}
]
widget = widgets.mod.add_button_widget(
'Things to click', 'Click some of these *cool* links!',
buttons, styles)
assert isinstance(widget, ButtonWidget)
assert len(widget) == 3
assert all(isinstance(item, Button) for item in widget)
assert widget.shortName == 'Things to click'
assert widget.description == 'Click some of these *cool* links!'
assert widget.styles == styles
assert widget[0].text == 'View source'
assert widget[0].url == 'https://github.com/praw-dev/praw'
assert widget[2].text == '/r/redditdev'
assert widget[2].url == 'https://reddit.com/r/redditdev'
assert widget[1].text == 'View documentation'
assert widget[1].linkUrl == 'https://praw.readthedocs.io'
assert widget[1].hoverState['kind'] == 'image'
assert widget[1].hoverState['height'] == 200
widgets.refresh() # the links are initially invalid
for new_widget in widgets.sidebar:
if new_widget == widget:
widget = new_widget
break
widget = widget.mod.update(shortName='New short name')
assert isinstance(widget, ButtonWidget)
assert len(widget) == 3
assert all(isinstance(item, Button) for item in widget)
assert widget.shortName == 'New short name'
assert widget.description == 'Click some of these *cool* links!'
assert widget.styles == styles
assert widget[0].text == 'View source'
assert widget[0].url == 'https://github.com/praw-dev/praw'
assert widget[2].text == '/r/redditdev'
assert widget[2].url == 'https://reddit.com/r/redditdev'
assert widget[1].text == 'View documentation'
assert widget[1].linkUrl == 'https://praw.readthedocs.io'
assert widget[1].hoverState['kind'] == 'image'
assert widget[1].hoverState['height'] == 200
buttons.reverse()
widget = widget.mod.update(buttons=buttons)
assert isinstance(widget, ButtonWidget)
assert len(widget) == 3
assert all(isinstance(item, Button) for item in widget)
assert widget.shortName == 'New short name'
assert widget.description == 'Click some of these *cool* links!'
assert widget.styles == styles
assert widget[0].text == '/r/redditdev'
assert widget[0].url == 'https://reddit.com/r/redditdev'
assert widget[2].text == 'View source'
assert widget[2].url == 'https://github.com/praw-dev/praw'
assert widget[1].text == 'View documentation'
assert widget[1].linkUrl == 'https://praw.readthedocs.io'
assert widget[1].hoverState['kind'] == 'image'
assert widget[1].hoverState['height'] == 200
widget.mod.delete()
class TestCalendar(IntegrationTest):
def test_calendar(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
calendar = None
for widget in widgets.sidebar:
if isinstance(widget, Calendar):
calendar = widget
break
assert isinstance(calendar, Calendar)
assert calendar == calendar
assert calendar.id == calendar
assert calendar in widgets.sidebar
assert isinstance(calendar.configuration, dict)
assert hasattr(calendar, 'requiresSync')
assert subreddit == calendar.subreddit
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestCalendar.test_create_and_update_and_delete'):
styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}
config = {'numEvents': 10,
'showDate': True,
'showDescription': False,
'showLocation': False,
'showTime': True,
'showTitle': True}
            cal_id = 'ccahu0rstno2jrvioq4ccffn78@group.calendar.google.com'
widget = widgets.mod.add_calendar('Upcoming Events', cal_id, True,
config, styles)
assert isinstance(widget, Calendar)
assert widget.shortName == 'Upcoming Events'
assert widget.googleCalendarId == 'ccahu0rstno2jrvioq4ccffn78@' \
'group.calendar.google.com'
assert widget.configuration == config
assert widget.styles == styles
widget = widget.mod.update(shortName='Past Events :(')
assert isinstance(widget, Calendar)
assert widget.shortName == 'Past Events :('
assert widget.googleCalendarId == 'ccahu0rstno2jrvioq4ccffn78@' \
'group.calendar.google.com'
assert widget.configuration == config
assert widget.styles == styles
widget.mod.delete()
class TestCommunityList(IntegrationTest):
def test_community_list(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
comm_list = None
for widget in widgets.sidebar:
if isinstance(widget, CommunityList):
comm_list = widget
break
assert isinstance(comm_list, CommunityList)
assert len(comm_list) >= 1
assert all(isinstance(subreddit, Subreddit) for subreddit in
comm_list)
assert comm_list == comm_list
assert comm_list.id == comm_list
assert comm_list in widgets.sidebar
assert comm_list.shortName
assert comm_list[0] in comm_list
assert subreddit == comm_list.subreddit
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestCommunityList.test_create_and_update_and_delete'):
styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}
subreddits = ['learnpython', self.reddit.subreddit('redditdev')]
widget = widgets.mod.add_community_list('My fav subs', subreddits,
styles)
assert isinstance(widget, CommunityList)
assert widget.shortName == 'My fav subs'
assert widget.styles == styles
assert self.reddit.subreddit('learnpython') in widget
assert 'redditdev' in widget
widget = widget.mod.update(shortName='My least fav subs :(',
data=['redesign'])
assert isinstance(widget, CommunityList)
assert widget.shortName == 'My least fav subs :('
assert widget.styles == styles
assert self.reddit.subreddit('redesign') in widget
widget.mod.delete()
class TestCustomWidget(IntegrationTest):
@staticmethod
def image_path(name):
test_dir = abspath(dirname(sys.modules[__name__].__file__))
return join(test_dir, '..', '..', 'files', name)
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestCustomWidget.test_create_and_update_and_delete'):
image_dicts = [{'width': 0,
'height': 0,
'name': 'a',
'url': widgets.mod.upload_image(self.image_path(
'test.png'))}]
styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}
widget = widgets.mod.add_custom_widget('My widget',
'# Hello world!', '/**/',
200, image_dicts, styles)
assert isinstance(widget, CustomWidget)
assert widget.shortName == 'My widget'
assert widget.text == '# Hello world!'
assert widget.css == '/**/'
assert widget.height == 200
assert widget.styles == styles
assert len(widget.imageData) == 1
assert all(isinstance(img, ImageData) for img in widget.imageData)
# initially, image URLs are incorrect, so we much refresh to get
# the proper ones.
widgets.refresh()
refreshed = widgets.sidebar[-1]
assert refreshed == widget
widget = refreshed
new_css = 'h1,h2,h3,h4,h5,h6 {color: #00ff00;}'
widget = widget.mod.update(css=new_css)
assert isinstance(widget, CustomWidget)
assert widget.shortName == 'My widget'
assert widget.text == '# Hello world!'
assert widget.css == new_css
assert widget.height == 200
assert widget.styles == styles
assert len(widget.imageData) == 1
assert all(isinstance(img, ImageData) for img in widget.imageData)
widget.mod.delete()
def test_custom_widget(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
custom = None
for widget in widgets.sidebar:
if isinstance(widget, CustomWidget):
custom = widget
break
assert isinstance(custom, CustomWidget)
assert len(custom.imageData) > 0
assert all(isinstance(img_data, ImageData) for img_data in
custom.imageData)
assert custom == custom
assert custom.id == custom
assert custom in widgets.sidebar
assert 500 >= custom.height >= 50
assert custom.text
assert custom.css
assert custom.shortName
assert subreddit == custom.subreddit
class TestIDCard(IntegrationTest):
def test_id_card(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
card = widgets.id_card
assert isinstance(card, IDCard)
assert card == card
assert card.id == card
assert card.shortName
assert card.currentlyViewingText
assert card.subscribersText
assert subreddit == card.subreddit
class TestImageWidget(IntegrationTest):
@staticmethod
def image_path(name):
test_dir = abspath(dirname(sys.modules[__name__].__file__))
return join(test_dir, '..', '..', 'files', name)
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestImageWidget.test_create_and_update_and_delete'):
image_paths = (self.image_path(name) for name in
('test.jpg', 'test.png'))
image_dicts = [{'width': 0, 'height': 0, 'linkUrl': '',
'url': widgets.mod.upload_image(img_path)}
for img_path in image_paths]
styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}
widget = widgets.mod.add_image_widget(short_name='My new pics!',
data=image_dicts,
styles=styles)
assert isinstance(widget, ImageWidget)
assert widget.shortName == 'My new pics!'
assert widget.styles == styles
assert len(widget) == 2
assert all(isinstance(img, Image) for img in widget)
widget = widget.mod.update(shortName='My old pics :(',
data=image_dicts[:1])
assert isinstance(widget, ImageWidget)
assert widget.shortName == 'My old pics :('
assert widget.styles == styles
assert len(widget) == 1
assert all(isinstance(img, Image) for img in widget)
widget.mod.delete()
def test_image_widget(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
img_widget = None
for widget in widgets.sidebar:
if isinstance(widget, ImageWidget):
img_widget = widget
break
assert isinstance(img_widget, ImageWidget)
assert len(img_widget) >= 1
assert all(isinstance(image, Image) for image in img_widget)
assert img_widget == img_widget
assert img_widget.id == img_widget
assert img_widget in widgets.sidebar
assert img_widget[0].linkUrl
assert img_widget.shortName
assert subreddit == img_widget.subreddit
class TestMenu(IntegrationTest):
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
menu_contents = [
{'text': 'My homepage', 'url': 'https://example.com'},
{'text': 'Python packages',
'children': [
{'text': 'PRAW', 'url': 'https://praw.readthedocs.io/'},
{'text': 'requests', 'url': 'http://python-requests.org'}
]},
{'text': 'Reddit homepage', 'url': 'https://reddit.com'}
]
with self.recorder.use_cassette(
'TestMenu.test_create_and_update_and_delete'):
widget = widgets.mod.add_menu(menu_contents)
assert isinstance(widget, Menu)
assert len(widget) == 3
assert all(isinstance(item, (Submenu, MenuLink))
for item in widget)
assert all(all(isinstance(item, MenuLink) for item in subm)
for subm in widget if isinstance(subm, Submenu))
assert widget[0].text == 'My homepage'
assert widget[0].url == 'https://example.com'
assert widget[2].text == 'Reddit homepage'
assert widget[2].url == 'https://reddit.com'
assert widget[1].text == 'Python packages'
assert widget[1][0].text == 'PRAW'
assert widget[1][0].url == 'https://praw.readthedocs.io/'
assert widget[1][1].text == 'requests'
assert widget[1][1].url == 'http://python-requests.org'
menu_contents.reverse()
widget = widget.mod.update(data=menu_contents)
assert isinstance(widget, Menu)
assert len(widget) == 3
assert all(isinstance(item, (Submenu, MenuLink))
for item in widget)
assert all(all(isinstance(item, MenuLink) for item in subm)
for subm in widget if isinstance(subm, Submenu))
assert widget[0].text == 'Reddit homepage'
assert widget[0].url == 'https://reddit.com'
assert widget[2].text == 'My homepage'
assert widget[2].url == 'https://example.com'
assert widget[1].text == 'Python packages'
assert widget[1][0].text == 'PRAW'
assert widget[1][0].url == 'https://praw.readthedocs.io/'
assert widget[1][1].text == 'requests'
assert widget[1][1].url == 'http://python-requests.org'
widget.mod.delete()
def test_menu(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
menu = None
for widget in widgets.topbar:
if isinstance(widget, Menu):
menu = widget
break
assert isinstance(menu, Menu)
assert all(isinstance(item, (MenuLink, Submenu)) for item in menu)
assert menu == menu
assert menu.id == menu
assert menu in widgets.topbar
assert len(menu) >= 1
assert menu[0].text
assert subreddit == menu.subreddit
submenu = None
for child in menu:
if isinstance(child, Submenu):
submenu = child
break
assert isinstance(submenu, Submenu)
assert len(submenu) >= 0
assert all(isinstance(child, MenuLink) for child in submenu)
assert submenu[0].text
assert submenu[0].url
class TestModeratorsWidget(IntegrationTest):
def test_moderators_widget(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
mods = widgets.moderators_widget
assert isinstance(mods, ModeratorsWidget)
assert all(isinstance(mod, Redditor) for mod in mods)
assert mods == mods
assert mods.id == mods
assert len(mods) >= 1
assert isinstance(mods[0], Redditor)
assert subreddit == mods.subreddit
class TestPostFlairWidget(IntegrationTest):
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestPostFlairWidget.test_create_and_update_and_delete'):
flairs = [f['id'] for f in subreddit.flair.link_templates]
styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}
widget = widgets.mod.add_post_flair_widget('Some flairs', 'list',
flairs, styles)
assert isinstance(widget, PostFlairWidget)
assert widget.shortName == 'Some flairs'
assert widget.display == 'list'
assert widget.order == flairs
assert widget.styles == styles
assert len(widget) == 2
assert all(flair_id in widget.templates for flair_id in widget)
widget = widget.mod.update(display='cloud')
assert isinstance(widget, PostFlairWidget)
assert widget.shortName == 'Some flairs'
assert widget.display == 'cloud'
assert widget.order == flairs
assert widget.styles == styles
assert len(widget) == 2
assert all(flair_id in widget.templates for flair_id in widget)
widget = widget.mod.update(order=widget.order[1:])
assert isinstance(widget, PostFlairWidget)
assert widget.shortName == 'Some flairs'
assert widget.display == 'cloud'
assert widget.order == flairs[1:]
assert widget.styles == styles
assert len(widget) == 1
assert all(flair_id in widget.templates for flair_id in widget)
widget.mod.delete()
def test_post_flair_widget(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
pf_widget = None
for widget in widgets.sidebar:
if isinstance(widget, PostFlairWidget):
pf_widget = widget
break
assert isinstance(pf_widget, PostFlairWidget)
assert len(pf_widget) >= 1
assert all(flair_id in widget.templates for flair_id in widget)
assert pf_widget == pf_widget
assert pf_widget.id == pf_widget
assert pf_widget in widgets.sidebar
assert pf_widget.shortName
assert all(flair in pf_widget for flair in pf_widget)
assert subreddit == pf_widget.subreddit
class TestRulesWidget(IntegrationTest):
def test_rules_widget(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
rules = None
for widget in widgets.sidebar:
if isinstance(widget, RulesWidget):
rules = widget
break
assert isinstance(rules, RulesWidget)
assert rules == rules
assert rules.id == rules
assert rules.display
assert len(rules) > 0
assert subreddit == rules.subreddit
class TestSubredditWidgets(IntegrationTest):
def test_bad_attribute(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
with pytest.raises(AttributeError):
                widgets.nonexistent_attribute
def test_items(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
assert isinstance(widgets.items, dict)
def test_progressive_images(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
def has_progressive(widgets_):
# best way I could figure if an image is progressive
sign = 'fm=pjpg'
for widget in widgets_.sidebar:
if isinstance(widget, ImageWidget):
for image in widget:
if sign in image.url:
return True
elif isinstance(widget, CustomWidget):
for image_data in widget.imageData:
if sign in image_data.url:
return True
return False
with self.recorder.use_cassette(
'TestSubredditWidgets.test_progressive_images'):
widgets.progressive_images = True
assert has_progressive(widgets)
widgets.progressive_images = False
widgets.refresh()
assert not has_progressive(widgets)
widgets.progressive_images = True
widgets.refresh()
assert has_progressive(widgets)
def test_refresh(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.test_refresh'):
assert widgets.sidebar # to fetch
old_sidebar = widgets.sidebar # reference, not value
widgets.refresh()
assert old_sidebar is not widgets.sidebar # should be new list
def test_repr(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
assert ("SubredditWidgets(subreddit=Subreddit(display_name='"
"{}'))").format(pytest.placeholders.test_subreddit) == repr(
widgets)
def test_sidebar(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
assert len(widgets.sidebar) >= 1 # also tests lazy-loading
# all items should be Widget subclasses
assert all(isinstance(widget, Widget) and type(widget) != Widget
for widget in widgets.sidebar)
def test_specials(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
assert isinstance(widgets.id_card, IDCard)
assert isinstance(widgets.moderators_widget, ModeratorsWidget)
def test_topbar(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
assert 1 <= len(widgets.topbar)
assert all(isinstance(widget, Widget) and type(widget) != Widget
for widget in widgets.topbar)
class TestSubredditWidgetsModeration(IntegrationTest):
@staticmethod
def image_path(name):
test_dir = abspath(dirname(sys.modules[__name__].__file__))
return join(test_dir, '..', '..', 'files', name)
@mock.patch('time.sleep', return_value=None)
def test_reorder(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestSubredditWidgetsModeration.test_reorder'):
old_order = list(widgets.sidebar)
new_order = list(reversed(old_order))
widgets.mod.reorder(new_order)
widgets.refresh()
assert list(widgets.sidebar) == new_order
widgets.mod.reorder(old_order)
widgets.refresh()
assert list(widgets.sidebar) == old_order
mixed_types = [thing if i % 2 == 0 else thing.id
for i, thing in enumerate(new_order)]
# mixed_types has some str and some Widget.
assert any(isinstance(thing, basestring) for thing in mixed_types)
assert any(isinstance(thing, Widget) for thing in mixed_types)
widgets.mod.reorder(mixed_types)
widgets.refresh()
assert list(widgets.sidebar) == new_order
@mock.patch('time.sleep', return_value=None)
def test_upload_image(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestSubredditWidgetsModeration.test_upload_image'):
for image in ('test.jpg', 'test.png'):
image_url = widgets.mod.upload_image(self.image_path(image))
assert image_url
class TestTextArea(IntegrationTest):
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestTextArea.test_create_and_update_and_delete'):
styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}
widget = widgets.mod.add_text_area(short_name='My new widget!',
text='Hello world!',
styles=styles)
assert isinstance(widget, TextArea)
assert widget.shortName == 'My new widget!'
assert widget.styles == styles
assert widget.text == 'Hello world!'
widget = widget.mod.update(shortName='My old widget :(',
text='Feed me')
assert isinstance(widget, TextArea)
assert widget.shortName == 'My old widget :('
assert widget.styles == styles
assert widget.text == 'Feed me'
widget.mod.delete()
def test_text_area(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
text = None
for widget in widgets.sidebar:
if isinstance(widget, TextArea):
text = widget
break
assert isinstance(text, TextArea)
assert text == text
assert text.id == text
assert text in widgets.sidebar
assert text in widgets.sidebar
assert text.shortName
assert text.text
assert subreddit == text.subreddit
class TestWidget(IntegrationTest):
def test_inequality(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
assert len(widgets.sidebar) >= 2
assert widgets.sidebar[0] != widgets.sidebar[1]
assert widgets.sidebar[0] != widgets.sidebar[1].id
assert u'\xf0\x9f\x98\x80' != widgets.sidebar[0] # for python 2
|
py | 1a300032205a6b71ba08b5e7d24efeaa39f773c6 | #!/usr/bin/env python
#
# Script inspired in bud:
# https://github.com/indutny/bud
#
import platform
import os
import subprocess
import sys
CC = os.environ.get('CC', 'cc')
script_dir = os.path.dirname(__file__)
root = os.path.normpath(os.path.join(script_dir, '..'))
output_dir = os.path.join(os.path.abspath(root), 'out')
sys.path.insert(0, os.path.join(root, 'deps', 'gyp', 'pylib'))
try:
import gyp
except ImportError:
print('Error: you need to install gyp in deps/gyp first, run:')
print(' ./scripts/get-dep.sh gyp')
sys.exit(42)
def host_arch():
machine = platform.machine()
if machine == 'i386': return 'ia32'
if machine == 'x86_64': return 'x64'
if machine == 'aarch64': return 'arm64'
if machine == 'mips64': return 'mips64el'
if machine.startswith('arm'): return 'arm'
if machine.startswith('mips'): return 'mips'
return machine # Return as-is and hope for the best.
def compiler_version():
proc = subprocess.Popen(CC.split() + ['--version'], stdout=subprocess.PIPE)
is_clang = b'clang' in proc.communicate()[0].split(b'\n')[0]
proc = subprocess.Popen(CC.split() + ['-dumpversion'], stdout=subprocess.PIPE)
version = proc.communicate()[0].split(b'.')
    major_version = int(version[:1][0])
    if is_clang is False and major_version >= 7:
proc = subprocess.Popen(CC.split() + ['-dumpfullversion'], stdout=subprocess.PIPE)
version = proc.communicate()[0].split(b'.')
version = map(int, version[:2])
version = tuple(version)
return (version, is_clang)
def run_gyp(args):
rc = gyp.main(args)
if rc != 0:
print('Error running GYP')
sys.exit(rc)
if __name__ == '__main__':
args = sys.argv[1:]
# GYP bug.
# On msvs it will crash if it gets an absolute path.
# On Mac/make it will crash if it doesn't get an absolute path.
# NOTE ibc: Not sure that it requires absolute path in Mac/make...
if sys.platform == 'win32':
args.append(os.path.join(root, 'mediasoup-worker.gyp'))
common_fn = os.path.join(root, 'common.gypi')
# we force vs 2010 over 2008 which would otherwise be the default for gyp.
if not os.environ.get('GYP_MSVS_VERSION'):
os.environ['GYP_MSVS_VERSION'] = '2010'
else:
args.append(os.path.join(os.path.abspath(root), 'mediasoup-worker.gyp'))
common_fn = os.path.join(os.path.abspath(root), 'common.gypi')
if os.path.exists(common_fn):
args.extend(['-I', common_fn])
args.append('--depth=' + root)
# There's a bug with windows which doesn't allow this feature.
if sys.platform != 'win32':
if '-f' not in args:
args.extend('-f make'.split())
if 'ninja' not in args:
args.extend(['-Goutput_dir=' + output_dir])
args.extend(['--generator-output', output_dir])
(major, minor), is_clang = compiler_version()
args.append('-Dgcc_version=%d' % (10 * major + minor))
args.append('-Dclang=%d' % int(is_clang))
if is_clang is False and major == 4 and minor <= 8:
raise RuntimeError('gcc <= 4.8 not supported, please upgrade your gcc')
if not any(a.startswith('-Dhost_arch=') for a in args):
args.append('-Dhost_arch=%s' % host_arch())
if not any(a.startswith('-Dtarget_arch=') for a in args):
args.append('-Dtarget_arch=%s' % host_arch())
if any(a.startswith('-Dopenssl_fips=') for a in args):
fips_fn = os.path.join(os.path.abspath(root), 'fips.gypi')
args.extend(['-I', fips_fn])
else:
args.append('-Dopenssl_fips=')
if 'asan' in args:
args.append('-Dmediasoup_asan=true')
        args = list(filter(lambda arg: arg != 'asan', args))
else:
args.append('-Dmediasoup_asan=false')
args.append('-Dnode_byteorder=' + sys.byteorder)
gyp_args = list(args)
print(gyp_args)
run_gyp(gyp_args)
|
py | 1a3000de496c901bc6d511dd1d5046206e8ac2fa | '''
Created on 2020-08-11
@author: wf
'''
import unittest
import time
from lodstorage.sparql import SPARQL
from lodstorage.lod import LOD
from ptp.location import CountryManager, ProvinceManager, CityManager
import datetime
from collections import Counter
import getpass
class TestLocations(unittest.TestCase):
'''
check countries, provinces/states and cities
'''
def setUp(self):
self.debug=False
pass
def tearDown(self):
pass
def testCityStorage(self):
'''
try storing city data in cache
'''
cim=CityManager(name="github")
cim.fromLutangar()
cim.store(cim.cityList)
def testCities(self):
'''
test consolidating cities from different sources
'''
cim=CityManager('lutangarVersusOpenResearch')
startTime=time.time()
cim.fromLutangar()
self.assertEqual(128769,(len(cim.cityList)))
print ("reading %d cities from github took %5.1f secs" % (len(cim.cityList),time.time()-startTime))
startTime=time.time()
orCities=cim.fromOpenResearch(showProgress=True)
cityCounter=Counter(orCities)
uniqueCities=list(cityCounter.most_common())
print ("reading %d cities from %d events from openresearch took %5.1f secs" % (len(uniqueCities),len(orCities),time.time()-startTime))
print (cityCounter.most_common(1000))
orCityList=[]
for cityName,count in uniqueCities:
orCityList.append({'name': cityName, 'count': count})
startTime=time.time()
validCities=LOD.intersect(cim.cityList, orCityList, 'name')
print ("validating %d cities from openresearch took %5.1f secs" % (len(validCities),time.time()-startTime))
def getDBPedia(self,mode='query',debug=False):
endpoint="http://dbpedia.org/sparql"
dbpedia=SPARQL(endpoint,mode=mode,debug=debug)
return dbpedia
def testDBPediaCities(self):
'''
https://github.com/LITMUS-Benchmark-Suite/dbpedia-graph-convertor/blob/master/get_data.py
'''
# kglf
return
dbpedia=self.getDBPedia()
limit=100
# Query to get the population of cities
citiesWithPopulationQuery = """
PREFIX dbo: <http://dbpedia.org/ontology/>
PREFIX dbp: <http://dbpedia.org/property/>
PREFIX dbr: <http://dbpedia.org/resource/>
SELECT DISTINCT ?dbCity ?country ?name ?website ?population
WHERE {
?dbCity a dbo:City .
?dbCity dbp:name ?name .
?dbCity dbo:country ?country .
OPTIONAL { ?dbCity dbo:populationTotal ?population . }
OPTIONAL { ?dbCity dbp:website ?website . }
}
LIMIT %d
""" % limit
cityList=dbpedia.queryAsListOfDicts(citiesWithPopulationQuery)
cim=CityManager("dbpedia")
LOD.setNone4List(cityList, ["population","website"])
cim.store(cityList)
def testDBPediaCountries(self):
'''
http://dbpedia.org/ontology/Country
'''
# kglf
return
dbpedia=self.getDBPedia()
countriesQuery="""
# https://opendata.stackexchange.com/a/7660/18245 - dbp:iso3166code not set ...
PREFIX dbo: <http://dbpedia.org/ontology/>
SELECT ?country_name ?population ?isocode
WHERE {
?country_name a dbo:Country .
?country_name dbp:iso3166code ?isocode.
OPTIONAL { ?country_name dbo:populationTotal ?population . }
}
"""
countriesResult=dbpedia.query(countriesQuery)
print(countriesResult)
print(len(countriesResult))
def getEndPoint(self):
endpoint="https://query.wikidata.org/sparql"
# check we have local wikidata copy:
if getpass.getuser()=="travis":
endpoint=None
elif getpass.getuser()=="wf":
# use 2018 wikidata copy
#endpoint="http://blazegraph.bitplan.com/sparql"
# use 2020 wikidata copy
endpoint="http://jena.zeus.bitplan.com/wikidata"
return endpoint
def testWikiDataCities(self):
'''
test getting cities(human settlements to be precise)
from Wikidata
'''
#endpoint=self.getEndPoint()
# force caching - 3.5 hour query if done via endpoint!
endpoint=None
cm=CityManager("wikidata")
cm.endpoint=endpoint
cm.fromCache()
print("found %d cities" % len(cm.cityList))
self.assertTrue(len(cm.cityList)>=200000)
def testWikiDataProvinces(self):
'''
test getting provinces from wikidata
'''
pm=ProvinceManager("wikidata")
pm.endpoint=self.getEndPoint()
pm.fromCache()
print("found %d provinces" % len(pm.provinceList))
self.assertTrue(len(pm.provinceList)>=195)
def testWikiDataCountries(self):
'''
check local wikidata
'''
cm=CountryManager("wikidata")
cm.endpoint=self.getEndPoint()
cm.fromCache()
self.assertTrue(len(cm.countryList)>=195)
# sparql=TestJena.getJena(debug=self.debug)
# errors=cm.storeToRDF(sparql)
# self.assertFalse(sparql.printErrors(errors))
# doimport=True
# if doimport:
# cm2=CountryManager()
# cm2.fromRDF(sparql)
# self.assertEqual(cm.countryList,cm2.countryList)
def testCountryManager(self):
'''
test storying countries in SQL format
'''
cm=CountryManager("github",debug=True)
cm.fromErdem()
cm.store(cm.countryList)
def testIntersection(self):
'''
test creating the intersection of a list of dictionaries
'''
list1 = [{'count': 351, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},
{'count': 332, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},
{'count': 336, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},
{'count': 359, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},
{'count': 309, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'}]
list2 = [{'count': 359, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},
{'count': 351, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},
{'count': 381, 'evt_datetime': datetime.datetime(2015, 10, 22, 8, 45), 'att_value': 'red'}]
listi=LOD.intersect(list1, list2,'count')
print(listi)
        self.assertEqual(2,len(listi))
listi=LOD.intersect(list1, list2)
print(listi)
        self.assertEqual(2,len(listi))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
py | 1a3000fe29d6bffbc1632b67fc91192b9e58c865 |
from triton import *
from pintool import *
# Output
#
# $ ./triton ./src/examples/pin/callback_signals.py ./src/samples/others/signals
# Signal 11 received on thread 0.
# ========================== DUMP ==========================
# rax: 0x00000000000000 ((_ zero_extend 32) (_ bv234 32))
# rbx: 0x00000000000000 UNSET
# rcx: 0x00000000001ba4 ((_ zero_extend 32) ((_ extract 31 0) #81))
# rdx: 0x0000000000000b ((_ sign_extend 32) ((_ extract 31 0) #34))
# rdi: 0x00000000001ba4 ((_ sign_extend 32) ((_ extract 31 0) #83))
# rsi: 0x00000000001ba4 ((_ sign_extend 32) ((_ extract 31 0) #90))
# rbp: 0x007fff097e3540 ((_ extract 63 0) #0)
# rsp: 0x007fff097e3528 (bvsub ((_ extract 63 0) #47) (_ bv8 64))
# rip: 0x007f3fa0735ea7 (_ bv139911251582629 64)
# r8: 0x007f3fa0a94c80 UNSET
# r9: 0x007f3fb671b120 UNSET
# r10: 0x00000000000000 UNSET
# r11: 0x007f3fa0735e70 UNSET
# r12: 0x00000000400460 UNSET
# r13: 0x007fff097e3620 UNSET
# r14: 0x00000000000000 UNSET
# r15: 0x00000000000000 UNSET
# xmm0: 0x000000ff000000 UNSET
# xmm1: 0x2f2f2f2f2f2f2f2f2f2f2f2f2f2f2f2f UNSET
# xmm2: 0x00000000000000 UNSET
# xmm3: 0x00ff000000ff00 UNSET
# xmm4: 0x000000000000ff UNSET
# xmm5: 0x00000000000000 UNSET
# xmm6: 0x00000000000000 UNSET
# xmm7: 0x00000000000000 UNSET
# xmm8: 0x00000000000000 UNSET
# xmm9: 0x00000000000000 UNSET
# xmm10: 0x00000000000000 UNSET
# xmm11: 0x00000000000000 UNSET
# xmm12: 0x00000000000000 UNSET
# xmm13: 0x00000000000000 UNSET
# xmm14: 0x00000000000000 UNSET
# xmm15: 0x00000000000000 UNSET
# af: 0x00000000000000 (ite (= (_ bv16 64) (bvand (_ bv16 64) (bvxor #12 (bvxor ((_ extract 63 0) #0) (_ bv16 64))))) (_ bv1 1) (_ bv0 1))
# cf: 0x00000000000000 (_ bv0 1)
# df: 0x00000000000000 UNSET
# if: 0x00000000000001 UNSET
# of: 0x00000000000000 (_ bv0 1)
# pd: 0x00000000000001 (ite (= (parity_flag ((_ extract 7 0) #73)) (_ bv0 1)) (_ bv1 1) (_ bv0 1))
# sf: 0x00000000000000 (ite (= ((_ extract 31 31) #73) (_ bv1 1)) (_ bv1 1) (_ bv0 1))
# tf: 0x00000000000000 UNSET
# zf: 0x00000000000001 (ite (= #73 (_ bv0 32)) (_ bv1 1) (_ bv0 1))
def signals(threadId, sig):
print 'Signal %d received on thread %d.' %(sig, threadId)
print '========================== DUMP =========================='
regs = getParentRegisters()
for reg in regs:
value = getCurrentRegisterValue(reg)
exprId = getSymbolicRegisterId(reg)
print '%s:\t%#016x\t%s' %(reg.getName(), value, (getSymbolicExpressionFromId(exprId).getAst() if exprId != SYMEXPR.UNSET else 'UNSET'))
return
if __name__ == '__main__':
# Set architecture
setArchitecture(ARCH.X86_64)
# Start the symbolic analysis from the Entry point
startAnalysisFromEntry()
# Add a callback.
insertCall(signals, INSERT_POINT.SIGNALS)
# Run the instrumentation - Never returns
runProgram()
|
py | 1a300115d079adedd687bd2d5d6b2ab5068a5a87 | from core.room import Room
class Player():
    def __init__(self, current_room, inventory=None):
        # self.name = name
        self.current_room = current_room
        # avoid the shared mutable default argument pitfall
        self.inventory = inventory if inventory is not None else []
def room_info(self):
name = self.current_room.name
description = self.current_room.description
return f'{name} - {description}'
def investigate(self):
item = self.current_room.items
if item != None:
return f'You see a {item}.'
else:
return "There is nothing here."
def remove_inventory(self):
self.inventory = []
|
py | 1a3002db951a683a9292c176146e1549f7244536 | from argparse import ArgumentParser
from ._version import __version__
def build_args_parser(
prog: str,
description: str = '',
epilog: str = ''
) -> ArgumentParser:
parser = ArgumentParser(
prog = prog,
description = description,
epilog = epilog
)
# Build Parser
parser = add_arguments(parser)
return parser
def add_arguments(parser: ArgumentParser) -> ArgumentParser:
parser.add_argument(
'input',
type=str,
help='Path to an .xml SBOL file containing constructs designs and sequences'
)
parser.add_argument(
'output',
type=str,
help='Path to the output spreadsheet'
)
parser.add_argument(
'assembly_method',
type=str,
choices=["gibson", "golden_gate", "any_method"],
help='If "any_method" is selected, each construct can be built with any method. However, Golden Gate Assembly will have priority over Gibson Assembly'
)
parser.add_argument(
'--nb_constructs',
type=int,
help='Maximum number of constructs to build (only used in tests)'
)
parser.add_argument(
'--version',
action='version',
version='%(prog)s {}'.format(__version__),
help='show the version number and exit'
)
return parser
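# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal example of how a package entry point might drive this parser.
# The program name and description below are hypothetical placeholders.
def _example_cli(argv=None):
    parser = build_args_parser(
        prog='sbol_assembly_planner',
        description='Generate an assembly spreadsheet from an SBOL file',
    )
    args = parser.parse_args(argv)
    return args.input, args.output, args.assembly_method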
|
py | 1a30040ef7766bef0b11b045ff268440af4cf397 | from Instrucciones.Declaracion import Declaracion
from Instrucciones.Sql_create.Tipo_Constraint import Tipo_Constraint, Tipo_Dato_Constraint
from Instrucciones.TablaSimbolos.Tipo import Tipo
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tabla import Tabla
from Instrucciones.Excepcion import Excepcion
from storageManager.jsonMode import *
from Instrucciones.Tablas.Tablas import Tablas
from Instrucciones.TablaSimbolos.Tipo import Tipo, Tipo_Dato
from Instrucciones.TablaSimbolos.Tipo import Tipo, Tipo_Dato
from Instrucciones.Tablas.Campo import Campo
from Optimizador.C3D import *
from Instrucciones.TablaSimbolos import Instruccion3D as c3d
class CreateTable(Instruccion):
def __init__(self, tabla, tipo, campos, herencia, strGram ,linea, columna):
Instruccion.__init__(self,tipo,linea,columna, strGram)
self.tabla = tabla
self.campos = campos
self.herencia = herencia
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
# Ambito para la tabla
tablaLocal = Tabla(tabla)
compuesta = True
#SE VALIDA QUE SE HAYA SELECCIONADO UN BD
if arbol.bdUsar != None:
for camp in self.campos:
if isinstance(camp, Tipo_Constraint):
tc=self.campos.pop(int(self.campos.index(camp)))
if tc.tipo == Tipo_Dato_Constraint.UNIQUE or tc.tipo == Tipo_Dato_Constraint.PRIMARY_KEY or tc.tipo == Tipo_Dato_Constraint.FOREIGN_KEY:
for id in tc.expresion:
bid=False
for ct in self.campos:
if ct.nombre== id:
if self.campos[self.campos.index(ct)].constraint == None:
self.campos[self.campos.index(ct)].constraint=[]
if tc.tipo == Tipo_Dato_Constraint.UNIQUE:
self.campos[self.campos.index(ct)].constraint.append(Tipo_Constraint(self.tabla+"_"+ct.nombre+"_pkey", Tipo_Dato_Constraint.UNIQUE, None))
if tc.tipo == Tipo_Dato_Constraint.PRIMARY_KEY:
compuesta = False
self.campos[self.campos.index(ct)].constraint.append(Tipo_Constraint(self.tabla+"_pkey", Tipo_Dato_Constraint.PRIMARY_KEY, None))
#if tc.tipo == Tipo_Dato_Constraint.FOREIGN_KEY:
#self.campos[self.campos.index(ct)].constraint.append(Tipo_Constraint(None, Tipo_Dato_Constraint.UNIQUE, None))
bid=True
if not bid:
error = Excepcion("42P10","Semantico",f"La columna <<{id}>> no existe, Error en el Constraint",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
#SE VALIDA SI LA TABLA VA HEREDAR
if self.herencia!=None:
#SE BUSCA LA SI LA TABLA HEREDADA EXISTE
htabla = arbol.devolverBaseDeDatos().getTabla(self.herencia)
if htabla != None:
tabla_temp=[]
#SE RECORRE TODOS LAS COLUMNAS DE LA TABLA PARA UNIR CAMPOS REPETIDOS
for campo_her in htabla.lista_de_campos:
indice=0
bandera_campo=True
for campo_nuevo in self.campos:
if campo_her.nombre==campo_nuevo.nombre:
tabla_temp.append(campo_nuevo)
arbol.consola.append(f"NOTICE: mezclando la columna <<{campo_nuevo.nombre}>> con la definición heredada.")
self.campos.pop(indice)
indice+=1
bandera_campo=False
break
if bandera_campo:
tabla_temp.append(campo_her)
tabla_temp = tabla_temp + self.campos
self.campos= tabla_temp
else:
error = Excepcion(f"42P01","Semantico","No existe la relación <<{self.herencia}>>.",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
# VERIFICACIÓN LLAVES PRIMARIAS
listaPrimarias = []
for camp in self.campos:
if isinstance(camp.tipo,Tipo):
if camp.constraint != None:
for s in camp.constraint:
if s.tipo == Tipo_Dato_Constraint.PRIMARY_KEY:
listaPrimarias.append(camp)
if len(listaPrimarias) > 1 and compuesta:
error = Excepcion("42P16","Semantico","No se permiten múltiples llaves primarias para la tabla «"+self.tabla+"»",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
#SE CREA UN AMBITO PARA LA TABLA
tablaNueva = Tablas(self.tabla,None)
#SE LLENA LA TABLA EN MEMORIA
for camp in self.campos:
if isinstance(camp.tipo,Tipo):
if camp.tipo.tipo == Tipo_Dato.TIPOENUM:
existe = arbol.getEnum(camp.tipo.nombre)
if existe == None:
error = Excepcion('42P00',"Semántico","El tipo "+camp.tipo.nombre+" no existe",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
if camp.constraint != None:
for s in camp.constraint:
if s.tipo == Tipo_Dato_Constraint.CHECK:
arbol.comprobacionCreate = True
objeto = Declaracion(camp.nombre, camp.tipo, s.expresion)
checkBueno = objeto.ejecutar(tablaLocal, arbol)
if not isinstance(checkBueno,Excepcion):
if s.id == None:
s.id = self.tabla+"_"+camp.nombre+"_"+"check1"
#tablaNueva.agregarColumna(camp.nombre,camp.tipo.toString(),None, camp.constraint)
#continue
pass
else:
#arbol.consola.append(checkBueno.toString())
return
elif s.tipo == Tipo_Dato_Constraint.PRIMARY_KEY:
if s.id == None:
s.id = self.tabla+"_pkey"
elif s.tipo == Tipo_Dato_Constraint.UNIQUE:
if s.id == None:
s.id = self.tabla+"_"+camp.nombre+"_pkey"
tablaNueva.agregarColumna(camp.nombre,camp.tipo,None, camp.constraint)
#tablaNueva.lista_constraint.append(camp.constraint)
else:
tablaNueva.agregarColumna(camp.nombre,camp.tipo,None, camp.constraint)
#tablaNueva.lista_constraint.append(camp.constraint)
arbol.comprobacionCreate = False
#SE CREA LA TABLA EN DISCO
ctable = createTable(arbol.bdUsar,self.tabla,len(self.campos))
if ctable==0: #CUANDO LA TABLA SE CREA CORRECTAMENTE
arbol.consola.append(f"La Tabla: <<{self.tabla}>> se creo correctamente.")
arbol.agregarTablaABd(tablaNueva)
elif ctable==3: #CUANDO LA TABLA YA EXISTE
error = Excepcion("100","Semantico","La Tabla ya Existe.",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
elif ctable==2: #CUANDO POR ALGUN ERROR NO SE CREA LA TABLA.
error = Excepcion("100","Semantico","Error Interno.",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
# SE AGREGAN LAS LLAVES PRIMARIAS A LA TABLA
listaIndices = []
resultado=0
for i in listaPrimarias:
listaIndices.append(tablaNueva.devolverColumna(i.nombre))
if len(listaIndices) >0:
#print("SE AGREGO UN INDICE")
resultado = alterAddPK(arbol.getBaseDatos(), self.tabla, listaIndices)
if resultado == 1:
error = Excepcion('XX000',"Semántico","Error interno",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
elif resultado == 2:
error = Excepcion('42P00',"Semántico","La base de datos "+str(arbol.getBaseDatos())+" no existe",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
elif resultado == 3:
error = Excepcion('42P01',"Semántico","No existe la relación "+self.tabla,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
elif resultado == 4:
error = Excepcion('42P16',"Semántico","No se permiten múltiples llaves primarias para la tabla «"+self.tabla+"»",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
elif resultado == 5:
error = Excepcion('XX002',"Semántico","Columna fuera de limites."+self.tabla,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
else:
error = Excepcion("100","Semantico","No ha seleccionado ninguna Base de Datos.",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
def generar3D(self, tabla, arbol):
super().generar3D(tabla,arbol)
code = []
t0 = c3d.getTemporal()
code.append(c3d.asignacionString(t0, "CREATE TABLE " + self.tabla + " (\\n"))
sizeCol = len(self.campos)
contador = 1
for col in self.campos:
if isinstance(col, Campo):
sizeCol -= 1
elif not isinstance(col, Campo):
lista = col.generar3D(tabla, arbol)
code += lista
tLast = c3d.getLastTemporal()
if contador != sizeCol:
t3 = c3d.getTemporal()
code.append(c3d.operacion(t3, Identificador(tLast), Valor('",\\n"', "STRING"), OP_ARITMETICO.SUMA))
contador += 1
tLast = t3
t2 = c3d.getTemporal()
code.append(c3d.operacion(t2, Identificador(t0), Identificador(tLast), OP_ARITMETICO.SUMA))
t0 = t2
t1 = c3d.getTemporal()
if self.herencia != None:
code.append(c3d.operacion(t1, Identificador(t0), Valor('"\\n) INHERITS (' + self.herencia + '"', "STRING"), OP_ARITMETICO.SUMA))
t0 = t1
t1 = c3d.getTemporal()
code.append(c3d.operacion(t1, Identificador(t0), Valor('");"', "STRING"), OP_ARITMETICO.SUMA))
code.append(c3d.asignacionTemporalStack(t1))
code.append(c3d.aumentarP())
return code
class IdentificadorColumna(Instruccion):
    def __init__(self, id, linea, columna, strGram=""):
self.id = id
Instruccion.__init__(self,Tipo(Tipo_Dato.ID),linea,columna,strGram)
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
variable = tabla.getVariable(self.id)
if variable == None:
error = Excepcion("42P10","Semantico","La columna "+str(self.id)+" no existe",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
self.tipo = variable.tipo
return variable.valor.ejecutar(tabla, arbol)
def generar3D(self, tabla, arbol):
super().generar3D(tabla,arbol) |
py | 1a30050b9c482e1742df7e3ab1665fb4995cac5b | import logging
import sys
from requests import HTTPError
from .readwritelock import ReadWriteLock
from .interfaces import CachePolicy
log = logging.getLogger(sys.modules[__name__].__name__)
class ManualPollingCachePolicy(CachePolicy):
def __init__(self, config_fetcher, config_cache):
self._config_fetcher = config_fetcher
self._config_cache = config_cache
self._lock = ReadWriteLock()
def get(self):
try:
self._lock.acquire_read()
config = self._config_cache.get()
return config
finally:
self._lock.release_read()
def force_refresh(self):
force_fetch = False
try:
self._lock.acquire_read()
config = self._config_cache.get()
force_fetch = not bool(config)
finally:
self._lock.release_read()
try:
configuration_response = self._config_fetcher.get_configuration_json(
force_fetch
)
if configuration_response.is_fetched():
configuration = configuration_response.json()
try:
self._lock.acquire_write()
self._config_cache.set(configuration)
finally:
self._lock.release_write()
except HTTPError as e:
log.error(
"Double-check your SDK Key at https://app.configcat.com/sdkkey."
" Received unexpected response: [%s]" % str(e.response)
)
except:
log.exception(sys.exc_info()[0])
def stop(self):
pass
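# --- Editor's illustrative sketch (not part of the original module) ---
# How this policy is driven: the caller triggers fetches explicitly with
# force_refresh() and reads the cached config with get(). The fetcher and
# cache below are hypothetical in-memory stand-ins for the real SDK classes.
def _manual_polling_example():  # pragma: no cover
    class _StubResponse:
        def is_fetched(self):
            return True
        def json(self):
            return {"feature_enabled": True}
    class _StubFetcher:
        def get_configuration_json(self, force_fetch=False):
            return _StubResponse()
    class _InMemoryCache:
        def __init__(self):
            self._value = None
        def get(self):
            return self._value
        def set(self, value):
            self._value = value
    policy = ManualPollingCachePolicy(_StubFetcher(), _InMemoryCache())
    policy.force_refresh()  # fetch once, on demand
    config = policy.get()   # -> {'feature_enabled': True}
    policy.stop()
    return config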
|
py | 1a30056debc252823120b2717a4d36ee3a01f83e | """
This module contains callbacks used during the test phase.
"""
from torchlite.data.datasets import ImageDataset
from tqdm import tqdm
class TestCallback:
def __init__(self):
self.validation_data = None
self.params = None
self.model = None
def on_test_begin(self, logs=None):
pass
def on_test_end(self, logs=None):
pass
def on_batch_begin(self, batch, logs=None):
pass
def on_batch_end(self, batch, logs=None):
pass
class TestCallbackList(object):
"""Container abstracting a list of callbacks.
Args:
callbacks: List of `Callback` instances.
queue_length: Queue length for keeping
running statistics over callback execution time.
"""
def __init__(self, callbacks=None, queue_length=10):
callbacks = callbacks or []
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
def append(self, callback):
assert isinstance(callback, TestCallback), \
"Your callback is not an instance of TestCallback: {}".format(callback)
self.callbacks.append(callback)
def on_test_begin(self, logs=None):
"""Called at the beginning of testing.
Args:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_test_begin(logs)
def on_test_end(self, logs=None):
"""Called at the end of testing.
Args:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_test_end(logs)
def on_batch_begin(self, batch, logs=None):
"""Called right before processing a batch.
Args:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_batch_begin(batch, logs)
def on_batch_end(self, batch, logs=None):
"""Called at the end of a batch.
Args:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_batch_end(batch, logs)
def __iter__(self):
return iter(self.callbacks)
class TQDM(TestCallback):
def __init__(self):
super().__init__()
self.pbar = None
def on_test_begin(self, logs=None):
test_loader_len = len(logs["loader"])
self.pbar = tqdm(total=test_loader_len, desc="Classifying")
def on_batch_end(self, batch, logs=None):
self.pbar.update(1)
def on_test_end(self, logs=None):
print()
class ActivationMapVisualizerCallback(TestCallback):
def __init__(self, filename):
"""
Store an image with heatmap activations in a heatmaps list
using the Grad_cam++ technique: https://arxiv.org/abs/1710.11063
# TODO may combines with TensorboardVisualizer?
/!\ This technique only works with image torchlite.data.datasets.ImagesDataset
Args:
filename (str): The file name that you want to visualize
"""
super().__init__()
self.filename = filename
self.heatmap = None
def on_test_end(self, logs=None):
model = self.model
ds = logs["loader"].dataset if logs["loader"] else None
assert isinstance(ds, ImageDataset), \
"ActivationMapVisualizer: The loader is not an instance of torchlite.data.datasets.ImagesDataset"
image, label, _ = ds.get_by_name(self.filename)
# TODO finish grad cam here https://github.com/adityac94/Grad_CAM_plus_plus/blob/master/misc/utils.py#L51
@property
def get_heatmap(self):
return self.heatmap
class TTACallback(TestCallback):
def __init__(self):
"""
Test time augmentation callback
"""
# TODO implement https://github.com/fastai/fastai/blob/master/fastai/learner.py#L242
super().__init__()
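# --- Editor's illustrative sketch (not part of the original module) ---
# How the callback container is typically driven by a test loop: register
# callbacks, then fire the lifecycle events around the batch loop. The
# `loader` argument is a hypothetical sized iterable (e.g. a DataLoader).
def _callback_list_example(loader):  # pragma: no cover
    callbacks = TestCallbackList([TQDM()])
    callbacks.on_test_begin({"loader": loader})
    for batch, _data in enumerate(loader):
        callbacks.on_batch_begin(batch)
        # ... run the model on this batch here ...
        callbacks.on_batch_end(batch)
    callbacks.on_test_end()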
|
py | 1a30059e79652b426d539e42af781474e26d2df8 | from .util import Configurable, Openable, pretty_str
@pretty_str
class Hook(Configurable, Openable):
"""
Base of all hook classes, performs any form of processing on messages from all connected
plugs, via the provided host instance.
Instantiation may raise :class:`.ConfigError` if the provided configuration is invalid.
Attributes:
virtual (bool):
``True`` if managed by another component (e.g. a hook that exposes plug functionality).
"""
def __init__(self, name, config, host, virtual=False):
super().__init__(name, config, host)
self.virtual = virtual
async def start(self):
"""
Perform any setup tasks.
"""
async def stop(self):
"""
Perform any teardown tasks.
"""
def on_load(self):
"""
Perform any additional one-time setup that requires other plugs or hooks to be loaded.
"""
async def channel_migrate(self, old, new):
"""
Move any private data between channels on admin request. This is intended to cover data
keyed by channel sources and plug network identifiers.
Args:
old (.Channel):
Existing channel with local data.
new (.Channel):
Target replacement channel to migrate data to.
Returns:
bool:
``True`` if any data was migrated for the requested channel.
"""
return False
async def before_send(self, channel, msg):
"""
Modify an outgoing message before it's pushed to the network. The ``(channel, msg)`` pair
must be returned, so hooks may modify in-place or return a different pair. This method is
called for each hook, one after another. If ``channel`` is modified, the sending will
restart on the new channel, meaning this method will be called again for all hooks.
Hooks may also suppress a message (e.g. if their actions caused it, but it bears no value
to the network) by returning ``None``.
Args:
channel (.Channel):
Original source of this message.
msg (.Message):
Raw message received from another plug.
Returns:
(.Channel, .Message) tuple:
The augmented or replacement pair, or ``None`` to suppress this message.
"""
return (channel, msg)
async def before_receive(self, sent, source, primary):
"""
Modify an incoming message before it's pushed to other hooks. The ``sent`` object must be
returned, so hooks may modify in-place or return a different object. This method is called
for each hook, one after another, so any time-consuming tasks should be deferred to
:meth:`process` (which is run for all hooks in parallel).
Hooks may also suppress a message (e.g. if their actions caused it, but it bears no value
to the rest of the system) by returning ``None``.
Args:
sent (.SentMessage):
Raw message received from another plug.
source (.Message):
Original message data used to generate the raw message, if sent via the plug (e.g.
from another hook), equivalent to ``msg`` if the source is otherwise unknown.
primary (bool):
``False`` for supplementary messages if the source message required multiple raw
messages in order to represent it (e.g. messages with multiple attachments where
the underlying network doesn't support it), otherwise ``True``.
Returns:
.SentMessage:
The augmented or replacement message, or ``None`` to suppress this message.
"""
return sent
async def on_receive(self, sent, source, primary):
"""
Handle an incoming message received by any plug.
Args:
sent (.SentMessage):
Raw message received from another plug.
source (.Message):
Original message data used to generate the raw message, if sent via the plug (e.g.
from another hook), equivalent to ``msg`` if the source is otherwise unknown.
primary (bool):
``False`` for supplementary messages if the source message required multiple raw
messages in order to represent it (e.g. messages with multiple attachments where
the underlying network doesn't support it), otherwise ``True``.
"""
def on_config_change(self, source):
"""
Handle a configuration change from another plug or hook.
Args:
source (.Configurable):
Source plug or hook that triggered the event.
"""
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self.name)
class ResourceHook(Hook):
"""
Variant of hooks that globally provide access to some resource.
Only one of each class may be loaded, which happens before regular hooks, and such hooks are
keyed by their class rather than a name, allowing for easier lookups.
"""
|
py | 1a3005cef96dbe2a43a46cf679a83e061a6ffb5c | import enum
import platform
import typing
import math
from functools import lru_cache
from publicsuffix2 import get_sld, get_tld
import urwid
import urwid.util
from mitmproxy import flow
from mitmproxy.http import HTTPFlow
from mitmproxy.utils import human, emoji
from mitmproxy.tcp import TCPFlow
from mitmproxy import dns
from mitmproxy.dns import DNSFlow
# Detect Windows Subsystem for Linux and Windows
IS_WINDOWS_OR_WSL = "Microsoft" in platform.platform() or "Windows" in platform.platform()
def is_keypress(k):
"""
Is this input event a keypress?
"""
if isinstance(k, str):
return True
def highlight_key(str, key, textattr="text", keyattr="key"):
l = []
parts = str.split(key, 1)
if parts[0]:
l.append((textattr, parts[0]))
l.append((keyattr, key))
if parts[1]:
l.append((textattr, parts[1]))
return l
KEY_MAX = 30
def format_keyvals(
entries: typing.Iterable[typing.Tuple[str, typing.Union[None, str, urwid.Widget]]],
key_format: str = "key",
value_format: str = "text",
indent: int = 0
) -> typing.List[urwid.Columns]:
"""
Format a list of (key, value) tuples.
Args:
entries: The list to format. keys must be strings, values can also be None or urwid widgets.
The latter makes it possible to use the result of format_keyvals() as a value.
key_format: The display attribute for the key.
value_format: The display attribute for the value.
indent: Additional indent to apply.
"""
max_key_len = max((len(k) for k, v in entries if k is not None), default=0)
max_key_len = min(max_key_len, KEY_MAX)
if indent > 2:
indent -= 2 # We use dividechars=2 below, which already adds two empty spaces
ret = []
for k, v in entries:
if v is None:
v = urwid.Text("")
elif not isinstance(v, urwid.Widget):
v = urwid.Text([(value_format, v)])
ret.append(
urwid.Columns(
[
("fixed", indent, urwid.Text("")),
(
"fixed",
max_key_len,
urwid.Text([(key_format, k)])
),
v
],
dividechars=2
)
)
return ret
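# --- Editor's illustrative usage sketch (not part of the original module) ---
# format_keyvals() turns (key, value) pairs into aligned urwid rows; values
# may be plain strings, None, or pre-built widgets. The display attribute
# name "heading" below is a placeholder, not mitmproxy's real palette entry.
def _format_keyvals_example():  # pragma: no cover
    rows = format_keyvals(
        [
            ("Host", "example.com"),
            ("Method", "GET"),
            ("Note", None),  # None renders as an empty Text cell
        ],
        key_format="heading",
        indent=4,
    )
    return urwid.ListBox(urwid.SimpleFocusListWalker(rows))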
def fcol(s: str, attr: str) -> typing.Tuple[str, int, urwid.Text]:
s = str(s)
return (
"fixed",
len(s),
urwid.Text(
[
(attr, s)
]
)
)
if urwid.util.detected_encoding:
SYMBOL_REPLAY = "\u21ba"
SYMBOL_RETURN = "\u2190"
SYMBOL_MARK = "\u25cf"
SYMBOL_UP = "\u21E7"
SYMBOL_DOWN = "\u21E9"
SYMBOL_ELLIPSIS = "\u2026"
SYMBOL_FROM_CLIENT = "\u21d2"
SYMBOL_TO_CLIENT = "\u21d0"
else:
SYMBOL_REPLAY = "[r]"
SYMBOL_RETURN = "<-"
SYMBOL_MARK = "#"
SYMBOL_UP = "^"
SYMBOL_DOWN = " "
SYMBOL_ELLIPSIS = "~"
SYMBOL_FROM_CLIENT = "->"
SYMBOL_TO_CLIENT = "<-"
SCHEME_STYLES = {
'http': 'scheme_http',
'https': 'scheme_https',
'ws': 'scheme_ws',
'wss': 'scheme_wss',
'tcp': 'scheme_tcp',
'dns': 'scheme_dns',
}
HTTP_REQUEST_METHOD_STYLES = {
'GET': 'method_get',
'POST': 'method_post',
'DELETE': 'method_delete',
'HEAD': 'method_head',
'PUT': 'method_put'
}
HTTP_RESPONSE_CODE_STYLE = {
2: "code_200",
3: "code_300",
4: "code_400",
5: "code_500",
}
class RenderMode(enum.Enum):
TABLE = 1
"""The flow list in table format, i.e. one row per flow."""
LIST = 2
"""The flow list in list format, i.e. potentially multiple rows per flow."""
DETAILVIEW = 3
"""The top lines in the detail view."""
def fixlen(s: str, maxlen: int) -> str:
if len(s) <= maxlen:
return s.ljust(maxlen)
else:
return s[0:maxlen - len(SYMBOL_ELLIPSIS)] + SYMBOL_ELLIPSIS
def fixlen_r(s: str, maxlen: int) -> str:
if len(s) <= maxlen:
return s.rjust(maxlen)
else:
return SYMBOL_ELLIPSIS + s[len(s) - maxlen + len(SYMBOL_ELLIPSIS):]
def render_marker(marker: str) -> str:
rendered = emoji.emoji.get(marker, SYMBOL_MARK)
# The marker can only be one glyph. Some emoji that use zero-width joiners (ZWJ)
# will not be rendered as a single glyph and instead will show
# multiple glyphs. Just use the first glyph as a fallback.
# https://emojipedia.org/emoji-zwj-sequence/
return rendered[0]
class TruncatedText(urwid.Widget):
def __init__(self, text, attr, align='left'):
self.text = text
self.attr = attr
self.align = align
super().__init__()
def pack(self, size, focus=False):
return (len(self.text), 1)
def rows(self, size, focus=False):
return 1
def render(self, size, focus=False):
text = self.text
attr = self.attr
if self.align == 'right':
text = text[::-1]
attr = attr[::-1]
text_len = urwid.util.calc_width(text, 0, len(text))
if size is not None and len(size) > 0:
width = size[0]
else:
width = text_len
if width >= text_len:
remaining = width - text_len
if remaining > 0:
c_text = text + ' ' * remaining
c_attr = attr + [('text', remaining)]
else:
c_text = text
c_attr = attr
else:
trim = urwid.util.calc_trim_text(text, 0, width - 1, 0, width - 1)
visible_text = text[0:trim[1]]
if trim[3] == 1:
visible_text += ' '
c_text = visible_text + SYMBOL_ELLIPSIS
c_attr = (urwid.util.rle_subseg(attr, 0, len(visible_text.encode())) +
[('focus', len(SYMBOL_ELLIPSIS.encode()))])
if self.align == 'right':
c_text = c_text[::-1]
c_attr = c_attr[::-1]
return urwid.TextCanvas([c_text.encode()], [c_attr], maxcol=width)
def truncated_plain(text, attr, align='left'):
return TruncatedText(text, [(attr, len(text.encode()))], align)
# Work around https://github.com/urwid/urwid/pull/330
def rle_append_beginning_modify(rle, a_r):
"""
Append (a, r) (unpacked from *a_r*) to BEGINNING of rle.
Merge with first run when possible
MODIFIES rle parameter contents. Returns None.
"""
a, r = a_r
if not rle:
rle[:] = [(a, r)]
else:
al, run = rle[0]
if a == al:
rle[0] = (a, run + r)
else:
rle[0:0] = [(a, r)]
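# Illustrative example (not in the original): with rle = [("text", 3)],
# rle_append_beginning_modify(rle, ("text", 2)) merges the runs into [("text", 5)],
# whereas a different attribute would be prepended as its own run.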
def colorize_host(host):
tld = get_tld(host)
sld = get_sld(host)
attr = []
tld_size = len(tld)
sld_size = len(sld) - tld_size
for letter in reversed(range(len(host))):
character = host[letter]
if tld_size > 0:
style = 'url_domain'
tld_size -= 1
elif tld_size == 0:
style = 'text'
tld_size -= 1
elif sld_size > 0:
sld_size -= 1
style = 'url_extension'
else:
style = 'text'
rle_append_beginning_modify(attr, (style, len(character.encode())))
return attr
def colorize_req(s):
path = s.split('?', 2)[0]
i_query = len(path)
i_last_slash = path.rfind('/')
i_ext = path[i_last_slash + 1:].rfind('.')
i_ext = i_last_slash + i_ext if i_ext >= 0 else len(s)
in_val = False
attr = []
for i in range(len(s)):
c = s[i]
if ((i < i_query and c == '/') or
(i < i_query and i > i_last_slash and c == '.') or
(i == i_query)):
a = 'url_punctuation'
elif i > i_query:
if in_val:
if c == '&':
in_val = False
a = 'url_punctuation'
else:
a = 'url_query_value'
else:
if c == '=':
in_val = True
a = 'url_punctuation'
else:
a = 'url_query_key'
elif i > i_ext:
a = 'url_extension'
elif i > i_last_slash:
a = 'url_filename'
else:
a = 'text'
urwid.util.rle_append_modify(attr, (a, len(c.encode())))
return attr
def colorize_url(url):
parts = url.split('/', 3)
if len(parts) < 4 or len(parts[1]) > 0 or parts[0][-1:] != ':':
return [('error', len(url))] # bad URL
return [
(SCHEME_STYLES.get(parts[0], "scheme_other"), len(parts[0]) - 1),
('url_punctuation', 3), # ://
] + colorize_host(parts[2]) + colorize_req('/' + parts[3])
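# Illustrative example (not in the original): colorize_url("https://example.com/index.html")
# starts with ("scheme_https", 5) for "https" and ("url_punctuation", 3) for "://",
# followed by the per-character attributes produced by colorize_host and colorize_req.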
def format_http_content_type(content_type: str) -> typing.Tuple[str, str]:
content_type = content_type.split(";")[0]
if content_type.endswith('/javascript'):
style = 'content_script'
elif content_type.startswith('text/'):
style = 'content_text'
elif (content_type.startswith('image/') or
content_type.startswith('video/') or
content_type.startswith('font/') or
"/x-font-" in content_type):
style = 'content_media'
elif content_type.endswith('/json') or content_type.endswith('/xml'):
style = 'content_data'
elif content_type.startswith('application/'):
style = 'content_raw'
else:
style = 'content_other'
return content_type, style
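# Illustrative example (not in the original): format_http_content_type("text/html; charset=utf-8")
# returns ("text/html", "content_text"); unrecognised types fall back to "content_other".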
def format_duration(duration: float) -> typing.Tuple[str, str]:
pretty_duration = human.pretty_duration(duration)
style = 'gradient_%02d' % int(99 - 100 * min(math.log2(1 + 1000 * duration) / 12, 0.99))
return pretty_duration, style
def format_size(num_bytes: int) -> typing.Tuple[str, str]:
pretty_size = human.pretty_size(num_bytes)
style = 'gradient_%02d' % int(99 - 100 * min(math.log2(1 + num_bytes) / 20, 0.99))
return pretty_size, style
def format_left_indicators(
*,
focused: bool,
intercepted: bool,
timestamp: float
):
indicators: typing.List[typing.Union[str, typing.Tuple[str, str]]] = []
if focused:
indicators.append(("focus", ">>"))
else:
indicators.append(" ")
pretty_timestamp = human.format_timestamp(timestamp)[-8:]
if intercepted:
indicators.append(("intercept", pretty_timestamp))
else:
indicators.append(("text", pretty_timestamp))
return "fixed", 10, urwid.Text(indicators)
def format_right_indicators(
*,
replay: bool,
marked: str,
):
indicators: typing.List[typing.Union[str, typing.Tuple[str, str]]] = []
if replay:
indicators.append(("replay", SYMBOL_REPLAY))
else:
indicators.append(" ")
if bool(marked):
indicators.append(("mark", render_marker(marked)))
else:
indicators.append(" ")
return "fixed", 3, urwid.Text(indicators)
@lru_cache(maxsize=800)
def format_http_flow_list(
*,
render_mode: RenderMode,
focused: bool,
marked: str,
is_replay: bool,
request_method: str,
request_scheme: str,
request_host: str,
request_path: str,
request_url: str,
request_http_version: str,
request_timestamp: float,
request_is_push_promise: bool,
intercepted: bool,
response_code: typing.Optional[int],
response_reason: typing.Optional[str],
response_content_length: typing.Optional[int],
response_content_type: typing.Optional[str],
duration: typing.Optional[float],
error_message: typing.Optional[str],
) -> urwid.Widget:
req = []
if render_mode is RenderMode.DETAILVIEW:
req.append(fcol(human.format_timestamp(request_timestamp), "highlight"))
else:
if focused:
req.append(fcol(">>", "focus"))
else:
req.append(fcol(" ", "focus"))
method_style = HTTP_REQUEST_METHOD_STYLES.get(request_method, "method_other")
req.append(fcol(request_method, method_style))
if request_is_push_promise:
req.append(fcol('PUSH_PROMISE', 'method_http2_push'))
preamble_len = sum(x[1] for x in req) + len(req) - 1
if request_http_version not in ("HTTP/1.0", "HTTP/1.1"):
request_url += " " + request_http_version
if intercepted and not response_code:
url_style = "intercept"
elif response_code or error_message:
url_style = "text"
else:
url_style = "title"
if render_mode is RenderMode.DETAILVIEW:
req.append(
urwid.Text([(url_style, request_url)])
)
else:
req.append(truncated_plain(request_url, url_style))
req.append(format_right_indicators(replay=is_replay, marked=marked))
resp = [
("fixed", preamble_len, urwid.Text(""))
]
if response_code:
if intercepted:
style = "intercept"
else:
style = ""
status_style = style or HTTP_RESPONSE_CODE_STYLE.get(response_code // 100, "code_other")
resp.append(fcol(SYMBOL_RETURN, status_style))
resp.append(fcol(str(response_code), status_style))
if response_reason and render_mode is RenderMode.DETAILVIEW:
resp.append(fcol(response_reason, status_style))
if response_content_type:
ct, ct_style = format_http_content_type(response_content_type)
resp.append(fcol(ct, style or ct_style))
if response_content_length:
size, size_style = format_size(response_content_length)
elif response_content_length == 0:
size = "[no content]"
size_style = "text"
else:
size = "[content missing]"
size_style = "text"
resp.append(fcol(size, style or size_style))
if duration:
dur, dur_style = format_duration(duration)
resp.append(fcol(dur, style or dur_style))
elif error_message:
resp.append(fcol(SYMBOL_RETURN, "error"))
resp.append(urwid.Text([("error", error_message)]))
return urwid.Pile([
urwid.Columns(req, dividechars=1),
urwid.Columns(resp, dividechars=1)
])
@lru_cache(maxsize=800)
def format_http_flow_table(
*,
render_mode: RenderMode,
focused: bool,
marked: str,
is_replay: typing.Optional[str],
request_method: str,
request_scheme: str,
request_host: str,
request_path: str,
request_url: str,
request_http_version: str,
request_timestamp: float,
request_is_push_promise: bool,
intercepted: bool,
response_code: typing.Optional[int],
response_reason: typing.Optional[str],
response_content_length: typing.Optional[int],
response_content_type: typing.Optional[str],
duration: typing.Optional[float],
error_message: typing.Optional[str],
) -> urwid.Widget:
items = [
format_left_indicators(
focused=focused,
intercepted=intercepted,
timestamp=request_timestamp
)
]
if intercepted and not response_code:
request_style = "intercept"
else:
request_style = ""
scheme_style = request_style or SCHEME_STYLES.get(request_scheme, "scheme_other")
items.append(fcol(fixlen(request_scheme.upper(), 5), scheme_style))
if request_is_push_promise:
method_style = 'method_http2_push'
else:
method_style = request_style or HTTP_REQUEST_METHOD_STYLES.get(request_method, "method_other")
items.append(fcol(fixlen(request_method, 4), method_style))
items.append(('weight', 0.25, TruncatedText(request_host, colorize_host(request_host), 'right')))
items.append(('weight', 1.0, TruncatedText(request_path, colorize_req(request_path), 'left')))
if intercepted and response_code:
response_style = "intercept"
else:
response_style = ""
if response_code:
status = str(response_code)
status_style = response_style or HTTP_RESPONSE_CODE_STYLE.get(response_code // 100, "code_other")
if response_content_length and response_content_type:
content, content_style = format_http_content_type(response_content_type)
content_style = response_style or content_style
elif response_content_length:
content = ''
content_style = 'content_none'
elif response_content_length == 0:
content = "[no content]"
content_style = 'content_none'
else:
content = "[content missing]"
content_style = 'content_none'
elif error_message:
status = 'err'
status_style = 'error'
content = error_message
content_style = 'error'
else:
status = ''
status_style = 'text'
content = ''
content_style = ''
items.append(fcol(fixlen(status, 3), status_style))
items.append(('weight', 0.15, truncated_plain(content, content_style, 'right')))
if response_content_length:
size, size_style = format_size(response_content_length)
items.append(fcol(fixlen_r(size, 5), response_style or size_style))
else:
items.append(("fixed", 5, urwid.Text("")))
if duration:
duration_pretty, duration_style = format_duration(duration)
items.append(fcol(fixlen_r(duration_pretty, 5), response_style or duration_style))
else:
items.append(("fixed", 5, urwid.Text("")))
items.append(format_right_indicators(
replay=bool(is_replay),
marked=marked,
))
return urwid.Columns(items, dividechars=1, min_width=15)
@lru_cache(maxsize=800)
def format_tcp_flow(
*,
render_mode: RenderMode,
focused: bool,
timestamp_start: float,
marked: str,
client_address,
server_address,
total_size: int,
duration: typing.Optional[float],
error_message: typing.Optional[str],
):
conn = f"{human.format_address(client_address)} <-> {human.format_address(server_address)}"
items = []
if render_mode in (RenderMode.TABLE, RenderMode.DETAILVIEW):
items.append(
format_left_indicators(focused=focused, intercepted=False, timestamp=timestamp_start)
)
else:
if focused:
items.append(fcol(">>", "focus"))
else:
items.append(fcol(" ", "focus"))
if render_mode is RenderMode.TABLE:
items.append(fcol("TCP ", SCHEME_STYLES["tcp"]))
else:
items.append(fcol("TCP", SCHEME_STYLES["tcp"]))
items.append(('weight', 1.0, truncated_plain(conn, "text", 'left')))
if error_message:
items.append(('weight', 1.0, truncated_plain(error_message, "error", 'left')))
if total_size:
size, size_style = format_size(total_size)
items.append(fcol(fixlen_r(size, 5), size_style))
else:
items.append(("fixed", 5, urwid.Text("")))
if duration:
duration_pretty, duration_style = format_duration(duration)
items.append(fcol(fixlen_r(duration_pretty, 5), duration_style))
else:
items.append(("fixed", 5, urwid.Text("")))
items.append(format_right_indicators(replay=False, marked=marked))
return urwid.Pile([
urwid.Columns(items, dividechars=1, min_width=15)
])
@lru_cache(maxsize=800)
def format_dns_flow(
*,
render_mode: RenderMode,
focused: bool,
intercepted: bool,
marked: str,
is_replay: typing.Optional[str],
op_code: str,
request_timestamp: float,
domain: str,
type: str,
response_code: typing.Optional[str],
response_code_http_equiv: int,
answer: typing.Optional[str],
error_message: str,
duration: typing.Optional[float],
):
items = []
if render_mode in (RenderMode.TABLE, RenderMode.DETAILVIEW):
items.append(format_left_indicators(focused=focused, intercepted=intercepted, timestamp=request_timestamp))
else:
items.append(fcol(">>" if focused else " ", "focus"))
scheme_style = "intercepted" if intercepted else SCHEME_STYLES["dns"]
t = f"DNS {op_code}"
if render_mode is RenderMode.TABLE:
t = fixlen(t, 10)
items.append(fcol(t, scheme_style))
items.append(('weight', 0.5, TruncatedText(domain, colorize_host(domain), 'right')))
items.append(fcol("(" + fixlen(type, 5)[:len(type)] + ") =", "text"))
items.append(("weight", 1, (
truncated_plain("..." if answer is None else "?" if not answer else answer, "text")
if error_message is None else
truncated_plain(error_message, "error")
)))
status_style = "intercepted" if intercepted else HTTP_RESPONSE_CODE_STYLE.get(response_code_http_equiv // 100, "code_other")
items.append(fcol(fixlen("" if response_code is None else response_code, 9), status_style))
if duration:
duration_pretty, duration_style = format_duration(duration)
items.append(fcol(fixlen_r(duration_pretty, 5), duration_style))
else:
items.append(("fixed", 5, urwid.Text("")))
items.append(format_right_indicators(
replay=bool(is_replay),
marked=marked,
))
return urwid.Pile([
urwid.Columns(items, dividechars=1, min_width=15)
])
def format_flow(
f: flow.Flow,
*,
render_mode: RenderMode,
hostheader: bool = False, # pass options directly if we need more stuff from them
focused: bool = True,
) -> urwid.Widget:
"""
This functions calls the proper renderer depending on the flow type.
We also want to cache the renderer output, so we extract all attributes
relevant for display and call the render with only that. This assures that rows
are updated if the flow is changed.
"""
duration: typing.Optional[float]
error_message: typing.Optional[str]
if f.error:
error_message = f.error.msg
else:
error_message = None
if isinstance(f, TCPFlow):
total_size = 0
for message in f.messages:
total_size += len(message.content)
if f.messages:
duration = f.messages[-1].timestamp - f.client_conn.timestamp_start
else:
duration = None
return format_tcp_flow(
render_mode=render_mode,
focused=focused,
timestamp_start=f.client_conn.timestamp_start,
marked=f.marked,
client_address=f.client_conn.peername,
server_address=f.server_conn.address,
total_size=total_size,
duration=duration,
error_message=error_message,
)
elif isinstance(f, DNSFlow):
if f.response:
duration = f.response.timestamp - f.request.timestamp
response_code_str: typing.Optional[str] = dns.response_codes.to_str(f.response.response_code)
response_code_http_equiv = dns.response_codes.http_equiv_status_code(f.response.response_code)
answer = ", ".join(str(x) for x in f.response.answers)
else:
duration = None
response_code_str = None
response_code_http_equiv = 0
answer = None
return format_dns_flow(
render_mode=render_mode,
focused=focused,
intercepted=f.intercepted,
marked=f.marked,
is_replay=f.is_replay,
op_code=dns.op_codes.to_str(f.request.op_code),
request_timestamp=f.request.timestamp,
domain=f.request.questions[0].name if f.request.questions else "",
type=dns.types.to_str(f.request.questions[0].type) if f.request.questions else "",
response_code=response_code_str,
response_code_http_equiv=response_code_http_equiv,
answer=answer,
error_message=error_message,
duration=duration,
)
elif isinstance(f, HTTPFlow):
intercepted = f.intercepted
response_content_length: typing.Optional[int]
if f.response:
if f.response.raw_content is not None:
response_content_length = len(f.response.raw_content)
else:
response_content_length = None
response_code: typing.Optional[int] = f.response.status_code
response_reason: typing.Optional[str] = f.response.reason
response_content_type = f.response.headers.get("content-type")
if f.response.timestamp_end:
duration = max([f.response.timestamp_end - f.request.timestamp_start, 0])
else:
duration = None
else:
response_content_length = None
response_code = None
response_reason = None
response_content_type = None
duration = None
scheme = f.request.scheme
if f.websocket is not None:
if scheme == "https":
scheme = "wss"
elif scheme == "http":
scheme = "ws"
if render_mode in (RenderMode.LIST, RenderMode.DETAILVIEW):
render_func = format_http_flow_list
else:
render_func = format_http_flow_table
return render_func(
render_mode=render_mode,
focused=focused,
marked=f.marked,
is_replay=f.is_replay,
request_method=f.request.method,
request_scheme=scheme,
request_host=f.request.pretty_host if hostheader else f.request.host,
request_path=f.request.path,
request_url=f.request.pretty_url if hostheader else f.request.url,
request_http_version=f.request.http_version,
request_timestamp=f.request.timestamp_start,
request_is_push_promise='h2-pushed-stream' in f.metadata,
intercepted=intercepted,
response_code=response_code,
response_reason=response_reason,
response_content_length=response_content_length,
response_content_type=response_content_type,
duration=duration,
error_message=error_message,
)
else:
raise NotImplementedError()
|
py | 1a3006a452cdbbb51f2df48ae8cc8d6376c541f9 | """
anime.py contains the base classes required for other anime classes.
"""
import os
import logging
import copy
import importlib
from anime_downloader.sites.exceptions import AnimeDLError, NotFoundError
from anime_downloader import util
from anime_downloader.config import Config
from anime_downloader.extractors import get_extractor
from anime_downloader.downloader import get_downloader
logger = logging.getLogger(__name__)
class Anime:
"""
Base class for all anime classes.
Parameters
----------
url: string
URL of the anime.
quality: One of ['360p', '480p', '720p', '1080p']
Quality of episodes
fallback_qualities: list
The order of fallback.
Attributes
----------
sitename: str
name of the site
title: str
Title of the anime
meta: dict
metadata about the anime. [Can be empty]
QUALITIES: list
Possible qualities for the site
"""
sitename = ''
title = ''
meta = dict()
subclasses = {}
QUALITIES = ['360p', '480p', '720p', '1080p']
@classmethod
def search(cls, query):
"""
Search searches for the anime using the query given.
Parameters
----------
query: str
query is the query keyword to be searched.
Returns
-------
list
List of :py:class:`~anime_downloader.sites.anime.SearchResult`
"""
return
def __init__(self, url=None, quality='720p',
fallback_qualities=None,
_skip_online_data=False):
self.url = url
if fallback_qualities is None:
fallback_qualities = ['720p', '480p', '360p']
self._fallback_qualities = [
q for q in fallback_qualities if q in self.QUALITIES]
if quality in self.QUALITIES:
self.quality = quality
else:
raise AnimeDLError(
'Quality {0} not found in {1}'.format(quality, self.QUALITIES))
if not _skip_online_data:
logger.info('Extracting episode info from page')
self._episode_urls = self.get_data()
self._len = len(self._episode_urls)
@classmethod
def verify_url(cls, url):
if cls.sitename in url:
return True
return False
@property
def config(self):
return Config['siteconfig'][self.sitename]
def __init_subclass__(cls, sitename, **kwargs):
super().__init_subclass__(**kwargs)
cls.subclasses[sitename] = cls
@classmethod
def factory(cls, sitename: str):
"""
factory returns the appropriate subclass for the given site name.
Parameters
----------
sitename: str
sitename is the name of the site
Returns
-------
subclass of :py:class:`Anime`
Sub class of :py:class:`Anime`
"""
return cls.subclasses[sitename]
@classmethod
def new_anime(cls, sitename: str):
"""
        new_anime is a factory which returns the anime class corresponding to
`sitename`
Returns
-------
subclass of Anime
"""
module = importlib.import_module(
'anime_downloader.sites.{}'.format(sitename)
)
        # dir() yields attribute names, so resolve each one before the subclass check
        for name in dir(module):
            obj = getattr(module, name)
            if isinstance(obj, type) and issubclass(obj, cls) and obj is not cls:
                return obj
raise ImportError("Cannot find subclass of {}".format(cls))
def get_data(self):
"""
        get_data is called inside the :code:`__init__` of
        :py:class:`~anime_downloader.sites.anime.Anime`. It is used to get
        the necessary data about the anime and its episodes.
        This function calls
        :py:class:`~anime_downloader.sites.anime.Anime._scrape_episodes`
        and
        :py:class:`~anime_downloader.sites.anime.Anime._scrape_metadata`
TODO: Refactor this so that classes which need not be soupified don't
have to overload this function.
Returns
-------
list
A list of tuples of episodes containing episode name and
episode url.
Ex::
[('1', 'https://9anime.is/.../...', ...)]
"""
self._episode_urls = []
try:
self._scrape_metadata()
except Exception as e:
logger.debug('Metadata scraping error: {}'.format(e))
self._episode_urls = self._scrape_episodes()
self._len = len(self._episode_urls)
logger.debug('EPISODE IDS: length: {}, ids: {}'.format(
self._len, self._episode_urls))
if not isinstance(self._episode_urls[0], tuple):
self._episode_urls = [(no+1, id) for no, id in
enumerate(self._episode_urls)]
return self._episode_urls
def __getitem__(self, index):
episode_class = AnimeEpisode.subclasses[self.sitename]
if isinstance(index, int):
try:
ep_id = self._episode_urls[index]
except IndexError as e:
raise RuntimeError("No episode found with index") from e
return episode_class(ep_id[1], parent=self,
ep_no=ep_id[0])
elif isinstance(index, slice):
anime = copy.deepcopy(self)
try:
anime._episode_urls = anime._episode_urls[index]
except IndexError as e:
raise RuntimeError("No episode found with index") from e
return anime
return None
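    # Illustrative (names hypothetical): for an Anime instance `a`, `a[0]` builds the
    # site-specific AnimeEpisode for the first scraped URL, while `a[2:5]` deep-copies
    # the object and keeps only episodes 3-5, so slicing never mutates the original.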
def __iter__(self):
episode_class = AnimeEpisode.subclasses[self.sitename]
for ep_id in self._episode_urls:
yield episode_class(ep_id[1], parent=self, ep_no=ep_id[0])
def __repr__(self):
return '''
Site: {name}
Anime: {title}
Episode count: {length}
'''.format(name=self.sitename, title=self.title, length=len(self))
def __len__(self):
return self._len
def __str__(self):
return self.title
    def _scrape_episodes(self):
        """
        _scrape_episodes is a function which has to be overridden by
        subclasses to scrape the episode urls from the web page.
Parameters
----------
soup: `bs4.BeautifulSoup`
soup is the html of the anime url after passing through
BeautifulSoup.
Returns
-------
:code:`list` of :code:`str`
A list of episode urls.
"""
return
def _scrape_metadata(self):
"""
        _scrape_metadata is a function which has to be overridden by
        subclasses to scrape the metadata of anime from the web page.
Parameters
----------
soup: :py:class:`bs4.BeautifulSoup`
soup is the html of the anime url after passing through
BeautifulSoup.
"""
return
class AnimeEpisode:
"""
Base class for all Episode classes.
Parameters
----------
url: string
URL of the episode.
quality: One of ['360p', '480p', '720p', '1080p']
Quality of episode
fallback_qualities: list
The order of fallback.
Attributes
----------
sitename: str
name of the site
title: str
Title of the anime
meta: dict
metadata about the anime. [Can be empty]
ep_no: string
Episode number/title of the episode
pretty_title: string
Pretty title of episode in format <animename>-<ep_no>
"""
QUALITIES = []
title = ''
stream_url = ''
subclasses = {}
def __init__(self, url, parent: Anime = None, ep_no=None):
self.ep_no = ep_no
self.url = url
self.quality = parent.quality
self.QUALITIES = parent.QUALITIES
self._parent = parent
self._sources = None
self.pretty_title = '{}-{}'.format(self._parent.title, self.ep_no)
logger.debug("Extracting stream info of id: {}".format(self.url))
def try_data():
self.get_data()
# Just to verify the source is acquired
self.source().stream_url
try:
try_data()
except NotFoundError:
# Issue #28
qualities = copy.copy(self._parent._fallback_qualities)
try:
qualities.remove(self.quality)
except ValueError:
pass
for quality in qualities:
logger.warning('Quality {} not found. Trying {}.'.format(
self.quality, quality))
self.quality = quality
try:
try_data()
return
except NotFoundError:
pass
logger.warning(f'Skipping episode: {self.ep_no}')
def __init_subclass__(cls, sitename: str, **kwargs):
super().__init_subclass__(**kwargs)
cls.subclasses[sitename] = cls
cls.sitename = sitename
@classmethod
def factory(cls, sitename: str):
return cls.subclasses[sitename]
@property
def config(self):
return Config['siteconfig'][self.sitename]
def source(self, index=0):
"""
Get the source for episode
Returns
-------
`anime_downloader.extractors.base_extractor.BaseExtractor`
Extractor depending on the source.
"""
if not self._sources:
self.get_data()
try:
sitename, url = self._sources[index]
except TypeError:
return self._sources[index]
except IndexError:
raise NotFoundError("No episode sources found.")
ext = get_extractor(sitename)(url, quality=self.quality)
self._sources[index] = ext
return ext
def get_data(self):
self._sources = self._get_sources()
logger.debug('Sources : {}'.format(self._sources))
def _get_sources(self):
raise NotImplementedError
def sort_sources(self, data):
"""
Formatted data should look something like this
[
{'extractor': 'mp4upload', 'url': 'https://twist.moe/mp4upload/...', 'server': 'mp4upload', 'version': 'subbed'},
{'extractor': 'vidstream', 'url': 'https://twist.moe/vidstream/...', 'server': 'vidstream', 'version': 'dubbed'},
{'extractor': 'no_extractor', 'url': 'https://twist.moe/anime/...', 'server': 'default', 'version': 'subbed'}
]
extractor = the extractor the link should be passed to
url = url to be passed to the extractor
server = the server name used in config
version = subbed/dubbed
The config should consist of a list with servers in preferred order and a preferred language, eg
"servers":["vidstream","default","mp4upload"],
"version":"subbed"
Using the example above, this function will return: [('no_extractor', 'https://twist.moe/anime/...')]
as it prioritizes preferred language over preferred server
"""
version = self.config.get('version','subbed') #TODO add a flag for this
servers = self.config.get('servers',[''])
logger.debug('Data : {}'.format(data))
#Sorts the dicts by preferred server in config
sorted_by_server = sorted(data, key=lambda x: servers.index(x['server']) if x['server'] in servers else len(data))
#Sorts the above by preferred language
#resulting in a list with the dicts sorted by language and server
#with language being prioritized over server
sorted_by_lang = list(sorted(sorted_by_server, key=lambda x: x['version'] == version, reverse=True))
logger.debug('Sorted sources : {}'.format(sorted_by_lang))
return '' if not sorted_by_lang else [(sorted_by_lang[0]['extractor'],sorted_by_lang[0]['url'])]
def download(self, force=False, path=None,
format='{anime_title}_{ep_no}', range_size=None):
"""
Downloads episode. This might be removed in a future release.
Parameters
----------
force: bool
Whether to force download or not.
path: string
Path to the directory/file where the file should be downloaded to.
format: string
The format of the filename if not provided.
"""
# TODO: Remove this shit
logger.info('Downloading {}'.format(self.pretty_title))
if format:
file_name = util.format_filename(format, self)+'.mp4'
if path is None:
path = './' + file_name
if path.endswith('.mp4'):
path = path
else:
path = os.path.join(path, file_name)
Downloader = get_downloader('http')
downloader = Downloader(self.source(),
path, force, range_size=range_size)
downloader.download()
class SearchResult:
"""
SearchResult class holds the search result of a search done by an Anime
class
Parameters
----------
title: str
Title of the anime.
url: str
URL of the anime
poster: str
URL for the poster of the anime.
meta: dict
Additional metadata regarding the anime.
Attributes
----------
title: str
Title of the anime.
url: str
URL of the anime
poster: str
URL for the poster of the anime.
meta: dict
Additional metadata regarding the anime.
meta_info: dict
Metadata regarding the anime. Not shown in the results, used to match with MAL
"""
def __init__(self, title, url, poster='', meta='', meta_info={}):
self.title = title
self.url = url
self.poster = poster
self.meta = meta
self.meta_info = meta_info
def __repr__(self):
return '<SearchResult Title: {} URL: {}>'.format(self.title, self.url)
def __str__(self):
return self.title
@property
def pretty_metadata(self):
"""
pretty_metadata is the prettified version of metadata
"""
if self.meta:
return ' | '.join(val for _, val in self.meta.items())
return ''
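    # Illustrative example (values hypothetical):
    # SearchResult('Title', 'https://example.site/anime/1',
    #              meta={'rating': '8.2', 'year': '2019'}).pretty_metadata == '8.2 | 2019'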
|
py | 1a30072634fc39f2566f9e3473f156264b3066c5 | import sys
from fastapi import FastAPI, Request
from .exceptions import CustomHTTPException
from .routers import oauth, webhooks
if sys.version_info[1] < 7:
from backports.datetime_fromisoformat import MonkeyPatch
MonkeyPatch.patch_fromisoformat()
app = FastAPI()
@app.exception_handler(CustomHTTPException)
def custom_http_exception_handler(request: Request, exc: CustomHTTPException):
return exc.response
app.include_router(oauth.router)
app.include_router(webhooks.router)
|
py | 1a3007316afbaf0d3b0079fa8d10af149bcb31c9 | #Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
#history https://hg.reportlab.com/hg-public/reportlab/log/tip/tools/pythonpoint/styles/__init__.py
|
py | 1a30089d53a86042c21b7f2f23c1e8e4be3f6cb1 | # -*- coding: utf-8 -*-
"""
Fuel inventory library (MOX)
Script to run computations. It will produce a set of folders and output files
and a csv file linking the output file paths to the BU, CT, IE values.
zsolt elter 2019
"""
import numpy as np
import os
import math
#import pandas as pd
#from PDfunctions import *
def fuelinput(wp):
"""
function to calculate the weight percentage of MOX nuclides
formulae from http://holbert.faculty.asu.edu/eee460/NumberDensity.pdf
Parameters
----------
wp : float
Plutonium content in percentage
Returns
-------
fuelstr : str
Serpent formatted material composition
Notes
-----
1, Right now the temperature is hard coded (ie ZAID ends with '.15c'), this can be modified.
2, Right now the density of fuel is hard coded, this can be modified
3, The fuel string includes Cf nuclides with 0.0w%. This is to force Serpent2 to include these
    nuclides. The reason to include them is that they might be relevant in subsequent neutron coincidence
based calculations.
"""
u=1000*1.660539040e-27 #g
NA=6.0221409e23 ##/mol
M={'U235': 235.0439299*u*NA,
'U234': 234.0409521*u*NA,
'U238': 238.05078826*u*NA,
'Pu238': 238.0495599*u*NA,
'Pu239': 239.0521634*u*NA,
'Pu240': 240.0538135*u*NA,
'Pu241': 241.0568515*u*NA,
'Pu242': 242.0587426*u*NA}
Puvec={'Pu238':2.5/100,'Pu239':54.7/100,'Pu240':26.1/100,'Pu241':9.5/100,'Pu242':7.2/100}
Uvec={'U234':0.0012/100,'U235':0.25/100,'U238':99.7488/100} #czsolti 0.00119 rounded to get 1
MO16= 15.99491461956*u*NA
rhoMOX=10.5 #g/cm3 czsolti this density falls out from the equations
wp=wp/100
MU=1/sum([Uvec[iso]/M[iso] for iso in Uvec])
MPu=1/sum([Puvec[iso]/M[iso] for iso in Puvec])
MHM=(1-wp)*MU+wp*MPu
MMOX=MHM+2*MO16
rhoHM=rhoMOX*(MHM/MMOX)
rhoO=rhoMOX*(MO16/MMOX)
MVOL={}
for iso in Uvec:
MVOL[iso] = (1-wp)*Uvec[iso]*rhoHM
for iso in Puvec:
MVOL[iso] = wp*Puvec[iso]*rhoHM
M_O16=(rhoO*2)
M_TOT=sum(MVOL.values())+M_O16
fuelstr='mat MOX -10.5 burn 1'
fuelstr=fuelstr+'\n 92234.15c -%.8f'%(MVOL['U234']/M_TOT)
fuelstr=fuelstr+'\n 92235.15c -%.8f'%(MVOL['U235']/M_TOT)
fuelstr=fuelstr+'\n 92238.15c -%.8f'%(MVOL['U238']/M_TOT)
fuelstr=fuelstr+'\n 94238.15c -%.8f'%(MVOL['Pu238']/M_TOT)
fuelstr=fuelstr+'\n 94239.15c -%.8f'%(MVOL['Pu239']/M_TOT)
fuelstr=fuelstr+'\n 94240.15c -%.8f'%(MVOL['Pu240']/M_TOT)
fuelstr=fuelstr+'\n 94241.15c -%.8f'%(MVOL['Pu241']/M_TOT)
fuelstr=fuelstr+'\n 94242.15c -%.8f'%(MVOL['Pu242']/M_TOT)
fuelstr=fuelstr+'\n 8016.15c -%.8f'%(M_O16/M_TOT)
fuelstr=fuelstr+'\n 98249.15c -0.0'
fuelstr=fuelstr+'\n 98250.15c -0.0'
fuelstr=fuelstr+'\n 98251.15c -0.0'
fuelstr=fuelstr+'\n 98252.15c -0.0'
fuelstr=fuelstr+'\n 98253.15c -0.0'
fuelstr=fuelstr+'\n 98254.15c -0.0'
return fuelstr
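# Illustrative call (output values not reproduced here): fuelinput(8.0) returns a Serpent
# material card that starts with 'mat MOX -10.5 burn 1' followed by one 'ZAID.15c -massfraction'
# line per nuclide listed above; the fractions are negative because Serpent reads negative
# composition entries as mass fractions.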
### SCRIPT to run
###Init array for CTs-> can be modified if other CT values are preferred.
CT=0
CTs=[0]
decstep=[]
while CT<70*365:
if CT<10*365:
decstep.append(91.25)
CT=CT+91.25
CTs.append(CT)
elif CT<40*365:
decstep.append(2*91.25)
CT=CT+2*91.25
CTs.append(CT)
else:
decstep.append(4*91.25)
CT=CT+4*91.25
CTs.append(CT)
#csv header
csvstr=',BU,CT,IE,fuelType,reactorType,serpent\n'
#path to be updated
path=os.getcwd()+'/'
dataFrame='fuellog_strategicPWR_MOX.csv'
inputFileRun = open(dataFrame,'a')
inputFileRun.write(csvstr)
inputFileRun.close()
inputFileBU = open('MOX_manyBU')
inputFileBURefStr = inputFileBU.read()
inputFileBU.close()
inputFileCT = open('MOX_manyCT')
inputFileCTRefStr = inputFileCT.read()
inputFileCT.close()
IE=np.linspace(4,10,31)
idfuel=0
for ie in IE:
fstr=fuelinput(ie)
inputFileBUStr = inputFileBURefStr
inputFileBUStr = inputFileBUStr.replace('fuelstr', fstr)
sfile='sPWR_MOX_IE_%d'%(ie*10)
os.chdir(path+'serpent_files/')
os.system('mkdir IE%d'%(ie*10))
os.chdir(path+'serpent_files/IE%d/'%(ie*10))
inputFileRun = open(sfile,'w')
inputFileRun.write(inputFileBUStr)
inputFileRun.close()
#pathV=path+'serpent_filesPWR_BIC/'
#os.system('ssh '+node+' "nice sss2 '+pathV+sfile+' -omp 64"')
os.system('nice sss2 '+sfile+' -omp 64')
bu=5.0
for bui in range(10,147): #5-70 MWd/kgU
if bui not in [0,21,42,63,84,105,126]:#downtime
os.chdir(path+'serpent_files/IE%d/'%(ie*10))
spentmat = open(sfile+'.bumat'+str(bui)).read()
spentmat=spentmat.replace('MOXp1r1','MOX')
spentmat=spentmat.replace('\n 1001.15c',' burn 1\n 1001.15c')
inputFileCTStr = inputFileCTRefStr
inputFileCTStr = inputFileCTStr.replace('matstr', spentmat)
sfilect='sPWR_MOX_IE_%d_BU_%d'%(ie*10,bu*10)
os.system('mkdir BU%d'%(bu*10))
os.chdir(path+'serpent_files/IE%d/BU%d/'%(ie*10,bu*10))
inputFileRun = open(sfilect,'w')
inputFileRun.write(inputFileCTStr)
inputFileRun.close()
os.system('nice sss2 '+sfilect+' -omp 64')
for cti in range(131):
filepath=path+'serpent_files/IE%d/BU%d/'%(ie*10,bu*10)+sfilect+'.bumat'+str(cti)
csvstr='%d,%.2f,%.2f,%.2f,MOX,PWR,%s\n'%(idfuel,bu,CTs[cti],ie,filepath)
idfuel=idfuel+1
os.chdir(path)
inputFileRun = open(dataFrame,'a')
inputFileRun.write(csvstr)
inputFileRun.close()
bu=bu+0.5
|
py | 1a3008c9e3f698609041dc42ffb29cb03f4606e6 | from supervisor.supervisord import SupervisorStates
from supervisor.xmlrpc import Faults
from supervisor.xmlrpc import RPCError
API_VERSION = '0.2'
class CacheNamespaceRPCInterface:
""" A Supervisor RPC interface that provides the ability
    to cache arbitrary data in the Supervisor instance as key/value pairs.
"""
def __init__(self, supervisord):
self.supervisord = supervisord
self.cache = {}
def _update(self, text):
self.update_text = text # for unit tests, mainly
state = self.supervisord.get_state()
if state == SupervisorStates.SHUTDOWN:
raise RPCError(Faults.SHUTDOWN_STATE)
# XXX fatal state
# RPC API methods
def getAPIVersion(self):
""" Return the version of the RPC API used by supervisor_cache
@return string version
"""
self._update('getAPIVersion')
return API_VERSION
def getKeys(self):
""" Return keys for all data stored in the cache
@return array An array of strings representing cache keys
"""
self._update('getKeys')
return sorted(self.cache.keys())
def getCount(self):
""" Return a count of all items in the cache
@return integer Count of items
"""
self._update('getCount')
return len(self.cache)
def store(self, key, data):
""" Store a string value in the cache, referenced by 'key'
@param string key A string to use as a cache key
@param string data A string for cache value
@return boolean Always true unless error
"""
self._update('store')
self._validateKey(key)
# XMLRPC can handle non-string values
#if not isinstance(data, str):
# why = 'Cache data must be a string'
# raise RPCError(Faults.INCORRECT_PARAMETERS, why)
self.cache[key] = data
return True
def fetch(self, key):
""" Retrieve data from cache stored under 'key'
@param string key The cache key
@return string Cache data stored at key
"""
self._update('fetch')
self._validateKey(key)
data = self.cache.get(key)
if data is None:
raise RPCError(Faults.BAD_NAME)
return data
def delete(self, key):
""" Delete data stored in cache under 'key'
@param string key The key to delete from the cache
@return boolean Always true unless error.
"""
self._update('delete')
self._validateKey(key)
if key in self.cache:
del self.cache[key]
return True
def clear(self):
""" Clear the cache
@return boolean Always true unless error.
"""
self._update('clear')
self.cache.clear()
return True
def _validateKey(self, key):
""" validate 'key' is suitable for a cache key name """
if not isinstance(key, str) or (key == ''):
why = 'Cache key must be a non-empty string'
raise RPCError(Faults.BAD_NAME, why)
def make_cache_rpcinterface(supervisord, **config):
return CacheNamespaceRPCInterface(supervisord)
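# Hypothetical registration sketch (section name and module path are illustrative, not from
# this file): supervisor picks the factory up from an [rpcinterface:x] block in supervisord.conf,
# e.g.
#
#   [rpcinterface:cache]
#   supervisor.rpcinterface_factory = supervisor_cache.rpcinterface:make_cache_rpcinterface
#
# after which the methods above become callable over XML-RPC under the 'cache' namespace.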
|
py | 1a30099984527719ff6921cc308643f970eddd4e | from tensorflow.keras import layers, models, datasets, optimizers
import numpy as np
def neural_network_spatial():
input_ = layers.Input(shape=(32,32,3))
cnn = layers.Conv2D(16, (3,3), activation="relu") (input_)
cnn = layers.SpatialDropout2D(0.2) (cnn)
cnn = layers.MaxPooling2D() (cnn)
cnn = layers.Conv2D(32, (3,3), activation="relu") (cnn)
cnn = layers.SpatialDropout2D(0.5) (cnn)
cnn = layers.MaxPooling2D() (cnn)
flatten = layers.GlobalMaxPooling2D() (cnn)
dense = layers.Dense(32, activation="relu") (flatten)
dense = layers.Dropout(0.5) (dense)
dense = layers.Dense(16, activation="relu") (dense)
output = layers.Dense(10, activation="softmax") (dense)
opt = optimizers.Adam()
m= models.Model(input_, output)
m.compile(optimizer=opt,
loss='categorical_crossentropy',
metrics=['accuracy'])
return m
model = neural_network_spatial() # get model
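# Hypothetical usage sketch (not part of the original file): the 32x32x3 input and 10-way
# softmax above match CIFAR-10, so one plausible way to exercise the model is shown below.
# The dataset download and short training run only happen when this module is executed as a script.
if __name__ == "__main__":
    (x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0
    # one-hot encode the integer labels to match the categorical_crossentropy loss
    y_train = np.eye(10)[y_train.reshape(-1)]
    y_test = np.eye(10)[y_test.reshape(-1)]
    model.fit(x_train, y_train, epochs=1, batch_size=64,
              validation_data=(x_test, y_test))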
print(model.summary()) |
py | 1a3009b35f5b8355caff0b51ec67834425032174 | import coloredlogs
import logging
import os
logging.basicConfig(
filename="plex_doctor.log",
level=logging.DEBUG,
format='%(levelname)s: "%(asctime)s - %(message)s',
)
log = logging.getLogger("PLEX-DOCTOR")
log.setLevel(logging.DEBUG)
LOGLEVEL = os.environ.get("LOGLEVEL", "INFO").upper()
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(
logging.Formatter('%(levelname)s: "%(asctime)s - %(message)s')
)
log.addHandler(stream_handler)
coloredlogs.install(LOGLEVEL, logger=log) |
py | 1a300a2a94c36098cc627f24f1e9f76a0c5cef6a | #!/usr/bin/env python
"""VHDL generation of unary operators"""
import common
__author__ = "Jon Dawson"
__copyright__ = "Copyright 2010, Jonathan P Dawson"
__license__ = "MIT"
__version__ = "0.1.3"
__maintainer__ = "Jon Dawson"
__email__ = "[email protected]"
__status__ = "Prototype"
def write(stream):
identifier = stream.get_identifier()
bits = stream.get_bits()
identifier_a = stream.a.get_identifier()
constant = stream.constant
expressions = {
'srn' : "STREAM_{0} <= SR( STREAM_{1}, {2})",
'sln' : "STREAM_{0} <= SL( STREAM_{1}, {2})",
'abs' : "STREAM_{0} <= ABSOLUTE(STREAM_{1})",
'invert' : "STREAM_{0} <= not STREAM_{1}",
'not' : "STREAM_{0} <= LNOT(STREAM_{1})",
}
expression = expressions[stream.function].format(identifier, identifier_a, common.binary(constant, bits))
expression = " {0};".format(expression)
ports = [
]
declarations = [
" signal STATE_{0} : BINARY_STATE_TYPE;".format(identifier),
" signal STREAM_{0} : std_logic_vector({1} downto 0);".format(identifier, bits - 1),
" signal STREAM_{0}_STB : std_logic;".format(identifier),
" signal STREAM_{0}_ACK : std_logic;".format(identifier),
"",
]
definitions = [
" --file: {0}, line: {1}".format(stream.filename, stream.lineno),
" --STREAM {0} Unary({1}, {2}, '{3}')".format(identifier, identifier_a, constant, stream.function),
" process",
" begin",
" wait until rising_edge(CLK);",
" case STATE_{0} is".format(identifier),
" when BINARY_INPUT =>",
" if STREAM_{0}_STB = '1' then".format(identifier_a),
" STREAM_{0}_ACK <= '1';".format(identifier_a),
expression,
" STREAM_{0}_STB <= '1';".format(identifier),
" STATE_{0} <= BINARY_OUTPUT;".format(identifier),
" end if;",
" when BINARY_OUTPUT =>",
" STREAM_{0}_ACK <= '0';".format(identifier_a),
" if STREAM_{0}_ACK = '1' then".format(identifier),
" STREAM_{0}_STB <= '0';".format(identifier),
" STATE_{0} <= BINARY_INPUT;".format(identifier),
" end if;",
" end case;",
" if RST = '1' then",
" STREAM_{0}_STB <= '0';".format(identifier),
" STREAM_{0}_ACK <= '0';".format(identifier_a),
" STATE_{0} <= BINARY_INPUT;".format(identifier),
" end if;",
" end process;",
"",
]
return ports, declarations, definitions
|
py | 1a300c2dc35aa09e1cd804c7e387dc31876d8779 | from arm.logicnode.arm_nodes import *
class OnContactArrayNode(ArmLogicTreeNode):
"""Activates the output when the given rigid body make contact with other given rigid bodies."""
bl_idname = 'LNOnContactArrayNode'
bl_label = 'On Contact Array'
arm_section = 'contact'
arm_version = 1
property0: EnumProperty(
items = [('begin', 'Begin', 'The contact between the rigid bodies begins'),
('overlap', 'Overlap', 'The contact between the rigid bodies is happening'),
('end', 'End', 'The contact between the rigid bodies ends')],
name='', default='begin')
def init(self, context):
super(OnContactArrayNode, self).init(context)
self.add_input('ArmNodeSocketObject', 'RB')
self.add_input('ArmNodeSocketArray', 'RBs')
self.add_output('ArmNodeSocketAction', 'Out')
def draw_buttons(self, context, layout):
layout.prop(self, 'property0')
|
py | 1a300c6d0dabf14e31123f23b2e574db609135a8 | #!/usr/bin/env python
from load import ROOT as R
from gna.unittest import *
from gna.env import env
import gna.constructors as C
import numpy as N
from gna import context
import gna.bindings.arrayview
@floatcopy(globals(), True)
def test_vararray_preallocated_v01(function_name):
ns = env.globalns(function_name)
names = [ 'zero', 'one', 'two', 'three', 'four', 'five' ]
values = N.arange(len(names), dtype=context.current_precision_short())
variables = R.vector('variable<%s>'%context.current_precision())()
with context.allocator(100) as allocator:
for name, value in zip(names, values):
par = ns.defparameter(name, central=value, relsigma=0.1)
variables.push_back(par.getVariable())
with ns:
vsum = C.VarSum(names, 'sum', ns=ns)
vsum_var=ns['sum'].get()
variables.push_back(vsum_var.getVariable())
vprod = C.VarProduct(names, 'product', ns=ns)
vprod_var=ns['product'].get()
variables.push_back(vprod_var.getVariable())
va = C.VarArrayPreallocated(variables)
pool=allocator.view()
res=va.vararray.points.data()
values_all = N.zeros(shape=values.size+2, dtype=values.dtype)
values_all[:-2]=values
values_all[-2]=values_all[:-2].sum()
values_all[-1]=values_all[:-2].prod()
print('Python array:', values_all)
print('VarArray (preallocated):', res)
print('Pool:', pool)
assert (values_all==res).all()
assert (values_all==pool).all()
assert (res==pool).all()
for i, (val, name) in enumerate(enumerate(names, 2)):
ns[name].set(val)
values_all[i]=val
values_all[-2]=values_all[:-2].sum()
values_all[-1]=values_all[:-2].prod()
res=va.vararray.points.data()
print('Iteration', i)
print(' Python array:', values_all)
print(' VarArray (preallocated):', res)
assert (values_all==res).all()
assert (values_all==pool).all()
assert (res==pool).all()
if __name__ == '__main__':
run_unittests(globals())
|
py | 1a300ce0d6ee0b9f711e5b7905619ea8718207cf | import json
import datetime
from pyld import jsonld
from core.testing import DatabaseTest
from core.util.datetime_helpers import utc_now
from .test_controller import ControllerTest
from core.model import (
Annotation,
create,
)
from api.annotations import (
AnnotationWriter,
AnnotationParser,
)
from api.problem_details import *
class AnnotationTest(DatabaseTest):
def _patron(self):
"""Create a test patron who has opted in to annotation sync."""
patron = super(AnnotationTest, self)._patron()
patron.synchronize_annotations = True
return patron
class TestAnnotationWriter(AnnotationTest, ControllerTest):
def test_annotations_for(self):
patron = self._patron()
# The patron doesn't have any annotations yet.
assert [] == AnnotationWriter.annotations_for(patron)
identifier = self._identifier()
annotation, ignore = create(
self._db, Annotation,
patron=patron,
identifier=identifier,
motivation=Annotation.IDLING,
)
# The patron has one annotation.
assert [annotation] == AnnotationWriter.annotations_for(patron)
assert [annotation] == AnnotationWriter.annotations_for(patron, identifier)
identifier2 = self._identifier()
annotation2, ignore = create(
self._db, Annotation,
patron=patron,
identifier=identifier2,
motivation=Annotation.IDLING,
)
# The patron has two annotations for different identifiers.
assert set([annotation, annotation2]) == set(AnnotationWriter.annotations_for(patron))
assert [annotation] == AnnotationWriter.annotations_for(patron, identifier)
assert [annotation2] == AnnotationWriter.annotations_for(patron, identifier2)
def test_annotation_container_for(self):
patron = self._patron()
with self.app.test_request_context("/"):
container, timestamp = AnnotationWriter.annotation_container_for(patron)
assert (set([AnnotationWriter.JSONLD_CONTEXT, AnnotationWriter.LDP_CONTEXT]) ==
set(container['@context']))
assert "annotations" in container["id"]
assert set(["BasicContainer", "AnnotationCollection"]) == set(container["type"])
assert 0 == container["total"]
first_page = container["first"]
assert "AnnotationPage" == first_page["type"]
# The page doesn't have a context, since it's in the container.
assert None == first_page.get('@context')
# The patron doesn't have any annotations yet.
assert 0 == container['total']
# There's no timestamp since the container is empty.
assert None == timestamp
# Now, add an annotation.
identifier = self._identifier()
annotation, ignore = create(
self._db, Annotation,
patron=patron,
identifier=identifier,
motivation=Annotation.IDLING,
)
annotation.timestamp = utc_now()
container, timestamp = AnnotationWriter.annotation_container_for(patron)
# The context, type, and id stay the same.
assert (set([AnnotationWriter.JSONLD_CONTEXT, AnnotationWriter.LDP_CONTEXT]) ==
set(container['@context']))
assert "annotations" in container["id"]
assert identifier.identifier not in container["id"]
assert set(["BasicContainer", "AnnotationCollection"]) == set(container["type"])
# But now there is one item.
assert 1 == container['total']
first_page = container["first"]
assert 1 == len(first_page['items'])
# The item doesn't have a context, since it's in the container.
first_item = first_page['items'][0]
assert None == first_item.get('@context')
# The timestamp is the annotation's timestamp.
assert annotation.timestamp == timestamp
# If the annotation is deleted, the container will be empty again.
annotation.active = False
container, timestamp = AnnotationWriter.annotation_container_for(patron)
assert 0 == container['total']
assert None == timestamp
def test_annotation_container_for_with_identifier(self):
patron = self._patron()
identifier = self._identifier()
with self.app.test_request_context("/"):
container, timestamp = AnnotationWriter.annotation_container_for(patron, identifier)
assert (set([AnnotationWriter.JSONLD_CONTEXT, AnnotationWriter.LDP_CONTEXT]) ==
set(container['@context']))
assert "annotations" in container["id"]
assert identifier.identifier in container["id"]
assert set(["BasicContainer", "AnnotationCollection"]) == set(container["type"])
assert 0 == container["total"]
first_page = container["first"]
assert "AnnotationPage" == first_page["type"]
# The page doesn't have a context, since it's in the container.
assert None == first_page.get('@context')
# The patron doesn't have any annotations yet.
assert 0 == container['total']
# There's no timestamp since the container is empty.
assert None == timestamp
# Now, add an annotation for this identifier, and one for a different identifier.
annotation, ignore = create(
self._db, Annotation,
patron=patron,
identifier=identifier,
motivation=Annotation.IDLING,
)
annotation.timestamp = utc_now()
other_annotation, ignore = create(
self._db, Annotation,
patron=patron,
identifier=self._identifier(),
motivation=Annotation.IDLING,
)
container, timestamp = AnnotationWriter.annotation_container_for(patron, identifier)
# The context, type, and id stay the same.
assert (set([AnnotationWriter.JSONLD_CONTEXT, AnnotationWriter.LDP_CONTEXT]) ==
set(container['@context']))
assert "annotations" in container["id"]
assert identifier.identifier in container["id"]
assert set(["BasicContainer", "AnnotationCollection"]) == set(container["type"])
# But now there is one item.
assert 1 == container['total']
first_page = container["first"]
assert 1 == len(first_page['items'])
# The item doesn't have a context, since it's in the container.
first_item = first_page['items'][0]
assert None == first_item.get('@context')
# The timestamp is the annotation's timestamp.
assert annotation.timestamp == timestamp
# If the annotation is deleted, the container will be empty again.
annotation.active = False
container, timestamp = AnnotationWriter.annotation_container_for(patron, identifier)
assert 0 == container['total']
assert None == timestamp
def test_annotation_page_for(self):
patron = self._patron()
with self.app.test_request_context("/"):
page = AnnotationWriter.annotation_page_for(patron)
# The patron doesn't have any annotations, so the page is empty.
assert AnnotationWriter.JSONLD_CONTEXT == page['@context']
assert 'annotations' in page['id']
assert 'AnnotationPage' == page['type']
assert 0 == len(page['items'])
# If we add an annotation, the page will have an item.
identifier = self._identifier()
annotation, ignore = create(
self._db, Annotation,
patron=patron,
identifier=identifier,
motivation=Annotation.IDLING,
)
page = AnnotationWriter.annotation_page_for(patron)
assert 1 == len(page['items'])
# But if the annotation is deleted, the page will be empty again.
annotation.active = False
page = AnnotationWriter.annotation_page_for(patron)
assert 0 == len(page['items'])
def test_annotation_page_for_with_identifier(self):
patron = self._patron()
identifier = self._identifier()
with self.app.test_request_context("/"):
page = AnnotationWriter.annotation_page_for(patron, identifier)
# The patron doesn't have any annotations, so the page is empty.
assert AnnotationWriter.JSONLD_CONTEXT == page['@context']
assert 'annotations' in page['id']
assert identifier.identifier in page['id']
assert 'AnnotationPage' == page['type']
assert 0 == len(page['items'])
# If we add an annotation, the page will have an item.
annotation, ignore = create(
self._db, Annotation,
patron=patron,
identifier=identifier,
motivation=Annotation.IDLING,
)
page = AnnotationWriter.annotation_page_for(patron, identifier)
assert 1 == len(page['items'])
# If a different identifier has an annotation, the page will still have one item.
other_annotation, ignore = create(
self._db, Annotation,
patron=patron,
identifier=self._identifier(),
motivation=Annotation.IDLING,
)
page = AnnotationWriter.annotation_page_for(patron, identifier)
assert 1 == len(page['items'])
# But if the annotation is deleted, the page will be empty again.
annotation.active = False
page = AnnotationWriter.annotation_page_for(patron, identifier)
assert 0 == len(page['items'])
def test_detail_target(self):
patron = self._patron()
identifier = self._identifier()
target = {
"http://www.w3.org/ns/oa#hasSource": {
"@id": identifier.urn
},
"http://www.w3.org/ns/oa#hasSelector": {
"@type": "http://www.w3.org/ns/oa#FragmentSelector",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#value": "epubcfi(/6/4[chap01ref]!/4[body01]/10[para05]/3:10)"
}
}
annotation, ignore = create(
self._db, Annotation,
patron=patron,
identifier=identifier,
motivation=Annotation.IDLING,
target=json.dumps(target),
)
with self.app.test_request_context("/"):
detail = AnnotationWriter.detail(annotation)
assert "annotations/%i" % annotation.id in detail["id"]
assert "Annotation" == detail['type']
assert Annotation.IDLING == detail['motivation']
compacted_target = {
"source": identifier.urn,
"selector": {
"type": "FragmentSelector",
"value": "epubcfi(/6/4[chap01ref]!/4[body01]/10[para05]/3:10)"
}
}
assert compacted_target == detail["target"]
def test_detail_body(self):
patron = self._patron()
identifier = self._identifier()
body = {
"@type": "http://www.w3.org/ns/oa#TextualBody",
"http://www.w3.org/ns/oa#bodyValue": "A good description of the topic that bears further investigation",
"http://www.w3.org/ns/oa#hasPurpose": {
"@id": "http://www.w3.org/ns/oa#describing"
}
}
annotation, ignore = create(
self._db, Annotation,
patron=patron,
identifier=identifier,
motivation=Annotation.IDLING,
content=json.dumps(body),
)
with self.app.test_request_context("/"):
detail = AnnotationWriter.detail(annotation)
assert "annotations/%i" % annotation.id in detail["id"]
assert "Annotation" == detail['type']
assert Annotation.IDLING == detail['motivation']
compacted_body = {
"type": "TextualBody",
"bodyValue": "A good description of the topic that bears further investigation",
"purpose": "describing"
}
assert compacted_body == detail["body"]
class TestAnnotationParser(AnnotationTest):
def setup_method(self):
super(TestAnnotationParser, self).setup_method()
self.pool = self._licensepool(None)
self.identifier = self.pool.identifier
self.patron = self._patron()
def _sample_jsonld(self, motivation=Annotation.IDLING):
data = dict()
data["@context"] = [AnnotationWriter.JSONLD_CONTEXT,
{'ls': Annotation.LS_NAMESPACE}]
data["type"] = "Annotation"
motivation = motivation.replace(Annotation.LS_NAMESPACE, 'ls:')
motivation = motivation.replace(Annotation.OA_NAMESPACE, 'oa:')
data["motivation"] = motivation
data["body"] = {
"type": "TextualBody",
"bodyValue": "A good description of the topic that bears further investigation",
"purpose": "describing"
}
data["target"] = {
"source": self.identifier.urn,
"selector": {
"type": "oa:FragmentSelector",
"value": "epubcfi(/6/4[chap01ref]!/4[body01]/10[para05]/3:10)"
}
}
return data
def test_parse_invalid_json(self):
annotation = AnnotationParser.parse(self._db, "not json", self.patron)
assert INVALID_ANNOTATION_FORMAT == annotation
def test_invalid_identifier(self):
# If the target source can't be parsed as a URN we send
# INVALID_ANNOTATION_TARGET
data = self._sample_jsonld()
data['target']['source'] = 'not a URN'
annotation = AnnotationParser.parse(
self._db, json.dumps(data), self.patron
)
assert INVALID_ANNOTATION_TARGET == annotation
def test_null_id(self):
# A JSON-LD document can have its @id set to null -- it's the
# same as if the @id wasn't present -- but the jsonld library
# can't handle this, so we need to test it specially.
self.pool.loan_to(self.patron)
data = self._sample_jsonld()
data['id'] = None
annotation = AnnotationParser.parse(
self._db, json.dumps(data), self.patron
)
assert isinstance(annotation, Annotation)
def test_parse_expanded_jsonld(self):
self.pool.loan_to(self.patron)
data = dict()
data['@type'] = ["http://www.w3.org/ns/oa#Annotation"]
data["http://www.w3.org/ns/oa#motivatedBy"] = [{
"@id": Annotation.IDLING
}]
data["http://www.w3.org/ns/oa#hasBody"] = [{
"@type" : ["http://www.w3.org/ns/oa#TextualBody"],
"http://www.w3.org/ns/oa#bodyValue": [{
"@value": "A good description of the topic that bears further investigation"
}],
"http://www.w3.org/ns/oa#hasPurpose": [{
"@id": "http://www.w3.org/ns/oa#describing"
}]
}]
data["http://www.w3.org/ns/oa#hasTarget"] = [{
"http://www.w3.org/ns/oa#hasSelector": [{
"@type": ["http://www.w3.org/ns/oa#FragmentSelector"],
"http://www.w3.org/1999/02/22-rdf-syntax-ns#value": [{
"@value": "epubcfi(/6/4[chap01ref]!/4[body01]/10[para05]/3:10)"
}]
}],
"http://www.w3.org/ns/oa#hasSource": [{
"@id": self.identifier.urn
}],
}]
data_json = json.dumps(data)
annotation = AnnotationParser.parse(self._db, data_json, self.patron)
assert self.patron.id == annotation.patron_id
assert self.identifier.id == annotation.identifier_id
assert Annotation.IDLING == annotation.motivation
assert True == annotation.active
assert json.dumps(data["http://www.w3.org/ns/oa#hasTarget"][0]) == annotation.target
assert json.dumps(data["http://www.w3.org/ns/oa#hasBody"][0]) == annotation.content
def test_parse_compacted_jsonld(self):
self.pool.loan_to(self.patron)
data = dict()
data["@type"] = "http://www.w3.org/ns/oa#Annotation"
data["http://www.w3.org/ns/oa#motivatedBy"] = {
"@id": Annotation.IDLING
}
data["http://www.w3.org/ns/oa#hasBody"] = {
"@type": "http://www.w3.org/ns/oa#TextualBody",
"http://www.w3.org/ns/oa#bodyValue": "A good description of the topic that bears further investigation",
"http://www.w3.org/ns/oa#hasPurpose": {
"@id": "http://www.w3.org/ns/oa#describing"
}
}
data["http://www.w3.org/ns/oa#hasTarget"] = {
"http://www.w3.org/ns/oa#hasSource": {
"@id": self.identifier.urn
},
"http://www.w3.org/ns/oa#hasSelector": {
"@type": "http://www.w3.org/ns/oa#FragmentSelector",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#value": "epubcfi(/6/4[chap01ref]!/4[body01]/10[para05]/3:10)"
}
}
data_json = json.dumps(data)
expanded = jsonld.expand(data)[0]
annotation = AnnotationParser.parse(self._db, data_json, self.patron)
assert self.patron.id == annotation.patron_id
assert self.identifier.id == annotation.identifier_id
assert Annotation.IDLING == annotation.motivation
assert True == annotation.active
assert json.dumps(expanded["http://www.w3.org/ns/oa#hasTarget"][0]) == annotation.target
assert json.dumps(expanded["http://www.w3.org/ns/oa#hasBody"][0]) == annotation.content
def test_parse_jsonld_with_context(self):
self.pool.loan_to(self.patron)
data = self._sample_jsonld()
data_json = json.dumps(data)
expanded = jsonld.expand(data)[0]
annotation = AnnotationParser.parse(self._db, data_json, self.patron)
assert self.patron.id == annotation.patron_id
assert self.identifier.id == annotation.identifier_id
assert Annotation.IDLING == annotation.motivation
assert True == annotation.active
assert json.dumps(expanded["http://www.w3.org/ns/oa#hasTarget"][0]) == annotation.target
assert json.dumps(expanded["http://www.w3.org/ns/oa#hasBody"][0]) == annotation.content
def test_parse_jsonld_with_bookmarking_motivation(self):
"""You can create multiple bookmarks in a single book."""
self.pool.loan_to(self.patron)
data = self._sample_jsonld(motivation=Annotation.BOOKMARKING)
data_json = json.dumps(data)
annotation = AnnotationParser.parse(self._db, data_json, self.patron)
assert Annotation.BOOKMARKING == annotation.motivation
# You can't create another bookmark at the exact same location --
# you just get the same annotation again.
annotation2 = AnnotationParser.parse(self._db, data_json, self.patron)
assert annotation == annotation2
# But unlike with IDLING, you _can_ create multiple bookmarks
# for the same identifier, so long as the selector value
# (ie. the location within the book) is different.
data['target']['selector']['value'] = 'epubcfi(/3/4[chap01ref]!/4[body01]/15[para05]/3:10)'
data_json = json.dumps(data)
annotation3 = AnnotationParser.parse(self._db, data_json, self.patron)
assert annotation3 != annotation
assert 2 == len(self.patron.annotations)
def test_parse_jsonld_with_invalid_motivation(self):
self.pool.loan_to(self.patron)
data = self._sample_jsonld()
data["motivation"] = "not-a-valid-motivation"
data_json = json.dumps(data)
annotation = AnnotationParser.parse(self._db, data_json, self.patron)
assert INVALID_ANNOTATION_MOTIVATION == annotation
def test_parse_jsonld_with_no_loan(self):
data = self._sample_jsonld()
data_json = json.dumps(data)
annotation = AnnotationParser.parse(self._db, data_json, self.patron)
assert INVALID_ANNOTATION_TARGET == annotation
def test_parse_jsonld_with_no_target(self):
data = self._sample_jsonld()
del data['target']
data_json = json.dumps(data)
annotation = AnnotationParser.parse(self._db, data_json, self.patron)
assert INVALID_ANNOTATION_TARGET == annotation
def test_parse_updates_existing_annotation(self):
self.pool.loan_to(self.patron)
original_annotation, ignore = create(
self._db, Annotation,
patron_id=self.patron.id,
identifier_id=self.identifier.id,
motivation=Annotation.IDLING,
)
original_annotation.active = False
yesterday = utc_now() - datetime.timedelta(days=1)
original_annotation.timestamp = yesterday
data = self._sample_jsonld()
data = json.dumps(data)
annotation = AnnotationParser.parse(self._db, data, self.patron)
assert original_annotation == annotation
assert True == annotation.active
assert annotation.timestamp > yesterday
def test_parse_treats_duplicates_as_interchangeable(self):
self.pool.loan_to(self.patron)
# Due to an earlier race condition, two duplicate annotations
# were put in the database.
a1, ignore = create(
self._db, Annotation,
patron_id=self.patron.id,
identifier_id=self.identifier.id,
motivation=Annotation.IDLING,
)
a2, ignore = create(
self._db, Annotation,
patron_id=self.patron.id,
identifier_id=self.identifier.id,
motivation=Annotation.IDLING,
)
assert a1 != a2
# Parsing the annotation again retrieves one or the other
# of the annotations rather than crashing or creating a third
# annotation.
data = self._sample_jsonld()
data = json.dumps(data)
annotation = AnnotationParser.parse(self._db, data, self.patron)
assert annotation in (a1, a2)
def test_parse_jsonld_with_patron_opt_out(self):
self.pool.loan_to(self.patron)
data = self._sample_jsonld()
data_json = json.dumps(data)
self.patron.synchronize_annotations = False
annotation = AnnotationParser.parse(
self._db, data_json, self.patron
)
assert PATRON_NOT_OPTED_IN_TO_ANNOTATION_SYNC == annotation
|
py | 1a300e2fcaaccd53fb1f1b3fc7df607e9426948c | from django.utils import translation
class TranslatedField(object):
def __init__(self, en_field, es_field):
self.en_field = en_field
self.es_field = es_field
def __get__(self, instance, owner):
if translation.get_language() == 'es':
return getattr(instance, self.es_field)
else:
return getattr(instance, self.en_field)
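# A minimal illustrative use on a Django model (the model and field names are
# assumptions for this sketch, not part of the module):
#
#   from django.db import models
#
#   class Article(models.Model):
#       title_en = models.CharField(max_length=200)
#       title_es = models.CharField(max_length=200)
#       title = TranslatedField('title_en', 'title_es')
#
# Reading article.title then returns title_es when the active language is 'es'
# and title_en otherwise, exactly as __get__ above resolves it.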
|
py | 1a300e54796c2cac920e94e6eb43542d2c9b4bec | ##############################################################################
#
# Copyright (c) 2019 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
TPC protocol state management.
The various states in which a storage instance can find itself during
two-phase commit are complicated. This package presents a set of
objects that encapsulate various possibilities. In this way we can
test independent states...independently, and the state transitions are
explicit.
"""
from __future__ import absolute_import
from __future__ import print_function
import logging
from transaction.interfaces import NoTransaction
from transaction._transaction import rm_key
from transaction import get as get_thread_local_transaction
from perfmetrics import statsd_client
from zope.interface import implementer
from ZODB.POSException import ReadOnlyError
from ZODB.POSException import StorageTransactionError
from ..interfaces import ITPCStateNotInTransaction
from ..interfaces import ITPCStateDatabaseAvailable
from ...adapters.connections import ClosedConnection
from ..._util import Lazy as BaseLazy
from ..._util import get_boolean_from_environ
from .temporary_storage import TemporaryStorage
logger = logging.getLogger(__name__)
_CLOSED_CONNECTION = ClosedConnection()
#: Set the ``RELSTORAGE_LOCK_EARLY`` environment variable if you
#: experience deadlocks or failures to commit (``tpc_finish``). This
#: will cause the commit lock to be taken as part of ``tpc_vote``
#: (similar to RelStorage 2.x) instead of deferring it until
#: ``tpc_finish``.
#:
#: If this is necessary, this is probably a bug in RelStorage; please report
#: it.
LOCK_EARLY = get_boolean_from_environ(
'RELSTORAGE_LOCK_EARLY',
False,
logger=logger,
)
class _LazyResource(BaseLazy):
# If not None, a callable ``(storage, resource, force)``
# that aborts the *resource*, possibly forcefully (*force*).
# The return value will be the new value in the object
# instance.
abort_function = None
# If not None, a callable ``(storage, resource)`` to clean up
# any use of the *resource* after success.
release_function = None
def _stored_value_for_name_in_inst(self, value, name, inst):
# type: (Any, str, SharedTPCState) -> None
if name == 'store_connection':
# Try to do this first
inst._used_resources.insert(0, self)
else:
inst._used_resources.append(self)
def aborter(self, func):
assert not isinstance(func, _LazyResource)
self.abort_function = func
return self
def releaser(self, func):
assert not isinstance(func, _LazyResource)
self.release_function = func
return self
def cleaner(self, func):
self.abort_function = self.release_function = func
return self
class SharedTPCState(object):
"""
Contains attributes marking resources that *might* be used during the commit
process. If any of them are, then the `abort` method takes care of cleaning them up.
Accessing a resource implicitly begins it, if needed.
"""
# pylint:disable=method-hidden
prepared_txn = None
transaction = None
not_in_transaction_state = None
read_only = False # Or we wouldn't allocate this object.
def __init__(self, initial_state, storage, transaction):
self.initial_state = initial_state
self._storage = storage
self.transaction = transaction
self._used_resources = []
@_LazyResource
def local_client(self):
return self._storage._cache.local_client
@_LazyResource
def store_connection(self):
conn = self._storage._store_connection_pool.borrow()
# Report on the connection we will use.
# https://github.com/zodb/relstorage/issues/460
logger.info("Using store connection %s", conn)
return conn
@store_connection.aborter
def store_connection(self, storage, store_connection, force):
try:
adapter = storage._adapter
if store_connection:
# It's possible that this connection/cursor was
# already closed if an error happened (which would
# release the locks). Don't try to re-open it.
adapter.locker.release_commit_lock(store_connection.cursor)
# Though, this might re-open it.
adapter.txncontrol.abort(
store_connection,
self.prepared_txn)
if force:
store_connection.drop()
finally:
storage._store_connection_pool.replace(store_connection)
return _CLOSED_CONNECTION
@store_connection.releaser
def store_connection(self, storage, store_connection):
storage._store_connection_pool.replace(store_connection)
return _CLOSED_CONNECTION
@_LazyResource
def load_connection(self):
return self._storage._load_connection
@load_connection.aborter
def load_connection(self, _storage, load_connection, force):
if force:
load_connection.drop()
else:
load_connection.rollback_quietly()
load_connection.exit_critical_phase()
return _CLOSED_CONNECTION
@load_connection.releaser
def load_connection(self, _storage, load_connection):
load_connection.rollback_quietly()
load_connection.exit_critical_phase()
return _CLOSED_CONNECTION
@_LazyResource
def blobhelper(self):
blobhelper = self._storage.blobhelper
blobhelper.begin()
return blobhelper
@blobhelper.aborter
def blobhelper(self, _storage, blobhelper, _force):
blobhelper.abort()
@blobhelper.releaser
def blobhelper(self, _storage, blobhelper):
blobhelper.clear_temp()
def has_blobs(self):
# pylint:disable=no-member
return (
'blobhelper' in self.__dict__
and self.blobhelper is not None
and self.blobhelper.txn_has_blobs
)
@BaseLazy
def cache(self):
return self._storage._cache
@BaseLazy
def adapter(self):
return self._storage._adapter
@_LazyResource
def temp_storage(self):
return TemporaryStorage()
@temp_storage.cleaner
def temp_storage(self, _storage, temp_storage, _force=None):
temp_storage.close()
def has_temp_data(self):
return 'temp_storage' in self.__dict__ and self.temp_storage
@_LazyResource
def _statsd_buf(self):
return []
@_statsd_buf.cleaner
def _statsd_buf(self, _storage, buf, _force=None):
client = statsd_client()
if client is not None and buf:
client.sendbuf(buf)
def stat_timing(self, stat, value, rate=1):
"""
Record a timing value.
For compatibility with the default settings of ``perfmetrics``,
the stat name should end in ``.t``
The *value* should be a floating point difference of seconds
(eg, ``time.time() - time.time()``). This will be converted to an integer
number of milliseconds (again for consistency with ``perfmetrics``).
"""
client = statsd_client()
if client is not None:
# scale from float seconds to milliseconds
value = int(value * 1000.0)
client.timing(stat, value, rate, self._statsd_buf)
def stat_count(self, stat, value, rate=1):
client = statsd_client()
if client is not None:
client.incr(stat, value, rate, self._statsd_buf)
def __cleanup(self, method_name, method_args):
storage = self._storage
resources = self._used_resources
self._used_resources = () # No more opening resources.
exceptions = []
for resource in resources:
assert resource.__name__ in vars(self)
cleaner = getattr(resource, method_name)
if not cleaner:
setattr(self, resource.__name__, None)
continue
value = getattr(self, resource.__name__)
new_value = None
try:
new_value = cleaner(self, storage, value, *method_args)
except Exception as ex: # pylint:disable=broad-except
exceptions.append(ex)
setattr(self, resource.__name__, new_value)
if exceptions: # pragma: no cover
# This usually indicates a bug in RelStorage that should be fixed.
raise Exception("Failed to close one or more resources: %s" % (exceptions,))
def abort(self, force=False):
self.__cleanup('abort_function', (force,))
def release(self):
self.__cleanup('release_function', ())
@implementer(ITPCStateDatabaseAvailable)
class AbstractTPCStateDatabaseAvailable(object):
__slots__ = (
'shared_state',
)
# - store
# - restore/restoreBlob
# - deleteObject
# - undo
# should raise ReadOnlyError if the storage is read only.
# - tpc_vote should raise StorageTransactionError
# Because entering tpc_begin wasn't allowed if the storage was
# read only, this needs to happen in the "not in transaction"
# state.
def __init__(self, shared_state):
self.shared_state = shared_state # type: SharedTPCState
@property
def transaction(self):
return self.shared_state.transaction
@property
def initial_state(self):
return self.shared_state.initial_state
@property
def store_connection(self):
return self.shared_state.store_connection
def __repr__(self):
result = "<%s at 0x%x stored_count=%s %s" % (
type(self).__name__,
id(self),
len(getattr(self, 'temp_storage', ()) or ()),
self._tpc_state_transaction_data(),
)
extra = self._tpc_state_extra_repr_info()
for k, v in extra.items():
result += ' %s=%r' % (k, v)
result += '>'
return result
def _tpc_state_extra_repr_info(self):
return {}
def _tpc_state_transaction_data(self):
# Grovels around in the transaction object and tries to find interesting
# things to include.
# The ZODB Connection passes us an internal TransactionMetaData
# object; the real transaction object stores a reference to that in its data,
# keyed off the connection.
# We may or may not be able to get the real transaction using transaction.get(),
# depending on if we are using the global (thread local) transaction manager or not.
try:
global_tx = get_thread_local_transaction()
except NoTransaction:
# It's in explicit mode and we're not using it.
return "<no global transaction> tx=%r" % (self.transaction,)
tx_data = getattr(global_tx, '_data', None)
if not tx_data:
# No data stored on the transaction (or the implementation changed!)
return "<no transaction data> tx=%r" % (self.transaction,)
for v in tx_data.values():
if v is self.transaction:
# Yes, we found the metadata that ZODB uses, so we are
# joined to this transaction.
break
else:
return "<no transaction meta %r> tx=%r" % (tx_data, self.transaction,)
resources = sorted(global_tx._resources, key=rm_key)
return "transaction=%r resources=%r" % (global_tx, resources)
def tpc_finish(self, storage, transaction, f=None, _time=None): # pylint:disable=unused-argument
# For the sake of some ZODB tests, we need to implement this everywhere,
# even if it's not actually usable, and the first thing it needs to
# do is check the transaction.
if transaction is not self.transaction:
raise StorageTransactionError('tpc_finish called with wrong transaction')
raise NotImplementedError("tpc_finish not allowed in this state.")
def tpc_begin(self, _storage, transaction):
# Ditto as for tpc_finish
raise StorageTransactionError('tpc_begin not allowed in this state', type(self))
def tpc_abort(self, transaction, force=False):
if not force:
if transaction is not self.transaction:
return self
self.shared_state.abort(force)
return self.initial_state
def no_longer_stale(self):
return self
def stale(self, e):
return Stale(self, e)
def close(self):
if self.shared_state is not None:
self.tpc_abort(None, True)
self.shared_state = None
@implementer(ITPCStateNotInTransaction)
class NotInTransaction(object):
# The default state, when the storage is not attached to a
# transaction.
__slots__ = (
'last_committed_tid_int',
'read_only',
'begin_factory',
)
transaction = None
def __init__(self, begin_factory, read_only, committed_tid_int=0):
self.begin_factory = begin_factory
self.read_only = read_only
self.last_committed_tid_int = committed_tid_int
def with_committed_tid_int(self, committed_tid_int):
return NotInTransaction(
self.begin_factory,
self.read_only,
committed_tid_int
)
def tpc_abort(self, *args, **kwargs): # pylint:disable=arguments-differ,unused-argument,signature-differs
# Nothing to do
return self
def _no_transaction(self, *args, **kwargs):
raise StorageTransactionError("No transaction in progress")
tpc_finish = tpc_vote = _no_transaction
checkCurrentSerialInTransaction = _no_transaction
def store(self, *_args, **_kwargs):
if self.read_only:
raise ReadOnlyError()
self._no_transaction()
restore = deleteObject = undo = restoreBlob = store
def tpc_begin(self, storage, transaction): # XXX: Signature needs to change.
if self.read_only:
raise ReadOnlyError()
if transaction is self.transaction: # Also handles None.
raise StorageTransactionError("Duplicate tpc_begin calls for same transaction.")
state = SharedTPCState(self, storage, transaction)
try:
return self.begin_factory(state)
except:
state.abort()
raise
@property
def initial_state(self):
return self
# This object appears to be false.
def __bool__(self):
return False
__nonzero__ = __bool__
def close(self):
pass
@implementer(ITPCStateNotInTransaction)
class Stale(object):
"""
An error that lets us know we are stale
was encountered.
Just about all accesses to this object result in
re-raising that error.
"""
transaction = None
last_committed_tid_int = 0
def __init__(self, previous_state, stale_error):
self.previous_state = previous_state
self.stale_error = stale_error
def _stale(self, *args, **kwargs):
raise self.stale_error
store = restore = checkCurrentSerialInTransaction = _stale
undo = deleteObject = restoreBlob = _stale
tpc_begin = tpc_finish = tpc_vote = _stale
def tpc_abort(self, *args, **kwargs):
return self.previous_state.tpc_abort(*args, **kwargs)
@property
def initial_state(self):
return self.previous_state.initial_state
def no_longer_stale(self):
return self.previous_state
def stale(self, _e):
return self
def __bool__(self):
return False
__nonzero__ = __bool__
|
py | 1a300feeb1489d80c5e1aa38555ac20a50c24d8e | from .json_data_provider import *
|
py | 1a3011922de5be60416538f7f831f8861157210d |
IGNORED = None
ACTION_PENDING = 1
# Bigger than necessary
_MAX_VK_KEY = 0x200
_VK_KEY_MASK = 0x1ff
_CURRENT_KEY_STATE = [False] * _MAX_VK_KEY
_MODIFIERS = set()
def on_key_hook(vk_code, is_down, special_modifier_state = None):
"""
Module-wide storage for the current key state.
:param vk_code:
:param is_down:
:param special_modifier_state: map of vcodes to the up/down state
(True == is_down, False == !is_down). This is part of the
windows key state / locked desktop work-around.
:return: True if it's a recognized key, False if it isn't known.
"""
if special_modifier_state is not None:
for k, v in special_modifier_state.items():
if k != vk_code and k in _MODIFIER_KEYS:
if _CURRENT_KEY_STATE[k] != v:
print("DEBUG modifier {0} does not match inner state.".format(k))
if k in _MODIFIER_KEYS:
# Sync the modifier set to the externally reported state for this key.
if v:
_MODIFIERS.add(k)
else:
_MODIFIERS.discard(k)
_CURRENT_KEY_STATE[k] = v
if 0 <= vk_code <= _MAX_VK_KEY:
_CURRENT_KEY_STATE[vk_code] = is_down
if vk_code in _MODIFIER_KEYS:
if is_down:
_MODIFIERS.add(vk_code)
else:
_MODIFIERS.remove(vk_code)
return True
return False
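# Illustrative calls (VK codes taken from STR_VK_MAP below): on_key_hook(0xA0,
# True) marks left-shift as down and adds it to _MODIFIERS; a later
# on_key_hook(0x41, False) records the 'a' key as released. Both return True
# because the codes fall inside the tracked range.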
class KeyOverride(object):
"""
Captures all key presses. Certain keys map to actions.
All keys are simple (straight up keys; modifiers are considered keys).
One key per action.
"""
def __init__(self, key_commands=None):
self.__keys = {}
if key_commands is not None:
self.set_key_actions(key_commands)
def set_key_actions(self, actions):
assert isinstance(actions, dict)
# FIXME use a dict instead
# TODO in the future we may allow "shift+left" type keys here.
# The implementation in key_action would just check the _MODIFIERS
# state.
new_key_actions = {}
for key, action in actions.items():
assert isinstance(action, list) or isinstance(action, tuple)
action = tuple(action)
key = key.strip().lower()
if key in VK_ALIASES:
for k in VK_ALIASES[key]:
if k in MODIFIERS:
# TODO better error / warning
# Note use of user's value "key", rather than internal "k"
print("CONFIG ERROR: Simple keys are not allowed to be modifiers: {0}".format(key))
elif k in STR_VK_MAP:
# print("DEBUG KeyOverride: assigning {0} = `{1}`".format(hex(STR_VK_MAP[k]), action))
new_key_actions[STR_VK_MAP[k]] = action
else:
# TODO better error / warning
print("ERROR IN SETUP: alias {0} not in vk map".format(k))
elif key in MODIFIERS:
# TODO better error / warning
print("CONFIG ERROR: Simple keys are not allowed to be modifiers: {0}".format(key))
elif key in STR_VK_MAP:
new_key_actions[STR_VK_MAP[key]] = action
else:
# TODO better error / warning
print("CONFIG ERROR: Simple key not a known key: {0}".format(key))
self.__keys = new_key_actions
def reset(self):
pass
def key_action(self, vk_code, is_down):
if vk_code in _MODIFIER_KEYS:
# Ignore all modifier keys, so the "release" from a mode switch works right.
# This ties in with modifiers not allowed as simple keys.
return IGNORED
if not is_down and vk_code in self.__keys:
return self.__keys[vk_code]
# Prevent all other keys from working
return ACTION_PENDING
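# A minimal illustrative mapping for KeyOverride; the action names are
# placeholders, not commands defined by this module:
#
#   mode_keys = KeyOverride({"esc": ["cancel-mode"], "enter": ["accept"]})
#
# Releasing ESC then yields ("cancel-mode",) from key_action(), any other
# non-modifier key is swallowed with ACTION_PENDING, and modifiers pass
# through as IGNORED.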
class HotKeyChain(object):
"""
Takes a keypress, and manages the state of the keys.
It stores a list of key chains to action pairs.
There should be one of these per system "mode".
"""
def __init__(self, chain_commands=None):
self.__combos = []
# The modifiers which are down and associated with the active combos
self.__active_modifiers = []
# The previous key in the combo chain; we're waiting for it to be off.
self.__active_key = None
# The active combo chains. Index 0 in each item is the remaining list
# of key down actions to look for ([0] meaning the next one). Index 1
# in each item is the command to return.
self.__active_combos = []
# Set to True to prevent the OS shell from using the "windows" key.
self.block_win_key = False
if chain_commands is not None:
self.set_key_chains(chain_commands)
def set_key_chains(self, chain_commands):
assert isinstance(chain_commands, dict)
combos = []
for key_chain, command in chain_commands.items():
assert isinstance(command, list) or isinstance(command, tuple)
keys = parse_combo_str(key_chain)
if len(keys) > 0:
# We store modifiers a little differently.
# Rather than having a list of lists, which must be
# carefully examined, we instead construct the
# permutations of the keys, and store each of those as
# their own combo.
permutation_keys = []
_key_permutations(keys[0], 0, [], permutation_keys)
for perm in permutation_keys:
# print("DEBUG Combo {0} + {1} => {2}".format(perm, keys[1:], command))
combos.append((perm, keys[1:], tuple(command)))
# Change the variable in a single command.
self.__combos = combos
self.reset()
def reset(self):
self.__active_combos = []
self.__active_modifiers = []
self.__active_key = None
def key_action(self, vk_code, is_down):
"""
:param is_down:
:param vk_code:
:return: IGNORED if the key should be passed through,
ACTION_PENDING if the key should be blocked from passing to
another application, but does not complete an action, or
a list of the action to run.
"""
if _MODIFIERS == self.__active_modifiers:
if self.__active_key is None or not _CURRENT_KEY_STATE[self.__active_key]:
# The previous key is no longer down.
self.__active_key = None
next_combos = []
for ac in self.__active_combos:
if vk_code in ac[0][0]:
ac[0].pop(0)
if len(ac[0]) <= 0:
# We have our key
command = ac[1]
self.reset()
# print("DEBUG keys generated command {0}".format(command))
return command
next_combos.append(ac)
if len(next_combos) > 0:
self.__active_key = vk_code
self.__active_combos = next_combos
return ACTION_PENDING
elif is_down:
# A new key was pressed, which isn't a key in a pending
# combo. Reset our hot keys, and return an ignored.
self.reset()
# else, the previous active key is still down; wait for it
# to come up.
else:
# Discover which combo matches the modifiers.
self.reset()
new_active = []
for combo in self.__combos:
if combo[0] == _MODIFIERS:
new_active.append((list(combo[1]), combo[2]))
if len(new_active) > 0:
self.__active_key = None
self.__active_combos = new_active
self.__active_modifiers = set(_MODIFIERS)
# We still pass on the modifiers to the OS, just in case it's not
# a match.
if self.block_win_key and vk_code in _WIN_KEYS:
return ACTION_PENDING
return IGNORED
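# A minimal illustrative chain for HotKeyChain; the command list is a
# placeholder, not something this module defines:
#
#   chain = HotKeyChain({"win+a, f": ["focus-window"]})
#
# Holding either Windows key, pressing and releasing 'a', then pressing 'f'
# returns ("focus-window",); an unrelated key pressed mid-chain resets the
# pending combo.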
def parse_combo_str(chain_description):
"""
Special compact form of the string. For each key combo part,
we make a "string" of the only VK codes that must be "down" in
order to trigger the next part of the chain.
The format is "primary + primary + ... + key, key, key, ..."
:param chain_description:
:return: list of aliased combo lists. So, the return will be
[primary, key1, key2, ...], where "primary" are the primary
keys that must be pressed through the whole action. Key1 and
key2 (and so on) are the keys that must be pressed and released
in order (the last key will respond on key down). Each key
in the list is itself a list of alternate keys.
"""
assert isinstance(chain_description, str)
key_parts = chain_description.split(",")
# Parse the primary first. These are separated by "+".
# The last key in the list is the "non-always-down" key,
# meaning it's the first in the key chain.
primary_list = []
primary_keys = key_parts[0].split("+")
secondary_keys = [primary_keys[-1]]
secondary_keys.extend(key_parts[1:])
for key_text in primary_keys[:-1]:
primary_key = []
key_text = key_text.strip().lower()
if key_text in VK_ALIASES:
for k in VK_ALIASES[key_text]:
if k in STR_VK_MAP:
if k in MODIFIERS:
primary_key.append(STR_VK_MAP[k])
else:
# TODO better error / warning
print("CONFIG ERROR: Primary key not a modifier {0}".format(k))
else:
print("ERROR IN SETUP: alias {0} not in vk map".format(k))
elif key_text in STR_VK_MAP:
if key_text in MODIFIERS:
primary_key.append(STR_VK_MAP[key_text])
else:
# TODO better error / warning
print("CONFIG ERROR: Primary key not a modifier {0}".format(key_text))
else:
# TODO better error / warning
print("CONFIG ERROR: unknown key code [{0}]".format(key_text))
if len(primary_key) > 0:
primary_list.append(primary_key)
chain = [primary_list]
for key_text in secondary_keys:
key = []
key_text = key_text.strip().lower()
if key_text in VK_ALIASES:
for k in VK_ALIASES[key_text]:
if k in STR_VK_MAP:
if k in MODIFIERS:
# TODO better error / warning
print("CONFIG ERROR: secondary key is a modifier {0}".format(k))
else:
key.append(STR_VK_MAP[k])
else:
print("ERROR IN SETUP: alias {0} not in vk map".format(k))
elif key_text in STR_VK_MAP:
if key_text in MODIFIERS:
# TODO better error / warning
print("CONFIG ERROR: secondary key is a modifier {0}".format(key_text))
else:
key.append(STR_VK_MAP[key_text])
else:
# TODO better error / warning
print("CONFIG ERROR: unknown key code {0}".format(key_text))
if len(key) > 0:
chain.append(key)
return chain
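# Illustrative result (VK codes from STR_VK_MAP below):
#   parse_combo_str("win+a, b")
#   -> [[[0x5B, 0x5C]], [0x41], [0x42]]
# i.e. the primary alternates for "win", then the 'a' key, then the 'b' key.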
def _key_permutations(key_alt_list, alt_index, current_list, final_list):
"""
Takes a list of key alternates ([ [k1a, k1b, ...], [k2a, k2b, ...], ...])
and appends to final_list one set per combination that picks exactly one
alternate from each position (e.g. [[a, b], [c]] yields {a, c} and {b, c}).
:param key_alt_list: list of per-position alternate-key lists
:param alt_index: index of the position currently being expanded
:param current_list: keys already chosen for the preceding positions
:param final_list: output list that accumulates the resulting key sets
:return: None; results are appended to final_list
"""
for key in key_alt_list[alt_index]:
next_list = list(current_list)
next_list.append(key)
if alt_index + 1 >= len(key_alt_list):
final_list.append(set(next_list))
else:
_key_permutations(key_alt_list, alt_index + 1, next_list, final_list)
def vk_to_names(vk):
maps = []
for vk_str, code in STR_VK_MAP.items():
# There are multiple mappings; return them all.
if code == vk:
maps.append(vk_str)
if len(maps) <= 0:
maps.append("#{0}".format(hex(vk)))
return maps
def is_vk_modifier(vk):
return vk in _MODIFIER_KEYS
# Built-in alias VK keys for user-specified keys
VK_ALIASES = {
"win": ["lwin", "rwin"],
"shift": ["lshift", "rshift"],
"control": ["lcontrol", "rcontrol"],
"alt": ["lalt", "ralt"],
"menu": ["lmenu", "rmenu"],
}
# Set of all recognized modifiers
MODIFIERS = {
"shift",
"lshift",
"rshift",
"control",
"ctrl",
"lcontrol",
"lctrl",
"rcontrol",
"rctrl",
"alt",
"lalt",
"ralt",
"lwin",
"rwin",
"lmenu",
"rmenu",
"apps",
"caps-lock",
}
# https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx
STR_VK_MAP = {
"lmb": 0x01, # VK_LBUTTON Left mouse button
"rmb": 0x02, # VK_RBUTTON Right mouse button
"break": 0x03, # VK_CANCEL Control-break processing
"mmb": 0x04, # VK_MBUTTON Middle mouse button (three-button mouse)
"x1mb": 0x05, # VK_XBUTTON1 X1 mouse button
"x2mb": 0x06, # VK_XBUTTON2 X2 mouse button
"x3mb": 0x07, # - Undefined
"back": 0x08, # VK_BACK BACKSPACE key
"backspace": 0x08, # VK_BACK BACKSPACE key
"tab": 0x09, # VK_TAB TAB key
# - 0x0A-0B Reserved
"clear": 0x0C, # VK_CLEAR CLEAR key
"return": 0x0D, # VK_RETURN ENTER key
"enter": 0x0D, # VK_RETURN ENTER key
"cr": 0x0D, # VK_RETURN ENTER key
"lf": 0x0D, # VK_RETURN ENTER key
# - 0x0E-0F Undefined
# These VK keys don't seem to get generated by the global key handler;
# instead, the more low-level (lcontrol, rcontrol, etc) ones are.
"shift": 0x10, # VK_SHIFT SHIFT key
"sft": 0x10, # VK_SHIFT SHIFT key
"control": 0x11, # VK_CONTROL CTRL key
"ctrl": 0x11, # VK_CONTROL CTRL key
"menu": 0x12, # VK_MENU ALT key
"alt": 0x12, # VK_MENU ALT key
"pause": 0x13, # VK_PAUSE PAUSE key
"caps-lock": 0x14, # VK_CAPITAL CAPS LOCK key
"kana": 0x15, # VK_KANA IME Kana mode
"hanguel": 0x15, # VK_HANGUEL IME Hanguel mode (maintained for compatibility; use VK_HANGUL)
"hangul": 0x15, # VK_HANGUL IME Hangul mode
# - 0x16 Undefined
"junja": 0x17, # VK_JUNJA IME Junja mode
"final": 0x18, # VK_FINAL IME final mode
"hanja": 0x19, # VK_HANJA IME Hanja mode
"kanji": 0x19, # VK_KANJI IME Kanji mode
# 0x1A - Undefined
"escape": 0x1B, # VK_ESCAPE ESC key
"esc": 0x1B, # VK_ESCAPE ESC key
"convert": 0x1C, # VK_CONVERT IME convert
"nonconvert": 0x1D, # VK_NONCONVERT IME nonconvert
"accept": 0x1E, # VK_ACCEPT IME accept
"modechange": 0x1F, # VK_MODECHANGE IME mode change request
"space": 0x20, # VK_SPACE SPACEBAR
"prior": 0x21, # VK_PRIOR PAGE UP key
"pgup": 0x21, # VK_PRIOR PAGE UP key
"pageup": 0x21, # VK_PRIOR PAGE UP key
"next": 0x22, # VK_NEXT PAGE DOWN key
"pgdn": 0x22, # VK_NEXT PAGE DOWN key
"pagedown": 0x22, # VK_NEXT PAGE DOWN key
"end": 0x23, # VK_END END key
"home": 0x24, # VK_HOME HOME key
"left": 0x25, # VK_LEFT LEFT ARROW key
"up": 0x26, # VK_UP UP ARROW key
"right": 0x27, # VK_RIGHT RIGHT ARROW key
"down": 0x28, # VK_DOWN DOWN ARROW key
"select": 0x29, # VK_SELECT SELECT key
"print": 0x2A, # VK_PRINT PRINT key
"execute": 0x2B, # VK_EXECUTE EXECUTE key
"snapshot": 0x2C, # VK_SNAPSHOT PRINT SCREEN key
"insert": 0x2D, # VK_INSERT INS key
"delete": 0x2E, # VK_DELETE DEL key
"del": 0x2E, # VK_DELETE DEL key
"help": 0x2F, # VK_HELP HELP key
"lwin": 0x5B, # VK_LWIN Left Windows key (Natural keyboard)
"rwin": 0x5C, # VK_RWIN Right Windows key (Natural keyboard)
"apps": 0x5D, # VK_APPS Applications key (Natural keyboard)
# 0x5E - Reserved
"sleep": 0x5F, # VK_SLEEP Computer Sleep key
"numpad0": 0x60, # VK_NUMPAD0 Numeric keypad 0 key
"numpad1": 0x61, # VK_NUMPAD1 Numeric keypad 1 key
"numpad2": 0x62, # VK_NUMPAD2 Numeric keypad 2 key
"numpad3": 0x63, # VK_NUMPAD3 Numeric keypad 3 key
"numpad4": 0x64, # VK_NUMPAD4 Numeric keypad 4 key
"numpad5": 0x65, # VK_NUMPAD5 Numeric keypad 5 key
"numpad6": 0x66, # VK_NUMPAD6 Numeric keypad 6 key
"numpad7": 0x67, # VK_NUMPAD7 Numeric keypad 7 key
"numpad8": 0x68, # VK_NUMPAD8 Numeric keypad 8 key
"numpad9": 0x69, # VK_NUMPAD9 Numeric keypad 9 key
"multiply": 0x6A, # VK_MULTIPLY Multiply key
"add": 0x6B, # VK_ADD Add key
"separator": 0x6C, # VK_SEPARATOR Separator key
"subtract": 0x6D, # VK_SUBTRACT Subtract key
"decimal": 0x6E, # VK_DECIMAL Decimal key
"divide": 0x6F, # VK_DIVIDE Divide key
"f1": 0x70, # VK_F1 F1 key
"f2": 0x71, # VK_F2 F2 key
"f3": 0x72, # VK_F3 F3 key
"f4": 0x73, # VK_F4 F4 key
"f5": 0x74, # VK_F5 F5 key
"f6": 0x75, # VK_F6 F6 key
"f7": 0x76, # VK_F7 F7 key
"f8": 0x77, # VK_F8 F8 key
"f9": 0x78, # VK_F9 F9 key
"f10": 0x79, # VK_F10 F10 key
"f11": 0x7A, # VK_F11 F11 key
"f12": 0x7B, # VK_F12 F12 key
"f13": 0x7C, # VK_F13 F13 key
"f14": 0x7D, # VK_F14 F14 key
"f15": 0x7E, # VK_F15 F15 key
"f16": 0x7F, # VK_F16 F16 key
"f17": 0x80, # VK_F17 F17 key
"f18": 0x81, # VK_F18 F18 key
"f19": 0x82, # VK_F19 F19 key
"f20": 0x83, # VK_F20 F20 key
"f21": 0x84, # VK_F21 F21 key
"f22": 0x85, # VK_F22 F22 key
"f23": 0x86, # VK_F23 F23 key
"f24": 0x87, # VK_F24 F24 key
# 0x88-8F - Unassigned
"numlock": 0x90, # VK_NUMLOCK NUM LOCK key
"scroll": 0x91, # VK_SCROLL SCROLL LOCK key
# 0x92-96 - OEM specific
# 0x97-9F - Unassigned
"lshift": 0xA0, # VK_LSHIFT Left SHIFT key
"rshift": 0xA1, # VK_RSHIFT Right SHIFT key
"lcontrol": 0xA2, # VK_LCONTROL Left CONTROL key
"lctrl": 0xA2, # VK_LCONTROL Left CONTROL key
"rcontrol": 0xA3, # VK_RCONTROL Right CONTROL key
"rctrl": 0xA3, # VK_RCONTROL Right CONTROL key
"lmenu": 0xA4, # VK_LMENU Left MENU key
"lalt": 0xA4, # VK_LMENU Left MENU key
"rmenu": 0xA5, # VK_RMENU Right MENU key
"ralt": 0xA5, # VK_RMENU Right MENU key
"browser-back": 0xA6, # VK_BROWSER_BACK Browser Back key
"browser-forward": 0xA7, # VK_BROWSER_FORWARD Browser Forward key
"browser-refresh": 0xA8, # VK_BROWSER_REFRESH Browser Refresh key
"browser-stop": 0xA9, # VK_BROWSER_STOP Browser Stop key
"browser-search": 0xAA, # VK_BROWSER_SEARCH Browser Search key
"browser-favorites": 0xAB, # VK_BROWSER_FAVORITES Browser Favorites key
"browser-home": 0xAC, # VK_BROWSER_HOME Browser Start and Home key
"volume-mute": 0xAD, # VK_VOLUME_MUTE Volume Mute key
"volume-down": 0xAE, # VK_VOLUME_DOWN Volume Down key
"volume-up": 0xAF, # VK_VOLUME_UP Volume Up key
"media-next-track": 0xB0, # VK_MEDIA_NEXT_TRACK Next Track key
"media-prev-track": 0xB1, # VK_MEDIA_PREV_TRACK Previous Track key
"media-stop": 0xB2, # VK_MEDIA_STOP Stop Media key
"media-play-pause": 0xB3, # VK_MEDIA_PLAY_PAUSE Play/Pause Media key
"launch-mail": 0xB4, # VK_LAUNCH_MAIL Start Mail key
"launch-media-select": 0xB5, # VK_LAUNCH_MEDIA_SELECT Select Media key
"launch-app1": 0xB6, # VK_LAUNCH_APP1 Start Application 1 key
"launch-app2": 0xB7, # VK_LAUNCH_APP2 Start Application 2 key
# 0xB8-B9 - Reserved
"oem_1": 0xBA, # VK_OEM_1 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the ';:' key
":": 0xBA,
";": 0xBA,
"colon": 0xBA,
"oem_plus": 0xBB, # VK_OEM_PLUS For any country/region, the '+' key
"plus": 0xBB,
"oem_comma": 0xBC, # VK_OEM_COMMA For any country/region, the ',' key
"comma": 0xBC,
",": 0xBC,
"<": 0xBC,
"oem_minus": 0xBD, # VK_OEM_MINUS For any country/region, the '-' key
"minus": 0xBD,
"oem_period": 0xBE, # VK_OEM_PERIOD For any country/region, the '.' key
".": 0xBE,
"period": 0xBE,
">": 0xBE,
"oem_2": 0xBF, # VK_OEM_2 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the '/?' key
"/": 0xBF,
"slash": 0xBF,
"?": 0xBF,
"question": 0xBF,
"question-mark": 0xBF,
"oem2": 0xBF,
"oem_3": 0xC0, # VK_OEM_3 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the '`~' key
"oem3": 0xC0,
"~": 0xC0,
"tilde": 0xC0,
"twiddle": 0xC0,
"`": 0xC0,
"back-tick": 0xC0,
# 0xC1-D7 - Reserved
# 0xD8-DA - Unassigned
"oem_4": 0xDB, # VK_OEM_4 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the '[{' key
"oem4": 0xDB,
"[": 0xDB,
"{": 0xDB,
"left-bracket": 0xDB,
"oem_5": 0xDC, # VK_OEM_5 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the '\|' key
"oem5": 0xDC,
"|": 0xDC,
"\\": 0xDC,
"pipe": 0xDC,
"backslash": 0xDC,
"oem_6": 0xDD, # VK_OEM_6 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the ']}' key
"oem6": 0xDD,
"]": 0xDD,
"}": 0xDD,
"right-bracket": 0xDD,
"oem_7": 0xDE, # VK_OEM_7 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard,
# the 'single-quote/double-quote' key
"oem7": 0xDE,
'"': 0xDE,
"'": 0xDE,
"quote": 0xDE,
"tick": 0xDE,
"oem_8": 0xDF, # VK_OEM_8 Used for miscellaneous characters; it can vary by keyboard.
"oem8": 0xDF,
# 0xE0 - Reserved
# 0xE1 - OEM specific
"oem_102": 0xE2, # VK_OEM_102 Either the angle bracket key or the backslash key on
# the RT 102-key keyboard
"oem102": 0xE2,
# 0xE3-E4 - OEM specific
"processkey": 0xE5, # VK_PROCESSKEY IME PROCESS key
# 0xE6 - OEM specific
"packet": 0xE7, # VK_PACKET Used to pass Unicode characters as if they were
# keystrokes. The VK_PACKET key is the low word of a 32-bit Virtual
# Key value used for non-keyboard input methods. For more
# information, see Remark in KEYBDINPUT, SendInput, WM_KEYDOWN, and WM_KEYUP
# 0xE8 - Unassigned
# 0xE9-F5 - OEM specific
"attn": 0xF6, # VK_ATTN Attn key
"crsel": 0xF7, # VK_CRSEL CrSel key
"exsel": 0xF8, # VK_EXSEL ExSel key
"ereof": 0xF9, # VK_EREOF Erase EOF key
"play": 0xFA, # VK_PLAY Play key
"zoom": 0xFB, # VK_ZOOM Zoom key
"noname": 0xFC, # VK_NONAME Reserved
"pa1": 0xFD, # VK_PA1 PA1 key
"oem_clear": 0xFE, # VK_OEM_CLEAR Clear key
# 0x3A-40 - Undefined
"0": 0x30, # 0 key
"1": 0x31, # 1 key
"2": 0x32, # 2 key
"3": 0x33, # 3 key
"4": 0x34, # 4 key
"5": 0x35, # 5 key
"6": 0x36, # 6 key
"7": 0x37, # 7 key
"8": 0x38, # 8 key
"9": 0x39, # 9 key
"a": 0x41, # A key
"b": 0x42, # B key
"c": 0x43, # C key
"d": 0x44, # D key
"e": 0x45, # E key
"f": 0x46, # F key
"g": 0x47, # G key
"h": 0x48, # H key
"i": 0x49, # I key
"j": 0x4A, # J key
"k": 0x4B, # K key
"l": 0x4C, # L key
"m": 0x4D, # M key
"n": 0x4E, # N key
"o": 0x4F, # O key
"p": 0x50, # P key
"q": 0x51, # Q key
"r": 0x52, # R key
"s": 0x53, # S key
"t": 0x54, # T key
"u": 0x55, # U key
"v": 0x56, # V key
"w": 0x57, # W key
"x": 0x58, # X key
"y": 0x59, # Y key
"z": 0x5A, # Z key
}
_MODIFIER_KEYS = set()
for __k in MODIFIERS:
_MODIFIER_KEYS.add(STR_VK_MAP[__k])
_WIN_KEYS = [STR_VK_MAP['lwin'], STR_VK_MAP['rwin']]
SPECIAL_MODIFIER_CHECK_VKEY_CODES = (
STR_VK_MAP['lwin'], STR_VK_MAP['rwin']
)
|
py | 1a3012a0f455705472b60991f832c2726af097fd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import astropy.units as u
from ....utils.testing import assert_quantity_allclose, requires_data
from .. import PrimaryFlux
@requires_data("gammapy-data")
def test_primary_flux():
with pytest.raises(ValueError):
PrimaryFlux(channel="Spam", mDM=1 * u.TeV)
primflux = PrimaryFlux(channel="W", mDM=1 * u.TeV)
actual = primflux.table_model(500 * u.GeV)
desired = 9.328234e-05 / u.GeV
assert_quantity_allclose(actual, desired)
|
py | 1a30130f642b10e1d40939478b8141a9ebcedc60 | # -*- coding: utf-8 -*-
"""
glusterfstools.volumefilters
:copyright: (c) 2013, 2014 by Aravinda VK
:license: BSD, see LICENSE for more details.
"""
from functools import wraps
import re
_volume_filters = {}
def filter(name):
def filter_decorator(f):
@wraps(f)
def wrapper(*args, **kwds):
return f(*args, **kwds)
global _volume_filters
_volume_filters[name] = wrapper
return wrapper
return filter_decorator
@filter("name")
def name_filter(vols, value):
def is_match(vol, value):
if value in ['', 'all'] or \
vol["name"].lower() == value.lower().strip() or \
re.search(value, vol["name"]):
return True
else:
return False
return [v for v in vols if is_match(v, value)]
@filter("status")
def status_filter(vols, value):
def is_match(vol, value):
if value in ['', 'all'] or \
vol["status"].lower() == value.lower().strip():
return True
else:
return False
return [v for v in vols if is_match(v, value)]
@filter("type")
def type_filter(vols, value):
def is_match(vol, value):
if value in ['', 'all'] or \
vol["type"].lower() == value.lower() or \
re.search(value, vol["type"], re.I):
return True
else:
return False
return [v for v in vols if is_match(v, value)]
@filter("volumewithbrick")
def volumewithbrick_filter(vols, value):
def is_match(vol, value):
for brick in vol["bricks"]:
if value in ['', 'all'] or \
brick.lower() == value.lower() or \
re.search(value, brick, re.I):
return True
# If no single brick matching the query
return False
return [v for v in vols if is_match(v, value)]
def get():
return _volume_filters
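# Illustrative use of the registry; the volume dict below only assumes the
# keys these filters read (name, status, type, bricks), it is not data
# shipped with this module:
#
#   vols = [{"name": "gv0", "status": "Started", "type": "Replicate",
#            "bricks": ["host1:/b1"]}]
#   get()["status"](vols, "started")   # -> [that volume]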
|
py | 1a301448b33056fc64fe89f0c4136b91c4fc9fa8 | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pytest configuration file for PennyLane test suite.
"""
import os
import pytest
import numpy as np
import pennylane as qml
from pennylane.plugins import DefaultGaussian
# defaults
TOL = 1e-3
TF_TOL = 2e-2
class DummyDevice(DefaultGaussian):
"""Dummy device to allow Kerr operations"""
_operation_map = DefaultGaussian._operation_map.copy()
_operation_map['Kerr'] = lambda *x, **y: np.identity(2)
@pytest.fixture(scope="session")
def tol():
"""Numerical tolerance for equality tests."""
return float(os.environ.get("TOL", TOL))
@pytest.fixture(scope="session")
def tf_tol():
"""Numerical tolerance for equality tests."""
return float(os.environ.get("TF_TOL", TF_TOL))
@pytest.fixture(scope="session", params=[1, 2])
def n_layers(request):
"""Number of layers."""
return request.param
@pytest.fixture(scope="session", params=[2, 3])
def n_subsystems(request):
"""Number of qubits or qumodes."""
return request.param
@pytest.fixture(scope="session")
def qubit_device(n_subsystems):
return qml.device('default.qubit', wires=n_subsystems)
@pytest.fixture(scope="function")
def qubit_device_1_wire():
return qml.device('default.qubit', wires=1)
@pytest.fixture(scope="function")
def qubit_device_2_wires():
return qml.device('default.qubit', wires=2)
@pytest.fixture(scope="function")
def qubit_device_3_wires():
return qml.device('default.qubit', wires=3)
@pytest.fixture(scope="session")
def tensornet_device(n_subsystems):
return qml.device('default.tensor', wires=n_subsystems)
@pytest.fixture(scope="function")
def tensornet_device_1_wire():
return qml.device('default.tensor', wires=1)
@pytest.fixture(scope="function")
def tensornet_device_2_wires():
return qml.device('default.tensor', wires=2)
@pytest.fixture(scope="function")
def tensornet_device_3_wires():
return qml.device('default.tensor', wires=3)
@pytest.fixture(scope="session")
def gaussian_device(n_subsystems):
"""Number of qubits or modes."""
return DummyDevice(wires=n_subsystems)
@pytest.fixture(scope="session")
def gaussian_dummy():
"""Number of qubits or modes."""
return DummyDevice
@pytest.fixture(scope="session")
def gaussian_device_2_wires():
"""A 2-mode Gaussian device."""
return DummyDevice(wires=2)
@pytest.fixture(scope="session")
def gaussian_device_4modes():
"""A 4 mode Gaussian device."""
return DummyDevice(wires=4)
@pytest.fixture(scope='session')
def torch_support():
"""Boolean fixture for PyTorch support"""
try:
import torch
from torch.autograd import Variable
torch_support = True
except ImportError as e:
torch_support = False
return torch_support
@pytest.fixture()
def skip_if_no_torch_support(torch_support):
if not torch_support:
pytest.skip("Skipped, no torch support")
@pytest.fixture(scope='module')
def tf_support():
"""Boolean fixture for TensorFlow support"""
try:
import tensorflow as tf
tf_support = True
except ImportError as e:
tf_support = False
return tf_support
@pytest.fixture()
def skip_if_no_tf_support(tf_support):
if not tf_support:
pytest.skip("Skipped, no tf support")
@pytest.fixture(scope="module",
params=[1, 2, 3])
def seed(request):
"""Different seeds."""
return request.param
@pytest.fixture(scope="function")
def mock_device(monkeypatch):
"""A mock instance of the abstract Device class"""
with monkeypatch.context() as m:
dev = qml.Device
m.setattr(dev, '__abstractmethods__', frozenset())
m.setattr(dev, 'short_name', 'mock_device')
m.setattr(dev, 'capabilities', lambda cls: {"model": "qubit"})
yield qml.Device(wires=2)
@pytest.fixture
def tear_down_hermitian():
yield None
qml.Hermitian._eigs = {}
|
py | 1a30151df25ee8d440dce68656b0ceb7eb16edb0 | from data.cifar import Cifar
from utility.step_lr import StepLR
from utility.initialize import initialize
from utility.log import Log
from utility.lognolr import LogNoLR
from model import *
import time
from model.preact_resnet import *
from model.smooth_cross_entropy import smooth_crossentropy
from model.wideresnet import WideResNet
from model.resnet import *
from model.vgg import *
from sam import SAM
import argparse
import torch
import sys
import os
import torchvision
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR, CosineAnnealingWarmRestarts
from utility.cosine_annealing_with_warmup_lr import CosineAnnealingWarmUpRestarts
import tensorboard
from utils import progress_bar
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--adaptive", default=True, type=bool,
help="True if you want to use the Adaptive SAM.")
parser.add_argument("--batch_size", default=128, type=int,
help="Batch size used in the training and validation loop.")
parser.add_argument("--depth", default=16, type=int,
help="Number of layers.")
parser.add_argument("--dropout", default=0.0,
type=float, help="Dropout rate.")
parser.add_argument("--epochs", default=150, type=int,
help="Total number of epochs.")
parser.add_argument("--label_smoothing", default=0.1,
type=float, help="Use 0.0 for no label smoothing.")
parser.add_argument("--learning_rate", default=0.1, type=float,
help="Base learning rate at the start of the training.")
parser.add_argument("--momentum", default=0.9,
type=float, help="SGD Momentum.")
parser.add_argument("--threads", default=2, type=int,
help="Number of CPU threads for dataloaders.")
parser.add_argument("--rho", default=0.5, type=int,
help="Rho parameter for SAM.")
parser.add_argument("--weight_decay", default=0.0005,
type=float, help="L2 weight decay.")
parser.add_argument("--width_factor", default=8, type=int,
help="In case WideResNet, how many times wider compared to normal ResNet.")
parser.add_argument("--SAM", default=False, type=bool,
help="Use SAM optimizer or SGD.")
parser.add_argument('--resume', '-r', action='store_true',
help='resume from checkpoint')
args = parser.parse_args()
initialize(args, seed=42)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(
root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(
root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(
testset, batch_size=100, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck')
# dataset = Cifar(args.batch_size, args.threads)
# Logger
# log = Log(log_each=10)
log = LogNoLR(log_each=10)
# which model to use (VGG, Preactivation ResNet,)
# model = WideResNet(args.depth, 10, args.width_factor,
# dropRate=args.dropout).to(device)
model = VGG16().to(device)
if device.type == 'cuda':
model = torch.nn.DataParallel(model)
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
hermite_bias_list = []
hermite_weight_list = []
for name, param in model.named_parameters():
if 'bias' in name:
hermite_bias_list.append(name)
if 'wts' in name:
hermite_weight_list.append(name)
hermite_list = hermite_bias_list + hermite_weight_list
params1 = list(map(lambda x: x[1], list(
filter(lambda kv: kv[0] in hermite_bias_list, model.named_parameters()))))
params2 = list(map(lambda x: x[1], list(
filter(lambda kv: kv[0] in hermite_weight_list, model.named_parameters()))))
# params3 = list(map(lambda x: x[1], list(
# filter(lambda kv: kv[0] in hermite_weight2_list, model.named_parameters()))))
# params3 = list(map(lambda x: x[1], list(
# filter(lambda kv: kv[0] in w3, model.named_parameters()))))
# params4 = list(map(lambda x: x[1], list(
# filter(lambda kv: kv[0] in w4, model.named_parameters()))))
base_params = list(map(lambda x: x[1], list(
filter(lambda kv: kv[0] not in hermite_list, model.named_parameters()))))
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir(
'checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt.pth')
model.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
# Optimizer (SGD or SAM): SAM shows slightly better accuracy compared to SGD
if args.SAM is True:
base_optimizer = torch.optim.SGD
optimizer = SAM(
[{'params': base_params},
{'params': params1, 'weight_decay': 0, 'lr': args.learning_rate},
{'params': params2, 'weight_decay': args.weight_decay /
2, 'lr': args.learning_rate},
# {'params': params3, 'weight_decay': args.weight_decay /
# 2, 'lr': args.learning_rate},
# {'params': params4, 'weight_decay': args.weight_decay /
# 2, 'lr': args.learning_rate}
],
base_optimizer, rho=args.rho, adaptive=args.adaptive, lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
else:
optimizer = torch.optim.SGD(
[{'params': base_params},
{'params': params1, 'weight_decay': args.weight_decay /
2, 'lr': args.learning_rate},
{'params': params2, 'weight_decay': args.weight_decay /
2, 'lr': args.learning_rate},
# {'params': params3, 'weight_decay': args.weight_decay /
# 2, 'lr': args.learning_rate},
# {'params': params4, 'weight_decay': args.weight_decay/2, 'lr': args.learning_rate}
],
lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = StepLR(optimizer, args.learning_rate, args.epochs)
print(args.epochs, " epochs")
if args.SAM is True:
print("SAM optimizer")
else:
print("SGD optimizer")
# print(list(model.parameters()))
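# With --SAM the training loop below does two passes per batch: the first
# backward pass feeds optimizer.first_step(), which perturbs the weights
# toward the locally worst case; a second forward/backward at the perturbed
# point feeds optimizer.second_step(), which applies the actual update.
# With plain SGD it is the usual single zero_grad/backward/step cycle.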
def train(epoch):
print('\nEpoch: %d' % epoch)
model.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
if args.SAM is False:
optimizer.zero_grad()
outputs = model(inputs)
loss = smooth_crossentropy(outputs, targets)
loss.mean().backward()
if args.SAM is True:
optimizer.first_step(zero_grad=True)
smooth_crossentropy(model(inputs), targets).mean().backward()
optimizer.second_step(zero_grad=True)
else:
optimizer.step()
train_loss += loss.mean().item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test(epoch):
global best_acc
model.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = model(inputs)
loss = smooth_crossentropy(outputs, targets)
test_loss += loss.mean().item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
acc = 100.*correct/total
if acc > best_acc:
best_acc = acc
# Save checkpoint.
print('Saving checkpoint..')
state = {
'net': model.state_dict(),
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, './checkpoint/ckpt.pth')
# # Save torchscript
with torch.no_grad():
print('Saving Torch Script..')
if not os.path.isdir('torchscript'):
os.mkdir('torchscript')
example = torch.rand(1, 3, 32, 32).to(device)
traced_script_module = torch.jit.trace(model, example)
traced_script_module.save("./torchscript/model.pt")
for epoch in range(start_epoch, start_epoch + args.epochs):
train(epoch)
test(epoch)
scheduler(epoch)
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
# for epoch in range(args.epochs):
# model.train()
# log.train(len_dataset=len(dataset.train))
# for batch in dataset.train:
# inputs, targets = (b.to(device) for b in batch)
# if args.SAM is False:
# optimizer.zero_grad()
# predictions = model(inputs)
# loss = smooth_crossentropy(predictions, targets)
# loss.mean().backward()
# if args.SAM is True:
# optimizer.first_step(zero_grad=True)
# smooth_crossentropy(model(inputs), targets).mean().backward()
# optimizer.second_step(zero_grad=True)
# else:
# optimizer.step()
# with torch.no_grad():
# correct = torch.argmax(predictions.data, 1) == targets
# # log(model, loss.cpu(), correct.cpu(), scheduler.lr())
# # check which log function to use at line 61
# log(model, loss.cpu(), correct.cpu())
# scheduler(epoch)
# model.eval()
# log.eval(len_dataset=len(dataset.test))
# with torch.no_grad():
# for batch in dataset.test:
# inputs, targets = (b.to(device) for b in batch)
# predictions = model(inputs)
# loss = smooth_crossentropy(predictions, targets)
# correct = torch.argmax(predictions, 1) == targets
# log(model, loss.cpu(), correct.cpu())
# log.flush()
|
py | 1a3015f7dd34484d6ac908e0b92ea79ba988ef16 | from im2mesh.onet_upconv2d_occtolocal import (
config, generation, training, models
)
__all__ = [
config, generation, training, models
]
|
py | 1a3016f34bfed9bf3b2e0f9512f111566e863fd2 | # -*- coding: utf-8 -*-
"""
Contains the definition of all data models according to the Castor EDC API.
@author: R.C.A. van Linschoten
https://orcid.org/0000-0003-3052-596X
"""
audit_trail_model = {
"datetime": [dict],
"event_type": [str],
"user_id": [str],
"user_name": [str],
"user_email": [str],
"event_details": [dict, list],
}
country_model = {
"id": [
str,
],
"country_id": [
str,
],
"country_name": [
str,
],
"country_tld": [
str,
],
"country_cca2": [
str,
],
"country_cca3": [
str,
],
}
single_country_model = {
"id": [
int,
],
"country_id": [
int,
],
"country_name": [
str,
],
"country_tld": [
str,
],
"country_cca2": [
str,
],
"country_cca3": [
str,
],
"_links": [
dict,
],
}
device_token_model = {
"device_token": [str],
"record_id": [str],
"created_on": [dict],
"updated_on": [dict],
}
export_data_model = {
"Study ID": [
str,
],
"Record ID": [
str,
],
"Form Type": [
str,
],
"Form Instance ID": [
str,
],
"Form Instance Name": [
str,
],
"Field ID": [
str,
],
"Value": [
str,
],
"Date": [
str,
],
"User ID": [
str,
],
}
export_structure_model = {
"Study ID": [
str,
],
"Form Type": [
str,
],
"Form Collection ID": [
str,
],
"Form Collection Name": [
str,
],
"Form Collection Order": [
str,
], # Actually int in database, but csv interprets everything as string
"Form ID": [
str,
],
"Form Name": [
str,
],
"Form Order": [
str,
], # Actually int in database, but csv interprets everything as string
"Field ID": [
str,
],
"Field Variable Name": [
str,
],
"Field Label": [
str,
],
"Field Type": [
str,
],
"Field Order": [
str,
], # Actually int in database, but csv interprets everything as string
"Field Required": [
str,
], # Actually bool in database, but csv interprets everything as string
"Calculation Template": [
str,
],
"Field Option Group": [
str,
],
}
export_option_group_model = {
"Study ID": [
str,
],
"Option Group Id": [
str,
],
"Option Group Name": [
str,
],
"Option Id": [
str,
],
"Option Name": [
str,
],
"Option Value": [
str,
],
}
role_model = {
"name": [str],
"description": [str],
"permissions": [dict],
"_links": [dict],
}
study_data_point_model = {
"field_id": [
str,
],
"field_value": [
str,
],
"record_id": [
str,
],
"updated_on": [str, type(None)],
}
study_data_point_extended_model = {
"record_id": [
str,
],
"field_variable_name": [
str,
],
"field_id": [
str,
],
"value": [
str,
],
"updated_on": [str, type(None)],
"_embedded": [
dict,
],
"_links": [
dict,
],
}
study_step_model = {
"id": [
str,
],
"step_id": [
str,
],
"step_description": [
str,
],
"step_name": [
str,
],
"step_order": [
int,
],
"_embedded": [
dict,
],
"_links": [
dict,
],
}
user_model = {
"id": [
str,
],
"user_id": [
str,
],
"entity_id": [
str,
],
"full_name": [
str,
],
"name_first": [str, type(None)],
"name_middle": [str, type(None)],
"name_last": [str, type(None)],
"email_address": [
str,
],
"institute": [str, type(None)],
"department": [str, type(None)],
"last_login": [
str,
],
"_links": [
dict,
],
}
user_study_model = {
"id": [
str,
],
"user_id": [
str,
],
"entity_id": [
str,
],
"full_name": [
str,
],
"name_first": [str, type(None)],
"name_middle": [str, type(None)],
"name_last": [str, type(None)],
"email_address": [
str,
],
"institute": [str, type(None)],
"department": [str, type(None)],
"manage_permissions": [dict],
"institute_permissions": [list],
"last_login": [
str,
],
"_links": [
dict,
],
}
study_model = {
"crf_id": [
str,
],
"study_id": [
str,
],
"name": [
str,
],
"created_by": [
str,
],
"created_on": [
str,
],
"live": [
bool,
],
"randomization_enabled": [
bool,
],
"gcp_enabled": [
bool,
],
"surveys_enabled": [
bool,
],
"premium_support_enabled": [
bool,
],
"main_contact": [
str,
],
"expected_centers": [int, type(None)],
"duration": [int, type(None)],
"expected_records": [int, type(None)],
"slug": [
str,
],
"version": [
str,
],
"domain": [
str,
],
"_links": [
dict,
],
}
report_model = {
"id": [
str,
],
"report_id": [
str,
],
"description": [
str,
],
"name": [
str,
],
"type": [
str,
],
"_links": [
dict,
],
}
report_instance_model = {
"id": [
str,
],
"name": [
str,
],
"status": [
str,
],
"parent_id": [
str,
],
"parent_type": [
str,
],
"record_id": [
str,
],
"report_name": [
str,
],
"archived": [
bool,
],
"created_on": [
str,
],
"created_by": [
str,
],
"_embedded": [
dict,
],
"_links": [
dict,
],
}
report_data_point_model = {
"field_id": [
str,
],
"report_instance_id": [
str,
],
"report_instance_name": [
str,
],
"field_value": [
str,
],
"record_id": [
str,
],
"updated_on": [
str,
],
}
report_data_point_extended_model = {
"record_id": [
str,
],
"field_variable_name": [
str,
],
"field_id": [
str,
],
"value": [
str,
],
"updated_on": [
str,
],
"report_instance_id": [
str,
],
"_embedded": [
dict,
],
"_links": [
dict,
],
}
report_step_model = {
"id": [
str,
],
"report_step_id": [
str,
],
"report_step_name": [
str,
],
"report_step_description": [
str,
],
"report_step_number": [
int,
],
"_links": [
dict,
],
"_embedded": [
dict,
],
}
survey_model = {
"id": [
str,
],
"survey_id": [
str,
],
"name": [
str,
],
"description": [
str,
],
"intro_text": [
str,
],
"outro_text": [
str,
],
"survey_steps": [
list,
],
"_links": [
dict,
],
}
package_model = {
"id": [
str,
],
"allow_open_survey_link": [bool],
"survey_package_id": [
str,
],
"name": [
str,
],
"description": [
str,
],
"intro_text": [
str,
],
"outro_text": [
str,
],
"sender_name": [
str,
],
"sender_email": [
str,
],
"auto_send": [
bool,
],
"allow_step_navigation": [
bool,
],
"show_step_navigator": [
bool,
],
"finish_url": [
str,
],
"auto_lock_on_finish": [
bool,
],
"default_invitation": [
str,
],
"default_invitation_subject": [
str,
],
"is_mobile": [bool],
"expire_after_hours": [int, type(None)],
"_embedded": [
dict,
],
"_links": [
dict,
],
}
survey_package_instance_model = {
"id": [
str,
],
"survey_package_instance_id": [
str,
],
"record_id": [
str,
],
"institute_id": [
str,
],
"institute_name": [
str,
],
"survey_package_id": [
str,
],
"survey_package_name": [
str,
],
"invitation_subject": [
str,
],
"invitation_content": [
str,
],
"created_on": [
dict,
],
"created_by": [
str,
],
"sent_on": [dict, type(None)],
"first_opened_on": [dict, type(None)],
"finished_on": [dict, type(None)],
"available_from": [dict],
"expire_on": [str, type(None)],
"all_fields_filled_on": [dict, type(None)],
"started_on": [dict, type(None)],
"locked": [
bool,
],
"archived": [
bool,
],
"survey_url_string": [
str,
],
"progress": [
int,
],
"auto_lock_on_finish": [
bool,
],
"auto_send": [
bool,
],
"_embedded": [
dict,
],
"_links": [
dict,
],
}
survey_data_point_model = {
"field_id": [
str,
],
"survey_instance_id": [
str,
],
"survey_name": [
str,
],
"field_value": [
str,
],
"record_id": [
str,
],
"updated_on": [
str,
],
}
survey_package_data_point_model = {
"field_id": [
str,
],
"survey_instance_id": [
str,
],
"survey_name": [
str,
],
"field_value": [
str,
],
"record_id": [
str,
],
"updated_on": [
str,
],
"survey_package_id": [
str,
],
}
survey_data_point_extended_model = {
"record_id": [
str,
],
"field_variable_name": [
str,
],
"field_id": [
str,
],
"value": [
str,
],
"updated_on": [
str,
],
"survey_instance_id": [
str,
],
"_embedded": [
dict,
],
"_links": [
dict,
],
}
survey_step_model = {
"id": [
str,
],
"survey_step_id": [
str,
],
"survey_step_name": [
str,
],
"survey_step_description": [
str,
],
"survey_step_number": [
int,
],
"_embedded": [
dict,
],
"_links": [
dict,
],
}
field_dep_model = {
"id": [
int,
],
"operator": [
str,
],
"value": [
str,
],
"parent_id": [
str,
],
"child_id": [
str,
],
"_links": [
dict,
],
}
field_model = {
"id": [
str,
],
"parent_id": [
str,
],
"field_id": [
str,
],
"field_number": [
int,
],
"field_label": [
str,
],
"field_variable_name": [str, type(None)],
"field_enforce_decimals": [bool, type(None)],
"field_type": [
str,
],
"field_required": [
int,
],
"field_hidden": [
int,
],
"field_info": [
str,
],
"field_units": [
str,
],
"field_min": [
int,
float,
type(None),
],
"field_min_label": [
str,
type(None),
],
"field_max": [
int,
float,
type(None),
],
"field_max_label": [
str,
type(None),
],
"field_summary_template": [
str,
type(None),
],
"field_slider_step": [
str,
int,
type(None),
],
"report_id": [
str,
],
"field_length": [
int,
type(None),
],
"additional_config": [
str,
],
"exclude_on_data_export": [
bool,
],
"option_group": [
dict,
type(None),
],
"metadata_points": [
list,
],
"validations": [
list,
],
"dependency_parents": [
list,
],
"dependency_children": [
list,
],
"_links": [
dict,
],
"field_image": [str, None],
}
field_opt_model = {
"id": [
str,
],
"name": [
str,
],
"description": [
str,
],
"layout": [
bool,
],
"options": [
list,
],
"_links": [
dict,
],
}
field_val_model = {
"id": [
int,
],
"type": [
str,
],
"value": [
str,
],
"operator": [
str,
],
"text": [
str,
],
"field_id": [
str,
],
"_links": [
dict,
],
}
institute_model = {
"id": [
str,
],
"institute_id": [
str,
],
"name": [
str,
],
"abbreviation": [
str,
],
"code": [str, type(None)],
"order": [
int,
],
"country_id": [
int,
],
"deleted": [
bool,
],
"_links": [
dict,
],
}
metadata_model = {
"id": [
str,
],
"metadata_type": [
dict,
],
"parent_id": [str, type(None)],
"value": [
str,
],
"description": [str, type(None)],
"element_type": [
str,
],
"element_id": [str],
"_links": [
dict,
],
}
metadata_type_model = {
"id": [
int,
],
"name": [
str,
],
"description": [
str,
],
"_links": [
dict,
],
}
phase_model = {
"id": [
str,
],
"phase_id": [
str,
],
"phase_description": [str, type(None)],
"phase_name": [
str,
],
"phase_duration": [int, type(None)],
"phase_order": [
int,
],
"_links": [
dict,
],
}
query_model = {
"id": [
str,
],
"record_id": [
str,
],
"field_id": [
str,
],
"status": [
str,
],
"first_query_remark": [
str,
],
"created_by": [
str,
],
"created_on": [
dict,
],
"updated_by": [
str,
],
"updated_on": [
dict,
],
"_embedded": [
dict,
],
"_links": [
dict,
],
}
randomization_model = {
"randomized_id": [str, type(None)],
"randomization_group": [str, type(None)],
"randomization_group_name": [str, type(None)],
"randomized_on": [dict, type(None)],
"_links": [
dict,
],
}
record_model = {
"id": [
str,
],
"record_id": [
str,
],
"_embedded": [
dict,
],
"ccr_patient_id": [
str,
],
"randomized_id": [str, type(None)],
"randomization_group": [str, type(None)],
"randomization_group_name": [str, type(None)],
"randomized_on": [dict, type(None)],
"last_opened_step": [str, type(None)],
"progress": [
int,
],
"status": [
str,
],
"locked": [bool],
"archived": [
bool,
],
"archived_reason": [str, type(None)],
"created_by": [
str,
],
"created_on": [
dict,
],
"updated_by": [
str,
],
"updated_on": [
dict,
],
"_links": [
dict,
],
}
record_progress_model = {
"record_id": [
str,
],
"steps": [
list,
],
"_links": [
dict,
],
}
steps_model = {
"step_id": [
str,
],
"complete": [
int,
],
"sdv": [
bool,
],
"locked": [
bool,
],
"signed": [
bool,
],
}
statistics_model = {
"study_id": [
str,
],
"records": [
dict,
],
"_links": [
dict,
],
}
stats_records_model = {
"total_count": [
int,
],
"institutes": [
list,
],
}
stats_institutes_model = {
"institute_id": [
str,
],
"institute_name": [
str,
],
"record_count": [
int,
],
}
data_options = {
"numeric": "1",
"date": "11-11-2017",
"string": "testing",
"dropdown": "1",
"radio": "1",
"textarea": "testing",
"slider": "5",
"checkbox": "1",
"calculation": "5",
"year": "2005",
}
|
py | 1a301712abadc14d6a6b8fb81f0639cd1a1b4dd8 | # https://www.kaggle.com/c/amazon-employee-access-challenge/forums/t/4838/python-code-to-achieve-0-90-auc-with-logistic-regression
__author__ = 'Miroslaw Horbal'
__email__ = '[email protected]'
__date__ = '14-06-2013'
import json
import pymongo as pymongo
from numpy import array
from sklearn import metrics, linear_model
from sklearn.model_selection import train_test_split
from scipy import sparse
from itertools import combinations
import numpy as np
import pandas as pd
SEED = 25
def group_data(data, degree=3, hash=hash):
"""
numpy.array -> numpy.array
    Groups all columns of data into all combinations of `degree`-tuples (triples by default)
"""
new_data = []
m, n = data.shape
for indicies in combinations(range(n), degree):
new_data.append([hash(tuple(v)) for v in data[:, indicies]])
return array(new_data).T
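# Worked sketch (added for clarity; values are hypothetical): for a 2x4
# categorical matrix, group_data(data, degree=2) hashes every pair of columns
# row by row, yielding one hashed feature per column pair, i.e. C(4, 2) = 6:
#   data = np.array([[1, 2, 3, 4],
#                    [1, 2, 3, 5]])
#   group_data(data, degree=2).shape  # -> (2, 6)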
def OneHotEncoder(data, keymap=None):
"""
OneHotEncoder takes data matrix with categorical columns and
converts it to a sparse binary matrix.
    Returns sparse binary matrix and keymap mapping categories to indices.
    If a keymap is supplied on input it will be used instead of creating one,
    and any categories appearing in the data that are not in the keymap are
    ignored.
"""
if keymap is None:
keymap = []
for col in data.T:
uniques = set(list(col))
keymap.append(dict((key, i) for i, key in enumerate(uniques)))
total_pts = data.shape[0]
outdat = []
for i, col in enumerate(data.T):
km = keymap[i]
num_labels = len(km)
spmat = sparse.lil_matrix((total_pts, num_labels))
for j, val in enumerate(col):
if val in km:
spmat[j, km[val]] = 1
outdat.append(spmat)
outdat = sparse.hstack(outdat).tocsr()
return outdat, keymap
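# Usage sketch (added for clarity; data is hypothetical): the returned keymap
# lets the same encoding be re-applied to new data, with unseen categories
# silently ignored:
#   train_enc, keymap = OneHotEncoder(np.array([['a'], ['b'], ['a']]))
#   test_enc, _ = OneHotEncoder(np.array([['b'], ['c']]), keymap)  # 'c' ignored
#   train_enc.shape  # -> (3, 2)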
def create_test_submission(filename, prediction):
content = []
for i, p in enumerate(prediction):
content.append({
'id': '%i' % (i + 1),
'ACTION': '%f' % p
})
f = open(filename, 'w')
json.dump(content, f)
f.close()
print('Saved')
# This loop essentially from Paul's starter code
def cv_loop(X, y, model, N):
mean_auc = 0.
for i in range(N):
X_train, X_cv, y_train, y_cv = train_test_split(
X, y, test_size=.20,
random_state=i * SEED)
model.fit(X_train, y_train)
preds = model.predict_proba(X_cv)[:, 1]
auc = metrics.roc_auc_score(y_cv, preds)
print("AUC (fold %d/%d): %f" % (i + 1, N, auc))
mean_auc += auc
return mean_auc / N
def main(user, password):
print("Reading dataset...")
client = pymongo.MongoClient("mongodb://%s:%s@businessdb:27017" % (user, password))
train_data = pd.read_json(json.dumps(list(client.test.train.find({}, {'_id': 0}))), orient='records')
test_data = pd.read_json(json.dumps(list(client.test.test.find({}, {'_id': 0}))), orient='records')
all_data = np.vstack((train_data.iloc[:, 1:], test_data.iloc[:, :]))
num_train = np.shape(train_data)[0]
# Transform data
print("Transforming data...")
dp = group_data(all_data, degree=2)
dt = group_data(all_data, degree=3)
y = array(train_data.iloc[:, 0])
X = all_data[:num_train]
X_2 = dp[:num_train]
X_3 = dt[:num_train]
X_test = all_data[num_train:]
X_test_2 = dp[num_train:]
X_test_3 = dt[num_train:]
X_train_all = np.hstack((X, X_2, X_3))
X_test_all = np.hstack((X_test, X_test_2, X_test_3))
num_features = X_train_all.shape[1]
model = linear_model.LogisticRegression()
# Xts holds one hot encodings for each individual feature in memory
# speeding up feature selection
Xts = [OneHotEncoder(X_train_all[:, [i]])[0] for i in range(num_features)]
print("Performing greedy feature selection...")
score_hist = []
N = 10
good_features = set([])
# Greedy feature selection loop
while len(score_hist) < 2 or score_hist[-1][0] > score_hist[-2][0]:
scores = []
for f in range(len(Xts)):
if f not in good_features:
feats = list(good_features) + [f]
Xt = sparse.hstack([Xts[j] for j in feats]).tocsr()
score = cv_loop(Xt, y, model, N)
scores.append((score, f))
print("Feature: %i Mean AUC: %f" % (f, score))
good_features.add(sorted(scores)[-1][1])
score_hist.append(sorted(scores)[-1])
print("Current features: %s" % sorted(list(good_features)))
# Remove last added feature from good_features
good_features.remove(score_hist[-1][1])
good_features = sorted(list(good_features))
print("Selected features %s" % good_features)
print("Performing hyperparameter selection...")
# Hyperparameter selection loop
score_hist = []
Xt = sparse.hstack([Xts[j] for j in good_features]).tocsr()
Cvals = np.logspace(-4, 4, 15, base=2)
for C in Cvals:
model.C = C
score = cv_loop(Xt, y, model, N)
score_hist.append((score, C))
print("C: %f Mean AUC: %f" % (C, score))
bestC = sorted(score_hist)[-1][1]
print("Best C value: %f" % (bestC))
print("Performing One Hot Encoding on entire dataset...")
Xt = np.vstack((X_train_all[:, good_features], X_test_all[:, good_features]))
Xt, keymap = OneHotEncoder(Xt)
X_train = Xt[:num_train]
X_test = Xt[num_train:]
print("Training full model...")
model.fit(X_train, y)
print("Making prediction and saving results...")
preds = model.predict_proba(X_test)[:, 1]
create_test_submission('results.json', preds)
if __name__ == "__main__":
main('admin', 'toor')
|
py | 1a3017e2047958a83cbc36b3be7ad231abdacfc8 | import json
import logging
from datetime import date, datetime
from gzip import GzipFile
from io import BytesIO
from typing import Any, Optional, Union
import requests
from dateutil.tz import tzutc
from posthog.utils import remove_trailing_slash
from posthog.version import VERSION
_session = requests.sessions.Session()
DEFAULT_HOST = "https://app.posthog.com"
USER_AGENT = "posthog-python/" + VERSION
def post(
api_key: str, host: Optional[str] = None, path=None, gzip: bool = False, timeout: int = 15, **kwargs
) -> requests.Response:
"""Post the `kwargs` to the API"""
log = logging.getLogger("posthog")
body = kwargs
body["sentAt"] = datetime.utcnow().replace(tzinfo=tzutc()).isoformat()
url = remove_trailing_slash(host or DEFAULT_HOST) + path
body["api_key"] = api_key
data = json.dumps(body, cls=DatetimeSerializer)
log.debug("making request: %s", data)
headers = {"Content-Type": "application/json", "User-Agent": USER_AGENT}
if gzip:
headers["Content-Encoding"] = "gzip"
buf = BytesIO()
with GzipFile(fileobj=buf, mode="w") as gz:
# 'data' was produced by json.dumps(),
# whose default encoding is utf-8.
gz.write(data.encode("utf-8"))
data = buf.getvalue()
res = _session.post(url, data=data, headers=headers, timeout=timeout)
if res.status_code == 200:
log.debug("data uploaded successfully")
return res
def _process_response(
res: requests.Response, success_message: str, *, return_json: bool = True
) -> Union[requests.Response, Any]:
log = logging.getLogger("posthog")
if not res:
raise APIError(
"N/A",
"Error when fetching PostHog API, please make sure you are using your public project token/key and not a private API key.",
)
if res.status_code == 200:
log.debug(success_message)
return res.json() if return_json else res
try:
payload = res.json()
log.debug("received response: %s", payload)
raise APIError(res.status_code, payload["detail"])
except ValueError:
raise APIError(res.status_code, res.text)
def decide(api_key: str, host: Optional[str] = None, gzip: bool = False, timeout: int = 15, **kwargs) -> Any:
"""Post the `kwargs to the decide API endpoint"""
res = post(api_key, host, "/decide/", gzip, timeout, **kwargs)
return _process_response(res, success_message="Feature flags decided successfully")
def batch_post(
api_key: str, host: Optional[str] = None, gzip: bool = False, timeout: int = 15, **kwargs
) -> requests.Response:
"""Post the `kwargs` to the batch API endpoint for events"""
res = post(api_key, host, "/batch/", gzip, timeout, **kwargs)
return _process_response(res, success_message="data uploaded successfully", return_json=False)
def get(api_key: str, url: str, host: Optional[str] = None, timeout: Optional[int] = None) -> requests.Response:
url = remove_trailing_slash(host or DEFAULT_HOST) + url
res = requests.get(url, headers={"Authorization": "Bearer %s" % api_key, "User-Agent": USER_AGENT}, timeout=timeout)
return _process_response(res, success_message=f"GET {url} completed successfully")
def shutdown():
# Avoid logs with
# sys:1: ResourceWarning: unclosed
# <ssl.SSLSocket fd=10, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0,
# laddr=('x.x.x.x', y), raddr=('x.x.x.x', 443)>
    # Should only be called once; renders `_session` unusable
_session.close()
class APIError(Exception):
def __init__(self, status: Union[int, str], message: str):
self.message = message
self.status = status
def __str__(self):
msg = "[PostHog] {0} ({1})"
return msg.format(self.message, self.status)
class DatetimeSerializer(json.JSONEncoder):
def default(self, obj: Any):
if isinstance(obj, (date, datetime)):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
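# Example (illustrative only): DatetimeSerializer is what lets the json.dumps
# call in post() above serialize datetime/date values as ISO-8601 strings:
#   json.dumps({"sent": datetime(2020, 1, 1)}, cls=DatetimeSerializer)
#   # -> '{"sent": "2020-01-01T00:00:00"}'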
|
py | 1a3018eed7af9ecef4b593a07d12b4af7aa6c7f3 | '''Tools for sensitivity analyses. I still need to read Lash & Fox to integrate more tools
for multiple bias analysis. This branch is still very much a work in progress. The goal is
to simplify sensitivity analyses, in the hopes they become more common in publications
-MonteCarloRR(): generates a corrected RR distribution based on binary confounder
-trapezoidal(): generates a trapezoidal distribution of values
'''
from .Simple import MonteCarloRR
from .distributions import trapezoidal
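# Usage sketch (signatures are hypothetical -- see Simple.py and distributions.py
# for the actual arguments): draw bias parameters from a trapezoidal distribution
# and feed them to MonteCarloRR to obtain a distribution of corrected RRs, e.g.
#   p1 = trapezoidal(mini=0.2, mode1=0.3, mode2=0.5, maxi=0.6, size=10000)
#   corrected_rr = MonteCarloRR(...)  # combines the observed RR with sampled bias parameters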
|
py | 1a30197dc12d598d0dc6eeec07243d5c3a4e332a | from itertools import chain
from Model.point import Point
from View.itempriorities import ItemPriorities
class Cell:
def __init__ (self, pt):
assert isinstance (pt, Point)
self.pt = pt
#self.neighbors = {}
#self.items = {} # including walls and players
self.players = []
self.walls = [] # seems like too many walls
self.items = []
# TODO add floor and ceiling, so you can break through it
#def setNeighbor (self, direction, cell):
# self.neighbors[direction] = cell
def isTraversable (self):
        if len (list (filter (
                lambda item: not item.isTraversable (),
                chain (self.players, self.walls, self.items)))) == 0:
            return True
        return False
"""
def getItemsOfType (self, T):
return chain.from_iterable (map (
lambda (key): self.items.get (key),
filter (lambda (t): isinstance (t, T), self.items.keySet ())))
def getItems (self): return chain.from_iterable (self.items.values ())
"""
def __repr__ (self):
        contents = list (chain (self.players, self.walls, self.items))
        if len (contents) == 0: return '.'
        return repr (contents)
def __str__ (self):
#if len (self.items) is 0: return '.'
#for priority in [Wall] + itemPriorities + [Item]:
# items = filter (
# lambda (item): isinstance (item, priority),
# self.items)
# if len (items) is 0: continue
# return str (items[0]) # some are not displayed
#raise Exception (self.items)
c = chain (self.players, self.walls, self.items)
return str (next (c, '.'))
def addPlayer (self, player):
assert self.isTraversable ()
assert self.pt == player.pt
self.players.append (player)
def removePlayer (self, player):
assert self.pt == player.pt
assert player in self.players
self.players.remove (player)
def containsPlayer (self, player):
assert self.pt == player.pt
return player in self.players |
py | 1a30198251afc1ca2d19648163f92962b918a14d | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from django.db import models
class Address(models.Model):
address_id = models.AutoField(primary_key=True)
state = models.CharField(max_length=50)
city = models.CharField(max_length=50)
street = models.CharField(max_length=50)
house_number = models.IntegerField()
postal_code = models.IntegerField()
class Meta:
managed = False
db_table = 'address'
def __str__(self):
return f"{self.state}, {self.city}, {self.street}, {self.house_number}, {self.postal_code}"
class Contact(models.Model):
contact_id = models.AutoField(primary_key=True)
person = models.ForeignKey('Person', models.DO_NOTHING)
contact_type = models.CharField(max_length=20)
value = models.CharField(max_length=50)
last_change = models.DateTimeField(blank=True, null=True)
class Meta:
managed = False
db_table = 'contact'
def __str__(self):
return f"{self.contact_type}, {self.value}"
def getType(self):
return self.contact_type
def getValue(self):
return self.value
class Department(models.Model):
department_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=50)
decription = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'department'
def getAll():
return [str(qSet.name) for qSet in Department.objects.all()]
class DepartmentHasPerson(models.Model):
department = models.ForeignKey(Department, models.DO_NOTHING)
person = models.ForeignKey('Person', on_delete=models.CASCADE)
class Meta:
managed = False
db_table = 'department_has_person'
class DepartmentHasProgram(models.Model):
department = models.ForeignKey(Department, models.DO_NOTHING)
program = models.ForeignKey('StudyProgram', models.DO_NOTHING)
class Meta:
managed = False
db_table = 'department_has_program'
class Faculty(models.Model):
faculty_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=50)
address = models.ForeignKey(Address, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'faculty'
def getAll():
return [str(qSet.name) for qSet in Faculty.objects.all()]
class FacultyHasDepartment(models.Model):
faculty = models.ForeignKey(Faculty, models.DO_NOTHING)
department = models.ForeignKey(Department, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'faculty_has_department'
class FacultyHasPerson(models.Model):
faculty = models.ForeignKey(Faculty, models.DO_NOTHING)
person = models.ForeignKey('Person', on_delete=models.CASCADE)
class Meta:
managed = False
db_table = 'faculty_has_person'
class Person(models.Model):
person_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=20)
surname = models.CharField(max_length=20)
birthdate = models.DateField()
email = models.CharField(max_length=50)
passwd = models.CharField(max_length=255)
additional_note = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'person'
def getId(self):
return self.person_id
def getName(self):
return self.name
def getSurname(self):
return self.surname
def getBirthdate(self):
return self.birthdate
def getEmail(self):
return self.email
def getPasswd(self):
return self.passwd
def getNote(self):
return self.additional_note
class PersonHasAddress(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
address = models.ForeignKey(Address, models.DO_NOTHING)
address_type = models.CharField(max_length=50, blank=True, null=True)
class Meta:
managed = False
db_table = 'person_has_address'
class PersonHasRole(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
role = models.ForeignKey('Role', models.DO_NOTHING)
class Meta:
managed = False
db_table = 'person_has_role'
def __str__(self):
return f"{self.person} {self.role}"
class PersonHasSubject(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
subject = models.ForeignKey('Subject', models.DO_NOTHING)
class Meta:
managed = False
db_table = 'person_has_subject'
class ProgramHasPerson(models.Model):
program = models.ForeignKey('StudyProgram', models.DO_NOTHING)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
class Meta:
managed = False
db_table = 'program_has_person'
class ProgramHasSubject(models.Model):
program = models.ForeignKey('StudyProgram', models.DO_NOTHING)
subject = models.ForeignKey('Subject', models.DO_NOTHING)
class Meta:
managed = False
db_table = 'program_has_subject'
class Role(models.Model):
role_id = models.AutoField(primary_key=True)
role_type = models.CharField(max_length=20)
class Meta:
managed = False
db_table = 'role'
def __str__(self):
return self.role_type
def getAll():
return [str(qSet.role_type) for qSet in Role.objects.all()]
class StudyProgram(models.Model):
program_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=50)
description = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'study_program'
def getAll():
return [str(qSet.name) for qSet in StudyProgram.objects.all()]
class Subject(models.Model):
subject_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=50)
department = models.ForeignKey(Department, models.DO_NOTHING)
description = models.CharField(max_length=255, blank=True, null=True)
prerequisites = models.CharField(max_length=255, blank=True, null=True)
semester = models.SmallIntegerField()
review = models.SmallIntegerField(blank=True, null=True)
additional_info = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'subject'
def getAll():
return [str(qSet.name) for qSet in Subject.objects.all()]
class Thesis(models.Model):
thesis_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
thesis_type = models.CharField(max_length=20)
description = models.CharField(max_length=255, blank=True, null=True)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
class Meta:
managed = False
db_table = 'thesis'
def __str__(self):
return self.name
|
py | 1a3019cdaf96ced50b6083be38769453051442c7 | from threading import Thread
import pyrealtime as prt
class SubprocessLayer(prt.TransformMixin, prt.ThreadLayer):
def __init__(self, port_in, cmd, *args, encoder=None, decoder=None, **kwargs):
super().__init__(port_in, *args, **kwargs)
self.cmd = cmd
self.proc = None
self.read_thread = None
self._encode = encoder if encoder is not None else self.encode
self._decode = decoder if decoder is not None else self.decode
def encode(self, data):
return data + "\n"
def decode(self, data):
return data.rstrip().decode('utf-8')
def initialize(self):
try:
import pexpect.popen_spawn
except ImportError:
raise ModuleNotFoundError("pexpect required to use subprocess layers")
self.proc = pexpect.popen_spawn.PopenSpawn(self.cmd)
self.read_thread = Thread(target=self.read_loop)
self.read_thread.start()
def read_loop(self):
import pexpect
while True:
try:
index = self.proc.expect(".*\n")
data = self.proc.match[index]
self.handle_output(self._decode(data))
except pexpect.exceptions.EOF:
print("end of file")
return prt.LayerSignal.STOP
def transform(self, data):
self.proc.write(self._encode(data))
return None
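# Hypothetical wiring sketch (the surrounding pyrealtime graph setup is not
# shown and the names are illustrative): the layer encodes each incoming frame,
# writes it to the child process's stdin, and emits every line the child prints:
#   echo_layer = SubprocessLayer(some_input_port, "python -u echo.py")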
|
py | 1a301a820e688d7a6e23162424700f051e650e8e | from tests.helpers import req
request = req('get')
def test_extra_and_extra_evaluated():
# language=rst
"""
extra and extra_evaluated
=========================
Very often it's useful to add some little bit of data on the side that you need
later to customize something. We think it's important to support this use case
with minimal amounts of code. To do this we have `extra` and `extra_evaluated`.
This is your place to put whatever you want in order to extend iommi for a general
feature or just some simple one-off customization for a single view.
All `Part` derived classes have `extra` and `extra_evaluated` namespaces, for example:
`Page`, `Column`, `Table`, `Field`, `Form`, and `Action`.
You use `extra` to put some data you want as-is:
.. code-block::
form = Form.create(
            auto__model=Artist,
fields__name__extra__sounds_cool=True,
extra__is_cool=True,
)
Here we add `sounds_cool` to the `name` field, and the `is_cool` value to the
entire `Form`. We can then access these in e.g. a template:
`{{ form.fields.name.extra.sounds_cool }}` and `{{ form.extra.is_cool }}`.
    `extra_evaluated` is useful when you want to use the iommi evaluation
machinery to get some dynamic behavior:
.. code-block::
form = Form.create(
            auto__model=Artist,
fields__name__extra_evaluated__sounds_cool=lambda request, **_: request.is_staff,
extra_evaluated__is_cool=lambda request, **_: request.is_staff,
)
These are accessed like this in the template: `{{ form.fields.name.extra_evaluated.sounds_cool }}`.
"""
|
py | 1a301d3f2eba522abf77c252be8a0724dae60b86 | """
Code originally developed for pyEcholab
(https://github.com/CI-CMG/pyEcholab)
by Rick Towler <[email protected]> at NOAA AFSC.
The code has been modified to handle split-beam data and
channel-transducer structure from different EK80 setups.
"""
import logging
import re
import struct
import sys
import xml.etree.ElementTree as ET
from collections import Counter
import numpy as np
from .ek_date_conversion import nt_to_unix
TCVR_CH_NUM_MATCHER = re.compile(r"\d{6}-\w{1,2}|\w{12}-\w{1,2}")
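# The matcher above extracts the transceiver/channel suffix from an EK80
# channel ID string: either a 6-digit serial number or a 12-character hardware
# ID followed by "-<channel>", e.g. "400142-1" (example value is illustrative).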
__all__ = [
"SimradNMEAParser",
"SimradDepthParser",
"SimradBottomParser",
"SimradAnnotationParser",
"SimradConfigParser",
"SimradRawParser",
]
log = logging.getLogger(__name__)
class _SimradDatagramParser(object):
""""""
def __init__(self, header_type, header_formats):
self._id = header_type
self._headers = header_formats
self._versions = list(header_formats.keys())
def header_fmt(self, version=0):
return "=" + "".join([x[1] for x in self._headers[version]])
def header_size(self, version=0):
return struct.calcsize(self.header_fmt(version))
def header_fields(self, version=0):
return [x[0] for x in self._headers[version]]
def header(self, version=0):
return self._headers[version][:]
def validate_data_header(self, data):
if isinstance(data, dict):
type_ = data["type"][:3]
version = int(data["type"][3])
elif isinstance(data, str):
type_ = data[:3]
version = int(data[3])
else:
raise TypeError("Expected a dict or str")
if type_ != self._id:
raise ValueError("Expected data of type %s, not %s" % (self._id, type_))
if version not in self._versions:
raise ValueError(
"No parser available for type %s version %d" % (self._id, version)
)
return type_, version
def from_string(self, raw_string, bytes_read):
header = raw_string[:4]
if sys.version_info.major > 2:
header = header.decode()
id_, version = self.validate_data_header(header)
return self._unpack_contents(raw_string, bytes_read, version=version)
def to_string(self, data={}):
id_, version = self.validate_data_header(data)
datagram_content_str = self._pack_contents(data, version=version)
return self.finalize_datagram(datagram_content_str)
def _unpack_contents(self, raw_string="", version=0):
raise NotImplementedError
def _pack_contents(self, data={}, version=0):
raise NotImplementedError
@classmethod
def finalize_datagram(cls, datagram_content_str):
datagram_size = len(datagram_content_str)
final_fmt = "=l%dsl" % (datagram_size)
return struct.pack(
final_fmt, datagram_size, datagram_content_str, datagram_size
)
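    # Framing note (added for clarity): every datagram written to disk is
    # wrapped as <int32 size> <payload bytes> <int32 size>, with both size
    # fields equal to len(payload) -- exactly what the "=l%dsl" format above
    # packs, so a 12-byte payload occupies 20 bytes on disk.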
class SimradDepthParser(_SimradDatagramParser):
"""
ER60 Depth Detection datagram (from .bot files) contain the following keys:
type: string == 'DEP0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
        transceiver_count: [long uint] with number of transceivers
depth: [float], one value for each active channel
reflectivity: [float], one value for each active channel
unused: [float], unused value for each active channel
The following methods are defined:
from_string(str): parse a raw ER60 Depth datagram
(with leading/trailing datagram size stripped)
to_string(): Returns the datagram as a raw string
(including leading/trailing size fields)
ready for writing to disk
"""
def __init__(self):
headers = {
0: [
("type", "4s"),
("low_date", "L"),
("high_date", "L"),
("transceiver_count", "L"),
]
}
_SimradDatagramParser.__init__(self, "DEP", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
""""""
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
data = {}
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
if isinstance(data[field], bytes):
data[field] = data[field].decode()
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
if version == 0:
data_fmt = "=3f"
data_size = struct.calcsize(data_fmt)
data["depth"] = np.zeros((data["transceiver_count"],))
data["reflectivity"] = np.zeros((data["transceiver_count"],))
data["unused"] = np.zeros((data["transceiver_count"],))
buf_indx = self.header_size(version)
for indx in range(data["transceiver_count"]):
d, r, u = struct.unpack(
data_fmt, raw_string[buf_indx : buf_indx + data_size] # noqa
)
data["depth"][indx] = d
data["reflectivity"][indx] = r
data["unused"][indx] = u
buf_indx += data_size
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
lengths = [
len(data["depth"]),
len(data["reflectivity"]),
len(data["unused"]),
data["transceiver_count"],
]
if len(set(lengths)) != 1:
min_indx = min(lengths)
log.warning(
"Data lengths mismatched: d:%d, r:%d, u:%d, t:%d", *lengths
)
log.warning(" Using minimum value: %d", min_indx)
data["transceiver_count"] = min_indx
else:
min_indx = data["transceiver_count"]
for field in self.header_fields(version):
datagram_contents.append(data[field])
datagram_fmt += "%df" % (3 * data["transceiver_count"])
for indx in range(data["transceiver_count"]):
datagram_contents.extend(
[
data["depth"][indx],
data["reflectivity"][indx],
data["unused"][indx],
]
)
return struct.pack(datagram_fmt, *datagram_contents)
class SimradBottomParser(_SimradDatagramParser):
"""
Bottom Detection datagram contains the following keys:
type: string == 'BOT0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
datetime: datetime.datetime object of NT date converted to UTC
        transceiver_count: long uint with number of transceivers
depth: [float], one value for each active channel
The following methods are defined:
from_string(str): parse a raw ER60 Bottom datagram
(with leading/trailing datagram size stripped)
to_string(): Returns the datagram as a raw string
(including leading/trailing size fields)
ready for writing to disk
"""
def __init__(self):
headers = {
0: [
("type", "4s"),
("low_date", "L"),
("high_date", "L"),
("transceiver_count", "L"),
]
}
_SimradDatagramParser.__init__(self, "BOT", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
""""""
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
data = {}
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
if isinstance(data[field], bytes):
data[field] = data[field].decode()
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
if version == 0:
depth_fmt = "=%dd" % (data["transceiver_count"],)
depth_size = struct.calcsize(depth_fmt)
buf_indx = self.header_size(version)
data["depth"] = np.fromiter(
struct.unpack(
depth_fmt, raw_string[buf_indx : buf_indx + depth_size]
), # noqa
"float",
)
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
if len(data["depth"]) != data["transceiver_count"]:
log.warning(
"# of depth values %d does not match transceiver count %d",
len(data["depth"]),
data["transceiver_count"],
)
data["transceiver_count"] = len(data["depth"])
for field in self.header_fields(version):
datagram_contents.append(data[field])
datagram_fmt += "%dd" % (data["transceiver_count"])
datagram_contents.extend(data["depth"])
return struct.pack(datagram_fmt, *datagram_contents)
class SimradAnnotationParser(_SimradDatagramParser):
"""
ER60 Annotation datagram contains the following keys:
type: string == 'TAG0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
text: Annotation
The following methods are defined:
from_string(str): parse a raw ER60 Annotation datagram
(with leading/trailing datagram size stripped)
to_string(): Returns the datagram as a raw string
(including leading/trailing size fields)
ready for writing to disk
"""
def __init__(self):
headers = {0: [("type", "4s"), ("low_date", "L"), ("high_date", "L")]}
_SimradDatagramParser.__init__(self, "TAG", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
""""""
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
data = {}
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
if isinstance(data[field], bytes):
data[field] = data[field].decode()
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
# if version == 0:
# data['text'] = raw_string[self.header_size(version):].strip('\x00')
# if isinstance(data['text'], bytes):
# data['text'] = data['text'].decode()
if version == 0:
if sys.version_info.major > 2:
data["text"] = str(
raw_string[self.header_size(version) :].strip(b"\x00"),
"ascii",
errors="replace",
)
else:
data["text"] = unicode( # noqa
raw_string[self.header_size(version) :].strip("\x00"),
"ascii",
errors="replace",
)
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
for field in self.header_fields(version):
datagram_contents.append(data[field])
if data["text"][-1] != "\x00":
tmp_string = data["text"] + "\x00"
else:
tmp_string = data["text"]
            # Pad with more nulls to 4-byte word boundary if necessary
if len(tmp_string) % 4:
tmp_string += "\x00" * (4 - (len(tmp_string) % 4))
datagram_fmt += "%ds" % (len(tmp_string))
datagram_contents.append(tmp_string)
return struct.pack(datagram_fmt, *datagram_contents)
class SimradNMEAParser(_SimradDatagramParser):
"""
ER60 NMEA datagram contains the following keys:
type: string == 'NME0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
nmea_string: full (original) NMEA string
The following methods are defined:
from_string(str): parse a raw ER60 NMEA datagram
(with leading/trailing datagram size stripped)
to_string(): Returns the datagram as a raw string
(including leading/trailing size fields)
ready for writing to disk
"""
nmea_head_re = re.compile(r"\$[A-Za-z]{5},") # noqa
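    # The regex above matches the leading "$TTSSS," NMEA header, e.g. "$GPZDA,"
    # where "GP" is the talker ID and "ZDA" the sentence type -- the same two
    # slices ([1:3] and [3:6]) pulled out further down in _unpack_contents.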
def __init__(self):
headers = {
0: [("type", "4s"), ("low_date", "L"), ("high_date", "L")],
1: [("type", "4s"), ("low_date", "L"), ("high_date", "L"), ("port", "32s")],
}
_SimradDatagramParser.__init__(self, "NME", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
"""
Parses the NMEA string provided in raw_string
        :param raw_string: Raw NMEA string (i.e. '$GPZDA,160012.71,11,03,2004,-1,00*7D')
:type raw_string: str
:returns: None
"""
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
data = {}
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
if isinstance(data[field], bytes):
data[field] = data[field].decode()
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
# Remove trailing \x00 from the PORT field for NME1, rest of the datagram identical to NME0
if version == 1:
data["port"] = data["port"].strip("\x00")
if version == 0 or version == 1:
if sys.version_info.major > 2:
data["nmea_string"] = str(
raw_string[self.header_size(version) :].strip(b"\x00"),
"ascii",
errors="replace",
)
else:
data["nmea_string"] = unicode( # noqa
raw_string[self.header_size(version) :].strip("\x00"),
"ascii",
errors="replace",
)
if self.nmea_head_re.match(data["nmea_string"][:7]) is not None:
data["nmea_talker"] = data["nmea_string"][1:3]
data["nmea_type"] = data["nmea_string"][3:6]
else:
data["nmea_talker"] = ""
data["nmea_type"] = "UNKNOWN"
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
for field in self.header_fields(version):
datagram_contents.append(data[field])
if data["nmea_string"][-1] != "\x00":
tmp_string = data["nmea_string"] + "\x00"
else:
tmp_string = data["nmea_string"]
            # Pad with more nulls to 4-byte word boundary if necessary
if len(tmp_string) % 4:
tmp_string += "\x00" * (4 - (len(tmp_string) % 4))
datagram_fmt += "%ds" % (len(tmp_string))
# Convert to python string if needed
if isinstance(tmp_string, str):
tmp_string = tmp_string.encode("ascii", errors="replace")
datagram_contents.append(tmp_string)
return struct.pack(datagram_fmt, *datagram_contents)
class SimradMRUParser(_SimradDatagramParser):
"""
EK80 MRU datagram contains the following keys:
type: string == 'MRU0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
heave: float
roll : float
pitch: float
heading: float
The following methods are defined:
from_string(str): parse a raw ER60 NMEA datagram
(with leading/trailing datagram size stripped)
to_string(): Returns the datagram as a raw string
(including leading/trailing size fields)
ready for writing to disk
"""
def __init__(self):
headers = {
0: [
("type", "4s"),
("low_date", "L"),
("high_date", "L"),
("heave", "f"),
("roll", "f"),
("pitch", "f"),
("heading", "f"),
]
}
_SimradDatagramParser.__init__(self, "MRU", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
"""
Unpacks the data in raw_string into dictionary containing MRU data
:param raw_string:
:type raw_string: str
:returns: None
"""
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
data = {}
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
if isinstance(data[field], bytes):
data[field] = data[field].decode()
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
for field in self.header_fields(version):
datagram_contents.append(data[field])
if data["nmea_string"][-1] != "\x00":
tmp_string = data["nmea_string"] + "\x00"
else:
tmp_string = data["nmea_string"]
# Pad with more nulls to 4-byte word boundry if necessary
if len(tmp_string) % 4:
tmp_string += "\x00" * (4 - (len(tmp_string) % 4))
datagram_fmt += "%ds" % (len(tmp_string))
# Convert to python string if needed
if isinstance(tmp_string, str):
tmp_string = tmp_string.encode("ascii", errors="replace")
datagram_contents.append(tmp_string)
return struct.pack(datagram_fmt, *datagram_contents)
class SimradXMLParser(_SimradDatagramParser):
"""
EK80 XML datagram contains the following keys:
type: string == 'XML0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
subtype: string representing Simrad XML datagram type:
configuration, environment, or parameter
[subtype]: dict containing the data specific to the XML subtype.
The following methods are defined:
from_string(str): parse a raw EK80 XML datagram
(with leading/trailing datagram size stripped)
to_string(): Returns the datagram as a raw string
(including leading/trailing size fields)
ready for writing to disk
"""
# define the XML parsing options - here we define dictionaries for various xml datagram
# types. When parsing that xml datagram, these dictionaries are used to inform the parser about
# type conversion, name wrangling, and delimiter. If a field is missing, the parser
# assumes no conversion: type will be string, default mangling, and that there is only 1
# element.
#
# the dicts are in the form:
# 'XMLParamName':[converted type,'fieldname', 'parse char']
#
# For example: 'PulseDurationFM':[float,'pulse_duration_fm',';']
#
# will result in a return dictionary field named 'pulse_duration_fm' that contains a list
# of float values parsed from a string that uses ';' to separate values. Empty strings
# for fieldname and/or parse char result in the default action for those parsing steps.
channel_parsing_options = {
"MaxTxPowerTransceiver": [int, "", ""],
"PulseDuration": [float, "", ";"],
"PulseDurationFM": [float, "pulse_duration_fm", ";"],
"SampleInterval": [float, "", ";"],
"ChannelID": [str, "channel_id", ""],
"HWChannelConfiguration": [str, "hw_channel_configuration", ""],
}
transceiver_parsing_options = {
"TransceiverNumber": [int, "", ""],
"Version": [str, "transceiver_version", ""],
"IPAddress": [str, "ip_address", ""],
"Impedance": [int, "", ""],
}
transducer_parsing_options = {
"SerialNumber": [str, "transducer_serial_number", ""],
"Frequency": [float, "transducer_frequency", ""],
"FrequencyMinimum": [float, "transducer_frequency_minimum", ""],
"FrequencyMaximum": [float, "transducer_frequency_maximum", ""],
"BeamType": [int, "transducer_beam_type", ""],
"Gain": [float, "", ";"],
"SaCorrection": [float, "", ";"],
"MaxTxPowerTransducer": [float, "", ""],
"EquivalentBeamAngle": [float, "", ""],
"BeamWidthAlongship": [float, "", ""],
"BeamWidthAthwartship": [float, "", ""],
"AngleSensitivityAlongship": [float, "", ""],
"AngleSensitivityAthwartship": [float, "", ""],
"AngleOffsetAlongship": [float, "", ""],
"AngleOffsetAthwartship": [float, "", ""],
"DirectivityDropAt2XBeamWidth": [
float,
"directivity_drop_at_2x_beam_width",
"",
],
"TransducerOffsetX": [float, "", ""],
"TransducerOffsetY": [float, "", ""],
"TransducerOffsetZ": [float, "", ""],
"TransducerAlphaX": [float, "", ""],
"TransducerAlphaY": [float, "", ""],
"TransducerAlphaZ": [float, "", ""],
}
header_parsing_options = {"Version": [str, "application_version", ""]}
envxdcr_parsing_options = {"SoundSpeed": [float, "transducer_sound_speed", ""]}
environment_parsing_options = {
"Depth": [float, "", ""],
"Acidity": [float, "", ""],
"Salinity": [float, "", ""],
"SoundSpeed": [float, "", ""],
"Temperature": [float, "", ""],
"Latitude": [float, "", ""],
"SoundVelocityProfile": [float, "", ";"],
"DropKeelOffset": [float, "", ""],
"DropKeelOffsetIsManual": [int, "", ""],
"WaterLevelDraft": [float, "", ""],
"WaterLevelDraftIsManual": [int, "", ""],
}
parameter_parsing_options = {
"ChannelID": [str, "channel_id", ""],
"ChannelMode": [int, "", ""],
"PulseForm": [int, "", ""],
"Frequency": [float, "", ""],
"PulseDuration": [float, "", ""],
"SampleInterval": [float, "", ""],
"TransmitPower": [float, "", ""],
"Slope": [float, "", ""],
}
def __init__(self):
headers = {0: [("type", "4s"), ("low_date", "L"), ("high_date", "L")]}
_SimradDatagramParser.__init__(self, "XML", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
"""
        Parses the XML string provided in raw_string
        :param raw_string: Raw XML string
:type raw_string: str
:returns: None
"""
def from_CamelCase(xml_param):
"""
convert name from CamelCase to fit with existing naming convention by
inserting an underscore before each capital and then lowering the caps
e.g. CamelCase becomes camel_case.
"""
idx = list(reversed([i for i, c in enumerate(xml_param) if c.isupper()]))
param_len = len(xml_param)
for i in idx:
# check if we should insert an underscore
if i > 0 and i < param_len:
xml_param = xml_param[:i] + "_" + xml_param[i:]
xml_param = xml_param.lower()
return xml_param
def dict_to_dict(xml_dict, data_dict, parse_opts):
"""
dict_to_dict appends the ETree xml value dicts to a provided dictionary
and along the way converts the key name to conform to the project's
            naming convention and optionally parses and/or converts values as
specified in the parse_opts dictionary.
"""
for k in xml_dict:
# check if we're parsing this key/value
if k in parse_opts:
# try to parse the string
if parse_opts[k][2]:
try:
data = xml_dict[k].split(parse_opts[k][2])
except:
                            # bad or empty parse character(s) provided
data = xml_dict[k]
else:
# no parse char provided - nothing to parse
data = xml_dict[k]
# try to convert to specified type
if isinstance(data, list):
for i in range(len(data)):
try:
data[i] = parse_opts[k][0](data[i])
except:
pass
else:
data = parse_opts[k][0](data)
# and add the value to the provided dict
if parse_opts[k][1]:
# add using the specified key name
data_dict[parse_opts[k][1]] = data
else:
# add using the default key name wrangling
data_dict[from_CamelCase(k)] = data
else:
# nothing to do with the value string
data = xml_dict[k]
# add the parameter to the provided dictionary
data_dict[from_CamelCase(k)] = data
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
data = {}
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
if isinstance(data[field], bytes):
data[field] = data[field].decode()
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
if version == 0:
if sys.version_info.major > 2:
xml_string = str(
raw_string[self.header_size(version) :].strip(b"\x00"),
"ascii",
errors="replace",
)
else:
xml_string = unicode( # noqa
raw_string[self.header_size(version) :].strip("\x00"),
"ascii",
errors="replace",
)
# get the ElementTree element
root = ET.fromstring(xml_string)
# get the XML message type
data["subtype"] = root.tag.lower()
# create the dictionary that contains the message data
data[data["subtype"]] = {}
# parse it
if data["subtype"] == "configuration":
# parse the Transceiver section
for tcvr in root.iter("Transceiver"):
# parse the Transceiver section
tcvr_xml = tcvr.attrib
# parse the Channel section -- this works with multiple channels
# under 1 transceiver
for tcvr_ch in tcvr.iter("Channel"):
tcvr_ch_xml = tcvr_ch.attrib
channel_id = tcvr_ch_xml["ChannelID"]
# create the configuration dict for this channel
data["configuration"][channel_id] = {}
# add the transceiver data to the config dict (this is
# replicated for all channels)
dict_to_dict(
tcvr_xml,
data["configuration"][channel_id],
self.transceiver_parsing_options,
)
# add the general channel data to the config dict
dict_to_dict(
tcvr_ch_xml,
data["configuration"][channel_id],
self.channel_parsing_options,
)
# check if there are >1 transducer under a single transceiver channel
if len(list(tcvr_ch)) > 1:
ValueError(
"Found >1 transducer under a single transceiver channel!"
)
else: # should only have 1 transducer
tcvr_ch_xducer = tcvr_ch.find(
"Transducer"
) # get Element of this xducer
f_par = tcvr_ch_xducer.findall("FrequencyPar")
# Save calibration parameters
if f_par:
cal_par = {
"frequency": np.array(
[int(f.attrib["Frequency"]) for f in f_par]
),
"gain": np.array(
[float(f.attrib["Gain"]) for f in f_par]
),
"impedance": np.array(
[int(f.attrib["Impedance"]) for f in f_par]
),
"phase": np.array(
[float(f.attrib["Phase"]) for f in f_par]
),
"beamwidth_alongship": np.array(
[
float(f.attrib["BeamWidthAlongship"])
for f in f_par
]
),
"beamwidth_athwartship": np.array(
[
float(f.attrib["BeamWidthAthwartship"])
for f in f_par
]
),
"angle_offset_alongship": np.array(
[
float(f.attrib["AngleOffsetAlongship"])
for f in f_par
]
),
"angle_offset_athwartship": np.array(
[
float(f.attrib["AngleOffsetAthwartship"])
for f in f_par
]
),
}
data["configuration"][channel_id][
"calibration"
] = cal_par
# add the transducer data to the config dict
dict_to_dict(
tcvr_ch_xducer.attrib,
data["configuration"][channel_id],
self.transducer_parsing_options,
)
# get unique transceiver channel number stored in channel_id
tcvr_ch_num = TCVR_CH_NUM_MATCHER.search(channel_id)[0]
# parse the Transducers section from the root
                            # TODO Remove Transducers if it doesn't exist
xducer = root.find("Transducers")
if xducer is not None:
                                # build occurrence lookup table for transducer name
xducer_name_list = []
for xducer_ch in xducer.iter("Transducer"):
xducer_name_list.append(
xducer_ch.attrib["TransducerName"]
)
# find matching transducer for this channel_id
match_found = False
for xducer_ch in xducer.iter("Transducer"):
if not match_found:
xducer_ch_xml = xducer_ch.attrib
match_name = (
xducer_ch.attrib["TransducerName"]
== tcvr_ch_xducer.attrib["TransducerName"]
)
if xducer_ch.attrib["TransducerSerialNumber"] == "":
match_sn = False
else:
match_sn = (
xducer_ch.attrib["TransducerSerialNumber"]
== tcvr_ch_xducer.attrib["SerialNumber"]
)
match_tcvr = (
tcvr_ch_num
in xducer_ch.attrib["TransducerCustomName"]
)
# if find match add the transducer mounting details
if (
Counter(xducer_name_list)[
xducer_ch.attrib["TransducerName"]
]
> 1
):
# if more than one transducer has the same name
# only check sn and transceiver unique number
match_found = match_sn or match_tcvr
else:
match_found = (
match_name or match_sn or match_tcvr
)
# add transducer mounting details
if match_found:
dict_to_dict(
xducer_ch_xml,
data["configuration"][channel_id],
self.transducer_parsing_options,
)
# add the header data to the config dict
h = root.find("Header")
dict_to_dict(
h.attrib,
data["configuration"][channel_id],
self.header_parsing_options,
)
elif data["subtype"] == "parameter":
# parse the parameter XML datagram
for h in root.iter("Channel"):
parm_xml = h.attrib
# add the data to the environment dict
dict_to_dict(
parm_xml, data["parameter"], self.parameter_parsing_options
)
elif data["subtype"] == "environment":
# parse the environment XML datagram
for h in root.iter("Environment"):
env_xml = h.attrib
# add the data to the environment dict
dict_to_dict(
env_xml, data["environment"], self.environment_parsing_options
)
for h in root.iter("Transducer"):
transducer_xml = h.attrib
# add the data to the environment dict
dict_to_dict(
transducer_xml,
data["environment"],
self.envxdcr_parsing_options,
)
data["xml"] = xml_string
return data
def _pack_contents(self, data, version):
def to_CamelCase(xml_param):
"""
            convert name from the project's convention to CamelCase for converting
            back to XML in Kongsberg's convention.
"""
idx = list(reversed([i for i, c in enumerate(xml_param) if c.isupper()]))
param_len = len(xml_param)
for i in idx:
# check if we should insert an underscore
                if i > 0 and i < param_len - 1:
                    xml_param = xml_param[:i] + "_" + xml_param[i:]
xml_param = xml_param.lower()
return xml_param
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
for field in self.header_fields(version):
datagram_contents.append(data[field])
if data["nmea_string"][-1] != "\x00":
tmp_string = data["nmea_string"] + "\x00"
else:
tmp_string = data["nmea_string"]
# Pad with more nulls to 4-byte word boundry if necessary
if len(tmp_string) % 4:
tmp_string += "\x00" * (4 - (len(tmp_string) % 4))
datagram_fmt += "%ds" % (len(tmp_string))
# Convert to python string if needed
if isinstance(tmp_string, str):
tmp_string = tmp_string.encode("ascii", errors="replace")
datagram_contents.append(tmp_string)
return struct.pack(datagram_fmt, *datagram_contents)
class SimradFILParser(_SimradDatagramParser):
"""
EK80 FIL datagram contains the following keys:
type: string == 'FIL1'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
stage: int
channel_id: string
n_coefficients: int
decimation_factor: int
coefficients: np.complex64
The following methods are defined:
from_string(str): parse a raw EK80 FIL datagram
(with leading/trailing datagram size stripped)
to_string(): Returns the datagram as a raw string
(including leading/trailing size fields)
ready for writing to disk
"""
def __init__(self):
headers = {
1: [
("type", "4s"),
("low_date", "L"),
("high_date", "L"),
("stage", "h"),
("spare", "2s"),
("channel_id", "128s"),
("n_coefficients", "h"),
("decimation_factor", "h"),
]
}
_SimradDatagramParser.__init__(self, "FIL", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
data = {}
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
# handle Python 3 strings
if (sys.version_info.major > 2) and isinstance(data[field], bytes):
data[field] = data[field].decode("latin_1")
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
if version == 1:
# clean up the channel ID
data["channel_id"] = data["channel_id"].strip("\x00")
# unpack the coefficients
indx = self.header_size(version)
block_size = data["n_coefficients"] * 8
data["coefficients"] = np.frombuffer(
raw_string[indx : indx + block_size], dtype="complex64" # noqa
)
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
pass
elif version == 1:
for field in self.header_fields(version):
datagram_contents.append(data[field])
datagram_fmt += "%ds" % (len(data["beam_config"]))
datagram_contents.append(data["beam_config"])
return struct.pack(datagram_fmt, *datagram_contents)
class SimradConfigParser(_SimradDatagramParser):
"""
Simrad Configuration Datagram parser operates on dictionaries with the following keys:
type: string == 'CON0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
survey_name [str]
transect_name [str]
sounder_name [str]
version [str]
spare0 [str]
transceiver_count [long]
transceivers [list] List of dicts representing Transducer Configs:
ME70 Data contains the following additional values (data contained w/in first 14
bytes of the spare0 field)
multiplexing [short] Always 0
time_bias [long] difference between UTC and local time in min.
sound_velocity_avg [float] [m/s]
sound_velocity_transducer [float] [m/s]
beam_config [str] Raw XML string containing beam config. info
Transducer Config Keys (ER60/ES60/ES70 sounders):
channel_id [str] channel ident string
beam_type [long] Type of channel (0 = Single, 1 = Split)
frequency [float] channel frequency
equivalent_beam_angle [float] dB
beamwidth_alongship [float]
beamwidth_athwartship [float]
angle_sensitivity_alongship [float]
angle_sensitivity_athwartship [float]
angle_offset_alongship [float]
angle_offset_athwartship [float]
pos_x [float]
pos_y [float]
pos_z [float]
dir_x [float]
dir_y [float]
dir_z [float]
pulse_length_table [float[5]]
spare1 [str]
gain_table [float[5]]
spare2 [str]
sa_correction_table [float[5]]
spare3 [str]
gpt_software_version [str]
spare4 [str]
Transducer Config Keys (ME70 sounders):
channel_id [str] channel ident string
beam_type [long] Type of channel (0 = Single, 1 = Split)
reserved1 [float] channel frequency
equivalent_beam_angle [float] dB
beamwidth_alongship [float]
beamwidth_athwartship [float]
angle_sensitivity_alongship [float]
angle_sensitivity_athwartship [float]
angle_offset_alongship [float]
angle_offset_athwartship [float]
pos_x [float]
pos_y [float]
pos_z [float]
beam_steering_angle_alongship [float]
beam_steering_angle_athwartship [float]
beam_steering_angle_unused [float]
pulse_length [float]
reserved2 [float]
spare1 [str]
gain [float]
reserved3 [float]
spare2 [str]
sa_correction [float]
reserved4 [float]
spare3 [str]
gpt_software_version [str]
spare4 [str]
from_string(str): parse a raw config datagram
(with leading/trailing datagram size stripped)
to_string(dict): Returns raw string (including leading/trailing size fields)
ready for writing to disk
"""
COMMON_KEYS = [
("channel_id", "128s"),
("beam_type", "l"),
("frequency", "f"),
("gain", "f"),
("equivalent_beam_angle", "f"),
("beamwidth_alongship", "f"),
("beamwidth_athwartship", "f"),
("angle_sensitivity_alongship", "f"),
("angle_sensitivity_athwartship", "f"),
("angle_offset_alongship", "f"),
("angle_offset_athwartship", "f"),
("pos_x", "f"),
("pos_y", "f"),
("pos_z", "f"),
("dir_x", "f"),
("dir_y", "f"),
("dir_z", "f"),
("pulse_length_table", "5f"),
("spare1", "8s"),
("gain_table", "5f"),
("spare2", "8s"),
("sa_correction_table", "5f"),
("spare3", "8s"),
("gpt_software_version", "16s"),
("spare4", "28s"),
]
def __init__(self):
headers = {
0: [
("type", "4s"),
("low_date", "L"),
("high_date", "L"),
("survey_name", "128s"),
("transect_name", "128s"),
("sounder_name", "128s"),
("version", "30s"),
("spare0", "98s"),
("transceiver_count", "l"),
],
1: [("type", "4s"), ("low_date", "L"), ("high_date", "L")],
}
_SimradDatagramParser.__init__(self, "CON", headers)
self._transducer_headers = {
"ER60": self.COMMON_KEYS,
"ES60": self.COMMON_KEYS,
"ES70": self.COMMON_KEYS,
"MBES": [
("channel_id", "128s"),
("beam_type", "l"),
("frequency", "f"),
("reserved1", "f"),
("equivalent_beam_angle", "f"),
("beamwidth_alongship", "f"),
("beamwidth_athwartship", "f"),
("angle_sensitivity_alongship", "f"),
("angle_sensitivity_athwartship", "f"),
("angle_offset_alongship", "f"),
("angle_offset_athwartship", "f"),
("pos_x", "f"),
("pos_y", "f"),
("pos_z", "f"),
("beam_steering_angle_alongship", "f"),
("beam_steering_angle_athwartship", "f"),
("beam_steering_angle_unused", "f"),
("pulse_length", "f"),
("reserved2", "f"),
("spare1", "20s"),
("gain", "f"),
("reserved3", "f"),
("spare2", "20s"),
("sa_correction", "f"),
("reserved4", "f"),
("spare3", "20s"),
("gpt_software_version", "16s"),
("spare4", "28s"),
],
}
def _unpack_contents(self, raw_string, bytes_read, version):
data = {}
round6 = lambda x: round(x, ndigits=6) # noqa
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
# handle Python 3 strings
if (sys.version_info.major > 2) and isinstance(data[field], bytes):
data[field] = data[field].decode("latin_1")
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
if version == 0:
data["transceivers"] = {}
for field in ["transect_name", "version", "survey_name", "sounder_name"]:
data[field] = data[field].strip("\x00")
sounder_name = data["sounder_name"]
if sounder_name == "MBES":
_me70_extra_values = struct.unpack("=hLff", data["spare0"][:14])
data["multiplexing"] = _me70_extra_values[0]
data["time_bias"] = _me70_extra_values[1]
data["sound_velocity_avg"] = _me70_extra_values[2]
data["sound_velocity_transducer"] = _me70_extra_values[3]
data["spare0"] = data["spare0"][:14] + data["spare0"][14:].strip("\x00")
else:
data["spare0"] = data["spare0"].strip("\x00")
buf_indx = self.header_size(version)
try:
transducer_header = self._transducer_headers[sounder_name]
_sounder_name_used = sounder_name
except KeyError:
log.warning(
"Unknown sounder_name: %s, (no one of %s)",
sounder_name,
list(self._transducer_headers.keys()),
)
log.warning("Will use ER60 transducer config fields as default")
transducer_header = self._transducer_headers["ER60"]
_sounder_name_used = "ER60"
txcvr_header_fields = [x[0] for x in transducer_header]
txcvr_header_fmt = "=" + "".join([x[1] for x in transducer_header])
txcvr_header_size = struct.calcsize(txcvr_header_fmt)
for txcvr_indx in range(1, data["transceiver_count"] + 1):
txcvr_header_values_encoded = struct.unpack(
txcvr_header_fmt,
raw_string[buf_indx : buf_indx + txcvr_header_size], # noqa
)
txcvr_header_values = list(txcvr_header_values_encoded)
for tx_idx, tx_val in enumerate(txcvr_header_values_encoded):
if isinstance(tx_val, bytes):
txcvr_header_values[tx_idx] = tx_val.decode("latin_1")
txcvr = data["transceivers"].setdefault(txcvr_indx, {})
if _sounder_name_used in ["ER60", "ES60", "ES70"]:
for txcvr_field_indx, field in enumerate(txcvr_header_fields[:17]):
txcvr[field] = txcvr_header_values[txcvr_field_indx]
txcvr["pulse_length_table"] = np.fromiter(
list(map(round6, txcvr_header_values[17:22])), "float"
)
txcvr["spare1"] = txcvr_header_values[22]
txcvr["gain_table"] = np.fromiter(
list(map(round6, txcvr_header_values[23:28])), "float"
)
txcvr["spare2"] = txcvr_header_values[28]
txcvr["sa_correction_table"] = np.fromiter(
list(map(round6, txcvr_header_values[29:34])), "float"
)
txcvr["spare3"] = txcvr_header_values[34]
txcvr["gpt_software_version"] = txcvr_header_values[35]
txcvr["spare4"] = txcvr_header_values[36]
elif _sounder_name_used == "MBES":
for txcvr_field_indx, field in enumerate(txcvr_header_fields):
txcvr[field] = txcvr_header_values[txcvr_field_indx]
else:
raise RuntimeError(
"Unknown _sounder_name_used (Should not happen, this is a bug!)"
)
txcvr["channel_id"] = txcvr["channel_id"].strip("\x00")
txcvr["spare1"] = txcvr["spare1"].strip("\x00")
txcvr["spare2"] = txcvr["spare2"].strip("\x00")
txcvr["spare3"] = txcvr["spare3"].strip("\x00")
txcvr["spare4"] = txcvr["spare4"].strip("\x00")
txcvr["gpt_software_version"] = txcvr["gpt_software_version"].strip(
"\x00"
)
buf_indx += txcvr_header_size
elif version == 1:
# CON1 only has a single data field: beam_config, holding an xml string
data["beam_config"] = raw_string[self.header_size(version) :].strip("\x00")
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
if data["transceiver_count"] != len(data["transceivers"]):
log.warning(
"Mismatch between 'transceiver_count' and actual # of transceivers"
)
data["transceiver_count"] = len(data["transceivers"])
sounder_name = data["sounder_name"]
if sounder_name == "MBES":
_packed_me70_values = struct.pack(
"=hLff",
data["multiplexing"],
data["time_bias"],
data["sound_velocity_avg"],
data["sound_velocity_transducer"],
)
data["spare0"] = _packed_me70_values + data["spare0"][14:]
for field in self.header_fields(version):
datagram_contents.append(data[field])
try:
transducer_header = self._transducer_headers[sounder_name]
_sounder_name_used = sounder_name
except KeyError:
log.warning(
"Unknown sounder_name: %s, (no one of %s)",
sounder_name,
list(self._transducer_headers.keys()),
)
log.warning("Will use ER60 transducer config fields as default")
transducer_header = self._transducer_headers["ER60"]
_sounder_name_used = "ER60"
txcvr_header_fields = [x[0] for x in transducer_header]
txcvr_header_fmt = "=" + "".join([x[1] for x in transducer_header])
txcvr_header_size = struct.calcsize(txcvr_header_fmt) # noqa
for txcvr_indx, txcvr in list(data["transceivers"].items()):
txcvr_contents = []
if _sounder_name_used in ["ER60", "ES60", "ES70"]:
for field in txcvr_header_fields[:17]:
txcvr_contents.append(txcvr[field])
txcvr_contents.extend(txcvr["pulse_length_table"])
txcvr_contents.append(txcvr["spare1"])
txcvr_contents.extend(txcvr["gain_table"])
txcvr_contents.append(txcvr["spare2"])
txcvr_contents.extend(txcvr["sa_correction_table"])
txcvr_contents.append(txcvr["spare3"])
txcvr_contents.extend(
[txcvr["gpt_software_version"], txcvr["spare4"]]
)
txcvr_contents_str = struct.pack(txcvr_header_fmt, *txcvr_contents)
elif _sounder_name_used == "MBES":
for field in txcvr_header_fields:
txcvr_contents.append(txcvr[field])
txcvr_contents_str = struct.pack(txcvr_header_fmt, *txcvr_contents)
else:
raise RuntimeError(
"Unknown _sounder_name_used (Should not happen, this is a bug!)"
)
datagram_fmt += "%ds" % (len(txcvr_contents_str))
datagram_contents.append(txcvr_contents_str)
elif version == 1:
for field in self.header_fields(version):
datagram_contents.append(data[field])
datagram_fmt += "%ds" % (len(data["beam_config"]))
datagram_contents.append(data["beam_config"])
return struct.pack(datagram_fmt, *datagram_contents)
class SimradRawParser(_SimradDatagramParser):
"""
    Sample Data Datagram parser operates on dictionaries with the following keys:
type: string == 'RAW0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
channel [short] Channel number
        mode [short] 1 = Power only, 2 = Angle only, 3 = Power & Angle
transducer_depth [float]
frequency [float]
transmit_power [float]
pulse_length [float]
bandwidth [float]
sample_interval [float]
sound_velocity [float]
absorption_coefficient [float]
heave [float]
roll [float]
pitch [float]
temperature [float]
heading [float]
transmit_mode [short] 0 = Active, 1 = Passive, 2 = Test, -1 = Unknown
spare0 [str]
offset [long]
count [long]
power [numpy array] Unconverted power values (if present)
angle [numpy array] Unconverted angle values (if present)
from_string(str): parse a raw sample datagram
(with leading/trailing datagram size stripped)
to_string(dict): Returns raw string (including leading/trailing size fields)
ready for writing to disk
"""
def __init__(self):
headers = {
0: [
("type", "4s"),
("low_date", "L"),
("high_date", "L"),
("channel", "h"),
("mode", "h"),
("transducer_depth", "f"),
("frequency", "f"),
("transmit_power", "f"),
("pulse_length", "f"),
("bandwidth", "f"),
("sample_interval", "f"),
("sound_velocity", "f"),
("absorption_coefficient", "f"),
("heave", "f"),
("roll", "f"),
("pitch", "f"),
("temperature", "f"),
("heading", "f"),
("transmit_mode", "h"),
("spare0", "6s"),
("offset", "l"),
("count", "l"),
],
3: [
("type", "4s"),
("low_date", "L"),
("high_date", "L"),
("channel_id", "128s"),
("data_type", "h"),
("spare", "2s"),
("offset", "l"),
("count", "l"),
],
}
_SimradDatagramParser.__init__(self, "RAW", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
data = {}
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
if isinstance(data[field], bytes):
data[field] = data[field].decode()
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
if version == 0:
if data["count"] > 0:
block_size = data["count"] * 2
indx = self.header_size(version)
if int(data["mode"]) & 0x1:
data["power"] = np.frombuffer(
raw_string[indx : indx + block_size], dtype="int16" # noqa
)
indx += block_size
else:
data["power"] = None
if int(data["mode"]) & 0x2:
data["angle"] = np.frombuffer(
raw_string[indx : indx + block_size], dtype="int8" # noqa
)
data["angle"] = data["angle"].reshape((-1, 2))
else:
data["angle"] = None
else:
data["power"] = np.empty((0,), dtype="int16")
data["angle"] = np.empty((0, 2), dtype="int8")
elif version == 3:
# result = 1j*Data[...,1]; result += Data[...,0]
# clean up the channel ID
data["channel_id"] = data["channel_id"].strip("\x00")
if data["count"] > 0:
# set the initial block size and indx value.
block_size = data["count"] * 2
indx = self.header_size(version)
if data["data_type"] & 0b1:
data["power"] = np.frombuffer(
raw_string[indx : indx + block_size], dtype="int16" # noqa
)
indx += block_size
else:
data["power"] = None
if data["data_type"] & 0b10:
data["angle"] = np.frombuffer(
raw_string[indx : indx + block_size], dtype="int8" # noqa
)
data["angle"] = data["angle"].reshape((-1, 2))
indx += block_size
else:
data["angle"] = None
# determine the complex sample data type - this is contained in bits 2 and 3
# of the datatype <short> value. I'm assuming the types are exclusive...
data["complex_dtype"] = np.float16
type_bytes = 2
if data["data_type"] & 0b1000:
data["complex_dtype"] = np.float32
type_bytes = 8
# determine the number of complex samples
data["n_complex"] = data["data_type"] >> 8
# unpack the complex samples
if data["n_complex"] > 0:
# determine the block size
block_size = data["count"] * data["n_complex"] * type_bytes
data["complex"] = np.frombuffer(
raw_string[indx : indx + block_size], # noqa
dtype=data["complex_dtype"],
)
data["complex"].dtype = np.complex64
else:
data["complex"] = None
else:
data["power"] = np.empty((0,), dtype="int16")
data["angle"] = np.empty((0,), dtype="int8")
data["complex"] = np.empty((0,), dtype="complex64")
data["n_complex"] = 0
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
if data["count"] > 0:
if (int(data["mode"]) & 0x1) and (
len(data.get("power", [])) != data["count"]
):
log.warning(
"Data 'count' = %d, but contains %d power samples. Ignoring power."
)
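                    # clear bit 0 of 'mode' so the packed datagram no longer
                    # advertises power data that is missing or mismatched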
data["mode"] &= ~(1 << 0)
if (int(data["mode"]) & 0x2) and (
len(data.get("angle", [])) != data["count"]
):
log.warning(
"Data 'count' = %d, but contains %d angle samples. Ignoring angle."
)
data["mode"] &= ~(1 << 1)
if data["mode"] == 0:
log.warning(
"Data 'count' = %d, but mode == 0. Setting count to 0",
data["count"],
)
data["count"] = 0
for field in self.header_fields(version):
datagram_contents.append(data[field])
if data["count"] > 0:
if int(data["mode"]) & 0x1:
datagram_fmt += "%dh" % (data["count"])
datagram_contents.extend(data["power"])
if int(data["mode"]) & 0x2:
datagram_fmt += "%dH" % (data["count"])
datagram_contents.extend(data["angle"])
return struct.pack(datagram_fmt, *datagram_contents)
|
pyw | 1a301d4cdf3f0000f2f0525ab19727244bd43bae | import os
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from tkinter import PhotoImage
from tkinter import messagebox
import pafy
import youtube_dl
# if you get api limit exceeded error, get an api key and paste
# here as a string value
# pafy.set_api_key(key)
# sample video url
# https://www.youtube.com/watch?v=CjeYOtL6ORE
cwd = os.getcwd()
class CustomEntry(tk.Entry):
def __init__(self, parent, *args, **kwargs):
tk.Entry.__init__(self, parent, *args, **kwargs)
self.parent = parent
self.bind('<FocusOut>', self.add_placeholder)
self.bind('<FocusIn>', self.clear_placeholder)
self.configure(fg="gray70")
self.insert(0, 'Enter Video URL')
def add_placeholder(self, event=None):
if not self.get():
self.configure(fg="gray70")
self.insert(0, 'Enter Video URL')
def clear_placeholder(self, event):
if event and self.get() == 'Enter Video URL':
self.delete('0', 'end')
self.configure(fg="black")
# Application Class -----------------------------------------------
class Application(tk.Frame):
def __init__(self, master=None):
super().__init__(master=master)
self.master = master
self.master.focus_set()
self.pack()
self.url = ''
self.video_quality = tk.StringVar()
self.filesize = 0
self.is_video_downloading = False
self.is_audio_downloading = False
self.draw_title_frame()
self.draw_main_frame()
self.bind('<Return>', self.search_video)
def draw_title_frame(self):
self.title_frame = tk.Frame(self, bg='red', width=440, height=60)
self.title_frame.grid(row=0, column=0, columnspan=5, pady=5)
self.title_frame.grid_propagate(False)
self.title = tk.Label(self.title_frame, text=' SaveFromYT - Youtube Audio/Video Downloader',
fg='white', bg='red', font=('Times', 14),
width=450, height=50, image=youtube_icon, compound=tk.LEFT,
anchor = 'w')
self.title.grid(row=0, column=0, padx=5, ipadx=20)
def draw_main_frame(self):
self.main_frame = tk.Frame(self, width=440, height=240, highlightthickness=1,
highlightbackground='red')
self.main_frame.grid(row=1, column=0, columnspan=5, pady=5, rowspan=3)
self.main_frame.grid_propagate(False)
self.entry = CustomEntry(self.main_frame, width=52)
self.entry.grid(row=0, column=0, columnspan=3, pady=100, padx=(20,10))
self.entry.bind('<Return>', self.search_video)
self.search = tk.Button(self.main_frame, image=search_icon,
fg='white', cursor='hand2', command=self.search_video,
relief=tk.FLAT)
self.search.grid(row=0, column=4, pady=100, padx=(30,10))
def draw_download_frame(self):
self.main_frame.destroy()
self.info_frame = tk.Frame(self, width=150, height=173, highlightthickness=1,
highlightbackground='red')
self.info_frame.grid(row=1, column=0, columnspan=2)
self.info_frame.grid_propagate(False)
self.video_frame = tk.Frame(self, width=290, height=173, highlightthickness=1,
highlightbackground='red')
self.video_frame.grid(row=1, column=2, columnspan=3)
self.video_frame.grid_propagate(False)
self.audio_frame = tk.Frame(self, width=370, height=67, highlightthickness=1,
highlightbackground='red')
self.audio_frame.grid(row=2, column=0, columnspan=4)
self.audio_frame.grid_propagate(False)
self.back_frame = tk.Frame(self, width=70, height=67, highlightthickness=1,
highlightbackground='red')
self.back_frame.grid(row=2, column=4)
self.back_frame.grid_propagate(False)
def draw_download_widgets(self):
# self.info_frame
self.title = tk.Label(self.info_frame, width=20, height=3, bg='red',
wraplength=120, fg='white')
self.title.grid(row=0, column=0, padx=1, pady=2)
self.views = tk.Label(self.info_frame, width=20, height=2, bg='red',
fg='white')
self.views.grid(row=1, column=0, padx=1, pady=1)
self.duration = tk.Label(self.info_frame, width=20, height=2, bg='red',
fg='white')
self.duration.grid(row=2, column=0, padx=1, pady=1)
self.published = tk.Label(self.info_frame, width=20, height=2, bg='red',
fg='white')
self.published.grid(row=3, column=0, padx=1, pady=1)
# self.video_frame
self.video_quality.set(self.option_streams[0])
self.options = tk.OptionMenu(self.video_frame, self.video_quality,
*self.option_streams)
self.options.config(bg='red', fg='white')
self.options['menu'].config(bg='red', fg='white')
self.options.grid(row=0, column=0, padx=50, pady=20, columnspan=5)
self.video_dwn = tk.Button(self.video_frame, text='Download MP4',
command=self.download_video, bg='red', fg='white',
width=15, cursor='hand2')
self.video_dwn.grid(row=1, column=0, padx=50, pady=10, columnspan=5)
# self.audio_frame
self.audio_dwn = tk.Button(self.audio_frame, text='Download MP3',
command=self.download_mp3, bg='red', fg='white',
width=15, cursor='hand2')
self.audio_dwn.grid(row=0, column=0, padx=20, pady=20)
# self.back_frame
self.back = tk.Button(self.back_frame, text='back', image=back_icon,
command=self.go_back, relief=tk.FLAT)
self.back.grid(row=0, column=0, pady=10, padx=10)
def cease_buttons(self):
if self.is_video_downloading:
self.video_dwn['text'] = 'downloading'
if self.is_audio_downloading:
self.audio_dwn['text'] = 'downloading'
self.video_dwn.config(state='disabled')
self.audio_dwn.config(state='disabled')
def release_buttons(self):
self.video_dwn.config(state='normal')
self.audio_dwn.config(state='normal')
if not self.is_video_downloading:
self.video_dwn['text'] = 'Download MP4'
if not self.is_audio_downloading:
self.audio_dwn['text'] = 'Download MP3'
def search_video(self, event=None):
self.url = self.entry.get()
self.master.focus_set()
if self.url and ' ' not in self.url:
try:
video = pafy.new(self.url)
self.video_title = video.title
duration = video.duration
views = video.viewcount
published = video.published
thumbnail = video.thumb
self.streams = video.streams
self.option_streams = self.streams[::-1]
self.draw_download_frame()
self.draw_download_widgets()
self.title['text'] = self.video_title[:50]
self.views['text'] = f'Views : {views:,}'
self.duration['text'] = f'Length : {duration}'
self.published['text'] = f'Pub : {published[:10]}'
except OSError:
messagebox.showerror('SaveFromYT', 'Cannot extract data')
except ValueError:
messagebox.showerror('SaveFromYT', 'Invalid URL')
except:
messagebox.showerror('SaveFromYT', 'Cannot connect with internet')
def download_video(self):
filetypes = [('MP4', '.mp4')]
filepath = filedialog.asksaveasfilename(initialdir=cwd,
initialfile=self.video_title[:25]+'.mp4',
filetypes=filetypes)
if filepath:
self.is_video_downloading = True
self.cease_buttons()
vq = self.video_quality.get()
l = len(self.streams)
opts = [str(stream) for stream in self.option_streams]
stream = self.streams[opts.index(vq) - l + 1]
self.filesize = stream.get_filesize()
self.sizelabel = tk.Label(self.video_frame, bg='red', fg='white',
text=f'Filesize : {self.filesize/(1024*1024):.2f} Mb')
self.sizelabel.grid(row=2, column=0, pady=5)
self.pb = ttk.Progressbar(self.video_frame, orient=tk.HORIZONTAL,
mode='determinate', length=100)
self.pb.grid(row=2, column=2, columnspan=3, pady=5)
try:
stream.download(quiet=True, callback=self.download_callback,
filepath=filepath)
messagebox.showinfo('SaveFromYT', 'Video Downloaded Successfully')
except:
messagebox.showerror('SaveFromYT', 'Cannot connect with internet')
self.pb.destroy()
self.sizelabel.destroy()
self.is_video_downloading = False
self.release_buttons()
def download_callback(self, total, recvd, ratio, rate, eta):
perc = (recvd / total) * 100
self.pb['value'] = int(perc)
self.update()
def download_mp3(self):
        filetypes = [('MP3', '.mp3')]
        filepath = filedialog.asksaveasfilename(initialdir=cwd,
                                                initialfile=self.video_title[:25] + '.mp3',
                                                filetypes=filetypes)
if filepath:
ydl_opts = {
'format': 'bestaudio/best',
'outtmpl' : filepath,
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192'
}],
'postprocessor_args': [
'-ar', '16000'
],
'prefer_ffmpeg': True,
'keepvideo': True,
'progress_hooks': [self.download_hook]
}
self.is_audio_downloading = True
self.cease_buttons()
try:
self.pb = ttk.Progressbar(self.audio_frame, orient=tk.HORIZONTAL,
mode='determinate', length=100)
self.pb.grid(row=0, column=2, pady=20, padx=20)
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([self.url])
for file in os.listdir():
if file.endswith('.webm'):
os.remove(file)
self.pb.destroy()
messagebox.showinfo('SaveFromYT', 'Successfully Downloaded Mp3')
except:
messagebox.showinfo('SaveFromYT', "Can't connect with internet")
self.is_audio_downloading = False
self.release_buttons()
def download_hook(self, d):
if d['status'] == 'downloading':
p = d['_percent_str']
p = float(p.replace('%','').replace(' ',''))
self.pb['value'] = round(p)
self.update()
def go_back(self):
self.info_frame.destroy()
self.video_frame.destroy()
self.audio_frame.destroy()
self.back_frame.destroy()
self.draw_main_frame()
if __name__ == '__main__':
root = tk.Tk()
root.geometry('450x320')
root.title('SaveFromYT')
root.resizable(0,0)
youtube_icon = PhotoImage(file='icons/youtube.png')
back_icon = PhotoImage(file='icons/back.png')
search_icon = PhotoImage(file='icons/search.png')
app = Application(master=root)
app.mainloop() |
py | 1a301db2a25fe29e7e0a916e2b7cd125cbe4262e | #!/usr/bin/env python
import glob
import os
import sys
import time
import re
import shutil
from os.path import expanduser
home = expanduser("~")
rosey_dir = home + "/.rosey/"
rosey_config = rosey_dir + "config"
rosey_log = rosey_dir + "rosey.log"
class Rosey():
def __init__(self, config):
"""Configs come one in a list of three member lists"""
"""glob-file-pattern-to-match, prefix to remove, directory-to-move-matched-files"""
self.config = config
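        # Illustrative config rows (paths are made up): each line of ~/.rosey/config is
        #   <glob pattern>, <prefix to strip>, <destination directory ending in '/'>
        # for example:
        #   /home/me/Downloads/Invoice*.pdf, Invoice, /home/me/Documents/invoices/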
def FileMoveToDoList(self):
files = []
todos = []
configs = iter(self.config)
for config in configs:
files = self.findMatchingFiles(config[0])
for f in files:
todos += [
[f, self.replacePatternWithNewPath(f, config[1], config[2])]
]
return todos
def replacePatternWithNewPath(self, file, remove_this, dest_path):
t = time.localtime(os.path.getctime(file))
timestamp = time.strftime("%Y-%m-%d", t) + "-"
orig_name = os.path.basename(file)
trimmed_name = orig_name.replace(remove_this, "")
no_spaces_name = trimmed_name.replace(" ", "-")
timestamped_name = timestamp + no_spaces_name
new_name = re.sub("-+", "-", timestamped_name)
new_path = dest_path + new_name
return new_path
def findMatchingFiles(self, glob_spec):
all = glob.glob(glob_spec)
return all
def check_config(config):
findings = []
regexes = [f[0] for f in config]
    if sorted(regexes) != sorted(set(regexes)):
        findings.append("You have one or more duplicate patterns")
        return findings
dest_dirs = [f[2] for f in config]
for dest in dest_dirs:
if (not os.path.isdir(dest)):
findings += "Destination directory does not exist: '{0}'".format(dest)
return findings
def cleanup_config(config):
config_list = [line.rstrip().split(',') for line in config]
trimmed_config = []
for config_item in config_list:
trimmed_config += [[f.lstrip().rstrip() for f in config_item]]
return trimmed_config
def moveEm(todo, really_move = True):
with open(rosey_log, "a") as myfile:
for t in todo:
message = "Moving: {0}\n to: {1}\n".format(t[0], t[1])
if really_move:
try:
shutil.move(t[0], t[1])
message += " : Move Successful"
except Exception as e:
message += " : Move Fails {0}.".format(e)
myfile.write(message + "\n");
print message
def show_findings(findings):
for f in findings:
print f
def main(arg):
if (not os.path.exists(rosey_config)):
print "You need to create ~/.rosey/config"
exit(1)
with open(rosey_config) as f:
config = f.readlines()
clean_config = cleanup_config(config)
findings = check_config(clean_config)
if findings != []:
show_findings(findings)
exit(1)
rosey = Rosey(clean_config)
todo = rosey.FileMoveToDoList()
if arg == "move":
moveEm(todo)
if arg == "show":
moveEm(todo, False)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "usage: {0} [move, show] {1}".format(sys.argv[0], len(sys.argv))
exit(0)
main(sys.argv[1]) # Run the example
|
py | 1a301e340b3e8aa8123ecd2dee29023be07ec43e | """Test different accessory types: HumidifierDehumidifier."""
from pyhap.const import (
CATEGORY_HUMIDIFIER,
HAP_REPR_AID,
HAP_REPR_CHARS,
HAP_REPR_IID,
HAP_REPR_VALUE,
)
from homeassistant.components.homekit.const import (
ATTR_VALUE,
CONF_LINKED_HUMIDITY_SENSOR,
PROP_MAX_VALUE,
PROP_MIN_STEP,
PROP_MIN_VALUE,
PROP_VALID_VALUES,
)
from homeassistant.components.homekit.type_humidifiers import HumidifierDehumidifier
from homeassistant.components.humidifier.const import (
ATTR_HUMIDITY,
ATTR_MAX_HUMIDITY,
ATTR_MIN_HUMIDITY,
DEFAULT_MAX_HUMIDITY,
DEFAULT_MIN_HUMIDITY,
DEVICE_CLASS_DEHUMIDIFIER,
DEVICE_CLASS_HUMIDIFIER,
DOMAIN,
SERVICE_SET_HUMIDITY,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_HUMIDITY,
PERCENTAGE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from tests.common import async_mock_service
async def test_humidifier(hass, hk_driver, events):
"""Test if humidifier accessory and HA are updated accordingly."""
entity_id = "humidifier.test"
hass.states.async_set(entity_id, STATE_OFF)
await hass.async_block_till_done()
acc = HumidifierDehumidifier(
hass, hk_driver, "HumidifierDehumidifier", entity_id, 1, None
)
hk_driver.add_accessory(acc)
await acc.run()
await hass.async_block_till_done()
assert acc.aid == 1
assert acc.category == CATEGORY_HUMIDIFIER
assert acc.char_current_humidifier_dehumidifier.value == 0
assert acc.char_target_humidifier_dehumidifier.value == 1
assert acc.char_current_humidity.value == 0
assert acc.char_target_humidity.value == 45.0
assert acc.char_active.value == 0
assert acc.char_target_humidity.properties[PROP_MAX_VALUE] == DEFAULT_MAX_HUMIDITY
assert acc.char_target_humidity.properties[PROP_MIN_VALUE] == DEFAULT_MIN_HUMIDITY
assert acc.char_target_humidity.properties[PROP_MIN_STEP] == 1.0
assert acc.char_target_humidifier_dehumidifier.properties[PROP_VALID_VALUES] == {
"Humidifier": 1
}
hass.states.async_set(
entity_id,
STATE_ON,
{ATTR_HUMIDITY: 47},
)
await hass.async_block_till_done()
assert acc.char_target_humidity.value == 47.0
assert acc.char_current_humidifier_dehumidifier.value == 2
assert acc.char_target_humidifier_dehumidifier.value == 1
assert acc.char_active.value == 1
hass.states.async_set(
entity_id,
STATE_OFF,
{ATTR_HUMIDITY: 42, ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDIFIER},
)
await hass.async_block_till_done()
assert acc.char_target_humidity.value == 42.0
assert acc.char_current_humidifier_dehumidifier.value == 0
assert acc.char_target_humidifier_dehumidifier.value == 1
assert acc.char_active.value == 0
# Set from HomeKit
call_set_humidity = async_mock_service(hass, DOMAIN, SERVICE_SET_HUMIDITY)
char_target_humidity_iid = acc.char_target_humidity.to_HAP()[HAP_REPR_IID]
hk_driver.set_characteristics(
{
HAP_REPR_CHARS: [
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_target_humidity_iid,
HAP_REPR_VALUE: 39.0,
},
]
},
"mock_addr",
)
await hass.async_block_till_done()
assert len(call_set_humidity) == 1
assert call_set_humidity[0].data[ATTR_ENTITY_ID] == entity_id
assert call_set_humidity[0].data[ATTR_HUMIDITY] == 39.0
assert acc.char_target_humidity.value == 39.0
assert len(events) == 1
assert events[-1].data[ATTR_VALUE] == "RelativeHumidityHumidifierThreshold to 39.0%"
async def test_dehumidifier(hass, hk_driver, events):
"""Test if dehumidifier accessory and HA are updated accordingly."""
entity_id = "humidifier.test"
hass.states.async_set(
entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: DEVICE_CLASS_DEHUMIDIFIER}
)
await hass.async_block_till_done()
acc = HumidifierDehumidifier(
hass, hk_driver, "HumidifierDehumidifier", entity_id, 1, None
)
hk_driver.add_accessory(acc)
await acc.run()
await hass.async_block_till_done()
assert acc.aid == 1
assert acc.category == CATEGORY_HUMIDIFIER
assert acc.char_current_humidifier_dehumidifier.value == 0
assert acc.char_target_humidifier_dehumidifier.value == 2
assert acc.char_current_humidity.value == 0
assert acc.char_target_humidity.value == 45.0
assert acc.char_active.value == 0
assert acc.char_target_humidity.properties[PROP_MAX_VALUE] == DEFAULT_MAX_HUMIDITY
assert acc.char_target_humidity.properties[PROP_MIN_VALUE] == DEFAULT_MIN_HUMIDITY
assert acc.char_target_humidity.properties[PROP_MIN_STEP] == 1.0
assert acc.char_target_humidifier_dehumidifier.properties[PROP_VALID_VALUES] == {
"Dehumidifier": 2
}
hass.states.async_set(
entity_id,
STATE_ON,
{ATTR_HUMIDITY: 30},
)
await hass.async_block_till_done()
assert acc.char_target_humidity.value == 30.0
assert acc.char_current_humidifier_dehumidifier.value == 3
assert acc.char_target_humidifier_dehumidifier.value == 2
assert acc.char_active.value == 1
hass.states.async_set(
entity_id,
STATE_OFF,
{ATTR_HUMIDITY: 42},
)
await hass.async_block_till_done()
assert acc.char_target_humidity.value == 42.0
assert acc.char_current_humidifier_dehumidifier.value == 0
assert acc.char_target_humidifier_dehumidifier.value == 2
assert acc.char_active.value == 0
# Set from HomeKit
call_set_humidity = async_mock_service(hass, DOMAIN, SERVICE_SET_HUMIDITY)
char_target_humidity_iid = acc.char_target_humidity.to_HAP()[HAP_REPR_IID]
hk_driver.set_characteristics(
{
HAP_REPR_CHARS: [
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_target_humidity_iid,
HAP_REPR_VALUE: 39.0,
},
]
},
"mock_addr",
)
await hass.async_block_till_done()
assert len(call_set_humidity) == 1
assert call_set_humidity[0].data[ATTR_ENTITY_ID] == entity_id
assert call_set_humidity[0].data[ATTR_HUMIDITY] == 39.0
assert acc.char_target_humidity.value == 39.0
assert len(events) == 1
assert (
events[-1].data[ATTR_VALUE] == "RelativeHumidityDehumidifierThreshold to 39.0%"
)
async def test_hygrostat_power_state(hass, hk_driver, events):
"""Test if accessory and HA are updated accordingly."""
entity_id = "humidifier.test"
hass.states.async_set(
entity_id,
STATE_ON,
{ATTR_HUMIDITY: 43},
)
await hass.async_block_till_done()
acc = HumidifierDehumidifier(
hass, hk_driver, "HumidifierDehumidifier", entity_id, 1, None
)
hk_driver.add_accessory(acc)
await acc.run()
await hass.async_block_till_done()
assert acc.char_current_humidifier_dehumidifier.value == 2
assert acc.char_target_humidifier_dehumidifier.value == 1
assert acc.char_active.value == 1
hass.states.async_set(
entity_id,
STATE_OFF,
{ATTR_HUMIDITY: 43},
)
await hass.async_block_till_done()
assert acc.char_current_humidifier_dehumidifier.value == 0
assert acc.char_target_humidifier_dehumidifier.value == 1
assert acc.char_active.value == 0
# Set from HomeKit
call_turn_on = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
char_active_iid = acc.char_active.to_HAP()[HAP_REPR_IID]
hk_driver.set_characteristics(
{
HAP_REPR_CHARS: [
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_active_iid,
HAP_REPR_VALUE: 1,
},
]
},
"mock_addr",
)
await hass.async_block_till_done()
assert len(call_turn_on) == 1
assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
assert acc.char_active.value == 1
assert len(events) == 1
assert events[-1].data[ATTR_VALUE] == "Active to 1"
call_turn_off = async_mock_service(hass, DOMAIN, SERVICE_TURN_OFF)
hk_driver.set_characteristics(
{
HAP_REPR_CHARS: [
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_active_iid,
HAP_REPR_VALUE: 0,
},
]
},
"mock_addr",
)
await hass.async_block_till_done()
assert len(call_turn_off) == 1
assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
assert acc.char_active.value == 0
assert len(events) == 2
assert events[-1].data[ATTR_VALUE] == "Active to 0"
async def test_hygrostat_get_humidity_range(hass, hk_driver):
"""Test if humidity range is evaluated correctly."""
entity_id = "humidifier.test"
hass.states.async_set(
entity_id, STATE_OFF, {ATTR_MIN_HUMIDITY: 40, ATTR_MAX_HUMIDITY: 45}
)
await hass.async_block_till_done()
acc = HumidifierDehumidifier(
hass, hk_driver, "HumidifierDehumidifier", entity_id, 1, None
)
hk_driver.add_accessory(acc)
await acc.run()
await hass.async_block_till_done()
assert acc.char_target_humidity.properties[PROP_MAX_VALUE] == 45
assert acc.char_target_humidity.properties[PROP_MIN_VALUE] == 40
async def test_humidifier_with_linked_humidity_sensor(hass, hk_driver):
"""Test a humidifier with a linked humidity sensor can update."""
humidity_sensor_entity_id = "sensor.bedroom_humidity"
hass.states.async_set(
humidity_sensor_entity_id,
"42.0",
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
)
await hass.async_block_till_done()
entity_id = "humidifier.test"
hass.states.async_set(entity_id, STATE_OFF)
await hass.async_block_till_done()
acc = HumidifierDehumidifier(
hass,
hk_driver,
"HumidifierDehumidifier",
entity_id,
1,
{CONF_LINKED_HUMIDITY_SENSOR: humidity_sensor_entity_id},
)
hk_driver.add_accessory(acc)
await acc.run()
await hass.async_block_till_done()
assert acc.char_current_humidity.value == 42.0
hass.states.async_set(
humidity_sensor_entity_id,
"43.0",
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
)
await hass.async_block_till_done()
assert acc.char_current_humidity.value == 43.0
hass.states.async_set(
humidity_sensor_entity_id,
STATE_UNAVAILABLE,
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
)
await hass.async_block_till_done()
assert acc.char_current_humidity.value == 43.0
hass.states.async_remove(humidity_sensor_entity_id)
await hass.async_block_till_done()
assert acc.char_current_humidity.value == 43.0
async def test_humidifier_with_a_missing_linked_humidity_sensor(hass, hk_driver):
"""Test a humidifier with a configured linked motion sensor that is missing."""
humidity_sensor_entity_id = "sensor.bedroom_humidity"
entity_id = "humidifier.test"
hass.states.async_set(entity_id, STATE_OFF)
await hass.async_block_till_done()
acc = HumidifierDehumidifier(
hass,
hk_driver,
"HumidifierDehumidifier",
entity_id,
1,
{CONF_LINKED_HUMIDITY_SENSOR: humidity_sensor_entity_id},
)
hk_driver.add_accessory(acc)
await acc.run()
await hass.async_block_till_done()
assert acc.char_current_humidity.value == 0
async def test_humidifier_as_dehumidifier(hass, hk_driver, events, caplog):
"""Test an invalid char_target_humidifier_dehumidifier from HomeKit."""
entity_id = "humidifier.test"
hass.states.async_set(entity_id, STATE_OFF)
await hass.async_block_till_done()
acc = HumidifierDehumidifier(
hass, hk_driver, "HumidifierDehumidifier", entity_id, 1, None
)
hk_driver.add_accessory(acc)
await acc.run()
await hass.async_block_till_done()
assert acc.char_target_humidifier_dehumidifier.value == 1
# Set from HomeKit
char_target_humidifier_dehumidifier_iid = (
acc.char_target_humidifier_dehumidifier.to_HAP()[HAP_REPR_IID]
)
hk_driver.set_characteristics(
{
HAP_REPR_CHARS: [
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_target_humidifier_dehumidifier_iid,
HAP_REPR_VALUE: 0,
},
]
},
"mock_addr",
)
await hass.async_block_till_done()
assert "TargetHumidifierDehumidifierState is not supported" in caplog.text
assert len(events) == 0
|
py | 1a301e3a89ed6eb0361d8d6bfa049bf53d80761e | #!/usr/bin/python
# -*- coding: utf8 -*-
import os
import logging
import sys
import argparse
sys.path.append("../core")
from qgis_project_substitute import substitute_project
from processor import Processor
def argparser_prepare():
class PrettyFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
max_help_position = 35
parser = argparse.ArgumentParser(description='OSMTram process',
formatter_class=PrettyFormatter)
parser.add_argument('--prune',dest='prune', required=False, action='store_true', help='Clear temporary folder')
parser.add_argument('--skip-osmupdate',dest='skip_osmupdate', required=False, action='store_true')
parser.add_argument('--workdir',dest='WORKDIR', required=True)
parser.epilog = \
'''Samples:
%(prog)s
''' \
% {'prog': parser.prog}
return parser
dump_url = 'http://download.geofabrik.de/europe/latvia-latest.osm.pbf'
parser = argparser_prepare()
args = parser.parse_args()
WORKDIR=args.WORKDIR
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(levelname)-8s %(message)s',datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info('Start')
processor = Processor()
processor.process_sheets('latvia.geojson',WORKDIR,dump_url,dump_name='latvia')
#,attribute_filter='''"name_ru"= 'Лиепая' and "type"='tram' '''
|
py | 1a301e9b700cc4f1eb12f4e8395c89aa84d99dfa | import numpy as np
from src.util import Util, Article
class Answer:
"""Answer questions based on the initialized article."""
def __init__(self, article):
"""
Create a new instance of the Answer class.
Args:
article: An instance of the Article class
"""
self.article = article
def answer(self, question, return_score=False):
"""
Answer the given question.
Args:
question: Question string
Returns:
Answer to question as string
"""
u = Util()
question_embedding = u.embeddings([question])[0]
sentences_list = []
for paragraph in self.article.sentences:
paragraph_text = [s.text for s in paragraph]
sentences_list += paragraph_text
sentences_embeddings = u.embeddings(sentences_list)
distances = []
for i, embedding in enumerate(sentences_embeddings):
diffs = np.inner(question_embedding, embedding)
dist = diffs
distances.append((dist, sentences_list[i]))
distances.sort(key=lambda x: x[0], reverse=True)
most_similar_sentence = distances[0][1]
most_similar_score = distances[0][0]
if return_score:
return (most_similar_sentence, most_similar_score)
return most_similar_sentence
if __name__ == "__main__":
u = Util()
art = Article(u.load_txt_article("../articles/Development_data/set4/set4/a1.txt"))
a = Answer(art)
q = "Who studied the stars of the southern hemisphere from 1750 until 1754 from Cape of Good Hope?"
print(a.answer(q))
# Who is a product of a revision of the Old Babylonian system in later Neo-Babylonian astronomy 6th century BC?
# Who interpreted the creatures appearing in the books of Ezekiel (and thence in Revelation) as the middle signs of the four quarters of the Zodiac?
# Who studied the stars of the southern hemisphere from 1750 until 1754 from Cape of Good Hope?
# Who aided the IAU (International Astronomical Union) in dividing the celestial sphere into 88 official constellations?
# Who is a product of a revision of the Old Babylonian system in later Neo-Babylonian astronomy 6th century BC?
|
py | 1a301f062ea2131e6769a035818f1cba3b2d8e5b | from math import ceil
from hashlib import md5
from pecan import expose, request, abort, response, redirect
from pecan.secure import secure
from pecan.ext.wtforms import with_form
from sqlalchemy import select, and_, or_, asc, desc, func, case, literal
from draughtcraft import model
from draughtcraft.lib.beerxml import export
from draughtcraft.lib.forms.recipes.browse import RecipeSearchForm
from create import RecipeCreationController
from builder import RecipeBuilderController
class SlugController(object):
def __init__(self, slug):
self.slug = slug
# Make sure the provided slug is valid
if not slug:
redirect(request.context['recipe'].slugs[0].slug)
if slug not in [slug.slug for slug in request.context['recipe'].slugs]:
abort(404)
@expose('recipes/builder/index.html')
@expose('json', content_type='application/json')
def index(self):
recipe = request.context['recipe']
if recipe.state == "DRAFT":
if recipe.author and recipe.author != request.context['user']:
abort(404)
if not recipe.author and recipe != request.context['trial_recipe']:
abort(404)
# Log a view for the recipe (if the viewer *is not* the author)
if recipe.author != request.context['user'] and \
request.pecan.get('content_type') == 'application/json':
model.RecipeView(recipe=recipe)
return dict(
recipe=recipe,
editable=False
)
@expose(content_type='application/xml')
def xml(self):
recipe = request.context['recipe']
if recipe.state == "DRAFT":
if recipe.author and recipe.author != request.context['user']:
abort(404)
response.headers['Content-Disposition'] = \
'attachment; filename="%s.xml"' % self.slug
return export.to_xml([request.context['recipe']])
@expose(generic=True)
def draft(self):
abort(405)
@draft.when(method="POST")
def do_draft(self):
source = request.context['recipe']
if source.author is None or source.author != request.context['user']:
abort(401)
if source.state != "PUBLISHED":
abort(401)
draft = source.draft()
draft.flush()
redirect("%sbuilder" % draft.url())
@expose(generic=True)
def copy(self):
abort(405)
@copy.when(method="POST")
def do_copy(self):
source = request.context['recipe']
if request.context['user'] is None:
redirect("/signup")
if source.author is None:
abort(401)
diff_user = source.author != request.context['user']
name = source.name if diff_user else "%s (Duplicate)" % source.name
copy = source.duplicate({
'name': name,
'author': request.context['user']
})
if diff_user:
copy.copied_from = source
redirect("/")
@expose(generic=True)
def delete(self):
abort(405)
@delete.when(method="POST")
def do_delete(self):
source = request.context['recipe']
if source.author is None or source.author != request.context['user']:
abort(401)
source.delete()
redirect("/")
builder = secure(
RecipeBuilderController(),
RecipeBuilderController.check_permissions
)
class RecipeController(object):
@expose()
def _lookup(self, slug, *remainder):
return SlugController(slug), remainder
def __init__(self, recipeID):
try:
primary_key = int(str(recipeID), 16)
except ValueError:
abort(404)
recipe = model.Recipe.get(primary_key)
if recipe is None:
abort(404)
request.context['recipe'] = recipe
class RecipesController(object):
@expose()
def _lookup(self, recipeID, *remainder):
return RecipeController(recipeID), remainder
@expose('recipes/browse/index.html')
def index(self):
return dict(
styles=model.Style.query.order_by(model.Style.name).all()
)
@expose(template='recipes/browse/list.html')
@with_form(RecipeSearchForm, validate_safe=True)
def recipes(self, **kw):
if request.pecan['form'].errors:
abort(400)
perpage = 25.0
offset = int(perpage * (kw['page'] - 1))
views = func.count(model.RecipeView.id).label('views')
username = func.lower(model.User.username).label('username')
sortable_type = case([
(model.Recipe.type == 'MASH', literal('All Grain')),
(model.Recipe.type == 'EXTRACT', literal('Extract')),
(
model.Recipe.type == 'EXTRACTSTEEP',
literal('Extract w/ Steeped Grains')
),
(model.Recipe.type == 'MINIMASH', literal('Mini-Mash')),
]).label('type')
# map of columns
column_map = dict(
type=(sortable_type,),
srm=(model.Recipe._srm,),
name=(model.Recipe.name,),
author=(username,),
style=(model.Style.name,),
last_updated=(model.Recipe.last_updated,),
views=(views,)
)
# determine the sorting direction and column
order_column = column_map.get(kw['order_by'])
order_direction = dict(
ASC=asc,
DESC=desc
).get(kw['direction'])
where = [
model.Recipe.state == 'PUBLISHED'
]
# If applicable, filter by style
if kw['style']:
query = where.append(model.Recipe.style == kw['style'])
# If applicable, filter by type (MASH, etc...)
where.append(or_(
model.Recipe.id is None,
model.Recipe.type == 'MASH' if kw['mash'] else None,
model.Recipe.type == 'MINIMASH' if kw['minimash'] else None,
model.Recipe.type.in_(('EXTRACTSTEEP', 'EXTRACT'))
if kw['extract'] else None,
))
# If applicable, filter by color
if kw['color']:
start, end = {
'light': (0, 8),
'amber': (8, 18),
'brown': (16, 25),
'dark': (25, 5000)
}.get(kw['color'])
where.append(and_(
model.Recipe._srm >= start,
model.Recipe._srm <= end,
))
# Join the `recipe`, `recipeview`, `user`, and `style` tables
from_obj = model.Recipe.table.outerjoin(
model.RecipeView.table,
onclause=model.RecipeView.recipe_id == model.Recipe.id
).outerjoin(
model.Style.table,
onclause=model.Recipe.style_id == model.Style.id
).join(
model.User.table,
onclause=model.Recipe.author_id == model.User.id
)
username_full = model.User.username.label('username')
email = model.User.email.label('email')
style_name = model.Style.name.label('style_name')
style_url = model.Style.url.label('style_url')
query = select(
[
model.Recipe.id,
model.Recipe.name,
model.Recipe._srm,
username_full,
email,
sortable_type,
style_name,
style_url,
model.Recipe.last_updated,
views
],
and_(*where),
from_obj=[from_obj],
group_by=model.Recipe.id
)
total = select(
[func.count(model.Recipe.id)],
and_(*where)
).execute().fetchone()[0]
if views not in order_column:
query = query.group_by(*order_column)
query = query.group_by(username_full)
query = query.group_by(email)
query = query.group_by(style_name)
query = query.group_by(style_url)
recipes = query.order_by(
*[order_direction(column) for column in order_column]
).offset(
offset
).limit(
perpage
).execute().fetchall()
class RecipeProxy(object):
def __init__(self, recipe):
self.id, self.name, self._srm, self.username, self.email, self.printable_type, self.style_name, self.style_url, self.last_updated, self.views = recipe
@property
def metric_unit(self):
return 'EBC' if request.context['metric'] is True else 'SRM'
@property
def color(self):
                if self.metric_unit == 'SRM':
                    return self._srm
                return round(self._srm * 1.97, 1)
@property
def gravatar(self):
return 'https://www.gravatar.com/avatar/%s?d=https://draughtcraft.com/images/glass-square.png' % (
md5(self.email.strip().lower()).hexdigest()
)
@property
def url(self):
return '/recipes/%s/' % (('%x' % self.id).lower())
return dict(
pages=max(1, int(ceil(total / perpage))),
current_page=kw['page'],
offset=offset,
perpage=perpage,
total=total,
order_by=kw['order_by'],
direction=kw['direction'],
recipes=map(RecipeProxy, recipes)
)
create = RecipeCreationController()
|
py | 1a301f14b2a1db234d1df5d719a51461f77c8d12 | """Data classes that are returned by functions within ``pymel.core``
A wrap of Maya's Vector, Point, Color, Matrix, TransformationMatrix, Quaternion, EulerRotation types
"""
import sys
import math
import copy
import operator
import colorsys
import pymel.util as util
import pymel.api as _api
from pymel.util.arrays import *
from pymel.util.arrays import _toCompOrArrayInstance
import pymel.internal.factories as _factories
# patch some Maya api classes that miss __iter__ to make them iterable / convertible to list
def _patchMVector():
def __len__(self):
""" Number of components in the Maya api Vector, ie 3 """
return 3
type.__setattr__(_api.MVector, '__len__', __len__)
def __iter__(self):
""" Iterates on all components of a Maya api Vector """
for i in xrange(len(self)):
yield _api.MVector.__getitem__(self, i)
type.__setattr__(_api.MVector, '__iter__', __iter__)
def _patchMFloatVector():
def __len__(self):
""" Number of components in the Maya api FloatVector, ie 3 """
return 3
type.__setattr__(_api.MFloatVector, '__len__', __len__)
def __iter__(self):
""" Iterates on all components of a Maya api FloatVector """
for i in xrange(len(self)):
yield _api.MFloatVector.__getitem__(self, i)
type.__setattr__(_api.MFloatVector, '__iter__', __iter__)
def _patchMPoint():
def __len__(self):
""" Number of components in the Maya api Point, ie 4 """
return 4
type.__setattr__(_api.MPoint, '__len__', __len__)
def __iter__(self):
""" Iterates on all components of a Maya api Point """
for i in xrange(len(self)):
yield _api.MPoint.__getitem__(self, i)
type.__setattr__(_api.MPoint, '__iter__', __iter__)
def _patchMFloatPoint():
def __len__(self):
""" Number of components in the Maya api FloatPoint, ie 4 """
return 4
type.__setattr__(_api.MFloatPoint, '__len__', __len__)
def __iter__(self):
""" Iterates on all components of a Maya api FloatPoint """
for i in xrange(len(self)):
yield _api.MFloatPoint.__getitem__(self, i)
type.__setattr__(_api.MFloatPoint, '__iter__', __iter__)
def _patchMColor():
def __len__(self):
""" Number of components in the Maya api Color, ie 4 """
return 4
type.__setattr__(_api.MColor, '__len__', __len__)
def __iter__(self):
""" Iterates on all components of a Maya api Color """
for i in xrange(len(self)):
yield _api.MColor.__getitem__(self, i)
type.__setattr__(_api.MColor, '__iter__', __iter__)
def _patchMMatrix():
def __len__(self):
""" Number of rows in the Maya api Matrix, ie 4.
Not to be confused with the number of components (16) given by the size method """
return 4
type.__setattr__(_api.MMatrix, '__len__', __len__)
def __iter__(self):
""" Iterates on all 4 rows of a Maya api Matrix """
for r in xrange(4):
yield Array([_api.MScriptUtil.getDoubleArrayItem(_api.MMatrix.__getitem__(self, r), c) for c in xrange(4)])
type.__setattr__(_api.MMatrix, '__iter__', __iter__)
def _patchMFloatMatrix():
def __len__(self):
""" Number of rows in the Maya api FloatMatrix, ie 4.
Not to be confused with the number of components (16) given by the size method """
return 4
type.__setattr__(_api.MFloatMatrix, '__len__', __len__)
def __iter__(self):
""" Iterates on all 4 rows of a Maya api FloatMatrix """
for r in xrange(4):
yield Array([_api.MScriptUtil.getFloatArrayItem(_api.MFloatMatrix.__getitem__(self, r), c) for c in xrange(4)])
type.__setattr__(_api.MFloatMatrix, '__iter__', __iter__)
def _patchMTransformationMatrix():
def __len__(self):
""" Number of rows in the Maya api Matrix, ie 4.
Not to be confused with the number of components (16) given by the size method """
return 4
type.__setattr__(_api.MTransformationMatrix, '__len__', __len__)
def __iter__(self):
""" Iterates on all 4 rows of a Maya api TransformationMatrix """
return self.asMatrix().__iter__()
type.__setattr__(_api.MTransformationMatrix, '__iter__', __iter__)
def _patchMQuaternion():
def __len__(self):
""" Number of components in the Maya api Quaternion, ie 4 """
return 4
type.__setattr__(_api.MQuaternion, '__len__', __len__)
def __iter__(self):
""" Iterates on all components of a Maya api Quaternion """
for i in xrange(len(self)):
yield _api.MQuaternion.__getitem__(self, i)
type.__setattr__(_api.MQuaternion, '__iter__', __iter__)
def _patchMEulerRotation():
def __len__(self):
""" Number of components in the Maya api EulerRotation, ie 3 """
return 3
type.__setattr__(_api.MEulerRotation, '__len__', __len__)
def __iter__(self):
""" Iterates on all components of a Maya api EulerRotation """
for i in xrange(len(self)):
yield _api.MEulerRotation.__getitem__(self, i)
type.__setattr__(_api.MEulerRotation, '__iter__', __iter__)
_patchMVector()
_patchMFloatVector()
_patchMPoint()
_patchMFloatPoint()
_patchMColor()
_patchMMatrix()
_patchMFloatMatrix()
_patchMTransformationMatrix()
_patchMQuaternion()
_patchMEulerRotation()
# the meta class of metaMayaWrapper
class MetaMayaArrayTypeWrapper(_factories.MetaMayaTypeWrapper):
""" A metaclass to wrap Maya array type classes such as Vector, Matrix """
def __new__(mcl, classname, bases, classdict):
""" Create a new wrapping class for a Maya api type, such as Vector or Matrix """
if 'shape' in classdict:
# fixed shape means also fixed ndim and size
shape = classdict['shape']
ndim = len(shape)
size = reduce(operator.mul, shape, 1)
if 'ndim' not in classdict:
classdict['ndim'] = ndim
elif classdict['ndim'] != ndim:
raise ValueError, "class %s shape definition %s and number of dimensions definition %s do not match" % (classname, shape, ndim)
if 'size' not in classdict:
classdict['size'] = size
elif classdict['size'] != size:
raise ValueError, "class %s shape definition %s and size definition %s do not match" % (classname, shape, size)
# create the new class
newcls = super(MetaMayaArrayTypeWrapper, mcl).__new__(mcl, classname, bases, classdict)
try:
apicls = newcls.apicls
except:
apicls = None
try:
shape = newcls.shape
except:
shape = None
try:
cnames = newcls.cnames
except:
cnames = ()
if shape is not None:
# fixed shape means also fixed ndim and size
ndim = len(shape)
size = reduce(operator.mul, shape, 1)
if cnames:
# definition for component names
type.__setattr__(newcls, 'cnames', cnames)
subsizes = [reduce(operator.mul, shape[i + 1:], 1) for i in xrange(ndim)]
for index, compname in enumerate(cnames):
coords = []
for i in xrange(ndim):
c = index // subsizes[i]
index -= c * subsizes[i]
coords.append(c)
if len(coords) == 1:
coords = coords[0]
else:
coords = tuple(coords)
# def _get(self):
# return self.__getitem__(coords)
# _get.__name__ = '_get_' + compname
#
# # FIXME : the set property does not do anything in python 2.4 !!! It doesn't even get called.
#
# def _set(self, val):
# self.__setitem__(coords, val)
#
# _set.__name__ = '_set_' + compname
#
# p = property( _get, _set, None, 'set and get %s component' % compname )
cmd = "property( lambda self: self.__getitem__(%s) , lambda self, val: self.__setitem__(%s,val) )" % (coords, coords)
p = eval(cmd)
if compname not in classdict:
type.__setattr__(newcls, compname, p)
else:
raise AttributeError, "component name %s clashes with class method %r" % (compname, classdict[compname])
elif cnames:
raise ValueError, "can only define component names for classes with a fixed shape/size"
# constants for shape, ndim, size
if shape is not None:
type.__setattr__(newcls, 'shape', shape)
if ndim is not None:
type.__setattr__(newcls, 'ndim', ndim)
if size is not None:
type.__setattr__(newcls, 'size', size)
#__slots__ = ['_data', '_shape', '_size']
# add component names to read-only list
readonly = newcls.__readonly__
if hasattr(newcls, 'shape'):
readonly['shape'] = None
if hasattr(newcls, 'ndim'):
readonly['ndim'] = None
if hasattr(newcls, 'size'):
readonly['size'] = None
if 'cnames' not in readonly:
readonly['cnames'] = None
type.__setattr__(newcls, '__readonly__', readonly)
# print "created class", newcls
# print "bases", newcls.__bases__
# print "readonly", newcls.__readonly__
# print "slots", newcls.__slots__
return newcls
# generic math functions that can operate on Arrays inherited from arrays
# (min, max, sum, prod...)
# Functions that work on vectors will now be inherited from Array and properly defer
# to the class methods
class Vector(VectorN):
"""
A 3 dimensional vector class that wraps Maya's api Vector class
>>> from pymel.all import *
>>> import pymel.core.datatypes as dt
>>>
>>> v = dt.Vector(1, 2, 3)
>>> w = dt.Vector(x=1, z=2)
>>> z = dt.Vector( dt.Vector.xAxis, z=1)
>>> v = dt.Vector(1, 2, 3, unit='meters')
>>> print v
[1.0, 2.0, 3.0]
"""
__metaclass__ = MetaMayaArrayTypeWrapper
__slots__ = ()
# class specific info
apicls = _api.MVector
cnames = ('x', 'y', 'z')
shape = (3,)
unit = None
def __new__(cls, *args, **kwargs):
shape = kwargs.get('shape', None)
ndim = kwargs.get('ndim', None)
size = kwargs.get('size', None)
# will default to class constant shape = (3,), so it's just an error check to catch invalid shapes,
# as no other option is actually possible on Vector, but this method could be used to allow wrapping
# of Maya array classes that can have a variable number of elements
shape, ndim, size = cls._expandshape(shape, ndim, size)
new = cls.apicls.__new__(cls)
cls.apicls.__init__(new)
return new
def __init__(self, *args, **kwargs):
""" __init__ method, valid for Vector, Point and Color classes """
cls = self.__class__
if args:
# allow both forms for arguments
if len(args) == 1 and hasattr(args[0], '__iter__'):
args = args[0]
# shortcut when a direct api init is possible
try:
self.assign(args)
except:
# special exception to the rule that you cannot drop data in Arrays __init__
# to allow all conversion from Vector derived classes (MPoint, MColor) to a base class
# special case for MPoint to cartesianize if necessary
# note : we may want to premultiply MColor by the alpha in a similar way
if isinstance(args, _api.MPoint) and args.w != 1.0:
args = copy.deepcopy(args).cartesianize()
if isinstance(args, _api.MColor) and args.a != 1.0:
# note : we may want to premultiply Color by the alpha in a similar way
pass
if isinstance(args, _api.MVector) or isinstance(args, _api.MPoint) or isinstance(args, _api.MColor):
args = tuple(args)
if len(args) > len(self):
args = args[slice(self.shape[0])]
super(Vector, self).__init__(*args)
if hasattr(cls, 'cnames') and len(set(cls.cnames) & set(kwargs)):
# can also use the form <componentname>=<number>
l = list(self.flat)
setcomp = False
for i, c in enumerate(cls.cnames):
if c in kwargs:
if float(l[i]) != float(kwargs[c]):
l[i] = float(kwargs[c])
setcomp = True
if setcomp:
try:
self.assign(l)
except:
msg = ", ".join(map(lambda x, y: x + "=<" + util.clsname(y) + ">", cls.cnames, l))
raise TypeError, "in %s(%s), at least one of the components is of an invalid type, check help(%s) " % (cls.__name__, msg, cls.__name__)
# units handling
self.unit = kwargs.get('unit', None)
if self.unit is not None:
self.assign([Distance(x, self.unit) for x in self])
def __repr__(self):
if hasattr(self, 'unit') and self.unit:
return "dt.%s(%s, unit='%s')" % (self.__class__.__name__, str(self), self.unit)
else:
return "dt.%s(%s)" % (self.__class__.__name__, str(self))
# for compatibility with base classes Array that actually hold a nested list in their _data attribute
# here, there is no _data attribute as we subclass _api.MVector directly, thus v.data is v
# for wraps
def _getdata(self):
return self.apicls(self)
def _setdata(self, value):
self.assign(value)
def _deldata(self):
if hasattr(self.apicls, 'clear'):
self.apicls.clear(self)
else:
raise TypeError, "cannot clear stored elements of %s" % (self.__class__.__name__)
data = property(_getdata, _setdata, _deldata, "The Vector/FloatVector/Point/FloatPoint/Color data")
# overloads for assign and get though standard way should be to use the data property
# to access stored values
def assign(self, value):
""" Wrap the Vector api assign method """
# don't accept instances as assign works on exact types
if type(value) != self.apicls and type(value) != type(self):
if not hasattr(value, '__iter__'):
value = (value,)
value = self.apicls(*value)
self.apicls.assign(self, value)
return self
# API get, actually not faster than pulling self[i] for such a short structure
def get(self):
""" Wrap the Vector api get method """
# need to keep a ref to the MScriptUtil alive until
# all pointers aren't needed...
ms = _api.MScriptUtil()
l = (0,) * self.size
ms.createFromDouble(*l)
p = ms.asDoublePtr()
self.apicls.get(self, p)
return tuple([ms.getDoubleArrayItem(p, i) for i in xrange(self.size)])
def __len__(self):
""" Number of components in the Vector instance, 3 for Vector, 4 for Point and Color """
return self.apicls.__len__(self)
# __getitem__ / __setitem__ override
# faster to override __getitem__ cause we know Vector only has one dimension
def __getitem__(self, i):
""" Get component i value from self """
if hasattr(i, '__iter__'):
i = list(i)
if len(i) == 1:
i = i[0]
else:
raise IndexError, "class %s instance %s has only %s dimension(s), index %s is out of bounds" % (util.clsname(self), self, self.ndim, i)
if isinstance(i, slice):
return _toCompOrArrayInstance(list(self)[i], VectorN)
try:
return _toCompOrArrayInstance(list(self)[i], VectorN)
except:
raise IndexError, "class %s instance %s is of size %s, index %s is out of bounds" % (util.clsname(self), self, self.size, i)
else:
if i < 0:
i = self.size + i
if i < self.size and not i < 0:
if hasattr(self.apicls, '__getitem__'):
return self.apicls.__getitem__(self, i)
else:
return list(self)[i]
else:
raise IndexError, "class %s instance %s is of size %s, index %s is out of bounds" % (util.clsname(self), self, self.size, i)
# as _api.Vector has no __setitem__ method, so need to reassign the whole Vector
def __setitem__(self, i, a):
""" Set component i value on self """
v = VectorN(self)
v.__setitem__(i, a)
self.assign(v)
# iterator override
# TODO : support for optional __iter__ arguments
def __iter__(self, *args, **kwargs):
""" Iterate on the api components """
return self.apicls.__iter__(self.data)
def __contains__(self, value):
""" True if at least one of the vector components is equal to the argument """
return value in self.__iter__()
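    # Illustrative sketch of the indexing / iteration behaviour defined above
    # (assumes pymel.core.datatypes imported as dt):
    #
    #     v = dt.Vector(1, 2, 3)
    #     v[0]          # 1.0
    #     v[-1]         # 3.0
    #     v[0:2]        # a 2 component VectorN slice
    #     2.0 in v      # True, via __contains__
    #     v[1] = 5      # __setitem__ rebuilds and reassigns the whole api vector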
    # common operators without an api equivalent are inherited from VectorN
# operators using the Maya API when applicable, but that can delegate to VectorN
def __eq__(self, other):
""" u.__eq__(v) <==> u == v
Equivalence test """
try:
return bool(self.apicls.__eq__(self, other))
except Exception:
return bool(super(Vector, self).__eq__(other))
def __ne__(self, other):
""" u.__ne__(v) <==> u != v
Equivalence test """
return (not self.__eq__(other))
def __neg__(self):
""" u.__neg__() <==> -u
The unary minus operator. Negates the value of each of the components of u """
return self.__class__(self.apicls.__neg__(self))
def __add__(self, other):
""" u.__add__(v) <==> u+v
Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),
adds v to every component of u if v is a scalar """
try:
return self.__class__._convert(self.apicls.__add__(self, other))
except Exception:
return self.__class__._convert(super(Vector, self).__add__(other))
def __radd__(self, other):
""" u.__radd__(v) <==> v+u
Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),
adds v to every component of u if v is a scalar """
try:
return self.__class__._convert(self.apicls.__radd__(self, other))
except Exception:
return self.__class__._convert(super(Vector, self).__radd__(other))
def __iadd__(self, other):
""" u.__iadd__(v) <==> u += v
In place addition of u and v, see __add__ """
try:
return self.__class__(self.__add__(other))
except Exception:
return NotImplemented
def __sub__(self, other):
""" u.__sub__(v) <==> u-v
            Returns the result of the subtraction of v from u if v is convertible to a VectorN (element-wise subtraction),
            subtracts v from every component of u if v is a scalar """
try:
return self.__class__._convert(self.apicls.__sub__(self, other))
except Exception:
return self.__class__._convert(super(Vector, self).__sub__(other))
def __rsub__(self, other):
""" u.__rsub__(v) <==> v-u
            Returns the result of the subtraction of u from v if v is convertible to a VectorN (element-wise subtraction),
            replaces every component c of u by v-c if v is a scalar """
try:
return self.__class__._convert(self.apicls.__rsub__(self, other))
except Exception:
return self.__class__._convert(super(Vector, self).__rsub__(other))
def __isub__(self, other):
""" u.__isub__(v) <==> u -= v
            In place subtraction of u and v, see __sub__ """
try:
return self.__class__(self.__sub__(other))
except Exception:
return NotImplemented
def __div__(self, other):
""" u.__div__(v) <==> u/v
Returns the result of the division of u by v if v is convertible to a VectorN (element-wise division),
divide every component of u by v if v is a scalar """
try:
return self.__class__._convert(self.apicls.__div__(self, other))
except Exception:
return self.__class__._convert(super(Vector, self).__div__(other))
def __rdiv__(self, other):
""" u.__rdiv__(v) <==> v/u
            Returns the result of the division of v by u if v is convertible to a VectorN (element-wise division),
invert every component of u and multiply it by v if v is a scalar """
try:
return self.__class__._convert(self.apicls.__rdiv__(self, other))
except Exception:
return self.__class__._convert(super(Vector, self).__rdiv__(other))
def __idiv__(self, other):
""" u.__idiv__(v) <==> u /= v
In place division of u by v, see __div__ """
try:
return self.__class__(self.__div__(other))
except Exception:
return NotImplemented
# action depends on second object type
def __mul__(self, other):
""" u.__mul__(v) <==> u*v
The multiply '*' operator is mapped to the dot product when both objects are Vectors,
to the transformation of u by matrix v when v is a MatrixN,
to element wise multiplication when v is a sequence,
and multiplies each component of u by v when v is a numeric type. """
try:
res = self.apicls.__mul__(self, other)
assert res is not NotImplemented
except Exception:
res = super(Vector, self).__mul__(other)
if util.isNumeric(res) or res is NotImplemented:
return res
else:
return self.__class__._convert(res)
def __rmul__(self, other):
""" u.__rmul__(v) <==> v*u
The multiply '*' operator is mapped to the dot product when both objects are Vectors,
to the left side multiplication (pre-multiplication) of u by matrix v when v is a MatrixN,
to element wise multiplication when v is a sequence,
and multiplies each component of u by v when v is a numeric type. """
try:
res = self.apicls.__rmul__(self, other)
except:
res = super(Vector, self).__rmul__(other)
if util.isNumeric(res):
return res
else:
return self.__class__._convert(res)
def __imul__(self, other):
""" u.__imul__(v) <==> u *= v
Valid for Vector * Matrix multiplication, in place transformation of u by Matrix v
or Vector by scalar multiplication only """
try:
return self.__class__(self.__mul__(other))
except:
return NotImplemented
# special operators
def __xor__(self, other):
""" u.__xor__(v) <==> u^v
Defines the cross product operator between two 3D vectors,
if v is a MatrixN, u^v is equivalent to u.transformAsNormal(v) """
if isinstance(other, VectorN):
return self.cross(other)
elif isinstance(other, MatrixN):
return self.transformAsNormal(other)
else:
return NotImplemented
def __ixor__(self, other):
""" u.__xor__(v) <==> u^=v
Inplace cross product or transformation by inverse transpose of v is v is a MatrixN """
try:
return self.__class__(self.__xor__(other))
except:
return NotImplemented
# wrap of other API MVector methods, we use the api method if possible and delegate to Vector else
def isEquivalent(self, other, tol=None):
""" Returns true if both arguments considered as Vector are equal within the specified tolerance """
if tol is None:
tol = _api.MVector_kTol
try:
nself, nother = coerce(self, other)
except:
return False
if isinstance(nself, Vector):
return bool(nself.apicls.isEquivalent(nself, nother, tol))
else:
return bool(super(Vector, nself).isEquivalent(nother, tol))
def isParallel(self, other, tol=None):
""" Returns true if both arguments considered as Vector are parallel within the specified tolerance """
if tol is None:
tol = _api.MVector_kTol
try:
return bool(self.apicls.isParallel(Vector(self), Vector(other), tol))
except:
return super(Vector, self).isParallel(other, tol)
def distanceTo(self, other):
try:
return self.apicls.distanceTo(Point(self), Point(other))
except:
return super(Vector, self).dist(other)
def length(self):
""" Return the length of the vector """
return Vector.apicls.length(Vector(self))
def sqlength(self):
""" Return the square length of the vector """
return self.dot(self)
def normal(self):
""" Return a normalized copy of self """
return self.__class__(Vector.apicls.normal(Vector(self)))
def normalize(self):
""" Performs an in place normalization of self """
if type(self) is Vector:
Vector.apicls.normalize(self)
else:
self.assign(self.normal())
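    # Illustrative sketch of the length related methods above (assumes dt = pymel.core.datatypes):
    #
    #     v = dt.Vector(3, 4, 0)
    #     v.length()      # 5.0
    #     v.sqlength()    # 25.0
    #     n = v.normal()  # normalized copy, v is left unchanged
    #     v.normalize()   # in place: v becomes [0.6, 0.8, 0.0]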
# additional api methods that work on Vector only, and don't have an equivalent on VectorN
def rotateTo(self, other):
""" u.rotateTo(v) --> Quaternion
Returns the Quaternion that represents the rotation of the Vector u into the Vector v
around their mutually perpendicular axis. It amounts to rotate u by angle(u, v) around axis(u, v) """
if isinstance(other, Vector):
return Quaternion(Vector.apicls.rotateTo(Vector(self), Vector(other)))
else:
raise TypeError, "%r is not a Vector instance" % other
def rotateBy(self, *args):
""" u.rotateBy(*args) --> Vector
Returns the result of rotating u by the specified arguments.
There are several ways the rotation can be specified:
args is a tuple of one Matrix, TransformationMatrix, Quaternion, EulerRotation
            args is a tuple of 4 arguments, 3 rotation values and an optional rotation order
args is a tuple of one Vector, the axis and one float, the angle to rotate around that axis in radians"""
if args:
if len(args) == 2 and isinstance(args[0], Vector):
return self.__class__(self.apicls.rotateBy(self, Quaternion(Vector(args[0]), float(args[1]))))
elif len(args) == 1 and isinstance(args[0], Matrix):
return self.__class__(self.apicls.rotateBy(self, args[0].rotate))
else:
return self.__class__(self.apicls.rotateBy(self, EulerRotation(unit='radians', *args)))
else:
return self
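    # Illustrative sketch of rotateTo / rotateBy (assumes dt = pymel.core.datatypes and
    # import math; angle values are in radians):
    #
    #     u = dt.Vector.xAxis
    #     q = u.rotateTo(dt.Vector.yAxis)              # Quaternion taking x onto y
    #     u.rotateBy(q)                                # ~ [0.0, 1.0, 0.0]
    #     u.rotateBy(dt.Vector.zAxis, math.pi / 2)     # axis / angle form, same result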
# def asUnit(self, unit) :
# #kUnit = Distance.kUnit(unit)
# return self.__class__( [ Distance(x).asUnit(unit) for x in self ] )
#
# def asUnit(self) :
# return self.asUnit(self.unit)
#
    # def asUIUnit(self) :
# return self.asUnit(Distance.getUIUnit())
#
# def asInternalUnit(self) :
# return self.asUnit(Distance.getInternalUnit())
#
# def asMillimeter(self) :
# return self.asUnit('millimeter')
# def asCentimeters(self) :
# return self.asUnit('centimeters')
# def asKilometers(self) :
# return self.asUnit('kilometers')
# def asMeters(self) :
# return self.asUnit('meters')
#
# def asInches(self) :
# return self.asUnit('inches')
# def asFeet(self) :
# return self.asUnit('feet')
# def asYards(self) :
# return self.asUnit('yards')
# def asMiles(self) :
# return self.asUnit('miles')
# additional api methods that work on Vector only, but can also be delegated to VectorN
def transformAsNormal(self, other):
""" Returns the vector transformed by the matrix as a normal
Normal vectors are not transformed in the same way as position vectors or points.
If this vector is treated as a normal vector then it needs to be transformed by
post multiplying it by the inverse transpose of the transformation matrix.
This method will apply the proper transformation to the vector as if it were a normal. """
if isinstance(other, Matrix):
return self.__class__._convert(Vector.apicls.transformAsNormal(Vector(self), Matrix(other)))
else:
return self.__class__._convert(super(Vector, self).transformAsNormal(other))
def dot(self, other):
""" dot product of two vectors """
if isinstance(other, Vector):
return Vector.apicls.__mul__(Vector(self), Vector(other))
else:
return super(Vector, self).dot(other)
def cross(self, other):
""" cross product, only defined for two 3D vectors """
if isinstance(other, Vector):
return self.__class__._convert(Vector.apicls.__xor__(Vector(self), Vector(other)))
else:
return self.__class__._convert(super(Vector, self).cross(other))
def axis(self, other, normalize=False):
""" u.axis(v) <==> angle(u, v) --> Vector
Returns the axis of rotation from u to v as the vector n = u ^ v
if the normalize keyword argument is set to True, n is also normalized """
if isinstance(other, Vector):
if normalize:
return self.__class__._convert(Vector.apicls.__xor__(Vector(self), Vector(other)).normal())
else:
return self.__class__._convert(Vector.apicls.__xor__(Vector(self), Vector(other)))
else:
return self.__class__._convert(super(Vector, self).axis(other, normalize))
def angle(self, other):
""" u.angle(v) <==> angle(u, v) --> float
Returns the angle (in radians) between the two vectors u and v
Note that this angle is not signed, use axis to know the direction of the rotation """
if isinstance(other, Vector):
return Vector.apicls.angle(Vector(self), Vector(other))
else:
return super(Vector, self).angle(other)
# methods without an api equivalent
# cotan on MVectors only takes 2 arguments
def cotan(self, other):
""" u.cotan(v) <==> cotan(u, v) --> float :
cotangent of the a, b angle, a and b should be MVectors"""
return VectorN.cotan(self, other)
# rest derived from VectorN class
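    # Illustrative sketch of the api backed vector math above (assumes dt = pymel.core.datatypes):
    #
    #     u = dt.Vector(1, 0, 0)
    #     v = dt.Vector(0, 1, 0)
    #     u.dot(v)        # 0.0
    #     u.cross(v)      # [0.0, 0.0, 1.0], same as u ^ v
    #     u.angle(v)      # ~ 1.5708 radians (pi / 2), unsigned
    #     u.axis(v)       # [0.0, 0.0, 1.0], the axis of rotation from u to v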
class FloatVector(Vector):
""" A 3 dimensional vector class that wraps Maya's api FloatVector class,
It behaves identically to Vector, but it also derives from api's FloatVector
to keep api methods happy
"""
apicls = _api.MFloatVector
# Point specific functions
def planar(p, *args, **kwargs):
""" planar(p[, q, r, s (...), tol=tolerance]) --> bool
Returns True if all provided MPoints are planar within given tolerance """
if not isinstance(p, Point):
try:
p = Point(p)
except:
raise TypeError, "%s is not convertible to type Point, planar is only defined for n MPoints" % (util.clsname(p))
return p.planar(*args, **kwargs)
def center(p, *args):
""" center(p[, q, r, s (...)]) --> Point
Returns the Point that is the center of p, q, r, s (...) """
if not isinstance(p, Point):
try:
p = Point(p)
except:
raise TypeError, "%s is not convertible to type Point, center is only defined for n MPoints" % (util.clsname(p))
return p.center(*args)
def bWeights(p, *args):
""" bWeights(p[, p0, p1, (...), pn]) --> tuple
Returns a tuple of (n0, n1, ...) normalized barycentric weights so that n0*p0 + n1*p1 + ... = p """
if not isinstance(p, Point):
try:
p = Point(p)
except:
raise TypeError, "%s is not convertible to type Point, bWeights is only defined for n MPoints" % (util.clsname(p))
return p.bWeights(*args)
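# Illustrative sketch of the module level Point helpers above
# (assumes dt = pymel.core.datatypes; results are the expected geometric values):
#
#     p, q, r = dt.Point(0, 0, 0), dt.Point(1, 0, 0), dt.Point(0, 1, 0)
#     planar(p, q, r)     # True: three points are always coplanar
#     center(p, q, r)     # their barycenter, ~ [1/3, 1/3, 0]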
class Point(Vector):
""" A 4 dimensional vector class that wraps Maya's api Point class,
"""
apicls = _api.MPoint
cnames = ('x', 'y', 'z', 'w')
shape = (4,)
def __melobject__(self):
"""Special method for returning a mel-friendly representation. In this case, a cartesian 3D point """
return self.cartesian()
# # base methods are inherited from Vector
# we only show the x, y, z components on an iter
def __len__(self):
l = len(self.data)
if self.w == 1.0:
l -= 1
return l
def __iter__(self, *args, **kwargs):
""" Iterate on the api components """
l = len(self)
for c in list(self.apicls.__iter__(self.data))[:l]:
yield c
# modified operators, when adding 2 Point consider second as Vector
def __add__(self, other):
""" u.__add__(v) <==> u+v
Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),
adds v to every component of u if v is a scalar """
# prb with coerce when delegating to VectorN, either redefine coerce for Point or other fix
# if isinstance(other, Point) :
# other = Vector(other)
try:
other = Vector(other)
except:
pass
try:
return self.__class__._convert(self.apicls.__add__(self, other))
except:
return self.__class__._convert(super(Vector, self).__add__(other))
def __radd__(self, other):
""" u.__radd__(v) <==> v+u
Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),
adds v to every component of u if v is a scalar """
if isinstance(other, Point):
other = Vector(other)
try:
return self.__class__._convert(self.apicls.__radd__(self, other))
except:
return self.__class__._convert(super(Point, self).__radd__(other))
def __iadd__(self, other):
""" u.__iadd__(v) <==> u += v
In place addition of u and v, see __add__ """
try:
return self.__class__(self.__add__(other))
except:
return NotImplemented
# specific api methods
def cartesianize(self):
""" p.cartesianize() --> Point
If the point instance p is of the form P(W*x, W*y, W*z, W), for some scale factor W != 0,
then it is reset to be P(x, y, z, 1).
This will only work correctly if the point is in homogenous form or cartesian form.
If the point is in rational form, the results are not defined. """
return self.__class__(self.apicls.cartesianize(self))
def cartesian(self):
""" p.cartesian() --> Point
Returns the cartesianized version of p, without changing p. """
t = copy.deepcopy(self)
self.apicls.cartesianize(t)
return t
def rationalize(self):
""" p.rationalize() --> Point
If the point instance p is of the form P(W*x, W*y, W*z, W) (ie. is in homogenous or (for W==1) cartesian form),
for some scale factor W != 0, then it is reset to be P(x, y, z, W).
This will only work correctly if the point is in homogenous or cartesian form.
If the point is already in rational form, the results are not defined. """
return self.__class__(self.apicls.rationalize(self))
def rational(self):
""" p.rational() --> Point
Returns the rationalized version of p, without changing p. """
t = copy.deepcopy(self)
self.apicls.rationalize(t)
return t
def homogenize(self):
""" p.homogenize() --> Point
If the point instance p is of the form P(x, y, z, W) (ie. is in rational or (for W==1) cartesian form),
for some scale factor W != 0, then it is reset to be P(W*x, W*y, W*z, W). """
return self.__class__(self.apicls.homogenize(self))
def homogen(self):
""" p.homogen() --> Point
Returns the homogenized version of p, without changing p. """
t = copy.deepcopy(self)
self.apicls.homogenize(t)
return t
    # additional methods
def isEquivalent(self, other, tol=None):
""" Returns true if both arguments considered as Point are equal within the specified tolerance """
if tol is None:
tol = _api.MPoint_kTol
try:
nself, nother = coerce(self, other)
except:
return False
if isinstance(nself, Point):
return bool(nself.apicls.isEquivalent(nself, nother, tol))
else:
return bool(super(Point, nself).isEquivalent(nother, tol))
def axis(self, start, end, normalize=False):
""" a.axis(b, c) --> Vector
Returns the axis of rotation from point b to c around a as the vector n = (b-a)^(c-a)
if the normalize keyword argument is set to True, n is also normalized """
return Vector.axis(start - self, end - self, normalize=normalize)
def angle(self, start, end):
""" a.angle(b, c) --> float
Returns the angle (in radians) of rotation from point b to c around a.
Note that this angle is not signed, use axis to know the direction of the rotation """
return Vector.angle(start - self, end - self)
def cotan(self, start, end):
""" a.cotan(b, c) --> float :
cotangent of the (b-a), (c-a) angle, a, b, and c should be MPoints representing points a, b, c"""
return VectorN.cotan(start - self, end - self)
def planar(self, *args, **kwargs):
""" p.planar(q, r, s (...), tol=tolerance) --> bool
Returns True if all provided points are planar within given tolerance """
if len(args) > 2:
tol = kwargs.get('tol', None)
n = (args[0] - self) ^ (args[1] - self)
return reduce(operator.and_, map(lambda x: n.isParallel(x, tol), [(args[0] - self) ^ (a - self) for a in args[2:]]), True)
else:
return True
def center(self, *args):
""" p.center(q, r, s (...)) --> Point
Returns the Point that is the center of p, q, r, s (...) """
return sum((self,) + args) / float(len(args) + 1)
def bWeights(self, *args):
""" p.bWeights(p0, p1, (...), pn) --> tuple
Returns a tuple of (n0, n1, ...) normalized barycentric weights so that n0*p0 + n1*p1 + ... = p.
This method works for n points defining a concave or convex n sided face,
always returns positive normalized weights, and is continuous on the face limits (on the edges),
but the n points must be coplanar, and p must be inside the face delimited by (p0, ..., pn) """
if args:
p = self
q = list(args)
np = len(q)
w = VectorN(0.0, size=np)
weightSum = 0.0
pOnEdge = False
tol = _api.MPoint_kTol
# all args should be MPoints
for i in xrange(np):
if not isinstance(q[i], Point):
try:
q[i] = Point(q[i])
except:
raise TypeError, "cannot convert %s to Point, bWeights is defined for n MPoints" % (util.clsname(q[i]))
            # if p sits on an edge, it's a limit case and there is an easy solution,
# all weights are 0 but for the 2 edge end points
for i in xrange(np):
next = (i + 1) % np
e = ((q[next] - q[i]) ^ (p - q[i])).sqlength()
l = (q[next] - q[i]).sqlength()
if e <= (tol * l):
if l < tol:
# p is on a 0 length edge, point and next point are on top of each other, as is p then
w[i] = 0.5
w[next] = 0.5
else:
# p is somewhere on that edge between point and next point
di = (p - q[i]).length()
w[next] = float(di / sqrt(l))
w[i] = 1.0 - w[next]
# in both case update the weights sum and mark p as being on an edge,
# problem is solved
weightSum += 1.0
pOnEdge = True
break
# If p not on edge, use the cotangents method
if not pOnEdge:
for i in xrange(np):
prev = (i + np - 1) % np
next = (i + 1) % np
lenSq = (p - q[i]).sqlength()
w[i] = (q[i].cotan(p, q[prev]) + q[i].cotan(p, q[next])) / lenSq
weightSum += w[i]
# then normalize result
if abs(weightSum):
w /= weightSum
else:
raise ValueError, "failed to compute bWeights for %s and %s.\nThe point bWeights are computed for must be inside the planar face delimited by the n argument points" % (self, args)
return tuple(w)
else:
return ()
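    # Illustrative sketch of the barycentric weights computed above
    # (assumes dt = pymel.core.datatypes):
    #
    #     p0 = dt.Point(0, 0, 0)
    #     p1 = dt.Point(2, 0, 0)
    #     p = dt.Point(1, 0, 0)          # halfway along the p0-p1 edge
    #     p.bWeights(p0, p1)             # ~ (0.5, 0.5) : p == 0.5*p0 + 0.5*p1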
class FloatPoint(Point):
""" A 4 dimensional vector class that wraps Maya's api FloatPoint class,
It behaves identically to Point, but it also derives from api's FloatPoint
to keep api methods happy
"""
apicls = _api.MFloatPoint
class Color(Vector):
""" A 4 dimensional vector class that wraps Maya's api Color class,
It stores the r, g, b, a components of the color, as normalized (Python) floats
"""
apicls = _api.MColor
cnames = ('r', 'g', 'b', 'a')
shape = (4,)
# modes = ('rgb', 'hsv', 'cmy', 'cmyk')
modes = ('rgb', 'hsv')
# constants
red = _api.MColor(1.0, 0.0, 0.0)
green = _api.MColor(0.0, 1.0, 0.0)
blue = _api.MColor(0.0, 0.0, 1.0)
white = _api.MColor(1.0, 1.0, 1.0)
black = _api.MColor(0.0, 0.0, 0.0)
opaque = _api.MColor(0.0, 0.0, 0.0, 1.0)
clear = _api.MColor(0.0, 0.0, 0.0, 0.0)
# static methods
@staticmethod
def rgbtohsv(c):
c = tuple(c)
return tuple(colorsys.rgb_to_hsv(*clamp(c[:3])) + c[3:4])
@staticmethod
def hsvtorgb(c):
c = tuple(c)
# return colorsys.hsv_to_rgb(clamp(c[0]), clamp(c[1]), clamp(c[2]))
return tuple(colorsys.hsv_to_rgb(*clamp(c[:3])) + c[3:4])
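    # Illustrative sketch of the rgb <-> hsv helpers above (plain colorsys math on
    # normalized floats; the alpha component is passed through unchanged):
    #
    #     Color.rgbtohsv((0.0, 0.0, 1.0, 1.0))       # ~ (0.667, 1.0, 1.0, 1.0) : pure blue
    #     Color.hsvtorgb((0.667, 1.0, 1.0, 1.0))     # ~ (0.0, 0.0, 1.0, 1.0)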
# TODO : could define rgb and hsv iterators and allow __setitem__ and __getitem__ on these iterators
# like (it's more simple) it's done in ArrayIter
def _getrgba(self):
return tuple(self)
def _setrgba(self, value):
if not hasattr(value, '__iter__'):
# the way api interprets a single value
# value = (None, None, None, value)
value = (value,) * 4
l = list(self)
for i, v in enumerate(value[:4]):
if v is not None:
l[i] = float(v)
self.assign(*l)
rgba = property(_getrgba, _setrgba, None, "The r,g,b,a Color components""")
def _getrgb(self):
return self.rgba[:3]
def _setrgb(self, value):
if not hasattr(value, '__iter__'):
value = (value,) * 3
self.rgba = value[:3]
rgb = property(_getrgb, _setrgb, None, "The r,g,b Color components""")
def _gethsva(self):
return tuple(Color.rgbtohsv(self))
def _sethsva(self, value):
if not hasattr(value, '__iter__'):
# the way api interprets a single value
# value = (None, None, None, value)
value = (value,) * 4
l = list(Color.rgbtohsv(self))
for i, v in enumerate(value[:4]):
if v is not None:
l[i] = float(v)
        self.assign(*Color.hsvtorgb(l))
hsva = property(_gethsva, _sethsva, None, "The h,s,v,a Color components""")
def _gethsv(self):
return tuple(Color.rgbtohsv(self))[:3]
def _sethsv(self, value):
if not hasattr(value, '__iter__'):
value = (value,) * 3
self.hsva = value[:3]
    hsv = property(_gethsv, _sethsv, None, "The h,s,v Color components""")
def _geth(self):
return self.hsva[0]
def _seth(self, value):
self.hsva = (value, None, None, None)
h = property(_geth, _seth, None, "The h Color component""")
def _gets(self):
return self.hsva[1]
def _sets(self, value):
self.hsva = (None, value, None, None)
s = property(_gets, _sets, None, "The s Color component""")
def _getv(self):
return self.hsva[2]
def _setv(self, value):
self.hsva = (None, None, value, None)
v = property(_getv, _setv, None, "The v Color component""")
# __new__ is herited from Point/Vector, need to override __init__ to accept hsv mode though
def __init__(self, *args, **kwargs):
""" Init a Color instance
Can pass one argument being another Color instance , or the color components """
cls = self.__class__
mode = kwargs.get('mode', None)
if mode is not None and mode not in cls.modes:
raise ValueError, "unknown mode %s for %s" % (mode, util.clsname(self))
# can also use the form <componentname>=<number>
# for now supports only rgb and hsv flags
hsvflag = {}
rgbflag = {}
for a in 'hsv':
if a in kwargs:
hsvflag[a] = kwargs[a]
for a in 'rgb':
if a in kwargs:
rgbflag[a] = kwargs[a]
# can't mix them
if hsvflag and rgbflag:
raise ValueError, "can not mix r,g,b and h,s,v keyword arguments in a %s declaration" % util.clsname(self)
        # if no mode specified, guess from what keyword arguments were used, else use 'rgb' as default
if mode is None:
if hsvflag:
mode = 'hsv'
else:
mode = 'rgb'
# can't specify a mode and use keywords of other modes
        if mode != 'hsv' and hsvflag:
raise ValueError, "Can not use h,s,v keyword arguments while specifying %s mode in %s" % (mode, util.clsname(self))
        elif mode != 'rgb' and rgbflag:
raise ValueError, "Can not use r,g,b keyword arguments while specifying %s mode in %s" % (mode, util.clsname(self))
# NOTE: do not try to use mode with _api.Color, it seems bugged as of 2008
#import colorsys
#colorsys.rgb_to_hsv(0.0, 0.0, 1.0)
## Result: (0.66666666666666663, 1.0, 1.0) #
#c = _api.Color(_api.Color.kHSV, 0.66666666666666663, 1.0, 1.0)
# print "# Result: ",c[0], c[1], c[2], c[3]," #"
## Result: 1.0 0.666666686535 1.0 1.0 #
#c = _api.Color(_api.Color.kHSV, 0.66666666666666663*360, 1.0, 1.0)
# print "# Result: ",c[0], c[1], c[2], c[3]," #"
## Result: 1.0 240.0 1.0 1.0 #
#colorsys.hsv_to_rgb(0.66666666666666663, 1.0, 1.0)
## Result: (0.0, 0.0, 1.0) #
# we'll use Color only to store RGB values internally and do the conversion a read/write if desired
# which I think make more sense anyway
# quantize (255, 65535, no quantize means colors are 0.0-1.0 float values)
# Initializing api's Color with int values seems also not to always behave so we quantize first and
# use a float init always
quantize = kwargs.get('quantize', None)
if quantize is not None:
try:
quantize = float(quantize)
except:
raise ValueError, "quantize must be a numeric value, not %s" % (util.clsname(quantize))
        # can be initialized with a single argument (other Color, Vector, VectorN)
if len(args) == 1:
args = args[0]
# we dont rely much on Color api as it doesn't seem totally finished, and do some things directly here
if isinstance(args, self.__class__) or isinstance(args, self.apicls):
# alternatively could be just ignored / output as warning
if quantize:
raise ValueError, "Can not quantize a Color argument, a Color is always stored internally as float color" % (mode, util.clsname(self))
if mode == 'rgb':
args = VectorN(args)
elif mode == 'hsv':
args = VectorN(cls.rgbtohsv(args))
else:
# single alpha value, as understood by api will break coerce behavior in operations
# where other operand is a scalar
# if not hasattr(args, '__iter__') :
# args = VectorN(0.0, 0.0, 0.0, args)
if hasattr(args, '__len__'):
shape = (min(len(args), cls.size),)
else:
shape = cls.shape
args = VectorN(args, shape=shape)
# quantize if needed
if quantize:
args /= quantize
# pad to a full Color size
args.stack(self[len(args):])
# apply keywords arguments, and convert if mode is not rgb
if mode == 'rgb':
if rgbflag:
for i, a in enumerate('rgb'):
if a in rgbflag:
if quantize:
args[i] = float(rgbflag[a]) / quantize
else:
args[i] = float(rgbflag[a])
elif mode == 'hsv':
if hsvflag:
for i, a in enumerate('hsv'):
if a in hsvflag:
if quantize:
args[i] = float(hsvflag[a]) / quantize
else:
args[i] = float(hsvflag[a])
args = VectorN(cls.hsvtorgb(args))
# finally alpha keyword
a = kwargs.get('a', None)
if a is not None:
if quantize:
args[-1] = float(a) / quantize
else:
args[-1] = float(a)
try:
self.assign(args)
except:
msg = ", ".join(map(lambda x, y: x + "=<" + util.clsname(y) + ">", mode, args))
raise TypeError, "in %s(%s), at least one of the components is of an invalid type, check help(%s) " % (util.clsname(self), msg, util.clsname(self))
def __melobject__(self):
"""Special method for returning a mel-friendly representation. In this case, a 3-component color (RGB) """
return [self.r, self.g, self.b]
# overriden operators
# defined for two MColors only
def __add__(self, other):
""" c.__add__(d) <==> c+d
Returns the result of the addition of MColors c and d if d is convertible to a Color,
adds d to every component of c if d is a scalar """
# prb with coerce when delegating to VectorN, either redefine coerce for Point or other fix
# if isinstance(other, Point) :
# other = Vector(other)
try:
other = Color(other)
except:
pass
try:
return self.__class__._convert(self.apicls.__add__(self, other))
except:
return self.__class__._convert(super(Vector, self).__add__(other))
def __radd__(self, other):
""" c.__radd__(d) <==> d+c
Returns the result of the addition of MColors c and d if d is convertible to a Color,
adds d to every component of c if d is a scalar """
try:
other = Color(other)
except:
pass
try:
return self.__class__._convert(self.apicls.__radd__(self, other))
except:
return self.__class__._convert(super(Point, self).__radd__(other))
def __iadd__(self, other):
""" c.__iadd__(d) <==> c += d
In place addition of c and d, see __add__ """
try:
return self.__class__(self.__add__(other))
except:
return NotImplemented
def __sub__(self, other):
""" c.__add__(d) <==> c+d
Returns the result of the substraction of Color d from c if d is convertible to a Color,
substract d from every component of c if d is a scalar """
try:
other = Color(other)
except:
pass
try:
return self.__class__._convert(self.apicls.__sub__(self, other))
except:
return self.__class__._convert(super(Vector, self).__sub__(other))
def __rsub__(self, other):
""" c.__rsub__(d) <==> d-c
            Returns the result of the subtraction of Color c from d if d is convertible to a Color,
            replaces every component c[i] of c by d-c[i] if d is a scalar """
try:
other = Color(other)
except:
pass
try:
return self.__class__._convert(self.apicls.__rsub__(self, other))
except:
return self.__class__._convert(super(Point, self).__rsub__(other))
def __isub__(self, other):
""" c.__isub__(d) <==> c -= d
            In place subtraction of d from c, see __sub__ """
try:
return self.__class__(self.__sub__(other))
except:
return NotImplemented
# action depends on second object type
# TODO : would be nice to define LUT classes and allow MColor * LUT transform
# overloaded operators
def __mul__(self, other):
""" a.__mul__(b) <==> a*b
If b is a 1D sequence (Array, VectorN, Color), __mul__ is mapped to element-wise multiplication,
If b is a MatrixN, __mul__ is similar to Point a by MatrixN b multiplication (post multiplication or transformation of a by b),
multiplies every component of a by b if b is a single numeric value """
if isinstance(other, MatrixN):
# will defer to MatrixN rmul
return NotImplemented
else:
# will defer to Array.__mul__
return Array.__mul__(self, other)
def __rmul__(self, other):
""" a.__rmul__(b) <==> b*a
If b is a 1D sequence (Array, VectorN, Color), __mul__ is mapped to element-wise multiplication,
If b is a MatrixN, __mul__ is similar to MatrixN b by Point a matrix multiplication,
multiplies every component of a by b if b is a single numeric value """
if isinstance(other, MatrixN):
# will defer to MatrixN mul
return NotImplemented
else:
# will defer to Array.__rmul__
return Array.__rmul__(self, other)
def __imul__(self, other):
""" a.__imul__(b) <==> a *= b
In place multiplication of VectorN a and b, see __mul__, result must fit a's type """
res = self * other
if isinstance(res, self.__class__):
return self.__class__(res)
else:
raise TypeError, "result of in place multiplication of %s by %s is not a %s" % (clsname(self), clsname(other), clsname(self))
    # additional methods, to be extended
def over(self, other):
""" c1.over(c2): Composites c1 over other c2 using c1's alpha, the resulting color has the alpha of c2 """
if isinstance(other, Color):
a = self.a
return Color(Vector(other).blend(Vector(self), self.a), a=other.a)
else:
raise TypeError, "over is defined for Color instances, not %s" % (util.clsname(other))
# return Vector instead ? Keeping alpha doesn't make much sense
def premult(self):
""" Premultiply Color r, g and b by it's alpha and resets alpha to 1.0 """
return self.__class__(Vector(self) * self.a)
def gamma(self, g):
""" c.gamma(g) applies gamma correction g to Color c, g can be a scalar and then will be applied to r, g, b
            or an iterable of up to 3 (r, g, b) independent gamma correction values """
if not hasattr(g, '__iter__'):
g = (g,) * 3 + (1.0,)
else:
g = g[:3] + (1.0,) * (4 - len(g[:3]))
return gamma(self, g)
def hsvblend(self, other, weight=0.5):
""" c1.hsvblend(c2) --> Color
Returns the result of blending c1 with c2 in hsv space, using the given weight """
c1 = list(self.hsva)
c2 = list(other.hsva)
if abs(c2[0] - c1[0]) >= 0.5:
if abs(c2[0] - c1[0]) == 0.5:
c1[1], c2[1] = 0.0, 0.0
if c1[0] > 0.5:
c1[0] -= 1.0
if c2[0] > 0.5:
c2[0] -= 1.0
c = blend(c1, c2, weight=weight)
if c[0] < 0.0:
c[0] += 1.0
return self.__class__(c, mode='hsv')
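    # Illustrative sketch of the extra Color operations above (assumes dt = pymel.core.datatypes):
    #
    #     c1 = dt.Color(1.0, 0.0, 0.0, 0.5)      # half transparent red
    #     c2 = dt.Color(0.0, 0.0, 1.0, 1.0)      # opaque blue
    #     c1.over(c2)        # red composited over blue using c1's alpha, keeps c2's alpha
    #     c1.premult()       # ~ dt.Color(0.5, 0.0, 0.0, 1.0)
    #     c1.gamma(2.2)      # applies the same gamma to r, g and b, leaves alpha alone
    #     c1.hsvblend(c2)    # halfway blend done in hsv space rather than rgb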
# to specify space of transforms
class Space(_api.MSpace):
apicls = _api.MSpace
__metaclass__ = _factories.MetaMayaTypeWrapper
pass
Spaces = Space.Space
def equivalentSpace(space1, space2, rotationOnly=False):
'''Compare the two given space values to see if they are equal
Parameters
----------
space1 : int or str
the first space to compare (may be either the integer enum value, or the
api enum name - ie, "kPostTransform" - or the pymel enum name - ie,
"postTransform" )
space2 : int or str
        the second space to compare (may be either the integer enum value, or
the api enum name - ie, "kPostTransform" - or the pymel enum name - ie,
"postTransform")
rotationOnly : bool
If true, then compare the spaces, assuming we are only considering
rotation - in rotation, transform is the same as preTransform/object
(the reason being that in maya, preTransform means rotation +
translation are both defined in the preTransform/object coordinate
system, while transform means rotation is defined in preTransform/object
coordinates, while translate is given in the postTransform space...
which matches the way maya applies transforms)
'''
translated = []
for space in space1, space2:
space = _factories.ApiArgUtil.castInputEnum('MSpace', 'Space', space)
if rotationOnly:
# for the purposes of rotations, maya treats transform and
# preTransform/object as the same (the reason being that in maya,
# preTransform means both rotation + translation are both defined in
# the preTransform/object coordinate system, while transform means
# rotation is defined in preTransform/object coordinates, while
# translate is given in the postTransform space... which matches the
# way maya applies transforms)
if space == _api.MSpace.kTransform:
space = _api.MSpace.kPreTransform
        translated.append(space)
    return translated[0] == translated[1]
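# Illustrative sketch of equivalentSpace (assumes a running Maya session; spaces may be
# given as int enum values, api names like 'kObject' or pymel names like 'object'):
#
#     equivalentSpace('object', 'preTransform')                   # True, same api value
#     equivalentSpace('transform', 'object')                      # False
#     equivalentSpace('transform', 'object', rotationOnly=True)   # True for rotations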
# kInvalid
# kTransform
# Transform matrix (relative) space
# kPreTransform
# Pre-transform matrix (geometry)
# kPostTransform
# Post-transform matrix (world) space
# kWorld
# transform in world space
# kObject
# Same as pre-transform space
# kLast
# sadly TransformationMatrix.RotationOrder and EulerRotation.RotationOrder don't match
# class MRotationOrder(int):
# pass
# kInvalid
# kXYZ
# kYZX
# kZXY
# kXZY
# kYXZ
# kZYX
# kLast
# kXYZ
# kYZX
# kZXY
# kXZY
# kYXZ
# kZYX
# functions that work on MatrixN (det(), inv(), ...) inherited from arrays
# and properly defer to the class methods
# For row, column order, see the definition of a TransformationMatrix in docs :
# T = | 1 0 0 0 |
# | 0 1 0 0 |
# | 0 0 1 0 |
# | tx ty tz 1 |
# and m(r, c) should return value of cell at r row and c column :
# t = _api.TransformationMatrix()
# t.setTranslation(_api.Vector(1, 2, 3), _api.MSpace.kWorld)
# m = t.asMatrix()
# mm(3,0)
# 1.0
# mm(3,1)
# 2.0
# mm(3,2)
# 3.0
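# Illustrative sketch of the row major layout described above, seen through the Matrix
# wrapper defined below (assumes a running Maya session and dt = pymel.core.datatypes):
#
#     t = _api.MTransformationMatrix()
#     t.setTranslation(_api.MVector(1, 2, 3), _api.MSpace.kWorld)
#     m = dt.Matrix(t.asMatrix())
#     m[3]        # ~ [1.0, 2.0, 3.0, 1.0] : the last row carries the translation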
class Matrix(MatrixN):
"""
A 4x4 transformation matrix based on api Matrix
>>> from pymel.all import *
>>> import pymel.core.datatypes as dt
>>>
>>> i = dt.Matrix()
>>> print i.formated()
[[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]]
>>> v = dt.Matrix(1, 2, 3)
>>> print v.formated()
[[1.0, 2.0, 3.0, 0.0],
[1.0, 2.0, 3.0, 0.0],
[1.0, 2.0, 3.0, 0.0],
[1.0, 2.0, 3.0, 0.0]]
"""
__metaclass__ = MetaMayaArrayTypeWrapper
apicls = _api.MMatrix
shape = (4, 4)
cnames = ('a00', 'a01', 'a02', 'a03',
'a10', 'a11', 'a12', 'a13',
'a20', 'a21', 'a22', 'a23',
'a30', 'a31', 'a32', 'a33')
# constants
identity = _api.MMatrix()
def __new__(cls, *args, **kwargs):
shape = kwargs.get('shape', None)
ndim = kwargs.get('ndim', None)
size = kwargs.get('size', None)
# will default to class constant shape = (4, 4), so it's just an error check to catch invalid shapes,
# as no other option is actually possible on Matrix, but this method could be used to allow wrapping
# of Maya array classes that can have a variable number of elements
shape, ndim, size = cls._expandshape(shape, ndim, size)
new = cls.apicls.__new__(cls)
cls.apicls.__init__(new)
return new
def __init__(self, *args, **kwargs):
""" __init__ method, valid for Vector, Point and Color classes """
cls = self.__class__
if args:
# allow both forms for arguments
if len(args) == 1 and hasattr(args[0], '__iter__'):
args = args[0]
# shape = kwargs.get('shape', None)
# ndim = kwargs.get('ndim', None)
# size = kwargs.get('size', None)
# if shape is not None or ndim is not None or size is not None :
# shape, ndim, size = cls._expandshape(shape, ndim, size)
# args = MatrixN(args, shape=shape, ndim=ndim, size=size)
# shortcut when a direct api init is possible
try:
self.assign(args)
except:
super(MatrixN, self).__init__(*args)
# value = list(Matrix(value, shape=self.shape).flat)
# data = self.apicls()
# _api.MScriptUtil.createMatrixFromList ( value, data )
if hasattr(cls, 'cnames') and len(set(cls.cnames) & set(kwargs)):
# can also use the form <componentname>=<number>
l = list(self.flat)
setcomp = False
for i, c in enumerate(cls.cnames):
if c in kwargs:
if float(l[i]) != float(kwargs[c]):
l[i] = float(kwargs[c])
setcomp = True
if setcomp:
try:
self.assign(l)
except:
msg = ", ".join(map(lambda x, y: x + "=<" + util.clsname(y) + ">", cls.cnames, l))
raise TypeError, "in %s(%s), at least one of the components is of an invalid type, check help(%s) " % (cls.__name__, msg, cls.__name__)
# for compatibility with base classes Array that actually hold a nested list in their _data attribute
# here, there is no _data attribute as we subclass _api.Vector directly, thus v.data is v
# for wraps
def _getdata(self):
return self
def _setdata(self, value):
self.assign(value)
def _deldata(self):
if hasattr(self.apicls, 'clear'):
self.apicls.clear(self)
else:
raise TypeError, "cannot clear stored elements of %s" % (self.__class__.__name__)
data = property(_getdata, _setdata, _deldata, "The Matrix/FloatMatrix/TransformationMatrix/Quaternion/EulerRotation data")
    # set properties for easy access to translation / rotation / scale of a Matrix or derived class
    # some of these will only yield dependable results if Matrix is a TransformationMatrix and some
    # will always be zero for some classes (ie only rotation has a value on a Quaternion)
def _getTranslate(self):
t = TransformationMatrix(self)
return Vector(t.getTranslation(_api.MSpace.kTransform))
def _setTranslate(self, value):
t = TransformationMatrix(self)
t.setTranslation(Vector(value), _api.MSpace.kTransform)
self.assign(t.asMatrix())
translate = property(_getTranslate, _setTranslate, None, "The translation expressed in this Matrix, in transform space")
def _getRotate(self):
t = TransformationMatrix(self)
return Quaternion(t.apicls.rotation(t))
def _setRotate(self, value):
t = TransformationMatrix(self)
q = Quaternion(value)
t.rotateTo(q)
# values = (q.x, q.y, q.z, q.w)
# t.setRotationQuaternion(q.x, q.y, q.z, q.w)
self.assign(t.asMatrix())
rotate = property(_getRotate, _setRotate, None, "The rotation expressed in this Matrix, in transform space")
def _getScale(self):
t = TransformationMatrix(self)
return Vector(t.getScale(_api.MSpace.kTransform))
def _setScale(self, value):
t = TransformationMatrix(self)
t.setScale(value, _api.MSpace.kTransform)
self.assign(t.asMatrix())
scale = property(_getScale, _setScale, None, "The scale expressed in this Matrix, in transform space")
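    # Illustrative sketch of the translate / rotate / scale properties above
    # (assumes dt = pymel.core.datatypes):
    #
    #     m = dt.Matrix()
    #     m.translate = dt.Vector(1, 2, 3)
    #     m.translate     # ~ dt.Vector([1.0, 2.0, 3.0])
    #     m.rotate        # the rotation part as a Quaternion, identity here
    #     m.scale         # ~ dt.Vector([1.0, 1.0, 1.0])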
def __melobject__(self):
"""Special method for returning a mel-friendly representation. In this case, a flat list of 16 values """
return [x for x in self.flat]
# some Matrix derived classes can actually be represented as matrix but not stored
# internally as such by the API
def asMatrix(self, percent=None):
"The matrix representation for this Matrix/TransformationMatrix/Quaternion/EulerRotation instance"
if percent is not None and percent != 1.0:
if type(self) is not TransformationMatrix:
self = TransformationMatrix(self)
return Matrix(self.apicls.asMatrix(self, percent))
else:
if type(self) is Matrix:
return self
else:
return Matrix(self.apicls.asMatrix(self))
matrix = property(asMatrix, None, None, "The Matrix representation for this Matrix/TransformationMatrix/Quaternion/EulerRotation instance")
# overloads for assign and get though standard way should be to use the data property
# to access stored values
def assign(self, value):
# don't accept instances as assign works on exact _api.Matrix type
data = None
if type(value) == self.apicls or type(value) == type(self):
data = value
elif hasattr(value, 'asMatrix'):
data = value.asMatrix()
else:
value = list(MatrixN(value).flat)
if len(value) == self.size:
data = self.apicls()
if isinstance(data, _api.MFloatMatrix):
_api.MScriptUtil.createFloatMatrixFromList(value, data)
elif isinstance(data, _api.MMatrix):
_api.MScriptUtil.createMatrixFromList(value, data)
else:
tmp = _api.MMatrix()
_api.MScriptUtil.createMatrixFromList(value, tmp)
data = self.apicls(tmp)
else:
raise TypeError, "cannot assign %s to a %s" % (value, util.clsname(self))
self.apicls.assign(self, data)
return self
# API get, actually not faster than pulling self[i] for such a short structure
def get(self):
""" Wrap the Matrix api get method """
mat = self.matrix
return tuple(tuple(_api.MScriptUtil.getDoubleArrayItem(_api.MMatrix.__getitem__(mat, r), c) for c in xrange(Matrix.shape[1])) for r in xrange(Matrix.shape[0]))
# ptr = _api.Matrix(self.matrix).matrix
# return tuple(tuple(_api.MScriptUtil.getDouble2ArrayItem ( ptr, r, c) for c in xrange(Matrix.shape[1])) for r in xrange(Matrix.shape[0]))
def __len__(self):
""" Number of components in the Matrix instance """
return self.apicls.__len__(self)
# iterator override
    # TODO : support for optional __iter__ arguments
def __iter__(self, *args, **kwargs):
""" Iterate on the Matrix rows """
return self.apicls.__iter__(self.data)
    # contains is inherited from Array contains
# __getitem__ / __setitem__ override
def __getitem__(self, index):
""" m.__getitem__(index) <==> m[index]
Get component index value from self.
index can be a single numeric value or slice, thus one or more rows will be returned,
or a row,column tuple of numeric values / slices """
m = MatrixN(self)
# print list(m)
return m.__getitem__(index)
# return super(MatrixN, self).__getitem__(index)
# deprecated and __getitem__ should accept slices anyway
def __getslice__(self, start, end):
return self.__getitem__(slice(start, end))
# as _api.Matrix has no __setitem__ method
def __setitem__(self, index, value):
""" m.__setitem__(index, value) <==> m[index] = value
Set value of component index on self
index can be a single numeric value or slice, thus one or more rows will be returned,
or a row,column tuple of numeric values / slices """
m = MatrixN(self)
m.__setitem__(index, value)
self.assign(m)
# deprecated and __setitem__ should accept slices anyway
def __setslice__(self, start, end, value):
self.__setitem__(slice(start, end), value)
def __delitem__(self, index):
""" Cannot delete from a class with a fixed shape """
raise TypeError, "deleting %s from an instance of class %s will make it incompatible with class shape" % (index, clsname(self))
def __delslice__(self, start, end):
self.__delitem__(slice(start, end))
# TODO : wrap double Matrix:: operator() (unsigned int row, unsigned int col ) const
    # common operators inherited from MatrixN
# operators using the Maya API when applicable
def __eq__(self, other):
""" m.__eq__(v) <==> m == v
Equivalence test """
try:
return bool(self.apicls.__eq__(self, other))
except:
return bool(super(Matrix, self).__eq__(other))
def __ne__(self, other):
""" m.__ne__(v) <==> m != v
Equivalence test """
return (not self.__eq__(other))
def __neg__(self):
""" m.__neg__() <==> -m
The unary minus operator. Negates the value of each of the components of m """
return self.__class__(self.apicls.__neg__(self))
def __add__(self, other):
""" m.__add__(v) <==> m+v
Returns the result of the addition of m and v if v is convertible to a MatrixN (element-wise addition),
adds v to every component of m if v is a scalar """
try:
return self.__class__._convert(self.apicls.__add__(self, other))
except:
return self.__class__._convert(super(Matrix, self).__add__(other))
def __radd__(self, other):
""" m.__radd__(v) <==> v+m
Returns the result of the addition of m and v if v is convertible to a MatrixN (element-wise addition),
adds v to every component of m if v is a scalar """
try:
return self.__class__._convert(self.apicls.__radd__(self, other))
except:
return self.__class__._convert(super(Matrix, self).__radd__(other))
def __iadd__(self, other):
""" m.__iadd__(v) <==> m += v
In place addition of m and v, see __add__ """
try:
return self.__class__(self.__add__(other))
except:
return NotImplemented
def __sub__(self, other):
""" m.__sub__(v) <==> m-v
            Returns the result of the subtraction of v from m if v is convertible to a MatrixN (element-wise subtraction),
            subtracts v from every component of m if v is a scalar """
try:
return self.__class__._convert(self.apicls.__sub__(self, other))
except:
return self.__class__._convert(super(Matrix, self).__sub__(other))
def __rsub__(self, other):
""" m.__rsub__(v) <==> v-m
            Returns the result of the subtraction of m from v if v is convertible to a MatrixN (element-wise subtraction),
            replaces every component c of m by v-c if v is a scalar """
try:
return self.__class__._convert(self.apicls.__rsub__(self, other))
except:
return self.__class__._convert(super(Matrix, self).__rsub__(other))
def __isub__(self, other):
""" m.__isub__(v) <==> m -= v
            In place subtraction of m and v, see __sub__ """
try:
return self.__class__(self.__sub__(other))
except:
return NotImplemented
# action depends on second object type
def __mul__(self, other):
""" m.__mul__(x) <==> m*x
If x is a MatrixN, __mul__ is mapped to matrix multiplication m*x, if x is a VectorN, to MatrixN by VectorN multiplication.
Otherwise, returns the result of the element wise multiplication of m and x if x is convertible to Array,
multiplies every component of b by x if x is a single numeric value """
try:
return self.__class__._convert(self.apicls.__mul__(self, other))
except:
return self.__class__._convert(super(Matrix, self).__mul__(other))
def __rmul__(self, other):
""" m.__rmul__(x) <==> x*m
If x is a MatrixN, __rmul__ is mapped to matrix multiplication x*m, if x is a VectorN (or Vector or Point or Color),
to transformation, ie VectorN by MatrixN multiplication.
Otherwise, returns the result of the element wise multiplication of m and x if x is convertible to Array,
multiplies every component of m by x if x is a single numeric value """
try:
return self.__class__._convert(self.apicls.__rmul__(self, other))
except:
return self.__class__._convert(super(Matrix, self).__rmul__(other))
def __imul__(self, other):
""" m.__imul__(n) <==> m *= n
Valid for Matrix * Matrix multiplication, in place multiplication of MatrixN m by MatrixN n """
try:
return self.__class__(self.__mul__(other))
except:
return NotImplemented
# __xor__ will defer to Vector __xor__
# API added methods
def setToIdentity(self):
""" m.setToIdentity() <==> m = a * b
Sets MatrixN to the identity matrix """
try:
self.apicls.setToIdentity(self)
except:
self.assign(self.__class__())
return self
def setToProduct(self, left, right):
""" m.setToProduct(a, b) <==> m = a * b
Sets MatrixN to the result of the product of MatrixN a and MatrixN b """
try:
self.apicls.setToProduct(self.__class__(left), self.__class__(right))
except:
self.assign(self.__class__(self.__class__(left) * self.__class__(right)))
return self
def transpose(self):
""" Returns the transposed Matrix """
try:
return self.__class__._convert(self.apicls.transpose(self))
except:
return self.__class__._convert(super(Matrix, self).transpose())
def inverse(self):
""" Returns the inverse Matrix """
try:
return self.__class__._convert(self.apicls.inverse(self))
except:
return self.__class__._convert(super(Matrix, self).inverse())
def adjoint(self):
""" Returns the adjoint (adjugate) Matrix """
try:
return self.__class__._convert(self.apicls.adjoint(self))
except:
return self.__class__._convert(super(Matrix, self).adjugate())
def homogenize(self):
""" Returns a homogenized version of the Matrix """
try:
return self.__class__._convert(self.apicls.homogenize(self))
except:
return self.__class__._convert(super(Matrix, self).homogenize())
def det(self):
""" Returns the determinant of this Matrix instance """
try:
return self.apicls.det4x4(self)
except:
return super(Matrix, self).det()
def det4x4(self):
""" Returns the 4x4 determinant of this Matrix instance """
try:
return self.apicls.det4x4(self)
except:
return super(Matrix, self[:4, :4]).det()
def det3x3(self):
""" Returns the determinant of the upper left 3x3 submatrix of this Matrix instance,
it's the same as doing det(m[0:3, 0:3]) """
try:
return self.apicls.det3x3(self)
except:
return super(Matrix, self[:3, :3]).det()
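    # Illustrative sketch of the matrix algebra wrappers above (assumes dt = pymel.core.datatypes):
    #
    #     m = dt.Matrix()        # identity
    #     m.det()                # 1.0
    #     m.det3x3()             # 1.0, determinant of the upper left 3x3 submatrix
    #     m.transpose()          # identity again
    #     m.inverse()            # identity is its own inverse
    #     m.isSingular()         # False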
def isEquivalent(self, other, tol=_api.MVector_kTol):
""" Returns true if both arguments considered as Matrix are equal within the specified tolerance """
try:
nself, nother = coerce(self, other)
except:
return False
if isinstance(nself, Matrix):
return bool(nself.apicls.isEquivalent(nself, nother, tol))
else:
return bool(super(MatrixN, nself).isEquivalent(nother, tol))
def isSingular(self):
""" Returns True if the given Matrix is singular """
try:
return bool(self.apicls.isSingular(self))
except:
return super(MatrixN, self).isSingular()
    # additional methods
def blend(self, other, weight=0.5):
""" Returns a 0.0-1.0 scalar weight blend between self and other Matrix,
blend mixes Matrix as transformation matrices """
if isinstance(other, Matrix):
return self.__class__(self.weighted(1.0 - weight) * other.weighted(weight))
else:
return blend(self, other, weight=weight)
def weighted(self, weight):
""" Returns a 0.0-1.0 scalar weighted blend between identity and self """
if type(self) is not TransformationMatrix:
self = TransformationMatrix(self)
return self.__class__._convert(self.asMatrix(weight))
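# Illustrative sketch (not executed here) of how blend() and weighted() are typically
# combined; assumes a running Maya session and uses made-up values:
# >>> m = Matrix()                             # identity
# >>> t = TransformationMatrix(m)
# >>> t.translate = (2.0, 0.0, 0.0)
# >>> half = m.blend(Matrix(t), 0.5)           # halfway between the two transforms
# >>> quarter = Matrix(t).weighted(0.25)       # 25% of the way from identity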
class FloatMatrix(Matrix):
""" A 4x4 matrix class that wraps Maya's api FloatMatrix class,
It behaves identically to Matrix, but it also derives from api's FloatMatrix
to keep api methods happy
"""
apicls = _api.MFloatMatrix
class Quaternion(Matrix):
apicls = _api.MQuaternion
shape = (4,)
cnames = ('x', 'y', 'z', 'w')
def __new__(cls, *args, **kwargs):
shape = kwargs.get('shape', None)
ndim = kwargs.get('ndim', None)
size = kwargs.get('size', None)
# will default to class constant shape = (4,), so it's just an error check to catch invalid shapes,
# as no other option is actually possible on Quaternion, but this method could be used to allow wrapping
# of Maya array classes that can have a variable number of elements
shape, ndim, size = cls._expandshape(shape, ndim, size)
new = cls.apicls.__new__(cls)
cls.apicls.__init__(new)
return new
def __init__(self, *args, **kwargs):
""" __init__ method for Quaternion """
cls = self.__class__
def isVectorLike(x):
return isinstance(x, (_api.MVector, Vector)) \
or hasattr(x, '__len__') and len(x) == 3
if args:
# allow both forms for arguments
if len(args) == 1 and hasattr(args[0], '__iter__') \
and not isinstance(args[0], (_api.MQuaternion, Quaternion)):
args = args[0]
rotate = getattr(args, 'rotate', None)
# TransformationMatrix, Quaternion, EulerRotation api classes can convert to a rotation Quaternion
if rotate is not None and not callable(rotate):
args = args.rotate
self.unit = 'radians'
elif len(args) == 4 and isinstance(args[3], (basestring, util.EnumValue)): # isinstance(args[3], EulerRotation.RotationOrder) ) :
quat = _api.MQuaternion()
quat.assign(EulerRotation(*args, **kwargs))
args = quat
# allow to initialize directly from 3 rotations and a rotation order
# axis-angle - want to authorize
# Quaternion(Vector axis, float angle) as well as Quaternion(float angle, Vector axis)
elif len(args) == 2 and isVectorLike(args[0]) and isinstance(args[1], (int, float)):
args = (args[1], Vector(args[0]))
elif len(args) == 2 and isinstance(args[0], (int, float)) and isVectorLike(args[1]):
args = (args[0], Vector(args[1]))
# rotate vector-to-vector
elif len(args) == 2 and isVectorLike(args[0]) and isVectorLike(args[1]):
args = (Vector(args[0]), Vector(args[1]))
# rotate vector-to-vector, with scalar factor
elif len(args) == 3 and isVectorLike(args[0]) and isVectorLike(args[1]) \
and isinstance(args[2], (int, float)):
args = (Vector(args[0]), Vector(args[1]), args[2])
# shortcut when a direct api init is possible
try:
self.assign(args)
except:
super(Array, self).__init__(*args)
if hasattr(cls, 'cnames') and len(set(cls.cnames) & set(kwargs)):
# can also use the form <componentname>=<number>
l = list(self.flat)
setcomp = False
for i, c in enumerate(cls.cnames):
if c in kwargs:
if float(l[i]) != float(kwargs[c]):
l[i] = float(kwargs[c])
setcomp = True
if setcomp:
try:
self.assign(l)
except:
msg = ", ".join(map(lambda x, y: x + "=<" + util.clsname(y) + ">", cls.cnames, l))
raise TypeError, "in %s(%s), at least one of the components is of an invalid type, check help(%s) " % (cls.__name__, msg, cls.__name__)
# set properties for easy access to translation / rotation / scale of a MMatrix or derived class
# some of these will only yield dependable results if MMatrix is a MTransformationMatrix and some
# will always be zero for some classes (ie only rotation has a value on a MQuaternion)
def _getTranslate(self):
return Vector(0.0, 0.0, 0.0)
translate = property(_getTranslate, None, None, "The translation expressed in this Quaternion, which is always (0.0, 0.0, 0.0)")
def _getRotate(self):
return self
def _setRotate(self, value):
self.assign(Quaternion(value))
rotate = property(_getRotate, _setRotate, None, "The rotation expressed in this Quaternion, in transform space")
def _getScale(self):
return Vector(1.0, 1.0, 1.0)
scale = property(_getScale, None, None, "The scale expressed in this Quaternion, which is always (1.0, 1.0, 1.0)")
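# Sketch of how these properties behave on a Quaternion (assumes a running Maya
# session); translate and scale are fixed, only rotate is meaningful here:
# >>> q = Quaternion()
# >>> q.translate          # always Vector([0.0, 0.0, 0.0])
# >>> q.scale              # always Vector([1.0, 1.0, 1.0])
# >>> q.rotate = Quaternion(0.0, 0.382683432365, 0.0, 0.923879532511)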
# overloads for assign and get though standard way should be to use the data property
# to access stored values
def assign(self, value):
""" Wrap the Quaternion api assign method """
# api Quaternion assign accepts Matrix, Quaternion and EulerRotation
if isinstance(value, Matrix):
value = value.rotate
else:
if not hasattr(value, '__iter__'):
value = (value,)
value = self.apicls(*value)
self.apicls.assign(self, value)
return self
# API get, actually not faster than pulling self[i] for such a short structure
def get(self):
""" Wrap the Quaternion api get method """
# need to keep a ref to the MScriptUtil alive until
# all pointers aren't needed...
ms = _api.MScriptUtil()
l = (0,) * self.size
ms.createFromDouble(*l)
p = ms.asDoublePtr()
self.apicls.get(self, p)
return tuple([ms.getDoubleArrayItem(p, i) for i in xrange(self.size)])
def __getitem__(self, i):
return self._getitem(i)
# faster to override __getitem__ because we know Quaternion only has one dimension
def _getitem(self, i):
""" Get component i value from self """
if hasattr(i, '__iter__'):
i = list(i)
if len(i) == 1:
i = i[0]
else:
raise IndexError, "class %s instance %s has only %s dimension(s), index %s is out of bounds" % (util.clsname(self), self, self.ndim, i)
if isinstance(i, slice):
try:
return list(self)[i]
except:
raise IndexError, "class %s instance %s is of size %s, index %s is out of bounds" % (util.clsname(self), self, self.size, i)
else:
if i < 0:
i = self.size + i
if i < self.size and not i < 0:
if hasattr(self.apicls, '__getitem__'):
res = self.apicls.__getitem__(self, i)
else:
res = list(self)[i]
return res
else:
raise IndexError, "class %s instance %s is of size %s, index %s is out of bounds" % (util.clsname(self), self, self.size, i)
# as the api class has no __setitem__ method, we need to reassign the whole Quaternion
def __setitem__(self, i, a):
""" Set component i value on self """
v = VectorN(self)
v.__setitem__(i, a)
self.assign(v)
def __iter__(self):
for i in range(self.size):
yield self[i]
def __len__(self):
# api incorrectly returns 4. this might make sense if it did not simply return z a second time as the fourth element
return self.size
#
# # TODO : support for optional __iter__ arguments
# def __iter__(self, *args, **kwargs):
# """ Iterate on the api components """
# return self.apicls.__iter__(self.data)
def __contains__(self, value):
""" True if at least one of the vector components is equal to the argument """
return value in self.__iter__()
class TransformationMatrix(Matrix):
apicls = _api.MTransformationMatrix
def _getTranslate(self):
return Vector(self.getTranslation(_api.MSpace.kTransform))
def _setTranslate(self, value):
self.setTranslation(Vector(value), _api.MSpace.kTransform)
translate = property(_getTranslate, _setTranslate, None, "The translation expressed in this TransformationMatrix, in transform space")
def _getRotate(self):
return Quaternion(self.apicls.rotation(self))
def _setRotate(self, value):
self.rotateTo(Quaternion(value))
rotate = property(_getRotate, _setRotate, None, "The quaternion rotation expressed in this TransformationMatrix, in transform space")
def rotateTo(self, value):
'''Set to the given rotation (and result self)
Value may be either a Quaternion, EulerRotation object, or a list of
floats; if it is floats, if it has length 4 it is interpreted as
a Quaternion; if 3, as a EulerRotation.
'''
if not isinstance(value, (Quaternion, EulerRotation,
_api.MQuaternion, _api.MEulerRotation)):
if len(value) == 3:
value = EulerRotation(value)
elif len(value) == 4:
value = Quaternion(value)
else:
raise ValueError('arg to rotateTo must be a Quaternion, EulerRotation, or an iterable of 3 or 4 floats')
return self.__class__(self.apicls.rotateTo(self, value))
def eulerRotation(self):
return EulerRotation(self.apicls.eulerRotation(self))
def _getEuler(self):
return self.eulerRotation()
def _setEuler(self, value):
self.rotateTo(EulerRotation(value))
euler = property(_getEuler, _setEuler, None, "The euler rotation expressed in this TransformationMatrix, in transform space")
# The apicls getRotation needs a "RotationOrder &" object, which is
# impossible to make in python...
# So instead, wrap eulerRotation
def getRotation(self):
return self.eulerRotation()
def setRotation(self, *args):
self.rotateTo(EulerRotation(*args))
def _getScale(self):
return Vector(self.getScale(_api.MSpace.kTransform))
def _setScale(self, value):
self.setScale(value, _api.MSpace.kTransform)
scale = property(_getScale, _setScale, None, "The scale expressed in this TransformationMatrix, in transform space")
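# Minimal sketch of typical TransformationMatrix property usage (assumes a running
# Maya session; values are illustrative only):
# >>> t = TransformationMatrix()
# >>> t.translate = (1.0, 2.0, 3.0)          # set translation in transform space
# >>> t.setRotation(0.0, math.pi / 2, 0.0)   # set rotation from euler angles, in radians
# >>> t.scale                                # Vector([1.0, 1.0, 1.0]) on a fresh matrix
# >>> m = Matrix(t)                          # collapse back to a plain Matrix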
class EulerRotation(Array):
"""
unit handling:
>>> from pymel.all import *
>>> import pymel.core.datatypes as dt
>>>
>>> currentUnit(angle='degree')
u'degree'
>>> e = dt.EulerRotation([math.pi,0,0], unit='radians')
>>> e
dt.EulerRotation([3.14159265359, 0.0, 0.0], unit='radians')
>>> e2 = dt.EulerRotation([180,0,0], unit='degrees')
>>> e2
dt.EulerRotation([180.0, 0.0, 0.0])
>>> e.isEquivalent( e2 )
True
>>> e == e2
True
units are only displayed when they do not match the current ui unit
>>> dt.Angle.getUIUnit() # check current angular unit
'degrees'
>>> e
dt.EulerRotation([3.14159265359, 0.0, 0.0], unit='radians')
>>> dt.Angle.setUIUnit('radians') # change to radians
>>> e
dt.EulerRotation([3.14159265359, 0.0, 0.0])
"""
__metaclass__ = MetaMayaArrayTypeWrapper
apicls = _api.MEulerRotation
shape = (3,)
cnames = ('x', 'y', 'z')
RotationOrder = _factories.apiClassInfo['MEulerRotation']['pymelEnums']['RotationOrder']
def _getorder(self):
return self.RotationOrder[self.apicls.__dict__['order'].__get__(self, self.apicls)]
def _setorder(self, val):
self.apicls.__dict__['order'].__set__(self, self.RotationOrder.getIndex(val))
order = property(_getorder, _setorder)
def __new__(cls, *args, **kwargs):
# shape = kwargs.get('shape', None)
# ndim = kwargs.get('ndim', None)
# size = kwargs.get('size', None)
#
new = cls.apicls.__new__(cls)
cls.apicls.__init__(new)
return new
def __init__(self, *args, **kwargs):
""" __init__ method for EulerRotation """
self.unit = None
self.assign(*args, **kwargs)
def setDisplayUnit(self, unit):
if unit not in Angle.Unit:
raise TypeError, "%s is not a valid angular unit. See Angle.Unit for the list of valid units"
self.unit = unit
def __repr__(self):
argStrs = [str(self)]
if self.unit != Angle.getUIUnit():
argStrs.append('unit=%r' % self.unit)
if self.order != 'XYZ':
argStrs.append('order=%r' % str(self.order))
return "dt.%s(%s)" % (self.__class__.__name__, ', '.join(argStrs))
def __iter__(self):
for i in range(self.size):
yield self[i]
def __getitem__(self, i):
return Angle(self._getitem(i), 'radians').asUnit(self.unit)
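# Note (sketch, mirroring the class docstring examples): indexing converts from the
# internal radians storage to the instance's display unit, e.g. assuming degrees:
# >>> e = EulerRotation([180, 0, 0], unit='degrees')
# >>> e[0]        # 180.0 -- the x component expressed in the display unit
# >>> e[1]        # 0.0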
def __setitem__(self, key, val):
kwargs = {}
if key in self.cnames:
kwargs[key] = val
else:
kwargs[self.cnames[key]] = val
self.assign(**kwargs)
# faster to override __getitem__ because we know EulerRotation only has one dimension
def _getitem(self, i):
""" Get component i value from self """
if hasattr(i, '__iter__'):
i = list(i)
if len(i) == 1:
i = i[0]
else:
raise IndexError, "class %s instance %s has only %s dimension(s), index %s is out of bounds" % (util.clsname(self), self, self.ndim, i)
if isinstance(i, slice):
try:
return _toCompOrArrayInstance(list(self)[i], VectorN)
except:
raise IndexError, "class %s instance %s is of size %s, index %s is out of bounds" % (util.clsname(self), self, self.size, i)
else:
if i < 0:
i = self.size + i
if i < self.size and not i < 0:
if hasattr(self.apicls, '__getitem__'):
return self.apicls.__getitem__(self, i)
else:
return list(self)[i]
else:
raise IndexError, "class %s instance %s is of size %s, index %s is out of bounds" % (util.clsname(self), self, self.size, i)
def assign(self, *args, **kwargs):
""" Wrap the Quaternion api assign method """
# After processing, we want to have args be in a format such that
# we may do:
# apicls.assign(*args)
# This means that either:
# args is a list/tuple of
if 'unit' in kwargs:
self.unit = kwargs['unit']
elif self.unit is None:
self.unit = Angle.getUIUnit()
if len(args) == 1 and isinstance(args[0], _api.MTransformationMatrix):
args = [args[0].asMatrix()]
# api MEulerRotation assign accepts Matrix, Quaternion and EulerRotation
validSingleObjs = (_api.MMatrix, _api.MQuaternion, _api.MEulerRotation)
if len(args) == 1 and isinstance(args[0], validSingleObjs):
self.unit = 'radians'
self.apicls.assign(self, args[0])
elif args:
if len(args) == 1:
args = list(args[0])
elif len(args) == 2 and isinstance(args[1], (basestring, util.EnumValue)):
args = list(args[0]) + [args[1]]
else:
# convert to list, as we may have to do modifications
args = list(args)
# If only 3 rotation angles supplied, and current order is
# not default, make sure we maintain it
if self.order != 'XYZ' and len(args) == 3:
args.append(self.apicls.__dict__['order'].__get__(self, self.apicls))
elif len(args) == 4 and isinstance(args[3], (basestring, util.EnumValue)):
# allow to initialize directly from 3 rotations and a rotation order as string
args[3] = self.RotationOrder.getIndex(args[3])
# In case they do something like pass in a mix of Angle objects and
# float numbers, convert to correct unit one-by-one...
for i in xrange(3):
if isinstance(args[i], Angle):
args[i] = args[i].asUnit('radians')
elif self.unit != 'radians' and not isinstance(args[i], Angle):
args[i] = Angle(args[i], self.unit).asUnit('radians')
self.apicls.setValue(self, *args)
# We do kwargs as a separate step after args, instead of trying to combine
# them, in case they do something like pass in a EulerRotation(myMatrix, y=2)
if hasattr(self, 'cnames') and len(set(self.cnames) & set(kwargs)):
# can also use the form <componentname>=<number>
l = list(self.flat)
setcomp = False
for i, c in enumerate(self.cnames):
if c in kwargs:
if float(l[i]) != float(kwargs[c]):
l[i] = float(kwargs[c])
setcomp = True
if setcomp:
try:
self.assign(l)
except:
msg = ", ".join(map(lambda x, y: x + "=<" + util.clsname(y) + ">", cls.cnames, l))
raise TypeError, "in %s(%s), at least one of the components is of an invalid type, check help(%s) " % (cls.__name__, msg, cls.__name__)
return self
# API get, actually not faster than pulling self[i] for such a short structure
def get(self):
""" Wrap the MEulerRotation api get method """
# need to keep a ref to the MScriptUtil alive until
# all pointers aren't needed...
ms = _api.MScriptUtil()
l = (0,) * self.size
ms.createFromDouble(*l)
p = ms.asDoublePtr()
self.apicls.get(self, p)
return tuple([ms.getDoubleArrayItem(p, i) for i in xrange(self.size)])
def __contains__(self, value):
""" True if at least one of the vector components is equal to the argument """
return value in self.__iter__()
def __len__(self):
return self.apicls.__len__(self)
# common operators without an api equivalent are inherited from VectorN
# operators using the Maya API when applicable, but that can delegate to VectorN
def __eq__(self, other):
""" u.__eq__(v) <==> u == v
Equivalence test """
if isinstance(other, self.apicls):
return bool(self.apicls.__eq__(self, other))
else:
return bool(super(EulerRotation, self).__eq__(other))
def __ne__(self, other):
""" u.__ne__(v) <==> u != v
Equivalence test """
return (not self.__eq__(other))
def __neg__(self):
""" u.__neg__() <==> -u
The unary minus operator. Negates the value of each of the components of u """
return self.__class__(self.apicls.__neg__(self))
def __add__(self, other):
""" u.__add__(v) <==> u+v
Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),
adds v to every component of u if v is a scalar """
try:
return self.__class__._convert(self.apicls.__add__(self, other))
except:
return self.__class__._convert(super(EulerRotation, self).__add__(other))
def __radd__(self, other):
""" u.__radd__(v) <==> v+u
Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),
adds v to every component of u if v is a scalar """
try:
return self.__class__._convert(self.apicls.__radd__(self, other))
except:
return self.__class__._convert(super(EulerRotation, self).__radd__(other))
def __iadd__(self, other):
""" u.__iadd__(v) <==> u += v
In place addition of u and v, see __add__ """
try:
return self.__class__(self.__add__(other))
except:
return NotImplemented
def __sub__(self, other):
""" u.__sub__(v) <==> u-v
Returns the result of the subtraction of v from u if v is convertible to a VectorN (element-wise subtraction),
subtracts v from every component of u if v is a scalar """
try:
return self.__class__._convert(self.apicls.__sub__(self, other))
except:
return self.__class__._convert(super(EulerRotation, self).__sub__(other))
def __rsub__(self, other):
""" u.__rsub__(v) <==> v-u
Returns the result of the subtraction of u from v if v is convertible to a VectorN (element-wise subtraction),
replaces every component c of u by v-c if v is a scalar """
try:
return self.__class__._convert(self.apicls.__rsub__(self, other))
except:
return self.__class__._convert(super(EulerRotation, self).__rsub__(other))
def __isub__(self, other):
""" u.__isub__(v) <==> u -= v
In place subtraction of v from u, see __sub__ """
try:
return self.__class__(self.__sub__(other))
except:
return NotImplemented
def __div__(self, other):
""" u.__div__(v) <==> u/v
Returns the result of the division of u by v if v is convertible to a VectorN (element-wise division),
divide every component of u by v if v is a scalar """
try:
return self.__class__._convert(self.apicls.__div__(self, other))
except:
return self.__class__._convert(super(EulerRotation, self).__div__(other))
def __rdiv__(self, other):
""" u.__rdiv__(v) <==> v/u
Returns the result of the division of v by u if v is convertible to a VectorN (element-wise division),
inverts every component of u and multiplies it by v if v is a scalar """
try:
return self.__class__._convert(self.apicls.__rdiv__(self, other))
except:
return self.__class__._convert(super(EulerRotation, self).__rdiv__(other))
def __idiv__(self, other):
""" u.__idiv__(v) <==> u /= v
In place division of u by v, see __div__ """
try:
return self.__class__(self.__div__(other))
except:
return NotImplemented
# action depends on second object type
def __mul__(self, other):
""" u.__mul__(v) <==> u*v
The multiply '*' operator is mapped to the dot product when both objects are Vectors,
to the transformation of u by matrix v when v is a MatrixN,
to element wise multiplication when v is a sequence,
and multiplies each component of u by v when v is a numeric type. """
try:
res = self.apicls.__mul__(self, other)
except:
res = super(EulerRotation, self).__mul__(other)
if util.isNumeric(res):
return res
else:
return self.__class__._convert(res)
def __rmul__(self, other):
""" u.__rmul__(v) <==> v*u
The multiply '*' operator is mapped to the dot product when both objects are Vectors,
to the left side multiplication (pre-multiplication) of u by matrix v when v is a MatrixN,
to element wise multiplication when v is a sequence,
and multiplies each component of u by v when v is a numeric type. """
try:
res = self.apicls.__rmul__(self, other)
except:
res = super(EulerRotation, self).__rmul__(other)
if util.isNumeric(res):
return res
else:
return self.__class__._convert(res)
def __imul__(self, other):
""" u.__imul__(v) <==> u *= v
Valid for EulerRotation * Matrix multiplication (in place transformation of u by Matrix v)
or EulerRotation * scalar multiplication only """
try:
return self.__class__(self.__mul__(other))
except:
return NotImplemented
# special operators
# def __xor__(self, other):
# """ u.__xor__(v) <==> u^v
# Defines the cross product operator between two 3D vectors,
# if v is a MatrixN, u^v is equivalent to u.transformAsNormal(v) """
# if isinstance(other, VectorN) :
# return self.cross(other)
# elif isinstance(other, MatrixN) :
# return self.transformAsNormal(other)
# else :
# return NotImplemented
# def __ixor__(self, other):
# """ u.__xor__(v) <==> u^=v
# Inplace cross product or transformation by inverse transpose of v is v is a MatrixN """
# try :
# return self.__class__(self.__xor__(other))
# except :
# return NotImplemented
class Unit(float):
__slots__ = ['unit', 'data', '_unit', '_data']
# TODO: implement proper equality comparison - currently,
# Distance(5, 'meters') == Distance(5, 'centimeters')
@classmethod
def getUIUnit(cls):
"""
Returns the global UI units currently in use for that type
"""
return cls.sUnit(cls.apicls.uiUnit())
@classmethod
def setUIUnit(cls, unit=None):
"""
Sets the global UI units to use for that type
"""
if unit is None:
cls.apicls.setUIUnit(cls.apicls.internalUnit())
else:
cls.apicls.setUIUnit(cls.kUnit(unit))
@classmethod
def getInternalUnit(cls):
"""
Returns the internal units currently in use for that type
"""
return cls.sUnit(cls.apicls.internalUnit())
@classmethod
def uiToInternal(cls, value):
d = cls(value, cls.getUIUnit())
return d.asInternalUnit()
@classmethod
def kUnit(cls, unit=None):
"""
Converts a string unit name to the internal int unit enum representation
"""
if unit:
return cls.Unit.getIndex(unit)
else:
return cls.apicls.uiUnit()
@classmethod
def sUnit(cls, unit=None):
"""
Converts an internal int unit enum representation to the string unit name
"""
if unit:
return cls.Unit.getKey(unit)
else:
return cls.Unit.getKey(cls.apicls.uiUnit())
def getUnit(self):
"""
Returns the units currently in effect for this instance
"""
return self.__class__.sUnit(self._unit)
# def setUnit(self, unit=None) :
# """
# Sets the units currently in effect for this instance
# """
# self._unit = self.__class__.kUnit(unit)
unit = property(getUnit, None, None, "The units currently in effect for this instance")
def __new__(cls, value, unit=None):
unit = cls.kUnit(unit)
if isinstance(value, cls.apicls):
value = value.asUnits(unit)
elif isinstance(value, cls):
value = value.asUnit(unit)
#data = cls.apicls(value, unit)
# the float representation uses internal units so that arithmetics work
#newobj = float.__new__(cls, data.asUnit(cls.apicls.internalUnit()))
#newobj = float.__new__(cls, data.asUnit(unit))
newobj = float.__new__(cls, value)
#newobj._data = data
newobj._unit = unit
newobj._data = cls.apicls(value, unit)
return newobj
def assign(self, *args):
if len(args) == 1 and isinstance(args[0], self.__class__):
args = (args[0]._data, args[0]._unit)
self._data.assign(*args)
def __repr__(self):
return 'dt.%s(%s, unit=%r)' % (self.__class__.__name__, self, self.unit)
def asUnit(self, unit):
return self._data.asUnits(self.__class__.kUnit(unit))
# def asUnit(self) :
# return self.asUnit(self.unit)
def asUIUnit(self):
return self.asUnit(self.__class__.getUIUnit())
def asInternalUnit(self):
return self.asUnit(self.__class__.getInternalUnit())
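# Minimal sketch of the Unit conversion round-trip (assumes a running Maya session;
# mirrors the Distance examples further below):
# >>> d = Distance(12, 'meters')
# >>> d.asUnit('centimeters')    # 1200.0
# >>> d.asUIUnit()               # converted to the current UI unit
# >>> d.asInternalUnit()         # converted to Maya's internal unit (centimeters)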
class Time(Unit):
apicls = _api.MTime
Unit = _factories.apiClassInfo['MTime']['pymelEnums']['Unit']
@classmethod
def _inCast(cls, x):
return cls(x)._data
class Distance(Unit):
"""
>>> from pymel.core import *
>>> import pymel.core.datatypes as dt
>>>
>>> dt.Distance.getInternalUnit()
'centimeters'
>>> dt.Distance.setUIUnit('meters')
>>> dt.Distance.getUIUnit()
'meters'
>>> d = dt.Distance(12)
>>> d.unit
'meters'
>>> print d
12.0
>>> print repr(d)
dt.Distance(12.0, unit='meters')
>>> print d.asUIUnit()
12.0
>>> print d.asInternalUnit()
1200.0
>>> dt.Distance.setUIUnit('centimeters')
>>> dt.Distance.getUIUnit()
'centimeters'
>>> e = dt.Distance(12)
>>> e.unit
'centimeters'
>>> print e
12.0
>>> str(e)
'12.0'
>>> print repr(e)
dt.Distance(12.0, unit='centimeters')
>>> print e.asUIUnit()
12.0
>>> print e.asInternalUnit()
12.0
>>> f = dt.Distance(12, 'feet')
>>> print f
12.0
>>> print repr(f)
dt.Distance(12.0, unit='feet')
>>> f.unit
'feet'
>>> print f.asUIUnit()
365.76
>>> dt.Distance.setUIUnit('meters')
>>> dt.Distance.getUIUnit()
'meters'
>>> print f.asUIUnit()
3.6576
>>> dt.Distance.getInternalUnit()
'centimeters'
>>> print f.asInternalUnit()
365.76
>>> print f.asFeet()
12.0
>>> print f.asMeters()
3.6576
>>> print f.asCentimeters()
365.76
>>> dt.Distance.setUIUnit()
>>> dt.Distance.getUIUnit()
'centimeters'
"""
apicls = _api.MDistance
Unit = _factories.apiClassInfo['MDistance']['pymelEnums']['Unit']
def asMillimeter(self):
return self.asUnit('millimeters')
def asCentimeters(self):
return self.asUnit('centimeters')
def asKilometers(self):
return self.asUnit('kilometers')
def asMeters(self):
return self.asUnit('meters')
def asInches(self):
return self.asUnit('inches')
def asFeet(self):
return self.asUnit('feet')
def asYards(self):
return self.asUnit('yards')
def asMiles(self):
return self.asUnit('miles')
@classmethod
def _outCast(cls, instance, result):
return cls(result, 'centimeters').asUIUnit()
class Angle(Unit):
apicls = _api.MAngle
Unit = _factories.apiClassInfo['MAngle']['pymelEnums']['Unit']
def asRadians(self):
return self.asUnit('radians')
def asDegrees(self):
return self.asUnit('degrees')
def asAngMinutes(self):
return self.asUnit('angMinutes')
def asAngSeconds(self):
return self.asUnit('angSeconds')
@classmethod
def _outCast(cls, instance, result):
return cls(result, 'radians').asUIUnit()
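# Sketch of Angle conversions (assumes a running Maya session; math.pi used for illustration):
# >>> a = Angle(math.pi, 'radians')
# >>> a.asDegrees()       # 180.0
# >>> a.asUIUnit()        # converted to whatever Angle.getUIUnit() currently is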
class BoundingBox(_api.MBoundingBox):
apicls = _api.MBoundingBox
__metaclass__ = _factories.MetaMayaTypeWrapper
def __init__(self, *args):
if len(args) == 2:
args = list(args)
if not isinstance(args[0], _api.MPoint):
args[0] = Point(args[0])
if not isinstance(args[1], _api.MPoint):
args[1] = Point(args[1])
_api.MBoundingBox.__init__(self, *args)
def __str__(self):
return 'dt.%s(%s,%s)' % (self.__class__.__name__, self.min(), self.max())
def __repr__(self):
return str(self)
def __getitem__(self, item):
if item == 0:
return self.min()
elif item == 1:
return self.max()
raise IndexError, "Index out of range"
def __melobject__(self):
"""A flat list of 6 values [minx, miny, minz, maxx, maxy, maxz]"""
return list(self.min()) + list(self.max())
repr = __str__
w = property(_factories.wrapApiMethod(_api.MBoundingBox, 'width'))
h = property(_factories.wrapApiMethod(_api.MBoundingBox, 'height'))
d = property(_factories.wrapApiMethod(_api.MBoundingBox, 'depth'))
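# Sketch of BoundingBox usage (assumes a running Maya session):
# >>> bb = BoundingBox(Point(0, 0, 0), Point(1, 2, 3))
# >>> bb[0], bb[1]        # min and max corners as Points
# >>> bb.w, bb.h, bb.d    # width, height, depth
# >>> bb.__melobject__()  # flat [minx, miny, minz, maxx, maxy, maxz] list for MEL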
#_factories.ApiTypeRegister.register( 'MVector', Vector )
#_factories.ApiTypeRegister.register( 'MMatrix', Matrix )
#_factories.ApiTypeRegister.register( 'MPoint', Point )
#_factories.ApiTypeRegister.register( 'MColor', Color )
#_factories.ApiTypeRegister.register( 'MQuaternion', Quaternion )
#_factories.ApiTypeRegister.register( 'MEulerRotation', EulerRotation )
_factories.ApiTypeRegister.register('MTime', Time, inCast=Time._inCast)
_factories.ApiTypeRegister.register('MDistance', Distance, outCast=Distance._outCast)
_factories.ApiTypeRegister.register('MAngle', Angle, outCast=Angle._outCast)
#_floatUpConvertDict = {_api.MFloatArray:_api.MDoubleArray,
# _api.MFloatMatrix:_api.MMatrix,
# _api.MFloatPoint:_api.MPoint,
# _api.MFloatPointArray:_api.MPointArray,
# _api.MFloatVector:_api.MVector,
# _api.MFloatVectorArray:_api.MVectorArray,
# FloatMatrix:Matrix,
# FloatPoint:Point,
# FloatVector:Vector
# }
# def _floatUpConvert(input):
# """Will convert various Float* objects to their corresponding double object
#
# ie, api.MFloatMatrix => api.MMatrix, FloatPoint => Point
# """
# newClass = _floatUpConvertDict.get(input.__class__)
# if newClass:
# return newClass(input)
# else:
# return input
def getPlugValue(plug):
"""given an MPlug, get its value as a pymel-style object"""
# if plug.isArray():
# raise TypeError, "array plugs of this type are not supported"
obj = plug.attribute()
apiType = obj.apiType()
# Float Pairs
if apiType in [_api.MFn.kAttribute2Double, _api.MFn.kAttribute2Float]:
res = []
for i in range(plug.numChildren()):
res.append(getPlugValue(plug.child(i)))
if isinstance(res[0], Distance):
return Vector(res)
return res
# Integer Groups
elif apiType in [_api.MFn.kAttribute2Short, _api.MFn.kAttribute2Int, _api.MFn.kAttribute3Short, _api.MFn.kAttribute3Int]:
res = []
for i in range(plug.numChildren()):
res.append(getPlugValue(plug.child(i)))
return res
# Float Groups
elif apiType in [_api.MFn.kAttribute3Double, _api.MFn.kAttribute3Float, _api.MFn.kAttribute4Double]:
res = []
for i in range(plug.numChildren()):
res.append(getPlugValue(plug.child(i)))
if isinstance(res[0], Distance):
return Vector(res)
elif _api.MFnAttribute(obj).isUsedAsColor():
return Color(res)
return res
# Compound
elif apiType in [_api.MFn.kCompoundAttribute]:
res = []
for i in range(plug.numChildren()):
res.append(getPlugValue(plug.child(i)))
return tuple(res)
# Distance
elif apiType in [_api.MFn.kDoubleLinearAttribute, _api.MFn.kFloatLinearAttribute]:
val = plug.asMDistance()
unit = _api.MDistance.uiUnit()
# as becomes a keyword in python 2.6
return Distance(val.asUnits(unit), unit)
# Angle
elif apiType in [_api.MFn.kDoubleAngleAttribute, _api.MFn.kFloatAngleAttribute]:
val = plug.asMAngle()
unit = _api.MAngle.uiUnit()
# as becomes a keyword in python 2.6
return Angle(val.asUnits(unit), unit)
# Time
elif apiType == _api.MFn.kTimeAttribute:
val = plug.asMTime()
unit = _api.MTime.uiUnit()
# as becomes a keyword in python 2.6
return Time(val.asUnits(unit), unit)
elif apiType == _api.MFn.kNumericAttribute:
nAttr = _api.MFnNumericAttribute(obj)
dataType = nAttr.unitType()
if dataType == _api.MFnNumericData.kBoolean:
return plug.asBool()
elif dataType in [_api.MFnNumericData.kShort, _api.MFnNumericData.kInt, _api.MFnNumericData.kLong, _api.MFnNumericData.kByte]:
return plug.asInt()
elif dataType in [_api.MFnNumericData.kFloat, _api.MFnNumericData.kDouble, _api.MFnNumericData.kAddr]:
return plug.asDouble()
raise "%s: unknown numeric attribute type: %s" % (plug.partialName(True, True, True, False, True, True), dataType)
elif apiType == _api.MFn.kEnumAttribute:
# TODO : use EnumValue class?
return plug.asInt()
elif apiType == _api.MFn.kTypedAttribute:
tAttr = _api.MFnTypedAttribute(obj)
dataType = tAttr.attrType()
if dataType == _api.MFnData.kInvalid: # 0
return None
elif dataType == _api.MFnData.kNumeric: # 1
# all of the dynamic mental ray attributes fail here, but i have no idea why they are numeric attrs and not message attrs.
# cmds.getAttr returns None, so we will too.
try:
dataObj = plug.asMObject()
except:
return
try:
numFn = _api.MFnNumericData(dataObj)
except RuntimeError:
if plug.isArray():
raise TypeError, "%s: numeric arrays are not supported" % plug.partialName(True, True, True, False, True, True)
else:
raise TypeError, "%s: attribute type is numeric, but its data cannot be interpreted numerically" % plug.partialName(True, True, True, False, True, True)
dataType = numFn.numericType()
if dataType == _api.MFnNumericData.kBoolean:
return plug.asBool()
elif dataType in [_api.MFnNumericData.kShort, _api.MFnNumericData.kInt, _api.MFnNumericData.kLong, _api.MFnNumericData.kByte]:
return plug.asInt()
elif dataType in [_api.MFnNumericData.kFloat, _api.MFnNumericData.kDouble, _api.MFnNumericData.kAddr]:
return plug.asDouble()
elif dataType == _api.MFnNumericData.k2Short:
ptr1 = _api.SafeApiPtr('short')
ptr2 = _api.SafeApiPtr('short')
numFn.getData2Short(ptr1(), ptr2())
return (ptr1.get(), ptr2.get())
elif dataType in [_api.MFnNumericData.k2Int, _api.MFnNumericData.k2Long]:
ptr1 = _api.SafeApiPtr('int')
ptr2 = _api.SafeApiPtr('int')
numFn.getData2Int(ptr1(), ptr2())
return (ptr1.get(), ptr2.get())
elif dataType == _api.MFnNumericData.k2Float:
ptr1 = _api.SafeApiPtr('float')
ptr2 = _api.SafeApiPtr('float')
numFn.getData2Float(ptr1(), ptr2())
return (ptr1.get(), ptr2.get())
elif dataType == _api.MFnNumericData.k2Double:
ptr1 = _api.SafeApiPtr('double')
ptr2 = _api.SafeApiPtr('double')
numFn.getData2Double(ptr1(), ptr2())
return (ptr1.get(), ptr2.get())
elif dataType == _api.MFnNumericData.k3Float:
ptr1 = _api.SafeApiPtr('float')
ptr2 = _api.SafeApiPtr('float')
ptr3 = _api.SafeApiPtr('float')
numFn.getData3Float(ptr1(), ptr2(), ptr3())
return (ptr1.get(), ptr2.get(), ptr3.get())
elif dataType == _api.MFnNumericData.k3Double:
ptr1 = _api.SafeApiPtr('double')
ptr2 = _api.SafeApiPtr('double')
ptr3 = _api.SafeApiPtr('double')
numFn.getData3Double(ptr1(), ptr2(), ptr3())
return (ptr1.get(), ptr2.get(), ptr3.get())
elif dataType == _api.MFnNumericData.kChar:
return plug.asChar()
raise TypeError, "%s: Unsupported numeric attribute: %s" % (plug.partialName(True, True, True, False, True, True), dataType)
elif dataType == _api.MFnData.kString: # 4
return plug.asString()
elif dataType == _api.MFnData.kMatrix: # 5
return Matrix(_api.MFnMatrixData(plug.asMObject()).matrix())
elif dataType == _api.MFnData.kStringArray: # 6
try:
dataObj = plug.asMObject()
except RuntimeError:
return []
array = _api.MFnStringArrayData(dataObj).array()
return [array[i] for i in range(array.length())]
elif dataType == _api.MFnData.kDoubleArray: # 7
try:
dataObj = plug.asMObject()
except RuntimeError:
return []
array = _api.MFnDoubleArrayData(dataObj).array()
return [array[i] for i in range(array.length())]
elif dataType == _api.MFnData.kIntArray: # 8
try:
dataObj = plug.asMObject()
except RuntimeError:
return []
array = _api.MFnIntArrayData(dataObj).array()
return [array[i] for i in range(array.length())]
elif dataType == _api.MFnData.kPointArray: # 9
try:
dataObj = plug.asMObject()
except RuntimeError:
return []
array = _api.MFnPointArrayData(dataObj).array()
return [Point(array[i]) for i in range(array.length())]
elif dataType == _api.MFnData.kVectorArray: # 10
try:
dataObj = plug.asMObject()
except RuntimeError:
return []
array = _api.MFnVectorArrayData(dataObj).array()
return [Vector(array[i]) for i in range(array.length())]
# this block crashes maya under certain circumstances
# elif dataType == _api.MFnData.kComponentList : # 11
# try:
# dataObj = plug.asMObject()
# except RuntimeError:
# return []
# array = _api.MFnComponentListData( dataObj )
# return array
# #return [ Vector(array[i]) for i in range(array.length()) ]
raise TypeError, "%s: Unsupported typed attribute: %s" % (plug.partialName(True, True, True, False, True, True), dataType)
raise TypeError, "%s: Unsupported Type: %s" % (plug.partialName(True, True, True, False, True, True), _factories.apiEnumsToApiTypes.get(apiType, apiType))
def _testMVector():
print "Vector class:", dir(Vector)
u = Vector()
print u
print "Vector instance:", dir(u)
print repr(u)
print Vector.__readonly__
print Vector.__slots__
print Vector.shape
print Vector.ndim
print Vector.size
print u.shape
print u.ndim
print u.size
# should fail
u.shape = 2
u.assign(Vector(4, 5, 6))
print repr(u)
#Vector([4.0, 5.0, 6.0])
u = Vector(1, 2, 3)
print repr(u)
# Vector([1.0, 2.0, 3.0])
print len(u)
# 3
# inherits from VectorN --> Array
print isinstance(u, VectorN)
# True
print isinstance(u, Array)
# True
# as well as _api.Vector
print isinstance(u, _api.MVector)
# True
# accepted directly by API methods
M = _api.MTransformationMatrix()
M.setTranslation(u, _api.MSpace.kWorld)
# need conversion on the way back though
u = Vector(M.getTranslation(_api.MSpace.kWorld))
print repr(u)
# Vector([1.0, 2.0, 3.0])
u = Vector(x=1, y=2, z=3)
print repr(u)
# Vector([1.0, 2.0, 3.0])
u = Vector([1, 2], z=3)
print repr(u)
# Vector([1.0, 2.0, 3.0])
u = Vector(_api.MPoint(1, 2, 3))
print repr(u)
# Vector([1.0, 2.0, 3.0])
print "u = Vector(VectorN(1, 2, 3))"
u = Vector(VectorN(1, 2, 3))
print repr(u)
# Vector([1.0, 2.0, 3.0])
u = Vector(1)
print repr(u)
# Vector([1.0, 1.0, 1.0])
u = Vector(1, 2)
print repr(u)
# Vector([1.0, 2.0, 0.0])
u = Vector(VectorN(1, shape=(2,)))
print repr(u)
# Vector([1.0, 1.0, 0.0])
u = Vector(Point(1, 2, 3))
print repr(u)
# Vector([1.0, 2.0, 3.0])
u = Vector(Point(1, 2, 3, 1), y=20, z=30)
print repr(u)
# Vector([1.0, 20.0, 30.0])
# should fail
print "Vector(VectorN(1, 2, 3, 4))"
try:
u = Vector(VectorN(1, 2, 3, 4))
except:
print "will raise ValueError: could not cast [1, 2, 3, 4] to Vector of size 3, some data would be lost"
print u.get()
# (1.0, 20.0, 30.0)
print u[0]
# 1.0
u[0] = 10
print repr(u)
# Vector([10.0, 20.0, 30.0])
print (10 in u)
# True
print list(u)
# [10.0, 20.0, 30.0]
u = Vector.xAxis
v = Vector.yAxis
print Vector.xAxis
print str(Vector.xAxis)
print unicode(Vector.xAxis)
print repr(Vector.xAxis)
print "u = Vector.xAxis:"
print repr(u)
# Vector([1.0, 0.0, 0.0])
print "v = Vector.yAxis:"
print repr(v)
# Vector([0.0, 1.0, 0.0])
n = u ^ v
print "n = u ^ v:"
print repr(n)
# Vector([0.0, 0.0, 1.0])
print "n.x=%s, n.y=%s, n.z=%s" % (n.x, n.y, n.z)
# n.x=0.0, n.y=0.0, n.z=1.0
n = u ^ VectorN(v)
print "n = u ^ VectorN(v):"
print repr(n)
# Vector([0.0, 0.0, 1.0])
n = u ^ [0, 1, 0]
print "n = u ^ [0, 1, 0]:"
print repr(n)
# Vector([0.0, 0.0, 1.0])
n[0:2] = [1, 1]
print "n[0:2] = [1, 1]:"
print repr(n)
# Vector([1.0, 1.0, 1.0])
print "n = n * 2 :"
n = n * 2
print repr(n)
# Vector([2.0, 2.0, 2.0])
print "n = n * [0.5, 1.0, 2.0]:"
n = n * [0.5, 1.0, 2.0]
print repr(n)
# Vector([1.0, 2.0, 4.0])
print "n * n :"
print n * n
# 21.0
print repr(n.clamp(1.0, 2.0))
# Vector([1.0, 2.0, 2.0])
print repr(-n)
# Vector([-1.0, -2.0, -4.0])
w = u + v
print repr(w)
# Vector([1.0, 1.0, 0.0])
p = Point(1, 2, 3)
q = u + p
print repr(q)
# Point([2.0, 2.0, 3.0, 1.0])
q = p + u
print repr(q)
# Point([2.0, 2.0, 3.0, 1.0])
print repr(p + q)
# Point([3.0, 4.0, 6.0, 1.0])
w = u + VectorN(1, 2, 3, 4)
print repr(w)
# VectorN([2.0, 2.0, 3.0, 4])
print repr(u + 2)
# Vector([3.0, 2.0, 2.0])
print repr(2 + u)
# Vector([3.0, 2.0, 2.0])
print repr(p + 2)
# Point([3.0, 4.0, 5.0, 1.0])
print repr(2 + p)
# Point([3.0, 4.0, 5.0, 1.0])
print repr(p + u)
# Point([2.0, 2.0, 3.0, 1.0])
print repr(VectorN(1, 2, 3, 4) + u)
# VectorN([2.0, 2.0, 3.0, 4])
print repr([1, 2, 3] + u)
# Vector([2.0, 2.0, 3.0])
u = Vector(1, 2, 3)
print repr(u)
# Vector([1.0, 2.0, 3.0])
print u.length()
# 3.74165738677
print length(u)
# 3.74165738677
print length([1, 2, 3])
# 3.74165738677
print length(VectorN(1, 2, 3))
# 3.74165738677
print VectorN(1, 2, 3).length()
# 3.74165738677
print length(VectorN(1, 2, 3, 4))
# 5.47722557505
print VectorN(1, 2, 3, 4).length()
# 5.47722557505
print length(1)
# 1.0
print length([1, 2])
# 2.2360679775
print length([1, 2, 3])
# 3.74165738677
print length([1, 2, 3, 4])
# 5.47722557505
print length([1, 2, 3, 4], 0)
# 5.47722557505
print length([1, 2, 3, 4], (0,))
# 5.47722557505
print length([[1, 2], [3, 4]], 1)
# [3.16227766017, 4.472135955]
# should fail
try:
print length([1, 2, 3, 4], 1)
except:
print "Will raise ValueError, \"axis 0 is the only valid axis for a Vector, 1 invalid\""
u = Vector(1, 2, 3)
print repr(u)
# Vector([1.0, 2.0, 3.0])
print u.sqlength()
# 14
print repr(u.normal())
# Vector([0.267261241912, 0.534522483825, 0.801783725737])
u.normalize()
print repr(u)
# Vector([0.267261241912, 0.534522483825, 0.801783725737])
u = Vector(1, 2, 3)
print repr(u)
# Vector([1.0, 2.0, 3.0])
w = u + [0.01, 0.01, 0.01]
print repr(w)
# Vector([1.01, 2.01, 3.01])
print (u == u)
# True
print (u == w)
# False
print (u == Vector(1.0, 2.0, 3.0))
# True
print (u == [1.0, 2.0, 3.0])
# False
print (u == Point(1.0, 2.0, 3.0))
# False
print u.isEquivalent([1.0, 2.0, 3.0])
# True
print u.isEquivalent(Vector(1.0, 2.0, 3.0))
# True
print u.isEquivalent(Point(1.0, 2.0, 3.0))
# True
print u.isEquivalent(w)
# False
print u.isEquivalent(w, 0.1)
# True
u = Vector(1, 0, 0)
print repr(u)
# Vector([1.0, 0.0, 0.0])
v = Vector(0.707, 0, -0.707)
print repr(v)
# Vector([0.707, 0.0, -0.707])
print repr(axis(u, v))
# Vector([-0.0, 0.707, 0.0])
print repr(u.axis(v))
# Vector([-0.0, 0.707, 0.0])
print repr(axis(VectorN(u), VectorN(v)))
# VectorN([-0.0, 0.707, 0.0])
print repr(axis(u, v, normalize=True))
# Vector([-0.0, 1.0, 0.0])
print repr(v.axis(u, normalize=True))
# Vector([-0.0, -1.0, 0.0])
print repr(axis(VectorN(u), VectorN(v), normalize=True))
# VectorN([-0.0, 1.0, 0.0])
print angle(u, v)
# 0.785398163397
print v.angle(u)
# 0.785398163397
print angle(VectorN(u), VectorN(v))
# 0.785398163397
print cotan(u, v)
# 1.0
print repr(u.rotateTo(v))
# Quaternion([-0.0, 0.382683432365, 0.0, 0.923879532511])
print repr(u.rotateBy(u.axis(v), u.angle(v)))
# Vector([0.707106781187, 0.0, -0.707106781187])
q = Quaternion([-0.0, 0.382683432365, 0.0, 0.923879532511])
print repr(u.rotateBy(q))
# Vector([0.707106781187, 0.0, -0.707106781187])
print u.distanceTo(v)
# 0.765309087885
print u.isParallel(v)
# False
print u.isParallel(2 * u)
# True
print repr(u.blend(v))
# Vector([0.8535, 0.0, -0.3535])
print "end tests Vector"
def _testMPoint():
print "Point class", dir(Point)
print hasattr(Point, 'data')
p = Point()
print repr(p)
# Point([0.0, 0.0, 0.0])
print "Point instance", dir(p)
print hasattr(p, 'data')
print repr(p.data)
# <maya.OpenMaya.Point; proxy of <Swig Object of type 'Point *' at 0x84a1270> >
p = Point(1, 2, 3)
print repr(p)
# Point([1.0, 2.0, 3.0])
v = Vector(p)
print repr(v)
# Vector([1.0, 2.0, 3.0])
V = VectorN(p)
print repr(V)
# VectorN([1.0, 2.0, 3.0, 1.0])
print list(p)
# [1.0, 2.0, 3.0]
print len(p)
# 3
print p.size
# 4
print p.x, p.y, p.z, p.w
# 1.0 2.0 3.0 1.0
print p[0], p[1], p[2], p[3]
# 1.0 2.0 3.0 1.0
p.get()
# 1.0 2.0 3.0 1.0
# accepted by api
q = _api.MPoint()
print q.distanceTo(p)
# 3.74165738677
# support for non cartesian points still there
p = Point(1, 2, 3, 2)
print repr(p)
# Point([1.0, 2.0, 3.0, 2.0])
v = Vector(p)
print repr(v)
# Vector([0.5, 1.0, 1.5])
V = VectorN(p)
print repr(V)
# VectorN([1.0, 2.0, 3.0, 2.0])
print list(p)
# [1.0, 2.0, 3.0, 2.0]
print len(p)
# 4
print p.size
# 4
print p.x, p.y, p.z, p.w
# 1.0 2.0 3.0 2.0
print p[0], p[1], p[2], p[3]
# 1.0 2.0 3.0 2.0
p.get()
# 1.0 2.0 3.0 2.0
# accepted by api
q = _api.MPoint()
print q.distanceTo(p)
# 1.87082869339
p = Point(_api.MPoint())
print repr(p)
# Point([0.0, 0.0, 0.0])
p = Point(1)
print repr(p)
# Point([1.0, 1.0, 1.0])
p = Point(1, 2)
print repr(p)
# Point([1.0, 2.0, 0.0])
p = Point(1, 2, 3)
print repr(p)
# Point([1.0, 2.0, 3.0])
p = Point(_api.MPoint(1, 2, 3))
print repr(p)
# Point([1.0, 2.0, 3.0])
p = Point(VectorN(1, 2))
print repr(p)
# Point([1.0, 2.0, 0.0])
p = Point(Vector(1, 2, 3))
print repr(p)
# Point([1.0, 2.0, 3.0])
p = Point(_api.MVector(1, 2, 3))
print repr(p)
# Point([1.0, 2.0, 3.0])
p = Point(VectorN(1, 2, 3, 4))
print repr(p)
# Point([1.0, 2.0, 3.0, 4.0])
print repr(Vector(p))
# Vector([0.25, 0.5, 0.75])
print repr(VectorN(p))
# VectorN([1.0, 2.0, 3.0, 4.0])
p = Point(p, w=1)
print repr(p)
# Point([1.0, 2.0, 3.0])
print repr(Vector(p))
# Vector([1.0, 2.0, 3.0])
print repr(VectorN(p))
# VectorN([1.0, 2.0, 3.0, 1.0])
p = Point.origin
print repr(p)
# Point([0.0, 0.0, 0.0])
p = Point.xAxis
print repr(p)
# Point([1.0, 0.0, 0.0])
p = Point(1, 2, 3)
print repr(p)
# Point([1.0, 2.0, 3.0])
print repr(p + Vector([1, 2, 3]))
# Point([2.0, 4.0, 6.0])
print repr(p + Point([1, 2, 3]))
# Point([2.0, 4.0, 6.0])
print repr(p + [1, 2, 3])
# Point([2.0, 4.0, 6.0])
print repr(p + [1, 2, 3, 1])
# Point([2.0, 4.0, 6.0])
print repr(p + Point([1, 2, 3, 1]))
# Point([2.0, 4.0, 6.0])
print repr(p + [1, 2, 3, 2])
# Point([2.0, 4.0, 6.0, 3.0]) TODO : convert to Point always?
print repr(p + Point([1, 2, 3, 2]))
# Point([1.5, 3.0, 4.5])
print repr(Vector([1, 2, 3]) + p)
# Point([2.0, 4.0, 6.0])
print repr(Point([1, 2, 3]) + p)
# Point([2.0, 4.0, 6.0])
print repr([1, 2, 3] + p)
# Point([2.0, 4.0, 6.0])
print repr([1, 2, 3, 1] + p)
# Point([2.0, 4.0, 6.0])
print repr(Point([1, 2, 3, 1]) + p)
# Point([2.0, 4.0, 6.0])
print repr([1, 2, 3, 2] + p)
# Point([2.0, 4.0, 6.0, 3.0])
print repr(Point([1, 2, 3, 2]) + p)
# Point([1.5, 3.0, 4.5])
# various operations on cartesian and non-cartesian points
print "p = Point(1, 2, 3)"
p = Point(1, 2, 3)
print repr(p)
# Point([1.0, 2.0, 3.0])
print "p/2"
print repr(p / 2)
# Point([0.5, 1.0, 1.5])
print "p*2"
print repr(p * 2)
# Point([2.0, 4.0, 6.0])
print "q = Point(0.25, 0.5, 1.0)"
q = Point(0.25, 0.5, 1.0)
print repr(q)
# Point([0.25, 0.5, 1.0])
print repr(q + 2)
# Point([2.25, 2.5, 3.0])
print repr(q / 2)
# Point([0.125, 0.25, 0.5])
print repr(p + q)
# Point([1.25, 2.5, 4.0])
print repr(p - q)
# Vector([0.75, 1.5, 2.0])
print repr(q - p)
# Vector([-0.75, -1.5, -2.0])
print repr(p - (p - q))
# Point([0.25, 0.5, 1.0])
print repr(Vector(p) * Vector(q))
# 4.25
print repr(p * q)
# 4.25
print repr(p / q)
# Point([4.0, 4.0, 3.0])
print "p = Point(1, 2, 3)"
p = Point(1, 2, 3)
print repr(p)
# Point([1.0, 2.0, 3.0])
print "p/2"
print repr(p / 2)
# Point([0.5, 1.0, 1.5])
print "p*2"
print repr(p * 2)
# Point([2.0, 4.0, 6.0])
print "q = Point(0.25, 0.5, 1.0, 0.5)"
q = Point(0.25, 0.5, 1.0, 0.5)
print repr(q)
# Point([0.25, 0.5, 1.0, 0.5])
r = q.deepcopy()
print repr(r)
# Point([0.25, 0.5, 1.0, 0.5])
print repr(r.cartesianize())
# Point([0.5, 1.0, 2.0])
print repr(r)
# Point([0.5, 1.0, 2.0])
print repr(q)
# Point([0.25, 0.5, 1.0, 0.5])
print repr(q.cartesian())
# Point([0.5, 1.0, 2.0])
r = q.deepcopy()
print repr(r)
# Point([0.25, 0.5, 1.0, 0.5])
print repr(r.rationalize())
# Point([0.5, 1.0, 2.0, 0.5])
print repr(r)
# Point([0.5, 1.0, 2.0, 0.5])
print repr(q.rational())
# Point([0.5, 1.0, 2.0, 0.5])
r = q.deepcopy()
print repr(r.homogenize())
# Point([0.125, 0.25, 0.5, 0.5])
print repr(r)
# Point([0.125, 0.25, 0.5, 0.5])
print repr(q.homogen())
# Point([0.125, 0.25, 0.5, 0.5])
print repr(q)
# Point([0.25, 0.5, 1.0, 0.5])
print Vector(q)
# [0.5, 1.0, 2.0]
print Vector(q.cartesian())
# [0.5, 1.0, 2.0]
# ignore w
print "q/2"
print repr(q / 2)
# Point([0.125, 0.25, 0.5, 0.5])
print "q*2"
print repr(q * 2)
# Point([0.5, 1.0, 2.0, 0.5])
print repr(q + 2) # cartesianize is done by Vector add
# Point([2.5, 3.0, 4.0])
print repr(q)
# Point([0.25, 0.5, 1.0, 0.5])
print repr(p + Vector(1, 2, 3))
# Point([2.0, 4.0, 6.0])
print repr(q + Vector(1, 2, 3))
# Point([1.5, 3.0, 5.0])
print repr(q.cartesian() + Vector(1, 2, 3))
# Point([1.5, 3.0, 5.0])
print repr(p - q)
# Vector([0.5, 1.0, 1.0])
print repr(p - q.cartesian())
# Vector([0.5, 1.0, 1.0])
print repr(q - p)
# Vector([-0.5, -1.0, -1.0])
print repr(p - (p - q))
# Point([0.5, 1.0, 2.0])
print repr(Vector(p) * Vector(q))
# 4.25
print repr(p * q)
# 4.25
print repr(p / q) # need explicit homogenize as division not handled by api
# Point([4.0, 4.0, 3.0, 2.0]) TODO : what do we want here ?
# Vector([2.0, 2.0, 1.5])
# additional methods
print "p = Point(x=1, y=2, z=3)"
p = Point(x=1, y=2, z=3)
print p.length()
# 3.74165738677
print p[:1].length()
# 1.0
print p[:2].length()
# 2.2360679775
print p[:3].length()
# 3.74165738677
p = Point(1.0, 0.0, 0.0)
q = Point(0.707, 0.0, -0.707)
print repr(p)
# Point([1.0, 0.0, 0.0, 1.0])
print repr(q)
# Point([0.707, 0.0, -0.707, 1.0])
print repr(q - p)
# Vector([-0.293, 0.0, -0.707])
print repr(axis(Point.origin, p, q))
# Vector([-0.0, 0.707, 0.0])
print repr(Point.origin.axis(p, q))
# Vector([-0.0, 0.707, 0.0])
print repr(Point.origin.axis(q, p))
# Vector([0.0, -0.707, 0.0])
print angle(Point.origin, p, q)
# 0.785398163397
print angle(Point.origin, q, p)
# 0.785398163397
print Point.origin.angle(p, q)
# 0.785398163397
print p.distanceTo(q)
# 0.765309087885
print (q - p).length()
# 0.765309087885
print cotan(Point.origin, p, q)
# 1.0
# obviously True
print planar(Point.origin, p, q)
# True
r = center(Point.origin, p, q)
print repr(r)
# Point([0.569, 0.0, -0.235666666667, 1.0])
print planar(Point.origin, p, q, r)
# True
print planar(Point.origin, p, q, r + Vector(0.0, 0.1, 0.0))
# False
print bWeights(r, Point.origin, p, q)
# (0.33333333333333337, 0.33333333333333331, 0.33333333333333343)
p = Point([0.33333, 0.66666, 1.333333, 0.33333])
print repr(round(p, 3))
# Point([0.333, 0.667, 1.333, 0.333])
print "end tests Point"
def _testMColor():
print "Color class", dir(Color)
print hasattr(Color, 'data')
c = Color()
print repr(c)
# Color([0.0, 0.0, 0.0, 1.0])
print "Color instance", dir(c)
print hasattr(c, 'data')
print repr(c.data)
# Color([0.0, 0.0, 0.0, 1.0])
c = Color(_api.MColor())
print repr(c)
# Color([0.0, 0.0, 0.0, 1.0])
# using api convention of single value would mean alpha
# instead of VectorN convention of filling all with value
# which would yield # Color([0.5, 0.5, 0.5, 0.5]) instead
# This would break coerce behavior for Color
print "c = Color(0.5)"
c = Color(0.5)
print repr(c)
# Color([0.5, 0.5, 0.5, 0.5])
print "c = round(Color(128, quantize=255), 2)"
c = Color(128, quantize=255)
print repr(c)
# Color([0.501999974251, 0.501999974251, 0.501999974251, 0.501999974251])
c = Color(255, 128, b=64, a=32, quantize=255)
print repr(c)
# Color([1.0 0.501999974251 0.250999987125 0.125490196078])
print "c = Color(1, 1, 1)"
c = Color(1, 1, 1)
print repr(c)
# Color([1.0, 1.0, 1.0, 1.0])
print "c = round(Color(255, 0, 255, g=128, quantize=255, mode='rgb'), 2)"
c = round(Color(255, 0, 255, g=128, quantize=255, mode='rgb'), 2)
print repr(c)
# Color([1.0, 0.5, 1.0, 1.0])
print "c = round(Color(255, b=128, quantize=255, mode='rgb'), 2)"
c = round(Color(255, b=128, quantize=255, mode='rgb'), 2)
print repr(c)
# Color([1.0, 1.0, 0.5, 1.0])
print "c = Color(1, 0.5, 2, 0.5)"
c = Color(1, 0.5, 2, 0.5)
print repr(c)
# Color([1.0, 0.5, 2.0, 0.5])
print "c = Color(0, 65535, 65535, quantize=65535, mode='hsv')"
c = Color(0, 65535, 65535, quantize=65535, mode='hsv')
print repr(c)
# Color([1.0, 0.0, 0.0, 1.0])
print "c.rgb"
print repr(c.rgb)
# (1.0, 0.0, 0.0)
print "c.hsv"
print repr(c.hsv)
# (0.0, 1.0, 1.0)
d = Color(c, v=0.5, mode='hsv')
print repr(d)
# Color([0.5, 0.0, 0.0, 1.0])
print repr(d.hsv)
# (0.0, 1.0, 0.5)
print "c = Color(Color.blue, v=0.5)"
c = Color(Color.blue, v=0.5)
print repr(c)
# Color([0.0, 0.0, 0.5, 1.0])
print "c.hsv"
print c.hsv
# (0.66666666666666663, 1.0, 0.5)
c.r = 1.0
print repr(c)
# Color([1.0, 0.0, 0.5, 1.0])
print "c.hsv"
print c.hsv
# (0.91666666666666663, 1.0, 1.0)
print "c = Color(1, 0.5, 2, 0.5).clamp()"
c = Color(1, 0.5, 2, 0.5).clamp()
print repr(c)
# Color([1.0, 0.5, 1.0, 0.5])
print c.hsv
# (0.83333333333333337, 0.5, 1.0)
print "Color(c, v=0.5)"
d = Color(c, v=0.5)
print repr(d)
# Color([0.5, 0.25, 0.5, 0.5])
print "d.hsv"
print d.hsv
# (0.83333333333333337, 0.5, 0.5)
print "c = Color(0.0, 0.5, 1.0, 0.5)"
c = Color(0.0, 0.5, 1.0, 0.5)
print repr(c)
# Color(0.0, 0.5, 1.0, 0.5)
print "d = c.gamma(2.0)"
d = c.gamma(2.0)
print repr(d)
# Color([0.0, 0.25, 1.0, 0.5])
print "c = Color.red.blend(Color.blue, 0.5)"
c = Color.red.blend(Color.blue, 0.5)
print repr(c)
# Color([0.5, 0.0, 0.5, 1.0])
print c.hsv
# (0.83333333333333337, 1.0, 0.5)
c = Color.red.hsvblend(Color.blue, 0.5)
print repr(c)
# Color([1.0, 0.0, 1.0, 1.0])
print c.hsv
# (0.83333333333333337, 1.0, 1.0)
print "c = Color(0.25, 0.5, 0.75, 0.5)"
c = Color(0.25, 0.5, 0.75, 0.5)
print repr(c)
# Color([0.25, 0.5, 0.75, 0.5])
print "d = Color.black"
d = Color.black
print repr(d)
# Color([0.0, 0.0, 0.0, 1.0])
print "c.over(d)"
print repr(c.over(d))
# Color([0.125, 0.25, 0.375, 1.0])
print "d.over(c)"
print repr(d.over(c))
# Color([0.0, 0.0, 0.0, 0.5])
print "c.premult()"
print repr(c.premult())
# Color([0.125, 0.25, 0.375, 1.0])
# inherited from Vector
print "c = Color(0.25, 0.5, 1.0, 1.0)"
c = Color(0.25, 0.5, 1.0, 1.0)
print repr(c)
# Color([0.25, 0.5, 1.0, 1.0])
print "d = Color(2.0, 1.0, 0.5, 0.25)"
d = Color(2.0, 1.0, 0.5, 0.25)
print repr(d)
# Color([2.0, 1.0, 0.5, 0.25])
print "-c"
print repr(-c)
# Color([-0.25, -0.5, -1.0, 1.0])
print "e = c*d"
e = c * d
print repr(e)
# Color([0.5, 0.5, 0.5, 0.25])
print "e + 2"
print repr(e + 2)
# Color([2.5, 2.5, 2.5, 0.25])
print "e * 2.0" # mult by scalar float is defined in api for colors and also multiplies alpha
print repr(e * 2.0)
# Color([1.0, 1.0, 1.0, 0.5])
print "e / 2.0" # as is divide, that ignores alpha now for some reason
print repr(e / 2.0)
# Color([0.25, 0.25, 0.25, 0.25])
print "e+Vector(1, 2, 3)"
print repr(e + Vector(1, 2, 3))
# Color([1.5, 2.5, 3.5, 0.25])
# how to handle operations on colors ?
# here behaves like api but does it make any sense
# for colors as it is now ?
print "c+c"
print repr(c + c)
# Color([0.5, 1.0, 2.0, 1.0])
print "c+d"
print repr(c + d)
# Color([2.25, 1.5, 1.5, 1.0])
print "d-c"
print repr(d - c)
# Color([1.75, 0.5, -0.5, 0.25])
print "end tests Color"
def _testMMatrix():
print "Matrix class", dir(Matrix)
m = Matrix()
print m.formated()
#[[1.0, 0.0, 0.0, 0.0],
# [0.0, 1.0, 0.0, 0.0],
# [0.0, 0.0, 1.0, 0.0],
# [0.0, 0.0, 0.0, 1.0]]
print m[0, 0]
# 1.0
print repr(m[0:2, 0:3])
# [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
print m(0, 0)
# 1.0
print "Matrix instance:", dir(m)
print Matrix.__readonly__
print Matrix.__slots__
print Matrix.shape
print Matrix.ndim
print Matrix.size
print m.shape
print m.ndim
print m.size
# should fail
m.shape = (4, 4)
m.shape = 2
print dir(Space)
m = Matrix.identity
# inherits from MatrixN --> Array
print isinstance(m, MatrixN)
# True
print isinstance(m, Array)
# True
# as well as _api.Matrix
print isinstance(m, _api.MMatrix)
# True
# accepted directly by API methods
n = _api.MMatrix()
m = n.setToProduct(m, m)
print repr(m)
print repr(n)
# inits
m = Matrix(range(16))
print m.formated()
#[[0.0, 1.0, 2.0, 3.0],
# [4.0, 5.0, 6.0, 7.0],
# [8.0, 9.0, 10.0, 11.0],
# [12.0, 13.0, 14.0, 15.0]]
M = Array(range(16), shape=(8, 2))
m = Matrix(M)
print m.formated()
#[[0.0, 1.0, 2.0, 3.0],
# [4.0, 5.0, 6.0, 7.0],
# [8.0, 9.0, 10.0, 11.0],
# [12.0, 13.0, 14.0, 15.0]]
M = MatrixN(range(9), shape=(3, 3))
m = Matrix(M)
print m.formated()
#[[0.0, 1.0, 2.0, 0.0],
# [3.0, 4.0, 5.0, 0.0],
# [6.0, 7.0, 8.0, 0.0],
# [0.0, 0.0, 0.0, 1.0]]
# inherits from MatrixN --> Array
print isinstance(m, MatrixN)
# True
print isinstance(m, Array)
# True
# as well as _api.Matrix
print isinstance(m, _api.MMatrix)
# True
# accepted directly by API methods
n = _api.MMatrix()
m = n.setToProduct(m, m)
print repr(m)
print repr(n)
t = _api.MTransformationMatrix()
t.setTranslation(Vector(1, 2, 3), _api.MSpace.kWorld)
m = Matrix(t)
print m.formated()
#[[1.0, 0.0, 0.0, 0.0],
# [0.0, 1.0, 0.0, 0.0],
# [0.0, 0.0, 1.0, 0.0],
# [1.0, 2.0, 3.0, 1.0]]
m = Matrix(m, a30=10)
print m.formated()
#[[1.0, 0.0, 0.0, 0.0],
# [0.0, 1.0, 0.0, 0.0],
# [0.0, 0.0, 1.0, 0.0],
# [10.0, 2.0, 3.0, 1.0]]
# should fail
print "Matrix(range(20)"
try:
m = Matrix(range(20))
print m.formated()
except:
print "will raise ValueError: cannot initialize a Matrix of shape (4, 4) from (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19), some information would be lost, use an explicit resize or trim"
m = Matrix.identity
M = m.trimmed(shape=(3, 3))
print repr(M)
# MatrixN([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
print M.formated()
#[[1.0, 0.0, 0.0],
# [0.0, 1.0, 0.0],
# [0.0, 0.0, 1.0]]
try:
m.trim(shape=(3, 3))
except:
print "will raise TypeError: new shape (3, 3) is not compatible with class Matrix"
print m.nrow
# 4
print m.ncol
# 4
# should fail
try:
m.nrow = 3
except:
print "will raise TypeError: new shape (3, 4) is not compatible with class Matrix"
print list(m.row)
# [Array([1.0, 0.0, 0.0, 0.0]), Array([0.0, 1.0, 0.0, 0.0]), Array([0.0, 0.0, 1.0, 0.0]), Array([0.0, 0.0, 0.0, 1.0])]
print list(m.col)
# [Array([1.0, 0.0, 0.0, 0.0]), Array([0.0, 1.0, 0.0, 0.0]), Array([0.0, 0.0, 1.0, 0.0]), Array([0.0, 0.0, 0.0, 1.0])]
m = Matrix(MatrixN(range(9), shape=(3, 3)).trimmed(shape=(4, 4), value=10))
print m.formated()
#[[0.0, 1.0, 2.0, 10.0],
# [3.0, 4.0, 5.0, 10.0],
# [6.0, 7.0, 8.0, 10.0],
# [10.0, 10.0, 10.0, 10.0]]
print m.get()
# ((0.0, 1.0, 2.0, 10.0), (3.0, 4.0, 5.0, 10.0), (6.0, 7.0, 8.0, 10.0), (10.0, 10.0, 10.0, 10.0))
print repr(m[0])
# [0.0, 1.0, 2.0, 10.0]
m[0] = 10
print m.formated()
#[[10.0, 10.0, 10.0, 10.0],
# [3.0, 4.0, 5.0, 10.0],
# [6.0, 7.0, 8.0, 10.0],
# [10.0, 10.0, 10.0, 10.0]]
print (10 in m)
# True
print list(m)
# [Array([10.0, 10.0, 10.0, 10.0]), Array([3.0, 4.0, 5.0, 10.0]), Array([6.0, 7.0, 8.0, 10.0]), Array([10.0, 10.0, 10.0, 10.0])]
print list(m.flat)
# [10.0, 10.0, 10.0, 10.0, 3.0, 4.0, 5.0, 10.0, 6.0, 7.0, 8.0, 10.0, 10.0, 10.0, 10.0, 10.0]
u = Vector.xAxis
v = Vector.yAxis
print Vector.xAxis
print str(Vector.xAxis)
print unicode(Vector.xAxis)
print repr(Vector.xAxis)
print "u = Vector.xAxis:"
print repr(u)
# trans matrix : t: 1, 2, 3, r: 45, 90, 30, s: 0.5, 1.0, 2.0
m = Matrix([0.0, 4.1633363423443383e-17, -0.5, 0.0, 0.25881904510252079, 0.96592582628906831, 1.3877787807814459e-16, 0.0, 1.9318516525781366, -0.51763809020504159, 0.0, 0.0, 1.0, 2.0, 3.0, 1.0])
print "m:"
print round(m, 2).formated()
#[[0.0, 0.0, -0.5, 0.0],
# [0.26, 0.97, 0.0, 0.0],
# [1.93, -0.52, 0.0, 0.0],
# [1.0, 2.0, 3.0, 1.0]]
x = Vector.xAxis
y = Vector.yAxis
z = Vector.zAxis
u = Vector(1, 2, 3)
print "u:"
print repr(u)
# Vector([1, 2, 3])
print "u*m"
print repr(u * m)
# Vector([6.31319304794, 0.378937381963, -0.5])
print "m*u"
print repr(m * u)
# Vector([-1.5, 2.19067069768, 0.896575472168])
p = Point(1, 10, 100, 1)
print "p:"
print repr(p)
# Point([1.0, 10.0, 100.0, 1.0])
print "p*m"
print repr(p * m)
# Point([196.773355709, -40.1045507576, 2.5, 1.0])
print "m*p"
print repr(m * p)
# Point([-50.0, 9.91807730799, -3.24452924947, 322.0])
print "v = [1, 2, 3]*m"
v = VectorN([1, 2, 3]) * m
print repr(v)
# VectorN([6.31319304794, 0.378937381963, -0.5])
print "v = [1, 2, 3, 1]*m"
v = VectorN([1, 2, 3, 1]) * m
print repr(v)
# VectorN([7.31319304794, 2.37893738196, 2.5, 1.0])
# should fail
print "VectorN([1, 2, 3, 4, 5])*m"
try:
v = VectorN([1, 2, 3, 4, 5]) * m
except:
print "Will raise ValueError: vector of size 5 and matrix of shape (4, 4) are not conformable for a VectorN * MatrixN multiplication"
    # inherited
print "m = Matrix(range(1, 17))"
m = Matrix(range(1, 17))
print m.formated()
#[[1.0, 2.0, 3.0, 4.0],
# [5.0, 6.0, 7.0, 8.0],
# [9.0, 10.0, 11.0, 12.0],
# [13.0, 14.0, 15.0, 16.0]]
# element wise
print "[1, 10, 100]*m"
print repr([1, 10, 100] * m)
# Matrix([[1.0, 20.0, 300.0, 0.0], [5.0, 60.0, 700.0, 0.0], [9.0, 100.0, 1100.0, 0.0], [13.0, 140.0, 1500.0, 0.0]])
print "M = MatrixN(range(20), shape=(4, 5))"
M = MatrixN(range(1, 21), shape=(4, 5))
print M.formated()
#[[1, 2, 3, 4, 5],
# [6, 7, 8, 9, 10],
# [11, 12, 13, 14, 15],
# [16, 17, 18, 19, 20]]
print "m*M"
n = m * M
print (n).formated()
#[[110.0, 120.0, 130.0, 140.0, 150.0],
# [246.0, 272.0, 298.0, 324.0, 350.0],
# [382.0, 424.0, 466.0, 508.0, 550.0],
# [518.0, 576.0, 634.0, 692.0, 750.0]]
print util.clsname(n)
# MatrixN
print "m*2"
n = m * 2
print (n).formated()
#[[2.0, 4.0, 6.0, 8.0],
# [10.0, 12.0, 14.0, 16.0],
# [18.0, 20.0, 22.0, 24.0],
# [26.0, 28.0, 30.0, 32.0]]
print util.clsname(n)
# Matrix
print "2*m"
n = 2 * m
print (n).formated()
#[[2.0, 4.0, 6.0, 8.0],
# [10.0, 12.0, 14.0, 16.0],
# [18.0, 20.0, 22.0, 24.0],
# [26.0, 28.0, 30.0, 32.0]]
print util.clsname(n)
# Matrix
print "m+2"
n = m + 2
print (n).formated()
#[[3.0, 4.0, 5.0, 6.0],
# [7.0, 8.0, 9.0, 10.0],
# [11.0, 12.0, 13.0, 14.0],
# [15.0, 16.0, 17.0, 18.0]]
print util.clsname(n)
# Matrix
print "2+m"
n = 2 + m
print (n).formated()
#[[3.0, 4.0, 5.0, 6.0],
# [7.0, 8.0, 9.0, 10.0],
# [11.0, 12.0, 13.0, 14.0],
# [15.0, 16.0, 17.0, 18.0]]
print util.clsname(n)
# Matrix
try:
m.setToProduct(m, M)
except:
print """Will raise TypeError: cannot initialize a Matrix of shape (4, 4) from (Array([0, 1, 2, 3, 4]), Array([5, 6, 7, 8, 9]), Array([10, 11, 12, 13, 14]), Array([15, 16, 17, 18, 19])) of shape (4, 5),
as it would truncate data or reduce the number of dimensions"""
print m.isEquivalent(m * M)
# False
# trans matrix : t: 1, 2, 3, r: 45, 90, 30, s: 0.5, 1.0, 2.0
m = Matrix([0.0, 4.1633363423443383e-17, -0.5, 0.0, 0.25881904510252079, 0.96592582628906831, 1.3877787807814459e-16, 0.0, 1.9318516525781366, -0.51763809020504159, 0.0, 0.0, 1.0, 2.0, 3.0, 1.0])
print "m:"
print round(m, 2).formated()
#[[0.0, 0.0, -0.5, 0.0],
# [0.26, 0.97, 0.0, 0.0],
# [1.93, -0.52, 0.0, 0.0],
# [1.0, 2.0, 3.0, 1.0]]
print "m.transpose():"
print round(m.transpose(), 2).formated()
#[[0.0, 0.26, 1.93, 1.0],
# [0.0, 0.97, -0.52, 2.0],
# [-0.5, 0.0, 0.0, 3.0],
# [0.0, 0.0, 0.0, 1.0]]
print "m.isSingular():"
print m.isSingular()
# False
print "m.inverse():"
print round(m.inverse(), 2).formated()
#[[0.0, 0.26, 0.48, 0.0],
# [0.0, 0.97, -0.13, 0.0],
# [-2.0, 0.0, 0.0, 0.0],
# [6.0, -2.19, -0.22, 1.0]]
print "m.adjoint():"
print round(m.adjoint(), 2).formated()
#[[0.0, 0.26, 0.48, 0.0],
# [0.0, 0.97, -0.13, 0.0],
# [-2.0, 0.0, -0.0, 0.0],
# [6.0, -2.19, -0.22, 1.0]]
print "m.adjugate():"
print round(m.adjugate(), 2).formated()
#[[0.0, 0.26, 0.48, 0.0],
# [0.0, 0.97, -0.13, 0.0],
# [-2.0, 0.0, -0.0, 0.0],
# [6.0, -2.19, -0.22, 1.0]]
print "m.homogenize():"
print round(m.homogenize(), 2).formated()
#[[0.0, 0.0, -1.0, 0.0],
# [0.26, 0.97, 0.0, 0.0],
# [0.97, -0.26, -0.0, 0.0],
# [1.0, 2.0, 3.0, 1.0]]
print "m.det():"
print m.det()
# 1.0
print "m.det4x4():"
print m.det4x4()
# 1.0
print "m.det3x3():"
print m.det3x3()
# 1.0
print "m.weighted(0.5):"
print round(m.weighted(0.5), 2).formated()
#[[0.53, 0.0, -0.53, 0.0],
# [0.09, 0.99, 0.09, 0.0],
# [1.05, -0.2, 1.05, 0.0],
# [0.5, 1.0, 1.5, 1.0]]
print "m.blend(Matrix.identity, 0.5):"
print round(m.blend(Matrix.identity, 0.5), 2).formated()
#[[0.53, 0.0, -0.53, 0.0],
# [0.09, 0.99, 0.09, 0.0],
# [1.05, -0.2, 1.05, 0.0],
# [0.5, 1.0, 1.5, 1.0]]
print "end tests Matrix"
def _testMTransformationMatrix():
q = Quaternion()
print repr(q)
# Quaternion([0.0, 0.0, 0.0, 1.0])
q = Quaternion(1, 2, 3, 0.5)
print repr(q)
# Quaternion([1.0, 2.0, 3.0, 0.5])
q = Quaternion(0.785, 0.785, 0.785, "xyz")
print repr(q)
# Quaternion([0.191357439088, 0.461717715523, 0.191357439088, 0.844737481223])
m = Matrix()
m.rotate = q
print repr(m)
# Matrix([[0.500398163355, 0.499999841466, -0.706825181105, 0.0], [-0.146587362969, 0.853529322022, 0.499999841466, 0.0], [0.853295859083, -0.146587362969, 0.500398163355, 0.0], [0.0, 0.0, 0.0, 1.0]])
print "TransformationMatrix class", dir(TransformationMatrix)
m = TransformationMatrix()
print m.formated()
#[[1.0, 0.0, 0.0, 0.0],
# [0.0, 1.0, 0.0, 0.0],
# [0.0, 0.0, 1.0, 0.0],
# [0.0, 0.0, 0.0, 1.0]]
print m[0, 0]
# 1.0
print m[0:2, 0:3]
# [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
print "TransformationMatrix instance:", dir(m)
print TransformationMatrix.__readonly__
print TransformationMatrix.__slots__
print TransformationMatrix.shape
print TransformationMatrix.ndim
print TransformationMatrix.size
print m.shape
print m.ndim
print m.size
# should fail
m.shape = (4, 4)
m.shape = 2
print dir(Space)
m = TransformationMatrix.identity
# inherits from MatrixN --> Array
print isinstance(m, MatrixN)
# True
print isinstance(m, Array)
# True
# as well as _api.TransformationMatrix and _api.Matrix
print isinstance(m, _api.MTransformationMatrix)
# True
print isinstance(m, _api.MMatrix)
# True
# accepted directly by API methods
n = _api.MMatrix()
n = n.setToProduct(m, m)
print repr(n)
n = _api.MTransformationMatrix()
n = n.assign(m)
print repr(n)
m = TransformationMatrix.identity
m.rotation = Quaternion()
print repr(m)
print m.formated()
n = TransformationMatrix.identity
n.translation = Vector(1, 2, 3)
print n.formated()
print repr(n)
o = m * n
print repr(o)
print o.formated()
print "end tests TransformationMatrix"
if __name__ == '__main__':
print Distance.getInternalUnit()
# centimeters
print Distance.getUIUnit()
# centimeters
Distance.setUIUnit('meters')
print Distance.getUIUnit()
# meters
d = Distance(12)
print d.unit
# meters
    print d
    # 1200.0
    print repr(d)
    # Distance(12.0, unit='meters')
    print d.asUnit()
    # 12.0
    print d.asInternalUnit()
    # 1200.0
import doctest
doctest.testmod(verbose=True)
_testMVector()
_testMPoint()
_testMColor()
_testMMatrix()
_testMTransformationMatrix()
|
py | 1a301ff37e88e50ec5a25135e53b40398b1416e6 | # report_tests.py
import unittest
import reporting as report
from unittest.mock import MagicMock, Mock
class ReportTestSolver(unittest.TestCase):
"""[summary]
Args:
unittest ([type]): [description]
"""
def nodes_error_reporting_tests(self):
"""[summary]
"""
report.nodes_error_reporting()
def error_folder_created(self):
"""[summary]
"""
pass
def error_report_created(self):
"""[summary]
"""
pass
def plot_missing_data_tests(self):
"""[summary]
"""
report.plot_missing_data()
def list_unique_values_tests(self):
"""[summary]
"""
report.list_unique_values()
def print_column_info_tests(self):
"""[summary]
"""
report.print_column_info()
def visualise_missing_counts_tests(self):
"""[summary]
"""
report.visualise_missing_counts()
|
py | 1a302137155acd572965bfc0e46523be33b80e40 | from .probe import Probe
from .utils import most_frequent, process_dict_list, merge_dicts
"""
Analyses a group of clips.
"""
class Analysis:
def __init__(self, clips=[]):
self.clips = clips
def summary(self):
file_summary = []
for clip in self.clips:
summary = Probe(clip).run().extract_summary()
file_summary.append(summary)
final_list = None
for item in file_summary:
            if final_list is None:
final_list = item
else:
final_list = merge_dicts(final_list, item)
return process_dict_list(final_list, most_frequent)
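# Hedged usage sketch (not part of the original module; behaviour of Probe is
# assumed from the imports above): Probe(clip).run().extract_summary() is
# expected to return a dict of per-clip properties, which summary() merges and
# reduces to the most frequent value per key, e.g.
#
#   analysis = Analysis(clips=["a.mp4", "b.mp4", "c.mp4"])
#   common = analysis.summary()   # e.g. {'codec': 'h264', 'width': 1920, ...}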
|
py | 1a3021b2904eedd3d9e140c1608d5f065fa72de3 | from enum import Enum
class TaskLogonTypeEnum(Enum):
""" """
TASK_LOGON_NONE = 0
TASK_LOGON_PASSWORD = 1
TASK_LOGON_S4U = 2
TASK_LOGON_INTERACTIVE_TOKEN = 3
TASK_LOGON_GROUP = 4
TASK_LOGON_SERVICE_ACCOUNT = 5
TASK_LOGON_INTERACTIVE_TOKEN_OR_PASSWORD = 6
|
py | 1a3021eeb8268191c79085cea1a6a5a95d56b66d | ##############################################################################
# Copyright (c) 2017 ZTE Corp and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import os
import pytest
from deploy.post.keystoneauth import Keystoneauth
@pytest.mark.parametrize('openrc, expected', [
('/etc/kolla/admin-openrc.sh', '/etc/kolla/admin-openrc.sh'),
(None, '/etc/kolla/admin-openrc.sh')])
def test_create_Keystoneauth_instance(openrc, expected):
KeystoneClient = Keystoneauth(openrc)
assert KeystoneClient.openrc == expected
@pytest.mark.parametrize('raws, expected', [
(
{
'OS_USERNAME': 'admin',
'OS_PASSWORD': 'keystone',
'OS_AUTH_URL': 'http://10.20.11.11:35357/v3',
'OS_TENANT_NAME': 'admin',
'OS_USER_DOMAIN_NAME': 'Default',
'OS_PROJECT_DOMAIN_NAME': 'Default',
'OS_PROJECT_NAME': 'admin',
'OS_INTERFACE': 'internal',
'OS_IDENTITY_API_VERSION': 'region_name'
},
{
'username': 'admin',
'password': 'keystone',
'auth_url': 'http://10.20.11.11:35357/v3',
'tenant_name': 'admin',
'user_domain_name': 'Default',
'project_domain_name': 'Default',
'project_name': 'admin'
}),
(
{
'OS_USERNAME': 'admin',
'OS_PASSWORD': 'keystone',
'OS_AUTH_URL': 'http://10.20.11.11:35357/v3',
'OS_TENANT_NAME': 'admin',
'OS_USER_DOMAIN_NAME': 'Default',
'OS_PROJECT_DOMAIN_NAME': 'Default',
'OS_PROJECT_NAME': 'admin',
'OS_ENDPOINT_TYPE': 'Default',
'OS_REGION_NAME': 'Default'
},
{
'username': 'admin',
'password': 'keystone',
'auth_url': 'http://10.20.11.11:35357/v3',
'tenant_name': 'admin',
'user_domain_name': 'Default',
'project_domain_name': 'Default',
'project_name': 'admin',
'endpoint_type': 'Default',
'region_name': 'Default'
}
)])
def test__parse_credentials_in_Keystoneauth(raws, expected):
assert Keystoneauth._parse_credentials(raws) == expected
@pytest.fixture(scope="session")
def openrc_conf_file_dir(data_root):
return os.path.join(data_root, 'openrc_conf')
def test_session(openrc_conf_file_dir):
openrc = os.path.join(openrc_conf_file_dir, 'admin-openrc.sh')
KeystoneClient = Keystoneauth(openrc)
assert KeystoneClient.session
@pytest.mark.parametrize('openrc_file_name, expected', [
(
'admin-openrc.sh',
{
'OS_PROJECT_DOMAIN_NAME': 'Default',
'OS_USER_DOMAIN_NAME': 'Default',
'OS_PROJECT_NAME': 'admin',
'OS_TENANT_NAME': 'admin',
'OS_USERNAME': 'admin',
'OS_PASSWORD': 'keystone',
'OS_AUTH_URL': 'http://10.20.11.11:35357/v3',
'OS_INTERFACE': 'internal',
'OS_IDENTITY_API_VERSION': '3'
}
)])
def test__parse_openrc(openrc_conf_file_dir, openrc_file_name, expected):
openrc = os.path.join(openrc_conf_file_dir, openrc_file_name)
KeystoneClient = Keystoneauth(openrc)
ret_openrc_dict = KeystoneClient._parse_openrc()
assert expected == ret_openrc_dict
@pytest.mark.parametrize('openrc_file_name', [
(
'admin-openrc.sh'
)])
def test__get_auth(openrc_conf_file_dir, openrc_file_name,):
openrc = os.path.join(openrc_conf_file_dir, openrc_file_name)
KeystoneClient = Keystoneauth(openrc)
assert KeystoneClient._get_auth()
|
py | 1a3022641185cea50540e9df4b5653ac22fb94cf | import re
from os.path import *
import cv2
import numpy as np
import torch.nn.functional as F
from PIL import Image
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
TAG_CHAR = np.array([202021.25], np.float32)
def read_flow_middlebury(fn):
"""
Read .flo file in Middlebury format
Parameters
-----------
fn : str
Absolute path to flow file
Returns
--------
flow : np.ndarray
Optical flow map
"""
# Code adapted from:
# http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
# WARNING: this will work on little-endian architectures (eg Intel x86) only!
# print 'fn = %s'%(fn)
with open(fn, "rb") as f:
magic = np.fromfile(f, np.float32, count=1)
if 202021.25 != magic:
print("Magic number incorrect. Invalid .flo file")
return None
else:
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
# print 'Reading %d x %d flo file\n' % (w, h)
data = np.fromfile(f, np.float32, count=2 * int(w) * int(h))
# Reshape data into 3D array (banda, columns, rows)
return np.resize(data, (int(h), int(w), 2))
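# Hedged reference for the .flo layout parsed above: a 4-byte float magic
# (202021.25), then int32 width and int32 height, then width*height*2 float32
# values stored row-major as interleaved (u, v) per pixel. Typical use
# (illustrative filename):
#
#   flow = read_flow_middlebury("frame_0001.flo")   # -> (H, W, 2) ndarray or None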
def read_flow_pfm(file):
"""
Read optical flow from a .pfm file
Parameters
-----------
file : str
Path to flow file
Returns
--------
flow : np.ndarray
Optical flow map
"""
file = open(file, "rb")
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header == b"PF":
color = True
elif header == b"Pf":
color = False
else:
raise Exception("Not a PFM file.")
dim_match = re.match(rb"^(\d+)\s(\d+)\s$", file.readline())
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception("Malformed PFM header.")
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = "<"
scale = -scale
else:
endian = ">" # big-endian
data = np.fromfile(file, endian + "f")
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data
def read_flow_png(filename):
"""
Read optical flow from a png file.
Parameters
-----------
filename : str
Path to flow file
Returns
--------
flow : np.ndarray
Optical flow map
valid : np.ndarray
Valid flow map
"""
flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)
flow = flow[:, :, ::-1].astype(np.float32)
flow, valid = flow[:, :, :2], flow[:, :, 2]
flow = (flow - 2**15) / 64.0
return flow, valid
def write_flow(filename, uv, v=None):
"""Write optical flow to file.
If v is None, uv is assumed to contain both u and v channels,
stacked in depth.
Parameters
----------
filename : str
Path to file
uv : np.ndarray
Optical flow
v : np.ndarray, optional
Optional second channel
"""
# Original code by Deqing Sun, adapted from Daniel Scharstein.
n_bands = 2
if v is None:
assert uv.ndim == 3
assert uv.shape[2] == 2
u = uv[:, :, 0]
v = uv[:, :, 1]
else:
u = uv
assert u.shape == v.shape
height, width = u.shape
f = open(filename, "wb")
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
# arrange into matrix form
tmp = np.zeros((height, width * n_bands))
tmp[:, np.arange(width) * 2] = u
tmp[:, np.arange(width) * 2 + 1] = v
tmp.astype(np.float32).tofile(f)
f.close()
def read_image(file_name):
"""
Read images from a variety of file formats
Parameters
-----------
file_name : str
Path to flow file
Returns
--------
flow : np.ndarray
Optical flow map
"""
ext = splitext(file_name)[-1]
if ext == ".png" or ext == ".jpeg" or ext == ".ppm" or ext == ".jpg":
return Image.open(file_name)
elif ext == ".bin" or ext == ".raw":
return np.load(file_name)
return []
def read_flow(file_name):
"""
Read ground truth flow from a variety of file formats
Parameters
-----------
file_name : str
Path to flow file
Returns
--------
flow : np.ndarray
Optical flow map
valid : None if .flo and .pfm files else np.ndarray
Valid flow map
"""
ext = splitext(file_name)[-1]
if ext == ".flo":
flow = read_flow_middlebury(file_name).astype(np.float32)
return flow, None
elif ext == ".pfm":
flow = read_flow_pfm(file_name).astype(np.float32)
if len(flow.shape) == 2:
return flow, None
else:
return flow[:, :, :-1], None
elif ext == ".png":
return read_flow_png(file_name)
return []
class InputPadder:
"""
Class to pad / unpad the input to a network with a given padding
Parameters
-----------
dims : tuple
Dimensions of the input
divisor : int
Divisor to make the input evenly divisible by
mode : str
Padding mode
"""
def __init__(self, dims, divisor=8, mode="sintel"):
self.ht, self.wd = dims[-2:]
pad_ht = (((self.ht // divisor) + 1) * divisor - self.ht) % divisor
pad_wd = (((self.wd // divisor) + 1) * divisor - self.wd) % divisor
if mode == "sintel":
self._pad = [
pad_wd // 2,
pad_wd - pad_wd // 2,
pad_ht // 2,
pad_ht - pad_ht // 2,
]
else:
self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, 0, pad_ht]
def pad(self, *inputs):
"""
Pad the input
Parameters
-----------
inputs : list
List of inputs to pad
Returns
--------
list
Padded inputs
"""
return [F.pad(x, self._pad, mode="replicate") for x in inputs]
def unpad(self, x):
"""
Unpad the input
Parameters
-----------
x : torch.Tensor
Input to unpad
Returns
--------
torch.Tensor
Unpadded input
"""
ht, wd = x.shape[-2:]
c = [self._pad[2], ht - self._pad[3], self._pad[0], wd - self._pad[1]]
return x[..., c[0] : c[1], c[2] : c[3]]
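# Hedged usage sketch (model and tensors are placeholders, not part of this
# module): pad a (N, C, H, W) batch so H and W are divisible by 8, run a
# network, then crop the prediction back to the original size.
#
#   padder = InputPadder(img1.shape)
#   img1_p, img2_p = padder.pad(img1, img2)
#   flow_p = model(img1_p, img2_p)      # hypothetical flow network
#   flow = padder.unpad(flow_p)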
|
py | 1a302273664eae240f4d94c2a86f0229ee564ceb | """Norwegian-specific Form helpers."""
from __future__ import unicode_literals
import datetime
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import CharField, Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
from localflavor.generic.forms import DeprecatedPhoneNumberFormFieldMixin
from .no_municipalities import MUNICIPALITY_CHOICES
class NOZipCodeField(RegexField):
"""
A form field that validates input as a Norwegian zip code.
Valid codes have four digits.
"""
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(NOZipCodeField, self).__init__(r'^\d{4}$',
max_length, min_length, *args, **kwargs)
class NOMunicipalitySelect(Select):
"""A Select widget that uses a list of Norwegian municipalities (fylker) as its choices."""
def __init__(self, attrs=None):
super(NOMunicipalitySelect, self).__init__(attrs, choices=MUNICIPALITY_CHOICES)
class NOSocialSecurityNumber(Field):
"""Algorithm is documented at http://no.wikipedia.org/wiki/Personnummer."""
default_error_messages = {
'invalid': _('Enter a valid Norwegian social security number.'),
}
def clean(self, value):
super(NOSocialSecurityNumber, self).clean(value)
if value in EMPTY_VALUES:
return ''
if not re.match(r'^\d{11}$', value):
raise ValidationError(self.error_messages['invalid'])
self.birthday = self._get_birthday(value)
self.gender = self._get_gender(value)
        digits = [int(c) for c in value]  # materialize a list: it is iterated twice below
weight_1 = [3, 7, 6, 1, 8, 9, 4, 5, 2, 1, 0]
weight_2 = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2, 1]
def multiply_reduce(aval, bval):
return sum([(a * b) for (a, b) in zip(aval, bval)])
if multiply_reduce(digits, weight_1) % 11 != 0:
raise ValidationError(self.error_messages['invalid'])
if multiply_reduce(digits, weight_2) % 11 != 0:
raise ValidationError(self.error_messages['invalid'])
return value
def _get_gender(self, value):
sexnum = int(value[8])
if sexnum % 2 == 0:
gender = 'F'
else:
gender = 'M'
return gender
def _get_birthday(self, value):
birthday = None
day = int(value[:2])
month = int(value[2:4])
year2 = int(value[4:6])
inum = int(value[6:9])
try:
if 000 <= inum < 500:
birthday = datetime.date(1900 + year2, month, day)
if 500 <= inum < 750 and year2 > 54:
birthday = datetime.date(1800 + year2, month, day)
if 500 <= inum < 1000 and year2 < 40:
birthday = datetime.date(2000 + year2, month, day)
if 900 <= inum < 1000 and year2 > 39:
birthday = datetime.date(1900 + year2, month, day)
except ValueError:
raise ValidationError(self.error_messages['invalid'])
return birthday
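    # Hedged summary of the checks above: a Norwegian national identity number
    # is DDMMYY + a 3-digit individual number + 2 control digits (11 digits in
    # total). The individual-number range combined with the 2-digit year picks
    # the century, and the parity of the 9th digit encodes gender (even = 'F').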
class NOBankAccountNumber(CharField):
"""
A form field for Norwegian bank account numbers.
Performs MOD11 with the custom weights for the Norwegian bank account numbers,
including a check for a remainder of 0, in which event the checksum is also 0.
Usually their string representation is along the lines of ZZZZ.YY.XXXXX, where the last X is the check digit.
They're always a total of 11 digits long, with 10 out of these 11 being the actual account number itself.
* Accepts, and strips, account numbers with extra spaces.
* Accepts, and strips, account numbers provided in form of XXXX.YY.XXXXX.
.. note:: No consideration is taking for banking clearing numbers as of yet, seeing as these are only used between
banks themselves.
.. versionadded:: 1.5
"""
default_error_messages = {
'invalid': _('Enter a valid Norwegian bank account number.'),
'invalid_checksum': _('Invalid control digit. Enter a valid Norwegian bank account number.'),
'invalid_length': _('Invalid length. Norwegian bank account numbers are 11 digits long.'),
}
def validate(self, value):
super(NOBankAccountNumber, self).validate(value)
        if value == '':
# It's alright to be empty.
return
elif not value.isdigit():
# You must only contain decimals.
raise ValidationError(self.error_messages['invalid'])
        elif len(value) != 11:
# They only have one length: the number is 10!
# That being said, you always store them with the check digit included, so 11.
raise ValidationError(self.error_messages['invalid_length'])
# The control/check digit is the last digit
check_digit = int(value[-1])
bank_number = value[:-1]
# These are the weights by which we multiply to get our checksum digit
weights = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2]
result = sum(w * (int(x)) for w, x in zip(weights, bank_number))
remainder = result % 11
# The checksum is 0 in the event there's no remainder, seeing as we cannot have a checksum of 11
# when 11 is one digit longer than we've got room for
        checksum = 0 if remainder == 0 else 11 - remainder
if checksum != check_digit:
raise ValidationError(self.error_messages['invalid_checksum'])
def to_python(self, value):
value = super(NOBankAccountNumber, self).to_python(value)
return value.replace('.', '').replace(' ', '')
def prepare_value(self, value):
if value in EMPTY_VALUES:
return value
return '{}.{}.{}'.format(value[0:4], value[4:6], value[6:11])
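    # Hedged illustration of the MOD11 check performed in ``validate`` above
    # (the digits are arbitrary, not a real account number):
    #
    #   weights = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2]
    #   digits = [int(c) for c in '1234567890']        # first 10 digits
    #   remainder = sum(w * d for w, d in zip(weights, digits)) % 11
    #   check_digit = 0 if remainder == 0 else 11 - remainder
    #
    # The number validates only if ``check_digit`` equals the 11th digit.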
class NOPhoneNumberField(RegexField, DeprecatedPhoneNumberFormFieldMixin):
"""
Field with phonenumber validation.
Requires a phone number with 8 digits and optional country code
"""
default_error_messages = {
'invalid': _('A phone number must be 8 digits and may have country code'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(NOPhoneNumberField, self).__init__(
r'^(?:\+47)? ?(\d{3}\s?\d{2}\s?\d{3}|\d{2}\s?\d{2}\s?\d{2}\s?\d{2})$',
max_length, min_length, *args, **kwargs)
|
py | 1a30237343fbc293756aaa70da110dd64c11e79d | from __future__ import division, print_function
import numpy as np
from librmm_cffi import librmm as rmm
import cudf._lib as libcudf
from cudf.core import Series
from cudf.core.column import column
def test_gather_single_col():
col = column.as_column(np.arange(100), dtype=np.int32)
gather_map = np.array([0, 1, 2, 3, 5, 8, 13, 21], dtype=np.int32)
device_gather_map = rmm.to_device(gather_map)
out = libcudf.copying.gather(col, device_gather_map)
np.testing.assert_array_equal(out.to_array(), gather_map)
def test_gather_cols():
cols = [
column.as_column(np.arange(10), dtype=np.int32),
column.as_column(np.arange(0.0, 2.0, 0.2), dtype=np.float32),
]
gather_map = np.array([0, 1, 2, 3, 5, 8], dtype=np.int32)
expected = np.array(gather_map * 0.2, dtype=np.float32)
device_gather_map = rmm.to_device(gather_map)
out = libcudf.copying.gather(cols, device_gather_map)
np.testing.assert_array_equal(out[0].to_array(), gather_map)
np.testing.assert_array_almost_equal(out[1].to_array(), expected)
def test_gather_string_col():
col = column.as_column(["a", "b", "c", "d"])
gather_map = column.as_column([0, 2, 3], dtype="int32").data.mem
result = libcudf.copying.gather(col, gather_map)
assert result.data.to_host() == ["a", "c", "d"]
col = column.as_column(["a", "b", None, "d"])
gather_map = column.as_column([0, 2, 3], dtype="int32").data.mem
result = libcudf.copying.gather(col, gather_map)
assert result.data.to_host() == ["a", None, "d"]
def test_null_copy():
col = Series(np.arange(2049))
col[:] = None
assert len(col) == 2049
|
py | 1a302539f41176b68fc899c25e0f9d2336eec703 | #!/usr/bin/env python
from PyZ3950 import zoom
def run ():
conn = zoom.Connection ('amicus.nlc-bnc.ca', 210)
conn.databaseName = 'NL'
q = zoom.Query ('CCL', 'ti="1066"')
ss = conn.scan (q)
for s in ss[0:10]:
print s
if __name__ == '__main__':
run ()
|
py | 1a30266b5b34e7e07dd8b5ba15729b82ce3d31de | import sys,os,random
from phrasemachine import phrasemachine as pm; reload(pm)
text=open("sloths.txt").read()
# text = open("testdata/wine-nltk.txt").read().decode("utf8",'ignore')
# tt=pm.get_stdeng_spacy_tagger()
# d = tt.tag_text(text)  # disabled along with the spacy tagger above so the module imports cleanly
def loop():
while True:
pat = raw_input("Pattern: ")
# p = pm.get_phrases(tokens=d['tokens'], postags=d['pos'], regex=pat, minlen=1)['counts']
p = pm.get_phrases(open("sloths.txt").read(), tagger='spacy', regex=pat, minlen=1)['counts']
phrases = p.keys()
ptok = []
for phrase in phrases:
# print [phrase]
ptok += [phrase]*p[phrase]
if len(ptok) < 10:
xx = ptok
else:
xx = [random.choice(ptok) for i in xrange(10)]
# xx = [random.choice(phrases) for i in xrange(10)]
# print xx
print u', '.join(xx).encode("utf8")
|
py | 1a3026e7f45b00b8eb123506ce710f31ae1ad3e4 | from .Function_Module import Function_Module
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
import selenium
from geopy.geocoders import Nominatim
import time
import os
import pathlib
class get_gps_location(Function_Module):
name = "get_gps_location"
help_description = "Current location"
time_sleep = 2
error = "Sir, I'm sorry I can't get my location."
chrome_not_found_error = "Sir, I can't find the Chrome binaries. Make sure that C:\Program Files(x86)\Google\Chrome\Application\chrome.exe is present!"
def respond(self, entities):
try:
coordinates = self.getLocation(self)
return self.convert_to_location(self, coordinates)
except selenium.common.exceptions.WebDriverException:
return self.chrome_not_found_error
except:
return self.error
def getLocation(self):
chrome_options = Options()
chrome_options.add_argument("--use-fake-ui-for-media-stream")
        # The browser window will be visible: headless mode cannot be used here, because geolocation is not activated in headless Chrome.
# chrome_options.add_argument ("headless")
timeout = 20
        # Directory containing the Chrome driver (required by Selenium): the same directory as this script.
chrome_driver_path = str( str( pathlib.Path(__file__).parent.absolute() ) + r"\chromedriver.exe" )
print("Chrome-Driver Path: ", chrome_driver_path)
driver = webdriver.Chrome(executable_path=chrome_driver_path, chrome_options=chrome_options)
driver.get("https://mycurrentlocation.net/")
wait = WebDriverWait(driver, timeout)
time.sleep(self.time_sleep)
longitude = driver.find_elements_by_xpath('//*[@id="longitude"]')
longitude = [x.text for x in longitude]
longitude = str(longitude[0])
latitude = driver.find_elements_by_xpath('//*[@id="latitude"]')
latitude = [x.text for x in latitude]
latitude = str(latitude[0])
driver.quit()
coordinates = [latitude, longitude]
return coordinates
def convert_to_location(self, coordinates):
geolocator = Nominatim(user_agent="F.R.I.D.A.Y")
location = geolocator.reverse(coordinates[0] + ',' + coordinates[1])
print(location.raw)
# Compose an answer text from the address
address = location.raw['address']
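        # NOTE (hedged): Nominatim does not guarantee the 'road'/'town' keys in
        # every reverse-geocoding result (rural hits may use 'village' or
        # 'city'), so a more defensive version would use address.get(...) with
        # fallbacks instead of direct indexing.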
        # Prefix with "the" when the road name contains "street" or "road"
if("street" in address['road'] or "road" in address['road']):
result = "According to GPS, you are currently in the "+ address['road'] + ', ' + address['town'] + ', ' + address['state'] + ', ' + address['country'] + '.'
else:
result = "According to GPS, you are currently in "+ address['road'] + ', ' + address['town'] + ', ' + address['state'] + ', ' + address['country'] + '.'
return result |
py | 1a3027db99844428cefed2186cc6eaa24fc7c394 | # -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
# Written by Joel Bernier <[email protected]> and others.
# LLNL-CODE-529294.
# All rights reserved.
#
# This file is part of HEXRD. For details on downloading the source,
# see the file COPYING.
#
# Please also see the file LICENSE.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License (as published by the Free
# Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program (see file LICENSE); if not, write to
# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
# =============================================================================
"""
Created on Fri Dec 9 13:05:27 2016
@author: bernier2
"""
import copy
import os
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from functools import partial
import yaml
import h5py
import numpy as np
from io import IOBase
from scipy import ndimage
from scipy.linalg.matfuncs import logm
from hexrd import constants
from hexrd.gridutil import cellConnectivity, cellIndices, make_tolerance_grid
from hexrd import matrixutil as mutil
from hexrd.transforms.xfcapi import \
anglesToGVec, \
angularDifference, \
detectorXYToGvec, \
gvecToDetectorXY, \
makeOscillRotMat, \
makeRotMatOfExpMap, \
mapAngle, \
oscillAnglesOfHKLs, \
rowNorm, \
unitRowVector
from hexrd import xrdutil
from hexrd.crystallography import PlaneData
from hexrd import constants as ct
from hexrd.rotations import angleAxisOfRotMat, RotMatEuler
from hexrd import distortion as distortion_pkg
from hexrd.utils.compatibility import h5py_read_string
from hexrd.utils.concurrent import distribute_tasks
from hexrd.utils.decorators import memoize
from hexrd.valunits import valWUnit
from hexrd.wppf import LeBail
from skimage.draw import polygon
from skimage.util import random_noise
from hexrd.wppf import wppfsupport
try:
from fast_histogram import histogram1d
fast_histogram = True
except(ImportError):
from numpy import histogram as histogram1d
fast_histogram = False
if ct.USE_NUMBA:
import numba
# =============================================================================
# PARAMETERS
# =============================================================================
instrument_name_DFLT = 'instrument'
beam_energy_DFLT = 65.351
beam_vec_DFLT = ct.beam_vec
eta_vec_DFLT = ct.eta_vec
panel_id_DFLT = 'generic'
nrows_DFLT = 2048
ncols_DFLT = 2048
pixel_size_DFLT = (0.2, 0.2)
tilt_params_DFLT = np.zeros(3)
t_vec_d_DFLT = np.r_[0., 0., -1000.]
chi_DFLT = 0.
t_vec_s_DFLT = np.zeros(3)
max_workers_DFLT = max(1, os.cpu_count() - 1)
"""
Calibration parameter flags
for instrument level, len is 7
[beam energy,
beam azimuth,
beam elevation,
chi,
tvec[0],
tvec[1],
tvec[2],
]
"""
instr_calibration_flags_DFLT = np.zeros(7, dtype=bool)
"""
for each panel, order is:
[tilt[0],
tilt[1],
tilt[2],
tvec[0],
tvec[1],
tvec[2],
<dparams>,
]
len is 6 + len(dparams) for each panel
by default, dparams are not set for refinement
"""
panel_calibration_flags_DFLT = np.array(
[1, 1, 1, 1, 1, 1],
dtype=bool
)
buffer_key = 'buffer'
distortion_key = 'distortion'
# =============================================================================
# UTILITY METHODS
# =============================================================================
def _fix_indices(idx, lo, hi):
nidx = np.array(idx)
off_lo = nidx < lo
off_hi = nidx > hi
nidx[off_lo] = lo
nidx[off_hi] = hi
return nidx
def calc_beam_vec(azim, pola):
"""
Calculate unit beam propagation vector from
spherical coordinate spec in DEGREES.
...MAY CHANGE; THIS IS ALSO LOCATED IN XRDUTIL!
"""
tht = np.radians(azim)
phi = np.radians(pola)
bv = np.r_[
np.sin(phi)*np.cos(tht),
np.cos(phi),
np.sin(phi)*np.sin(tht)]
return -bv
def calc_angles_from_beam_vec(bvec):
"""
Return the azimuth and polar angle from a beam
vector
"""
bvec = np.atleast_1d(bvec).flatten()
nvec = unitRowVector(-bvec)
azim = float(
np.degrees(np.arctan2(nvec[2], nvec[0]))
)
pola = float(np.degrees(np.arccos(nvec[1])))
return azim, pola
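# Hedged round-trip example for the two helpers above (angles in DEGREES):
#
#   bvec = calc_beam_vec(90., 90.)       # ~ [0., 0., -1.], the nominal beam direction
#   calc_angles_from_beam_vec(bvec)      # -> approximately (90.0, 90.0)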
def migrate_instrument_config(instrument_config):
"""utility function to generate old instrument config dictionary"""
cfg_list = []
for detector_id in instrument_config['detectors']:
cfg_list.append(
dict(
detector=instrument_config['detectors'][detector_id],
oscillation_stage=instrument_config['oscillation_stage'],
)
)
return cfg_list
def angle_in_range(angle, ranges, ccw=True, units='degrees'):
"""
Return the index of the first wedge the angle is found in
WARNING: always clockwise; assumes wedges are not overlapping
"""
tau = 360.
if units.lower() == 'radians':
tau = 2*np.pi
w = np.nan
for i, wedge in enumerate(ranges):
amin = wedge[0]
amax = wedge[1]
check = amin + np.mod(angle - amin, tau)
if check < amax:
w = i
break
return w
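# Hedged example of the wrapping behaviour above:
#   angle_in_range(370., [(0., 90.), (180., 270.)])   # -> 0 (370 deg wraps to 10 deg)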
# ???: move to gridutil?
def centers_of_edge_vec(edges):
assert np.r_[edges].ndim == 1, "edges must be 1-d"
return np.average(np.vstack([edges[:-1], edges[1:]]), axis=0)
def max_tth(instr):
"""
Return the maximum Bragg angle (in radians) subtended by the instrument.
Parameters
----------
instr : hexrd.instrument.HEDMInstrument instance
the instrument class to evalutate.
Returns
-------
tth_max : float
The maximum observable Bragg angle by the instrument in radians.
"""
tth_max = 0.
for det in instr.detectors.values():
ptth, peta = det.pixel_angles()
tth_max = max(np.max(ptth), tth_max)
return tth_max
def pixel_resolution(instr):
"""
Return the minimum, median, and maximum angular
resolution of the instrument.
Parameters
----------
instr : HEDMInstrument instance
An instrument.
Returns
-------
tth_stats : float
min/median/max tth resolution in radians.
eta_stats : TYPE
min/median/max eta resolution in radians.
"""
max_tth = np.inf
max_eta = np.inf
min_tth = -np.inf
min_eta = -np.inf
ang_ps_full = []
for panel in instr.detectors.values():
angps = panel.angularPixelSize(
np.stack(
panel.pixel_coords,
axis=0
).reshape(2, np.cumprod(panel.shape)[-1]).T
)
ang_ps_full.append(angps)
max_tth = min(max_tth, np.min(angps[:, 0]))
max_eta = min(max_eta, np.min(angps[:, 1]))
min_tth = max(min_tth, np.max(angps[:, 0]))
min_eta = max(min_eta, np.max(angps[:, 1]))
pass
med_tth, med_eta = np.median(np.vstack(ang_ps_full), axis=0).flatten()
return (min_tth, med_tth, max_tth), (min_eta, med_eta, max_eta)
def max_resolution(instr):
"""
Return the maximum angular resolution of the instrument.
Parameters
----------
instr : HEDMInstrument instance
An instrument.
Returns
-------
max_tth : float
Maximum tth resolution in radians.
max_eta : TYPE
maximum eta resolution in radians.
"""
max_tth = np.inf
max_eta = np.inf
for panel in instr.detectors.values():
angps = panel.angularPixelSize(
np.stack(
panel.pixel_coords,
axis=0
).reshape(2, np.cumprod(panel.shape)[-1]).T
)
max_tth = min(max_tth, np.min(angps[:, 0]))
max_eta = min(max_eta, np.min(angps[:, 1]))
return max_tth, max_eta
def _gaussian_dist(x, cen, fwhm):
sigm = fwhm/(2*np.sqrt(2*np.log(2)))
return np.exp(-0.5*(x - cen)**2/sigm**2)
def _sigma_to_fwhm(sigm):
return sigm*ct.sigma_to_fwhm
def _fwhm_to_sigma(fwhm):
return fwhm/ct.sigma_to_fwhm
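# (Hedged note: ct.sigma_to_fwhm is presumably the usual Gaussian factor
# 2*sqrt(2*ln 2) ~= 2.3548 relating the standard deviation to the full width
# at half maximum, which is what the two helpers above assume.)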
# FIXME find a better place for this, and maybe include loop over pixels
if ct.USE_NUMBA:
@numba.njit(nogil=True, cache=True)
def _solid_angle_of_triangle(vtx_list):
norms = np.sqrt(np.sum(vtx_list*vtx_list, axis=1))
norms_prod = norms[0] * norms[1] * norms[2]
scalar_triple_product = np.dot(vtx_list[0],
np.cross(vtx_list[2], vtx_list[1]))
denominator = norms_prod \
+ norms[0]*np.dot(vtx_list[1], vtx_list[2]) \
+ norms[1]*np.dot(vtx_list[2], vtx_list[0]) \
+ norms[2]*np.dot(vtx_list[0], vtx_list[1])
return 2.*np.arctan2(scalar_triple_product, denominator)
else:
def _solid_angle_of_triangle(vtx_list):
norms = rowNorm(vtx_list)
norms_prod = np.cumprod(norms)[-1]
scalar_triple_product = np.dot(vtx_list[0],
np.cross(vtx_list[2], vtx_list[1]))
denominator = norms_prod \
+ norms[0]*np.dot(vtx_list[1], vtx_list[2]) \
+ norms[1]*np.dot(vtx_list[2], vtx_list[0]) \
+ norms[2]*np.dot(vtx_list[0], vtx_list[1])
return 2.*np.arctan2(scalar_triple_product, denominator)
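# (Hedged note: both branches above appear to implement the Van Oosterom-
# Strackee formula for the solid angle subtended at the origin by a plane
# triangle; ``vtx_list`` is a (3, 3) array of vertex vectors and the result is
# in steradians, signed by the vertex ordering.)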
# =============================================================================
# CLASSES
# =============================================================================
class HEDMInstrument(object):
"""
Abstraction of XRD instrument.
* Distortion needs to be moved to a class with registry; tuple unworkable
* where should reference eta be defined? currently set to default config
"""
def __init__(self, instrument_config=None,
image_series=None, eta_vector=None,
instrument_name=None, tilt_calibration_mapping=None,
max_workers=max_workers_DFLT):
self._id = instrument_name_DFLT
if eta_vector is None:
self._eta_vector = eta_vec_DFLT
else:
self._eta_vector = eta_vector
self.max_workers = max_workers
if instrument_config is None:
if instrument_name is not None:
self._id = instrument_name
self._num_panels = 1
self._beam_energy = beam_energy_DFLT
self._beam_vector = beam_vec_DFLT
self._detectors = dict(
panel_id_DFLT=PlanarDetector(
rows=nrows_DFLT, cols=ncols_DFLT,
pixel_size=pixel_size_DFLT,
tvec=t_vec_d_DFLT,
tilt=tilt_params_DFLT,
bvec=self._beam_vector,
evec=self._eta_vector,
distortion=None,
max_workers=self.max_workers),
)
self._tvec = t_vec_s_DFLT
self._chi = chi_DFLT
else:
if isinstance(instrument_config, h5py.File):
tmp = {}
unwrap_h5_to_dict(instrument_config, tmp)
instrument_config.close()
instrument_config = tmp['instrument']
elif not isinstance(instrument_config, dict):
raise RuntimeError(
"instrument_config must be either an HDF5 file object"
+ "or a dictionary. You gave a %s"
% type(instrument_config)
)
if instrument_name is None:
if 'id' in instrument_config:
self._id = instrument_config['id']
else:
self._id = instrument_name
self._num_panels = len(instrument_config['detectors'])
self._beam_energy = instrument_config['beam']['energy'] # keV
self._beam_vector = calc_beam_vec(
instrument_config['beam']['vector']['azimuth'],
instrument_config['beam']['vector']['polar_angle'],
)
# now build detector dict
detectors_config = instrument_config['detectors']
det_dict = dict.fromkeys(detectors_config)
for det_id, det_info in detectors_config.items():
pixel_info = det_info['pixels']
affine_info = det_info['transform']
try:
saturation_level = det_info['saturation_level']
except(KeyError):
saturation_level = 2**16
shape = (pixel_info['rows'], pixel_info['columns'])
panel_buffer = None
if buffer_key in det_info:
det_buffer = det_info[buffer_key]
if det_buffer is not None:
if isinstance(det_buffer, np.ndarray):
if det_buffer.ndim == 2:
assert det_buffer.shape == shape, \
"buffer shape must match detector"
else:
assert len(det_buffer) == 2
panel_buffer = det_buffer
elif isinstance(det_buffer, list):
panel_buffer = np.asarray(det_buffer)
elif np.isscalar(det_buffer):
panel_buffer = det_buffer*np.ones(2)
else:
raise RuntimeError(
"panel buffer spec invalid for %s" % det_id
)
# handle distortion
distortion = None
if distortion_key in det_info:
distortion_cfg = det_info[distortion_key]
if distortion_cfg is not None:
try:
func_name = distortion_cfg['function_name']
dparams = distortion_cfg['parameters']
distortion = distortion_pkg.get_mapping(
func_name, dparams
)
except(KeyError):
raise RuntimeError(
"problem with distortion specification"
)
det_dict[det_id] = PlanarDetector(
name=det_id,
rows=pixel_info['rows'],
cols=pixel_info['columns'],
pixel_size=pixel_info['size'],
panel_buffer=panel_buffer,
saturation_level=saturation_level,
tvec=affine_info['translation'],
tilt=affine_info['tilt'],
bvec=self._beam_vector,
evec=self._eta_vector,
distortion=distortion,
max_workers=self.max_workers)
self._detectors = det_dict
self._tvec = np.r_[
instrument_config['oscillation_stage']['translation']
]
self._chi = instrument_config['oscillation_stage']['chi']
#
# set up calibration parameter list and refinement flags
#
# first, grab the mapping function for tilt parameters if specified
if tilt_calibration_mapping is not None:
if not isinstance(tilt_calibration_mapping, RotMatEuler):
raise RuntimeError(
"tilt mapping must be a 'RotMatEuler' instance"
)
self._tilt_calibration_mapping = tilt_calibration_mapping
# grab angles from beam vec
# !!! these are in DEGREES!
azim, pola = calc_angles_from_beam_vec(self._beam_vector)
# stack instrument level parameters
# units: keV, degrees, mm
self._calibration_parameters = [
self._beam_energy,
azim,
pola,
np.degrees(self._chi),
*self._tvec,
]
self._calibration_flags = instr_calibration_flags_DFLT
# collect info from panels and append
det_params = []
det_flags = []
for detector in self._detectors.values():
this_det_params = detector.calibration_parameters
if self._tilt_calibration_mapping is not None:
rmat = makeRotMatOfExpMap(detector.tilt)
self._tilt_calibration_mapping.rmat = rmat
tilt = np.degrees(self._tilt_calibration_mapping.angles)
this_det_params[:3] = tilt
det_params.append(this_det_params)
det_flags.append(detector.calibration_flags)
det_params = np.hstack(det_params)
det_flags = np.hstack(det_flags)
# !!! hstack here assumes that calib params will be float and
# !!! flags will all be bool
self._calibration_parameters = np.hstack(
[self._calibration_parameters,
det_params]
).flatten()
self._calibration_flags = np.hstack(
[self._calibration_flags,
det_flags]
)
return
# properties for physical size of rectangular detector
@property
def id(self):
return self._id
@property
def num_panels(self):
return self._num_panels
@property
def detectors(self):
return self._detectors
@property
def detector_parameters(self):
pdict = {}
for key, panel in self.detectors.items():
pdict[key] = panel.config_dict(
self.chi, self.tvec,
beam_energy=self.beam_energy,
beam_vector=self.beam_vector
)
return pdict
@property
def tvec(self):
return self._tvec
@tvec.setter
def tvec(self, x):
x = np.array(x).flatten()
assert len(x) == 3, 'input must have length = 3'
self._tvec = x
@property
def chi(self):
return self._chi
@chi.setter
def chi(self, x):
self._chi = float(x)
@property
def beam_energy(self):
return self._beam_energy
@beam_energy.setter
def beam_energy(self, x):
self._beam_energy = float(x)
@property
def beam_wavelength(self):
return ct.keVToAngstrom(self.beam_energy)
@property
def beam_vector(self):
return self._beam_vector
@beam_vector.setter
def beam_vector(self, x):
x = np.array(x).flatten()
if len(x) == 3:
assert sum(x*x) > 1-ct.sqrt_epsf, \
'input must have length = 3 and have unit magnitude'
self._beam_vector = x
elif len(x) == 2:
self._beam_vector = calc_beam_vec(*x)
else:
raise RuntimeError("input must be a unit vector or angle pair")
# ...maybe change dictionary item behavior for 3.x compatibility?
for detector_id in self.detectors:
panel = self.detectors[detector_id]
panel.bvec = self._beam_vector
@property
def eta_vector(self):
return self._eta_vector
@eta_vector.setter
def eta_vector(self, x):
x = np.array(x).flatten()
assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \
'input must have length = 3 and have unit magnitude'
self._eta_vector = x
# ...maybe change dictionary item behavior for 3.x compatibility?
for detector_id in self.detectors:
panel = self.detectors[detector_id]
panel.evec = self._eta_vector
@property
def tilt_calibration_mapping(self):
return self._tilt_calibration_mapping
@tilt_calibration_mapping.setter
def tilt_calibration_mapping(self, x):
if not isinstance(x, RotMatEuler) and x is not None:
raise RuntimeError(
"tilt mapping must be None or a 'RotMatEuler' instance"
)
self._tilt_calibration_mapping = x
@property
def calibration_parameters(self):
"""
Yields concatenated list of instrument parameters.
Returns
-------
array_like
concatenated list of instrument parameters.
"""
# grab angles from beam vec
# !!! these are in DEGREES!
azim, pola = calc_angles_from_beam_vec(self.beam_vector)
# stack instrument level parameters
# units: keV, degrees, mm
calibration_parameters = [
self.beam_energy,
azim,
pola,
np.degrees(self.chi),
*self.tvec,
]
# collect info from panels and append
det_params = []
det_flags = []
for detector in self.detectors.values():
this_det_params = detector.calibration_parameters
if self.tilt_calibration_mapping is not None:
rmat = makeRotMatOfExpMap(detector.tilt)
self.tilt_calibration_mapping.rmat = rmat
tilt = np.degrees(self.tilt_calibration_mapping.angles)
this_det_params[:3] = tilt
det_params.append(this_det_params)
det_flags.append(detector.calibration_flags)
det_params = np.hstack(det_params)
det_flags = np.hstack(det_flags)
# !!! hstack here assumes that calib params will be float and
# !!! flags will all be bool
calibration_parameters = np.hstack(
[calibration_parameters,
det_params]
).flatten()
self._calibration_parameters = calibration_parameters
return self._calibration_parameters
@property
def calibration_flags(self):
return self._calibration_flags
@calibration_flags.setter
def calibration_flags(self, x):
x = np.array(x, dtype=bool).flatten()
if len(x) != len(self._calibration_flags):
raise RuntimeError(
"length of parameter list must be %d; you gave %d"
% (len(self._calibration_flags), len(x))
)
ii = 7
for panel in self.detectors.values():
npp = 6
if panel.distortion is not None:
npp += len(panel.distortion.params)
panel.calibration_flags = x[ii:ii + npp]
self._calibration_flags = x
# =========================================================================
# METHODS
# =========================================================================
def write_config(self, filename=None, style='yaml', calibration_dict={}):
""" WRITE OUT YAML FILE """
# initialize output dictionary
assert style.lower() in ['yaml', 'hdf5'], \
"style must be either 'yaml', or 'hdf5'; you gave '%s'" % style
par_dict = {}
par_dict['id'] = self.id
azim, pola = calc_angles_from_beam_vec(self.beam_vector)
beam = dict(
energy=self.beam_energy,
vector=dict(
azimuth=azim,
polar_angle=pola,
)
)
par_dict['beam'] = beam
if calibration_dict:
par_dict['calibration_crystal'] = calibration_dict
ostage = dict(
chi=self.chi,
translation=self.tvec.tolist()
)
par_dict['oscillation_stage'] = ostage
det_dict = dict.fromkeys(self.detectors)
for det_name, detector in self.detectors.items():
# grab panel config
# !!! don't need beam or tvec
# !!! have vetted style
pdict = detector.config_dict(chi=self.chi, tvec=self.tvec,
beam_energy=self.beam_energy,
beam_vector=self.beam_vector,
style=style)
det_dict[det_name] = pdict['detector']
par_dict['detectors'] = det_dict
# handle output file if requested
if filename is not None:
if style.lower() == 'yaml':
with open(filename, 'w') as f:
yaml.dump(par_dict, stream=f)
else:
# hdf5
with h5py.File(filename, 'w') as f:
instr_grp = f.create_group('instrument')
unwrap_dict_to_h5(instr_grp, par_dict, asattr=False)
return par_dict
def update_from_parameter_list(self, p):
"""
Update the instrument class from a parameter list.
Utility function to update instrument parameters from a 1-d master
parameter list (e.g. as used in calibration)
!!! Note that angles are reported in DEGREES!
"""
self.beam_energy = p[0]
self.beam_vector = calc_beam_vec(p[1], p[2])
self.chi = np.radians(p[3])
self.tvec = np.r_[p[4:7]]
ii = 7
for det_name, detector in self.detectors.items():
this_det_params = detector.calibration_parameters
npd = len(this_det_params) # total number of params
dpnp = npd - 6 # number of distortion params
# first do tilt
tilt = np.r_[p[ii:ii + 3]]
if self.tilt_calibration_mapping is not None:
self.tilt_calibration_mapping.angles = np.radians(tilt)
rmat = self.tilt_calibration_mapping.rmat
phi, n = angleAxisOfRotMat(rmat)
tilt = phi*n.flatten()
detector.tilt = tilt
# then do translation
ii += 3
detector.tvec = np.r_[p[ii:ii + 3]]
            # then do distortion (if necessary)
# FIXME will need to update this with distortion fix
ii += 3
if dpnp > 0:
if detector.distortion is None:
raise RuntimeError(
"distortion discrepancy for '%s'!"
% det_name
)
else:
try:
detector.distortion.params = p[ii:ii + dpnp]
except(AssertionError):
raise RuntimeError(
"distortion for '%s' " % det_name
+ "expects %d params but got %d"
% (len(detector.distortion.params), dpnp)
)
ii += dpnp
return
def extract_polar_maps(self, plane_data, imgser_dict,
active_hkls=None, threshold=None,
tth_tol=None, eta_tol=0.25):
"""
Extract eta-omega maps from an imageseries.
        Quick and dirty way to histogram angular patch data to make
pole figures suitable for fiber generation
TODO: streamline projection code
TODO: normalization
!!!: images must be non-negative!
"""
if tth_tol is not None:
plane_data.tThWidth = np.radians(tth_tol)
else:
tth_tol = np.degrees(plane_data.tThWidth)
tth_ranges = plane_data.getTThRanges()
if active_hkls is not None:
assert hasattr(active_hkls, '__len__'), \
"active_hkls must be an iterable with __len__"
tth_ranges = tth_ranges[active_hkls]
# # need this for making eta ranges
# eta_tol_vec = 0.5*np.radians([-eta_tol, eta_tol])
# make rings clipped to panel
# !!! eta_idx has the same length as plane_data.exclusions
# each entry are the integer indices into the bins
# !!! eta_edges is the list of eta bin EDGES
# We can use the same eta_edge for all detectors, so calculate it once
pow_angs, pow_xys, eta_idx, eta_edges = list(
self.detectors.values()
)[0].make_powder_rings(plane_data,
merge_hkls=False, delta_eta=eta_tol,
full_output=True)
delta_eta = eta_edges[1] - eta_edges[0]
ncols_eta = len(eta_edges) - 1
ring_maps_panel = dict.fromkeys(self.detectors)
for i_d, det_key in enumerate(self.detectors):
print("working on detector '%s'..." % det_key)
# grab panel
panel = self.detectors[det_key]
# native_area = panel.pixel_area # pixel ref area
# pixel angular coords for the detector panel
ptth, peta = panel.pixel_angles()
# grab omegas from imageseries and squawk if missing
try:
omegas = imgser_dict[det_key].metadata['omega']
except(KeyError):
msg = "imageseries for '%s' has no omega info" % det_key
raise RuntimeError(msg)
            # initialize maps and assign by row (omega/frame)
nrows_ome = len(omegas)
# init map with NaNs
shape = (len(tth_ranges), nrows_ome, ncols_eta)
ring_maps = np.full(shape, np.nan)
# Generate ring parameters once, and re-use them for each image
ring_params = []
for tthr in tth_ranges:
kwargs = {
'tthr': tthr,
'ptth': ptth,
'peta': peta,
'eta_edges': eta_edges,
'delta_eta': delta_eta,
}
ring_params.append(_generate_ring_params(**kwargs))
# Divide up the images among processes
ims = imgser_dict[det_key]
tasks = distribute_tasks(len(ims), self.max_workers)
func = partial(_run_histograms, ims=ims, tth_ranges=tth_ranges,
ring_maps=ring_maps, ring_params=ring_params,
threshold=threshold)
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
executor.map(func, tasks)
ring_maps_panel[det_key] = ring_maps
return ring_maps_panel, eta_edges
def extract_line_positions(self, plane_data, imgser_dict,
tth_tol=None, eta_tol=1., npdiv=2,
eta_centers=None,
collapse_eta=True, collapse_tth=False,
do_interpolation=True):
"""
Perform annular interpolation on diffraction images.
Provides data for extracting the line positions from powder diffraction
images, pole figure patches from imageseries, or Bragg peaks from
Laue diffraction images.
Parameters
----------
plane_data : hexrd.crystallography.PlaneData object or array_like
Object determining the 2theta positions for the integration
sectors. If PlaneData, this will be all non-excluded reflections,
subject to merging within PlaneData.tThWidth. If array_like,
            interpreted as a list of 2theta angles IN RADIANS (this may change).
imgser_dict : dict
Dictionary of powder diffraction images, one for each detector.
tth_tol : scalar, optional
The radial (i.e. 2theta) width of the integration sectors
IN DEGREES. This arg is required if plane_data is array_like.
The default is None.
eta_tol : scalar, optional
The azimuthal (i.e. eta) width of the integration sectors
IN DEGREES. The default is 1.
npdiv : int, optional
The number of oversampling pixel subdivision (see notes).
The default is 2.
eta_centers : array_like, optional
The desired azimuthal sector centers. The default is None. If
            None, then bins are distributed sequentially from (-180, 180).
collapse_eta : bool, optional
Flag for summing sectors in eta. The default is True.
collapse_tth : bool, optional
Flag for summing sectors in 2theta. The default is False.
do_interpolation : bool, optional
If True, perform bilinear interpolation. The default is True.
Raises
------
RuntimeError
DESCRIPTION.
Returns
-------
panel_data : dict
            Dictionary over the detectors with the following structure:
[list over (merged) 2theta ranges]
[list over valid eta sectors]
[angle data <input dependent>,
bin intensities <input dependent>]
Notes
-----
TODO: May change the array_like input units to degrees.
TODO: rename function.
"""
if not hasattr(plane_data, '__len__'):
plane_data = plane_data.makeNew() # make local copy to munge
if tth_tol is not None:
plane_data.tThWidth = np.radians(tth_tol)
tth_ranges = np.degrees(plane_data.getMergedRanges()[1])
tth_tols = np.hstack([i[1] - i[0] for i in tth_ranges])
else:
tth_tols = np.ones(len(plane_data))*tth_tol
# =====================================================================
# LOOP OVER DETECTORS
# =====================================================================
panel_data = dict.fromkeys(self.detectors)
for i_det, detector_id in enumerate(self.detectors):
print("working on detector '%s'..." % detector_id)
# pbar.update(i_det + 1)
# grab panel
panel = self.detectors[detector_id]
instr_cfg = panel.config_dict(
chi=self.chi, tvec=self.tvec,
beam_energy=self.beam_energy,
beam_vector=self.beam_vector
)
native_area = panel.pixel_area # pixel ref area
images = imgser_dict[detector_id]
if images.ndim == 2:
n_images = 1
images = np.tile(images, (1, 1, 1))
elif images.ndim == 3:
n_images = len(images)
else:
raise RuntimeError("images must be 2- or 3-d")
# make rings
pow_angs, pow_xys = panel.make_powder_rings(
plane_data, merge_hkls=True,
delta_tth=tth_tol, delta_eta=eta_tol,
eta_list=eta_centers)
# =================================================================
# LOOP OVER RING SETS
# =================================================================
ring_data = []
for i_ring, these_data in enumerate(zip(pow_angs, pow_xys)):
print("interpolating 2theta bin %d..." % i_ring)
# points are already checked to fall on detector
angs = these_data[0]
xys = these_data[1]
# make the tth,eta patches for interpolation
patches = xrdutil.make_reflection_patches(
instr_cfg, angs, panel.angularPixelSize(xys),
tth_tol=tth_tols[i_ring], eta_tol=eta_tol,
npdiv=npdiv, quiet=True)
# loop over patches
# FIXME: fix initialization
if collapse_tth:
patch_data = np.zeros((len(angs), n_images))
else:
patch_data = []
for i_p, patch in enumerate(patches):
# strip relevant objects out of current patch
vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch
# need to reshape eval pts for interpolation
xy_eval = np.vstack([
xys_eval[0].flatten(),
xys_eval[1].flatten()]).T
_, on_panel = panel.clip_to_panel(xy_eval)
if np.any(~on_panel):
continue
if collapse_tth:
ang_data = (vtx_angs[0][0, [0, -1]],
vtx_angs[1][[0, -1], 0])
elif collapse_eta:
# !!! yield the tth bin centers
tth_centers = np.average(
np.vstack([vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]]),
axis=0
)
ang_data = (tth_centers,
angs[i_p][-1])
else:
ang_data = vtx_angs
prows, pcols = areas.shape
area_fac = areas/float(native_area)
# interpolate
if not collapse_tth:
ims_data = []
for j_p in np.arange(len(images)):
# catch interpolation type
image = images[j_p]
if do_interpolation:
tmp = panel.interpolate_bilinear(
xy_eval,
image,
).reshape(prows, pcols)*area_fac
else:
tmp = image[ijs[0], ijs[1]]*area_fac
# catch collapsing options
if collapse_tth:
patch_data[i_p, j_p] = np.average(tmp)
# ims_data.append(np.sum(tmp))
else:
if collapse_eta:
ims_data.append(np.average(tmp, axis=0))
else:
ims_data.append(tmp)
pass # close image loop
if not collapse_tth:
patch_data.append((ang_data, ims_data))
pass # close patch loop
ring_data.append(patch_data)
pass # close ring loop
panel_data[detector_id] = ring_data
pass # close panel loop
# pbar.finish()
return panel_data
def simulate_powder_pattern(self,
mat_list,
params=None,
bkgmethod=None,
origin=None,
noise=None):
"""
        Generate powder diffraction images from specified materials.
Parameters
----------
mat_list : array_like (n, )
List of Material classes.
params : dict, optional
Dictionary of LeBail parameters (see Notes). The default is None.
bkgmethod : dict, optional
Background function specification. The default is None.
origin : array_like (3,), optional
Vector describing the origin of the diffrction volume.
The default is None, wiich is equivalent to [0, 0, 0].
noise : str, optional
Flag describing type of noise to be applied. The default is None.
Returns
-------
img_dict : dict
            Dictionary of diffraction images over the detectors.
Notes
-----
TODO: add more controls for noise function.
TODO: modify hooks to LeBail parameters.
TODO: add optional volume fraction weights for phases in mat_list
"""
"""
        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab,
[email protected]
>> @DATE: 01/22/2021 SS 1.0 original
>> @DETAILS: adding hook to WPPF class. this changes the input list
significantly
"""
if origin is None:
origin = self.tvec
origin = np.asarray(origin).squeeze()
assert len(origin) == 3, \
"origin must be a 3-element sequence"
'''
if params is None, fill in some sane default values
only the first value is used. the rest of the values are
the upper, lower bounds and vary flag for refinement which
are not used but required for interfacing with WPPF
zero_error : zero shift error
U, V, W : Cagliotti parameters
P, X, Y : Lorentzian parameters
eta1, eta2, eta3 : Mixing parameters
'''
if(params is None):
# params = {'zero_error': [0.0, -1., 1., True],
# 'U': [2e-1, -1., 1., True],
# 'V': [2e-2, -1., 1., True],
# 'W': [2e-2, -1., 1., True],
# 'X': [2e-1, -1., 1., True],
# 'Y': [2e-1, -1., 1., True]
# }
params = wppfsupport._generate_default_parameters_LeBail(
mat_list,
1)
'''
use the material list to obtain the dictionary of initial intensities
we need to make sure that the intensities are properly scaled by the
lorentz polarization factor. since the calculation is done in the
LeBail class, all that means is the initial intensity needs that factor
in there
'''
img_dict = dict.fromkeys(self.detectors)
# find min and max tth over all panels
tth_mi = np.inf
tth_ma = 0.
ptth_dict = dict.fromkeys(self.detectors)
for det_key, panel in self.detectors.items():
ptth, peta = panel.pixel_angles(origin=origin)
tth_mi = min(tth_mi, ptth.min())
tth_ma = max(tth_ma, ptth.max())
ptth_dict[det_key] = ptth
'''
now make a list of two theta and dummy ones for the experimental
spectrum. This is never really used, so any values should be okay. We
could also pass the integrated detector image if we would like to
simulate some realistic background. But that's for another day.
'''
# convert angles to degrees because thats what the WPPF expects
tth_mi = np.degrees(tth_mi)
tth_ma = np.degrees(tth_ma)
# get tth angular resolution for instrument
ang_res = max_resolution(self)
# !!! calc nsteps by oversampling
nsteps = int(np.ceil(2*(tth_ma - tth_mi)/np.degrees(ang_res[0])))
# evaluation vector for LeBail
tth = np.linspace(tth_mi, tth_ma, nsteps)
expt = np.vstack([tth, np.ones_like(tth)]).T
wavelength = [
valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'),
1.
]
'''
now go through the material list and get the intensity dictionary
'''
intensity = {}
for mat in mat_list:
multiplicity = mat.planeData.getMultiplicity()
tth = mat.planeData.getTTh()
LP = (1 + np.cos(tth)**2) / \
np.cos(0.5*tth)/np.sin(0.5*tth)**2
intensity[mat.name] = {}
intensity[mat.name]['synchrotron'] = \
mat.planeData.get_structFact() * LP * multiplicity
kwargs = {
'expt_spectrum': expt,
'params': params,
'phases': mat_list,
'wavelength': {
'synchrotron': wavelength
},
'bkgmethod': bkgmethod,
'intensity_init': intensity,
'peakshape': 'pvtch'
}
self.WPPFclass = LeBail(**kwargs)
self.simulated_spectrum = self.WPPFclass.spectrum_sim
self.background = self.WPPFclass.background
'''
now that we have the simulated intensities, its time to get the
two theta for the detector pixels and interpolate what the intensity
for each pixel should be
'''
img_dict = dict.fromkeys(self.detectors)
for det_key, panel in self.detectors.items():
ptth = ptth_dict[det_key]
img = np.interp(np.degrees(ptth),
self.simulated_spectrum.x,
self.simulated_spectrum.y + self.background.y)
# normalize everything to 0-1
mi = img.min()
ma = img.max()
if(ma > mi):
img = (img - mi) / (ma - mi)
if(noise is None):
img_dict[det_key] = img
else:
if(noise.lower() == 'poisson'):
im_noise = random_noise(img,
mode='poisson',
clip=True)
mi = im_noise.min()
ma = im_noise.max()
if(ma > mi):
im_noise = (im_noise - mi)/(ma - mi)
img_dict[det_key] = im_noise
elif(noise.lower() == 'gaussian'):
img_dict[det_key] = random_noise(img,
mode='gaussian',
clip=True)
elif(noise.lower() == 'salt'):
img_dict[det_key] = random_noise(img, mode='salt')
elif(noise.lower() == 'pepper'):
img_dict[det_key] = random_noise(img, mode='pepper')
elif(noise.lower() == 's&p'):
img_dict[det_key] = random_noise(img, mode='s&p')
elif(noise.lower() == 'speckle'):
img_dict[det_key] = random_noise(img,
mode='speckle',
clip=True)
return img_dict
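# Hypothetical usage sketch; `instr` is an HEDMInstrument, `mats` a list of
# Material objects, and the noise choice is just one of the supported
# strings handled above:
#
#   img_dict = instr.simulate_powder_pattern(mats, noise='poisson')
#   for det_key, img in img_dict.items():
#       print(det_key, img.shape, img.min(), img.max())   # each normalized to [0, 1]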
def simulate_laue_pattern(self, crystal_data,
minEnergy=5., maxEnergy=35.,
rmat_s=None, grain_params=None):
"""
Simulate Laue diffraction over the instrument.
Parameters
----------
crystal_data : TYPE
DESCRIPTION.
minEnergy : TYPE, optional
DESCRIPTION. The default is 5..
maxEnergy : TYPE, optional
DESCRIPTION. The default is 35..
rmat_s : TYPE, optional
DESCRIPTION. The default is None.
grain_params : TYPE, optional
DESCRIPTION. The default is None.
Returns
-------
results : TYPE
DESCRIPTION.
TODO: revisit output; dict, or concatenated list?
"""
results = dict.fromkeys(self.detectors)
for det_key, panel in self.detectors.items():
results[det_key] = panel.simulate_laue_pattern(
crystal_data,
minEnergy=minEnergy, maxEnergy=maxEnergy,
rmat_s=rmat_s, tvec_s=self.tvec,
grain_params=grain_params,
beam_vec=self.beam_vector)
return results
def simulate_rotation_series(self, plane_data, grain_param_list,
eta_ranges=[(-np.pi, np.pi), ],
ome_ranges=[(-np.pi, np.pi), ],
ome_period=(-np.pi, np.pi),
wavelength=None):
"""
Simulate a monochromatic rotation series over the instrument.
Parameters
----------
plane_data : TYPE
DESCRIPTION.
grain_param_list : TYPE
DESCRIPTION.
eta_ranges : TYPE, optional
DESCRIPTION. The default is [(-np.pi, np.pi), ].
ome_ranges : TYPE, optional
DESCRIPTION. The default is [(-np.pi, np.pi), ].
ome_period : TYPE, optional
DESCRIPTION. The default is (-np.pi, np.pi).
wavelength : TYPE, optional
DESCRIPTION. The default is None.
Returns
-------
results : TYPE
DESCRIPTION.
TODO: revisit output; dict, or concatenated list?
"""
results = dict.fromkeys(self.detectors)
for det_key, panel in self.detectors.items():
results[det_key] = panel.simulate_rotation_series(
plane_data, grain_param_list,
eta_ranges=eta_ranges,
ome_ranges=ome_ranges,
ome_period=ome_period,
chi=self.chi, tVec_s=self.tvec,
wavelength=wavelength)
return results
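# Hedged sketch: each entry of `results` follows the panel-level
# simulate_rotation_series return signature (valid_ids, valid_hkls,
# valid_angs, valid_xys, ang_pixel_size). Names below are placeholders.
#
#   sim = instr.simulate_rotation_series(plane_data, [grain_params, ])
#   ids, hkls, angs, xys, px_size = sim['detector_1']
#   print(len(ids[0]), 'reflections predicted on detector_1')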
def pull_spots(self, plane_data, grain_params,
imgser_dict,
tth_tol=0.25, eta_tol=1., ome_tol=1.,
npdiv=2, threshold=10,
eta_ranges=[(-np.pi, np.pi), ],
ome_period=(-np.pi, np.pi),
dirname='results', filename=None, output_format='text',
return_spot_list=False,
quiet=True, check_only=False,
interp='nearest'):
"""
Extract reflection info from a rotation series.
Input must be encoded as an OmegaImageseries object.
Parameters
----------
plane_data : TYPE
DESCRIPTION.
grain_params : TYPE
DESCRIPTION.
imgser_dict : TYPE
DESCRIPTION.
tth_tol : TYPE, optional
DESCRIPTION. The default is 0.25.
eta_tol : TYPE, optional
DESCRIPTION. The default is 1..
ome_tol : TYPE, optional
DESCRIPTION. The default is 1..
npdiv : TYPE, optional
DESCRIPTION. The default is 2.
threshold : TYPE, optional
DESCRIPTION. The default is 10.
eta_ranges : TYPE, optional
DESCRIPTION. The default is [(-np.pi, np.pi), ].
ome_period : TYPE, optional
DESCRIPTION. The default is (-np.pi, np.pi).
dirname : TYPE, optional
DESCRIPTION. The default is 'results'.
filename : TYPE, optional
DESCRIPTION. The default is None.
output_format : TYPE, optional
DESCRIPTION. The default is 'text'.
return_spot_list : TYPE, optional
DESCRIPTION. The default is False.
quiet : TYPE, optional
DESCRIPTION. The default is True.
check_only : TYPE, optional
DESCRIPTION. The default is False.
interp : TYPE, optional
DESCRIPTION. The default is 'nearest'.
Returns
-------
compl : TYPE
DESCRIPTION.
output : TYPE
DESCRIPTION.
"""
# grain parameters
rMat_c = makeRotMatOfExpMap(grain_params[:3])
tVec_c = grain_params[3:6]
# grab omega ranges from first imageseries
#
# WARNING: all imageseries AND all wedges within are assumed to have
# the same omega values; put in a check that they are all the same???
oims0 = next(iter(imgser_dict.values()))
ome_ranges = [np.radians([i['ostart'], i['ostop']])
for i in oims0.omegawedges.wedges]
# delta omega in DEGREES grabbed from first imageseries in the dict
delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0]
# make omega grid for frame expansion around reference frame
# in DEGREES
ndiv_ome, ome_del = make_tolerance_grid(
delta_ome, ome_tol, 1, adjust_window=True,
)
# generate structuring element for connected component labeling
if ndiv_ome == 1:
label_struct = ndimage.generate_binary_structure(2, 2)
else:
label_struct = ndimage.generate_binary_structure(3, 3)
# simulate rotation series
sim_results = self.simulate_rotation_series(
plane_data, [grain_params, ],
eta_ranges=eta_ranges,
ome_ranges=ome_ranges,
ome_period=ome_period)
# patch vertex generator (global for instrument)
tol_vec = 0.5*np.radians(
[-tth_tol, -eta_tol,
-tth_tol, eta_tol,
tth_tol, eta_tol,
tth_tol, -eta_tol])
# prepare output if requested
if filename is not None and output_format.lower() == 'hdf5':
this_filename = os.path.join(dirname, filename)
writer = GrainDataWriter_h5(
os.path.join(dirname, filename),
self.write_config(), grain_params)
# =====================================================================
# LOOP OVER PANELS
# =====================================================================
iRefl = 0
compl = []
output = dict.fromkeys(self.detectors)
for detector_id in self.detectors:
# initialize text-based output writer
if filename is not None and output_format.lower() == 'text':
output_dir = os.path.join(
dirname, detector_id
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
this_filename = os.path.join(
output_dir, filename
)
writer = PatchDataWriter(this_filename)
# grab panel
panel = self.detectors[detector_id]
instr_cfg = panel.config_dict(
self.chi, self.tvec,
beam_energy=self.beam_energy,
beam_vector=self.beam_vector
)
native_area = panel.pixel_area # pixel ref area
# pull out the OmegaImageSeries for this panel from input dict
ome_imgser = imgser_dict[detector_id]
# extract simulation results
sim_results_p = sim_results[detector_id]
hkl_ids = sim_results_p[0][0]
hkls_p = sim_results_p[1][0]
ang_centers = sim_results_p[2][0]
xy_centers = sim_results_p[3][0]
ang_pixel_size = sim_results_p[4][0]
# now verify that full patch falls on detector...
# ???: strictly necessary?
#
# patch vertex array from sim
nangs = len(ang_centers)
patch_vertices = (
np.tile(ang_centers[:, :2], (1, 4)) +
np.tile(tol_vec, (nangs, 1))
).reshape(4*nangs, 2)
ome_dupl = np.tile(
ang_centers[:, 2], (4, 1)
).T.reshape(len(patch_vertices), 1)
# find vertices that all fall on the panel
det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane(
np.hstack([patch_vertices, ome_dupl]),
panel.rmat, rMat_c, self.chi,
panel.tvec, tVec_c, self.tvec,
panel.distortion)
_, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True)
# all vertices must be on...
patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1)
patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on]
# re-filter...
hkl_ids = hkl_ids[patch_is_on]
hkls_p = hkls_p[patch_is_on, :]
ang_centers = ang_centers[patch_is_on, :]
xy_centers = xy_centers[patch_is_on, :]
ang_pixel_size = ang_pixel_size[patch_is_on, :]
# TODO: add polygon testing right here!
# done <JVB 06/21/16>
if check_only:
patch_output = []
for i_pt, angs in enumerate(ang_centers):
# the evaluation omegas;
# expand about the central value using tol vector
ome_eval = np.degrees(angs[2]) + ome_del
# ...vectorize the omega_to_frame function to avoid loop?
frame_indices = [
ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
]
if -1 in frame_indices:
if not quiet:
msg = """
window for (%d%d%d) falls outside omega range
""" % tuple(hkls_p[i_pt, :])
print(msg)
continue
else:
these_vertices = patch_xys[i_pt]
ijs = panel.cartToPixel(these_vertices)
ii, jj = polygon(ijs[:, 0], ijs[:, 1])
contains_signal = False
for i_frame in frame_indices:
contains_signal = contains_signal or np.any(
ome_imgser[i_frame][ii, jj] > threshold
)
compl.append(contains_signal)
patch_output.append((ii, jj, frame_indices))
else:
# make the tth,eta patches for interpolation
patches = xrdutil.make_reflection_patches(
instr_cfg,
ang_centers[:, :2], ang_pixel_size,
omega=ang_centers[:, 2],
tth_tol=tth_tol, eta_tol=eta_tol,
rmat_c=rMat_c, tvec_c=tVec_c,
npdiv=npdiv, quiet=True)
# GRAND LOOP over reflections for this panel
patch_output = []
for i_pt, patch in enumerate(patches):
# strip relevant objects out of current patch
vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch
prows, pcols = areas.shape
nrm_fac = areas/float(native_area)
nrm_fac = nrm_fac / np.min(nrm_fac)
# grab hkl info
hkl = hkls_p[i_pt, :]
hkl_id = hkl_ids[i_pt]
# edge arrays
tth_edges = vtx_angs[0][0, :]
delta_tth = tth_edges[1] - tth_edges[0]
eta_edges = vtx_angs[1][:, 0]
delta_eta = eta_edges[1] - eta_edges[0]
# need to reshape eval pts for interpolation
xy_eval = np.vstack([xy_eval[0].flatten(),
xy_eval[1].flatten()]).T
# the evaluation omegas;
# expand about the central value using tol vector
ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del
# ???: vectorize the omega_to_frame function to avoid loop?
frame_indices = [
ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
]
if -1 in frame_indices:
if not quiet:
msg = """
window for (%d%d%d) falls outside omega range
""" % tuple(hkl)
print(msg)
continue
else:
# initialize spot data parameters
# !!! maybe change these to nan to not fuck up writer
peak_id = -999
sum_int = np.nan
max_int = np.nan
meas_angs = np.nan*np.ones(3)
meas_xy = np.nan*np.ones(2)
# quick check for intensity
contains_signal = False
patch_data_raw = []
for i_frame in frame_indices:
tmp = ome_imgser[i_frame][ijs[0], ijs[1]]
contains_signal = contains_signal or np.any(
tmp > threshold
)
patch_data_raw.append(tmp)
pass
patch_data_raw = np.stack(patch_data_raw, axis=0)
compl.append(contains_signal)
if contains_signal:
# initialize patch data array for intensities
if interp.lower() == 'bilinear':
patch_data = np.zeros(
(len(frame_indices), prows, pcols))
for i, i_frame in enumerate(frame_indices):
patch_data[i] = \
panel.interpolate_bilinear(
xy_eval,
ome_imgser[i_frame],
pad_with_nans=False
).reshape(prows, pcols) # * nrm_fac
elif interp.lower() == 'nearest':
patch_data = patch_data_raw # * nrm_fac
else:
msg = "interpolation option " + \
"'%s' not understood"
raise RuntimeError(msg % interp)
# now have interpolated patch data...
labels, num_peaks = ndimage.label(
patch_data > threshold, structure=label_struct
)
slabels = np.arange(1, num_peaks + 1)
if num_peaks > 0:
peak_id = iRefl
coms = np.array(
ndimage.center_of_mass(
patch_data,
labels=labels,
index=slabels
)
)
if num_peaks > 1:
center = np.r_[patch_data.shape]*0.5
center_t = np.tile(center, (num_peaks, 1))
com_diff = coms - center_t
closest_peak_idx = np.argmin(
np.sum(com_diff**2, axis=1)
)
else:
closest_peak_idx = 0
pass # end multipeak conditional
coms = coms[closest_peak_idx]
# meas_omes = \
# ome_edges[0] + (0.5 + coms[0])*delta_ome
meas_omes = \
ome_eval[0] + coms[0]*delta_ome
meas_angs = np.hstack(
[tth_edges[0] + (0.5 + coms[2])*delta_tth,
eta_edges[0] + (0.5 + coms[1])*delta_eta,
mapAngle(
np.radians(meas_omes), ome_period
)
]
)
# intensities
# - summed is 'integrated' over interpolated
# data
# - max is max of raw input data
sum_int = np.sum(
patch_data[
labels == slabels[closest_peak_idx]
]
)
max_int = np.max(
patch_data_raw[
labels == slabels[closest_peak_idx]
]
)
# ???: Should this only use labeled pixels?
# Those are segmented from interpolated data,
# not raw; likely ok in most cases.
# need MEASURED xy coords
gvec_c = anglesToGVec(
meas_angs,
chi=self.chi,
rMat_c=rMat_c,
bHat_l=self.beam_vector)
rMat_s = makeOscillRotMat(
[self.chi, meas_angs[2]]
)
meas_xy = gvecToDetectorXY(
gvec_c,
panel.rmat, rMat_s, rMat_c,
panel.tvec, self.tvec, tVec_c,
beamVec=self.beam_vector)
if panel.distortion is not None:
meas_xy = panel.distortion.apply_inverse(
np.atleast_2d(meas_xy)
).flatten()
pass
# FIXME: why is this suddenly necessary???
meas_xy = meas_xy.squeeze()
pass # end num_peaks > 0
else:
patch_data = patch_data_raw
pass # end contains_signal
# write output
if filename is not None:
if output_format.lower() == 'text':
writer.dump_patch(
peak_id, hkl_id, hkl, sum_int, max_int,
ang_centers[i_pt], meas_angs,
xy_centers[i_pt], meas_xy)
elif output_format.lower() == 'hdf5':
xyc_arr = xy_eval.reshape(
prows, pcols, 2
).transpose(2, 0, 1)
writer.dump_patch(
detector_id, iRefl, peak_id, hkl_id, hkl,
tth_edges, eta_edges, np.radians(ome_eval),
xyc_arr, ijs, frame_indices, patch_data,
ang_centers[i_pt], xy_centers[i_pt],
meas_angs, meas_xy)
pass # end conditional on write output
pass # end conditional on check only
if return_spot_list:
# Full output
xyc_arr = xy_eval.reshape(
prows, pcols, 2
).transpose(2, 0, 1)
_patch_output = [
detector_id, iRefl, peak_id, hkl_id, hkl,
tth_edges, eta_edges, np.radians(ome_eval),
xyc_arr, ijs, frame_indices, patch_data,
ang_centers[i_pt], xy_centers[i_pt],
meas_angs, meas_xy
]
else:
# Trimmed output
_patch_output = [
peak_id, hkl_id, hkl, sum_int, max_int,
ang_centers[i_pt], meas_angs, meas_xy
]
patch_output.append(_patch_output)
iRefl += 1
pass # end patch conditional
pass # end patch loop
output[detector_id] = patch_output
if filename is not None and output_format.lower() == 'text':
writer.close()
pass # end detector loop
if filename is not None and output_format.lower() == 'hdf5':
writer.close()
return compl, output
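# Hypothetical usage sketch; `oims` is a dict of OmegaImageSeries keyed by
# detector, and `grain_params` is the usual 12-element vector:
#
#   compl, output = instr.pull_spots(plane_data, grain_params, oims,
#                                    filename=None, check_only=False)
#   completeness = sum(compl) / float(len(compl))   # fraction of spots with signal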
"""def fit_grain(self, grain_params, data_dir='results'):"""
pass # end class: HEDMInstrument
class PlanarDetector(object):
"""Base class for 2D planar, rectangular row-column detector"""
__pixelPitchUnit = 'mm'
def __init__(self,
rows=2048, cols=2048,
pixel_size=(0.2, 0.2),
tvec=np.r_[0., 0., -1000.],
tilt=ct.zeros_3,
name='default',
bvec=ct.beam_vec,
evec=ct.eta_vec,
saturation_level=None,
panel_buffer=None,
roi=None,
distortion=None,
max_workers=max_workers_DFLT):
"""
Instantiate a PlanarDetector object.
Parameters
----------
rows : TYPE, optional
DESCRIPTION. The default is 2048.
cols : TYPE, optional
DESCRIPTION. The default is 2048.
pixel_size : TYPE, optional
DESCRIPTION. The default is (0.2, 0.2).
tvec : TYPE, optional
DESCRIPTION. The default is np.r_[0., 0., -1000.].
tilt : TYPE, optional
DESCRIPTION. The default is ct.zeros_3.
name : TYPE, optional
DESCRIPTION. The default is 'default'.
bvec : TYPE, optional
DESCRIPTION. The default is ct.beam_vec.
evec : TYPE, optional
DESCRIPTION. The default is ct.eta_vec.
saturation_level : TYPE, optional
DESCRIPTION. The default is None.
panel_buffer : TYPE, optional
If a scalar or len(2) array_like, the interpretation is a border
in mm. If an array with shape (nrows, ncols), interpretation is a
boolean with True marking valid pixels. The default is None.
roi : TYPE, optional
DESCRIPTION. The default is None.
distortion : TYPE, optional
DESCRIPTION. The default is None.
Returns
-------
None.
"""
self._name = name
self._rows = rows
self._cols = cols
self._pixel_size_row = pixel_size[0]
self._pixel_size_col = pixel_size[1]
self._saturation_level = saturation_level
self._panel_buffer = panel_buffer
self._roi = roi
self._tvec = np.array(tvec).flatten()
self._tilt = np.array(tilt).flatten()
self._bvec = np.array(bvec).flatten()
self._evec = np.array(evec).flatten()
self._distortion = distortion
self.max_workers = max_workers
#
# set up calibration parameter list and refinement flags
#
# order for a single detector will be
#
# [tilt, translation, <distortion>]
dparams = []
if self._distortion is not None:
dparams = self._distortion.params
self._calibration_parameters = np.hstack(
[self._tilt, self._tvec, dparams]
)
self._calibration_flags = np.hstack(
[panel_calibration_flags_DFLT,
np.zeros(len(dparams), dtype=bool)]
)
return
# detector ID
@property
def name(self):
return self._name
@name.setter
def name(self, s):
assert isinstance(s, str), "requires string input"
self._name = s
# properties for physical size of rectangular detector
@property
def rows(self):
return self._rows
@rows.setter
def rows(self, x):
assert isinstance(x, int)
self._rows = x
@property
def cols(self):
return self._cols
@cols.setter
def cols(self, x):
assert isinstance(x, int)
self._cols = x
@property
def pixel_size_row(self):
return self._pixel_size_row
@pixel_size_row.setter
def pixel_size_row(self, x):
self._pixel_size_row = float(x)
@property
def pixel_size_col(self):
return self._pixel_size_col
@pixel_size_col.setter
def pixel_size_col(self, x):
self._pixel_size_col = float(x)
@property
def pixel_area(self):
return self.pixel_size_row * self.pixel_size_col
@property
def saturation_level(self):
return self._saturation_level
@saturation_level.setter
def saturation_level(self, x):
if x is not None:
assert np.isreal(x)
self._saturation_level = x
@property
def panel_buffer(self):
return self._panel_buffer
@panel_buffer.setter
def panel_buffer(self, x):
"""if not None, a buffer in mm (x, y)"""
if x is not None:
assert len(x) == 2 or x.ndim == 2
self._panel_buffer = x
@property
def roi(self):
return self._roi
@roi.setter
def roi(self, vertex_array):
"""
vertex array must be
[[r0, c0], [r1, c1], ..., [rn, cn]]
and have len >= 3
does NOT need to repeat start vertex for closure
"""
if vertex_array is not None:
assert len(vertex_array) >= 3
self._roi = vertex_array
@property
def row_dim(self):
return self.rows * self.pixel_size_row
@property
def col_dim(self):
return self.cols * self.pixel_size_col
@property
def row_pixel_vec(self):
return self.pixel_size_row*(0.5*(self.rows-1)-np.arange(self.rows))
@property
def row_edge_vec(self):
return _row_edge_vec(self.rows, self.pixel_size_row)
@property
def col_pixel_vec(self):
return self.pixel_size_col*(np.arange(self.cols)-0.5*(self.cols-1))
@property
def col_edge_vec(self):
return _col_edge_vec(self.cols, self.pixel_size_col)
@property
def corner_ul(self):
return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim]
@property
def corner_ll(self):
return np.r_[-0.5 * self.col_dim, -0.5 * self.row_dim]
@property
def corner_lr(self):
return np.r_[0.5 * self.col_dim, -0.5 * self.row_dim]
@property
def corner_ur(self):
return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim]
@property
def shape(self):
return (self.rows, self.cols)
@property
def tvec(self):
return self._tvec
@tvec.setter
def tvec(self, x):
x = np.array(x).flatten()
assert len(x) == 3, 'input must have length = 3'
self._tvec = x
@property
def tilt(self):
return self._tilt
@tilt.setter
def tilt(self, x):
assert len(x) == 3, 'input must have length = 3'
self._tilt = np.array(x).squeeze()
@property
def bvec(self):
return self._bvec
@bvec.setter
def bvec(self, x):
x = np.array(x).flatten()
assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \
'input must have length = 3 and have unit magnitude'
self._bvec = x
@property
def evec(self):
return self._evec
@evec.setter
def evec(self, x):
x = np.array(x).flatten()
assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \
'input must have length = 3 and have unit magnitude'
self._evec = x
@property
def distortion(self):
return self._distortion
@distortion.setter
def distortion(self, x):
# FIXME: need to reconcile check with new class type!
assert len(x) == 2 and hasattr(x[0], '__call__'), \
'distortion must be a tuple: (<func>, params)'
self._distortion = x
@property
def rmat(self):
return makeRotMatOfExpMap(self.tilt)
@property
def normal(self):
return self.rmat[:, 2]
@property
def beam_position(self):
"""
returns the coordinates of the beam in the cartesian detector
frame {Xd, Yd, Zd}. NaNs if no intersection.
"""
output = np.nan * np.ones(2)
b_dot_n = np.dot(self.bvec, self.normal)
if np.logical_and(
abs(b_dot_n) > ct.sqrt_epsf,
np.sign(b_dot_n) == -1
):
u = np.dot(self.normal, self.tvec) / b_dot_n
p2_l = u*self.bvec
p2_d = np.dot(self.rmat.T, p2_l - self.tvec)
output = p2_d[:2]
return output
# ...memoize???
@property
def pixel_coords(self):
pix_i, pix_j = np.meshgrid(
self.row_pixel_vec, self.col_pixel_vec,
indexing='ij')
return pix_i, pix_j
@property
def pixel_solid_angles(self):
kwargs = {
'rows': self.rows,
'cols': self.cols,
'pixel_size_row': self.pixel_size_row,
'pixel_size_col': self.pixel_size_col,
'rmat': self.rmat,
'tvec': self.tvec,
'max_workers': self.max_workers,
}
return _pixel_solid_angles(**kwargs)
@property
def calibration_parameters(self):
#
# set up calibration parameter list and refinement flags
#
# order for a single detector will be
#
# [tilt, translation, <distortion>]
dparams = []
if self.distortion is not None:
dparams = self.distortion.params
self._calibration_parameters = np.hstack(
[self.tilt, self.tvec, dparams]
)
return self._calibration_parameters
@property
def calibration_flags(self):
return self._calibration_flags
@calibration_flags.setter
def calibration_flags(self, x):
x = np.array(x, dtype=bool).flatten()
if len(x) != len(self._calibration_flags):
raise RuntimeError(
"length of parameter list must be %d; you gave %d"
% (len(self._calibration_flags), len(x))
)
self._calibration_flags = x
# =========================================================================
# METHODS
# =========================================================================
def lorentz_polarization_factor(self, f_hor, f_vert):
"""
Calculate the Lorentz polarization factor for every pixel.
Parameters
----------
f_hor : float
the fraction of horizontal polarization. for XFELs
this is close to 1.
f_vert : float
the fraction of vertical polarization, which is ~0 for XFELs.
Raises
------
RuntimeError
DESCRIPTION.
Returns
-------
TYPE
DESCRIPTION.
"""
s = f_hor + f_vert
if np.abs(s - 1) > constants.sqrt_epsf:
msg = ("sum of fraction of "
"horizontal and vertical polarizations "
"must be equal to 1.")
raise RuntimeError(msg)
if f_hor < 0 or f_vert < 0:
msg = ("fraction of polarization in horizontal "
"or vertical directions can't be negative.")
raise RuntimeError(msg)
tth, eta = self.pixel_angles()
args = (tth, eta, f_hor, f_vert)
return _lorentz_polarization_factor(*args)
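# Hedged usage sketch; the 0.95/0.05 split is only an example of a nearly
# horizontally polarized beam, not a library default:
#
#   lp = panel.lorentz_polarization_factor(f_hor=0.95, f_vert=0.05)
#   corrected = raw_img / lp    # per-pixel LP correction (sketch only)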
def config_dict(self, chi=0, tvec=ct.zeros_3,
beam_energy=beam_energy_DFLT, beam_vector=ct.beam_vec,
sat_level=None, panel_buffer=None, style='yaml'):
"""
Return a dictionary of detector parameters.
Optional instrument level parameters. This is a convenience function
to work with the APIs in several functions in xrdutil.
Parameters
----------
chi : float, optional
DESCRIPTION. The default is 0.
tvec : array_like (3,), optional
DESCRIPTION. The default is ct.zeros_3.
beam_energy : float, optional
DESCRIPTION. The default is beam_energy_DFLT.
beam_vector : array_like (3,), optional
DESCRIPTION. The default is ct.beam_vec.
sat_level : scalar, optional
DESCRIPTION. The default is None.
panel_buffer : scalar, array_like (2,), optional
DESCRIPTION. The default is None.
Returns
-------
config_dict : dict
DESCRIPTION.
"""
assert style.lower() in ['yaml', 'hdf5'], \
"style must be either 'yaml', or 'hdf5'; you gave '%s'" % style
config_dict = {}
# =====================================================================
# DETECTOR PARAMETERS
# =====================================================================
# transform and pixels
#
# assign local vars; listify if necessary
tilt = self.tilt
translation = self.tvec
if style.lower() == 'yaml':
tilt = tilt.tolist()
translation = translation.tolist()
tvec = tvec.tolist()
det_dict = dict(
transform=dict(
tilt=tilt,
translation=translation,
),
pixels=dict(
rows=self.rows,
columns=self.cols,
size=[self.pixel_size_row, self.pixel_size_col],
)
)
# distortion
if self.distortion is not None:
dparams = self.distortion.params
if style.lower() == 'yaml':
dparams = dparams.tolist()
dist_d = dict(
function_name=self.distortion.maptype,
parameters=dparams
)
det_dict['distortion'] = dist_d
# saturation level
if sat_level is None:
sat_level = self.saturation_level
det_dict['saturation_level'] = sat_level
# panel buffer
if panel_buffer is None:
# could be None, a 2-element list, or a 2-d array (rows, cols)
panel_buffer = copy.deepcopy(self.panel_buffer)
# !!! now we have to do some style-dependent munging of panel_buffer
if isinstance(panel_buffer, np.ndarray):
if panel_buffer.ndim == 1:
assert len(panel_buffer) == 2, \
"length of 1-d buffer must be 2"
# if here is a 2-element array
if style.lower() == 'yaml':
panel_buffer = panel_buffer.tolist()
elif panel_buffer.ndim == 2:
if style.lower() == 'yaml':
# !!! can't practically write array-like buffers to YAML
# so forced to clobber
print("clobbering panel buffer array in yaml-ready output")
panel_buffer = [0., 0.]
else:
raise RuntimeError(
"panel buffer ndim must be 1 or 2; you specified %d"
% panel_buffer.ndim
)
elif panel_buffer is None:
# still None on self
if style.lower() == 'hdf5':
# !!! can't write None to hdf5; substitute with zeros
panel_buffer = np.r_[0., 0.]
det_dict['buffer'] = panel_buffer
# =====================================================================
# SAMPLE STAGE PARAMETERS
# =====================================================================
stage_dict = dict(
chi=chi,
translation=tvec
)
# =====================================================================
# BEAM PARAMETERS
# =====================================================================
# !!! make_reflection_patches is still using the vector
# azim, pola = calc_angles_from_beam_vec(beam_vector)
# beam_dict = dict(
# energy=beam_energy,
# vector=dict(
# azimuth=azim,
# polar_angle=pola
# )
# )
beam_dict = dict(
energy=beam_energy,
vector=beam_vector
)
config_dict['detector'] = det_dict
config_dict['oscillation_stage'] = stage_dict
config_dict['beam'] = beam_dict
return config_dict
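# Hedged sketch of the returned layout ('detector', 'oscillation_stage' and
# 'beam' groups), which is what the xrdutil patch functions expect:
#
#   cfg = panel.config_dict(chi=instr.chi, tvec=instr.tvec,
#                           beam_energy=instr.beam_energy,
#                           beam_vector=instr.beam_vector, style='hdf5')
#   nrows = cfg['detector']['pixels']['rows']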
def pixel_angles(self, origin=ct.zeros_3):
return _pixel_angles(origin, self.pixel_coords, self.distortion,
self.rmat, self.tvec, self.bvec, self.evec,
self.rows, self.cols)
def pixel_tth_gradient(self, origin=ct.zeros_3):
assert len(origin) == 3, "origin must have 3 elements"
ptth, _ = self.pixel_angles(origin=origin)
return np.linalg.norm(np.stack(np.gradient(ptth)), axis=0)
def pixel_eta_gradient(self, origin=ct.zeros_3):
period = np.r_[0., 2*np.pi]
assert len(origin) == 3, "origin must have 3 elements"
_, peta = self.pixel_angles(origin=origin)
# !!! handle cyclic nature of eta
rowmap = np.empty_like(peta)
for i in range(rowmap.shape[0]):
rowmap[i, :] = mapAngle(
peta[i, :], peta[i, 0] + period
)
colmap = np.empty_like(peta)
for i in range(colmap.shape[1]):
colmap[:, i] = mapAngle(
peta[:, i], peta[0, i] + period
)
peta_grad_row = np.gradient(rowmap)
peta_grad_col = np.gradient(colmap)
return np.linalg.norm(
np.stack([peta_grad_col[0], peta_grad_row[1]]),
axis=0
)
def cartToPixel(self, xy_det, pixels=False):
"""
Convert vstacked array or list of [x,y] points in the center-based
cartesian frame {Xd, Yd, Zd} to (i, j) edge-based indices
i is the row index, measured from the upper-left corner
j is the col index, measured from the upper-left corner
if pixels=True, then (i,j) are integer pixel indices.
else (i,j) are continuous coords
"""
xy_det = np.atleast_2d(xy_det)
npts = len(xy_det)
tmp_ji = xy_det - np.tile(self.corner_ul, (npts, 1))
i_pix = -tmp_ji[:, 1] / self.pixel_size_row - 0.5
j_pix = tmp_ji[:, 0] / self.pixel_size_col - 0.5
ij_det = np.vstack([i_pix, j_pix]).T
if pixels:
ij_det = np.array(np.round(ij_det), dtype=int)
return ij_det
def pixelToCart(self, ij_det):
"""
Convert vstacked array or list of [i,j] pixel indices
(or UL corner-based points) to (x,y) in the
cartesian frame {Xd, Yd, Zd}
"""
ij_det = np.atleast_2d(ij_det)
x = (ij_det[:, 1] + 0.5)*self.pixel_size_col\
+ self.corner_ll[0]
y = (self.rows - ij_det[:, 0] - 0.5)*self.pixel_size_row\
+ self.corner_ll[1]
return np.vstack([x, y]).T
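# Hedged round-trip sketch relating the two converters above: with
# continuous (i, j) output (pixels=False) the mapping inverts exactly up to
# floating point; with pixels=True it only matches at pixel centers.
#
#   ij = panel.cartToPixel(xy)          # (n, 2) float row/col coordinates
#   xy_back = panel.pixelToCart(ij)     # ~= xy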
def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None):
"""
Wraps xrdutil.angularPixelSize
"""
# munge kwargs
if rMat_s is None:
rMat_s = ct.identity_3x3
if tVec_s is None:
tVec_s = ct.zeros_3x1
if tVec_c is None:
tVec_c = ct.zeros_3x1
# call function
ang_ps = xrdutil.angularPixelSize(
xy, (self.pixel_size_row, self.pixel_size_col),
self.rmat, rMat_s,
self.tvec, tVec_s, tVec_c,
distortion=self.distortion,
beamVec=self.bvec, etaVec=self.evec)
return ang_ps
def clip_to_panel(self, xy, buffer_edges=True):
"""
if self.roi is not None, uses it by default
TODO: check if need shape kwarg
TODO: optimize ROI search better than list comprehension below
TODO: panel_buffer can be a 2-d boolean mask, but needs testing
"""
xy = np.atleast_2d(xy)
if self.roi is not None:
ij_crds = self.cartToPixel(xy, pixels=True)
ii, jj = polygon(self.roi[:, 0], self.roi[:, 1],
shape=(self.rows, self.cols))
on_panel_rows = [i in ii for i in ij_crds[:, 0]]
on_panel_cols = [j in jj for j in ij_crds[:, 1]]
on_panel = np.logical_and(on_panel_rows, on_panel_cols)
else:
xlim = 0.5*self.col_dim
ylim = 0.5*self.row_dim
if buffer_edges and self.panel_buffer is not None:
if self.panel_buffer.ndim == 2:
pix = self.cartToPixel(xy, pixels=True)
roff = np.logical_or(pix[:, 0] < 0, pix[:, 0] >= self.rows)
coff = np.logical_or(pix[:, 1] < 0, pix[:, 1] >= self.cols)
idx = np.logical_or(roff, coff)
pix[idx, :] = 0
on_panel = self.panel_buffer[pix[:, 0], pix[:, 1]]
on_panel[idx] = False
else:
xlim -= self.panel_buffer[0]
ylim -= self.panel_buffer[1]
on_panel_x = np.logical_and(
xy[:, 0] >= -xlim, xy[:, 0] <= xlim
)
on_panel_y = np.logical_and(
xy[:, 1] >= -ylim, xy[:, 1] <= ylim
)
on_panel = np.logical_and(on_panel_x, on_panel_y)
elif not buffer_edges or self.panel_buffer is None:
on_panel_x = np.logical_and(
xy[:, 0] >= -xlim, xy[:, 0] <= xlim
)
on_panel_y = np.logical_and(
xy[:, 1] >= -ylim, xy[:, 1] <= ylim
)
on_panel = np.logical_and(on_panel_x, on_panel_y)
return xy[on_panel, :], on_panel
def cart_to_angles(self, xy_data, rmat_s=None, tvec_s=None, tvec_c=None):
"""
TODO: distortion
"""
if rmat_s is None:
rmat_s = ct.identity_3x3
if tvec_s is None:
tvec_s = ct.zeros_3
if tvec_c is None:
tvec_c = ct.zeros_3
angs, g_vec = detectorXYToGvec(
xy_data, self.rmat, rmat_s,
self.tvec, tvec_s, tvec_c,
beamVec=self.bvec, etaVec=self.evec)
tth_eta = np.vstack([angs[0], angs[1]]).T
return tth_eta, g_vec
def angles_to_cart(self, tth_eta,
rmat_s=None, tvec_s=None,
rmat_c=None, tvec_c=None):
"""
TODO: distortion
"""
if rmat_s is None:
rmat_s = ct.identity_3x3
if tvec_s is None:
tvec_s = ct.zeros_3
if rmat_c is None:
rmat_c = ct.identity_3x3
if tvec_c is None:
tvec_c = ct.zeros_3
# !!! warning, this assumes an rmat_s made from chi, ome pair
chi = np.arccos(rmat_s[1, 1])
ome = np.arccos(rmat_s[0, 0])
angs = np.hstack([tth_eta, np.tile(ome, (len(tth_eta), 1))])
xy_det = gvecToDetectorXY(
anglesToGVec(angs, bHat_l=self.bvec, eHat_l=self.evec, chi=chi),
self.rmat, rmat_s, rmat_c,
self.tvec, tvec_s, tvec_c,
beamVec=self.bvec)
return xy_det
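# Hedged sketch: with the default (identity) sample/crystal frames these two
# methods are approximate inverses; note that neither applies the panel
# distortion (see the TODOs above).
#
#   tth_eta, gvec = panel.cart_to_angles(xy)
#   xy_back = panel.angles_to_cart(tth_eta)   # ~= xy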
def interpolate_nearest(self, xy, img, pad_with_nans=True):
"""
TODO: revisit normalization in here?
"""
is_2d = img.ndim == 2
right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols
assert is_2d and right_shape,\
"input image must be 2-d with shape (%d, %d)"\
% (self.rows, self.cols)
# initialize output with nans
if pad_with_nans:
int_xy = np.nan*np.ones(len(xy))
else:
int_xy = np.zeros(len(xy))
# clip away points too close to or off the edges of the detector
xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True)
# get pixel indices of clipped points
i_src = cellIndices(self.row_pixel_vec, xy_clip[:, 1])
j_src = cellIndices(self.col_pixel_vec, xy_clip[:, 0])
# next interpolate across cols
int_vals = img[i_src, j_src]
int_xy[on_panel] = int_vals
return int_xy
def interpolate_bilinear(self, xy, img, pad_with_nans=True):
"""
Interpolate an image array at the specified cartesian points.
Parameters
----------
xy : array_like, (n, 2)
Array of cartesian coordinates in the image plane at which
to evaluate intensity.
img : array_like
2-dimensional image array.
pad_with_nans : bool, optional
Toggle for assigning NaN to points that fall off the detector.
The default is True.
Returns
-------
int_xy : array_like, (n,)
The array of interpolated intensities at each of the n input
coordinates.
Notes
-----
TODO: revisit normalization in here?
"""
is_2d = img.ndim == 2
right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols
assert is_2d and right_shape,\
"input image must be 2-d with shape (%d, %d)"\
% (self.rows, self.cols)
# initialize output with nans
if pad_with_nans:
int_xy = np.nan*np.ones(len(xy))
else:
int_xy = np.zeros(len(xy))
# clip away points too close to or off the edges of the detector
xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True)
# grab fractional pixel indices of clipped points
ij_frac = self.cartToPixel(xy_clip)
# get floors/ceils from array of pixel _centers_
# and fix indices running off the pixel centers
# !!! notice we already clipped points to the panel!
i_floor = cellIndices(self.row_pixel_vec, xy_clip[:, 1])
i_floor_img = _fix_indices(i_floor, 0, self.rows - 1)
j_floor = cellIndices(self.col_pixel_vec, xy_clip[:, 0])
j_floor_img = _fix_indices(j_floor, 0, self.cols - 1)
# ceilings from floors
i_ceil = i_floor + 1
i_ceil_img = _fix_indices(i_ceil, 0, self.rows - 1)
j_ceil = j_floor + 1
j_ceil_img = _fix_indices(j_ceil, 0, self.cols - 1)
# first interpolate at top/bottom rows
row_floor_int = \
(j_ceil - ij_frac[:, 1])*img[i_floor_img, j_floor_img] \
+ (ij_frac[:, 1] - j_floor)*img[i_floor_img, j_ceil_img]
row_ceil_int = \
(j_ceil - ij_frac[:, 1])*img[i_ceil_img, j_floor_img] \
+ (ij_frac[:, 1] - j_floor)*img[i_ceil_img, j_ceil_img]
# next interpolate across cols
int_vals = \
(i_ceil - ij_frac[:, 0])*row_floor_int \
+ (ij_frac[:, 0] - i_floor)*row_ceil_int
int_xy[on_panel] = int_vals
return int_xy
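# Hedged note on the weights used above: for fractional indices (i, j) the
# four neighboring pixel intensities are combined as
#   I = (i_ceil - i)*[(j_ceil - j)*I_ff + (j - j_floor)*I_fc]
#     + (i - i_floor)*[(j_ceil - j)*I_cf + (j - j_floor)*I_cc]
# where I_ff is the (floor, floor) neighbor, etc.; points off the panel are
# NaN (or 0 when pad_with_nans=False).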
def make_powder_rings(
self, pd, merge_hkls=False, delta_tth=None,
delta_eta=10., eta_period=None, eta_list=None,
rmat_s=ct.identity_3x3, tvec_s=ct.zeros_3,
tvec_c=ct.zeros_3, full_output=False):
"""
Generate points on Debye_Scherrer rings over the detector.
!!! it is assuming that rmat_s is built from (chi, ome) as is the case
for HEDM!
Parameters
----------
pd : TYPE
DESCRIPTION.
merge_hkls : TYPE, optional
DESCRIPTION. The default is False.
delta_tth : TYPE, optional
DESCRIPTION. The default is None.
delta_eta : TYPE, optional
DESCRIPTION. The default is 10..
eta_period : TYPE, optional
DESCRIPTION. The default is None.
eta_list : TYPE, optional
DESCRIPTION. The default is None.
rmat_s : TYPE, optional
DESCRIPTION. The default is ct.identity_3x3.
tvec_s : TYPE, optional
DESCRIPTION. The default is ct.zeros_3.
tvec_c : TYPE, optional
DESCRIPTION. The default is ct.zeros_3.
full_output : TYPE, optional
DESCRIPTION. The default is False.
Raises
------
RuntimeError
DESCRIPTION.
Returns
-------
TYPE
DESCRIPTION.
"""
# in case you want to give it tth angles directly
if hasattr(pd, '__len__'):
tth = np.array(pd).flatten()
if delta_tth is None:
raise RuntimeError(
"If supplying a 2theta list as first arg, "
+ "must supply a delta_tth")
sector_vertices = np.tile(
0.5*np.radians([-delta_tth, -delta_eta,
-delta_tth, delta_eta,
delta_tth, delta_eta,
delta_tth, -delta_eta,
0.0, 0.0]), (len(tth), 1)
)
# Convert to radians as is done below
del_eta = np.radians(delta_eta)
else:
# Okay, we have a PlaneData object
try:
pd = PlaneData.makeNew(pd) # make a copy to munge
except(TypeError):
# !!! have some other object here, likely a dummy plane data
# object of some sort...
pass
if delta_tth is not None:
pd.tThWidth = np.radians(delta_tth)
else:
delta_tth = np.degrees(pd.tThWidth)
# conversions, meh...
del_eta = np.radians(delta_eta)
# do merging if asked
if merge_hkls:
_, tth_ranges = pd.getMergedRanges(cullDupl=True)
tth = np.array([0.5*sum(i) for i in tth_ranges])
else:
tth_ranges = pd.getTThRanges()
tth = pd.getTTh()
tth_pm = tth_ranges - np.tile(tth, (2, 1)).T
sector_vertices = np.vstack(
[[i[0], -del_eta,
i[0], del_eta,
i[1], del_eta,
i[1], -del_eta,
0.0, 0.0]
for i in tth_pm])
# for generating rings, make eta vector in correct period
if eta_period is None:
eta_period = (-np.pi, np.pi)
if eta_list is None:
neta = int(360./float(delta_eta))
# this is the vector of ETA EDGES
eta_edges = mapAngle(
np.radians(
delta_eta*np.linspace(0., neta, num=neta + 1)
) + eta_period[0],
eta_period
)
# get eta bin centers from edges
"""
# !!! this way is probably overkill, since we have delta eta
eta_centers = np.average(
np.vstack([eta[:-1], eta[1:]]),
axis=0)
"""
# !!! should be safe as eta_edges are monotonic
eta_centers = eta_edges[:-1] + 0.5*del_eta
else:
eta_centers = np.radians(eta_list).flatten()
neta = len(eta_centers)
eta_edges = (
np.tile(eta_centers, (2, 1)) +
np.tile(0.5*del_eta*np.r_[-1, 1], (neta, 1)).T
).T.flatten()
# get chi and ome from rmat_s
# ??? not needed chi = np.arctan2(rmat_s[2, 1], rmat_s[1, 1])
ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0])
# make list of angle tuples
angs = [
np.vstack(
[i*np.ones(neta), eta_centers, ome*np.ones(neta)]
) for i in tth
]
# need xy coords and pixel sizes
valid_ang = []
valid_xy = []
map_indices = []
npp = 5 # [ll, ul, ur, lr, center]
for i_ring in range(len(angs)):
# expand angles to patch vertices
these_angs = angs[i_ring].T
patch_vertices = (
np.tile(these_angs[:, :2], (1, npp))
+ np.tile(sector_vertices[i_ring], (neta, 1))
).reshape(npp*neta, 2)
# duplicate ome array
ome_dupl = np.tile(
these_angs[:, 2], (npp, 1)
).T.reshape(npp*neta, 1)
# find vertices that all fall on the panel
gVec_ring_l = anglesToGVec(
np.hstack([patch_vertices, ome_dupl]),
bHat_l=self.bvec)
all_xy = gvecToDetectorXY(
gVec_ring_l,
self.rmat, rmat_s, ct.identity_3x3,
self.tvec, tvec_s, tvec_c,
beamVec=self.bvec)
if self.distortion is not None:
all_xy = self.distortion.apply_inverse(all_xy)
_, on_panel = self.clip_to_panel(all_xy)
# all vertices must be on...
patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1)
patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on]
# form output arrays
valid_ang.append(these_angs[patch_is_on, :2])
valid_xy.append(patch_xys[:, -1, :].squeeze())
map_indices.append(patch_is_on)
pass
# ??? is this option necessary?
if full_output:
return valid_ang, valid_xy, map_indices, eta_edges
else:
return valid_ang, valid_xy
def map_to_plane(self, pts, rmat, tvec):
"""
Map detector points to the specified plane.
Parameters
----------
pts : TYPE
DESCRIPTION.
rmat : TYPE
DESCRIPTION.
tvec : TYPE
DESCRIPTION.
Returns
-------
TYPE
DESCRIPTION.
Notes
-----
by convention:
n * (u*pts_l - tvec) = 0
[pts]_l = rmat*[pts]_m + tvec
"""
# arg munging
pts = np.atleast_2d(pts)
npts = len(pts)
# map plane normal & translation vector, LAB FRAME
nvec_map_lab = rmat[:, 2].reshape(3, 1)
tvec_map_lab = np.atleast_2d(tvec).reshape(3, 1)
tvec_d_lab = np.atleast_2d(self.tvec).reshape(3, 1)
# put pts as 3-d in panel CS and transform to 3-d lab coords
pts_det = np.hstack([pts, np.zeros((npts, 1))])
pts_lab = np.dot(self.rmat, pts_det.T) + tvec_d_lab
# scaling along pts vectors to hit map plane
u = np.dot(nvec_map_lab.T, tvec_map_lab) \
/ np.dot(nvec_map_lab.T, pts_lab)
# pts on map plane, in LAB FRAME
pts_map_lab = np.tile(u, (3, 1)) * pts_lab
return np.dot(rmat.T, pts_map_lab - tvec_map_lab)[:2, :].T
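# Hedged worked sketch of the intersection math in the Notes above: for a
# detector point p expressed in the lab frame, the scale factor u solves
#   n . (u*p - t) = 0  ->  u = (n . t) / (n . p),
# so u*p lies on the target plane; the code then rotates that point back
# into the plane's own frame and keeps the in-plane (x, y) components.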
def simulate_rotation_series(self, plane_data, grain_param_list,
eta_ranges=[(-np.pi, np.pi), ],
ome_ranges=[(-np.pi, np.pi), ],
ome_period=(-np.pi, np.pi),
chi=0., tVec_s=ct.zeros_3,
wavelength=None):
"""
Simulate a monochromatic rotation series for a list of grains.
Parameters
----------
plane_data : TYPE
DESCRIPTION.
grain_param_list : TYPE
DESCRIPTION.
eta_ranges : TYPE, optional
DESCRIPTION. The default is [(-np.pi, np.pi), ].
ome_ranges : TYPE, optional
DESCRIPTION. The default is [(-np.pi, np.pi), ].
ome_period : TYPE, optional
DESCRIPTION. The default is (-np.pi, np.pi).
chi : TYPE, optional
DESCRIPTION. The default is 0..
tVec_s : TYPE, optional
DESCRIPTION. The default is ct.zeros_3.
wavelength : TYPE, optional
DESCRIPTION. The default is None.
Returns
-------
valid_ids : TYPE
DESCRIPTION.
valid_hkls : TYPE
DESCRIPTION.
valid_angs : TYPE
DESCRIPTION.
valid_xys : TYPE
DESCRIPTION.
ang_pixel_size : TYPE
DESCRIPTION.
"""
# grab B-matrix from plane data
bMat = plane_data.latVecOps['B']
# reconcile wavelength
# * added sanity check on exclusions here; possible to
# * make some reflections invalid (NaN)
if wavelength is None:
wavelength = plane_data.wavelength
else:
if plane_data.wavelength != wavelength:
plane_data.wavelength = ct.keVToAngstrom(wavelength)
assert not np.any(np.isnan(plane_data.getTTh())),\
"plane data exclusions incompatible with wavelength"
# vstacked G-vector id, h, k, l
full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data)
""" LOOP OVER GRAINS """
valid_ids = []
valid_hkls = []
valid_angs = []
valid_xys = []
ang_pixel_size = []
for gparm in grain_param_list:
# make useful parameters
rMat_c = makeRotMatOfExpMap(gparm[:3])
tVec_c = gparm[3:6]
vInv_s = gparm[6:]
# All possible bragg conditions as vstacked [tth, eta, ome]
# for each omega solution
angList = np.vstack(
oscillAnglesOfHKLs(
full_hkls[:, 1:], chi,
rMat_c, bMat, wavelength,
vInv=vInv_s,
)
)
# filter by eta and omega ranges
# ??? get eta range from detector?
allAngs, allHKLs = xrdutil._filter_hkls_eta_ome(
full_hkls, angList, eta_ranges, ome_ranges
)
allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period)
# find points that fall on the panel
det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane(
allAngs,
self.rmat, rMat_c, chi,
self.tvec, tVec_c, tVec_s,
self.distortion)
xys_p, on_panel = self.clip_to_panel(det_xy)
valid_xys.append(xys_p)
# filter angs and hkls that are on the detector plane
# !!! check this -- seems unnecessary but the results of
# _project_on_detector_plane() can have len < the input.
# the output of _project_on_detector_plane has been modified to
# hand back the index array to remedy this JVB 2020-05-27
filtered_angs = np.atleast_2d(allAngs[on_plane, :])
filtered_hkls = np.atleast_2d(allHKLs[on_plane, :])
# grab hkls and gvec ids for this panel
valid_hkls.append(filtered_hkls[on_panel, 1:])
valid_ids.append(filtered_hkls[on_panel, 0])
# reflection angles (voxel centers) and pixel size in (tth, eta)
valid_angs.append(filtered_angs[on_panel, :])
ang_pixel_size.append(self.angularPixelSize(xys_p))
return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size
def simulate_laue_pattern(self, crystal_data,
minEnergy=5., maxEnergy=35.,
rmat_s=None, tvec_s=None,
grain_params=None,
beam_vec=None):
"""
"""
if isinstance(crystal_data, PlaneData):
plane_data = crystal_data
# grab the expanded list of hkls from plane_data
hkls = np.hstack(plane_data.getSymHKLs())
# and the unit plane normals (G-vectors) in CRYSTAL FRAME
gvec_c = np.dot(plane_data.latVecOps['B'], hkls)
elif len(crystal_data) == 2:
# !!! should clean this up
hkls = np.array(crystal_data[0])
bmat = crystal_data[1]
gvec_c = np.dot(bmat, hkls)
else:
raise RuntimeError('argument list not understood')
nhkls_tot = hkls.shape[1]
# parse energy ranges
# TODO: allow for spectrum parsing
multipleEnergyRanges = False
if hasattr(maxEnergy, '__len__'):
assert len(maxEnergy) == len(minEnergy), \
'energy cutoff ranges must have the same length'
multipleEnergyRanges = True
lmin = []
lmax = []
for i in range(len(maxEnergy)):
lmin.append(ct.keVToAngstrom(maxEnergy[i]))
lmax.append(ct.keVToAngstrom(minEnergy[i]))
else:
lmin = ct.keVToAngstrom(maxEnergy)
lmax = ct.keVToAngstrom(minEnergy)
# parse grain parameters kwarg
if grain_params is None:
grain_params = np.atleast_2d(
np.hstack([np.zeros(6), ct.identity_6x1])
)
n_grains = len(grain_params)
# sample rotation
if rmat_s is None:
rmat_s = ct.identity_3x3
# dummy translation vector... make input
if tvec_s is None:
tvec_s = ct.zeros_3
# beam vector
if beam_vec is None:
beam_vec = ct.beam_vec
# =========================================================================
# LOOP OVER GRAINS
# =========================================================================
# pre-allocate output arrays
xy_det = np.nan*np.ones((n_grains, nhkls_tot, 2))
hkls_in = np.nan*np.ones((n_grains, 3, nhkls_tot))
angles = np.nan*np.ones((n_grains, nhkls_tot, 2))
dspacing = np.nan*np.ones((n_grains, nhkls_tot))
energy = np.nan*np.ones((n_grains, nhkls_tot))
for iG, gp in enumerate(grain_params):
rmat_c = makeRotMatOfExpMap(gp[:3])
tvec_c = gp[3:6].reshape(3, 1)
vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1))
# stretch them: V^(-1) * R * Gc
gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c))
ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str))
# project
dpts = gvecToDetectorXY(ghat_c_str.T,
self.rmat, rmat_s, rmat_c,
self.tvec, tvec_s, tvec_c,
beamVec=beam_vec)
# check intersections with detector plane
canIntersect = ~np.isnan(dpts[:, 0])
npts_in = sum(canIntersect)
if np.any(canIntersect):
dpts = dpts[canIntersect, :].reshape(npts_in, 2)
dhkl = hkls[:, canIntersect].reshape(3, npts_in)
# back to angles
tth_eta, gvec_l = detectorXYToGvec(
dpts,
self.rmat, rmat_s,
self.tvec, tvec_s, tvec_c,
beamVec=beam_vec)
tth_eta = np.vstack(tth_eta).T
# warp measured points
if self.distortion is not None:
dpts = self.distortion.apply_inverse(dpts)
# plane spacings and energies
dsp = 1. / rowNorm(gvec_s_str[:, canIntersect].T)
wlen = 2*dsp*np.sin(0.5*tth_eta[:, 0])
# clip to detector panel
_, on_panel = self.clip_to_panel(dpts, buffer_edges=True)
if multipleEnergyRanges:
validEnergy = np.zeros(len(wlen), dtype=bool)
for i in range(len(lmin)):
in_energy_range = np.logical_and(
wlen >= lmin[i],
wlen <= lmax[i])
validEnergy = validEnergy | in_energy_range
pass
else:
validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax)
pass
# index for valid reflections
keepers = np.where(np.logical_and(on_panel, validEnergy))[0]
# assign output arrays
xy_det[iG][keepers, :] = dpts[keepers, :]
hkls_in[iG][:, keepers] = dhkl[:, keepers]
angles[iG][keepers, :] = tth_eta[keepers, :]
dspacing[iG, keepers] = dsp[keepers]
energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers])
pass # close conditional on valids
pass # close loop on grains
return xy_det, hkls_in, angles, dspacing, energy
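# Hedged sketch of the output layout: every returned array is indexed by
# grain, with NaN rows for reflections that miss the panel or fall outside
# the requested energy window. Names below are placeholders.
#
#   xy, hkls, angs, dsp, nrg = panel.simulate_laue_pattern(
#       plane_data, minEnergy=5., maxEnergy=25.)
#   hits = ~np.isnan(xy[0][:, 0])    # valid reflections for grain 0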
# =============================================================================
# UTILITIES
# =============================================================================
class PatchDataWriter(object):
"""Class for dumping Bragg reflection data."""
def __init__(self, filename):
self._delim = ' '
header_items = (
'# ID', 'PID',
'H', 'K', 'L',
'sum(int)', 'max(int)',
'pred tth', 'pred eta', 'pred ome',
'meas tth', 'meas eta', 'meas ome',
'pred X', 'pred Y',
'meas X', 'meas Y'
)
self._header = self._delim.join([
self._delim.join(np.tile('{:<6}', 5)).format(*header_items[:5]),
self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]),
self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17])
])
if isinstance(filename, IOBase):
self.fid = filename
else:
self.fid = open(filename, 'w')
print(self._header, file=self.fid)
def __del__(self):
self.close()
def close(self):
self.fid.close()
def dump_patch(self, peak_id, hkl_id,
hkl, spot_int, max_int,
pangs, mangs, pxy, mxy):
"""
!!! maybe need to check that last four inputs are arrays
"""
if mangs is None:
spot_int = np.nan
max_int = np.nan
mangs = np.nan*np.ones(3)
mxy = np.nan*np.ones(2)
res = [int(peak_id), int(hkl_id)] \
+ np.array(hkl, dtype=int).tolist() \
+ [spot_int, max_int] \
+ pangs.tolist() \
+ mangs.tolist() \
+ pxy.tolist() \
+ mxy.tolist()
output_str = self._delim.join(
[self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]),
self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]),
self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:])]
)
print(output_str, file=self.fid)
return output_str
class GrainDataWriter(object):
"""Class for dumping grain data."""
def __init__(self, filename=None, array=None):
"""Writes to either file or np array
Array must be initialized with number of rows to be written.
"""
if filename is None and array is None:
raise RuntimeError(
'GrainDataWriter must be specified with filename or array')
self.array = None
self.fid = None
# array supersedes filename
if array is not None:
assert array.shape[1] == 21, \
f'grain data table must have 21 columns, not {array.shape[1]}'
self.array = array
self._array_row = 0
return
self._delim = ' '
header_items = (
'# grain ID', 'completeness', 'chi^2',
'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]',
't_vec_c[0]', 't_vec_c[1]', 't_vec_c[2]',
'inv(V_s)[0,0]', 'inv(V_s)[1,1]', 'inv(V_s)[2,2]',
'inv(V_s)[1,2]*sqrt(2)',
'inv(V_s)[0,2]*sqrt(2)',
'inv(V_s)[0,1]*sqrt(2)',
'ln(V_s)[0,0]', 'ln(V_s)[1,1]', 'ln(V_s)[2,2]',
'ln(V_s)[1,2]', 'ln(V_s)[0,2]', 'ln(V_s)[0,1]'
)
self._header = self._delim.join(
[self._delim.join(
np.tile('{:<12}', 3)
).format(*header_items[:3]),
self._delim.join(
np.tile('{:<23}', len(header_items) - 3)
).format(*header_items[3:])]
)
if isinstance(filename, IOBase):
self.fid = filename
else:
self.fid = open(filename, 'w')
print(self._header, file=self.fid)
def __del__(self):
self.close()
def close(self):
if self.fid is not None:
self.fid.close()
def dump_grain(self, grain_id, completeness, chisq,
grain_params):
assert len(grain_params) == 12, \
"len(grain_params) must be 12, not %d" % len(grain_params)
# extract strain
emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:])))
evec = mutil.symmToVecMV(emat, scale=False)
res = [int(grain_id), completeness, chisq] \
+ grain_params.tolist() \
+ evec.tolist()
if self.array is not None:
row = self._array_row
assert row < self.array.shape[0], \
f'invalid row {row} in array table'
self.array[row] = res
self._array_row += 1
return res
# (else) format and write to file
output_str = self._delim.join(
[self._delim.join(
['{:<12d}', '{:<12f}', '{:<12e}']
).format(*res[:3]),
self._delim.join(
np.tile('{:<23.16e}', len(res) - 3)
).format(*res[3:])]
)
print(output_str, file=self.fid)
return output_str
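# Hedged note on the strain columns written above: grain_params[6:] holds
# the 6-component inverse stretch, so the last six output columns are the
# components of ln(V_s):
#
#   V_inv = mutil.vecMVToSymm(grain_params[6:])
#   emat = logm(np.linalg.inv(V_inv))          # = ln(V_s)
#   evec = mutil.symmToVecMV(emat, scale=False)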
class GrainDataWriter_h5(object):
"""Class for dumping grain results to an HDF5 archive.
TODO: add material spec
"""
def __init__(self, filename, instr_cfg, grain_params, use_attr=False):
if isinstance(filename, h5py.File):
self.fid = filename
else:
self.fid = h5py.File(filename + ".hdf5", "w")
icfg = dict(instr_cfg)
# add instrument groups and attributes
self.instr_grp = self.fid.create_group('instrument')
unwrap_dict_to_h5(self.instr_grp, icfg, asattr=use_attr)
# add grain group
self.grain_grp = self.fid.create_group('grain')
rmat_c = makeRotMatOfExpMap(grain_params[:3])
tvec_c = np.array(grain_params[3:6]).flatten()
vinv_s = np.array(grain_params[6:]).flatten()
vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s))
if use_attr: # attribute version
self.grain_grp.attrs.create('rmat_c', rmat_c)
self.grain_grp.attrs.create('tvec_c', tvec_c)
self.grain_grp.attrs.create('inv(V)_s', vinv_s)
self.grain_grp.attrs.create('vmat_s', vmat_s)
else: # dataset version
self.grain_grp.create_dataset('rmat_c', data=rmat_c)
self.grain_grp.create_dataset('tvec_c', data=tvec_c)
self.grain_grp.create_dataset('inv(V)_s', data=vinv_s)
self.grain_grp.create_dataset('vmat_s', data=vmat_s)
data_key = 'reflection_data'
self.data_grp = self.fid.create_group(data_key)
for det_key in self.instr_grp['detectors'].keys():
self.data_grp.create_group(det_key)
# FIXME: throws exception when called after close method
# def __del__(self):
# self.close()
def close(self):
self.fid.close()
def dump_patch(self, panel_id,
i_refl, peak_id, hkl_id, hkl,
tth_edges, eta_edges, ome_centers,
xy_centers, ijs, frame_indices,
spot_data, pangs, pxy, mangs, mxy, gzip=1):
"""
to be called inside loop over patches
default GZIP level for data arrays is 1
"""
fi = np.array(frame_indices, dtype=int)
panel_grp = self.data_grp[panel_id]
spot_grp = panel_grp.create_group("spot_%05d" % i_refl)
spot_grp.attrs.create('peak_id', int(peak_id))
spot_grp.attrs.create('hkl_id', int(hkl_id))
spot_grp.attrs.create('hkl', np.array(hkl, dtype=int))
spot_grp.attrs.create('predicted_angles', pangs)
spot_grp.attrs.create('predicted_xy', pxy)
if mangs is None:
mangs = np.nan*np.ones(3)
spot_grp.attrs.create('measured_angles', mangs)
if mxy is None:
mxy = np.nan*np.ones(2)
spot_grp.attrs.create('measured_xy', mxy)
# get centers crds from edge arrays
# FIXME: export full coordinate arrays, or just center vectors???
#
# ome_crd, eta_crd, tth_crd = np.meshgrid(
# ome_centers,
# centers_of_edge_vec(eta_edges),
# centers_of_edge_vec(tth_edges),
# indexing='ij')
#
# ome_dim, eta_dim, tth_dim = spot_data.shape
# !!! for now just exporting center vectors for spot_data
tth_crd = centers_of_edge_vec(tth_edges)
eta_crd = centers_of_edge_vec(eta_edges)
shuffle_data = True # reduces size by 20%
spot_grp.create_dataset('tth_crd', data=tth_crd,
compression="gzip", compression_opts=gzip,
shuffle=shuffle_data)
spot_grp.create_dataset('eta_crd', data=eta_crd,
compression="gzip", compression_opts=gzip,
shuffle=shuffle_data)
spot_grp.create_dataset('ome_crd', data=ome_centers,
compression="gzip", compression_opts=gzip,
shuffle=shuffle_data)
spot_grp.create_dataset('xy_centers', data=xy_centers,
compression="gzip", compression_opts=gzip,
shuffle=shuffle_data)
spot_grp.create_dataset('ij_centers', data=ijs,
compression="gzip", compression_opts=gzip,
shuffle=shuffle_data)
spot_grp.create_dataset('frame_indices', data=fi,
compression="gzip", compression_opts=gzip,
shuffle=shuffle_data)
spot_grp.create_dataset('intensities', data=spot_data,
compression="gzip", compression_opts=gzip,
shuffle=shuffle_data)
return
def unwrap_dict_to_h5(grp, d, asattr=False):
"""
Unwraps a dictionary to an HDF5 file of the same structure.
Parameters
----------
grp : HDF5 group object
The HDF5 group to recursively unwrap the dict into.
d : dict
Input dict (of dicts).
asattr : bool, optional
Flag to write end member in dictionary tree to an attribute. If False,
if writes the object to a dataset using numpy. The default is False.
Returns
-------
None.
"""
while len(d) > 0:
key, item = d.popitem()
if isinstance(item, dict):
subgrp = grp.create_group(key)
unwrap_dict_to_h5(subgrp, item, asattr=asattr)
else:
if asattr:
grp.attrs.create(key, item)
else:
try:
grp.create_dataset(key, data=np.atleast_1d(item))
except(TypeError):
# probably a string badness
grp.create_dataset(key, data=item)
def unwrap_h5_to_dict(f, d):
"""
Unwraps a simple HDF5 file to a dictionary of the same structure.
Parameters
----------
f : HDF5 file (mode r)
The input HDF5 file object.
d : dict
dictionary object to update.
Returns
-------
None.
Notes
-----
As written, ignores attributes and uses numpy to cast HDF5 datasets to
dict entries. Checks for 'O' type arrays and casts to strings; also
converts single-element arrays to scalars.
"""
for key, val in f.items():
try:
d[key] = {}
unwrap_h5_to_dict(val, d[key])
except(AttributeError):
# reached a dataset
if np.dtype(val) == 'O':
d[key] = h5py_read_string(val)
else:
tmp = np.array(val)
if tmp.ndim == 1 and len(tmp) == 1:
d[key] = tmp[0]
else:
d[key] = tmp
class GenerateEtaOmeMaps(object):
"""
eta-ome map class derived from new image_series and YAML config
...for now...
must provide:
self.dataStore
self.planeData
self.iHKLList
self.etaEdges # IN RADIANS
self.omeEdges # IN RADIANS
self.etas # IN RADIANS
self.omegas # IN RADIANS
"""
def __init__(self, image_series_dict, instrument, plane_data,
active_hkls=None, eta_step=0.25, threshold=None,
ome_period=(0, 360)):
"""
image_series must be OmegaImageSeries class
instrument_params must be a dict (loaded from yaml spec)
active_hkls must be a list (required for now)
"""
self._planeData = plane_data
# ???: change name of iHKLList?
# ???: can we change the behavior of iHKLList?
if active_hkls is None:
n_rings = len(plane_data.getTTh())
self._iHKLList = range(n_rings)
else:
self._iHKLList = active_hkls
n_rings = len(active_hkls)
# ???: need to pass a threshold?
eta_mapping, etas = instrument.extract_polar_maps(
plane_data, image_series_dict,
active_hkls=active_hkls, threshold=threshold,
tth_tol=None, eta_tol=eta_step)
# grab a det key
# WARNING: this process assumes that the imageseries for all panels
# have the same length and omegas
det_key = list(eta_mapping.keys())[0]
data_store = []
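        # combine the per-detector eta maps ring by ring; pixels with no
        # coverage on any panel are reset to NaN after the summation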
for i_ring in range(n_rings):
full_map = np.zeros_like(eta_mapping[det_key][i_ring])
nan_mask_full = np.zeros(
(len(eta_mapping), full_map.shape[0], full_map.shape[1])
)
i_p = 0
for det_key, eta_map in eta_mapping.items():
nan_mask = ~np.isnan(eta_map[i_ring])
nan_mask_full[i_p] = nan_mask
full_map[nan_mask] += eta_map[i_ring][nan_mask]
i_p += 1
re_nan_these = np.sum(nan_mask_full, axis=0) == 0
full_map[re_nan_these] = np.nan
data_store.append(full_map)
self._dataStore = data_store
# handle omegas
omegas_array = image_series_dict[det_key].metadata['omega']
self._omegas = mapAngle(
np.radians(np.average(omegas_array, axis=1)),
np.radians(ome_period)
)
self._omeEdges = mapAngle(
np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]),
np.radians(ome_period)
)
# !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the
# indexer to work properly
if abs(self._omeEdges[0] - self._omeEdges[-1]) <= ct.sqrt_epsf:
# !!! SIGNED delta ome
del_ome = np.radians(omegas_array[0, 1] - omegas_array[0, 0])
self._omeEdges[-1] = self._omeEdges[-2] + del_ome
# handle etas
        # WARNING: unlike the omegas in imageseries metadata,
# these are in RADIANS and represent bin centers
self._etaEdges = etas
self._etas = self._etaEdges[:-1] + 0.5*np.radians(eta_step)
@property
def dataStore(self):
return self._dataStore
@property
def planeData(self):
return self._planeData
@property
def iHKLList(self):
return np.atleast_1d(self._iHKLList).flatten()
@property
def etaEdges(self):
return self._etaEdges
@property
def omeEdges(self):
return self._omeEdges
@property
def etas(self):
return self._etas
@property
def omegas(self):
return self._omegas
def save(self, filename):
xrdutil.EtaOmeMaps.save_eta_ome_maps(self, filename)
pass # end of class: GenerateEtaOmeMaps
def _row_edge_vec(rows, pixel_size_row):
return pixel_size_row*(0.5*rows-np.arange(rows+1))
def _col_edge_vec(cols, pixel_size_col):
return pixel_size_col*(np.arange(cols+1)-0.5*cols)
def _generate_pixel_solid_angles(start_stop, rows, cols, pixel_size_row,
pixel_size_col, rmat, tvec):
start, stop = start_stop
row_edge_vec = _row_edge_vec(rows, pixel_size_row)
col_edge_vec = _col_edge_vec(cols, pixel_size_col)
nvtx = len(row_edge_vec) * len(col_edge_vec)
# pixel vertex coords
pvy, pvx = np.meshgrid(row_edge_vec, col_edge_vec, indexing='ij')
# add Z_d coord and transform to lab frame
pcrd_array_full = np.dot(
np.vstack([pvx.flatten(), pvy.flatten(), np.zeros(nvtx)]).T,
rmat.T
) + tvec
conn = cellConnectivity(rows, cols)
ret = np.empty(len(range(start, stop)), dtype=float)
for i, ipix in enumerate(range(start, stop)):
pix_conn = conn[ipix]
vtx_list = pcrd_array_full[pix_conn, :]
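        # split the pixel quad into two triangles and sum their solid angles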
ret[i] = (_solid_angle_of_triangle(vtx_list[[0, 1, 2], :]) +
_solid_angle_of_triangle(vtx_list[[2, 3, 0], :]))
return ret
@memoize
def _pixel_angles(origin, pixel_coords, distortion, rmat, tvec, bvec, evec,
rows, cols):
assert len(origin) == 3, "origin must have 3 elements"
pix_i, pix_j = pixel_coords
xy = np.ascontiguousarray(
np.vstack([
pix_j.flatten(), pix_i.flatten()
]).T
)
if distortion is not None:
xy = distortion.apply(xy)
angs, g_vec = detectorXYToGvec(
xy, rmat, ct.identity_3x3,
tvec, ct.zeros_3, origin,
beamVec=bvec, etaVec=evec)
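    # angs holds the (two-theta, eta) values per pixel; reshape them back
    # onto the detector grid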
tth = angs[0].reshape(rows, cols)
eta = angs[1].reshape(rows, cols)
return tth, eta
@memoize
def _pixel_solid_angles(rows, cols, pixel_size_row, pixel_size_col,
rmat, tvec, max_workers):
# connectivity array for pixels
conn = cellConnectivity(rows, cols)
# result
solid_angs = np.empty(len(conn), dtype=float)
# Distribute tasks to each process
tasks = distribute_tasks(len(conn), max_workers)
kwargs = {
'rows': rows,
'cols': cols,
'pixel_size_row': pixel_size_row,
'pixel_size_col': pixel_size_col,
'rmat': rmat,
'tvec': tvec,
}
func = partial(_generate_pixel_solid_angles, **kwargs)
with ProcessPoolExecutor(max_workers=max_workers) as executor:
results = executor.map(func, tasks)
# Concatenate all the results together
solid_angs[:] = np.concatenate(list(results))
solid_angs = solid_angs.reshape(rows, cols)
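    # normalize by the smallest pixel solid angle so the map is a relative
    # (>= 1) correction factor rather than an absolute solid angle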
mi = solid_angs.min()
if mi > 0.:
solid_angs = solid_angs/mi
return solid_angs
@memoize
def _lorentz_polarization_factor(tth, eta, f_hor, f_vert):
"""
06/14/2021 SS adding lorentz polarization factor computation
    to the detector so that it can be compensated for in the
intensity correction
parameters: tth two theta of every pixel in radians
eta azimuthal angle of every pixel
f_hor fraction of horizontal polarization
(~1 for XFELs)
f_vert fraction of vertical polarization
(~0 for XFELs)
notice f_hor + f_vert = 1
"""
theta = 0.5*tth
cth = np.cos(theta)
sth2 = np.sin(theta)**2
ctth2 = np.cos(tth)**2
seta2 = np.sin(eta)**2
ceta2 = np.cos(eta)**2
L = 1./(cth*sth2)
P = f_hor*(seta2 + ceta2*ctth2) + f_vert*(ceta2 + seta2*ctth2)
return L*P
def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta):
# mark pixels in the spec'd tth range
pixels_in_tthr = np.logical_and(
ptth >= tthr[0], ptth <= tthr[1]
)
# catch case where ring isn't on detector
if not np.any(pixels_in_tthr):
return None
# ???: faster to index with bool or use np.where,
# or recode in numba?
rtth_idx = np.where(pixels_in_tthr)
# grab relevant eta coords using histogram
    # !!!: This allows us to calculate arc length and
# detect a branch cut. The histogram idx var
# is the left-hand edges...
retas = peta[rtth_idx]
if fast_histogram:
reta_hist = histogram1d(
retas,
len(eta_edges) - 1,
(eta_edges[0], eta_edges[-1])
)
else:
reta_hist, _ = histogram1d(retas, bins=eta_edges)
reta_idx = np.where(reta_hist)[0]
reta_bin_idx = np.hstack(
[reta_idx,
reta_idx[-1] + 1]
)
    # ring arc length on panel
arc_length = angularDifference(
eta_edges[reta_bin_idx[0]],
eta_edges[reta_bin_idx[-1]]
)
# Munge eta bins
# !!! need to work with the subset to preserve
# NaN values at panel extents!
#
# !!! MUST RE-MAP IF BRANCH CUT IS IN RANGE
#
# The logic below assumes that eta_edges span 2*pi to
# single precision
eta_bins = eta_edges[reta_bin_idx]
if arc_length < 1e-4:
# have branch cut in here
ring_gap = np.where(
reta_idx
- np.arange(len(reta_idx))
)[0]
if len(ring_gap) > 0:
# have incomplete ring
eta_stop_idx = ring_gap[0]
eta_stop = eta_edges[eta_stop_idx]
new_period = np.cumsum([eta_stop, 2*np.pi])
# remap
retas = mapAngle(retas, new_period)
tmp_bins = mapAngle(
eta_edges[reta_idx], new_period
)
tmp_idx = np.argsort(tmp_bins)
reta_idx = reta_idx[np.argsort(tmp_bins)]
eta_bins = np.hstack(
[tmp_bins[tmp_idx],
tmp_bins[tmp_idx][-1] + delta_eta]
)
return retas, eta_bins, rtth_idx, reta_idx
def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold):
for i_row in range(*rows):
image = ims[i_row]
# handle threshold if specified
if threshold is not None:
# !!! NaNs get preserved
image = np.array(image)
image[image < threshold] = 0.
for i_r, tthr in enumerate(tth_ranges):
this_map = ring_maps[i_r]
params = ring_params[i_r]
if not params:
# We are supposed to skip this ring...
continue
# Unpack the params
retas, eta_bins, rtth_idx, reta_idx = params
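            # histogram this ring's pixel intensities into the eta bins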
if fast_histogram:
result = histogram1d(retas, len(eta_bins) - 1,
(eta_bins[0], eta_bins[-1]),
weights=image[rtth_idx])
else:
result, _ = histogram1d(retas, bins=eta_bins,
weights=image[rtth_idx])
this_map[i_row, reta_idx] = result
|
py | 1a302845fac2f234c2c5914432911d45aee47749 | #!/usr/bin/env python3
import pathlib
import fileinput
from ci.util import (
check_env,
existing_file,
)
repo_dir = check_env('REPO_DIR')
effective_version = check_env('EFFECTIVE_VERSION')
template_file = existing_file(pathlib.Path(repo_dir, 'concourse', 'resources', 'defaults.mako'))
lines_replaced = 0
string_to_match = 'tag = '
for line in fileinput.FileInput(str(template_file), inplace=True):
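    # with inplace=True, stdout is redirected into the file, so each print()
    # below writes the (possibly rewritten) line back to the template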
if string_to_match in line:
if lines_replaced != 0:
            raise RuntimeError('More than one image tag found in template file')
leading_spaces = line.index(string_to_match)
print(f'{leading_spaces * " "}{string_to_match}"{effective_version}"')
lines_replaced = 1
else:
print(line, end='')
|
py | 1a3028b0d17e7d9d5cf5ce08108984d51cafc917 | from oslo_log import log
from datetime import datetime
from flask import request, jsonify
from flask_restful import Resource, fields, marshal_with, abort
from clapton import db
from clapton.db.sqlalchemy import models
from clapton.api import types
LOG = log.getLogger(__name__)
class OrderList(Resource):
def get(self):
orders = db.get_session().query(models.Order).all()
return jsonify((types.Order(many=True).dump(orders)).data), 200, {'X-Pagination-Total-Count': 1000}
'''
response = flask.make_response('[{"id": 123}]', 200)
response.headers.extend({'X-Pagination-Total-Count': 1000,
'Content-Type': 'application/json; charset=utf-8'})
return response
'''
return [{"id": 123}], 200, {'X-Pagination-Total-Count': 1000}
def post(self):
'''
validate request
parser = reqparse.RequestParser()
parser.add_argument(
'total_amount',
dest='total_amount',
type=str,
location='form', # form, args, headers, cookies, json, files
required=True,
help='The orders\'s total amount',
)
args = parser.parse_args(strict=True)
LOG.debug(args)
LOG.debug(args.total_amount)
return {}, 201
'''
data = request.get_json()
if not data:
            return jsonify({'message': 'No input data provided'}), 400
data, errors = types.Order().load(data)
if errors:
return jsonify(errors), 422
o = models.Order(id=data['id'])
return jsonify((types.Order().dump(o)).data), 201
class Order(Resource):
@marshal_with({'id': fields.String, 'created_at': fields.DateTime, 'links': fields.Nested({'items': fields.Url('items', absolute=True)})})
def get(self, order_id):
'''
outputing format
'''
return {'id': 123, 'created_at': datetime.now(), 'links': []}
def put(self, order_id):
LOG.debug(request.form)
LOG.debug(request.json)
LOG.debug(dir(request))
return {}, 201
def delete(self, order_id):
abort(500)
# raise ValueError('haha')
# raise werkzeug.exceptions.HTTPException(500)
# raise werkzeug.exceptions.InternalServerError
return '', 204
class OrderItemList(Resource):
def get(self, order_id):
return []
|
py | 1a3028f71a48aad11f4b72c7d043f488cd5eb1b1 | # Copyright 2019 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_db import exception as db_exc
from cyborg.common import exception
from cyborg import objects
from cyborg.tests.unit.db.base import DbTestCase
from cyborg.tests.unit import fake_deployable
from cyborg.tests.unit import fake_device
from cyborg.tests.unit.objects import test_objects
class TestDeployableObject(DbTestCase):
@property
def fake_device(self):
db_device = fake_device.get_fake_devices_as_dict()[2]
return db_device
@property
def fake_deployable(self):
db_deploy = fake_deployable.fake_db_deployable(id=1)
return db_deploy
@property
def fake_deployable2(self):
db_deploy = fake_deployable.fake_db_deployable(id=2)
return db_deploy
def test_create(self):
db_device = self.fake_device
device = objects.Device(context=self.context,
**db_device)
device.create(self.context)
device_get = objects.Device.get(self.context, device.uuid)
db_dpl = self.fake_deployable
dpl = objects.Deployable(context=self.context,
**db_dpl)
dpl.device_id = device_get.id
dpl.create(self.context)
self.assertEqual(db_dpl['uuid'], dpl.uuid)
def test_get(self):
db_device = self.fake_device
device = objects.Device(context=self.context,
**db_device)
device.create(self.context)
device_get = objects.Device.get(self.context, device.uuid)
db_dpl = self.fake_deployable
dpl = objects.Deployable(context=self.context,
**db_dpl)
dpl.device_id = device_get.id
dpl.create(self.context)
dpl_get = objects.Deployable.get(self.context, dpl.uuid)
self.assertEqual(dpl_get.uuid, dpl.uuid)
def test_get_by_filter(self):
db_device = self.fake_device
device = objects.Device(context=self.context,
**db_device)
device.create(self.context)
device_get = objects.Device.get(self.context, device.uuid)
db_dpl = self.fake_deployable
dpl = objects.Deployable(context=self.context,
**db_dpl)
dpl.device_id = device_get.id
dpl.create(self.context)
query = {"uuid": dpl['uuid']}
dpl_get_list = objects.Deployable.get_by_filter(self.context, query)
self.assertEqual(dpl_get_list[0].uuid, dpl.uuid)
def test_save(self):
db_device = self.fake_device
device = objects.Device(context=self.context,
**db_device)
device.create(self.context)
device_get = objects.Device.get(self.context, device.uuid)
db_dpl = self.fake_deployable
dpl = objects.Deployable(context=self.context,
**db_dpl)
dpl.device_id = device_get.id
dpl.create(self.context)
dpl.num_accelerators = 8
dpl.save(self.context)
dpl_get = objects.Deployable.get(self.context, dpl.uuid)
self.assertEqual(dpl_get.num_accelerators, 8)
def test_destroy(self):
db_device = self.fake_device
device = objects.Device(context=self.context,
**db_device)
device.create(self.context)
device_get = objects.Device.get(self.context, device.uuid)
db_dpl = self.fake_deployable
dpl = objects.Deployable(context=self.context,
**db_dpl)
dpl.device_id = device_get.id
dpl.create(self.context)
self.assertEqual(db_dpl['uuid'], dpl.uuid)
dpl.destroy(self.context)
self.assertRaises(exception.ResourceNotFound,
objects.Deployable.get, self.context,
dpl.uuid)
class TestDeployableObject(test_objects._LocalTest,
TestDeployableObject):
def _test_save_objectfield_fk_constraint_fails(self, foreign_key,
expected_exception):
error = db_exc.DBReferenceError('table', 'constraint', foreign_key,
'key_table')
# Prevent lazy-loading any fields, results in InstanceNotFound
deployable = fake_deployable.fake_deployable_obj(self.context)
fields_with_save_methods = [field for field in deployable.fields
if hasattr(deployable, '_save_%s' % field)]
for field in fields_with_save_methods:
@mock.patch.object(deployable, '_save_%s' % field)
@mock.patch.object(deployable, 'obj_attr_is_set')
def _test(mock_is_set, mock_save_field):
mock_is_set.return_value = True
mock_save_field.side_effect = error
deployable.obj_reset_changes(fields=[field])
deployable._changed_fields.add(field)
self.assertRaises(expected_exception, deployable.save)
deployable.obj_reset_changes(fields=[field])
_test()
|
py | 1a3029291fc83c88809f1cd45425b63f1bff23cb | # from django.test import TestCase
# from polls.models import Question,Choice
# from django.utils import timezone
#
#
# # from django.test import Client
# # Create your tests here.
# # used for writing test cases
# # model tests
# class StudyTestCsse(TestCase):
# def setUp(self):
# Question.objects.create(id=1,question_text="你的女朋友是谁?",pub_date=timezone.now())
#
# def test_01(self):
#         u'''Test querying a question'''
# question = Question.objects.get(id=1)
# self.assertIn("你的女朋友是谁?",question.question_text)
#
# def test_02(self):
#         u'''Test creating a question'''
# Question.objects.create(id=2,question_text="今天吃什么?",pub_date=timezone.now())
# question = Question.objects.get(id=2)
# self.assertIn("今天吃什么",question.question_text)
#
# def test_03(self):
#         u'''Test updating data'''
# question = Question.objects.get(id=1)
# Question.objects.filter(id=1).update(question_text="周末是否加班")
# question = Question.objects.get(id=1)
# self.assertIn("周末是否加班",question.question_text)
#
# def test_04(self):
#         u'''Test deleting data'''
# question = Question.objects.get(id=1)
# Question.objects.filter(id=1).delete()
# self.assertEqual(0,len(Question.objects.all()))
#
# class choiceTestcase(TestCase):
#
# def setUp(self):
# Question.objects.create(id=1,question_text="what's new?",pub_date=timezone.now())
# Choice.objects.create(id=1,choice_text='Not Much',votes=0,question_id=1)
# Choice.objects.create(id=2,choice_text='The sky',votes=0,question_id=1)
#
# def test_choice_query(self):
#         u'''Test querying question choices'''
# choice = Choice.objects.get(id=1)
# self.assertEqual(choice.choice_text,"Not Much")
|
py | 1a30292bc2773def9c2a33301a689783ced66f65 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Dataset',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('dataset', models.FileField(upload_to=b'datasets')),
('dimensions', models.PositiveIntegerField(default=0)),
('length', models.PositiveIntegerField(default=0)),
('filesize', models.PositiveIntegerField(default=0)),
('signature', models.CharField(unique=True, max_length=44, blank=True)),
('datatype', models.CharField(default=b'csv', max_length=4, choices=[(b'csv', b'csv'), (b'json', b'json'), (b'xml', b'xml')])),
('delimiter', models.CharField(default=b',', max_length=1)),
('uploader', models.ForeignKey(related_name='datasets', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-created',),
'db_table': 'datasets',
'get_latest_by': 'created',
},
),
]
|
py | 1a302982b0604d6f6f62aa08d229c33367c435e9 | # Copyright 2000-2002 by Andrew Dalke.
# Revisions copyright 2007-2008 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Alphabets used in Seq objects etc to declare sequence type and letters.
This is used by sequences which contain a finite number of similar words.
"""
class Alphabet:
size = None # no fixed size for words
letters = None # no fixed alphabet; implement as a list-like
# interface,
def __repr__(self):
return self.__class__.__name__ + "()"
def contains(self, other):
"""Does this alphabet 'contain' the other (OBSOLETE?).
Returns a boolean. This relies on the Alphabet subclassing
hierarchy only, and does not check the letters property.
This isn't ideal, and doesn't seem to work as intended
with the AlphabetEncoder classes."""
return isinstance(other, self.__class__)
def _case_less(self):
"""Return an case-less variant of the current alphabet (PRIVATE)."""
#TODO - remove this method by dealing with things in subclasses?
if isinstance(self, ProteinAlphabet):
return generic_protein
elif isinstance(self, DNAAlphabet):
return generic_dna
        elif isinstance(self, RNAAlphabet):
return generic_rna
elif isinstance(self, NucleotideAlphabet):
return generic_nucleotide
elif isinstance(self, SingleLetterAlphabet):
return single_letter_alphabet
else:
return generic_alphabet
def _upper(self):
"""Return an upper case variant of the current alphabet (PRIVATE)."""
if not self.letters or self.letters==self.letters.upper():
#Easy case, no letters or already upper case!
return self
else:
#TODO - Raise NotImplementedError and handle via subclass?
return self._case_less()
def _lower(self):
"""Return a lower case variant of the current alphabet (PRIVATE)."""
if not self.letters or self.letters==self.letters.lower():
#Easy case, no letters or already lower case!
return self
else:
#TODO - Raise NotImplementedError and handle via subclass?
return self._case_less()
generic_alphabet = Alphabet()
class SingleLetterAlphabet(Alphabet):
size = 1
letters = None # string of all letters in the alphabet
single_letter_alphabet = SingleLetterAlphabet()
########### Protein
class ProteinAlphabet(SingleLetterAlphabet):
pass
generic_protein = ProteinAlphabet()
########### DNA
class NucleotideAlphabet(SingleLetterAlphabet):
pass
generic_nucleotide = NucleotideAlphabet()
class DNAAlphabet(NucleotideAlphabet):
pass
generic_dna = DNAAlphabet()
########### RNA
class RNAAlphabet(NucleotideAlphabet):
pass
generic_rna = RNAAlphabet()
########### Other per-sequence encodings
class SecondaryStructure(SingleLetterAlphabet):
letters = "HSTC"
class ThreeLetterProtein(Alphabet):
size = 3
letters = [
"Ala", "Asx", "Cys", "Asp", "Glu", "Phe", "Gly", "His", "Ile",
"Lys", "Leu", "Met", "Asn", "Pro", "Gln", "Arg", "Ser", "Thr",
"Sec", "Val", "Trp", "Xaa", "Tyr", "Glx",
]
###### Non per-sequence modifications
# (These are Decorator classes)
class AlphabetEncoder:
def __init__(self, alphabet, new_letters):
self.alphabet = alphabet
self.new_letters = new_letters
if alphabet.letters is not None:
self.letters = alphabet.letters + new_letters
else:
self.letters = None
def __getattr__(self, key):
if key[:2] == "__" and key[-2:] == "__":
raise AttributeError(key)
return getattr(self.alphabet, key)
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.alphabet,
self.new_letters)
def contains(self, other):
"""Does this alphabet 'contain' the other (OBSOLETE?).
        This isn't implemented for the base AlphabetEncoder,
which will always return 0 (False)."""
return 0
def _upper(self):
"""Return an upper case variant of the current alphabet (PRIVATE)."""
return AlphabetEncoder(self.alphabet._upper(), self.new_letters.upper())
def _lower(self):
"""Return a lower case variant of the current alphabet (PRIVATE)."""
return AlphabetEncoder(self.alphabet._lower(), self.new_letters.lower())
class Gapped(AlphabetEncoder):
def __init__(self, alphabet, gap_char = "-"):
AlphabetEncoder.__init__(self, alphabet, gap_char)
self.gap_char = gap_char
def contains(self, other):
"""Does this alphabet 'contain' the other (OBSOLETE?).
Returns a boolean. This relies on the Alphabet subclassing
hierarchy, and attempts to check the gap character. This fails
if the other alphabet does not have a gap character!
"""
return other.gap_char == self.gap_char and \
self.alphabet.contains(other.alphabet)
def _upper(self):
"""Return an upper case variant of the current alphabet (PRIVATE)."""
return Gapped(self.alphabet._upper(), self.gap_char.upper())
def _lower(self):
"""Return a lower case variant of the current alphabet (PRIVATE)."""
return Gapped(self.alphabet._lower(), self.gap_char.lower())
class HasStopCodon(AlphabetEncoder):
def __init__(self, alphabet, stop_symbol = "*"):
AlphabetEncoder.__init__(self, alphabet, stop_symbol)
self.stop_symbol = stop_symbol
def __cmp__(self, other):
x = cmp(self.alphabet, other.alphabet)
if x == 0:
return cmp(self.stop_symbol, other.stop_symbol)
return x
def contains(self, other):
"""Does this alphabet 'contain' the other (OBSOLETE?).
Returns a boolean. This relies on the Alphabet subclassing
hierarchy, and attempts to check the stop symbol. This fails
if the other alphabet does not have a stop symbol!
"""
return other.stop_symbol == self.stop_symbol and \
self.alphabet.contains(other.alphabet)
def _upper(self):
"""Return an upper case variant of the current alphabet (PRIVATE)."""
return HasStopCodon(self.alphabet._upper(), self.stop_symbol.upper())
def _lower(self):
"""Return a lower case variant of the current alphabet (PRIVATE)."""
return HasStopCodon(self.alphabet._lower(), self.stop_symbol.lower())
def _get_base_alphabet(alphabet):
"""Returns the non-gapped non-stop-codon Alphabet object (PRIVATE)."""
a = alphabet
while isinstance(a, AlphabetEncoder):
a = a.alphabet
assert isinstance(a, Alphabet), \
"Invalid alphabet found, %s" % repr(a)
return a
def _ungap(alphabet):
"""Returns the alphabet without any gap encoder (PRIVATE)."""
#TODO - Handle via method of the objects?
if not hasattr(alphabet, "gap_char"):
return alphabet
elif isinstance(alphabet, Gapped):
return alphabet.alphabet
elif isinstance(alphabet, HasStopCodon):
return HasStopCodon(_ungap(alphabet.alphabet), stop_symbol=alphabet.stop_symbol)
elif isinstance(alphabet, AlphabetEncoder):
return AlphabetEncoder(_ungap(alphabet.alphabet), letters=alphabet.letters)
else:
raise NotImplementedError
def _consensus_base_alphabet(alphabets):
"""Returns a common but often generic base alphabet object (PRIVATE).
This throws away any AlphabetEncoder information, e.g. Gapped alphabets.
Note that DNA+RNA -> Nucleotide, and Nucleotide+Protein-> generic single
letter. These DO NOT raise an exception!"""
common = None
for alpha in alphabets:
a = _get_base_alphabet(alpha)
if common is None:
common = a
elif common == a:
pass
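        # otherwise keep whichever of the two alphabets is the more general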
elif isinstance(a, common.__class__):
pass
elif isinstance(common, a.__class__):
common = a
elif isinstance(a, NucleotideAlphabet) \
and isinstance(common, NucleotideAlphabet):
#e.g. Give a mix of RNA and DNA alphabets
common = generic_nucleotide
elif isinstance(a, SingleLetterAlphabet) \
and isinstance(common, SingleLetterAlphabet):
#This is a pretty big mis-match!
common = single_letter_alphabet
else:
#We have a major mis-match... take the easy way out!
return generic_alphabet
if common is None:
#Given NO alphabets!
return generic_alphabet
return common
def _consensus_alphabet(alphabets):
"""Returns a common but often generic alphabet object (PRIVATE).
Note that DNA+RNA -> Nucleotide, and Nucleotide+Protein-> generic single
letter. These DO NOT raise an exception!
This is aware of Gapped and HasStopCodon and new letters added by
other AlphabetEncoders. This WILL raise an exception if more than
one gap character or stop symbol is present."""
base = _consensus_base_alphabet(alphabets)
gap = None
stop = None
new_letters = ""
for alpha in alphabets:
#Gaps...
if not hasattr(alpha, "gap_char"):
pass
elif gap is None:
gap = alpha.gap_char
elif gap == alpha.gap_char:
pass
else:
raise ValueError("More than one gap character present")
#Stops...
if not hasattr(alpha, "stop_symbol"):
pass
elif stop is None:
stop = alpha.stop_symbol
elif stop == alpha.stop_symbol:
pass
else:
raise ValueError("More than one stop symbol present")
#New letters...
if hasattr(alpha, "new_letters"):
for letter in alpha.new_letters:
if letter not in new_letters \
and letter != gap and letter != stop:
new_letters += letter
alpha = base
if new_letters:
alpha = AlphabetEncoder(alpha, new_letters)
if gap:
alpha = Gapped(alpha, gap_char=gap)
if stop:
alpha = HasStopCodon(alpha, stop_symbol=stop)
return alpha
def _check_type_compatible(alphabets):
"""Returns True except for DNA+RNA or Nucleotide+Protein (PRIVATE).
This relies on the Alphabet subclassing hierarchy. It does not
check things like gap characters or stop symbols."""
dna, rna, nucl, protein = False, False, False, False
for alpha in alphabets:
a = _get_base_alphabet(alpha)
if isinstance(a, DNAAlphabet):
dna = True
nucl = True
if rna or protein : return False
elif isinstance(a, RNAAlphabet):
rna = True
nucl = True
if dna or protein : return False
elif isinstance(a, NucleotideAlphabet):
nucl = True
if protein : return False
elif isinstance(a, ProteinAlphabet):
protein = True
if nucl : return False
return True
|
py | 1a3029c55a3e76194405a448d8cbe490d1fa4940 | """ Setup remote debugger with Python Tools for Visual Studio (PTVSD)
"""
import os
from .celery_log_setup import get_task_logger
REMOTE_DEBUG_PORT = 3000
log = get_task_logger(__name__)
def setup_remote_debugging(force_enabled: bool = False, *, boot_mode=None) -> None:
""" Programaticaly enables remote debugging if SC_BOOT_MODE==debug-ptvsd
"""
if "SC_BOOT_MODE" not in os.environ:
log.warning("Remote debugging only available when running in a container")
return
boot_mode = boot_mode or os.environ.get("SC_BOOT_MODE")
if boot_mode == "debug-ptvsd" or force_enabled:
try:
log.debug("Enabling attach ptvsd ...")
#
# SEE https://github.com/microsoft/ptvsd#enabling-debugging
#
import ptvsd
ptvsd.enable_attach(
address=("0.0.0.0", REMOTE_DEBUG_PORT), redirect_output=True
) # nosec
except ImportError:
log.exception("Unable to use remote debugging. ptvsd is not installed")
else:
log.info("Remote debugging enabled: listening port %s", REMOTE_DEBUG_PORT)
else:
log.debug("Booting without remote debugging since SC_BOOT_MODE=%s", boot_mode)
__all__ = ["setup_remote_debugging"]
|
py | 1a302b90ca158ff107d25b7cdeffb5f9a5e3e5ba | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from slack_g_cal.parse import JSON, Datetime
class WitDatetimeContainer(JSON):
""" Container wrapping datetime values from the Wit API """
def __init__(self, **dt_json):
self.is_interval = dt_json['type'] == 'interval'
# Get rid of values; we don't need this parameter
dt_json.pop('values', None)
if self.is_interval:
from_, to_ = dt_json.pop('from'), dt_json.pop('to')
self.dt_from = WitDatetime(date_input=from_.value, grain=from_.grain)
self.dt_to = WitDatetime(date_input=to_.value, grain=to_.grain)
else:
self.date = WitDatetime(date_input=dt_json.pop('value'), grain=dt_json.pop('grain'))
super(WitDatetimeContainer, self).__init__(**dt_json)
class WitDatetime(Datetime):
def __init__(self, date_input, **dt_json):
self.grain = dt_json.pop('grain')
super(WitDatetime, self).__init__(date_input=date_input, **dt_json)
def adjust_grain_by(self, adj_val):
kwargs = {self.grain: getattr(self._datetime, self.grain) + adj_val}
self._datetime = self._datetime.replace(**kwargs)
|
py | 1a302b9a05e3fe2458365da32681c566fef33e7d | # -*- coding: utf-8 -*-
"""IPython Test Suite Runner.
This module provides a main entry point to a user script to test IPython
itself from the command line. There are two ways of running this script:
1. With the syntax `iptest all`. This runs our entire test suite by
calling this script (with different arguments) recursively. This
causes modules and package to be tested in different processes, using nose
or trial where appropriate.
2. With the regular nose syntax, like `iptest -vvs IPython`. In this form
the script simply calls nose, but with special command line flags and
plugins loaded.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import glob
from io import BytesIO
import os
import os.path as path
import sys
from threading import Thread, Lock, Event
import warnings
import nose.plugins.builtin
from nose.plugins.xunit import Xunit
from nose import SkipTest
from nose.core import TestProgram
from nose.plugins import Plugin
from nose.util import safe_str
from IPython import version_info
from IPython.utils.py3compat import decode
from IPython.utils.importstring import import_item
from IPython.testing.plugin.ipdoctest import IPythonDoctest
from IPython.external.decorators import KnownFailure, knownfailureif
pjoin = path.join
# Enable printing all warnings raise by IPython's modules
warnings.filterwarnings('ignore', message='.*Matplotlib is building the font cache.*', category=UserWarning, module='.*')
warnings.filterwarnings('error', message='.*', category=ResourceWarning, module='.*')
warnings.filterwarnings('error', message=".*{'config': True}.*", category=DeprecationWarning, module='IPy.*')
warnings.filterwarnings('default', message='.*', category=Warning, module='IPy.*')
warnings.filterwarnings('error', message='.*apply_wrapper.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*make_label_dec', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*decorated_dummy.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*skip_file_no_x11.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*onlyif_any_cmd_exists.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*disable_gui.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*ExceptionColors global is deprecated.*', category=DeprecationWarning, module='.*')
# Jedi older versions
warnings.filterwarnings(
'error', message='.*elementwise != comparison failed and.*', category=FutureWarning, module='.*')
if version_info < (6,):
    # nose.tools renames all things from `camelCase` to `snake_case`, which raises a
    # warning with the runner; they also import from the standard library. (as of Dec 2015)
# Ignore, let's revisit that in a couple of years for IPython 6.
warnings.filterwarnings(
'ignore', message='.*Please use assertEqual instead', category=Warning, module='IPython.*')
if version_info < (7,):
warnings.filterwarnings('ignore', message='.*Completer.complete.*',
category=PendingDeprecationWarning, module='.*')
else:
warnings.warn(
'Completer.complete was pending deprecation and should be changed to Deprecated', FutureWarning)
# ------------------------------------------------------------------------------
# Monkeypatch Xunit to count known failures as skipped.
# ------------------------------------------------------------------------------
def monkeypatch_xunit():
try:
knownfailureif(True)(lambda: None)()
except Exception as e:
KnownFailureTest = type(e)
def addError(self, test, err, capt=None):
if issubclass(err[0], KnownFailureTest):
err = (SkipTest,) + err[1:]
return self.orig_addError(test, err, capt)
Xunit.orig_addError = Xunit.addError
Xunit.addError = addError
#-----------------------------------------------------------------------------
# Check which dependencies are installed and greater than minimum version.
#-----------------------------------------------------------------------------
def extract_version(mod):
return mod.__version__
def test_for(item, min_version=None, callback=extract_version):
"""Test to see if item is importable, and optionally check against a minimum
version.
If min_version is given, the default behavior is to check against the
`__version__` attribute of the item, but specifying `callback` allows you to
extract the value you are interested in. e.g::
In [1]: import sys
In [2]: from IPython.testing.iptest import test_for
In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info)
Out[3]: True
"""
try:
check = import_item(item)
except (ImportError, RuntimeError):
# GTK reports Runtime error if it can't be initialized even if it's
# importable.
return False
else:
if min_version:
if callback:
# extra processing step to get version to compare
check = callback(check)
return check >= min_version
else:
return True
# Global dict where we can store information on what we have and what we don't
# have available at test run time
have = {'matplotlib': test_for('matplotlib'),
'pygments': test_for('pygments'),
'sqlite3': test_for('sqlite3')}
#-----------------------------------------------------------------------------
# Test suite definitions
#-----------------------------------------------------------------------------
test_group_names = ['core',
'extensions', 'lib', 'terminal', 'testing', 'utils',
]
class TestSection(object):
def __init__(self, name, includes):
self.name = name
self.includes = includes
self.excludes = []
self.dependencies = []
self.enabled = True
def exclude(self, module):
if not module.startswith('IPython'):
module = self.includes[0] + "." + module
self.excludes.append(module.replace('.', os.sep))
def requires(self, *packages):
self.dependencies.extend(packages)
@property
def will_run(self):
return self.enabled and all(have[p] for p in self.dependencies)
# Name -> (include, exclude, dependencies_met)
test_sections = {n:TestSection(n, ['IPython.%s' % n]) for n in test_group_names}
# Exclusions and dependencies
# ---------------------------
# core:
sec = test_sections['core']
if not have['sqlite3']:
sec.exclude('tests.test_history')
sec.exclude('history')
if not have['matplotlib']:
sec.exclude('pylabtools'),
sec.exclude('tests.test_pylabtools')
# lib:
sec = test_sections['lib']
sec.exclude('kernel')
if not have['pygments']:
sec.exclude('tests.test_lexers')
# We do this unconditionally, so that the test suite doesn't import
# gtk, changing the default encoding and masking some unicode bugs.
sec.exclude('inputhookgtk')
# We also do this unconditionally, because wx can interfere with Unix signals.
# There are currently no tests for it anyway.
sec.exclude('inputhookwx')
# Testing inputhook will need a lot of thought, to figure out
# how to have tests that don't lock up with the gui event
# loops in the picture
sec.exclude('inputhook')
# testing:
sec = test_sections['testing']
# These have to be skipped on win32 because they use echo, rm, cd, etc.
# See ticket https://github.com/ipython/ipython/issues/87
if sys.platform == 'win32':
sec.exclude('plugin.test_exampleip')
sec.exclude('plugin.dtexample')
# don't run jupyter_console tests found via shim
test_sections['terminal'].exclude('console')
# extensions:
sec = test_sections['extensions']
# This is deprecated in favour of rpy2
sec.exclude('rmagic')
# autoreload does some strange stuff, so move it to its own test section
sec.exclude('autoreload')
sec.exclude('tests.test_autoreload')
test_sections['autoreload'] = TestSection('autoreload',
['IPython.extensions.autoreload', 'IPython.extensions.tests.test_autoreload'])
test_group_names.append('autoreload')
#-----------------------------------------------------------------------------
# Functions and classes
#-----------------------------------------------------------------------------
def check_exclusions_exist():
from IPython.paths import get_ipython_package_dir
from warnings import warn
parent = os.path.dirname(get_ipython_package_dir())
for sec in test_sections:
for pattern in sec.exclusions:
fullpath = pjoin(parent, pattern)
if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):
warn("Excluding nonexistent file: %r" % pattern)
class ExclusionPlugin(Plugin):
"""A nose plugin to effect our exclusions of files and directories.
"""
name = 'exclusions'
score = 3000 # Should come before any other plugins
def __init__(self, exclude_patterns=None):
"""
Parameters
----------
exclude_patterns : sequence of strings, optional
Filenames containing these patterns (as raw strings, not as regular
expressions) are excluded from the tests.
"""
self.exclude_patterns = exclude_patterns or []
super(ExclusionPlugin, self).__init__()
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
def configure(self, options, config):
Plugin.configure(self, options, config)
# Override nose trying to disable plugin.
self.enabled = True
def wantFile(self, filename):
"""Return whether the given filename should be scanned for tests.
"""
if any(pat in filename for pat in self.exclude_patterns):
return False
return None
def wantDirectory(self, directory):
"""Return whether the given directory should be scanned for tests.
"""
if any(pat in directory for pat in self.exclude_patterns):
return False
return None
class StreamCapturer(Thread):
daemon = True # Don't hang if main thread crashes
started = False
def __init__(self, echo=False):
super(StreamCapturer, self).__init__()
self.echo = echo
self.streams = []
self.buffer = BytesIO()
self.readfd, self.writefd = os.pipe()
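        # subprocess output written to writefd is read back from readfd by run()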
self.buffer_lock = Lock()
self.stop = Event()
def run(self):
self.started = True
while not self.stop.is_set():
chunk = os.read(self.readfd, 1024)
with self.buffer_lock:
self.buffer.write(chunk)
if self.echo:
sys.stdout.write(decode(chunk))
os.close(self.readfd)
os.close(self.writefd)
def reset_buffer(self):
with self.buffer_lock:
self.buffer.truncate(0)
self.buffer.seek(0)
def get_buffer(self):
with self.buffer_lock:
return self.buffer.getvalue()
def ensure_started(self):
if not self.started:
self.start()
def halt(self):
"""Safely stop the thread."""
if not self.started:
return
self.stop.set()
os.write(self.writefd, b'\0') # Ensure we're not locked in a read()
self.join()
class SubprocessStreamCapturePlugin(Plugin):
name='subprocstreams'
def __init__(self):
Plugin.__init__(self)
self.stream_capturer = StreamCapturer()
self.destination = os.environ.get('IPTEST_SUBPROC_STREAMS', 'capture')
# This is ugly, but distant parts of the test machinery need to be able
# to redirect streams, so we make the object globally accessible.
nose.iptest_stdstreams_fileno = self.get_write_fileno
def get_write_fileno(self):
if self.destination == 'capture':
self.stream_capturer.ensure_started()
return self.stream_capturer.writefd
elif self.destination == 'discard':
return os.open(os.devnull, os.O_WRONLY)
else:
return sys.__stdout__.fileno()
def configure(self, options, config):
Plugin.configure(self, options, config)
# Override nose trying to disable plugin.
if self.destination == 'capture':
self.enabled = True
def startTest(self, test):
# Reset log capture
self.stream_capturer.reset_buffer()
def formatFailure(self, test, err):
# Show output
ec, ev, tb = err
captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
if captured.strip():
ev = safe_str(ev)
out = [ev, '>> begin captured subprocess output <<',
captured,
'>> end captured subprocess output <<']
return ec, '\n'.join(out), tb
return err
formatError = formatFailure
def finalize(self, result):
self.stream_capturer.halt()
def run_iptest():
"""Run the IPython test suite using nose.
This function is called when this script is **not** called with the form
`iptest all`. It simply calls nose with appropriate command line flags
and accepts all of the standard nose arguments.
"""
# Apply our monkeypatch to Xunit
if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'):
monkeypatch_xunit()
arg1 = sys.argv[1]
if arg1 in test_sections:
section = test_sections[arg1]
sys.argv[1:2] = section.includes
elif arg1.startswith('IPython.') and arg1[8:] in test_sections:
section = test_sections[arg1[8:]]
sys.argv[1:2] = section.includes
else:
section = TestSection(arg1, includes=[arg1])
argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks
# We add --exe because of setuptools' imbecility (it
# blindly does chmod +x on ALL files). Nose does the
# right thing and it tries to avoid executables,
# setuptools unfortunately forces our hand here. This
# has been discussed on the distutils list and the
# setuptools devs refuse to fix this problem!
'--exe',
]
if '-a' not in argv and '-A' not in argv:
argv = argv + ['-a', '!crash']
if nose.__version__ >= '0.11':
# I don't fully understand why we need this one, but depending on what
# directory the test suite is run from, if we don't give it, 0 tests
# get run. Specifically, if the test suite is run from the source dir
# with an argument (like 'iptest.py IPython.core', 0 tests are run,
# even if the same call done in this directory works fine). It appears
# that if the requested package is in the current dir, nose bails early
# by default. Since it's otherwise harmless, leave it in by default
# for nose >= 0.11, though unfortunately nose 0.10 doesn't support it.
argv.append('--traverse-namespace')
plugins = [ ExclusionPlugin(section.excludes), KnownFailure(),
SubprocessStreamCapturePlugin() ]
# we still have some vestigial doctests in core
if (section.name.startswith(('core', 'IPython.core', 'IPython.utils'))):
plugins.append(IPythonDoctest())
argv.extend([
'--with-ipdoctest',
'--ipdoctest-tests',
'--ipdoctest-extension=txt',
])
# Use working directory set by parent process (see iptestcontroller)
if 'IPTEST_WORKING_DIR' in os.environ:
os.chdir(os.environ['IPTEST_WORKING_DIR'])
# We need a global ipython running in this process, but the special
# in-process group spawns its own IPython kernels, so for *that* group we
# must avoid also opening the global one (otherwise there's a conflict of
# singletons). Ultimately the solution to this problem is to refactor our
# assumptions about what needs to be a singleton and what doesn't (app
# objects should, individual shells shouldn't). But for now, this
# workaround allows the test suite for the inprocess module to complete.
if 'kernel.inprocess' not in section.name:
from IPython.testing import globalipapp
globalipapp.start_ipython()
# Now nose can run
TestProgram(argv=argv, addplugins=plugins)
if __name__ == '__main__':
run_iptest()
|
py | 1a302bfb759b8043e0cff31548f78d641a922257 | #DateTimeExample1.py
from datetime import *
#Will print current date and time
print("Current Date Time : ",datetime.now())
|
py | 1a302d3e780cbfdb4a4eee3fee3214deb23994e4 | import os
from io import StringIO
from django.contrib.gis.geos import Point
from django.test import TestCase
from uk_geo_utils.models import Onspd
from uk_geo_utils.management.commands.import_onspd import Command
class OnsudImportTest(TestCase):
def test_import_onspd(self):
# check table is empty before we start
self.assertEqual(0, Onspd.objects.count())
# path to file we're going to import
csv_path = os.path.abspath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'../fixtures/onspd'
)
)
cmd = Command()
        # suppress output
out = StringIO()
cmd.stdout = out
# import data
opts = {
'path': csv_path,
}
cmd.handle(**opts)
# ensure all our tasty data has been imported
self.assertEqual(4, Onspd.objects.count())
# row with valid grid ref should have valid Point() location
al11aa = Onspd.objects.filter(pcds="AL1 1AA")[0]
self.assertEqual(Point(-0.341337, 51.749084, srid=4326), al11aa.location)
# row with invalid grid ref should have NULL location
im11aa = Onspd.objects.filter(pcds="IM1 1AA")[0]
self.assertIsNone(im11aa.location)
|
py | 1a302da4c6f30789d8399841111880c8b91a2e66 | #!/usr/bin/env python
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import argparse
import shutil
arg_parser = argparse.ArgumentParser(description="This is a script to convert coco annotations to voc-like annotations.")
arg_parser.add_argument('-ti', '--train_images', type=str, default="./coco2014/train2014", help='where to put coco2014 train images.')
arg_parser.add_argument('-vi', '--val_images', type=str, default='./coco2014/val2014', help='where to put coco2014 val images.')
arg_parser.add_argument('-ta', '--train_anno', type=str, default='./coco2014/instances_train2014.json', help='where to put coco2014 train set annotations.')
arg_parser.add_argument('-va', '--val_anno', type=str, default='./coco2014/instances_val2014.json', help='where to put coco2014 val set annotations')
arg_parser.add_argument('-tlf', '--tran_list_file', type=str, default='./coco2014/train2014.txt', help='image list for training')
arg_parser.add_argument('-vlf', '--val_list_file', type=str, default='./coco2014/val2014.txt', help='image list for evaluation.')
arg_parser.add_argument('-ai', '--all_images', type=str, default='./coco2014/Images', help='where to put all images.')
arg_parser.add_argument('-aa', '--all_anno', type=str, default='./coco2014/Annotations', help='where to put all annotations.')
args = arg_parser.parse_args()
'''How to organize coco dataset folder:
inputs:
coco2014/
|->train2014/
|->val2014/
|->instances_train2014.json
|->instances_val2014.json
outputs:
coco2014/
|->Annotations/
|->Images/
|->train2014.txt
|->val2014.txt
'''
def convert_images_coco2voc(args):
assert os.path.exists(args.train_images)
assert os.path.exists(args.val_images)
os.system('mv ' + args.train_images + ' ' + args.all_images)
imagename_list = os.listdir(args.val_images)
for imagename in imagename_list:
shutil.copy(os.path.join(args.val_images, imagename), args.all_images)
os.system('rm -r ' + args.val_images)
def generate_cid_name(json_object):
id2name_dict = {}
for ind, category_info in enumerate(json_object['categories']):
id2name_dict[category_info['id']] = category_info['name']
return id2name_dict
def generate_image_dict(json_object):
id2image_dict = {}
for ind, image_info in enumerate(json_object['images']):
id2image_dict[image_info['id']] = image_info['file_name']
return id2image_dict
def generate_annotation_files(json_object, annotation_path, id2image_dict, id2name, image_list_file):
if not os.path.exists(annotation_path):
os.mkdir(annotation_path)
f_image = open(image_list_file, 'w')
all_images_name = []
for ind, anno_info in enumerate(json_object['annotations']):
print('preprocess: {}'.format(ind))
category_id = anno_info['category_id']
cls_name = id2name[category_id]
if cls_name != "person":
continue
image_id = anno_info['image_id']
image_name = id2image_dict[image_id]
bbox = anno_info['bbox']
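        # COCO boxes are [x, y, width, height]; convert to [xmin, ymin, xmax, ymax]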
bbox[2] = bbox[0] + bbox[2]
bbox[3] = bbox[3] + bbox[1]
bbox_str = ' '.join([str(int(x)) for x in bbox])
with open(os.path.join(annotation_path, image_name.split('.')[0] + '.txt'), 'a') as f_anno:
f_anno.writelines(image_name.split('.')[0] + " " + cls_name + " " + bbox_str + "\n")
if image_name not in all_images_name:
all_images_name.append(image_name)
for image_name in all_images_name:
f_image.writelines(image_name.split('.')[0] + "\n")
f_image.close()
def convert_anno_coco2voc(coco_anno_file, image_list_file, all_anno_path):
with open(coco_anno_file, 'r') as f_ann:
line = f_ann.readlines()
json_object = json.loads(line[0])
id2name = generate_cid_name(json_object)
id2image_dict = generate_image_dict(json_object)
generate_annotation_files(json_object, all_anno_path, id2image_dict, id2name, image_list_file)
def convert_anno_all(args):
convert_anno_coco2voc(args.train_anno, args.tran_list_file, args.all_anno)
convert_anno_coco2voc(args.val_anno, args.val_list_file, args.all_anno)
if __name__ == "__main__":
convert_anno_all(args)
convert_images_coco2voc(args)
|
py | 1a302defb66f0280aa78784e4c2a5f022f25518d | #!/usr/bin/env python
"""
models for the mailroom program.
This is where the program logic is.
This version has been made Object Oriented.
"""
# handy utility to make pretty printing easier
from textwrap import dedent
from pathlib import Path
import json_save.json_save_dec as js
import json
from . import data_dir
@js.json_save
class Donor:
"""
class to hold the information about a single donor
"""
name = js.String()
donations = js.List()
# reference to the DB its in -- this will be set in the instance
# when added to the DonorDB
_donor_db = None
def __init__(self, name, donations=None):
"""
create a new Donor object
:param name: the full name of the donor
:param donations=None: iterable of past donations
"""
self.norm_name = self.normalize_name(name)
self.name = name.strip()
if donations is None:
self.donations = []
else:
self.donations = list(donations)
def __str__(self):
msg = (f"Donor: {self.name}, with {self.num_donations:d} "
f"donations, totaling: ${self.total_donations:.2f}")
return msg
def mutating(method):
"""
Decorator that saves the DB when a change is made
It should be applied to all mutating methods, so the
data will be saved whenever it's been changed.
NOTE: This requires that the donor object is in a DonorDB.
"""
# note that this is expecting to decorate a method
# so self will be the first argument
def wrapped(self, *args, **kwargs):
print("wrapped method called")
print(self)
print(self._donor_db)
res = method(self, *args, **kwargs)
if self._donor_db is not None:
self._donor_db.save()
return res
return wrapped
@staticmethod
def normalize_name(name):
"""
return a normalized version of a name to use as a comparison key
simple enough to not be in a method now, but maybe you'd want to make it fancier later.
"""
return name.lower().strip()
@property
def last_donation(self):
"""
The most recent donation made
"""
try:
return self.donations[-1]
except IndexError:
return None
@property
def total_donations(self):
return sum(self.donations)
@property
def num_donations(self):
return len(self.donations)
@property
def average_donation(self):
return self.total_donations / self.num_donations
@mutating
def add_donation(self, amount):
"""
add a new donation
"""
print("add_donation called")
amount = float(amount)
if amount <= 0.0:
raise ValueError("Donation must be greater than zero")
self.donations.append(amount)
def gen_letter(self):
"""
Generate a thank you letter for the donor
:param: donor tuple
:returns: string with letter
note: This doesn't actually write to a file -- that's a separate
function. This makes it more flexible and easier to test.
"""
return dedent('''Dear {0:s},
Thank you for your very kind donation of ${1:.2f}.
It will be put to very good use.
Sincerely,
-The Team
'''.format(self.name, self.last_donation)
)
@js.json_save
class DonorDB:
"""
Encapsulation of the entire database of donors and data associated with them.
"""
# specify a json_save dict as the data structure for the data.
donor_data = js.Dict()
_frozen = False
def __init__(self, donors=None, db_file=None):
"""
Initialize a new donor database
:param donors=None: iterable of Donor objects
:param db_file=None: path to file to store the datbase in.
if None, the data will be stored in the
package data_dir
"""
if db_file is None:
self.db_file = data_dir / "mailroom_data.json"
else:
self.db_file = Path(db_file)
self.donor_data = {}
if donors is not None:
# you can set _frozen so that it won't save on every change.
self._frozen = True
for d in donors:
self.add_donor(d)
            self.save()  # save resets _frozen
def mutating(method):
"""
Decorator that saves the DB when a change is made
It should be applied to all mutating methods, so the
data will be saved whenever it's been changed.
NOTE: This is not very efficient -- it will re-write
the entire file each time.
"""
# note that this is expecting to decorate a method
# so self will be the first argument
def wrapped(self, *args, **kwargs):
res = method(self, *args, **kwargs)
if not self._frozen:
self.save()
return res
return wrapped
@classmethod
def load_from_file(cls, filename):
"""
loads a donor database from a raw json file
NOTE: This is not a json_save format file!
-- it is a simpler, less flexible format.
"""
with open(filename) as infile:
donors = json.load(infile)
db = cls([Donor(*d) for d in donors])
return db
@classmethod
def load(cls, filepath):
"""
loads a donor database from a json_save format file.
"""
with open(filepath) as jsfile:
db = js.from_json(jsfile)
        db.db_file = filepath
        return db
def save(self):
"""
Save the data to a json_save file
"""
# if explicitly called, you want to do it!
self._frozen = False
with open(self.db_file, 'w') as db_file:
self.to_json(db_file)
@property
def donors(self):
"""
an iterable of all the donors
"""
return self.donor_data.values()
def list_donors(self):
"""
creates a list of the donors as a string, so they can be printed
Not calling print from here makes it more flexible and easier to
test
"""
listing = ["Donor list:"]
for donor in self.donors:
listing.append(donor.name)
return "\n".join(listing)
def find_donor(self, name):
"""
find a donor in the donor db
:param: the name of the donor
:returns: The donor data structure -- None if not in the self.donor_data
"""
return self.donor_data.get(Donor.normalize_name(name))
@mutating
def add_donor(self, donor):
"""
Add a new donor to the donor db
:param donor: A Donor instance, or the name of the donor
:returns: The new or existing Donor object
"""
if not isinstance(donor, Donor):
donor = Donor(donor)
self.donor_data[donor.norm_name] = donor
donor._donor_db = self
return donor
@staticmethod
def sort_key(item):
# used to sort on name in self.donor_data
return item[1]
def generate_donor_report(self):
"""
Generate the report of the donors and amounts donated.
:returns: the donor report as a string.
"""
# First, reduce the raw data into a summary list view
report_rows = []
for donor in self.donor_data.values():
name = donor.name
gifts = donor.donations
total_gifts = donor.total_donations
num_gifts = len(gifts)
avg_gift = donor.average_donation
report_rows.append((name, total_gifts, num_gifts, avg_gift))
# sort the report data
report_rows.sort(key=self.sort_key)
report = []
report.append("{:25s} | {:11s} | {:9s} | {:12s}".format("Donor Name",
"Total Given",
"Num Gifts",
"Average Gift"))
report.append("-" * 66)
for row in report_rows:
report.append("{:25s} ${:10.2f} {:9d} ${:11.2f}".format(*row))
return "\n".join(report)
def save_letters_to_disk(self):
"""
make a letter for each donor, and save it to disk.
"""
print("Saving letters:")
for donor in self.donor_data.values():
print("donor:", donor.name)
letter = donor.gen_letter()
# I don't like spaces in filenames...
filename = donor.name.replace(" ", "_") + ".txt"
open(filename, 'w').write(letter)
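
# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Assumes the Donor class defined earlier in this module can be built from a plain
# name string (DonorDB.add_donor wraps a bare name in Donor(donor)) and that
# json_save persistence is wired up as shown above. The file name is hypothetical.
if __name__ == "__main__":
    db = DonorDB(db_file="example_mailroom_data.json")
    donor = db.add_donor("Ada Lovelace")
    donor.add_donation(100.00)   # the @mutating decorator triggers db.save()
    print(db.list_donors())
    print(db.generate_donor_report())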
|
py | 1a302e27e1fa1d06835c690f86b03d4b5266920b | from toga import Key
from toga_cocoa.libs import (
NSEventModifierFlagCapsLock,
NSEventModifierFlagShift,
NSEventModifierFlagControl,
NSEventModifierFlagOption,
NSEventModifierFlagCommand,
)
######################################################################
# Utilities to convert Cocoa constants to Toga ones
######################################################################
def modified_key(key, shift=None):
def mod_fn(modifierFlags):
if modifierFlags & NSEventModifierFlagShift:
return shift
return key
return mod_fn
def toga_key(event):
"""Convert a Cocoa NSKeyEvent into a Toga event."""
key = {
0: Key.A,
1: Key.S,
2: Key.D,
3: Key.F,
4: Key.H,
5: Key.G,
6: Key.Z,
7: Key.X,
8: Key.C,
9: Key.V,
11: Key.B,
12: Key.Q,
13: Key.W,
14: Key.E,
15: Key.R,
16: Key.Y,
17: Key.T,
18: modified_key(Key._1, shift=Key.EXCLAMATION)(event.modifierFlags),
19: modified_key(Key._2, shift=Key.AT)(event.modifierFlags),
20: modified_key(Key._3, shift=Key.HASH)(event.modifierFlags),
21: modified_key(Key._4, shift=Key.DOLLAR)(event.modifierFlags),
22: modified_key(Key._6, shift=Key.CARET)(event.modifierFlags),
23: modified_key(Key._5, shift=Key.PERCENT)(event.modifierFlags),
24: modified_key(Key.PLUS, shift=Key.EQUAL)(event.modifierFlags),
25: modified_key(Key._9, shift=Key.OPEN_PARENTHESIS)(event.modifierFlags),
26: modified_key(Key._7, shift=Key.AND)(event.modifierFlags),
27: modified_key(Key.MINUS, shift=Key.UNDERSCORE)(event.modifierFlags),
28: modified_key(Key._8, shift=Key.ASTERISK)(event.modifierFlags),
29: modified_key(Key._0, shift=Key.CLOSE_PARENTHESIS)(event.modifierFlags),
30: Key.CLOSE_BRACKET,
31: Key.O,
32: Key.U,
33: Key.OPEN_BRACKET,
34: Key.I,
35: Key.P,
36: Key.ENTER,
37: Key.L,
38: Key.J,
39: modified_key(Key.QUOTE, shift=Key.DOUBLE_QUOTE)(event.modifierFlags),
40: Key.K,
41: modified_key(Key.COLON, shift=Key.SEMICOLON)(event.modifierFlags),
42: Key.BACKSLASH,
43: modified_key(Key.COMMA, shift=Key.LESS_THAN)(event.modifierFlags),
44: modified_key(Key.SLASH, shift=Key.QUESTION)(event.modifierFlags),
45: Key.N,
46: Key.M,
47: modified_key(Key.FULL_STOP, shift=Key.GREATER_THAN)(event.modifierFlags),
48: Key.TAB,
49: Key.SPACE,
50: modified_key(Key.BACK_QUOTE, shift=Key.TILDE)(event.modifierFlags),
51: Key.BACKSPACE,
53: Key.ESCAPE,
65: Key.NUMPAD_DECIMAL_POINT,
67: Key.NUMPAD_MULTIPLY,
69: Key.NUMPAD_PLUS,
71: Key.NUMPAD_CLEAR,
75: Key.NUMPAD_DIVIDE,
76: Key.NUMPAD_ENTER,
78: Key.NUMPAD_MINUS,
81: Key.NUMPAD_EQUAL,
82: Key.NUMPAD_0,
83: Key.NUMPAD_1,
84: Key.NUMPAD_2,
85: Key.NUMPAD_3,
86: Key.NUMPAD_4,
87: Key.NUMPAD_5,
88: Key.NUMPAD_6,
89: Key.NUMPAD_7,
91: Key.NUMPAD_8,
92: Key.NUMPAD_9,
# : Key.F4,
96: Key.F5,
        97: Key.F6,
        98: Key.F7,
99: Key.F3,
100: Key.F8,
101: Key.F9,
        109: Key.F10,
115: Key.HOME,
116: Key.PAGE_UP,
117: Key.DELETE,
119: Key.END,
120: Key.F2,
121: Key.PAGE_DOWN,
122: Key.F1,
123: Key.LEFT,
124: Key.RIGHT,
125: Key.DOWN,
126: Key.UP,
}.get(event.keyCode, None)
modifiers = set()
if event.modifierFlags & NSEventModifierFlagCapsLock:
modifiers.add(Key.CAPSLOCK)
if event.modifierFlags & NSEventModifierFlagShift:
modifiers.add(Key.SHIFT)
if event.modifierFlags & NSEventModifierFlagControl:
modifiers.add(Key.CONTROL)
if event.modifierFlags & NSEventModifierFlagOption:
modifiers.add(Key.OPTION)
if event.modifierFlags & NSEventModifierFlagCommand:
modifiers.add(Key.COMMAND)
return {
'key': key,
'modifiers': modifiers
}
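
# --- Illustrative sketch (added for clarity; not part of the original module) ---
# toga_key returns a plain dict with the translated Key (or None for unmapped
# key codes) and the set of active modifier Keys. A hypothetical helper:
def describe_key_event(event):
    result = toga_key(event)
    return (result['key'], sorted(result['modifiers'], key=str))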
|
py | 1a302ea2ccac42f1bd5bfe99e4fba91d1cf256e8 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import typing
# Use consistent types for marshal and unmarshal functions across
# both JSON and Binary format.
MarshallerType = typing.Optional[
typing.Callable[[typing.Any], typing.Union[bytes, str]]
]
UnmarshallerType = typing.Optional[
typing.Callable[[typing.Union[bytes, str]], typing.Any]
]
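
# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Example callables that satisfy these aliases, assuming JSON as the wire format.
import json

def json_marshaller(data: typing.Any) -> str:
    # matches MarshallerType: accepts any object, returns bytes or str
    return json.dumps(data)

def json_unmarshaller(raw: typing.Union[bytes, str]) -> typing.Any:
    # matches UnmarshallerType: accepts bytes or str, returns any object
    return json.loads(raw)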
|
py | 1a302ec1ac0b39ce9c344f0781229746f43ec413 | from typing import Any, Callable, Dict, Optional
from unittest import mock
import orjson
from django.test import override_settings
from django.utils.html import escape
from requests.exceptions import ConnectionError
from zerver.lib.actions import queue_json_publish
from zerver.lib.cache import NotFoundInCache, cache_set, preview_url_cache_key
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import MockPythonResponse, mock_queue_publish
from zerver.lib.url_preview.oembed import get_oembed_data, strip_cdata
from zerver.lib.url_preview.parsers import GenericParser, OpenGraphParser
from zerver.lib.url_preview.preview import get_link_embed_data, link_embed_data_from_cache
from zerver.models import Message, Realm, UserProfile
from zerver.worker.queue_processors import FetchLinksEmbedData
TEST_CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'default',
},
'database': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'url-preview',
},
'in-memory': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'url-preview',
},
}
@override_settings(INLINE_URL_EMBED_PREVIEW=True)
class OembedTestCase(ZulipTestCase):
@mock.patch('pyoembed.requests.get')
def test_present_provider(self, get: Any) -> None:
get.return_value = response = mock.Mock()
response.headers = {'content-type': 'application/json'}
response.ok = True
response_data = {
'type': 'rich',
'thumbnail_url': 'https://scontent.cdninstagram.com/t51.2885-15/n.jpg',
'thumbnail_width': 640,
'thumbnail_height': 426,
'title': 'NASA',
'html': '<p>test</p>',
'version': '1.0',
'width': 658,
'height': 400}
response.text = orjson.dumps(response_data).decode()
url = 'http://instagram.com/p/BLtI2WdAymy'
data = get_oembed_data(url)
self.assertIsInstance(data, dict)
self.assertIn('title', data)
assert data is not None # allow mypy to infer data is indexable
self.assertEqual(data['title'], response_data['title'])
@mock.patch('pyoembed.requests.get')
def test_photo_provider(self, get: Any) -> None:
get.return_value = response = mock.Mock()
response.headers = {'content-type': 'application/json'}
response.ok = True
response_data = {
'type': 'photo',
'thumbnail_url': 'https://scontent.cdninstagram.com/t51.2885-15/n.jpg',
'url': 'https://scontent.cdninstagram.com/t51.2885-15/n.jpg',
'thumbnail_width': 640,
'thumbnail_height': 426,
'title': 'NASA',
'html': '<p>test</p>',
'version': '1.0',
'width': 658,
'height': 400}
response.text = orjson.dumps(response_data).decode()
url = 'http://imgur.com/photo/158727223'
data = get_oembed_data(url)
self.assertIsInstance(data, dict)
self.assertIn('title', data)
assert data is not None # allow mypy to infer data is indexable
self.assertEqual(data['title'], response_data['title'])
self.assertTrue(data['oembed'])
@mock.patch('pyoembed.requests.get')
def test_video_provider(self, get: Any) -> None:
get.return_value = response = mock.Mock()
response.headers = {'content-type': 'application/json'}
response.ok = True
response_data = {
'type': 'video',
'thumbnail_url': 'https://scontent.cdninstagram.com/t51.2885-15/n.jpg',
'thumbnail_width': 640,
'thumbnail_height': 426,
'title': 'NASA',
'html': '<p>test</p>',
'version': '1.0',
'width': 658,
'height': 400}
response.text = orjson.dumps(response_data).decode()
url = 'http://blip.tv/video/158727223'
data = get_oembed_data(url)
self.assertIsInstance(data, dict)
self.assertIn('title', data)
assert data is not None # allow mypy to infer data is indexable
self.assertEqual(data['title'], response_data['title'])
@mock.patch('pyoembed.requests.get')
def test_error_request(self, get: Any) -> None:
get.return_value = response = mock.Mock()
response.ok = False
url = 'http://instagram.com/p/BLtI2WdAymy'
data = get_oembed_data(url)
self.assertIsNone(data)
@mock.patch('pyoembed.requests.get')
def test_invalid_json_in_response(self, get: Any) -> None:
get.return_value = response = mock.Mock()
response.headers = {'content-type': 'application/json'}
response.ok = True
response.text = '{invalid json}'
url = 'http://instagram.com/p/BLtI2WdAymy'
data = get_oembed_data(url)
self.assertIsNone(data)
def test_oembed_html(self) -> None:
html = '<iframe src="//www.instagram.com/embed.js"></iframe>'
stripped_html = strip_cdata(html)
self.assertEqual(html, stripped_html)
def test_autodiscovered_oembed_xml_format_html(self) -> None:
iframe_content = '<iframe src="https://w.soundcloud.com/player"></iframe>'
html = f'<![CDATA[{iframe_content}]]>'
stripped_html = strip_cdata(html)
self.assertEqual(iframe_content, stripped_html)
class OpenGraphParserTestCase(ZulipTestCase):
def test_page_with_og(self) -> None:
html = """<html>
<head>
<meta property="og:title" content="The Rock" />
<meta property="og:type" content="video.movie" />
<meta property="og:url" content="http://www.imdb.com/title/tt0117500/" />
<meta property="og:image" content="http://ia.media-imdb.com/images/rock.jpg" />
<meta property="og:description" content="The Rock film" />
</head>
</html>"""
parser = OpenGraphParser(html)
result = parser.extract_data()
self.assertIn('title', result)
self.assertEqual(result['title'], 'The Rock')
self.assertEqual(result.get('description'), 'The Rock film')
def test_page_with_evil_og_tags(self) -> None:
html = """<html>
<head>
<meta property="og:title" content="The Rock" />
<meta property="og:type" content="video.movie" />
<meta property="og:url" content="http://www.imdb.com/title/tt0117500/" />
<meta property="og:image" content="http://ia.media-imdb.com/images/rock.jpg" />
<meta property="og:description" content="The Rock film" />
<meta property="og:html" content="<script>alert(window.location)</script>" />
<meta property="og:oembed" content="True" />
</head>
</html>"""
parser = OpenGraphParser(html)
result = parser.extract_data()
self.assertIn('title', result)
self.assertEqual(result['title'], 'The Rock')
self.assertEqual(result.get('description'), 'The Rock film')
self.assertEqual(result.get('oembed'), None)
self.assertEqual(result.get('html'), None)
class GenericParserTestCase(ZulipTestCase):
def test_parser(self) -> None:
html = """
<html>
<head><title>Test title</title></head>
<body>
<h1>Main header</h1>
<p>Description text</p>
</body>
</html>
"""
parser = GenericParser(html)
result = parser.extract_data()
self.assertEqual(result.get('title'), 'Test title')
self.assertEqual(result.get('description'), 'Description text')
def test_extract_image(self) -> None:
html = """
<html>
<body>
<h1>Main header</h1>
<img data-src="Not an image">
<img src="http://test.com/test.jpg">
<div>
<p>Description text</p>
</div>
</body>
</html>
"""
parser = GenericParser(html)
result = parser.extract_data()
self.assertEqual(result.get('title'), 'Main header')
self.assertEqual(result.get('description'), 'Description text')
self.assertEqual(result.get('image'), 'http://test.com/test.jpg')
def test_extract_description(self) -> None:
html = """
<html>
<body>
<div>
<div>
<p>Description text</p>
</div>
</div>
</body>
</html>
"""
parser = GenericParser(html)
result = parser.extract_data()
self.assertEqual(result.get('description'), 'Description text')
html = """
<html>
<head><meta name="description" content="description 123"</head>
<body></body>
</html>
"""
parser = GenericParser(html)
result = parser.extract_data()
self.assertEqual(result.get('description'), 'description 123')
html = "<html><body></body></html>"
parser = GenericParser(html)
result = parser.extract_data()
self.assertIsNone(result.get('description'))
class PreviewTestCase(ZulipTestCase):
open_graph_html = """
<html>
<head>
<title>Test title</title>
<meta property="og:title" content="The Rock" />
<meta property="og:type" content="video.movie" />
<meta property="og:url" content="http://www.imdb.com/title/tt0117500/" />
<meta property="og:image" content="http://ia.media-imdb.com/images/rock.jpg" />
<meta http-equiv="refresh" content="30" />
<meta property="notog:extra-text" content="Extra!" />
</head>
<body>
<h1>Main header</h1>
<p>Description text</p>
</body>
</html>
"""
def setUp(self) -> None:
super().setUp()
Realm.objects.all().update(inline_url_embed_preview=True)
@classmethod
def create_mock_response(cls, url: str, relative_url: bool=False,
headers: Optional[Dict[str, str]]=None,
html: Optional[str]=None) -> Callable[..., MockPythonResponse]:
if html is None:
html = cls.open_graph_html
if relative_url is True:
html = html.replace('http://ia.media-imdb.com', '')
response = MockPythonResponse(html, 200, headers)
return lambda k, **kwargs: {url: response}.get(k, MockPythonResponse('', 404, headers))
@override_settings(INLINE_URL_EMBED_PREVIEW=True)
def test_edit_message_history(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
msg_id = self.send_stream_message(user, "Scotland",
topic_name="editing", content="original")
url = 'http://test.org/'
mocked_response = mock.Mock(side_effect=self.create_mock_response(url))
with mock_queue_publish('zerver.views.message_edit.queue_json_publish') as patched:
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id, 'content': url,
})
self.assert_json_success(result)
patched.assert_called_once()
queue = patched.call_args[0][0]
self.assertEqual(queue, "embed_links")
event = patched.call_args[0][1]
with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):
with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:
FetchLinksEmbedData().consume(event)
self.assertTrue(
'INFO:root:Time spent on get_link_embed_data for http://test.org/: ' in info_logs.output[0]
)
embedded_link = f'<a href="{url}" title="The Rock">The Rock</a>'
msg = Message.objects.select_related("sender").get(id=msg_id)
self.assertIn(embedded_link, msg.rendered_content)
@override_settings(INLINE_URL_EMBED_PREVIEW=True)
def _send_message_with_test_org_url(self, sender: UserProfile, queue_should_run: bool=True,
relative_url: bool=False) -> Message:
url = 'http://test.org/'
with mock_queue_publish('zerver.lib.actions.queue_json_publish') as patched:
msg_id = self.send_personal_message(
sender,
self.example_user('cordelia'),
content=url,
)
if queue_should_run:
patched.assert_called_once()
queue = patched.call_args[0][0]
self.assertEqual(queue, "embed_links")
event = patched.call_args[0][1]
else:
patched.assert_not_called()
                # If nothing was put in the queue, we don't need to
# run the queue processor or any of the following code
return Message.objects.select_related("sender").get(id=msg_id)
# Verify the initial message doesn't have the embedded links rendered
msg = Message.objects.select_related("sender").get(id=msg_id)
self.assertNotIn(
f'<a href="{url}" title="The Rock">The Rock</a>',
msg.rendered_content)
# Mock the network request result so the test can be fast without Internet
mocked_response = mock.Mock(side_effect=self.create_mock_response(url, relative_url=relative_url))
# Run the queue processor to potentially rerender things
with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):
with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:
FetchLinksEmbedData().consume(event)
self.assertTrue(
'INFO:root:Time spent on get_link_embed_data for http://test.org/: ' in info_logs.output[0]
)
msg = Message.objects.select_related("sender").get(id=msg_id)
return msg
@override_settings(INLINE_URL_EMBED_PREVIEW=True)
def test_message_update_race_condition(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
original_url = 'http://test.org/'
edited_url = 'http://edited.org/'
with mock_queue_publish('zerver.lib.actions.queue_json_publish') as patched:
msg_id = self.send_stream_message(user, "Scotland",
topic_name="foo", content=original_url)
patched.assert_called_once()
queue = patched.call_args[0][0]
self.assertEqual(queue, "embed_links")
event = patched.call_args[0][1]
def wrapped_queue_json_publish(*args: Any, **kwargs: Any) -> None:
# Mock the network request result so the test can be fast without Internet
mocked_response_original = mock.Mock(side_effect=self.create_mock_response(original_url))
mocked_response_edited = mock.Mock(side_effect=self.create_mock_response(edited_url))
with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):
with mock.patch('requests.get', mocked_response_original), self.assertLogs(level='INFO') as info_logs:
# Run the queue processor. This will simulate the event for original_url being
# processed after the message has been edited.
FetchLinksEmbedData().consume(event)
self.assertTrue(
'INFO:root:Time spent on get_link_embed_data for http://test.org/: ' in info_logs.output[0]
)
msg = Message.objects.select_related("sender").get(id=msg_id)
# The content of the message has changed since the event for original_url has been created,
# it should not be rendered. Another, up-to-date event will have been sent (edited_url).
self.assertNotIn(f'<a href="{original_url}" title="The Rock">The Rock</a>',
msg.rendered_content)
mocked_response_edited.assert_not_called()
with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):
with mock.patch('requests.get', mocked_response_edited), self.assertLogs(level='INFO') as info_logs:
# Now proceed with the original queue_json_publish and call the
# up-to-date event for edited_url.
queue_json_publish(*args, **kwargs)
msg = Message.objects.select_related("sender").get(id=msg_id)
self.assertIn(f'<a href="{edited_url}" title="The Rock">The Rock</a>',
msg.rendered_content)
self.assertTrue(
'INFO:root:Time spent on get_link_embed_data for http://edited.org/: ' in info_logs.output[0]
)
with mock_queue_publish('zerver.views.message_edit.queue_json_publish', wraps=wrapped_queue_json_publish):
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id, 'content': edited_url,
})
self.assert_json_success(result)
def test_get_link_embed_data(self) -> None:
url = 'http://test.org/'
embedded_link = f'<a href="{url}" title="The Rock">The Rock</a>'
# When humans send, we should get embedded content.
msg = self._send_message_with_test_org_url(sender=self.example_user('hamlet'))
self.assertIn(embedded_link, msg.rendered_content)
# We don't want embedded content for bots.
msg = self._send_message_with_test_org_url(sender=self.example_user('webhook_bot'),
queue_should_run=False)
self.assertNotIn(embedded_link, msg.rendered_content)
# Try another human to make sure bot failure was due to the
# bot sending the message and not some other reason.
msg = self._send_message_with_test_org_url(sender=self.example_user('prospero'))
self.assertIn(embedded_link, msg.rendered_content)
def test_inline_url_embed_preview(self) -> None:
with_preview = '<p><a href="http://test.org/">http://test.org/</a></p>\n<div class="message_embed"><a class="message_embed_image" href="http://test.org/" style="background-image: url(http://ia.media-imdb.com/images/rock.jpg)"></a><div class="data-container"><div class="message_embed_title"><a href="http://test.org/" title="The Rock">The Rock</a></div><div class="message_embed_description">Description text</div></div></div>'
without_preview = '<p><a href="http://test.org/">http://test.org/</a></p>'
msg = self._send_message_with_test_org_url(sender=self.example_user('hamlet'))
self.assertEqual(msg.rendered_content, with_preview)
realm = msg.get_realm()
setattr(realm, 'inline_url_embed_preview', False)
realm.save()
msg = self._send_message_with_test_org_url(sender=self.example_user('prospero'), queue_should_run=False)
self.assertEqual(msg.rendered_content, without_preview)
@override_settings(INLINE_URL_EMBED_PREVIEW=True)
def test_inline_relative_url_embed_preview(self) -> None:
# Relative URLs should not be sent for URL preview.
with mock_queue_publish('zerver.lib.actions.queue_json_publish') as patched:
self.send_personal_message(
self.example_user('prospero'),
self.example_user('cordelia'),
content="http://zulip.testserver/api/",
)
patched.assert_not_called()
def test_inline_url_embed_preview_with_relative_image_url(self) -> None:
with_preview_relative = '<p><a href="http://test.org/">http://test.org/</a></p>\n<div class="message_embed"><a class="message_embed_image" href="http://test.org/" style="background-image: url(http://test.org/images/rock.jpg)"></a><div class="data-container"><div class="message_embed_title"><a href="http://test.org/" title="The Rock">The Rock</a></div><div class="message_embed_description">Description text</div></div></div>'
# Try case where the Open Graph image is a relative URL.
msg = self._send_message_with_test_org_url(sender=self.example_user('prospero'), relative_url=True)
self.assertEqual(msg.rendered_content, with_preview_relative)
def test_http_error_get_data(self) -> None:
url = 'http://test.org/'
msg_id = self.send_personal_message(
self.example_user('hamlet'),
self.example_user('cordelia'),
content=url,
)
msg = Message.objects.select_related("sender").get(id=msg_id)
event = {
'message_id': msg_id,
'urls': [url],
'message_realm_id': msg.sender.realm_id,
'message_content': url}
with self.settings(INLINE_URL_EMBED_PREVIEW=True, TEST_SUITE=False, CACHES=TEST_CACHES):
with mock.patch('requests.get', mock.Mock(side_effect=ConnectionError())), \
self.assertLogs(level='INFO') as info_logs:
FetchLinksEmbedData().consume(event)
self.assertTrue(
'INFO:root:Time spent on get_link_embed_data for http://test.org/: ' in info_logs.output[0]
)
msg = Message.objects.get(id=msg_id)
self.assertEqual(
'<p><a href="http://test.org/">http://test.org/</a></p>',
msg.rendered_content)
def test_invalid_link(self) -> None:
with self.settings(INLINE_URL_EMBED_PREVIEW=True, TEST_SUITE=False, CACHES=TEST_CACHES):
self.assertIsNone(get_link_embed_data('com.notvalidlink'))
self.assertIsNone(get_link_embed_data('μένει.com.notvalidlink'))
def test_link_embed_data_from_cache(self) -> None:
url = 'http://test.org/'
link_embed_data = 'test data'
with self.assertRaises(NotFoundInCache):
link_embed_data_from_cache(url)
with self.settings(CACHES=TEST_CACHES):
key = preview_url_cache_key(url)
cache_set(key, link_embed_data, 'database')
self.assertEqual(link_embed_data, link_embed_data_from_cache(url))
@override_settings(INLINE_URL_EMBED_PREVIEW=True)
def test_link_preview_non_html_data(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
url = 'http://test.org/audio.mp3'
with mock_queue_publish('zerver.lib.actions.queue_json_publish') as patched:
msg_id = self.send_stream_message(user, "Scotland", topic_name="foo", content=url)
patched.assert_called_once()
queue = patched.call_args[0][0]
self.assertEqual(queue, "embed_links")
event = patched.call_args[0][1]
headers = {'content-type': 'application/octet-stream'}
mocked_response = mock.Mock(side_effect=self.create_mock_response(url, headers=headers))
with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):
with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:
FetchLinksEmbedData().consume(event)
cached_data = link_embed_data_from_cache(url)
self.assertTrue(
'INFO:root:Time spent on get_link_embed_data for http://test.org/audio.mp3: ' in info_logs.output[0]
)
self.assertIsNone(cached_data)
msg = Message.objects.select_related("sender").get(id=msg_id)
self.assertEqual(
('<p><a href="http://test.org/audio.mp3">'
'http://test.org/audio.mp3</a></p>'),
msg.rendered_content)
@override_settings(INLINE_URL_EMBED_PREVIEW=True)
def test_link_preview_no_open_graph_image(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
url = 'http://test.org/foo.html'
with mock_queue_publish('zerver.lib.actions.queue_json_publish') as patched:
msg_id = self.send_stream_message(user, "Scotland", topic_name="foo", content=url)
patched.assert_called_once()
queue = patched.call_args[0][0]
self.assertEqual(queue, "embed_links")
event = patched.call_args[0][1]
# HTML without the og:image metadata
html = '\n'.join(line for line in self.open_graph_html.splitlines() if 'og:image' not in line)
mocked_response = mock.Mock(side_effect=self.create_mock_response(url, html=html))
with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):
with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:
FetchLinksEmbedData().consume(event)
cached_data = link_embed_data_from_cache(url)
self.assertTrue(
'INFO:root:Time spent on get_link_embed_data for http://test.org/foo.html: ' in info_logs.output[0]
)
self.assertIn('title', cached_data)
self.assertNotIn('image', cached_data)
msg = Message.objects.select_related("sender").get(id=msg_id)
self.assertEqual(
('<p><a href="http://test.org/foo.html">'
'http://test.org/foo.html</a></p>'),
msg.rendered_content)
@override_settings(INLINE_URL_EMBED_PREVIEW=True)
def test_link_preview_open_graph_image_missing_content(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
url = 'http://test.org/foo.html'
with mock_queue_publish('zerver.lib.actions.queue_json_publish') as patched:
msg_id = self.send_stream_message(user, "Scotland", topic_name="foo", content=url)
patched.assert_called_once()
queue = patched.call_args[0][0]
self.assertEqual(queue, "embed_links")
event = patched.call_args[0][1]
# HTML without the og:image metadata
html = '\n'.join(line if 'og:image' not in line else '<meta property="og:image"/>'
for line in self.open_graph_html.splitlines())
mocked_response = mock.Mock(side_effect=self.create_mock_response(url, html=html))
with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):
with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:
FetchLinksEmbedData().consume(event)
cached_data = link_embed_data_from_cache(url)
self.assertTrue(
'INFO:root:Time spent on get_link_embed_data for http://test.org/foo.html: ' in info_logs.output[0]
)
self.assertIn('title', cached_data)
self.assertNotIn('image', cached_data)
msg = Message.objects.select_related("sender").get(id=msg_id)
self.assertEqual(
('<p><a href="http://test.org/foo.html">'
'http://test.org/foo.html</a></p>'),
msg.rendered_content)
@override_settings(INLINE_URL_EMBED_PREVIEW=True)
def test_link_preview_no_content_type_header(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
url = 'http://test.org/'
with mock_queue_publish('zerver.lib.actions.queue_json_publish') as patched:
msg_id = self.send_stream_message(user, "Scotland", topic_name="foo", content=url)
patched.assert_called_once()
queue = patched.call_args[0][0]
self.assertEqual(queue, "embed_links")
event = patched.call_args[0][1]
headers = {'content-type': ''} # No content type header
mocked_response = mock.Mock(side_effect=self.create_mock_response(url, headers=headers))
with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):
with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:
FetchLinksEmbedData().consume(event)
data = link_embed_data_from_cache(url)
self.assertTrue(
'INFO:root:Time spent on get_link_embed_data for http://test.org/: ' in info_logs.output[0]
)
self.assertIn('title', data)
self.assertIn('image', data)
msg = Message.objects.select_related("sender").get(id=msg_id)
self.assertIn(data['title'], msg.rendered_content)
self.assertIn(data['image'], msg.rendered_content)
@override_settings(INLINE_URL_EMBED_PREVIEW=True)
def test_valid_content_type_error_get_data(self) -> None:
url = 'http://test.org/'
with mock_queue_publish('zerver.lib.actions.queue_json_publish'):
msg_id = self.send_personal_message(
self.example_user('hamlet'),
self.example_user('cordelia'),
content=url,
)
msg = Message.objects.select_related("sender").get(id=msg_id)
event = {
'message_id': msg_id,
'urls': [url],
'message_realm_id': msg.sender.realm_id,
'message_content': url}
with mock.patch('zerver.lib.url_preview.preview.get_oembed_data', side_effect=lambda *args, **kwargs: None):
with mock.patch('zerver.lib.url_preview.preview.valid_content_type', side_effect=lambda k: True):
with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):
with mock.patch('requests.get', mock.Mock(side_effect=ConnectionError())), \
self.assertLogs(level='INFO') as info_logs:
FetchLinksEmbedData().consume(event)
self.assertTrue(
'INFO:root:Time spent on get_link_embed_data for http://test.org/: ' in info_logs.output[0]
)
with self.assertRaises(NotFoundInCache):
link_embed_data_from_cache(url)
msg.refresh_from_db()
self.assertEqual(
'<p><a href="http://test.org/">http://test.org/</a></p>',
msg.rendered_content)
@override_settings(INLINE_URL_EMBED_PREVIEW=True)
def test_invalid_url(self) -> None:
url = 'http://test.org/'
error_url = 'http://test.org/x'
with mock_queue_publish('zerver.lib.actions.queue_json_publish'):
msg_id = self.send_personal_message(
self.example_user('hamlet'),
self.example_user('cordelia'),
content=error_url,
)
msg = Message.objects.select_related("sender").get(id=msg_id)
event = {
'message_id': msg_id,
'urls': [error_url],
'message_realm_id': msg.sender.realm_id,
'message_content': error_url}
mocked_response = mock.Mock(side_effect=self.create_mock_response(url))
with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):
with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:
FetchLinksEmbedData().consume(event)
self.assertTrue(
'INFO:root:Time spent on get_link_embed_data for http://test.org/x: ' in info_logs.output[0]
)
cached_data = link_embed_data_from_cache(error_url)
# FIXME: Should we really cache this, especially without cache invalidation?
self.assertIsNone(cached_data)
msg.refresh_from_db()
self.assertEqual(
'<p><a href="http://test.org/x">http://test.org/x</a></p>',
msg.rendered_content)
@override_settings(INLINE_URL_EMBED_PREVIEW=True)
def test_safe_oembed_html_url(self) -> None:
url = 'http://test.org/'
with mock_queue_publish('zerver.lib.actions.queue_json_publish'):
msg_id = self.send_personal_message(
self.example_user('hamlet'),
self.example_user('cordelia'),
content=url,
)
msg = Message.objects.select_related("sender").get(id=msg_id)
event = {
'message_id': msg_id,
'urls': [url],
'message_realm_id': msg.sender.realm_id,
'message_content': url}
mocked_data = {'html': f'<iframe src="{url}"></iframe>',
'oembed': True, 'type': 'video', 'image': f'{url}/image.png'}
mocked_response = mock.Mock(side_effect=self.create_mock_response(url))
with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):
with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:
with mock.patch('zerver.lib.url_preview.preview.get_oembed_data',
lambda *args, **kwargs: mocked_data):
FetchLinksEmbedData().consume(event)
data = link_embed_data_from_cache(url)
self.assertTrue(
'INFO:root:Time spent on get_link_embed_data for http://test.org/: ' in info_logs.output[0]
)
self.assertEqual(data, mocked_data)
msg.refresh_from_db()
self.assertIn('a data-id="{}"'.format(escape(mocked_data['html'])), msg.rendered_content)
@override_settings(INLINE_URL_EMBED_PREVIEW=True)
def test_youtube_url_title_replaces_url(self) -> None:
url = 'https://www.youtube.com/watch?v=eSJTXC7Ixgg'
with mock_queue_publish('zerver.lib.actions.queue_json_publish'):
msg_id = self.send_personal_message(
self.example_user('hamlet'),
self.example_user('cordelia'),
content=url,
)
msg = Message.objects.select_related("sender").get(id=msg_id)
event = {
'message_id': msg_id,
'urls': [url],
'message_realm_id': msg.sender.realm_id,
'message_content': url}
mocked_data = {'title': 'Clearer Code at Scale - Static Types at Zulip and Dropbox'}
mocked_response = mock.Mock(side_effect=self.create_mock_response(url))
with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):
with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:
with mock.patch('zerver.lib.markdown.link_preview.link_embed_data_from_cache',
lambda *args, **kwargs: mocked_data):
FetchLinksEmbedData().consume(event)
self.assertTrue(
'INFO:root:Time spent on get_link_embed_data for https://www.youtube.com/watch?v=eSJTXC7Ixgg:' in info_logs.output[0]
)
msg.refresh_from_db()
expected_content = '<p><a href="https://www.youtube.com/watch?v=eSJTXC7Ixgg">YouTube - Clearer Code at Scale - Static Types at Zulip and Dropbox</a></p>\n<div class="youtube-video message_inline_image"><a data-id="eSJTXC7Ixgg" href="https://www.youtube.com/watch?v=eSJTXC7Ixgg"><img src="https://i.ytimg.com/vi/eSJTXC7Ixgg/default.jpg"></a></div>'
self.assertEqual(expected_content, msg.rendered_content)
@override_settings(INLINE_URL_EMBED_PREVIEW=True)
def test_custom_title_replaces_youtube_url_title(self) -> None:
url = '[YouTube link](https://www.youtube.com/watch?v=eSJTXC7Ixgg)'
with mock_queue_publish('zerver.lib.actions.queue_json_publish'):
msg_id = self.send_personal_message(
self.example_user('hamlet'),
self.example_user('cordelia'),
content=url,
)
msg = Message.objects.select_related("sender").get(id=msg_id)
event = {
'message_id': msg_id,
'urls': [url],
'message_realm_id': msg.sender.realm_id,
'message_content': url}
mocked_data = {'title': 'Clearer Code at Scale - Static Types at Zulip and Dropbox'}
mocked_response = mock.Mock(side_effect=self.create_mock_response(url))
with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):
with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:
with mock.patch('zerver.lib.markdown.link_preview.link_embed_data_from_cache',
lambda *args, **kwargs: mocked_data):
FetchLinksEmbedData().consume(event)
self.assertTrue(
'INFO:root:Time spent on get_link_embed_data for [YouTube link](https://www.youtube.com/watch?v=eSJTXC7Ixgg):' in info_logs.output[0]
)
msg.refresh_from_db()
expected_content = '<p><a href="https://www.youtube.com/watch?v=eSJTXC7Ixgg">YouTube link</a></p>\n<div class="youtube-video message_inline_image"><a data-id="eSJTXC7Ixgg" href="https://www.youtube.com/watch?v=eSJTXC7Ixgg"><img src="https://i.ytimg.com/vi/eSJTXC7Ixgg/default.jpg"></a></div>'
self.assertEqual(expected_content, msg.rendered_content)
|
py | 1a302fc538ed2033f4d04d52bf90988eb52c0149 | import math
from ..utility.static_vars import *
def _constant(parameter, value):
parameter.data.fill_(value)
def _zero(parameter):
_constant(parameter, 0)
def _one(parameter):
_constant(parameter, 1)
def _normal(parameter, mean, std):
parameter.data.normal_(mean, std)
def _uniform(parameter, bound):
parameter.data.uniform_(-bound, bound)
def _kaiming(parameter, fan, a):
_uniform(parameter, math.sqrt(6 / ((1 + a ** 2) * fan)))
def _glorot(parameter):
_uniform(parameter, math.sqrt(6.0 / (parameter.size(-2) + parameter.size(-1))))
@static_vars(fun=None)
def init(parameter, type, *args, **kwargs):
"""
Initialize the given parameters with the specified initialization type
Parameters
----------
parameter : torch.nn.Parameter
the parameter to initialize
type : str
the initialization type. Should be one between:
'constant', 'zero', 'one', 'normal', 'kaiming', 'glorot' (or 'xavier')
args : ...
the arguments of the specified initialization type
kwargs : ...
the keyword arguments of the specified initialization type
Returns
-------
torch.nn.Parameter
the initialized parameter
"""
if not init.fun:
init.fun = {
'constant': _constant,
'zero': _zero,
'one': _one,
'normal': _normal,
'kaiming': _kaiming,
'glorot': _glorot,
'xavier': _glorot,
}
if parameter is not None:
if type not in init.fun:
raise ValueError('Type unknown. Please choose among one of the following: {}'.format('\n'.join(list(init.fun.keys()))))
init.fun[type](parameter, *args, **kwargs)
return parameter
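
# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# This module uses relative imports, so the snippet below is illustrative rather
# than directly executable here. Argument order follows the private helpers above
# (_normal takes mean and std, _kaiming takes fan and a). Assumes PyTorch.
#
#   import torch
#   weight = torch.nn.Parameter(torch.empty(128, 64))
#   init(weight, 'glorot')                      # Xavier/Glorot uniform
#   init(weight, 'normal', 0.0, 0.02)           # mean=0.0, std=0.02
#   init(weight, 'kaiming', weight.size(1), 0)  # fan=in_features, a=0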
|
py | 1a302ff1179861321b53b352710d282bb3b342bd | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""ml2_vnic_type
Revision ID: 27cc183af192
Revises: 4ca36cfc898c
Create Date: 2014-02-09 12:19:21.362967
"""
# revision identifiers, used by Alembic.
revision = '27cc183af192'
down_revision = '4ca36cfc898c'
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade():
if migration.schema_has_table('ml2_port_bindings'):
op.add_column('ml2_port_bindings',
sa.Column('vnic_type', sa.String(length=64),
nullable=False,
server_default='normal'))
|
py | 1a3030fa80ac15146753e094f3ad868bd2a41199 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class TestInput(object):
"""Groups information about a test for easy passing of data."""
def __init__(self, test_name, timeout=None, requires_lock=None, reference_files=None, should_run_pixel_tests=None, should_add_missing_baselines=True):
# TestInput objects are normally constructed by the manager and passed
# to the workers, but these some fields are set lazily in the workers where possible
# because they require us to look at the filesystem and we want to be able to do that in parallel.
self.test_name = test_name
self.timeout = timeout # in msecs; should rename this for consistency
self.requires_lock = requires_lock
self.reference_files = reference_files
self.should_run_pixel_tests = should_run_pixel_tests
self.should_add_missing_baselines = should_add_missing_baselines
def __repr__(self):
return "TestInput('%s', timeout=%s, requires_lock=%s, reference_files=%s, should_run_pixel_tests=%s, should_add_missing_baselines%s)" % (self.test_name, self.timeout, self.requires_lock, self.reference_files, self.should_run_pixel_tests, self.should_add_missing_baselines)
|
py | 1a3031c093c77a2f9fca69a9a31d97ce454786c8 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetAntiAffinityGroupResult',
'AwaitableGetAntiAffinityGroupResult',
'get_anti_affinity_group',
'get_anti_affinity_group_output',
]
@pulumi.output_type
class GetAntiAffinityGroupResult:
"""
A collection of values returned by getAntiAffinityGroup.
"""
def __init__(__self__, id=None, instances=None, name=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if instances and not isinstance(instances, list):
raise TypeError("Expected argument 'instances' to be a list")
pulumi.set(__self__, "instances", instances)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def id(self) -> Optional[str]:
return pulumi.get(self, "id")
@property
@pulumi.getter
def instances(self) -> Sequence[str]:
"""
A list of Compute instance IDs belonging to the Anti-Affinity Group.
"""
return pulumi.get(self, "instances")
@property
@pulumi.getter
def name(self) -> Optional[str]:
return pulumi.get(self, "name")
class AwaitableGetAntiAffinityGroupResult(GetAntiAffinityGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAntiAffinityGroupResult(
id=self.id,
instances=self.instances,
name=self.name)
def get_anti_affinity_group(id: Optional[str] = None,
name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAntiAffinityGroupResult:
"""
Provides information on an [Anti-Affinity Group][aag-doc] for use in other resources such as a [`Compute`][r-compute] resource.
## Example Usage
```python
import pulumi
import pulumi_exoscale as exoscale
zone = "ch-gva-2"
web = exoscale.get_anti_affinity_group(name="web")
ubuntu = exoscale.get_compute_template(zone=zone,
name="Linux Ubuntu 20.04 LTS 64-bit")
my_server = exoscale.ComputeInstance("my-server",
zone=zone,
type="standard.medium",
template_id=ubuntu.id,
disk_size=20,
anti_affinity_group_ids=[web.id])
```
:param str id: The ID of the Anti-Affinity Group (conflicts with `name`).
:param str name: The name of the Anti-Affinity Group (conflicts with `id`).
"""
__args__ = dict()
__args__['id'] = id
__args__['name'] = name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('exoscale:index/getAntiAffinityGroup:getAntiAffinityGroup', __args__, opts=opts, typ=GetAntiAffinityGroupResult).value
return AwaitableGetAntiAffinityGroupResult(
id=__ret__.id,
instances=__ret__.instances,
name=__ret__.name)
@_utilities.lift_output_func(get_anti_affinity_group)
def get_anti_affinity_group_output(id: Optional[pulumi.Input[Optional[str]]] = None,
name: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAntiAffinityGroupResult]:
"""
Provides information on an [Anti-Affinity Group][aag-doc] for use in other resources such as a [`Compute`][r-compute] resource.
## Example Usage
```python
import pulumi
import pulumi_exoscale as exoscale
zone = "ch-gva-2"
web = exoscale.get_anti_affinity_group(name="web")
ubuntu = exoscale.get_compute_template(zone=zone,
name="Linux Ubuntu 20.04 LTS 64-bit")
my_server = exoscale.ComputeInstance("my-server",
zone=zone,
type="standard.medium",
template_id=ubuntu.id,
disk_size=20,
anti_affinity_group_ids=[web.id])
```
:param str id: The ID of the Anti-Affinity Group (conflicts with `name`).
:param str name: The name of the Anti-Affinity Group (conflicts with `id`).
"""
...
|
py | 1a30322d7e1936cec3355bfbf561d0a3d3d83712 | from students import views as students_views
from django.urls import path
from django.contrib.auth import views as auth_views
urlpatterns = [
path('login/', auth_views.LoginView.as_view(template_name='students/student/login.html'), name = 'login'),
path('logout/', auth_views.LogoutView.as_view(template_name='students/student/logout.html'), name = 'logout'),
path('register/',students_views.StudentRegistrationView.as_view(), name='student_registration'),
path('enroll-course/',students_views.StudentEnrollCourseView.as_view(), name='student_enroll_course'),
path('courses/', students_views.StudentCourseListView.as_view(), name='student_course_list'),
path('course/<pk>/', students_views.StudentCourseDetailView.as_view(), name='student_course_detail'),
path('course/<pk>/<module_id>/', students_views.StudentCourseDetailView.as_view(), name='student_course_detail_module'),
] |
py | 1a303245c72a284cd75bda66d762438a81461f37 | import os
import unittest
from pathlib import Path
import paramak
import pytest
class test_object_properties(unittest.TestCase):
def test_shape_default_properties(self):
"""Creates a Shape object and checks that the points attribute has
a default of None."""
test_shape = paramak.Shape()
assert test_shape.points is None
def test_incorrect_workplane(self):
"""Creates Shape object with incorrect workplane and checks ValueError
is raised."""
test_shape = paramak.Shape()
def incorrect_workplane():
"""Creates Shape object with unacceptable workplane."""
test_shape.workplane = "ZY"
self.assertRaises(ValueError, incorrect_workplane)
def test_incorrect_points(self):
"""Creates Shape objects and checks errors are raised correctly when
specifying points."""
test_shape = paramak.Shape()
def incorrect_points_end_point_is_start_point():
"""Checks ValueError is raised when the start and end points are
the same."""
test_shape.points = [(0, 200), (200, 100), (0, 0), (0, 200)]
self.assertRaises(
ValueError,
incorrect_points_end_point_is_start_point)
def incorrect_points_missing_z_value():
"""Checks ValueError is raised when a point is missing a z
value."""
test_shape.points = [(0, 200), (200), (0, 0), (0, 50)]
self.assertRaises(ValueError, incorrect_points_missing_z_value)
def incorrect_points_not_a_list():
"""Checks ValueError is raised when the points are not a list."""
test_shape.points = (0, 0), (0, 20), (20, 20), (20, 0)
self.assertRaises(ValueError, incorrect_points_not_a_list)
def incorrect_points_wrong_number_of_entries():
"""Checks ValueError is raised when individual points dont have 2
or 3 entries."""
test_shape.points = [(0, 0), (0, 20), (20, 20, 20, 20)]
self.assertRaises(ValueError, incorrect_points_wrong_number_of_entries)
def incorrect_x_point_value_type():
"""Checks ValueError is raised when X point is not a number."""
test_shape.points = [("string", 0), (0, 20), (20, 20)]
self.assertRaises(ValueError, incorrect_x_point_value_type)
def incorrect_y_point_value_type():
"""Checks ValueError is raised when Y point is not a number."""
test_shape.points = [(0, "string"), (0, 20), (20, 20)]
self.assertRaises(ValueError, incorrect_y_point_value_type)
def test_create_limits(self):
"""Creates a Shape object and checks that the create_limits function
returns the expected values for x_min, x_max, z_min and z_max."""
test_shape = paramak.Shape()
test_shape.points = [
(0, 0),
(0, 10),
(0, 20),
(10, 20),
(20, 20),
(20, 10),
(20, 0),
(10, 0),
]
assert test_shape.create_limits() == (0.0, 20.0, 0.0, 20.0)
# test with a component which has a find_points method
test_shape2 = paramak.Plasma()
test_shape2.create_limits()
assert test_shape2.x_min is not None
def test_create_limits_error(self):
"""Checks error is raised when no points are given."""
test_shape = paramak.Shape()
def limits():
test_shape.create_limits()
self.assertRaises(ValueError, limits)
def test_export_2d_image(self):
"""Creates a Shape object and checks that a png file of the object with
the correct suffix can be exported using the export_2d_image method."""
test_shape = paramak.Shape()
test_shape.points = [(0, 0), (0, 20), (20, 20), (20, 0)]
os.system("rm filename.png")
test_shape.export_2d_image("filename")
assert Path("filename.png").exists() is True
os.system("rm filename.png")
test_shape.export_2d_image("filename.png")
assert Path("filename.png").exists() is True
os.system("rm filename.png")
def test_initial_solid_construction(self):
"""Creates a shape and checks that a cadquery solid with a unique hash
value is created when .solid is called."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360
)
assert test_shape.hash_value is None
assert test_shape.solid is not None
assert type(test_shape.solid).__name__ == "Workplane"
assert test_shape.hash_value is not None
def test_solid_return(self):
"""Checks that the same cadquery solid with the same unique hash value
is returned when shape.solid is called again after no changes have been
made to the Shape."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360
)
assert test_shape.solid is not None
initial_hash_value = test_shape.hash_value
assert test_shape.solid is not None
assert initial_hash_value == test_shape.hash_value
def test_conditional_solid_reconstruction(self):
"""Checks that a new cadquery solid with a new unique hash value is
constructed when shape.solid is called after changes to the Shape have
been made."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)], rotation_angle=360
)
assert test_shape.solid is not None
assert test_shape.hash_value is not None
initial_hash_value = test_shape.hash_value
test_shape.rotation_angle = 180
assert test_shape.solid is not None
assert test_shape.hash_value is not None
assert initial_hash_value != test_shape.hash_value
def test_hash_value_update(self):
"""Checks that the hash value of a Shape is not updated until a new
cadquery solid has been created."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)], rotation_angle=360
)
test_shape.solid
assert test_shape.hash_value is not None
initial_hash_value = test_shape.hash_value
test_shape.rotation_angle = 180
assert test_shape.hash_value == initial_hash_value
test_shape.solid
assert test_shape.hash_value != initial_hash_value
def test_material_tag_warning(self):
"""Checks that a warning is raised when a Shape has a material tag >
28 characters."""
test_shape = paramak.Shape()
def warning_material_tag():
test_shape.material_tag = "abcdefghijklmnopqrstuvwxyz12345"
self.assertWarns(UserWarning, warning_material_tag)
def test_invalid_material_tag(self):
"""Checks a ValueError is raised when a Shape has an invalid material
tag."""
test_shape = paramak.Shape()
def invalid_material_tag():
test_shape.material_tag = 123
self.assertRaises(ValueError, invalid_material_tag)
def test_export_html(self):
"""Checks a plotly figure of the Shape is exported by the export_html
method with the correct filename with RGB and RGBA colors."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360
)
os.system("rm filename.html")
test_shape.export_html('filename')
assert Path("filename.html").exists() is True
os.system("rm filename.html")
test_shape.color = (1, 0, 0, 0.5)
test_shape.export_html('filename')
assert Path("filename.html").exists() is True
os.system("rm filename.html")
def test_export_html_with_points_None(self):
"""Checks that an error is raised when points is None and export_html
"""
test_shape = paramak.Shape()
def export():
test_shape.export_html("out.html")
self.assertRaises(ValueError, export)
def test_invalid_stp_filename(self):
"""Checks ValueError is raised when invalid stp filenames are used."""
def invalid_filename_suffix():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
stp_filename="filename.invalid_suffix"
)
self.assertRaises(ValueError, invalid_filename_suffix)
def invalid_filename_type():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
stp_filename=123456
)
self.assertRaises(ValueError, invalid_filename_type)
def test_invalid_stl_filename(self):
"""Checks ValueError is raised when invalid stl filenames are used."""
def invalid_filename_suffix():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
stl_filename="filename.invalid_suffix"
)
self.assertRaises(ValueError, invalid_filename_suffix)
def invalid_filename_type():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
stl_filename=123456
)
self.assertRaises(ValueError, invalid_filename_type)
def test_invalid_color(self):
"""Checks ValueError is raised when invalid colors are used."""
def invalid_color_type():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
color=255
)
self.assertRaises(ValueError, invalid_color_type)
def invalid_color_length():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
color=(255, 255, 255, 1, 1)
)
self.assertRaises(ValueError, invalid_color_length)
def test_volumes_add_up_to_total_volume_Compound(self):
"""Checks the volume and volumes attributes are correct types
        and that the volumes sum to equal the volume for a Compound."""
test_shape = paramak.PoloidalFieldCoilSet(
heights=[10, 10],
widths=[20, 20],
center_points=[(15, 15), (50, 50)]
)
assert isinstance(test_shape.volume, float)
assert isinstance(test_shape.volumes, list)
assert isinstance(test_shape.volumes[0], float)
assert isinstance(test_shape.volumes[1], float)
assert len(test_shape.volumes) == 2
assert sum(test_shape.volumes) == pytest.approx(test_shape.volume)
def test_volumes_add_up_to_total_volume(self):
"""Checks the volume and volumes attributes are correct types
        and that the volumes sum to equal the volume."""
test_shape = paramak.PoloidalFieldCoil(
center_point=(100, 100),
height=50,
width=50
)
assert isinstance(test_shape.volume, float)
assert isinstance(test_shape.volumes, list)
assert isinstance(test_shape.volumes[0], float)
assert len(test_shape.volumes) == 1
assert sum(test_shape.volumes) == pytest.approx(test_shape.volume)
def test_areas_add_up_to_total_area_Compound(self):
"""Checks the area and areas attributes are correct types
        and that the areas sum to equal the area for a Compound."""
test_shape = paramak.PoloidalFieldCoilSet(
heights=[10, 10],
widths=[20, 20],
center_points=[(15, 15), (50, 50)]
)
assert isinstance(test_shape.area, float)
assert isinstance(test_shape.areas, list)
assert isinstance(test_shape.areas[0], float)
assert isinstance(test_shape.areas[1], float)
assert isinstance(test_shape.areas[2], float)
assert isinstance(test_shape.areas[3], float)
assert isinstance(test_shape.areas[4], float)
assert isinstance(test_shape.areas[5], float)
assert isinstance(test_shape.areas[6], float)
assert isinstance(test_shape.areas[7], float)
assert len(test_shape.areas) == 8
assert sum(test_shape.areas) == pytest.approx(test_shape.area)
def test_areas_add_up_to_total_area(self):
"""Checks the area and areas attributes are correct types
        and that the areas sum to equal the area."""
test_shape = paramak.PoloidalFieldCoil(
center_point=(100, 100),
height=50,
width=50
)
assert isinstance(test_shape.area, float)
assert isinstance(test_shape.areas, list)
        for area in test_shape.areas:
            assert isinstance(area, float)
assert len(test_shape.areas) == 4
assert sum(test_shape.areas) == pytest.approx(test_shape.area)
def test_trace(self):
"""Test trace method is populated"""
test_shape = paramak.PoloidalFieldCoil(
center_point=(100, 100),
height=50,
width=50,
name="coucou"
)
assert test_shape._trace() is not None
def test_create_patch_error(self):
"""Checks _create_patch raises a ValueError when points is None."""
test_shape = paramak.Shape()
def patch():
test_shape._create_patch()
self.assertRaises(ValueError, patch)
def test_create_patch_alpha(self):
"""Checks _create_patch returns a patch when alpha is given."""
test_shape = paramak.PoloidalFieldCoil(
center_point=(100, 100),
height=50,
width=50,
color=(0.5, 0.5, 0.5, 0.1)
)
assert test_shape._create_patch() is not None
def test_azimuth_placement_angle_error(self):
"""Checks an error is raised when invalid value for
azimuth_placement_angle is set.
"""
test_shape = paramak.Shape()
def angle_str():
test_shape.azimuth_placement_angle = "coucou"
def angle_str_in_Iterable():
test_shape.azimuth_placement_angle = [0, "coucou"]
self.assertRaises(ValueError, angle_str)
self.assertRaises(ValueError, angle_str_in_Iterable)
def test_name_error(self):
"""Checks an error is raised when invalid value for name is set."""
test_shape = paramak.Shape()
def name_float():
test_shape.name = 2.0
def name_int():
test_shape.name = 1
def name_list():
test_shape.name = ['coucou']
self.assertRaises(ValueError, name_float)
self.assertRaises(ValueError, name_int)
self.assertRaises(ValueError, name_list)
def test_tet_mesh_error(self):
"""Checks an error is raised when invalid value for tet_mesh is set.
"""
test_shape = paramak.Shape()
def tet_mesh_float():
test_shape.tet_mesh = 2.0
def tet_mesh_int():
test_shape.tet_mesh = 1
def tet_mesh_list():
test_shape.tet_mesh = ['coucou']
self.assertRaises(ValueError, tet_mesh_float)
self.assertRaises(ValueError, tet_mesh_int)
self.assertRaises(ValueError, tet_mesh_list)
if __name__ == "__main__":
unittest.main()
|
py | 1a30325f5d4164daea8c87fa9097fd7f33691106 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-02 02:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('analytics', '0012_add_on_delete'),
]
operations = [
migrations.RemoveField(
model_name='installationcount',
name='anomaly',
),
migrations.RemoveField(
model_name='realmcount',
name='anomaly',
),
migrations.RemoveField(
model_name='streamcount',
name='anomaly',
),
migrations.RemoveField(
model_name='usercount',
name='anomaly',
),
migrations.DeleteModel(
name='Anomaly',
),
]
|
py | 1a30344239c042a1e6ef36359372f2b2b353fca3 | """This file is part of Splitter which is released under MIT License.
agg.py defines aggregation functions over processed video streams.
"""
from splitter.dataflow.validation import check_metrics_and_filters, countable
from splitter.struct import IteratorVideoStream
from splitter.dataflow.xform import Null
import logging
import time
import itertools
def count(stream, keys, stats=False):
"""Count counts the true hits of a defined event.
"""
    # Actual counting logic: tally hits per key/value pair as frames stream through.
counter = {}
frame_count = 0
now = time.time()
for frame in stream:
frame_count += 1
if frame_count == 1:
logging.info("Processing first frame of stream")
for key in keys:
if frame[key]:
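                # Record the hit under a compound key of the form "<key>_<value>".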
subkey = key + '_' + str(frame[key])
counter[subkey] = counter.get(subkey,0) + 1
    # Profiling: log the elapsed time of each operator in the stream's lineage.
for obj in stream.lineage():
if hasattr(obj, "time_elapsed"):
logging.info("%s: %s" % (type(obj).__name__, obj.time_elapsed))
else:
logging.info("%s time not measured" % type(obj).__name__)
if not stats:
return counter
else:
return counter, {'frames': frame_count, \
'elapsed': (time.time() - now)}
def counts(streams, keys, stats=False):
"""Count counts the true hits of a defined event.
"""
stream = IteratorVideoStream(itertools.chain(*streams), streams)
lineage = []
for s in streams:
lineage.extend(s.lineage())
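    # Attach the combined lineage of all input streams to the merged stream.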
stream.global_lineage = lineage
return count(stream, keys, stats)
def get(stream, key, frame_rate=-1):
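    # With the default frame_rate of -1 the raw frame index is returned;
    # otherwise frame indices are bucketed by dividing through the frame rate.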
if frame_rate == -1:
return [(v['frame'], v['data']) for v in stream if v[key]]
else:
return [( int(v['frame']/frame_rate) , v['data']) for v in stream if v[key]]
|