max_stars_repo_path (string, 4-245) | max_stars_repo_name (string, 7-115) | max_stars_count (int64, 101-368k) | id (string, 2-8) | content (string, 6-1.03M) |
---|---|---|---|---|
setup.py | zrthstr/uncurl | 460 | 12613921 | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='uncurl',
version='0.0.11',
description='A library to convert curl requests to python-requests.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/spulec/uncurl',
entry_points={
'console_scripts': [
'uncurl = uncurl.bin:main',
],
},
install_requires=['pyperclip', 'six'],
packages=find_packages(exclude=("tests", "tests.*")),
)
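# Illustrative note on the config above (wording is ours, not the package's): the
# console_scripts entry point makes setuptools generate an `uncurl` command on
# installation, wired to the main() function in uncurl/bin.py, so the
# curl-to-requests conversion described in the package summary can be run
# straight from the shell.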
|
Dynamic Programming/Matrix Chain Multiplication/Python/MatrixChainMult.py | iabhimanyu/Algorithms | 715 | 12613922 | <reponame>iabhimanyu/Algorithms
import sys
def MatrixChainOrder(p, n):
m = [[0 for x in range(n)] for x in range(n)]
for i in range(1, n):
m[i][i] = 0
for L in range(2, n):
for i in range(1, n-L+1):
j = i+L-1
m[i][j] = sys.maxsize
for k in range(i, j):
q = m[i][k] + m[k+1][j] + p[i-1]*p[k]*p[j]
if q < m[i][j]:
m[i][j] = q
return m[1][n-1]
arr = [1, 2, 3, 4]
size = len(arr)
print("Minimum number of multiplications is " +
str(MatrixChainOrder(arr, size)))
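# Worked example for the driver above: the DP fills
# m[i][j] = min over k in [i, j) of m[i][k] + m[k+1][j] + p[i-1]*p[k]*p[j],
# the cheapest split of the chain A_i..A_j. For p = [1, 2, 3, 4]
# (matrices 1x2, 2x3, 3x4) the best order is ((A1*A2)*A3) with cost
# 1*2*3 + 1*3*4 = 18, which is what the print above reports.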
|
wordpress_xmlrpc/methods/__init__.py | hishamnajam/python-wordpress-xmlrpc | 218 | 12613956 | <gh_stars>100-1000
"""
Implementations of standard WordPress XML-RPC APIs.
"""
from wordpress_xmlrpc.methods import posts
from wordpress_xmlrpc.methods import pages
from wordpress_xmlrpc.methods import demo
from wordpress_xmlrpc.methods import users
from wordpress_xmlrpc.methods import options
from wordpress_xmlrpc.methods import comments
from wordpress_xmlrpc.methods import media
|
bcs-ui/backend/templatesets/legacy_apps/configuration/fixture/template_k8s.py | laodiu/bk-bcs | 599 | 12613970 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.conf import settings
# Image path prefix
if settings.DEPOT_PREFIX:
image_path_prefix = f'{settings.DEPOT_PREFIX}/public'
else:
image_path_prefix = 'public/bcs'
image_prefix = f'{settings.DEVOPS_ARTIFACTORY_HOST}/{image_path_prefix}'
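# Illustrative expansion (the settings values here are assumptions, not project
# defaults): with DEPOT_PREFIX = 'paas/bcs' and
# DEVOPS_ARTIFACTORY_HOST = 'hub.example.com', the assignments above give
# image_path_prefix = 'paas/bcs/public' and
# image_prefix = 'hub.example.com/paas/bcs/public', the prefix used for the
# container images in K8S_TEMPLATE below.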
K8S_TEMPLATE = {
"code": 0,
"message": "OK",
"data": {
"K8sService": [
{
"id": 77,
"name": "service-redis1",
"deploy_tag_list": ["1527667491806501|K8sDeployment"],
"service_tag": "1527670651988263",
"config": {
"apiVersion": "v1",
"kind": "Service",
"webCache": {"link_app": [], "link_labels": ["app:redis"], "serviceIPs": ""},
"metadata": {"name": "service-redis1", "labels": {}, "annotations": {}},
"spec": {
"type": "ClusterIP",
"selector": {"app": "redis"},
"clusterIP": "",
"ports": [
{
"name": "port",
"port": 6379,
"protocol": "TCP",
"targetPort": "port",
"nodePort": "",
"id": 1527667131558,
}
],
},
},
},
{
"id": 78,
"name": "service-sts1",
"deploy_tag_list": [],
"service_tag": "1527670676238869",
"config": {
"apiVersion": "v1",
"kind": "Service",
"webCache": {"link_app": [], "link_labels": [], "serviceIPs": ""},
"metadata": {"name": "service-sts1", "labels": {}, "annotations": {}},
"spec": {"type": "ClusterIP", "selector": {}, "clusterIP": "None", "ports": []},
},
},
{
"id": 79,
"name": "service-nginx1",
"service_tag": "1527670770986669",
"deploy_tag_list": ["1527670584670192|K8sDeployment"],
"config": {
"apiVersion": "v1",
"kind": "Service",
"webCache": {"link_app": [], "link_labels": ["app:nginx"], "serviceIPs": ""},
"metadata": {"name": "service-nginx1", "labels": {}, "annotations": {}},
"spec": {
"type": "NodePort",
"selector": {"app": "nginx"},
"clusterIP": "",
"ports": [
{
"id": 1527667508909,
"name": "nginx",
"port": 8080,
"protocol": "TCP",
"targetPort": "nginx",
"nodePort": "",
}
],
},
},
},
],
"K8sStatefulSet": [
{
"id": 12,
"name": "statefulset-rumpetroll-v1",
"desc": "",
"deploy_tag": "1527671007581743",
"config": {
"apiVersion": "apps/v1beta2",
"kind": "Deployment",
"webCache": {
"volumes": [{"type": "emptyDir", "name": "", "source": ""}],
"isUserConstraint": False,
"remarkListCache": [{"key": "", "value": ""}],
"labelListCache": [{"key": "app", "value": "rumpetroll", "isSelector": True}],
"logLabelListCache": [{"key": "", "value": ""}],
"isMetric": False,
"metricIdList": [],
"affinityYaml": "",
},
"customLogLabel": {},
"metadata": {"name": "statefulset-rumpetroll-v1"},
"spec": {
"replicas": 1,
"updateStrategy": {"type": "OnDelete", "rollingUpdate": {"partition": 0}},
"podManagementPolicy": "OrderedReady",
"volumeClaimTemplates": [
{
"metadata": {"name": ""},
"spec": {
"accessModes": [],
"storageClassName": "",
"resources": {"requests": {"storage": 1}},
},
}
],
"selector": {"matchLabels": {"app": "rumpetroll"}},
"template": {
"metadata": {"labels": {"app": "rumpetroll"}, "annotations": {}},
"spec": {
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 10,
"nodeSelector": {},
"affinity": {},
"hostNetwork": 0,
"dnsPolicy": "ClusterFirst",
"volumes": [],
"containers": [
{
"name": "container-rumpetroll-v1",
"webCache": {
"desc": "",
# NOTE: imageName is only used by the frontend to match images; its format is "name:value" from the image list
"imageName": f"{image_path_prefix}/k8s/pyrumpetroll:{image_path_prefix}/k8s/pyrumpetroll", # noqa
"imageVersion": "",
"containerType": "container",
"args_text": "",
"livenessProbeType": "HTTP",
"readinessProbeType": "HTTP",
"logListCache": [{"value": ""}],
"env_list": [
{"type": "custom", "key": "DOMAIN", "value": "rumpetroll-game.bk.com"},
{"type": "custom", "key": "MAX_CLIENT", "value": "2"},
{"type": "custom", "key": "MAX_ROOM", "value": "100"},
{"type": "custom", "key": "REDIS_HOST", "value": "service-redis1"},
{"type": "custom", "key": "REDIS_PORT", "value": "6379"},
{"type": "custom", "key": "REDIS_DB", "value": "0"},
{"type": "custom", "key": "NUMPROCS", "value": "1"},
{"type": "valueFrom", "key": "HOST", "value": "status.podIP"},
],
},
"volumeMounts": [],
"image": f"{image_prefix}/k8s/pyrumpetroll:0.3",
"imagePullPolicy": "IfNotPresent",
"ports": [{"id": 1527670806610, "containerPort": 20000, "name": "port"}],
"command": "",
"args": "",
"env": [],
"envFrom": [],
"resources": {
"limits": {"cpu": "", "memory": ""},
"requests": {"cpu": "", "memory": ""},
},
"livenessProbe": {
"httpGet": {"port": "port", "path": "", "httpHeaders": []},
"tcpSocket": {"port": ""},
"exec": {"command": ""},
"initialDelaySeconds": 15,
"periodSeconds": 10,
"timeoutSeconds": 5,
"failureThreshold": 3,
"successThreshold": 1,
},
"readinessProbe": {
"httpGet": {"port": "", "path": "", "httpHeaders": []},
"tcpSocket": {"port": "esdisc"},
"exec": {"command": ""},
"initialDelaySeconds": 15,
"periodSeconds": 10,
"timeoutSeconds": 5,
"failureThreshold": 3,
"successThreshold": 1,
},
"lifecycle": {
"preStop": {"exec": {"command": ""}},
"postStart": {"exec": {"command": ""}},
},
"imageVersion": "0.3",
"logPathList": [],
}
],
"initContainers": [],
},
},
},
"monitorLevel": "general",
},
"service_tag": "1527670676238869",
}
],
"K8sDeployment": [
{
"id": 462,
"deploy_tag": "1527667491806501",
"name": "deploy-redis1",
"desc": "",
"config": {
"apiVersion": "apps/v1beta2",
"kind": "Deployment",
"webCache": {
"volumes": [{"type": "emptyDir", "name": "", "source": ""}],
"isUserConstraint": True,
"remarkListCache": [{"key": "", "value": ""}],
"labelListCache": [{"key": "app", "value": "redis", "isSelector": True}],
"logLabelListCache": [{"key": "", "value": ""}],
"isMetric": False,
"metricIdList": [],
"nodeSelectorList": [{"key": "app", "value": "redis"}],
},
"customLogLabel": {},
"metadata": {"name": "deploy-redis1"},
"spec": {
"minReadySeconds": 0,
"replicas": 1,
"strategy": {"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": 1, "maxSurge": 0}},
"selector": {"matchLabels": {"app": "redis"}},
"template": {
"metadata": {"labels": {"app": "redis"}, "annotations": {}},
"spec": {
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 10,
"nodeSelector": {},
"affinity": {
"podAntiAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": [
{
"labelSelector": {
"matchExpressions": [
{"key": "app", "operator": "In", "values": ["redis"]}
]
},
"topologyKey": "kubernetes.io/hostname",
}
]
}
},
"hostNetwork": 0,
"dnsPolicy": "ClusterFirst",
"volumes": [],
"containers": [
{
"name": "container-redis-default",
"webCache": {
"desc": "",
# NOTE: imageName is only used by the frontend to match images; its format is "name:value" from the image list
"imageName": f"{image_path_prefix}/k8s/redis:{image_path_prefix}/k8s/redis", # noqa
"imageVersion": "",
"args_text": "",
"containerType": "container",
"livenessProbeType": "TCP",
"readinessProbeType": "HTTP",
"logListCache": [{"value": ""}],
"env_list": [{"type": "custom", "key": "", "value": ""}],
},
"volumeMounts": [],
"image": f"{image_prefix}/k8s/redis:1.0",
"imagePullPolicy": "IfNotPresent",
"ports": [{"id": 1527667131558, "containerPort": 6379, "name": "port"}],
"command": "",
"args": "",
"env": [],
"resources": {
"limits": {"cpu": "", "memory": ""},
"requests": {"cpu": "", "memory": ""},
},
"livenessProbe": {
"httpGet": {"port": "", "path": "", "httpHeaders": []},
"tcpSocket": {"port": "port"},
"exec": {"command": ""},
"initialDelaySeconds": 15,
"periodSeconds": 10,
"timeoutSeconds": 5,
"failureThreshold": 3,
"successThreshold": 1,
},
"readinessProbe": {
"httpGet": {"port": "", "path": "", "httpHeaders": []},
"tcpSocket": {"port": ""},
"exec": {"command": ""},
"initialDelaySeconds": 15,
"periodSeconds": 10,
"timeoutSeconds": 5,
"failureThreshold": 3,
"successThreshold": 1,
},
"lifecycle": {
"preStop": {"exec": {"command": ""}},
"postStart": {"exec": {"command": ""}},
},
"imageVersion": "1.0",
"logPathList": [],
}
],
"initContainers": [],
},
},
},
"monitorLevel": "general",
},
},
{
"id": 463,
"deploy_tag": "1527670584670192",
"name": "deploy-nginx1",
"desc": "",
"config": {
"apiVersion": "apps/v1beta2",
"kind": "Deployment",
"webCache": {
"volumes": [{"type": "emptyDir", "name": "", "source": ""}],
"isUserConstraint": True,
"remarkListCache": [{"key": "", "value": ""}],
"labelListCache": [{"key": "app", "value": "nginx", "isSelector": True}],
"logLabelListCache": [{"key": "", "value": ""}],
"isMetric": False,
"metricIdList": [],
},
"customLogLabel": {},
"metadata": {"name": "deploy-nginx1"},
"spec": {
"minReadySeconds": 0,
"replicas": 1,
"strategy": {"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": 1, "maxSurge": 0}},
"selector": {"matchLabels": {"app": "nginx"}},
"template": {
"metadata": {"labels": {"app": "nginx"}, "annotations": {}},
"spec": {
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 10,
"nodeSelector": {},
"affinity": {
"podAntiAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": [
{
"labelSelector": {
"matchExpressions": [
{"key": "app", "operator": "In", "values": ["nginx"]}
]
},
"topologyKey": "kubernetes.io/hostname",
}
]
},
"podAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": [
{
"labelSelector": {
"matchExpressions": [
{"key": "app", "operator": "In", "values": ["redis"]}
]
},
"topologyKey": "kubernetes.io/hostname",
}
]
},
},
"hostNetwork": 1,
"dnsPolicy": "ClusterFirstWithHostNet",
"volumes": [],
"containers": [
{
"name": "container-nginx-default",
"webCache": {
"desc": "",
# NOTE: imageName is only used by the frontend to match images; its format is "name:value" from the image list
"imageName": f"{image_path_prefix}/k8s/rumpetroll-openresty:{image_path_prefix}/k8s/rumpetroll-openresty", # noqa
"imageVersion": "",
"args_text": "",
"containerType": "container",
"livenessProbeType": "TCP",
"readinessProbeType": "HTTP",
"logListCache": [{"value": ""}],
"env_list": [
{"type": "custom", "key": "DOMAIN", "value": "rumpetroll-game.bk.com"},
{"type": "custom", "key": "MAX_CLIENT", "value": "2"},
{"type": "custom", "key": "MAX_ROOM", "value": "100"},
{"type": "custom", "key": "REDIS_HOST", "value": "service-redis1"},
{
"type": "valueFrom",
"key": "NAMESPACE",
"value": "metadata.namespace",
},
{"type": "custom", "key": "REDIS_PORT", "value": "6379"},
{"type": "custom", "key": "REDIS_DB", "value": "0"},
{"type": "custom", "key": "PORT", "value": "80"},
],
},
"volumeMounts": [],
"image": f"{image_prefix}/k8s/rumpetroll-openresty:0.51", # noqa
"imagePullPolicy": "IfNotPresent",
"ports": [{"id": 1527667508909, "containerPort": 80, "name": "nginx"}],
"command": "",
"args": "",
"env": [],
"resources": {
"limits": {"cpu": 300, "memory": 200},
"requests": {"cpu": "", "memory": ""},
},
"livenessProbe": {
"httpGet": {"port": "", "path": "", "httpHeaders": []},
"tcpSocket": {"port": "nginx"},
"exec": {"command": ""},
"initialDelaySeconds": 15,
"periodSeconds": 10,
"timeoutSeconds": 5,
"failureThreshold": 3,
"successThreshold": 1,
},
"readinessProbe": {
"httpGet": {"port": "", "path": "", "httpHeaders": []},
"tcpSocket": {"port": ""},
"exec": {"command": ""},
"initialDelaySeconds": 15,
"periodSeconds": 10,
"timeoutSeconds": 5,
"failureThreshold": 3,
"successThreshold": 1,
},
"lifecycle": {
"preStop": {"exec": {"command": ""}},
"postStart": {"exec": {"command": ""}},
},
"imageVersion": "0.50",
"logPathList": [],
}
],
"initContainers": [],
},
},
},
"monitorLevel": "general",
},
},
],
},
}
|
src/textacy/viz/network.py | austinjp/textacy | 1,929 | 12613996 | <reponame>austinjp/textacy<filename>src/textacy/viz/network.py
import math
import networkx as nx
try:
import matplotlib.pyplot as plt
except ImportError:
pass
RC_PARAMS = {
"axes.axisbelow": True,
"axes.edgecolor": ".8",
"axes.facecolor": "white",
"axes.grid": False,
"axes.labelcolor": ".15",
"axes.linewidth": 1.0,
"figure.facecolor": "white",
"font.family": ["sans-serif"],
"font.sans-serif": ["Arial", "Liberation Sans", "sans-serif"],
"grid.color": ".8",
"grid.linestyle": "-",
"image.cmap": "Greys",
"legend.frameon": False,
"legend.numpoints": 1,
"legend.scatterpoints": 1,
"lines.solid_capstyle": "round",
"text.color": ".15",
"xtick.color": ".15",
"xtick.direction": "out",
"xtick.major.size": 0.0,
"xtick.minor.size": 0.0,
"ytick.color": ".15",
"ytick.direction": "out",
"ytick.major.size": 0.0,
"ytick.minor.size": 0.0,
}
def draw_semantic_network(
graph,
*,
node_weights=None,
spread=3.0,
draw_nodes=False,
base_node_size=300,
node_alpha=0.25,
line_width=0.5,
line_alpha=0.1,
base_font_size=12,
save=False,
):
"""
Draw a semantic network with nodes representing either terms or sentences,
edges representing co-occurrence or similarity, and positions given by a force-
directed layout.
Args:
graph (``networkx.Graph``):
node_weights (dict): mapping of node: weight, used to size node labels
(and, optionally, node circles) according to their weight
spread (float): number that drives the spread of the network; higher
values give more spread-out networks
draw_nodes (bool): if True, circles are drawn under the node labels
base_node_size (int): if `node_weights` not given and `draw_nodes` is True,
this is the size of all nodes in the network; if `node_weights` _is_
given, node sizes will be scaled against this value based on their
weights compared to the max weight
node_alpha (float): alpha of the circular nodes drawn behind labels if
`draw_nodes` is True
line_width (float): width of the lines (edges) drawn between nodes
line_alpha (float): alpha of the lines (edges) drawn between nodes
base_font_size (int): if `node_weights` not given, this is the font size
used to draw all labels; otherwise, font sizes will be scaled against
this value based on the corresponding node weights compared to the max
save (str): give the full /path/to/fname on disk to save figure (optional)
Returns:
:obj:`matplotlib.axes.Axes.axis`: Axis on which network plot is drawn.
Note:
This function requires `matplotlib <https://matplotlib.org/>`_.
"""
try:
plt
except NameError:
raise ImportError(
"`matplotlib` is not installed, so `textacy.viz` won't work; "
"install it individually via `$ pip install matplotlib`, or "
"along with textacy via `pip install textacy[viz]`."
)
with plt.rc_context(RC_PARAMS):
fig, ax = plt.subplots(figsize=(12, 12))
pos = nx.layout.spring_layout(graph, k=spread / math.sqrt(len(graph.nodes())))
_ = nx.draw_networkx_edges(
graph, ax=ax, pos=pos, width=line_width, alpha=line_alpha, arrows=False
)
if node_weights is None:
if draw_nodes is True:
_ = nx.draw_networkx_nodes(
graph,
ax=ax,
pos=pos,
alpha=node_alpha,
linewidths=0.5,
node_size=base_node_size,
)
_ = nx.draw_networkx_labels(
graph,
pos,
ax=ax,
font_size=base_font_size,
font_color="black",
font_family="sans-serif",
)
else:
max_node_weight = max(node_weights.values())
if draw_nodes is True:
node_sizes = [
base_node_size * pow(node_weights[node] / max_node_weight, 0.75)
for node in graph.nodes()
]
_ = nx.draw_networkx_nodes(
graph,
ax=ax,
pos=pos,
node_size=node_sizes,
alpha=node_alpha,
linewidths=0.5,
)
for node, weight in node_weights.items():
_ = nx.draw_networkx_labels(
graph,
pos,
labels={node: node},
ax=ax,
font_color="black",
font_family="sans-serif",
font_size=base_font_size * pow(weight / max_node_weight, 0.15),
)
ax.set_frame_on(False)
ax.set_xticklabels(["" for _ in range(len(ax.get_xticklabels()))])
ax.set_yticklabels(["" for _ in range(len(ax.get_yticklabels()))])
if save:
fig.savefig(save, bbox_inches="tight", dpi=100)
return ax
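# Minimal usage sketch (assumes matplotlib is installed; the graph and weights
# below are illustrative placeholders, not textacy outputs):
#
# import networkx as nx
# g = nx.karate_club_graph()
# weights = {node: deg for node, deg in g.degree()}
# ax = draw_semantic_network(g, node_weights=weights, draw_nodes=True,
#                            save="network.png")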
|
api/src/opentrons/util/helpers.py | knownmed/opentrons | 235 | 12614015 | import typing
from datetime import datetime, timezone
def deep_get(
obj: typing.Union[typing.Mapping, typing.Sequence],
key: typing.Sequence[typing.Union[str, int]],
default=None,
):
"""
Utility to get deeply nested element in a list, tuple or dict without
resorting to some_dict.get('k1', {}).get('k2', {}).get('k3', {})....etc.
:param obj: A dict, list, or tuple
:param key: collection of keys
:param default: the default to return on error
:return: value or default
"""
if not key:
return default
for k in key:
try:
obj = obj[k] # type: ignore
except (KeyError, TypeError, IndexError):
return default
return obj
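# Illustrative lookups (example data is made up for demonstration):
# deep_get({"a": {"b": [1, 2, 3]}}, ["a", "b", 1]) -> 2
# deep_get({"a": {"b": [1, 2, 3]}}, ["a", "missing"], default=0) -> 0
# deep_get({"a": 1}, []) -> None (an empty key falls back to the default)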
def utc_now() -> datetime:
"""Return the UTC time with timezone"""
return datetime.now(tz=timezone.utc)
|
dali/test/python/test_operator_water.py | cyyever/DALI | 3,967 | 12614021 | # Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import nvidia.dali as dali
import numpy as np
import os
import cv2
import math
from test_utils import compare_pipelines
from test_utils import get_dali_extra_path
test_data_root = get_dali_extra_path()
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
class WaterPipeline(Pipeline):
def __init__(self, device, batch_size, phase_y, phase_x, freq_x, freq_y, ampl_x, ampl_y,
num_threads=3, device_id=0, num_gpus=1, dtype=types.UINT8, prime_size=False,
do_mask=False):
super(WaterPipeline, self).__init__(batch_size, num_threads, device_id)
self.device = device
self.dtype = dtype
self.prime_size = prime_size
self.do_mask = do_mask
self.input = ops.readers.Caffe(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.decoders.Image(device = "cpu", output_type = types.RGB)
self.water = ops.Water(device = self.device, ampl_x=ampl_x, ampl_y=ampl_y,
phase_x=phase_x, phase_y=phase_y, freq_x=freq_x, freq_y=freq_y,
interp_type = dali.types.INTERP_LINEAR)
def define_graph(self):
inputs, labels = self.input(name="Reader")
images = self.decode(inputs)
if self.device == 'gpu':
images = images.gpu()
if self.prime_size:
images = fn.resize(images, resize_x=101, resize_y=43)
mask = fn.random.coin_flip(seed=42) if self.do_mask else None
images = fn.cast(images, dtype=self.dtype)
images = self.water(images, mask=mask)
return images
def python_water(img, phase_y, phase_x, freq_x, freq_y, ampl_x, ampl_y):
nh,nw=img.shape[:2]
img_x=np.zeros((nh,nw),np.float32)
img_y=np.zeros((nh,nw),np.float32)
x_idx = np.arange(0, nw, 1, np.float32)
y_idx = np.arange(0, nh, 1, np.float32)
x_wave = ampl_y * np.cos(freq_y * x_idx + phase_y)
y_wave = ampl_x * np.sin(freq_x * y_idx + phase_x)
for x in range(nw):
img_x[:,x] = y_wave + x - 0.5
for y in range(nh):
img_y[y,:] = x_wave + y - 0.5
return cv2.remap(img, img_x, img_y, cv2.INTER_LINEAR)
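# Note on the reference implementation above: cv2.remap samples the source at
# (img_x[y, x], img_y[y, x]) for every output pixel, so the sinusoidal x/y
# offsets built above produce the water-ripple warp that the DALI Water
# operator is expected to match; the pipelines below compare the two.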
class WaterPythonPipeline(Pipeline):
def __init__(self, batch_size, function, num_threads=1, device_id=0, num_gpus=1,
dtype=types.UINT8, prime_size=False):
super(WaterPythonPipeline, self).__init__(batch_size,
num_threads,
device_id,
exec_async=False,
exec_pipelined=False)
self.dtype = dtype
self.prime_size = prime_size
self.input = ops.readers.Caffe(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.decoders.Image(device = "cpu", output_type = types.RGB)
self.water = ops.PythonFunction(function=function, output_layouts="HWC")
def define_graph(self):
inputs, labels = self.input(name="Reader")
images = self.decode(inputs)
if self.prime_size:
images = fn.resize(images, resize_x=101, resize_y=43)
images = fn.cast(images, dtype=self.dtype)
images = self.water(images)
return images
def check_water_cpu_vs_gpu(batch_size, niter, dtype, do_mask):
phase_y=0.5
phase_x=0.2
freq_x=0.06
freq_y=0.08
ampl_x=2.0
ampl_y=3.0
compare_pipelines(WaterPipeline('cpu', batch_size, ampl_x=ampl_x, ampl_y=ampl_y,
phase_x=phase_x, phase_y=phase_y, freq_x=freq_x, freq_y=freq_y,
dtype=dtype, do_mask=do_mask),
WaterPipeline('gpu', batch_size, ampl_x=ampl_x, ampl_y=ampl_y,
phase_x=phase_x, phase_y=phase_y, freq_x=freq_x, freq_y=freq_y,
dtype=dtype, do_mask=do_mask),
batch_size=batch_size, N_iterations=niter, eps=1)
def test_water_cpu_vs_gpu():
niter = 3
for batch_size in [1, 3]:
for do_mask in [False, True]:
for dtype in [types.UINT8, types.FLOAT]:
yield check_water_cpu_vs_gpu, batch_size, niter, dtype, do_mask
def check_water_vs_cv(device, batch_size, niter, dtype, prime_size):
phase_y=0.5
phase_x=0.2
freq_x=0.06
freq_y=0.08
ampl_x=2.0
ampl_y=3.0
python_func = lambda img: python_water(img, phase_y, phase_x, freq_x, freq_y, ampl_x, ampl_y)
compare_pipelines(WaterPipeline(device, batch_size, ampl_x=ampl_x, ampl_y=ampl_y,
phase_x=phase_x, phase_y=phase_y, freq_x=freq_x, freq_y=freq_y,
dtype=dtype, prime_size=prime_size),
WaterPythonPipeline(batch_size, python_func, dtype=dtype,
prime_size=prime_size),
batch_size=batch_size, N_iterations=niter, eps=8)
def test_water_vs_cv():
niter = 3
for device in ['cpu', 'gpu']:
for batch_size in [1, 3]:
for dtype in [types.UINT8, types.FLOAT]:
for prime_size in [False, True]:
yield check_water_vs_cv, device, batch_size, niter, dtype, prime_size
|
alipay/aop/api/domain/AlipayOpenMiniResourceRecordNotifyModel.py | antopen/alipay-sdk-python-all | 213 | 12614030 | <filename>alipay/aop/api/domain/AlipayOpenMiniResourceRecordNotifyModel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenMiniResourceRecordNotifyModel(object):
def __init__(self):
self._author_id = None
self._mini_app_id = None
self._params = None
self._site_id = None
self._source = None
self._taobao_id = None
self._taobao_nick = None
@property
def author_id(self):
return self._author_id
@author_id.setter
def author_id(self, value):
self._author_id = value
@property
def mini_app_id(self):
return self._mini_app_id
@mini_app_id.setter
def mini_app_id(self, value):
self._mini_app_id = value
@property
def params(self):
return self._params
@params.setter
def params(self, value):
self._params = value
@property
def site_id(self):
return self._site_id
@site_id.setter
def site_id(self, value):
self._site_id = value
@property
def source(self):
return self._source
@source.setter
def source(self, value):
self._source = value
@property
def taobao_id(self):
return self._taobao_id
@taobao_id.setter
def taobao_id(self, value):
self._taobao_id = value
@property
def taobao_nick(self):
return self._taobao_nick
@taobao_nick.setter
def taobao_nick(self, value):
self._taobao_nick = value
def to_alipay_dict(self):
params = dict()
if self.author_id:
if hasattr(self.author_id, 'to_alipay_dict'):
params['author_id'] = self.author_id.to_alipay_dict()
else:
params['author_id'] = self.author_id
if self.mini_app_id:
if hasattr(self.mini_app_id, 'to_alipay_dict'):
params['mini_app_id'] = self.mini_app_id.to_alipay_dict()
else:
params['mini_app_id'] = self.mini_app_id
if self.params:
if hasattr(self.params, 'to_alipay_dict'):
params['params'] = self.params.to_alipay_dict()
else:
params['params'] = self.params
if self.site_id:
if hasattr(self.site_id, 'to_alipay_dict'):
params['site_id'] = self.site_id.to_alipay_dict()
else:
params['site_id'] = self.site_id
if self.source:
if hasattr(self.source, 'to_alipay_dict'):
params['source'] = self.source.to_alipay_dict()
else:
params['source'] = self.source
if self.taobao_id:
if hasattr(self.taobao_id, 'to_alipay_dict'):
params['taobao_id'] = self.taobao_id.to_alipay_dict()
else:
params['taobao_id'] = self.taobao_id
if self.taobao_nick:
if hasattr(self.taobao_nick, 'to_alipay_dict'):
params['taobao_nick'] = self.taobao_nick.to_alipay_dict()
else:
params['taobao_nick'] = self.taobao_nick
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenMiniResourceRecordNotifyModel()
if 'author_id' in d:
o.author_id = d['author_id']
if 'mini_app_id' in d:
o.mini_app_id = d['mini_app_id']
if 'params' in d:
o.params = d['params']
if 'site_id' in d:
o.site_id = d['site_id']
if 'source' in d:
o.source = d['source']
if 'taobao_id' in d:
o.taobao_id = d['taobao_id']
if 'taobao_nick' in d:
o.taobao_nick = d['taobao_nick']
return o
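# Illustrative round trip (field values are placeholders):
# m = AlipayOpenMiniResourceRecordNotifyModel.from_alipay_dict(
#     {'mini_app_id': 'example_app_id', 'source': 'example_source'})
# m.to_alipay_dict() -> {'mini_app_id': 'example_app_id', 'source': 'example_source'}
# Unset attributes stay None and are skipped by to_alipay_dict().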
|
pythran/tests/openmp.legacy/omp_task_imp_firstprivate.py | davidbrochart/pythran | 1,647 | 12614035 | import omp
def omp_task_imp_firstprivate():
i = 5
k = 0
result = False
NUM_TASKS = 25
task_result = True
if 'omp parallel firstprivate(i)':
in_parallel = omp.in_parallel()
if 'omp single':
for k in range(NUM_TASKS):
if 'omp task shared(result, task_result)':
if i != 5:
task_result = False
for j in range(0, NUM_TASKS):
i += 1
'omp taskwait'
result = task_result and i == 5
return result or not in_parallel
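# Informal summary of the check above: i is firstprivate in the parallel
# region, and variables like i are implicitly firstprivate inside the tasks,
# so every task starts with its own copy equal to 5; the increments only touch
# that task-local copy, which is why i is still 5 after the taskwait and the
# function returns True when running under OpenMP.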
|
clr.py | SunYanCN/keras-one-cycle | 282 | 12614045 | import os
import numpy as np
import warnings
from keras.callbacks import Callback
from keras import backend as K
# Code is ported from https://github.com/fastai/fastai
class OneCycleLR(Callback):
def __init__(self,
num_samples,
batch_size,
max_lr,
end_percentage=0.1,
scale_percentage=None,
maximum_momentum=0.95,
minimum_momentum=0.85,
verbose=True):
""" This callback implements a cyclical learning rate policy (CLR).
This is a special case of Cyclic Learning Rates, where we have only 1 cycle.
After the completion of 1 cycle, the learning rate will decrease rapidly to
1/100th of its initial lowest value.
# Arguments:
num_samples: Integer. Number of samples in the dataset.
batch_size: Integer. Batch size during training.
max_lr: Float. Initial learning rate. This also sets the
starting learning rate (which will be 10x smaller than
this), and will increase to this value during the first cycle.
end_percentage: Float. The percentage of all the epochs of training
that will be dedicated to sharply decreasing the learning
rate after the completion of 1 cycle. Must be between 0 and 1.
scale_percentage: Float or None. If float, must be between 0 and 1.
If None, it will compute the scale_percentage automatically
based on the `end_percentage`.
maximum_momentum: Optional. Sets the maximum momentum (initial)
value, which gradually drops to its lowest value in half-cycle,
then gradually increases again to stay constant at this max value.
Can only be used with SGD Optimizer.
minimum_momentum: Optional. Sets the minimum momentum at the end of
the half-cycle. Can only be used with SGD Optimizer.
verbose: Bool. Whether to print the current learning rate after every
epoch.
# Reference
- [A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, momentum, and weight decay](https://arxiv.org/abs/1803.09820)
- [Super-Convergence: Very Fast Training of Residual Networks Using Large Learning Rates](https://arxiv.org/abs/1708.07120)
"""
super(OneCycleLR, self).__init__()
if end_percentage < 0. or end_percentage > 1.:
raise ValueError("`end_percentage` must be between 0 and 1")
if scale_percentage is not None and (scale_percentage < 0. or scale_percentage > 1.):
raise ValueError("`scale_percentage` must be between 0 and 1")
self.initial_lr = max_lr
self.end_percentage = end_percentage
self.scale = float(scale_percentage) if scale_percentage is not None else float(end_percentage)
self.max_momentum = maximum_momentum
self.min_momentum = minimum_momentum
self.verbose = verbose
if self.max_momentum is not None and self.min_momentum is not None:
self._update_momentum = True
else:
self._update_momentum = False
self.clr_iterations = 0.
self.history = {}
self.epochs = None
self.batch_size = batch_size
self.samples = num_samples
self.steps = None
self.num_iterations = None
self.mid_cycle_id = None
def _reset(self):
"""
Reset the callback.
"""
self.clr_iterations = 0.
self.history = {}
def compute_lr(self):
"""
Compute the learning rate based on which phase of the cycle it is in.
- If in the first half of training, the learning rate gradually increases.
- If in the second half of training, the learning rate gradually decreases.
- If in the final `end_percentage` portion of training, the learning rate
is quickly reduced to roughly 1/100th of the original minimum learning rate.
# Returns:
the new learning rate
"""
if self.clr_iterations > 2 * self.mid_cycle_id:
current_percentage = (self.clr_iterations - 2 * self.mid_cycle_id)
current_percentage /= float((self.num_iterations - 2 * self.mid_cycle_id))
new_lr = self.initial_lr * (1. + (current_percentage *
(1. - 100.) / 100.)) * self.scale
elif self.clr_iterations > self.mid_cycle_id:
current_percentage = 1. - (
self.clr_iterations - self.mid_cycle_id) / self.mid_cycle_id
new_lr = self.initial_lr * (1. + current_percentage *
(self.scale * 100 - 1.)) * self.scale
else:
current_percentage = self.clr_iterations / self.mid_cycle_id
new_lr = self.initial_lr * (1. + current_percentage *
(self.scale * 100 - 1.)) * self.scale
if self.clr_iterations == self.num_iterations:
self.clr_iterations = 0
return new_lr
def compute_momentum(self):
"""
Compute the momentum based on which phase of the cycle it is in.
- If in the first half of training, the momentum gradually decreases.
- If in the second half of training, the momentum gradually increases.
- If in the final `end_percentage` portion of training, the momentum value
is kept constant at the maximum initial value.
# Returns:
the new momentum value
"""
if self.clr_iterations > 2 * self.mid_cycle_id:
new_momentum = self.max_momentum
elif self.clr_iterations > self.mid_cycle_id:
current_percentage = 1. - ((self.clr_iterations - self.mid_cycle_id) / float(
self.mid_cycle_id))
new_momentum = self.max_momentum - current_percentage * (
self.max_momentum - self.min_momentum)
else:
current_percentage = self.clr_iterations / float(self.mid_cycle_id)
new_momentum = self.max_momentum - current_percentage * (
self.max_momentum - self.min_momentum)
return new_momentum
def on_train_begin(self, logs={}):
logs = logs or {}
self.epochs = self.params['epochs']
# When fit generator is used
# self.params don't have the elements 'batch_size' and 'samples'
# self.batch_size = self.params['batch_size']
# self.samples = self.params['samples']
self.steps = self.params['steps']
if self.steps is not None:
self.num_iterations = self.epochs * self.steps
else:
if (self.samples % self.batch_size) == 0:
remainder = 0
else:
remainder = 1
self.num_iterations = (self.epochs + remainder) * self.samples // self.batch_size
self.mid_cycle_id = int(self.num_iterations * ((1. - self.end_percentage)) / float(2))
self._reset()
K.set_value(self.model.optimizer.lr, self.compute_lr())
if self._update_momentum:
if not hasattr(self.model.optimizer, 'momentum'):
raise ValueError("Momentum can be updated only on SGD optimizer !")
new_momentum = self.compute_momentum()
K.set_value(self.model.optimizer.momentum, new_momentum)
def on_batch_end(self, epoch, logs=None):
logs = logs or {}
self.clr_iterations += 1
new_lr = self.compute_lr()
self.history.setdefault('lr', []).append(
K.get_value(self.model.optimizer.lr))
K.set_value(self.model.optimizer.lr, new_lr)
if self._update_momentum:
if not hasattr(self.model.optimizer, 'momentum'):
raise ValueError("Momentum can be updated only on SGD optimizer !")
new_momentum = self.compute_momentum()
self.history.setdefault('momentum', []).append(
K.get_value(self.model.optimizer.momentum))
K.set_value(self.model.optimizer.momentum, new_momentum)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
def on_epoch_end(self, epoch, logs=None):
if self.verbose:
if self._update_momentum:
print(" - lr: %0.5f - momentum: %0.2f " %
(self.history['lr'][-1], self.history['momentum'][-1]))
else:
print(" - lr: %0.5f " % (self.history['lr'][-1]))
class LRFinder(Callback):
def __init__(self,
num_samples,
batch_size,
minimum_lr=1e-5,
maximum_lr=10.,
lr_scale='exp',
validation_data=None,
validation_sample_rate=5,
stopping_criterion_factor=4.,
loss_smoothing_beta=0.98,
save_dir=None,
verbose=True):
"""
This class uses the Cyclic Learning Rate history to find a
set of learning rates that can be good initializations for the
One-Cycle training proposed by <NAME> in the paper referenced
below.
A port of the Fast.ai implementation for Keras.
# Note
This requires that the model be trained for exactly 1 epoch. If the model
is trained for more epochs, then the metric calculations are only done for
the first epoch.
# Interpretation
Upon visualizing the loss plot, check where the loss starts to increase
rapidly. Choose a learning rate somewhat prior to the corresponding
position in the plot for faster convergence. This will be the maximum lr.
Pass that value as the `max_lr` argument to the OneCycleLR callback.
Since the plot is in log scale, a point at -k on the x-axis corresponds to
a learning rate of 10 ^ (-k).
# Arguments:
num_samples: Integer. Number of samples in the dataset.
batch_size: Integer. Batch size during training.
minimum_lr: Float. Initial learning rate (and the minimum).
maximum_lr: Float. Final learning rate (and the maximum).
lr_scale: Can be one of ['exp', 'linear']. Chooses the type of
scaling for each update to the learning rate during subsequent
batches. Choose 'exp' for large range and 'linear' for small range.
validation_data: Requires the validation dataset as a tuple of
(X, y) belonging to the validation set. If provided, will use the
validation set to compute the loss metrics. Else uses the training
batch loss. Will warn if not provided to alert the user.
validation_sample_rate: Positive or Negative Integer. Number of batches to sample from the
validation set per iteration of the LRFinder. Larger number of
samples will reduce the variance but will take longer time to execute
per batch.
If positive, will sample that many batches from the validation dataset
If negative, will use the entire validation dataset
stopping_criterion_factor: Integer or None. A factor which is used
to measure large increase in the loss value during training.
Since callbacks cannot stop training of a model, it will simply
stop logging the additional values from the epochs after this
stopping criterion has been met.
If None, this check will not be performed.
loss_smoothing_beta: Float. The smoothing factor for the moving
average of the loss function.
save_dir: Optional, String. If passed a directory path, the callback
will save the running loss and learning rates to two separate numpy
arrays inside this directory. If the directory in this path does not
exist, they will be created.
verbose: Whether to print the learning rate after every batch of training.
# References:
- [A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, momentum, and weight decay](https://arxiv.org/abs/1803.09820)
"""
super(LRFinder, self).__init__()
if lr_scale not in ['exp', 'linear']:
raise ValueError("`lr_scale` must be one of ['exp', 'linear']")
if validation_data is not None:
self.validation_data = validation_data
self.use_validation_set = True
if validation_sample_rate != 0:
self.validation_sample_rate = validation_sample_rate
else:
raise ValueError("`validation_sample_rate` must be a positive or negative integer other than 0")
else:
self.use_validation_set = False
self.validation_sample_rate = 0
self.num_samples = num_samples
self.batch_size = batch_size
self.initial_lr = minimum_lr
self.final_lr = maximum_lr
self.lr_scale = lr_scale
self.stopping_criterion_factor = stopping_criterion_factor
self.loss_smoothing_beta = loss_smoothing_beta
self.save_dir = save_dir
self.verbose = verbose
self.num_batches_ = num_samples // batch_size
self.current_lr_ = minimum_lr
if lr_scale == 'exp':
self.lr_multiplier_ = (maximum_lr / float(minimum_lr)) ** (
1. / float(self.num_batches_))
else:
extra_batch = int((num_samples % batch_size) != 0)
self.lr_multiplier_ = np.linspace(
minimum_lr, maximum_lr, num=self.num_batches_ + extra_batch)
# If negative, use entire validation set
if self.validation_sample_rate < 0:
self.validation_sample_rate = self.validation_data[0].shape[0] // batch_size
self.current_batch_ = 0
self.current_epoch_ = 0
self.best_loss_ = 1e6
self.running_loss_ = 0.
self.history = {}
def on_train_begin(self, logs=None):
self.current_epoch_ = 1
K.set_value(self.model.optimizer.lr, self.initial_lr)
warnings.simplefilter("ignore")
def on_epoch_begin(self, epoch, logs=None):
self.current_batch_ = 0
if self.current_epoch_ > 1:
warnings.warn(
"\n\nLearning rate finder should be used only with a single epoch. "
"Hereafter, the callback will not measure the losses.\n\n")
def on_batch_begin(self, batch, logs=None):
self.current_batch_ += 1
def on_batch_end(self, batch, logs=None):
if self.current_epoch_ > 1:
return
if self.use_validation_set:
X, Y = self.validation_data[0], self.validation_data[1]
# use `validation_sample_rate` random batches from the validation set for a fast approximation of the loss
num_samples = self.batch_size * self.validation_sample_rate
if num_samples > X.shape[0]:
num_samples = X.shape[0]
idx = np.random.choice(X.shape[0], num_samples, replace=False)
x = X[idx]
y = Y[idx]
values = self.model.evaluate(x, y, batch_size=self.batch_size, verbose=False)
loss = values[0]
else:
loss = logs['loss']
# smooth the loss with an exponential moving average and bias-correct it
self.running_loss_ = self.loss_smoothing_beta * self.running_loss_ + (
1. - self.loss_smoothing_beta) * loss
running_loss = self.running_loss_ / (
1. - self.loss_smoothing_beta**self.current_batch_)
# stop logging if loss is too large
if self.current_batch_ > 1 and self.stopping_criterion_factor is not None and (
running_loss >
self.stopping_criterion_factor * self.best_loss_):
if self.verbose:
print(" - LRFinder: Skipping iteration since loss is %d times as large as best loss (%0.4f)"
% (self.stopping_criterion_factor, self.best_loss_))
return
if running_loss < self.best_loss_ or self.current_batch_ == 1:
self.best_loss_ = running_loss
current_lr = K.get_value(self.model.optimizer.lr)
self.history.setdefault('running_loss_', []).append(running_loss)
if self.lr_scale == 'exp':
self.history.setdefault('log_lrs', []).append(np.log10(current_lr))
else:
self.history.setdefault('log_lrs', []).append(current_lr)
# compute the lr for the next batch and update the optimizer lr
if self.lr_scale == 'exp':
current_lr *= self.lr_multiplier_
else:
current_lr = self.lr_multiplier_[self.current_batch_ - 1]
K.set_value(self.model.optimizer.lr, current_lr)
# save the other metrics as well
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
if self.verbose:
if self.use_validation_set:
print(" - LRFinder: val_loss: %1.4f - lr = %1.8f " %
(values[0], current_lr))
else:
print(" - LRFinder: lr = %1.8f " % current_lr)
def on_epoch_end(self, epoch, logs=None):
if self.save_dir is not None and self.current_epoch_ <= 1:
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
losses_path = os.path.join(self.save_dir, 'losses.npy')
lrs_path = os.path.join(self.save_dir, 'lrs.npy')
np.save(losses_path, self.losses)
np.save(lrs_path, self.lrs)
if self.verbose:
print("\tLR Finder : Saved the losses and learning rate values in path : {%s}"
% (self.save_dir))
self.current_epoch_ += 1
warnings.simplefilter("default")
def plot_schedule(self, clip_beginning=None, clip_endding=None):
"""
Plots the schedule from the callback itself.
# Arguments:
clip_beginning: Integer or None. If positive integer, it will
remove the specified portion of the loss graph to remove the large
loss values in the beginning of the graph.
clip_endding: Integer or None. If negative integer, it will
remove the specified portion of the ending of the loss graph to
remove the sharp increase in the loss values at high learning rates.
"""
try:
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
except ImportError:
print(
"Matplotlib not found. Please use `pip install matplotlib` first."
)
return
if clip_beginning is not None and clip_beginning < 0:
clip_beginning = -clip_beginning
if clip_endding is not None and clip_endding > 0:
clip_endding = -clip_endding
losses = self.losses
lrs = self.lrs
if clip_beginning:
losses = losses[clip_beginning:]
lrs = lrs[clip_beginning:]
if clip_endding:
losses = losses[:clip_endding]
lrs = lrs[:clip_endding]
plt.plot(lrs, losses)
plt.title('Learning rate vs Loss')
plt.xlabel('learning rate')
plt.ylabel('loss')
plt.show()
@classmethod
def restore_schedule_from_dir(cls,
directory,
clip_beginning=None,
clip_endding=None):
"""
Loads the training history from the saved numpy files in the given directory.
# Arguments:
directory: String. Path to the directory where the serialized numpy
arrays of the loss and learning rates are saved.
clip_beginning: Integer or None. If positive integer, it will
remove the specified portion of the loss graph to remove the large
loss values in the beginning of the graph.
clip_endding: Integer or None. If negative integer, it will
remove the specified portion of the ending of the loss graph to
remove the sharp increase in the loss values at high learning rates.
Returns:
tuple of (losses, learning rates)
"""
if clip_beginning is not None and clip_beginning < 0:
clip_beginning = -clip_beginning
if clip_endding is not None and clip_endding > 0:
clip_endding = -clip_endding
losses_path = os.path.join(directory, 'losses.npy')
lrs_path = os.path.join(directory, 'lrs.npy')
if not os.path.exists(losses_path) or not os.path.exists(lrs_path):
print("%s and %s could not be found at directory : {%s}" %
(losses_path, lrs_path, directory))
losses = None
lrs = None
else:
losses = np.load(losses_path)
lrs = np.load(lrs_path)
if clip_beginning:
losses = losses[clip_beginning:]
lrs = lrs[clip_beginning:]
if clip_endding:
losses = losses[:clip_endding]
lrs = lrs[:clip_endding]
return losses, lrs
@classmethod
def plot_schedule_from_file(cls,
directory,
clip_beginning=None,
clip_endding=None):
"""
Plots the schedule from the saved numpy arrays of the loss and learning
rate values in the specified directory.
# Arguments:
directory: String. Path to the directory where the serialized numpy
arrays of the loss and learning rates are saved.
clip_beginning: Integer or None. If positive integer, it will
remove the specified portion of the loss graph to remove the large
loss values in the beginning of the graph.
clip_endding: Integer or None. If negative integer, it will
remove the specified portion of the ending of the loss graph to
remove the sharp increase in the loss values at high learning rates.
"""
try:
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
except ImportError:
print("Matplotlib not found. Please use `pip install matplotlib` first.")
return
losses, lrs = cls.restore_schedule_from_dir(
directory,
clip_beginning=clip_beginning,
clip_endding=clip_endding)
if losses is None or lrs is None:
return
else:
plt.plot(lrs, losses)
plt.title('Learning rate vs Loss')
plt.xlabel('learning rate')
plt.ylabel('loss')
plt.show()
@property
def lrs(self):
return np.array(self.history['log_lrs'])
@property
def losses(self):
return np.array(self.history['running_loss_'])
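# Minimal usage sketch (dataset size, batch size and paths are assumptions for
# illustration; run for exactly one epoch, as the class docstring requires):
#
# lr_finder = LRFinder(num_samples=50000, batch_size=128, minimum_lr=1e-5,
#                      maximum_lr=10., lr_scale='exp', save_dir='lr_find/')
# model.fit(x_train, y_train, batch_size=128, epochs=1, callbacks=[lr_finder])
# lr_finder.plot_schedule()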
|
drain/test_drain.py | bit0fun/plugins | 173 | 12614047 | <filename>drain/test_drain.py
from flaky import flaky
from pyln.testing.fixtures import * # noqa: F401,F403
from pyln.testing.utils import DEVELOPER
from pyln.client import RpcError
from .utils import get_ours, get_theirs, wait_ours, wait_for_all_htlcs
import os
import unittest
import pytest
plugin_path = os.path.join(os.path.dirname(__file__), "drain.py")
pluginopt = {'plugin': plugin_path}
EXPERIMENTAL_FEATURES = int(os.environ.get("EXPERIMENTAL_FEATURES", "0"))
def test_plugin_starts(node_factory):
l1 = node_factory.get_node()
# Test dynamically
l1.rpc.plugin_start(plugin_path)
l1.rpc.plugin_stop(plugin_path)
l1.rpc.plugin_start(plugin_path)
l1.stop()
# Then statically
l1.daemon.opts["plugin"] = plugin_path
l1.start()
@flaky
@unittest.skipIf(not DEVELOPER, "slow gossip, needs DEVELOPER=1")
def test_drain_and_refill(node_factory, bitcoind):
# Scenario: first drain then refill
#
# SETUP: A basic circular setup to run drain and fill tests
#
# l1---l2
# | |
# l4---l3
#
l1, l2, l3, l4 = node_factory.line_graph(4, opts=pluginopt)
l4.rpc.connect(l1.info['id'], 'localhost', l1.port)
nodes = [l1, l2, l3, l4]
scid12 = l1.get_channel_scid(l2)
scid23 = l2.get_channel_scid(l3)
scid34 = l3.get_channel_scid(l4)
l4.openchannel(l1, 10**6)
scid41 = l4.get_channel_scid(l1)
# disable fees to make circular line graph tests a lot easier
for n in nodes:
n.rpc.setchannelfee('all', 0, 0)
# wait for each others gossip
bitcoind.generate_block(6)
for n in nodes:
for scid in [scid12, scid23, scid34, scid41]:
n.wait_channel_active(scid)
# do some draining and filling
ours_before = get_ours(l1, scid12)
assert(l1.rpc.drain(scid12))
wait_for_all_htlcs(nodes)
assert(get_ours(l1, scid12) < ours_before * 0.05) # account some reserves
# refill again with 100% should not be possible in a line_graph circle,
# this is not because of ln routing fees (turned off) but because of
# HTLC commitment tx fee margin that applies for the funder.
with pytest.raises(RpcError, match=r"Outgoing capacity problem"):
l1.rpc.fill(scid12)
# If we only go for 99.9% or exactly 9741msat less, this must work.
theirs_before = get_theirs(l1, scid12)
assert(l1.rpc.fill(scid12, 99.9))
wait_for_all_htlcs(nodes)
assert(get_theirs(l1, scid12) < theirs_before * 0.05) # account some reserves
@unittest.skipIf(not DEVELOPER, "slow gossip, needs DEVELOPER=1")
def test_fill_and_drain(node_factory, bitcoind):
# Scenario: first fill of an empty channel and drain afterwards.
#
# SETUP: A basic circular setup to run drain and fill tests#
#
# l1---l2
# | |
# l4---l3
#
l1, l2, l3, l4 = node_factory.line_graph(4, opts=pluginopt)
l4.rpc.connect(l1.info['id'], 'localhost', l1.port)
nodes = [l1, l2, l3, l4]
scid12 = l1.get_channel_scid(l2)
scid23 = l2.get_channel_scid(l3)
scid34 = l3.get_channel_scid(l4)
l4.openchannel(l1, 10**6)
scid41 = l4.get_channel_scid(l1)
# disable fees to make circular line graph tests a lot easier
for n in nodes:
n.rpc.setchannelfee('all', 0, 0)
# wait for each others gossip
bitcoind.generate_block(6)
for n in nodes:
for scid in [scid12, scid23, scid34, scid41]:
n.wait_channel_active(scid)
# for l2 to fill scid12, it needs to send on scid23, where its funder
# commit tx fee applies, so doing 99.9% or exactly 9741msat less must work.
ours_before = get_ours(l1, scid12)
assert(l2.rpc.fill(scid12, 99.9))
wait_for_all_htlcs(nodes)
assert(get_ours(l1, scid12) < ours_before * 0.05) # account some reserves
# note: fees are disabled, drain 100% must work,
# as fundee doesnt pay commit tx fee
theirs_before = get_theirs(l1, scid12)
l2.rpc.drain(scid12)
wait_for_all_htlcs(nodes)
assert(get_theirs(l1, scid12) < theirs_before * 0.05) # account some reserves
@unittest.skipIf(not DEVELOPER, "slow gossip, needs DEVELOPER=1")
@unittest.skipIf(EXPERIMENTAL_FEATURES, "temporarily disabled since amounts seem to change")
def test_setbalance(node_factory, bitcoind):
# SETUP: a basic circular setup to run setbalance tests
#
# l1---l2
# | |
# l4---l3
#
l1, l2, l3, l4 = node_factory.line_graph(4, opts=pluginopt)
l4.rpc.connect(l1.info['id'], 'localhost', l1.port)
nodes = [l1, l2, l3, l4]
scid12 = l1.get_channel_scid(l2)
scid23 = l2.get_channel_scid(l3)
scid34 = l3.get_channel_scid(l4)
l4.openchannel(l1, 10**6)
scid41 = l4.get_channel_scid(l1)
# wait for each others gossip
bitcoind.generate_block(6)
for n in nodes:
for scid in [scid12, scid23, scid34, scid41]:
n.wait_channel_active(scid)
# test auto 50/50 balancing
ours_before = get_ours(l1, scid12)
assert(l1.rpc.setbalance(scid12))
ours_after = wait_ours(l1, scid12, ours_before)
# TODO: can we fix/change/improve this to be more precise?
assert(ours_after < ours_before * 0.52)
assert(ours_after > ours_before * 0.48)
# set and test some 70/30 specific balancing
assert(l1.rpc.setbalance(scid12, 30))
wait_for_all_htlcs(nodes)
ours_after = get_ours(l1, scid12)
assert(ours_after < ours_before * 0.34)
assert(ours_after > ours_before * 0.27)
assert(l1.rpc.setbalance(scid12, 70))
wait_for_all_htlcs(nodes)
ours_after = get_ours(l1, scid12)
assert(ours_after < ours_before * 0.73)
assert(ours_after > ours_before * 0.67)
# helper function that balances incoming capacity, so autodetection edge case
# testing gets a lot simpler.
def balance(node, node_a, scid_a, node_b, scid_b, node_c):
msat_a = get_ours(node_a, scid_a)
msat_b = get_ours(node_b, scid_b)
if (msat_a > msat_b):
node.pay(node_b, msat_a - msat_b)
node_b.pay(node_c, msat_a - msat_b)
if (msat_b > msat_a):
node.pay(node_a, msat_b - msat_a)
node_a.pay(node_c, msat_b - msat_a)
wait_for_all_htlcs([node, node_a, node_b])
@unittest.skipIf(not DEVELOPER, "slow gossip, needs DEVELOPER=1")
def test_drain_chunks(node_factory, bitcoind):
# SETUP: a small mesh that enables testing chunks
#
# l2-- --l3
# | \ / |
# | l1 |
# | || |
# | || |
# o----l4----o
#
# In such a scenario we can distribute the funds in such a way
# that only correct chunking allows rebalancing for l1
#
# FUNDING:
# scid12: l1 -> l2 10**6
# scid13: l1 -> l3 10**6
# scid24: l2 -> l4 10**6
# scid34: l3 -> l4 10**6
# scid41: l4 -> l1 11**6 (~1.750.000 sat)
l1, l2, l3, l4 = node_factory.get_nodes(4, opts=pluginopt)
l1.connect(l2)
l1.connect(l3)
l2.connect(l4)
l3.connect(l4)
l4.connect(l1)
l1.openchannel(l2, 10**6)
l1.openchannel(l3, 10**6)
l2.openchannel(l4, 10**6)
l3.openchannel(l4, 10**6)
l4.openchannel(l1, 11**6)
scid12 = l1.get_channel_scid(l2)
scid13 = l1.get_channel_scid(l3)
scid24 = l2.get_channel_scid(l4)
scid34 = l3.get_channel_scid(l4)
scid41 = l4.get_channel_scid(l1)
nodes = [l1, l2, l3, l4]
scids = [scid12, scid13, scid24, scid34, scid41]
# wait for each others gossip
bitcoind.generate_block(6)
for n in nodes:
for scid in scids:
n.wait_channel_active(scid)
amount = get_ours(l4, scid41)
# drain in one chunk should be impossible and detected before doing anything
with pytest.raises(RpcError, match=r"Selected chunks \(1\) will not fit incoming channel capacities."):
l4.rpc.drain(scid41, 100, 1)
# using 3 chunks should also not be possible, as it would overfill one of the incoming channels
with pytest.raises(RpcError, match=r"Selected chunks \(3\) will not fit incoming channel capacities."):
l4.rpc.drain(scid41, 100, 3)
# test chunk autodetection and even chunks 2,4,6
assert(l4.rpc.drain(scid41))
wait_for_all_htlcs(nodes)
assert(get_ours(l1, scid41) > amount * 0.9)
balance(l1, l2, scid12, l3, scid13, l4)
assert(l1.rpc.drain(scid41))
wait_for_all_htlcs(nodes)
assert(get_theirs(l1, scid41) > amount * 0.9)
assert(l4.rpc.drain(scid41, 100, 2))
wait_for_all_htlcs(nodes)
assert(get_ours(l1, scid41) > amount * 0.9)
assert(l1.rpc.drain(scid41, 100, 2))
wait_for_all_htlcs(nodes)
assert(get_theirs(l1, scid41) > amount * 0.9)
assert(l4.rpc.drain(scid41, 100, 4))
wait_for_all_htlcs(nodes)
assert(get_ours(l1, scid41) > amount * 0.9)
assert(l1.rpc.drain(scid41, 100, 4))
wait_for_all_htlcs(nodes)
assert(get_theirs(l1, scid41) > amount * 0.9)
assert(l4.rpc.drain(scid41, 100, 6))
wait_for_all_htlcs(nodes)
assert(get_ours(l1, scid41) > amount * 0.9)
assert(l1.rpc.drain(scid41, 100, 6))
wait_for_all_htlcs(nodes)
assert(get_theirs(l1, scid41) > amount * 0.9)
@unittest.skipIf(not DEVELOPER, "slow gossip, needs DEVELOPER=1")
def test_fill_chunks(node_factory, bitcoind):
# SETUP: a small mesh that enables testing chunks
#
# l2-- --l3
# | \ / |
# | l1 |
# | || |
# | || |
# o----l4----o
#
# In such a scenario we can distribute the funds in such a way
# that only correct chunking allows rebalancing for l1
#
# FUNDING:
# scid12: l1 -> l2 10**6
# scid13: l1 -> l3 10**6
# scid24: l2 -> l4 10**6
# scid34: l3 -> l4 10**6
# scid41: l4 -> l1 11**6 (~1.750.000 sat)
l1, l2, l3, l4 = node_factory.get_nodes(4, opts=pluginopt)
l1.connect(l2)
l1.connect(l3)
l2.connect(l4)
l3.connect(l4)
l4.connect(l1)
l1.openchannel(l2, 10**6)
l1.openchannel(l3, 10**6)
l2.openchannel(l4, 10**6)
l3.openchannel(l4, 10**6)
l4.openchannel(l1, 11**6)
scid12 = l1.get_channel_scid(l2)
scid13 = l1.get_channel_scid(l3)
scid24 = l2.get_channel_scid(l4)
scid34 = l3.get_channel_scid(l4)
scid41 = l4.get_channel_scid(l1)
nodes = [l1, l2, l3, l4]
scids = [scid12, scid13, scid24, scid34, scid41]
# wait for each others gossip
bitcoind.generate_block(6)
for n in nodes:
for scid in scids:
n.wait_channel_active(scid)
amount = get_ours(l4, scid41)
# fill in one chunk should be impossible and detected before doing anything
with pytest.raises(RpcError, match=r"Selected chunks \(1\) will not fit outgoing channel capacities."):
l1.rpc.fill(scid41, 100, 1)
# using 3 chunks should also not be possible, as it would overdrain one of the outgoing channels
with pytest.raises(RpcError, match=r"Selected chunks \(3\) will not fit outgoing channel capacities."):
print(l1.rpc.fill(scid41, 100, 3))
# test chunk autodetection and even chunks 2,4,6
assert(l1.rpc.fill(scid41))
wait_for_all_htlcs(nodes)
assert(get_ours(l1, scid41) > amount * 0.9)
balance(l1, l2, scid12, l3, scid13, l4)
assert(l4.rpc.fill(scid41))
wait_for_all_htlcs(nodes)
assert(get_theirs(l1, scid41) > amount * 0.9)
assert(l1.rpc.fill(scid41, 100, 2))
wait_for_all_htlcs(nodes)
assert(get_ours(l1, scid41) > amount * 0.9)
assert(l4.rpc.fill(scid41, 100, 2))
wait_for_all_htlcs(nodes)
assert(get_theirs(l1, scid41) > amount * 0.9)
assert(l1.rpc.fill(scid41, 100, 4))
wait_for_all_htlcs(nodes)
assert(get_ours(l1, scid41) > amount * 0.9)
assert(l4.rpc.fill(scid41, 100, 4))
wait_for_all_htlcs(nodes)
assert(get_theirs(l1, scid41) > amount * 0.9)
|
Bot/Target.py | mtranhoangson/bot | 199 | 12614052 | <reponame>mtranhoangson/bot<gh_stars>100-1000
from collections import OrderedDict
from datetime import datetime
from Bot.CustomSerializable import CustomSerializable
from Bot.TradeEnums import OrderStatus
from Bot.Value import Value
from Utils import Utils
class Target(CustomSerializable):
def __init__(self, price=0, vol='100%', **kvargs):
self.vol = Value(vol)
self.price = PriceHelper.parse_price(price)
self.id = kvargs.get('id')
self.date = kvargs.get('date')
self.status = OrderStatus(kvargs.get('status', OrderStatus.NEW.name).lower())
sl_input = kvargs.get('sl', 0)
self.sl = float(sl_input if sl_input else 0)
self.smart = self.s2b(kvargs.get('smart', None))
self.parent_smart = kvargs.get('parent_smart', None)
self.best_price = float(kvargs.get('best_price', 0))
cv = kvargs.get('calculated_volume', None)
self.calculated_volume = float(cv) if cv else None
def s2b(self, s):
return Utils.s2b(s)
def is_completed(self):
return self.status.is_completed()
def is_new(self):
return self.status.is_new()
def is_active(self):
return self.status.is_active()
def has_id(self):
return self.id is not None
# def set_completed(self, date_str=datetime.now().replace(microsecond=0).isoformat(' ')):
def set_completed(self, id=None, date=datetime.now()):
self.status = OrderStatus.COMPLETED
self.date = date
if id:
self.id = id
def set_canceled(self):
self.status = OrderStatus.NEW
self.id = None
self.calculated_volume = None
def set_active(self, id=None):
self.status = OrderStatus.ACTIVE
if id:
self.id = id
def has_custom_stop(self):
return self.sl != 0
def custom_stop(self):
return self.sl
def is_stoploss_target(self):
return False
def is_exit_target(self):
return False
def is_entry_target(self):
return False
def is_smart(self):
if self.parent_smart is not None:
if self.smart is not None:
return self.smart
return self.parent_smart
return False if self.smart is None else self.smart
def __str__(self):
        desc = ('{}:{:.08f}@{}; Smart:{}' if PriceHelper.is_float_price(self.price) else '{}:{}@{}; Smart:{}').format(
            self.__class__.__name__, self.price, self.vol, self.is_smart())
if self.calculated_volume and self.vol.is_rel():
desc += '; Abs Vol:{:.08f}'.format(self.calculated_volume)
desc += ' ;Stoploss:{}'.format(self.sl)
desc += ' ;Status:{}'.format(self.status)
desc += ' ;ID:{}'.format(self.id)
return desc
# '(abs vol: {:.08f})'.format(self.calculated_volume) if self.vol.is_rel() and self.calculated_volume else ''
def __repr__(self):
return self.__str__()
def serializable_dict(self):
d = OrderedDict()
if not self.status.is_new():
d['status'] = self.status
if self.id:
d['id'] = self.id
if self.date:
d['date'] = self.date
if PriceHelper.is_float_price(self.price):
d['price'] = self.format_float(self.price)
else:
d['price'] = self.price
d['vol'] = self.vol
if self.smart is not None:
d['smart'] = self.smart
if self.sl != 0:
d['sl'] = self.format_float(self.sl)
if self.best_price > 0:
d['best_price'] = self.format_float(self.best_price)
if self.calculated_volume:
d['calculated_volume'] = self.format_float(self.calculated_volume)
return d
class PriceHelper:
CURR_PRICE_TOKEN = 'cp'
def __init__(self, is_digit, price_val, operand, operation_val):
self.is_digit = is_digit
self.price_val = price_val
self.operand = operand
self.operation_val: Value = operation_val
def get_value(self, ref_price):
if self.is_digit:
return self.price_val
if str(self.price_val).lower() == PriceHelper.CURR_PRICE_TOKEN:
if not self.operand:
return ref_price
if self.operand in ['+', '-']:
return round(ref_price + self.operation_val.get_val(ref_price) * (1 if self.operand == '+' else -1), 8)
else:
raise SyntaxError('Operation "{}" is unsupported. Use only + or -'.format(self.operand))
raise SyntaxError('Reference price "{}" is unsupported. Use only "CP"'.format(str(self.price_val)))
@classmethod
def parse_price(cls, price_str):
try:
return float(price_str)
except ValueError:
return price_str
@classmethod
def is_float_price(cls, price_str):
try:
float(price_str)
return True
except ValueError:
return False
@classmethod
def create_price_helper(cls, price_str):
#Issue 21 float parsing
s = str(price_str).strip().lower().replace(',', '.')
if PriceHelper.is_float_price(s):
return PriceHelper(True, float(s), None, None)
token = s
operand = None
val = None
if s.startswith(PriceHelper.CURR_PRICE_TOKEN):
token = PriceHelper.CURR_PRICE_TOKEN
s = s[len(PriceHelper.CURR_PRICE_TOKEN):]
if len(s) > 0 and s[0] in ['+', '-']:
operand = s[0]
s = s[1:]
if len(s) > 0:
val = Value(s)
return PriceHelper(False, token, operand, val)
class ExitTarget(Target):
def __init__(self, **kvargs):
super().__init__(**kvargs)
def is_exit_target(self):
return True
class StopLossTarget(Target):
def __init__(self, **kvargs):
super().__init__(**kvargs)
def is_stoploss_target(self):
return True
class EntryTarget(Target):
def __init__(self, **kvargs):
super().__init__(**kvargs)
def is_entry_target(self):
return True
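# Illustrative self-check (not part of the original module). It only exercises
# PriceHelper, which is fully defined above; the numbers are made up, and the
# 99.0 result assumes Value('1%') resolves to 1% of the reference price.
if __name__ == '__main__':
    rel = PriceHelper.create_price_helper('cp-1%')
    print(rel.get_value(ref_price=100.0))  # expected ~99.0: current price minus 1%
    absolute = PriceHelper.create_price_helper('0,025')  # comma accepted as decimal separator
    print(absolute.get_value(ref_price=0))  # 0.025, independent of the reference price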
|
investpy/etfs.py | julianogv/investpy2 | 985 | 12614071 | <filename>investpy/etfs.py<gh_stars>100-1000
# Copyright 2018-2021 <NAME>, alvarobartt @ GitHub
# See LICENSE for details.
from datetime import datetime, date, timedelta
import pytz
import json
from random import randint
import warnings
import pandas as pd
import pkg_resources
import requests
from unidecode import unidecode
from lxml.html import fromstring
from .utils.extra import random_user_agent
from .utils.data import Data
from .data.etfs_data import etfs_as_df, etfs_as_list, etfs_as_dict
from .data.etfs_data import etf_countries_as_list
def get_etfs(country=None):
"""
This function retrieves all the available etfs indexed on Investing.com, already stored on `etfs.csv`.
    This function also allows the users to specify which country they want to retrieve data from, or whether they
    want to retrieve it from every listed country; if no country is specified, all the indexed etfs will be returned.
Args:
country (:obj:`str`, optional): name of the country to retrieve all its available etfs from.
Returns:
:obj:`pandas.DataFrame` - etfs:
The resulting :obj:`pandas.DataFrame` contains all the etfs basic information stored on `etfs.csv`, since it
was previously retrieved by investpy. Unless the country is specified, all the available etfs indexed on
            Investing.com are returned, but if it is specified, just the etfs from that country are returned.
In the case that the file reading of `etfs.csv` or the retrieval process from Investing.com was
successfully completed, the resulting :obj:`pandas.DataFrame` will look like::
country | name | full_name | symbol | isin | asset_class | currency | stock_exchange | def_stock_exchange
--------|------|-----------|--------|------|-------------|----------|----------------|--------------------
xxxxxxx | xxxx | xxxxxxxxx | xxxxxx | xxxx | xxxxxxxxxxx | xxxxxxxx | xxxxxxxxxxxxxx | xxxxxxxxxxxxxxxxxx
Raises:
ValueError: raised when any of the input arguments is not valid.
FileNotFoundError: raised when `etfs.csv` file was not found.
IOError: raised when `etfs.csv` file is missing.
"""
return etfs_as_df(country=country)
def get_etfs_list(country=None):
"""
This function retrieves all the available etfs indexed on Investing.com, already stored on `etfs.csv`.
    This function also allows the users to specify which country they want to retrieve data from, or whether they
    want to retrieve it from every listed country; either way, a listing of etfs will be returned. This function
helps the user to get to know which etfs are available on Investing.com.
Args:
country (:obj:`str`, optional): name of the country to retrieve all its available etfs from.
Returns:
:obj:`list` - etfs_list:
The resulting :obj:`list` contains the retrieved data from the `etfs.csv` file, which is
a listing of the names of the etfs listed on Investing.com, which is the input for data
retrieval functions as the name of the etf to retrieve data from needs to be specified.
In case the listing was successfully retrieved, the :obj:`list` will look like::
etfs_list = [
'Betashares U.S. Equities Strong Bear Currency Hedg',
'Betashares Active Australian Hybrids',
'Australian High Interest Cash', ...
]
Raises:
ValueError: raised when any of the input arguments is not valid.
FileNotFoundError: raised when `etfs.csv` file was not found.
IOError: raised when `etfs.csv` file is missing.
"""
return etfs_as_list(country=country)
def get_etfs_dict(country=None, columns=None, as_json=False):
"""
This function retrieves all the available etfs indexed on Investing.com, already stored on `etfs.csv`.
    This function also allows the user to specify which country they want to retrieve data from,
or from every listed country; the columns which the user wants to be included on the resulting
:obj:`dict`; and the output of the function will either be a :obj:`dict` or a :obj:`json`.
Args:
country (:obj:`str`, optional): name of the country to retrieve all its available etfs from.
columns (:obj:`list`, optional):
names of the columns of the etf data to retrieve <country, name, full_name, symbol, isin, asset_class,
currency, stock_exchange>
as_json (:obj:`bool`, optional):
value to determine the format of the output data which can either be a :obj:`dict` or a :obj:`json`.
Returns:
:obj:`dict` or :obj:`json` - etfs_dict:
The resulting :obj:`dict` contains the retrieved data if found, if not, the corresponding
fields are filled with `None` values.
In case the information was successfully retrieved, the :obj:`dict` will look like::
etfs_dict = {
"country": country,
"name": name,
"full_name": full_name,
"symbol": symbol,
"isin": isin,
"asset_class": asset_class,
"currency": currency,
"stock_exchange": stock_exchange,
"def_stock_exchange": def_stock_exchange
}
Raises:
ValueError: raised when any of the input arguments is not valid.
FileNotFoundError: raised when `etfs.csv` file was not found.
IOError: raised when `etfs.csv` file is missing.
"""
return etfs_as_dict(country=country, columns=columns, as_json=as_json)
def get_etf_countries():
"""
This function retrieves all the available countries to retrieve etfs from, as the listed
countries are the ones indexed on Investing.com. The purpose of this function is to list
the countries which have available etfs according to Investing.com data, so to ease the
etf retrieval process of a particular country.
Returns:
:obj:`list` - countries:
The resulting :obj:`list` contains all the countries listed on Investing.com with
etfs available to retrieve data from.
In the case that the file reading of `etf_countries.csv` which contains the names and codes of the countries
with etfs was successfully completed, the resulting :obj:`list` will look like::
countries = ['australia', 'austria', 'belgium', 'brazil', ...]
Raises:
FileNotFoundError: raised when `etf_countries.csv` file was not found.
"""
return etf_countries_as_list()
def get_etf_recent_data(etf, country, stock_exchange=None, as_json=False, order='ascending', interval='Daily'):
"""
This function retrieves recent historical data from the introduced `etf` from Investing
via Web Scraping. The resulting data can it either be stored in a :obj:`pandas.DataFrame` or in a
:obj:`json` file, with `ascending` or `descending` order.
Args:
etf (:obj:`str`): name of the etf to retrieve recent historical data from.
country (:obj:`str`): name of the country from where the etf is.
as_json (:obj:`bool`, optional):
optional argument to determine the format of the output data (:obj:`pandas.DataFrame` or :obj:`json`).
order (:obj:`str`, optional):
optional argument to define the order of the retrieved data (`ascending`, `asc` or `descending`, `desc`).
interval (:obj:`str`, optional):
value to define the historical data interval to retrieve, by default `Daily`, but it can also be `Weekly` or `Monthly`.
Returns:
:obj:`pandas.DataFrame` or :obj:`json`:
The function returns either a :obj:`pandas.DataFrame` or a :obj:`json` file containing the retrieved
recent data from the specified etf via argument. The dataset contains the open, high, low and close
values for the selected etf on market days.
            The returned data in case we use default arguments will look like::
Date || Open | High | Low | Close | Volume | Currency | Exchange
-----||------|------|-----|-------|--------|----------|---------
xxxx || xxxx | xxxx | xxx | xxxxx | xxxxxx | xxxxxxxx | xxxxxxxx
but if we define `as_json=True`, then the output will be::
{
name: name,
recent: [
{
date: dd/mm/yyyy,
open: x,
high: x,
low: x,
close: x,
volume: x,
currency: x,
exchange: x
},
...
]
}
Raises:
ValueError: raised whenever any of the arguments is not valid or errored.
IOError: raised if etfs object/file not found or unable to retrieve.
        RuntimeError: raised if the introduced etf does not match any of the indexed ones.
ConnectionError: raised if GET requests does not return 200 status code.
IndexError: raised if etf information was unavailable or not found.
Examples:
>>> data = investpy.get_etf_recent_data(etf='bbva accion dj eurostoxx 50', country='spain')
>>> data.head()
Open High Low Close Volume Currency Exchange
Date
2020-04-09 28.890 29.155 28.40 28.945 20651 EUR Madrid
2020-04-14 29.345 30.235 28.94 29.280 14709 EUR Madrid
2020-04-15 29.125 29.125 28.11 28.130 14344 EUR Madrid
2020-04-16 28.505 28.590 28.08 28.225 17662 EUR Madrid
2020-04-17 29.000 29.325 28.80 28.895 19578 EUR Madrid
"""
if not etf:
raise ValueError("ERR#0031: etf parameter is mandatory and must be a valid etf name.")
if not isinstance(etf, str):
raise ValueError("ERR#0030: etf argument needs to be a str.")
if country is None:
raise ValueError("ERR#0039: country can not be None, it should be a str.")
if country is not None and not isinstance(country, str):
raise ValueError("ERR#0025: specified country value not valid.")
if stock_exchange is not None and not isinstance(stock_exchange, str):
raise ValueError("ERR#0125: specified stock_exchange value is not valid, it should be a str.")
if not isinstance(as_json, bool):
raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
if order not in ['ascending', 'asc', 'descending', 'desc']:
raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.")
if not interval:
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
if not isinstance(interval, str):
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
interval = interval.lower()
if interval not in ['daily', 'weekly', 'monthly']:
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'etfs.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
etfs = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path), keep_default_na=False)
else:
raise FileNotFoundError("ERR#0058: etfs file not found or errored.")
if etfs is None:
raise IOError("ERR#0009: etfs object not found or unable to retrieve.")
country = unidecode(country.strip().lower())
if country not in get_etf_countries():
raise RuntimeError("ERR#0034: country " + country + " not found, check if it is correct.")
etf = unidecode(etf.strip().lower())
def_exchange = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['def_stock_exchange'] == True)).idxmax()]
etfs = etfs[etfs['country'].str.lower() == country]
if etf not in list(etfs['name'].apply(unidecode).str.lower()):
raise RuntimeError("ERR#0019: etf " + etf + " not found, check if it is correct.")
etfs = etfs[etfs['name'].apply(unidecode).str.lower() == etf]
if def_exchange['country'] != country:
warnings.warn(
'Selected country does not contain the default stock exchange of the introduced ETF. ' + \
'Default country is: \"' + def_exchange['country'] + '\" and default stock_exchange: \"' + \
def_exchange['stock_exchange'] + '\".',
Warning
)
if stock_exchange:
if stock_exchange.lower() not in etfs['stock_exchange'].str.lower():
raise ValueError("ERR#0126: introduced stock_exchange value does not exists, leave this parameter to None to use default stock_exchange.")
etf_exchange = etfs.loc[(etfs['stock_exchange'].str.lower() == stock_exchange.lower()).idxmax(), 'stock_exchange']
else:
found_etfs = etfs[etfs['name'].apply(unidecode).str.lower() == etf]
if len(found_etfs) > 1:
warnings.warn(
'Note that the displayed information can differ depending on the stock exchange. Available stock_exchange' + \
' values for \"' + country + '\" are: \"' + '\", \"'.join(found_etfs['stock_exchange']) + '\".',
Warning
)
del found_etfs
etf_exchange = etfs.loc[(etfs['name'].apply(unidecode).str.lower() == etf).idxmax(), 'stock_exchange']
else:
if stock_exchange:
if stock_exchange.lower() not in etfs['stock_exchange'].str.lower():
raise ValueError("ERR#0126: introduced stock_exchange value does not exists, leave this parameter to None to use default stock_exchange.")
if def_exchange['stock_exchange'].lower() != stock_exchange.lower():
warnings.warn(
'Selected stock_exchange is not the default one of the introduced ETF. ' + \
'Default country is: \"' + def_exchange['country'] + '\" and default stock_exchange: \"' + \
def_exchange['stock_exchange'].lower() + '\".',
Warning
)
etf_exchange = etfs.loc[(etfs['stock_exchange'].str.lower() == stock_exchange.lower()).idxmax(), 'stock_exchange']
else:
etf_exchange = def_exchange['stock_exchange']
symbol = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'symbol']
id_ = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'id']
name = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'name']
etf_currency = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'currency']
header = symbol + ' Historical Data'
head = {
"User-Agent": random_user_agent(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
}
params = {
"curr_id": id_,
"smlID": str(randint(1000000, 99999999)),
"header": header,
"interval_sec": interval.capitalize(),
"sort_col": "date",
"sort_ord": "DESC",
"action": "historical_data"
}
url = "https://www.investing.com/instruments/HistoricalDataAjax"
req = requests.post(url, headers=head, data=params)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
root_ = fromstring(req.text)
path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr")
result = list()
if path_:
for elements_ in path_:
if elements_.xpath(".//td")[0].text_content() == 'No results found':
raise IndexError("ERR#0010: etf information unavailable or not found.")
info = []
for nested_ in elements_.xpath(".//td"):
info.append(nested_.get('data-real-value'))
etf_date = datetime.strptime(str(datetime.fromtimestamp(int(info[0]), tz=pytz.timezone('GMT')).date()), '%Y-%m-%d')
etf_close = float(info[1].replace(',', ''))
etf_open = float(info[2].replace(',', ''))
etf_high = float(info[3].replace(',', ''))
etf_low = float(info[4].replace(',', ''))
etf_volume = int(info[5])
result.insert(len(result),
Data(etf_date, etf_open, etf_high, etf_low, etf_close, etf_volume, etf_currency, etf_exchange))
if order in ['ascending', 'asc']:
result = result[::-1]
elif order in ['descending', 'desc']:
result = result
if as_json is True:
json_ = {
'name': name,
'recent':
[value.etf_as_json() for value in result]
}
return json.dumps(json_, sort_keys=False)
elif as_json is False:
df = pd.DataFrame.from_records([value.etf_to_dict() for value in result])
df.set_index('Date', inplace=True)
return df
else:
raise RuntimeError("ERR#0004: data retrieval error while scraping.")
def get_etf_historical_data(etf, country, from_date, to_date, stock_exchange=None, as_json=False, order='ascending', interval='Daily'):
"""
This function retrieves historical data from the introduced `etf` from Investing.com via Web Scraping on the
introduced date range. The resulting data can it either be stored in a :obj:`pandas.DataFrame` or in a
:obj:`json` object with `ascending` or `descending` order.
Args:
etf (:obj:`str`): name of the etf to retrieve recent historical data from.
country (:obj:`str`): name of the country from where the etf is.
from_date (:obj:`str`): date as `str` formatted as `dd/mm/yyyy`, from where data is going to be retrieved.
to_date (:obj:`str`): date as `str` formatted as `dd/mm/yyyy`, until where data is going to be retrieved.
as_json (:obj:`bool`, optional):
to determine the format of the output data (:obj:`pandas.DataFrame` or :obj:`json`).
order (:obj:`str`, optional):
optional argument to define the order of the retrieved data (`ascending`, `asc` or `descending`, `desc`).
interval (:obj:`str`, optional):
value to define the historical data interval to retrieve, by default `Daily`, but it can also be `Weekly` or `Monthly`.
Returns:
:obj:`pandas.DataFrame` or :obj:`json`:
The function returns either a :obj:`pandas.DataFrame` or a :obj:`json` file containing the retrieved
recent data from the specified etf via argument. The dataset contains the open, high, low and close
values for the selected etf on market days.
            The returned data in case we use default arguments will look like::
Date || Open | High | Low | Close | Volume | Currency | Exchange
-----||------|------|-----|-------|--------|----------|---------
xxxx || xxxx | xxxx | xxx | xxxxx | xxxxxx | xxxxxxxx | xxxxxxxx
but if we define `as_json=True`, then the output will be::
{
name: name,
historical: [
{
date: dd/mm/yyyy,
open: x,
high: x,
low: x,
close: x,
volume: x,
currency: x,
exchange: x
},
...
]
}
Raises:
ValueError: raised whenever any of the arguments is not valid or errored.
IOError: raised if etfs object/file not found or unable to retrieve.
        RuntimeError: raised if the introduced etf does not match any of the indexed ones.
ConnectionError: raised if GET requests does not return 200 status code.
IndexError: raised if etf information was unavailable or not found.
Examples:
>>> data = investpy.get_etf_historical_data(etf='bbva accion dj eurostoxx 50', country='spain', from_date='01/01/2010', to_date='01/01/2019')
>>> data.head()
Open High Low Close Volume Currency Exchange
Date
2011-12-07 23.70 23.70 23.70 23.62 2000 EUR Madrid
2011-12-08 23.53 23.60 23.15 23.04 599 EUR Madrid
2011-12-09 23.36 23.60 23.36 23.62 2379 EUR Madrid
2011-12-12 23.15 23.26 23.00 22.88 10695 EUR Madrid
2011-12-13 22.88 22.88 22.88 22.80 15 EUR Madrid
"""
if not etf:
raise ValueError("ERR#0031: etf parameter is mandatory and must be a valid etf name.")
if not isinstance(etf, str):
raise ValueError("ERR#0030: etf argument needs to be a str.")
if country is None:
raise ValueError("ERR#0039: country can not be None, it should be a str.")
if country is not None and not isinstance(country, str):
raise ValueError("ERR#0025: specified country value not valid.")
if stock_exchange is not None and not isinstance(stock_exchange, str):
raise ValueError("ERR#0125: specified stock_exchange value is not valid, it should be a str.")
if not isinstance(as_json, bool):
raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
if order not in ['ascending', 'asc', 'descending', 'desc']:
raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.")
if not interval:
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
if not isinstance(interval, str):
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
interval = interval.lower()
if interval not in ['daily', 'weekly', 'monthly']:
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
try:
datetime.strptime(from_date, '%d/%m/%Y')
except ValueError:
raise ValueError("ERR#0011: incorrect data format, it should be 'dd/mm/yyyy'.")
try:
datetime.strptime(to_date, '%d/%m/%Y')
except ValueError:
raise ValueError("ERR#0011: incorrect data format, it should be 'dd/mm/yyyy'.")
start_date = datetime.strptime(from_date, '%d/%m/%Y')
end_date = datetime.strptime(to_date, '%d/%m/%Y')
if start_date >= end_date:
raise ValueError("ERR#0032: to_date should be greater than from_date, both formatted as 'dd/mm/yyyy'.")
date_interval = {
'intervals': [],
}
flag = True
while flag is True:
diff = end_date.year - start_date.year
if diff > 19:
obj = {
'start': start_date.strftime('%m/%d/%Y'),
'end': start_date.replace(year=start_date.year + 19).strftime('%m/%d/%Y'),
}
date_interval['intervals'].append(obj)
start_date = start_date.replace(year=start_date.year + 19) + timedelta(days=1)
else:
obj = {
'start': start_date.strftime('%m/%d/%Y'),
'end': end_date.strftime('%m/%d/%Y'),
}
date_interval['intervals'].append(obj)
flag = False
interval_limit = len(date_interval['intervals'])
interval_counter = 0
data_flag = False
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'etfs.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
etfs = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path), keep_default_na=False)
else:
raise FileNotFoundError("ERR#0058: etfs file not found or errored.")
if etfs is None:
raise IOError("ERR#0009: etfs object not found or unable to retrieve.")
country = unidecode(country.strip().lower())
if country not in get_etf_countries():
raise RuntimeError("ERR#0034: country " + country + " not found, check if it is correct.")
etf = unidecode(etf.strip().lower())
def_exchange = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['def_stock_exchange'] == True)).idxmax()]
etfs = etfs[etfs['country'].str.lower() == country]
if etf not in list(etfs['name'].apply(unidecode).str.lower()):
raise RuntimeError("ERR#0019: etf " + etf + " not found, check if it is correct.")
etfs = etfs[etfs['name'].apply(unidecode).str.lower() == etf]
if def_exchange['country'] != country:
warnings.warn(
'Selected country does not contain the default stock exchange of the introduced ETF. ' + \
'Default country is: \"' + def_exchange['country'] + '\" and default stock_exchange: \"' + \
def_exchange['stock_exchange'] + '\".',
Warning
)
if stock_exchange:
if stock_exchange.lower() not in etfs['stock_exchange'].str.lower():
raise ValueError("ERR#0126: introduced stock_exchange value does not exists, leave this parameter to None to use default stock_exchange.")
etf_exchange = etfs.loc[(etfs['stock_exchange'].str.lower() == stock_exchange.lower()).idxmax(), 'stock_exchange']
else:
found_etfs = etfs[etfs['name'].apply(unidecode).str.lower() == etf]
if len(found_etfs) > 1:
warnings.warn(
'Note that the displayed information can differ depending on the stock exchange. Available stock_exchange' + \
' values for \"' + country + '\" are: \"' + '\", \"'.join(found_etfs['stock_exchange']) + '\".',
Warning
)
del found_etfs
etf_exchange = etfs.loc[(etfs['name'].apply(unidecode).str.lower() == etf).idxmax(), 'stock_exchange']
else:
if stock_exchange:
if stock_exchange.lower() not in etfs['stock_exchange'].str.lower():
raise ValueError("ERR#0126: introduced stock_exchange value does not exists, leave this parameter to None to use default stock_exchange.")
if def_exchange['stock_exchange'].lower() != stock_exchange.lower():
warnings.warn(
'Selected stock_exchange is not the default one of the introduced ETF. ' + \
'Default country is: \"' + def_exchange['country'] + '\" and default stock_exchange: \"' + \
def_exchange['stock_exchange'].lower() + '\".',
Warning
)
etf_exchange = etfs.loc[(etfs['stock_exchange'].str.lower() == stock_exchange.lower()).idxmax(), 'stock_exchange']
else:
etf_exchange = def_exchange['stock_exchange']
symbol = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'symbol']
id_ = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'id']
name = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'name']
etf_currency = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'currency']
final = list()
header = symbol + ' Historical Data'
for index in range(len(date_interval['intervals'])):
interval_counter += 1
params = {
"curr_id": id_,
"smlID": str(randint(1000000, 99999999)),
"header": header,
"st_date": date_interval['intervals'][index]['start'],
"end_date": date_interval['intervals'][index]['end'],
"interval_sec": interval.capitalize(),
"sort_col": "date",
"sort_ord": "DESC",
"action": "historical_data"
}
head = {
"User-Agent": random_user_agent(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
}
url = "https://www.investing.com/instruments/HistoricalDataAjax"
req = requests.post(url, headers=head, data=params)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
if not req.text:
continue
root_ = fromstring(req.text)
path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr")
result = list()
if path_:
for elements_ in path_:
if elements_.xpath(".//td")[0].text_content() == 'No results found':
if interval_counter < interval_limit:
data_flag = False
else:
raise IndexError("ERR#0010: etf information unavailable or not found.")
else:
data_flag = True
info = []
for nested_ in elements_.xpath(".//td"):
info.append(nested_.get('data-real-value'))
if data_flag is True:
etf_date = datetime.strptime(str(datetime.fromtimestamp(int(info[0]), tz=pytz.timezone('GMT')).date()), '%Y-%m-%d')
etf_close = float(info[1].replace(',', ''))
etf_open = float(info[2].replace(',', ''))
etf_high = float(info[3].replace(',', ''))
etf_low = float(info[4].replace(',', ''))
etf_volume = int(info[5])
result.insert(len(result),
Data(etf_date, etf_open, etf_high, etf_low, etf_close, etf_volume, etf_currency, etf_exchange))
if data_flag is True:
if order in ['ascending', 'asc']:
result = result[::-1]
elif order in ['descending', 'desc']:
result = result
if as_json is True:
json_list = [value.etf_as_json() for value in result]
final.append(json_list)
elif as_json is False:
df = pd.DataFrame.from_records([value.etf_to_dict() for value in result])
df.set_index('Date', inplace=True)
final.append(df)
else:
raise RuntimeError("ERR#0004: data retrieval error while scraping.")
if order in ['descending', 'desc']:
final.reverse()
if as_json is True:
json_ = {
'name': name,
'historical': [value for json_list in final for value in json_list]
}
return json.dumps(json_, sort_keys=False)
elif as_json is False:
return pd.concat(final)
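# Note on the splitting above (added for clarity, not part of the original code):
# the requested period is cut into intervals of at most 19 years before posting,
# since Investing.com does not appear to serve arbitrarily long ranges in a single
# call. For example, from_date='01/01/1990' and to_date='01/01/2019' is fetched as
# two requests, roughly 01/01/1990-01/01/2009 and 01/02/2009-01/01/2019 (dates are
# sent in mm/dd/yyyy format), and the partial frames are concatenated at the end.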
def get_etf_information(etf, country, as_json=False):
"""
This function retrieves fundamental financial information from the specified ETF. The retrieved
information from the ETF can be valuable as it is additional information that can be used combined
with OHLC values, so to determine financial insights from the company which holds the specified ETF.
Args:
etf (:obj:`str`): name of the ETF to retrieve recent historical data from.
country (:obj:`str`): name of the country from where the ETF is.
as_json (:obj:`bool`, optional):
optional argument to determine the format of the output data (:obj:`dict` or :obj:`json`).
Returns:
:obj:`pandas.DataFrame` or :obj:`dict`- etf_information:
The resulting :obj:`pandas.DataFrame` contains the information fields retrieved from Investing.com
from the specified ETF; it can also be returned as a :obj:`dict`, if argument `as_json=True`.
If any of the information fields could not be retrieved, that field/s will be filled with
None values. If the retrieval process succeeded, the resulting :obj:`dict` will look like::
etf_information = {
"1-Year Change": "21.83%",
"52 wk Range": "233.76 - 320.06",
"Asset Class": "Equity",
"Average Vol. (3m)": 59658771.0,
"Beta": 1.01,
"Dividend Yield": "1.73%",
"Dividends (TTM)": 4.03,
"ETF Name": "SPDR S&P 500",
"Market Cap": 296440000000.0,
"Open": 319.25,
"Prev. Close": 317.27,
"ROI (TTM)": "- 0.46%",
"Shares Outstanding": 934132116.0,
"Todays Range": "319.18 - 320.06",
"Total Assets": 167650000000.0,
"Volume": 27928710.0
}
Raises:
ValueError: raised if any of the introduced arguments is not valid or errored.
FileNotFoundError: raised if `etfs.csv` file was not found or errored.
IOError: raised if `etfs.csv` file is empty or errored.
RuntimeError: raised if scraping process failed while running.
ConnectionError: raised if the connection to Investing.com errored (did not return HTTP 200)
"""
if not etf:
raise ValueError("ERR#0031: etf parameter is mandatory and must be a valid etf name.")
if not isinstance(etf, str):
raise ValueError("ERR#0030: etf argument needs to be a str.")
if country is None:
raise ValueError("ERR#0039: country can not be None, it should be a str.")
if country is not None and not isinstance(country, str):
raise ValueError("ERR#0025: specified country value not valid.")
if not isinstance(as_json, bool):
raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'etfs.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
etfs = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path), keep_default_na=False)
else:
raise FileNotFoundError("ERR#0058: etfs file not found or errored.")
if etfs is None:
raise IOError("ERR#0009: etfs object not found or unable to retrieve.")
country = unidecode(country.strip().lower())
if country not in get_etf_countries():
raise RuntimeError("ERR#0034: country " + country + " not found, check if it is correct.")
etfs = etfs[etfs['country'] == country]
etf = unidecode(etf.strip().lower())
if etf not in list(etfs['name'].apply(unidecode).str.lower()):
raise RuntimeError("ERR#0019: etf " + etf + " not found, check if it is correct.")
name = etfs.loc[(etfs['name'].apply(unidecode).str.lower() == etf).idxmax(), 'name']
tag = etfs.loc[(etfs['name'].apply(unidecode).str.lower() == etf).idxmax(), 'tag']
url = "https://www.investing.com/etfs/" + tag
head = {
"User-Agent": random_user_agent(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
}
req = requests.get(url, headers=head)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
root_ = fromstring(req.text)
path_ = root_.xpath("//div[contains(@class, 'overviewDataTable')]/div")
result = pd.DataFrame(columns=['ETF Name', 'Prev. Close', 'Todays Range', 'ROI (TTM)',
'Open', '52 wk Range', 'Dividends (TTM)', 'Volume',
'Market Cap', 'Dividend Yield', 'Average Vol. (3m)',
'Total Assets', 'Beta', '1-Year Change', 'Shares Outstanding',
'Asset Class'])
result.at[0, 'ETF Name'] = name
if path_:
for elements_ in path_:
element = elements_.xpath(".//span[@class='float_lang_base_1']")[0]
title_ = element.text_content()
if title_ == "Day's Range":
title_ = 'Todays Range'
if title_ in result.columns.tolist():
try:
result.at[0, title_] = float(element.getnext().text_content().replace(',', ''))
continue
except:
pass
try:
text = element.getnext().text_content().strip()
result.at[0, title_] = datetime.strptime(text, "%b %d, %Y").strftime("%d/%m/%Y")
continue
except:
pass
try:
value = element.getnext().text_content().strip()
if value.__contains__('K'):
value = float(value.replace('K', '').replace(',', '')) * 1e3
elif value.__contains__('M'):
value = float(value.replace('M', '').replace(',', '')) * 1e6
elif value.__contains__('B'):
value = float(value.replace('B', '').replace(',', '')) * 1e9
elif value.__contains__('T'):
value = float(value.replace('T', '').replace(',', '')) * 1e12
result.at[0, title_] = value
continue
except:
pass
result.replace({'N/A': None}, inplace=True)
if as_json is True:
json_ = result.iloc[0].to_dict()
return json_
elif as_json is False:
return result
else:
raise RuntimeError("ERR#0004: data retrieval error while scraping.")
def get_etfs_overview(country, as_json=False, n_results=100):
"""
This function retrieves an overview containing all the real time data available for the main ETFs from a country,
such as the ETF names, symbols, current value, etc. as indexed in Investing.com. So on, the main usage of this
function is to get an overview on the main ETFs from a country, so to get a general view. Note that since
this function is retrieving a lot of information at once, by default just the overview of the Top 100 ETFs
is being retrieved, but an additional parameter called n_results can be specified so to retrieve N results.
Args:
country (:obj:`str`): name of the country to retrieve the ETFs overview from.
as_json (:obj:`bool`, optional):
optional argument to determine the format of the output data (:obj:`pandas.DataFrame` or :obj:`json`).
n_results (:obj:`int`, optional): number of results to be displayed on the overview table (0-1000).
Returns:
:obj:`pandas.DataFrame` - etfs_overview:
The resulting :obj:`pandas.DataFrame` contains all the data available in Investing.com of the main ETFs
from a country in order to get an overview of it.
If the retrieval process succeeded, the resulting :obj:`pandas.DataFrame` should look like::
country | name | full_name | symbol | last | change | turnover
--------|------|-----------|--------|------|--------|----------
xxxxxxx | xxxx | xxxxxxxxx | xxxxxx | xxxx | xxxxxx | xxxxxxxx
Raises:
ValueError: raised if there was any argument error.
FileNotFoundError: raised when `etfs.csv` file is missing.
IOError: raised if data could not be retrieved due to file error.
RuntimeError:
raised either if the introduced country does not match any of the listed ones or if no overview results could be
retrieved from Investing.com.
ConnectionError: raised if GET requests does not return 200 status code.
"""
if country is None:
raise ValueError("ERR#0039: country can not be None, it should be a str.")
if country is not None and not isinstance(country, str):
raise ValueError("ERR#0025: specified country value not valid.")
if not isinstance(as_json, bool):
raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
if not isinstance(n_results, int):
raise ValueError("ERR#0089: n_results argument should be an integer between 1 and 1000.")
if 1 > n_results or n_results > 1000:
raise ValueError("ERR#0089: n_results argument should be an integer between 1 and 1000.")
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'etfs.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
etfs = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path), keep_default_na=False)
else:
raise FileNotFoundError("ERR#0058: etfs file not found or errored.")
if etfs is None:
raise IOError("ERR#0009: etfs object not found or unable to retrieve.")
country = unidecode(country.strip().lower())
if country not in get_etf_countries():
raise RuntimeError('ERR#0025: specified country value is not valid.')
etfs = etfs[etfs['country'] == country]
if country.lower() == 'united states':
        country = 'usa'
elif country.lower() == 'united kingdom':
country = 'uk'
head = {
"User-Agent": random_user_agent(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
}
url = "https://www.investing.com/etfs/" + country.replace(' ', '-') + "-etfs?&issuer_filter=0"
req = requests.get(url, headers=head)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
root_ = fromstring(req.text)
table = root_.xpath(".//table[@id='etfs']/tbody/tr")
results = list()
if len(table) > 0:
for row in table[:n_results]:
id_ = row.get('id').replace('pair_', '')
symbol = row.xpath(".//td[contains(@class, 'symbol')]")[0].get('title')
nested = row.xpath(".//a")[0]
name = nested.text.strip()
full_name = nested.get('title').rstrip()
# In Euro Zone the ETFs are from different countries so the country is specified
country_flag = row.xpath(".//td[@class='flag']/span")[0].get('title')
country_flag = unidecode(country_flag.lower())
last_path = ".//td[@class='" + 'pid-' + str(id_) + '-last' + "']"
last = row.xpath(last_path)[0].text_content()
change_path = ".//td[contains(@class, '" + 'pid-' + str(id_) + '-pcp' + "')]"
change = row.xpath(change_path)[0].text_content()
turnover_path = ".//td[contains(@class, '" + 'pid-' + str(id_) + '-turnover' + "')]"
turnover = row.xpath(turnover_path)[0].text_content()
if turnover == '':
continue
if turnover.__contains__('K'):
turnover = float(turnover.replace('K', '').replace(',', '')) * 1e3
elif turnover.__contains__('M'):
turnover = float(turnover.replace('M', '').replace(',', '')) * 1e6
elif turnover.__contains__('B'):
turnover = float(turnover.replace('B', '').replace(',', '')) * 1e9
else:
turnover = float(turnover.replace(',', ''))
data = {
"country": country_flag,
"name": name,
"full_name": full_name,
"symbol": symbol,
"last": float(last.replace(',', '')),
"change": change,
"turnover": int(turnover),
"currency": etfs.loc[(etfs['name'] == name).idxmax(), 'currency']
}
results.append(data)
else:
raise RuntimeError("ERR#0092: no data found while retrieving the overview from Investing.com")
df = pd.DataFrame(results)
if as_json:
return json.loads(df.to_json(orient='records'))
else:
return df
def search_etfs(by, value):
"""
This function searches etfs by the introduced value for the specified field. This means that this function
is going to search if there is a value that matches the introduced value for the specified field which is the
`etfs.csv` column name to search in. Available fields to search etfs are 'name', 'full_name' and 'symbol'.
Args:
by (:obj:`str`): name of the field to search for, which is the column name ('name', 'full_name' or 'symbol').
value (:obj:`str`): value of the field to search for, which is the str that is going to be searched.
Returns:
:obj:`pandas.DataFrame` - search_result:
The resulting `pandas.DataFrame` contains the search results from the given query (the specified value
            in the specified field). If there are no results an error will be raised, but otherwise this
`pandas.DataFrame` will contain all the available field values that match the introduced query.
Raises:
ValueError: raised if any of the introduced params is not valid or errored.
FileNotFoundError: raised if `etfs.csv` file is missing.
IOError: raised if data could not be retrieved due to file error.
RuntimeError: raised if no results were found for the introduced value in the introduced field.
"""
if not by:
raise ValueError('ERR#0006: the introduced field to search is mandatory and should be a str.')
if not isinstance(by, str):
raise ValueError('ERR#0006: the introduced field to search is mandatory and should be a str.')
if not value:
raise ValueError('ERR#0017: the introduced value to search is mandatory and should be a str.')
if not isinstance(value, str):
raise ValueError('ERR#0017: the introduced value to search is mandatory and should be a str.')
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'etfs.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
etfs = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path), keep_default_na=False)
else:
raise FileNotFoundError("ERR#0058: etfs file not found or errored.")
if etfs is None:
raise IOError("ERR#0009: etfs object not found or unable to retrieve.")
etfs.drop(columns=['tag', 'id'], inplace=True)
available_search_fields = etfs.columns.tolist()
if isinstance(by, str) and by not in available_search_fields:
raise ValueError('ERR#0026: the introduced field to search can either just be '
+ ' or '.join(available_search_fields))
etfs['matches'] = etfs[by].str.contains(value, case=False)
search_result = etfs.loc[etfs['matches'] == True].copy()
if len(search_result) == 0:
raise RuntimeError('ERR#0043: no results were found for the introduced ' + str(by) + '.')
search_result.drop(columns=['matches'], inplace=True)
search_result.reset_index(drop=True, inplace=True)
return search_result
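# Usage sketch (illustrative only; the ETF name, country and any returned figures
# are examples, and every call below needs network access to Investing.com):
#
#     >>> import investpy
#     >>> investpy.search_etfs(by='symbol', value='SPY')
#     >>> investpy.get_etf_recent_data(etf='SPDR S&P 500', country='united states')
#     >>> investpy.get_etf_information(etf='SPDR S&P 500', country='united states', as_json=True)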
|
tock/employees/migrations/0007_auto_20160428_0105.py | mikiec84/tock | 134 | 12614126 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('employees', '0006_userdata_current_employee'),
]
operations = [
migrations.AlterModelOptions(
name='userdata',
options={'verbose_name_plural': 'Employees', 'verbose_name': 'Employee'},
),
migrations.AddField(
model_name='userdata',
name='is_18f_employee',
field=models.BooleanField(verbose_name='18F Employee', default=True),
),
migrations.AddField(
model_name='userdata',
name='unit',
field=models.IntegerField(verbose_name='Select unit', choices=[(0, 'Operations-Team Operations'), (1, 'Operations-Talent'), (2, 'Operations-Infrastructure'), (3, 'Operations-Front Office'), (4, 'Chapters-Acquisition Managers'), (5, 'Chapters-Engineering'), (6, 'Chapters-Experience Design'), (7, 'Chapters-Product'), (8, 'Chapters-Strategists'), (9, 'Business-Acquisition Services'), (10, 'Business-Custom Partner Solutions'), (11, 'Business-Learn'), (12, 'Business-Products & Platforms'), (13, 'Business-Transformation Services'), (14, 'PIF-Fellows'), (15, 'PIF-Operations'), (16, 'Unknown / N/A')], blank=True, null=True),
),
migrations.AlterField(
model_name='userdata',
name='current_employee',
field=models.BooleanField(verbose_name='Current Employee', default=True),
),
migrations.AlterField(
model_name='userdata',
name='end_date',
field=models.DateField(verbose_name='Employee end date', null=True, blank=True),
),
migrations.AlterField(
model_name='userdata',
name='start_date',
field=models.DateField(verbose_name='Employee start date', null=True, blank=True),
),
migrations.AlterField(
model_name='userdata',
name='user',
field=models.OneToOneField(
verbose_name='<NAME>',
to=settings.AUTH_USER_MODEL,
related_name='user_data',
on_delete=models.CASCADE
),
),
]
|
snmp/datadog_checks/snmp/parsing/metric_tags.py | mchelen-gov/integrations-core | 663 | 12614145 | <filename>snmp/datadog_checks/snmp/parsing/metric_tags.py
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
"""
Helpers to parse the `metric_tags` section of a config file.
"""
import re
from typing import Dict, List, NamedTuple, TypedDict
from datadog_checks.base import ConfigurationError
from ..models import OID
from ..pysnmp_types import ObjectIdentity
from ..resolver import OIDResolver
from .parsed_metrics import ParsedMatchMetricTag, ParsedMetricTag, ParsedSimpleMetricTag
SymbolTag = NamedTuple('SymbolTag', [('parsed_metric_tag', ParsedMetricTag), ('symbol', str)])
ParsedSymbolTagsResult = TypedDict('ParsedSymbolTagsResult', {'oids': List[OID], 'parsed_symbol_tags': List[SymbolTag]})
def parse_symbol_metric_tags(metric_tags, resolver):
# type: (List[MetricTag], OIDResolver) -> ParsedSymbolTagsResult
"""
Parse the symbol based `metric_tags` section of a config file, and return OIDs to fetch and metric tags to submit.
"""
oids = [] # type: List[OID]
parsed_symbol_tags = [] # type: List[SymbolTag]
for metric_tag in metric_tags:
if 'symbol' not in metric_tag:
raise ConfigurationError('A metric tag must specify a symbol: {}'.format(metric_tag))
result = _parse_symbol_metric_tag(metric_tag)
for name, oid in result.oids_to_resolve.items():
resolver.register(oid, name)
oids.append(result.oid)
parsed_symbol_tags.append(result.symbol_tag)
return {'oids': oids, 'parsed_symbol_tags': parsed_symbol_tags}
# Helpers below.
# Also some type definitions to make sure we only manipulate known fields with correct types.
MetricTagParseResult = NamedTuple(
'MetricTagParseResult', [('oid', OID), ('symbol_tag', SymbolTag), ('oids_to_resolve', Dict[str, OID])]
)
MetricTag = TypedDict(
'MetricTag',
{
'symbol': str,
'MIB': str,
'OID': str,
# Simple tag.
'tag': str,
# Regex matching.
'match': str,
'tags': List[str],
},
total=False,
)
def _parse_symbol_metric_tag(metric_tag):
# type: (MetricTag) -> MetricTagParseResult
oids_to_resolve = {}
if 'MIB' in metric_tag:
oid = OID(ObjectIdentity(metric_tag['MIB'], metric_tag['symbol']))
elif 'OID' in metric_tag:
oid = OID(metric_tag['OID'])
oids_to_resolve[metric_tag['symbol']] = oid
else:
raise ConfigurationError('A metric tag must specify an OID or a MIB: {}'.format(metric_tag))
symbol_tag = SymbolTag(parsed_metric_tag=parse_metric_tag(metric_tag), symbol=metric_tag['symbol'])
return MetricTagParseResult(oid=oid, symbol_tag=symbol_tag, oids_to_resolve=oids_to_resolve)
def parse_metric_tag(metric_tag):
# type: (MetricTag) -> ParsedMetricTag
if 'tag' in metric_tag:
parsed_metric_tag = _parse_simple_metric_tag(metric_tag)
elif 'match' in metric_tag and 'tags' in metric_tag:
parsed_metric_tag = _parse_regex_metric_tag(metric_tag)
else:
raise ConfigurationError(
'A metric tag must specify either a tag, '
'or a mapping of tags and a regular expression: {}'.format(metric_tag)
)
return parsed_metric_tag
def _parse_simple_metric_tag(metric_tag):
# type: (MetricTag) -> ParsedMetricTag
return ParsedSimpleMetricTag(name=metric_tag['tag'])
def _parse_regex_metric_tag(metric_tag):
# type: (MetricTag) -> ParsedMetricTag
match = metric_tag['match']
tags = metric_tag['tags']
if not isinstance(tags, dict):
raise ConfigurationError(
'Specified tags needs to be a mapping of tag name to regular expression matching: {}'.format(metric_tag)
)
try:
pattern = re.compile(match)
except re.error as exc:
raise ConfigurationError('Failed to compile regular expression {!r}: {}'.format(match, exc))
return ParsedMatchMetricTag(tags, pattern=pattern)
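# Illustrative self-check (not part of the original module): shows how the two
# supported `metric_tags` flavors are parsed. The OIDs and tag names are made up.
if __name__ == '__main__':
    simple = parse_metric_tag({'symbol': 'sysName', 'OID': '1.3.6.1.2.1.1.5.0', 'tag': 'snmp_host'})
    regex = parse_metric_tag(
        {
            'symbol': 'sysDescr',
            'OID': '1.3.6.1.2.1.1.1.0',
            'match': r'(\w+) (\w+)',
            'tags': {'device_vendor': '\\1', 'device_os': '\\2'},
        }
    )
    print(simple, regex)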
|
codigo/Live90/temperaturas.py | cassiasamp/live-de-python | 572 | 12614176 | from csv import reader
from matplotlib import pyplot as plt
with open('temperaturas.csv') as file:
parsed = reader(file)
    data_1999 = list(filter(lambda v: v[0] == '1999.0', parsed))  # materialize the rows: a bare filter() iterator would be exhausted after building max_temp
max_temp = [float(v[3]) for v in data_1999 if v[3]]
min_temp = [float(v[4]) for v in data_1999 if v[4]]
med_temp = [float(v[5]) for v in data_1999 if v[5]]
# import pdb; pdb.set_trace()
    plt.plot(max_temp, label='MAX')
    plt.plot(min_temp, label='MIN')
    plt.plot(med_temp, label='MED')
    plt.legend()  # show the MAX/MIN/MED labels
    plt.show()  # display the figure; without this the script ends with no output
|
astropy/units/__init__.py | jayvdb/astropy | 445 | 12614178 | <filename>astropy/units/__init__.py<gh_stars>100-1000
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains classes and functions for defining and converting
between different physical units.
This code is adapted from the `pynbody
<https://github.com/pynbody/pynbody>`_ units module written by Andrew
Pontzen, who has granted the Astropy project permission to use the
code under a BSD license.
"""
# Lots of things to import - go from more basic to advanced, so that
# whatever advanced ones need generally has been imported already;
# this helps prevent circular imports and makes it easier to understand
# where most time is spent (e.g., using python -X importtime).
from .core import *
from .quantity import *
from . import si
from . import cgs
from . import astrophys
from . import photometric
from .function import units as function_units
from .si import *
from .astrophys import *
from .photometric import *
from .cgs import *
from .physical import *
from .function.units import *
from .equivalencies import *
from .function.core import *
from .function.logarithmic import *
from .decorators import *
del bases
# Enable the set of default units. This notably does *not* include
# Imperial units.
set_enabled_units([si, cgs, astrophys, function_units, photometric])
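# Usage sketch (illustrative; nothing here is executed at import time):
#
#     >>> import astropy.units as u
#     >>> (5 * u.km).to(u.m)
#     <Quantity 5000. m>
#     >>> from astropy.units import imperial
#     >>> imperial.enable()  # opt in to Imperial units, which are left out of the defaults above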
|
demo/pcd_demo.py | BB88Lee/mmdetection3d | 136 | 12614180 | <filename>demo/pcd_demo.py
from argparse import ArgumentParser
from mmdet3d.apis import inference_detector, init_detector, show_result_meshlab
def main():
parser = ArgumentParser()
parser.add_argument('pcd', help='Point cloud file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.6, help='bbox score threshold')
parser.add_argument(
'--out-dir', type=str, default='demo', help='dir to save results')
args = parser.parse_args()
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result, data = inference_detector(model, args.pcd)
# show the results
show_result_meshlab(data, result, args.out_dir)
if __name__ == '__main__':
main()
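# Example invocation (the point cloud, config and checkpoint paths below are
# placeholders; substitute files that exist in your mmdetection3d checkout):
#
#   python demo/pcd_demo.py demo/kitti_000008.bin \
#       configs/second/hv_second_secfpn_6x8_80e_kitti-3d-car.py \
#       checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-car.pth \
#       --score-thr 0.6 --out-dir demo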
|
Stephanie/Modules/gmail_module.py | JeremyARussell/stephanie-va | 866 | 12614188 | <gh_stars>100-1000
import imaplib
import email
import re
from dateutil import parser
from Stephanie.Modules.base_module import BaseModule
class GmailModule(BaseModule):
def __init__(self, *args):
super(GmailModule, self).__init__(*args)
self.gmail_address = self.get_configuration('gmail_address')
self.password = self.get_configuration('gmail_password')
self.conn = None
if self.gmail_address and self.password:
self.do_init()
else:
return False
def do_init(self):
try:
self.conn = imaplib.IMAP4_SSL('imap.gmail.com')
self.conn.debug = 0
self.conn.login(self.gmail_address, self.password)
except:
response = ("Either your credentials are wrong mate, or there is some problem going on, do me a favor, I know "
"you won't but whatever, just inform me in the forums.")
print(response)
return response
def fetch_unread_emails(self, since=None, markRead=False, limit=None):
"""
Fetches a list of unread email objects from a user's Gmail inbox.
Arguments:
profile -- contains information related to the user (e.g., Gmail
address)
since -- if provided, no emails before this date will be returned
markRead -- if True, marks all returned emails as read in target inbox
Returns:
A list of unread email objects.
"""
self.conn.select(readonly=(not markRead))
msgs = []
(retcode, messages) = self.conn.search(None, '(UNSEEN)')
if retcode == 'OK' and messages != ['']:
num_unread = len(messages[0].split())
# if limit and num_unread > limit:
# return num_unread
i = 0
for num in messages[0].split():
# parse email RFC822 format
                if i >= 5:  # keep only the 5 most recent unread messages, matching the spoken summary
break
ret, data = self.conn.fetch(num, '(RFC822)')
correct_format_message = data[0][1].decode("utf-8")
msg = email.message_from_string(correct_format_message)
if not since or self.get_date(msg) > since:
msgs.append(msg)
i += 1
self.conn.close()
self.conn.logout()
return num_unread, msgs
@staticmethod
def get_sender(email_found):
"""
Returns the best-guess sender of an email.
Arguments:
email -- the email whose sender is desired
Returns:
Sender of the email.
"""
sender = email_found['From']
m = re.match(r'(.*)\s<.*>', sender)
if m:
return m.group(1)
return sender
@staticmethod
def get_date(email_found):
return parser.parse(email_found.get('date'))
@staticmethod
def get_most_recent_date(emails):
"""
Returns the most recent date of any email in the list provided.
Arguments:
emails -- a list of emails to check
Returns:
Date of the most recent email.
"""
dates = [GmailModule.get_date(e) for e in emails]
dates.sort(reverse=True)
if dates:
return dates[0]
return None
def handle(self):
"""
Responds to user-input, typically speech text, with a summary of
the user's Gmail inbox, reporting on the number of unread emails
in the inbox, as well as their senders.
Arguments:
text -- user-input, typically transcribed speech
self.assistant -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., Gmail
address)
"""
try:
num_unread, msgs = self.fetch_unread_emails(limit=5)
if num_unread > 5:
response = "You have %d unread emails, out of which 5 latest ones are as follows, please wait a second, as I process" % num_unread
self.assistant.say(response)
senders = []
for e in msgs:
senders.append(self.get_sender(e))
except imaplib.IMAP4.error:
return "I'm sorry. I'm not authenticated to work with your Gmail."
if not senders:
return "You have no unread emails."
elif len(senders) == 1:
return "You have one unread email from " + senders[0] + "."
else:
response = "You have %d unread emails" % len(
senders)
unique_senders = list(set(senders))
if len(unique_senders) > 1:
unique_senders[-1] = 'and ' + unique_senders[-1]
response += ". Senders include: "
response += '...'.join(senders)
else:
response += " from " + unique_senders[0]
return response
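# Configuration sketch (illustrative): the module only activates itself when both
# keys below resolve through get_configuration(); the exact section and file
# layout depend on the Stephanie configuration format and are assumed here. With
# two-factor auth enabled, Gmail's IMAP login typically needs an app password.
#
#   gmail_address = <EMAIL>
#   gmail_password = <PASSWORD>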
|
examples/advanced/autowrap_ufuncify.py | utkarshdeorah/sympy | 8,323 | 12614191 | #!/usr/bin/env python
"""
Setup ufuncs for the legendre polynomials
-----------------------------------------
This example demonstrates how you can use the ufuncify utility in SymPy
to create fast, customized universal functions for use with numpy
arrays. An autowrapped sympy expression can be significantly faster than
what you would get by applying a sequence of the ufuncs shipped with
numpy. [0]
You need to have numpy installed to run this example, as well as a
working fortran compiler.
[0]:
http://ojensen.wordpress.com/2010/08/10/fast-ufunc-ish-hydrogen-solutions/
"""
import sys
from sympy.external import import_module
np = import_module('numpy')
if not np:
sys.exit("Cannot import numpy. Exiting.")
plt = import_module('matplotlib.pyplot')
if not plt:
sys.exit("Cannot import matplotlib.pyplot. Exiting.")
import mpmath
from sympy.utilities.autowrap import ufuncify
from sympy import symbols, legendre, pprint
def main():
print(__doc__)
x = symbols('x')
# a numpy array we can apply the ufuncs to
grid = np.linspace(-1, 1, 1000)
# set mpmath precision to 20 significant numbers for verification
mpmath.mp.dps = 20
print("Compiling legendre ufuncs and checking results:")
# Let's also plot the ufunc's we generate
for n in range(6):
# Setup the SymPy expression to ufuncify
expr = legendre(n, x)
print("The polynomial of degree %i is" % n)
pprint(expr)
# This is where the magic happens:
binary_poly = ufuncify(x, expr)
# It's now ready for use with numpy arrays
polyvector = binary_poly(grid)
# let's check the values against mpmath's legendre function
maxdiff = 0
for j in range(len(grid)):
precise_val = mpmath.legendre(n, grid[j])
diff = abs(polyvector[j] - precise_val)
if diff > maxdiff:
maxdiff = diff
print("The largest error in applied ufunc was %e" % maxdiff)
assert maxdiff < 1e-14
# We can also attach the autowrapped legendre polynomial to a sympy
# function and plot values as they are calculated by the binary function
        plot1 = plt.plot(grid, polyvector)  # plt is matplotlib.pyplot; the 'hold' kwarg is no longer needed
print("Here's a plot with values calculated by the wrapped binary functions")
    plt.show()
if __name__ == '__main__':
main()
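# A minimal sketch of ufuncify on its own, separate from the legendre demo above
# (assumes numpy and a working backend compiler are installed):
#   from sympy.utilities.autowrap import ufuncify
#   from sympy.abc import x
#   f = ufuncify(x, x**2 + 1)
#   f(np.linspace(0.0, 2.0, 3))   # -> array([1., 2., 5.])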
|
tensorflow2/tf2cv/models/resnet_cub.py | naviocean/imgclsmob | 2,649 | 12614212
"""
ResNet for CUB-200-2011, implemented in TensorFlow.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['resnet10_cub', 'resnet12_cub', 'resnet14_cub', 'resnetbc14b_cub', 'resnet16_cub', 'resnet18_cub',
'resnet26_cub', 'resnetbc26b_cub', 'resnet34_cub', 'resnetbc38b_cub', 'resnet50_cub', 'resnet50b_cub',
'resnet101_cub', 'resnet101b_cub', 'resnet152_cub', 'resnet152b_cub', 'resnet200_cub', 'resnet200b_cub']
from .common import is_channels_first
from .resnet import get_resnet
def resnet10_cub(classes=200, **kwargs):
"""
ResNet-10 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=10, model_name="resnet10_cub", **kwargs)
def resnet12_cub(classes=200, **kwargs):
"""
ResNet-12 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=12, model_name="resnet12_cub", **kwargs)
def resnet14_cub(classes=200, **kwargs):
"""
ResNet-14 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=14, model_name="resnet14_cub", **kwargs)
def resnetbc14b_cub(classes=200, **kwargs):
"""
ResNet-BC-14b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetbc14b_cub",
**kwargs)
def resnet16_cub(classes=200, **kwargs):
"""
ResNet-16 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=16, model_name="resnet16_cub", **kwargs)
def resnet18_cub(classes=200, **kwargs):
"""
ResNet-18 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=18, model_name="resnet18_cub", **kwargs)
def resnet26_cub(classes=200, **kwargs):
"""
ResNet-26 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=26, bottleneck=False, model_name="resnet26_cub", **kwargs)
def resnetbc26b_cub(classes=200, **kwargs):
"""
ResNet-BC-26b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=26, bottleneck=True, conv1_stride=False, model_name="resnetbc26b_cub",
**kwargs)
def resnet34_cub(classes=200, **kwargs):
"""
ResNet-34 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=34, model_name="resnet34_cub", **kwargs)
def resnetbc38b_cub(classes=200, **kwargs):
"""
ResNet-BC-38b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b_cub",
**kwargs)
def resnet50_cub(classes=200, **kwargs):
"""
ResNet-50 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=50, model_name="resnet50_cub", **kwargs)
def resnet50b_cub(classes=200, **kwargs):
"""
ResNet-50 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=50, conv1_stride=False, model_name="resnet50b_cub", **kwargs)
def resnet101_cub(classes=200, **kwargs):
"""
ResNet-101 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=101, model_name="resnet101_cub", **kwargs)
def resnet101b_cub(classes=200, **kwargs):
"""
ResNet-101 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=101, conv1_stride=False, model_name="resnet101b_cub", **kwargs)
def resnet152_cub(classes=200, **kwargs):
"""
ResNet-152 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=152, model_name="resnet152_cub", **kwargs)
def resnet152b_cub(classes=200, **kwargs):
"""
ResNet-152 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=152, conv1_stride=False, model_name="resnet152b_cub", **kwargs)
def resnet200_cub(classes=200, **kwargs):
"""
ResNet-200 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=200, model_name="resnet200_cub", **kwargs)
def resnet200b_cub(classes=200, **kwargs):
"""
ResNet-200 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=200, conv1_stride=False, model_name="resnet200b_cub", **kwargs)
def _test():
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
data_format = "channels_last"
# data_format = "channels_first"
pretrained = False
models = [
resnet10_cub,
resnet12_cub,
resnet14_cub,
resnetbc14b_cub,
resnet16_cub,
resnet18_cub,
resnet26_cub,
resnetbc26b_cub,
resnet34_cub,
resnetbc38b_cub,
resnet50_cub,
resnet50b_cub,
resnet101_cub,
resnet101b_cub,
resnet152_cub,
resnet152b_cub,
resnet200_cub,
resnet200b_cub,
]
for model in models:
net = model(pretrained=pretrained, data_format=data_format)
batch = 14
x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3))
y = net(x)
assert (tuple(y.shape.as_list()) == (batch, 200))
weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
print("m={}, {}".format(model.__name__, weight_count))
assert (model != resnet10_cub or weight_count == 5008392)
assert (model != resnet12_cub or weight_count == 5082376)
assert (model != resnet14_cub or weight_count == 5377800)
assert (model != resnetbc14b_cub or weight_count == 8425736)
assert (model != resnet16_cub or weight_count == 6558472)
assert (model != resnet18_cub or weight_count == 11279112)
assert (model != resnet26_cub or weight_count == 17549832)
assert (model != resnetbc26b_cub or weight_count == 14355976)
assert (model != resnet34_cub or weight_count == 21387272)
assert (model != resnetbc38b_cub or weight_count == 20286216)
assert (model != resnet50_cub or weight_count == 23917832)
assert (model != resnet50b_cub or weight_count == 23917832)
assert (model != resnet101_cub or weight_count == 42909960)
assert (model != resnet101b_cub or weight_count == 42909960)
assert (model != resnet152_cub or weight_count == 58553608)
assert (model != resnet152b_cub or weight_count == 58553608)
assert (model != resnet200_cub or weight_count == 63034632)
assert (model != resnet200b_cub or weight_count == 63034632)
if __name__ == "__main__":
_test()
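# Sketch of direct use outside _test() (shapes shown for channels_last; loading
# pretrained weights requires the corresponding released parameter files):
#   net = resnet18_cub(pretrained=False, data_format="channels_last")
#   x = tf.random.normal((1, 224, 224, 3))
#   y = net(x)   # logits of shape (1, 200)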
|
tests/conditional_processing/tests.py | ni-ning/django | 61,676 | 12614225
from datetime import datetime
from django.test import SimpleTestCase, override_settings
FULL_RESPONSE = 'Test conditional get response'
LAST_MODIFIED = datetime(2007, 10, 21, 23, 21, 47)
LAST_MODIFIED_STR = 'Sun, 21 Oct 2007 23:21:47 GMT'
LAST_MODIFIED_NEWER_STR = 'Mon, 18 Oct 2010 16:56:23 GMT'
LAST_MODIFIED_INVALID_STR = 'Mon, 32 Oct 2010 16:56:23 GMT'
EXPIRED_LAST_MODIFIED_STR = 'Sat, 20 Oct 2007 23:21:47 GMT'
ETAG = '"b4246ffc4f62314ca13147c9d4f76974"'
WEAK_ETAG = 'W/"b4246ffc4f62314ca13147c9d4f76974"' # weak match to ETAG
EXPIRED_ETAG = '"7fae4cd4b0f81e7d2914700043aa8ed6"'
@override_settings(ROOT_URLCONF='conditional_processing.urls')
class ConditionalGet(SimpleTestCase):
def assertFullResponse(self, response, check_last_modified=True, check_etag=True):
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, FULL_RESPONSE.encode())
if response.request['REQUEST_METHOD'] in ('GET', 'HEAD'):
if check_last_modified:
self.assertEqual(response.headers['Last-Modified'], LAST_MODIFIED_STR)
if check_etag:
self.assertEqual(response.headers['ETag'], ETAG)
else:
self.assertNotIn('Last-Modified', response.headers)
self.assertNotIn('ETag', response.headers)
def assertNotModified(self, response):
self.assertEqual(response.status_code, 304)
self.assertEqual(response.content, b'')
def test_without_conditions(self):
response = self.client.get('/condition/')
self.assertFullResponse(response)
def test_if_modified_since(self):
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
response = self.client.get('/condition/')
self.assertNotModified(response)
response = self.client.put('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_NEWER_STR
response = self.client.get('/condition/')
self.assertNotModified(response)
response = self.client.put('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_INVALID_STR
response = self.client.get('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get('/condition/')
self.assertFullResponse(response)
def test_if_unmodified_since(self):
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_STR
response = self.client.get('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_NEWER_STR
response = self.client.get('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_INVALID_STR
response = self.client.get('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get('/condition/')
self.assertEqual(response.status_code, 412)
def test_if_none_match(self):
self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG
response = self.client.get('/condition/')
self.assertNotModified(response)
response = self.client.put('/condition/')
self.assertEqual(response.status_code, 412)
self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG
response = self.client.get('/condition/')
self.assertFullResponse(response)
# Several etags in If-None-Match is a bit exotic but why not?
self.client.defaults['HTTP_IF_NONE_MATCH'] = '%s, %s' % (ETAG, EXPIRED_ETAG)
response = self.client.get('/condition/')
self.assertNotModified(response)
def test_weak_if_none_match(self):
"""
If-None-Match comparisons use weak matching, so weak and strong ETags
with the same value result in a 304 response.
"""
self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG
response = self.client.get('/condition/weak_etag/')
self.assertNotModified(response)
response = self.client.put('/condition/weak_etag/')
self.assertEqual(response.status_code, 412)
self.client.defaults['HTTP_IF_NONE_MATCH'] = WEAK_ETAG
response = self.client.get('/condition/weak_etag/')
self.assertNotModified(response)
response = self.client.put('/condition/weak_etag/')
self.assertEqual(response.status_code, 412)
response = self.client.get('/condition/')
self.assertNotModified(response)
response = self.client.put('/condition/')
self.assertEqual(response.status_code, 412)
def test_all_if_none_match(self):
self.client.defaults['HTTP_IF_NONE_MATCH'] = '*'
response = self.client.get('/condition/')
self.assertNotModified(response)
response = self.client.put('/condition/')
self.assertEqual(response.status_code, 412)
response = self.client.get('/condition/no_etag/')
self.assertFullResponse(response, check_last_modified=False, check_etag=False)
def test_if_match(self):
self.client.defaults['HTTP_IF_MATCH'] = ETAG
response = self.client.put('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_MATCH'] = EXPIRED_ETAG
response = self.client.put('/condition/')
self.assertEqual(response.status_code, 412)
def test_weak_if_match(self):
"""
If-Match comparisons use strong matching, so any comparison involving
        a weak ETag returns a 412 response.
"""
self.client.defaults['HTTP_IF_MATCH'] = ETAG
response = self.client.get('/condition/weak_etag/')
self.assertEqual(response.status_code, 412)
self.client.defaults['HTTP_IF_MATCH'] = WEAK_ETAG
response = self.client.get('/condition/weak_etag/')
self.assertEqual(response.status_code, 412)
response = self.client.get('/condition/')
self.assertEqual(response.status_code, 412)
def test_all_if_match(self):
self.client.defaults['HTTP_IF_MATCH'] = '*'
response = self.client.get('/condition/')
self.assertFullResponse(response)
response = self.client.get('/condition/no_etag/')
self.assertEqual(response.status_code, 412)
def test_both_headers(self):
# see https://tools.ietf.org/html/rfc7232#section-6
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG
response = self.client.get('/condition/')
self.assertNotModified(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG
response = self.client.get('/condition/')
self.assertNotModified(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG
response = self.client.get('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG
response = self.client.get('/condition/')
self.assertFullResponse(response)
def test_both_headers_2(self):
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_STR
self.client.defaults['HTTP_IF_MATCH'] = ETAG
response = self.client.get('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
self.client.defaults['HTTP_IF_MATCH'] = ETAG
response = self.client.get('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
self.client.defaults['HTTP_IF_MATCH'] = EXPIRED_ETAG
response = self.client.get('/condition/')
self.assertEqual(response.status_code, 412)
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_STR
self.client.defaults['HTTP_IF_MATCH'] = EXPIRED_ETAG
response = self.client.get('/condition/')
self.assertEqual(response.status_code, 412)
def test_single_condition_1(self):
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
response = self.client.get('/condition/last_modified/')
self.assertNotModified(response)
response = self.client.get('/condition/etag/')
self.assertFullResponse(response, check_last_modified=False)
def test_single_condition_2(self):
self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG
response = self.client.get('/condition/etag/')
self.assertNotModified(response)
response = self.client.get('/condition/last_modified/')
self.assertFullResponse(response, check_etag=False)
def test_single_condition_3(self):
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get('/condition/last_modified/')
self.assertFullResponse(response, check_etag=False)
def test_single_condition_4(self):
self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG
response = self.client.get('/condition/etag/')
self.assertFullResponse(response, check_last_modified=False)
def test_single_condition_5(self):
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
response = self.client.get('/condition/last_modified2/')
self.assertNotModified(response)
response = self.client.get('/condition/etag2/')
self.assertFullResponse(response, check_last_modified=False)
def test_single_condition_6(self):
self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG
response = self.client.get('/condition/etag2/')
self.assertNotModified(response)
response = self.client.get('/condition/last_modified2/')
self.assertFullResponse(response, check_etag=False)
def test_single_condition_7(self):
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get('/condition/last_modified/')
self.assertEqual(response.status_code, 412)
response = self.client.get('/condition/etag/')
self.assertEqual(response.status_code, 412)
def test_single_condition_8(self):
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_STR
response = self.client.get('/condition/last_modified/')
self.assertFullResponse(response, check_etag=False)
def test_single_condition_9(self):
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get('/condition/last_modified2/')
self.assertEqual(response.status_code, 412)
response = self.client.get('/condition/etag2/')
self.assertEqual(response.status_code, 412)
def test_single_condition_head(self):
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
response = self.client.head('/condition/')
self.assertNotModified(response)
def test_unquoted(self):
"""
The same quoted ETag should be set on the header regardless of whether
etag_func() in condition() returns a quoted or an unquoted ETag.
"""
response_quoted = self.client.get('/condition/etag/')
response_unquoted = self.client.get('/condition/unquoted_etag/')
self.assertEqual(response_quoted['ETag'], response_unquoted['ETag'])
# It's possible that the matching algorithm could use the wrong value even
    # if the ETag header is set correctly (as tested by
# test_unquoted()), so check that the unquoted value is matched.
def test_unquoted_if_none_match(self):
self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG
response = self.client.get('/condition/unquoted_etag/')
self.assertNotModified(response)
response = self.client.put('/condition/unquoted_etag/')
self.assertEqual(response.status_code, 412)
self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG
response = self.client.get('/condition/unquoted_etag/')
self.assertFullResponse(response, check_last_modified=False)
def test_invalid_etag(self):
self.client.defaults['HTTP_IF_NONE_MATCH'] = '"""'
response = self.client.get('/condition/etag/')
self.assertFullResponse(response, check_last_modified=False)
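# For reference, the kind of view these tests exercise can be wired up with
# django.views.decorators.http.condition roughly as below (a sketch, not the
# actual conditional_processing.views module):
#
#   from django.http import HttpResponse
#   from django.views.decorators.http import condition
#
#   @condition(etag_func=lambda request: ETAG,
#              last_modified_func=lambda request: LAST_MODIFIED)
#   def index(request):
#       return HttpResponse(FULL_RESPONSE)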
|
kafka/__init__.py | timgates42/kafka-python | 4,389 | 12614287 | from __future__ import absolute_import
__title__ = 'kafka'
from kafka.version import __version__
__author__ = '<NAME>'
__license__ = 'Apache License 2.0'
__copyright__ = 'Copyright 2016 <NAME>, <NAME>, and Contributors'
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
from kafka.admin import KafkaAdminClient
from kafka.client_async import KafkaClient
from kafka.consumer import KafkaConsumer
from kafka.consumer.subscription_state import ConsumerRebalanceListener
from kafka.producer import KafkaProducer
from kafka.conn import BrokerConnection
from kafka.serializer import Serializer, Deserializer
from kafka.structs import TopicPartition, OffsetAndMetadata
__all__ = [
'BrokerConnection', 'ConsumerRebalanceListener', 'KafkaAdminClient',
'KafkaClient', 'KafkaConsumer', 'KafkaProducer',
]
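# Typical use of the re-exported public API (broker address is illustrative):
#   producer = KafkaProducer(bootstrap_servers='localhost:9092')
#   producer.send('my-topic', b'raw_bytes')
#   consumer = KafkaConsumer('my-topic', bootstrap_servers='localhost:9092')
#   for message in consumer:
#       print(message.value)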
|
contrib/AutoNUE/core/infer_ensemble.py | JamesLim-sy/PaddleSeg | 4,708 | 12614290
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
from itertools import combinations
import numpy as np
import paddle
import paddle.nn.functional as F
def get_reverse_list(ori_shape, transforms):
"""
get reverse list of transform.
Args:
ori_shape (list): Origin shape of image.
transforms (list): List of transform.
Returns:
        list: List of tuple, there are two formats:
('resize', (h, w)) The image shape before resize,
('padding', (h, w)) The image shape before padding.
"""
reverse_list = []
h, w = ori_shape[0], ori_shape[1]
for op in transforms:
if op.__class__.__name__ in ['Resize', 'ResizeByLong']:
reverse_list.append(('resize', (h, w)))
h, w = op.target_size[0], op.target_size[1]
if op.__class__.__name__ in ['Padding']:
reverse_list.append(('padding', (h, w)))
w, h = op.target_size[0], op.target_size[1]
if op.__class__.__name__ in ['LimitLong']:
long_edge = max(h, w)
short_edge = min(h, w)
if ((op.max_long is not None) and (long_edge > op.max_long)):
reverse_list.append(('resize', (h, w)))
                # scale the short edge using the original long edge before clamping it
                short_edge = int(round(short_edge * op.max_long / long_edge))
                long_edge = op.max_long
elif ((op.min_long is not None) and (long_edge < op.min_long)):
reverse_list.append(('resize', (h, w)))
                short_edge = int(round(short_edge * op.min_long / long_edge))
                long_edge = op.min_long
if h > w:
h = long_edge
w = short_edge
else:
w = long_edge
h = short_edge
return reverse_list
def reverse_transform(pred, ori_shape, transforms):
"""recover pred to origin shape"""
reverse_list = get_reverse_list(ori_shape, transforms)
for item in reverse_list[::-1]:
if item[0] == 'resize':
h, w = item[1][0], item[1][1]
pred = F.interpolate(pred, (h, w), mode='nearest')
elif item[0] == 'padding':
h, w = item[1][0], item[1][1]
pred = pred[:, :, 0:h, 0:w]
else:
raise Exception("Unexpected info '{}' in im_info".format(item[0]))
return pred
def flip_combination(flip_horizontal=False, flip_vertical=False):
"""
Get flip combination.
Args:
flip_horizontal (bool): Whether to flip horizontally. Default: False.
flip_vertical (bool): Whether to flip vertically. Default: False.
Returns:
list: List of tuple. The first element of tuple is whether to flip horizontally,
and the second is whether to flip vertically.
"""
flip_comb = [(False, False)]
if flip_horizontal:
flip_comb.append((True, False))
if flip_vertical:
flip_comb.append((False, True))
if flip_horizontal:
flip_comb.append((True, True))
return flip_comb
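# For example, flip_combination(True, True) returns
# [(False, False), (True, False), (False, True), (True, True)]:
# the identity, each single flip, and the combined flip.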
def tensor_flip(x, flip):
"""Flip tensor according directions"""
if flip[0]:
x = x[:, :, :, ::-1]
if flip[1]:
x = x[:, :, ::-1, :]
return x
def inference(model,
model_hard,
im,
ori_shape=None,
transforms=None,
is_slide=False,
stride=None,
crop_size=None):
"""
Inference for image.
Args:
model (paddle.nn.Layer): model to get logits of image.
im (Tensor): the input image.
ori_shape (list): Origin shape of image.
transforms (list): Transforms for image.
is_slide (bool): Whether to infer by sliding window. Default: False.
        crop_size (tuple|list). The size of the sliding window, (w, h). It should be provided if is_slide is True.
        stride (tuple|list). The size of the stride, (w, h). It should be provided if is_slide is True.
Returns:
Tensor: If ori_shape is not None, a prediction with shape (1, 1, h, w) is returned.
If ori_shape is None, a logit with shape (1, num_classes, h, w) is returned.
"""
if not is_slide:
# logits = model(im)[0]
# logits_hard = model_hard(im)[0]
logits = F.softmax(model(im)[0], axis=1)
logits_hard = F.softmax(model_hard(im)[0], axis=1)
# logit_hard = logits.clone()
# for ii in range(logits.shape[0]):
# logit_hard[ii] = paddle.scatter(logit_hard[ii], paddle.to_tensor([3, 7, 12, 14, 15, 16, 18, 19, 20, 21]), logits_hard[ii][1:], overwrite=True, name=None)
logit = (logits + logits_hard) / 2
# logit = logit_hard
if ori_shape is not None:
pred = paddle.argmax(logit, axis=1, keepdim=True, dtype='int32')
pred = reverse_transform(pred, ori_shape, transforms)
return pred
else:
return logit
def aug_inference(model,
model_hard,
im,
ori_shape,
transforms,
scales=1.0,
flip_horizontal=False,
flip_vertical=False,
is_slide=False,
stride=None,
crop_size=None):
"""
Infer with augmentation.
Args:
model (paddle.nn.Layer): model to get logits of image.
im (Tensor): the input image.
ori_shape (list): Origin shape of image.
transforms (list): Transforms for image.
scales (float|tuple|list): Scales for resize. Default: 1.
flip_horizontal (bool): Whether to flip horizontally. Default: False.
flip_vertical (bool): Whether to flip vertically. Default: False.
        is_slide (bool): Whether to infer by sliding window. Default: False.
        crop_size (tuple|list). The size of the sliding window, (w, h). It should be provided if is_slide is True.
        stride (tuple|list). The size of the stride, (w, h). It should be provided if is_slide is True.
Returns:
Tensor: Prediction of image with shape (1, 1, h, w) is returned.
"""
if isinstance(scales, float):
scales = [scales]
elif not isinstance(scales, (tuple, list)):
raise TypeError(
'`scales` expects float/tuple/list type, but received {}'.format(
type(scales)))
final_logit = 0
h_input, w_input = im.shape[-2], im.shape[-1]
flip_comb = flip_combination(flip_horizontal, flip_vertical)
for scale in scales:
h = int(h_input * scale + 0.5)
w = int(w_input * scale + 0.5)
im = F.interpolate(im, (h, w), mode='bilinear')
for flip in flip_comb:
im_flip = tensor_flip(im, flip)
logit = inference(
model,
model_hard,
im_flip,
is_slide=is_slide,
crop_size=crop_size,
stride=stride)
logit = tensor_flip(logit, flip)
logit = F.interpolate(logit, (h_input, w_input), mode='bilinear')
# logit = F.softmax(logit, axis=1)
final_logit = final_logit + logit
pred = paddle.argmax(final_logit, axis=1, keepdim=True, dtype='int32')
pred = F.interpolate(pred, (ori_shape[0], ori_shape[1]), mode='nearest')
return pred
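# Sketch of a typical call (model and transform names are illustrative):
#   pred = aug_inference(model, model_hard, im, ori_shape=(1080, 1920),
#                        transforms=val_transforms, scales=[0.75, 1.0, 1.25],
#                        flip_horizontal=True)
#   # pred: int32 tensor of shape (N, 1, 1080, 1920) holding per-pixel class ids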
|
layers/cider_scorer.py | quangvy2703/Up-Down-Captioner | 232 | 12614300
#!/usr/bin/env python
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# Modified to be more amenable to efficiently scoring minibatches during RNN training.
from collections import defaultdict
import numpy as np
import math
from scripts.preprocess_coco import *
def precook(words, n=4, out=False):
"""
    Takes a list of tokens as input and returns an object that can be given to
    either cook_refs or cook_test.
    :param words: list of str : tokenized sentence to be converted into ngrams
    :param n: int : number of ngrams for which representation is calculated
    :return: term frequency vector for occurring ngrams
"""
counts = defaultdict(int)
for k in xrange(1,n+1):
for i in xrange(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] += 1
return counts
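# e.g. precook(['a', 'cat', 'sat'], n=2) counts the unigrams ('a',), ('cat',),
# ('sat',) and the bigrams ('a', 'cat'), ('cat', 'sat'), each with frequency 1.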
def cook_refs(refs, n=4): ## lhuang: oracle will call with "average"
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.
:param refs: list of string : reference sentences for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (list of dict)
'''
return [precook(split_sentence(ref), n) for ref in refs]
def cook_refs_eos(refs, n=4):
return [precook(split_sentence(ref)+['.'], n) for ref in refs]
def cook_test(test, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.
:param test: list of string : hypothesis sentence for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (dict)
'''
return precook(test, n, True)
class CiderScorer(object):
"""CIDEr scorer.
"""
def __init__(self, gt_paths, n=4, sigma=6.0, include_eos=False):
''' singular instance '''
self.n = n
self.sigma = sigma
# process reference captions
self.crefs = {}
for gt_path in gt_paths:
coco = COCO(gt_path)
for imgId in coco.getImgIds():
assert imgId not in self.crefs
refs = [item['caption'] for item in coco.imgToAnns[imgId]]
if include_eos:
self.crefs[imgId] = cook_refs_eos(refs)
else:
self.crefs[imgId] = cook_refs(refs)
# compute idf
self.document_frequency = defaultdict(float)
for refs in self.crefs.values():
# refs, k ref captions of one image
for ngram in set([ngram for ref in refs for (ngram,count) in ref.iteritems()]):
self.document_frequency[ngram] += 1
# compute log reference length
self.ref_len = np.log(float(len(self.crefs)))
def compute_scores(self, image_ids, captions):
def counts2vec(cnts):
"""
Function maps counts of ngram to vector of tfidf weights.
The function returns vec, an array of dictionary that store mapping of n-gram and tf-idf weights.
The n-th entry of array denotes length of n-grams.
:param cnts:
:return: vec (array of dict), norm (array of float), length (int)
"""
vec = [defaultdict(float) for _ in range(self.n)]
length = 0
norm = [0.0 for _ in range(self.n)]
for (ngram,term_freq) in cnts.iteritems():
# give word count 1 if it doesn't appear in reference corpus
df = np.log(max(1.0, self.document_frequency[ngram]))
# ngram index
n = len(ngram)-1
# tf (term_freq) * idf (precomputed idf) for n-grams
vec[n][ngram] = float(term_freq)*(self.ref_len - df)
# compute norm for the vector. the norm will be used for computing similarity
norm[n] += pow(vec[n][ngram], 2)
if n == 1:
length += term_freq
norm = [np.sqrt(n) for n in norm]
return vec, norm, length
def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref):
'''
Compute the cosine similarity of two vectors.
:param vec_hyp: array of dictionary for vector corresponding to hypothesis
:param vec_ref: array of dictionary for vector corresponding to reference
:param norm_hyp: array of float for vector corresponding to hypothesis
:param norm_ref: array of float for vector corresponding to reference
:param length_hyp: int containing length of hypothesis
:param length_ref: int containing length of reference
:return: array of score for each n-grams cosine similarity
'''
delta = float(length_hyp - length_ref)
            # measure cosine similarity
val = np.array([0.0 for _ in range(self.n)])
for n in range(self.n):
# ngram
for (ngram,count) in vec_hyp[n].iteritems():
# vrama91 : added clipping
val[n] += min(vec_hyp[n][ngram], vec_ref[n][ngram]) * vec_ref[n][ngram]
if (norm_hyp[n] != 0) and (norm_ref[n] != 0):
val[n] /= (norm_hyp[n]*norm_ref[n])
assert(not math.isnan(val[n]))
# vrama91: added a length based gaussian penalty
val[n] *= np.e**(-(delta**2)/(2*self.sigma**2))
return val
scores = []
for imgId, test in zip(image_ids,captions):
refs = self.crefs[imgId]
test = cook_test(test)
# compute vector for test captions
vec, norm, length = counts2vec(test)
# compute vector for ref captions
score = np.array([0.0 for _ in range(self.n)])
for ref in refs:
vec_ref, norm_ref, length_ref = counts2vec(ref)
score += sim(vec, vec_ref, norm, norm_ref, length, length_ref)
# change by vrama91 - mean of ngram scores, instead of sum
score_avg = np.mean(score)
# divide by number of references
score_avg /= len(refs)
# multiply score by 10
score_avg *= 10.0
# append score of an image to the score list
scores.append(score_avg)
return np.array(scores)
if __name__ == "__main__":
gt_paths = ['data/coco/captions_val2014.json']
cider = CiderScorer(gt_paths)
# Inputs should be lower case, tokenized, without full stop (to match training tokenization)
captions = [
['a', 'brown', 'teddy', 'bear', 'sitting', 'in', 'a', 'basket'],
['a', 'motorcycle', 'parked', 'on', 'the', 'side', 'of', 'a', 'road'],
['a', 'dog', 'sitting', 'on', 'a', 'bench', 'in', 'a', 'city']
]
image_ids = [42,73,74]
scores = cider.compute_scores(image_ids, captions)
np.testing.assert_approx_equal(scores[0], 0.087433, significant=4)
np.testing.assert_approx_equal(scores[1], 1.0032, significant=4)
np.testing.assert_approx_equal(scores[2], 0.4705, significant=4)
print scores
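# The scorer can be reused for any further (image_id, tokens) pairs, as long as
# the image ids exist in the reference annotations loaded above, e.g.:
#   cider.compute_scores([42], [['a', 'brown', 'teddy', 'bear']])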
|
train.py | uoguelph-mlrg/theano_alexnet | 248 | 12614302 | import sys
import time
from multiprocessing import Process, Queue
import yaml
import numpy as np
import zmq
import pycuda.driver as drv
sys.path.append('./lib')
from tools import (save_weights, load_weights,
save_momentums, load_momentums)
from train_funcs import (unpack_configs, adjust_learning_rate,
get_val_error_loss, get_rand3d, train_model_wrap,
proc_configs)
def train_net(config):
# UNPACK CONFIGS
(flag_para_load, train_filenames, val_filenames,
train_labels, val_labels, img_mean) = unpack_configs(config)
# pycuda set up
drv.init()
dev = drv.Device(int(config['gpu'][-1]))
ctx = dev.make_context()
if flag_para_load:
# zmq set up
sock = zmq.Context().socket(zmq.PAIR)
sock.connect('tcp://localhost:{0}'.format(config['sock_data']))
load_send_queue = config['queue_t2l']
load_recv_queue = config['queue_l2t']
else:
load_send_queue = None
load_recv_queue = None
import theano.sandbox.cuda
theano.sandbox.cuda.use(config['gpu'])
import theano
theano.config.on_unused_input = 'warn'
from layers import DropoutLayer
from alex_net import AlexNet, compile_models
import theano.misc.pycuda_init
import theano.misc.pycuda_utils
## BUILD NETWORK ##
model = AlexNet(config)
layers = model.layers
batch_size = model.batch_size
## COMPILE FUNCTIONS ##
(train_model, validate_model, train_error, learning_rate,
shared_x, shared_y, rand_arr, vels) = compile_models(model, config)
######################### TRAIN MODEL ################################
print '... training'
if flag_para_load:
# pass ipc handle and related information
gpuarray_batch = theano.misc.pycuda_utils.to_gpuarray(
shared_x.container.value)
h = drv.mem_get_ipc_handle(gpuarray_batch.ptr)
sock.send_pyobj((gpuarray_batch.shape, gpuarray_batch.dtype, h))
load_send_queue.put(img_mean)
n_train_batches = len(train_filenames)
minibatch_range = range(n_train_batches)
# Start Training Loop
epoch = 0
step_idx = 0
val_record = []
while epoch < config['n_epochs']:
epoch = epoch + 1
if config['shuffle']:
np.random.shuffle(minibatch_range)
if config['resume_train'] and epoch == 1:
load_epoch = config['load_epoch']
load_weights(layers, config['weights_dir'], load_epoch)
lr_to_load = np.load(
config['weights_dir'] + 'lr_' + str(load_epoch) + '.npy')
val_record = list(
np.load(config['weights_dir'] + 'val_record.npy'))
learning_rate.set_value(lr_to_load)
load_momentums(vels, config['weights_dir'], load_epoch)
epoch = load_epoch + 1
if flag_para_load:
# send the initial message to load data, before each epoch
load_send_queue.put(str(train_filenames[minibatch_range[0]]))
load_send_queue.put(get_rand3d())
# clear the sync before 1st calc
load_send_queue.put('calc_finished')
count = 0
for minibatch_index in minibatch_range:
num_iter = (epoch - 1) * n_train_batches + count
count = count + 1
if count == 1:
s = time.time()
if count == 20:
e = time.time()
print "time per 20 iter:", (e - s)
cost_ij = train_model_wrap(train_model, shared_x,
shared_y, rand_arr, img_mean,
count, minibatch_index,
minibatch_range, batch_size,
train_filenames, train_labels,
flag_para_load,
config['batch_crop_mirror'],
send_queue=load_send_queue,
recv_queue=load_recv_queue)
if num_iter % config['print_freq'] == 0:
print 'training @ iter = ', num_iter
print 'training cost:', cost_ij
if config['print_train_error']:
print 'training error rate:', train_error()
if flag_para_load and (count < len(minibatch_range)):
load_send_queue.put('calc_finished')
############### Test on Validation Set ##################
DropoutLayer.SetDropoutOff()
this_validation_error, this_validation_loss = get_val_error_loss(
rand_arr, shared_x, shared_y,
val_filenames, val_labels,
flag_para_load, img_mean,
batch_size, validate_model,
send_queue=load_send_queue, recv_queue=load_recv_queue)
print('epoch %i: validation loss %f ' %
(epoch, this_validation_loss))
print('epoch %i: validation error %f %%' %
(epoch, this_validation_error * 100.))
val_record.append([this_validation_error, this_validation_loss])
np.save(config['weights_dir'] + 'val_record.npy', val_record)
DropoutLayer.SetDropoutOn()
############################################
# Adapt Learning Rate
step_idx = adjust_learning_rate(config, epoch, step_idx,
val_record, learning_rate)
# Save weights
if epoch % config['snapshot_freq'] == 0:
save_weights(layers, config['weights_dir'], epoch)
np.save(config['weights_dir'] + 'lr_' + str(epoch) + '.npy',
learning_rate.get_value())
save_momentums(vels, config['weights_dir'], epoch)
print('Optimization complete.')
if __name__ == '__main__':
with open('config.yaml', 'r') as f:
config = yaml.load(f)
with open('spec_1gpu.yaml', 'r') as f:
config = dict(config.items() + yaml.load(f).items())
config = proc_configs(config)
if config['para_load']:
from proc_load import fun_load
config['queue_l2t'] = Queue(1)
config['queue_t2l'] = Queue(1)
train_proc = Process(target=train_net, args=(config,))
load_proc = Process(
target=fun_load, args=(config, config['sock_data']))
train_proc.start()
load_proc.start()
train_proc.join()
load_proc.join()
else:
train_proc = Process(target=train_net, args=(config,))
train_proc.start()
train_proc.join()
|
dbReports/iondb/product_integration/urls.py | konradotto/TS | 125 | 12614308
# Copyright (C) 2017 Ion Torrent Systems, Inc. All Rights Reserved
from django.conf.urls import patterns, url, include
from tastypie.api import Api
from iondb.product_integration import api
v1_api = Api(api_name="v1")
v1_api.register(api.DeepLaserResponseResource())
urlpatterns = patterns(
"iondb.product_integration",
url(r"^tfc/configure/$", "views.configure", name="tfc_configure"),
url(r"^tfc/configure/(?P<pk>\d+)/delete$", "views.delete", name="tfc_delete"),
url(r"^api/", include(v1_api.urls)),
)
|
_solved/solutions/04-spatial-joins7.py | lleondia/geopandas-tutorial | 341 | 12614309 | # Convert the series to a DataFrame and specify column name
trees_by_district = trees_by_district.to_frame(name='n_trees') |
python-midonetclient/src/midonetclient/mirror.py | yantarou/midonet | 221 | 12614311 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2015 Midokura PTE LTD.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from midonetclient import condition
from midonetclient import resource_base
from midonetclient import vendor_media_type
class Mirror(resource_base.ResourceBase):
media_type = vendor_media_type.APPLICATION_MIRROR_JSON
def __init__(self, uri, dto, auth):
super(Mirror, self).__init__(uri, dto, auth)
def get_id(self):
return self.dto['id']
def get_to_port(self):
return self.dto['toPortId']
def to_port(self, to_port_id):
self.dto['toPortId'] = to_port_id
return self
def get_conditions(self):
return self.dto['conditions']
def set_conditions(self, conditions):
self.dto['conditions'] = conditions
return self
def add_condition(self):
return condition.Condition('fake-URI', {}, 'fake-AUTH')
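# Sketch of how the builder-style setters are typically chained (the ids are
# placeholders, not real MidoNet object ids):
#   mirror = Mirror(uri, {}, auth)
#   mirror.to_port('port-uuid').set_conditions([...])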
|
homeassistant/components/zha/number.py | mib1185/core | 30,023 | 12614336
"""Support for ZHA AnalogOutput cluster."""
from __future__ import annotations
import functools
import logging
from typing import TYPE_CHECKING
import zigpy.exceptions
from zigpy.zcl.foundation import Status
from homeassistant.components.number import NumberEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .core import discovery
from .core.const import (
CHANNEL_ANALOG_OUTPUT,
CHANNEL_LEVEL,
DATA_ZHA,
SIGNAL_ADD_ENTITIES,
SIGNAL_ATTR_UPDATED,
)
from .core.registries import ZHA_ENTITIES
from .entity import ZhaEntity
if TYPE_CHECKING:
from .core.channels.base import ZigbeeChannel
from .core.device import ZHADevice
_LOGGER = logging.getLogger(__name__)
STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, Platform.NUMBER)
CONFIG_DIAGNOSTIC_MATCH = functools.partial(
ZHA_ENTITIES.config_diagnostic_match, Platform.NUMBER
)
UNITS = {
0: "Square-meters",
1: "Square-feet",
2: "Milliamperes",
3: "Amperes",
4: "Ohms",
5: "Volts",
6: "Kilo-volts",
7: "Mega-volts",
8: "Volt-amperes",
9: "Kilo-volt-amperes",
10: "Mega-volt-amperes",
11: "Volt-amperes-reactive",
12: "Kilo-volt-amperes-reactive",
13: "Mega-volt-amperes-reactive",
14: "Degrees-phase",
15: "Power-factor",
16: "Joules",
17: "Kilojoules",
18: "Watt-hours",
19: "Kilowatt-hours",
20: "BTUs",
21: "Therms",
22: "Ton-hours",
23: "Joules-per-kilogram-dry-air",
24: "BTUs-per-pound-dry-air",
25: "Cycles-per-hour",
26: "Cycles-per-minute",
27: "Hertz",
28: "Grams-of-water-per-kilogram-dry-air",
29: "Percent-relative-humidity",
30: "Millimeters",
31: "Meters",
32: "Inches",
33: "Feet",
34: "Watts-per-square-foot",
35: "Watts-per-square-meter",
36: "Lumens",
37: "Luxes",
38: "Foot-candles",
39: "Kilograms",
40: "Pounds-mass",
41: "Tons",
42: "Kilograms-per-second",
43: "Kilograms-per-minute",
44: "Kilograms-per-hour",
45: "Pounds-mass-per-minute",
46: "Pounds-mass-per-hour",
47: "Watts",
48: "Kilowatts",
49: "Megawatts",
50: "BTUs-per-hour",
51: "Horsepower",
52: "Tons-refrigeration",
53: "Pascals",
54: "Kilopascals",
55: "Bars",
56: "Pounds-force-per-square-inch",
57: "Centimeters-of-water",
58: "Inches-of-water",
59: "Millimeters-of-mercury",
60: "Centimeters-of-mercury",
61: "Inches-of-mercury",
62: "°C",
63: "°K",
64: "°F",
65: "Degree-days-Celsius",
66: "Degree-days-Fahrenheit",
67: "Years",
68: "Months",
69: "Weeks",
70: "Days",
71: "Hours",
72: "Minutes",
73: "Seconds",
74: "Meters-per-second",
75: "Kilometers-per-hour",
76: "Feet-per-second",
77: "Feet-per-minute",
78: "Miles-per-hour",
79: "Cubic-feet",
80: "Cubic-meters",
81: "Imperial-gallons",
82: "Liters",
83: "Us-gallons",
84: "Cubic-feet-per-minute",
85: "Cubic-meters-per-second",
86: "Imperial-gallons-per-minute",
87: "Liters-per-second",
88: "Liters-per-minute",
89: "Us-gallons-per-minute",
90: "Degrees-angular",
91: "Degrees-Celsius-per-hour",
92: "Degrees-Celsius-per-minute",
93: "Degrees-Fahrenheit-per-hour",
94: "Degrees-Fahrenheit-per-minute",
95: None,
96: "Parts-per-million",
97: "Parts-per-billion",
98: "%",
99: "Percent-per-second",
100: "Per-minute",
101: "Per-second",
102: "Psi-per-Degree-Fahrenheit",
103: "Radians",
104: "Revolutions-per-minute",
105: "Currency1",
106: "Currency2",
107: "Currency3",
108: "Currency4",
109: "Currency5",
110: "Currency6",
111: "Currency7",
112: "Currency8",
113: "Currency9",
114: "Currency10",
115: "Square-inches",
116: "Square-centimeters",
117: "BTUs-per-pound",
118: "Centimeters",
119: "Pounds-mass-per-second",
120: "Delta-Degrees-Fahrenheit",
121: "Delta-Degrees-Kelvin",
122: "Kilohms",
123: "Megohms",
124: "Millivolts",
125: "Kilojoules-per-kilogram",
126: "Megajoules",
127: "Joules-per-degree-Kelvin",
128: "Joules-per-kilogram-degree-Kelvin",
129: "Kilohertz",
130: "Megahertz",
131: "Per-hour",
132: "Milliwatts",
133: "Hectopascals",
134: "Millibars",
135: "Cubic-meters-per-hour",
136: "Liters-per-hour",
137: "Kilowatt-hours-per-square-meter",
138: "Kilowatt-hours-per-square-foot",
139: "Megajoules-per-square-meter",
140: "Megajoules-per-square-foot",
141: "Watts-per-square-meter-Degree-Kelvin",
142: "Cubic-feet-per-second",
143: "Percent-obscuration-per-foot",
144: "Percent-obscuration-per-meter",
145: "Milliohms",
146: "Megawatt-hours",
147: "Kilo-BTUs",
148: "Mega-BTUs",
149: "Kilojoules-per-kilogram-dry-air",
150: "Megajoules-per-kilogram-dry-air",
151: "Kilojoules-per-degree-Kelvin",
152: "Megajoules-per-degree-Kelvin",
153: "Newton",
154: "Grams-per-second",
155: "Grams-per-minute",
156: "Tons-per-hour",
157: "Kilo-BTUs-per-hour",
158: "Hundredths-seconds",
159: "Milliseconds",
160: "Newton-meters",
161: "Millimeters-per-second",
162: "Millimeters-per-minute",
163: "Meters-per-minute",
164: "Meters-per-hour",
165: "Cubic-meters-per-minute",
166: "Meters-per-second-per-second",
167: "Amperes-per-meter",
168: "Amperes-per-square-meter",
169: "Ampere-square-meters",
170: "Farads",
171: "Henrys",
172: "Ohm-meters",
173: "Siemens",
174: "Siemens-per-meter",
175: "Teslas",
176: "Volts-per-degree-Kelvin",
177: "Volts-per-meter",
178: "Webers",
179: "Candelas",
180: "Candelas-per-square-meter",
181: "Kelvins-per-hour",
182: "Kelvins-per-minute",
183: "Joule-seconds",
185: "Square-meters-per-Newton",
186: "Kilogram-per-cubic-meter",
187: "Newton-seconds",
188: "Newtons-per-meter",
189: "Watts-per-meter-per-degree-Kelvin",
}
ICONS = {
0: "mdi:temperature-celsius",
1: "mdi:water-percent",
2: "mdi:gauge",
3: "mdi:speedometer",
4: "mdi:percent",
5: "mdi:air-filter",
6: "mdi:fan",
7: "mdi:flash",
8: "mdi:current-ac",
9: "mdi:flash",
10: "mdi:flash",
11: "mdi:flash",
12: "mdi:counter",
13: "mdi:thermometer-lines",
14: "mdi:timer",
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Zigbee Home Automation Analog Output from config entry."""
entities_to_create = hass.data[DATA_ZHA][Platform.NUMBER]
unsub = async_dispatcher_connect(
hass,
SIGNAL_ADD_ENTITIES,
functools.partial(
discovery.async_add_entities,
async_add_entities,
entities_to_create,
),
)
config_entry.async_on_unload(unsub)
@STRICT_MATCH(channel_names=CHANNEL_ANALOG_OUTPUT)
class ZhaNumber(ZhaEntity, NumberEntity):
"""Representation of a ZHA Number entity."""
def __init__(self, unique_id, zha_device, channels, **kwargs):
"""Init this entity."""
super().__init__(unique_id, zha_device, channels, **kwargs)
self._analog_output_channel = self.cluster_channels.get(CHANNEL_ANALOG_OUTPUT)
async def async_added_to_hass(self):
"""Run when about to be added to hass."""
await super().async_added_to_hass()
self.async_accept_signal(
self._analog_output_channel, SIGNAL_ATTR_UPDATED, self.async_set_state
)
@property
def native_value(self):
"""Return the current value."""
return self._analog_output_channel.present_value
@property
def native_min_value(self):
"""Return the minimum value."""
min_present_value = self._analog_output_channel.min_present_value
if min_present_value is not None:
return min_present_value
return 0
@property
def native_max_value(self):
"""Return the maximum value."""
max_present_value = self._analog_output_channel.max_present_value
if max_present_value is not None:
return max_present_value
return 1023
@property
def native_step(self):
"""Return the value step."""
resolution = self._analog_output_channel.resolution
if resolution is not None:
return resolution
return super().native_step
@property
def name(self):
"""Return the name of the number entity."""
description = self._analog_output_channel.description
if description is not None and len(description) > 0:
return f"{super().name} {description}"
return super().name
@property
def icon(self):
"""Return the icon to be used for this entity."""
application_type = self._analog_output_channel.application_type
if application_type is not None:
return ICONS.get(application_type >> 16, super().icon)
return super().icon
@property
def native_unit_of_measurement(self):
"""Return the unit the value is expressed in."""
engineering_units = self._analog_output_channel.engineering_units
return UNITS.get(engineering_units)
@callback
def async_set_state(self, attr_id, attr_name, value):
"""Handle value update from channel."""
self.async_write_ha_state()
async def async_set_native_value(self, value):
"""Update the current value from HA."""
num_value = float(value)
if await self._analog_output_channel.async_set_present_value(num_value):
self.async_write_ha_state()
async def async_update(self):
"""Attempt to retrieve the state of the entity."""
await super().async_update()
_LOGGER.debug("polling current state")
if self._analog_output_channel:
value = await self._analog_output_channel.get_attribute_value(
"present_value", from_cache=False
)
_LOGGER.debug("read value=%s", value)
class ZHANumberConfigurationEntity(ZhaEntity, NumberEntity):
"""Representation of a ZHA number configuration entity."""
_attr_entity_category = EntityCategory.CONFIG
_attr_native_step: float = 1.0
_zcl_attribute: str
@classmethod
def create_entity(
cls,
unique_id: str,
zha_device: ZHADevice,
channels: list[ZigbeeChannel],
**kwargs,
) -> ZhaEntity | None:
"""Entity Factory.
Return entity if it is a supported configuration, otherwise return None
"""
channel = channels[0]
if (
cls._zcl_attribute in channel.cluster.unsupported_attributes
or channel.cluster.get(cls._zcl_attribute) is None
):
_LOGGER.debug(
"%s is not supported - skipping %s entity creation",
cls._zcl_attribute,
cls.__name__,
)
return None
return cls(unique_id, zha_device, channels, **kwargs)
def __init__(
self,
unique_id: str,
zha_device: ZHADevice,
channels: list[ZigbeeChannel],
**kwargs,
) -> None:
"""Init this number configuration entity."""
self._channel: ZigbeeChannel = channels[0]
super().__init__(unique_id, zha_device, channels, **kwargs)
@property
def native_value(self) -> float:
"""Return the current value."""
return self._channel.cluster.get(self._zcl_attribute)
async def async_set_native_value(self, value: float) -> None:
"""Update the current value from HA."""
try:
res = await self._channel.cluster.write_attributes(
{self._zcl_attribute: int(value)}
)
except zigpy.exceptions.ZigbeeException as ex:
self.error("Could not set value: %s", ex)
return
if not isinstance(res, Exception) and all(
record.status == Status.SUCCESS for record in res[0]
):
self.async_write_ha_state()
async def async_update(self) -> None:
"""Attempt to retrieve the state of the entity."""
await super().async_update()
_LOGGER.debug("polling current state")
if self._channel:
value = await self._channel.get_attribute_value(
self._zcl_attribute, from_cache=False
)
_LOGGER.debug("read value=%s", value)
@CONFIG_DIAGNOSTIC_MATCH(channel_names="opple_cluster", models={"lumi.motion.ac02"})
class AqaraMotionDetectionInterval(
ZHANumberConfigurationEntity, id_suffix="detection_interval"
):
"""Representation of a ZHA on off transition time configuration entity."""
_attr_native_min_value: float = 2
_attr_native_max_value: float = 65535
_zcl_attribute: str = "detection_interval"
@CONFIG_DIAGNOSTIC_MATCH(channel_names=CHANNEL_LEVEL)
class OnOffTransitionTimeConfigurationEntity(
ZHANumberConfigurationEntity, id_suffix="on_off_transition_time"
):
"""Representation of a ZHA on off transition time configuration entity."""
_attr_native_min_value: float = 0x0000
_attr_native_max_value: float = 0xFFFF
_zcl_attribute: str = "on_off_transition_time"
@CONFIG_DIAGNOSTIC_MATCH(channel_names=CHANNEL_LEVEL)
class OnLevelConfigurationEntity(ZHANumberConfigurationEntity, id_suffix="on_level"):
"""Representation of a ZHA on level configuration entity."""
_attr_native_min_value: float = 0x00
_attr_native_max_value: float = 0xFF
_zcl_attribute: str = "on_level"
@CONFIG_DIAGNOSTIC_MATCH(channel_names=CHANNEL_LEVEL)
class OnTransitionTimeConfigurationEntity(
ZHANumberConfigurationEntity, id_suffix="on_transition_time"
):
"""Representation of a ZHA on transition time configuration entity."""
_attr_native_min_value: float = 0x0000
_attr_native_max_value: float = 0xFFFE
_zcl_attribute: str = "on_transition_time"
@CONFIG_DIAGNOSTIC_MATCH(channel_names=CHANNEL_LEVEL)
class OffTransitionTimeConfigurationEntity(
ZHANumberConfigurationEntity, id_suffix="off_transition_time"
):
"""Representation of a ZHA off transition time configuration entity."""
_attr_native_min_value: float = 0x0000
_attr_native_max_value: float = 0xFFFE
_zcl_attribute: str = "off_transition_time"
@CONFIG_DIAGNOSTIC_MATCH(channel_names=CHANNEL_LEVEL)
class DefaultMoveRateConfigurationEntity(
ZHANumberConfigurationEntity, id_suffix="default_move_rate"
):
"""Representation of a ZHA default move rate configuration entity."""
_attr_native_min_value: float = 0x00
_attr_native_max_value: float = 0xFE
_zcl_attribute: str = "default_move_rate"
@CONFIG_DIAGNOSTIC_MATCH(channel_names=CHANNEL_LEVEL)
class StartUpCurrentLevelConfigurationEntity(
ZHANumberConfigurationEntity, id_suffix="start_up_current_level"
):
"""Representation of a ZHA startup current level configuration entity."""
_attr_native_min_value: float = 0x00
_attr_native_max_value: float = 0xFF
_zcl_attribute: str = "start_up_current_level"
@CONFIG_DIAGNOSTIC_MATCH(
channel_names="tuya_manufacturer",
manufacturers={
"_TZE200_htnnfasr",
},
)
class TimerDurationMinutes(ZHANumberConfigurationEntity, id_suffix="timer_duration"):
"""Representation of a ZHA timer duration configuration entity."""
_attr_entity_category = EntityCategory.CONFIG
_attr_icon: str = ICONS[14]
_attr_native_min_value: float = 0x00
_attr_native_max_value: float = 0x257
_attr_unit_of_measurement: str | None = UNITS[72]
_zcl_attribute: str = "timer_duration"
|
pytorch_toolbelt/losses/wing_loss.py | mohitktanwr/toolkits | 1,281 | 12614349 | <reponame>mohitktanwr/toolkits
from torch.nn.modules.loss import _Loss
from . import functional as F
__all__ = ["WingLoss"]
class WingLoss(_Loss):
def __init__(self, width=5, curvature=0.5, reduction="mean"):
super(WingLoss, self).__init__(reduction=reduction)
self.width = width
self.curvature = curvature
def forward(self, prediction, target):
return F.wing_loss(prediction, target, self.width, self.curvature, self.reduction)
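# Illustrative usage sketch (not part of the original module); it assumes
# `functional.wing_loss` accepts (prediction, target, width, curvature, reduction)
# exactly as used in `forward` above:
#
#   import torch
#   criterion = WingLoss(width=5, curvature=0.5)          # reduction="mean" by default
#   pred, target = torch.randn(8, 68, 2), torch.randn(8, 68, 2)
#   loss = criterion(pred, target)                         # scalar loss tensor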
|
tests/unittests/dataframe/test_dataframe_v2.py | L-Net-1992/towhee | 365 | 12614356 | <reponame>L-Net-1992/towhee
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from towhee.dataframe.array import Array
from towhee.dataframe.dataframe_v2 import DataFrame
class TestDataframe(unittest.TestCase):
"""
Test dataframe basic function
"""
def test_constructors(self):
def get_columns():
return ['digit', 'letter']
def get_tuples():
return [(0, 'a'), (1, 'b'), (2, 'c')]
def get_arrays():
return [Array([0, 1, 2]), Array(['a', 'b', 'c'])]
def get_dict():
return {'digit': Array([0, 1, 2]), 'letter': Array(['a', 'b', 'c'])}
def check_data(df):
for i in range(3):
self.assertEqual(df['digit'][i], i)
self.assertEqual(df['letter'][i], chr(ord('a') + i))
self.assertEqual(df[i][0], i)
self.assertEqual(df[i][1], chr(ord('a') + i))
for i, row in enumerate(df.iter()):
self.assertEqual(row[0], i)
self.assertEqual(row[1], chr(ord('a') + i))
# empty df
df = DataFrame('my_df')
df.seal()
self.assertEqual(df.name, 'my_df')
# from list[tuple]
data = get_tuples()
columns = get_columns()
df = DataFrame('my_df', data, columns)
df.seal()
check_data(df)
# from list[towhee.Array]
data = get_arrays()
columns = get_columns()
df = DataFrame('my_df', data, columns)
df.seal()
check_data(df)
# from dict[str, towhee.Array]
data = get_dict()
df = DataFrame('my_df', data)
df.seal()
check_data(df)
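# Illustrative entry point (an assumption -- the original file is excerpted here
# and may already define one, plus further test cases):
#
#   if __name__ == '__main__':
#       unittest.main()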
|
egg/modules/random/hatch.py | ustcsq/nsimd | 247 | 12614361 | # Copyright (c) 2020 Agenium Scale
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import common
import collections
# -----------------------------------------------------------------------------
rand_functions = list()
class MAddToRands(type):
def __new__(cls, name, bases, dct):
ret = type.__new__(cls, name, bases, dct)
if name != 'Rand':
rand_functions.append(ret())
return ret
class Rand(object, metaclass=MAddToRands):
def gen_function_name(self, nwords, word_size, nrounds):
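        # e.g. gen_function_name(4, 32, 10) -> "philox_4x32_10" when self.name == "philox"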
return '{}_{}x{}_{}'.format(self.name, nwords, word_size, nrounds)
def gen_headers(self, opts):
res = ''
for word_size, nwords_nrounds in self.wordsize_nwords_nrounds.items():
for nwords, list_nrounds in nwords_nrounds.items():
for nrounds in list_nrounds:
res += self.gen_signature(nwords, word_size, nrounds)+';'
return res
def gen_tests(self, opts, nrounds, word_size, nwords):
key_size = self.get_key_size(nwords)
key_initialization = 'nsimd::packx{}<u{}> key_pack;'. \
format(key_size, word_size)
for i in range (0, key_size):
key_initialization += '''
i = {i};
for (int j = 0; j < len; j++) {{
key[j + i * len] = (u{word_size})(j + i * len);
}}
key_pack.v{i} = nsimd::loadu(&key[i*len], u{word_size}());
'''.format(i=i, word_size=word_size)
input_initilization = \
'memset(in, 0, sizeof(u{}) * {} * ulen);\n'. \
format(word_size, nwords)
for i in range (0, nwords):
input_initilization += 'in_pack.v{} = nsimd::pack<u{}>(0);'. \
format(i, word_size)
compare = ''
for i in range (0, nwords):
compare += '''
if (i=={i}) {{
nsimd::storeu(out_nsimd, out_pack.v{i});
}}
'''.format(i=i)
l = 'll' if word_size == 64 else ''
cast = '(nsimd_ulonglong)' if word_size == 64 else ''
res = '''
#include <nsimd/modules/random/functions.hpp>
#include "reference.hpp"
#include <iostream>
#ifdef NSIMD_LONGLONG_IS_EXTENSION
#if defined(NSIMD_IS_GCC) || defined(NSIMD_IS_CLANG)
#pragma GCC diagnostic ignored "-Wformat"
#endif
#endif
int main() {{
int res = EXIT_SUCCESS;
printf("Test of {function_name} ...\\n");
nsimd::packx{nwords}<u{word_size}> in_pack;
nsimd::packx{nwords}<u{word_size}> out_pack;
const int len = nsimd::len(u{word_size}());
const unsigned int ulen = (unsigned int)len;
u{word_size} *key = (u{word_size}*)malloc(ulen *
sizeof(u{word_size}) * {key_size});
u{word_size} *in = (u{word_size}*)malloc(ulen *
sizeof(u{word_size}) * {nwords});
u{word_size} *out = (u{word_size}*)malloc(ulen *
sizeof(u{word_size}) * {nwords});
u{word_size} *out_nsimd = (u{word_size}*)malloc(ulen *
sizeof(u{word_size}));
tab{word_size}x{nwords}_t in_ref;
tab{word_size}x{key_size}_t key_ref;
tab{word_size}x{nwords}_t out_ref;
int i;
// Keys
{key_initialization}
{input_initilization}
for (int cpt=0; cpt < 100000; ++cpt) {{
out_pack = nsimd::random::{function_name}(in_pack, key_pack);
for (int i=0; i<len; ++i) {{
for (int j=0; j<{nwords}; ++j) {{
in_ref.v[j] = in[i + j * len];
}}
for (int j=0; j<{key_size}; ++j) {{
key_ref.v[j] = key[i + j*len];
}}
out_ref = branson_{name}{nwords}x{word_size}_R({nrounds},
in_ref, key_ref);
for (int j=0; j<{nwords}; ++j) {{
out[i + j * len] = out_ref.v[j];
}}
}}
for (int i=0; i<{nwords}; ++i) {{
{compare}
if (memcmp(out_nsimd, &out[i * len],
ulen * sizeof(u{word_size}))) {{
printf ("%i\\n", i);
for (int j=0; j<len; ++j) {{
printf ("%{l}u\\t(0x%{l}x)\\t\\t%{l}u\\t(0x%{l}x)\\n",
{cast}out[j+i*len], {cast}out[j+i*len],
{cast}out_nsimd[j], {cast}out_nsimd[j]);
}}
res = EXIT_FAILURE;
printf("... FAILED\\n");
goto cleanup;
}}
}}
in_pack = out_pack;
memcpy(in, out, sizeof(u{word_size}) * {nwords} * ulen);
}}
fprintf(stdout, "... OK\\n");
cleanup:
free(key);
free(in);
free(out);
free(out_nsimd);
return res;
}}
'''.format(function_name=self.gen_function_name(nwords, word_size,
nrounds), word_size=word_size, key_size=key_size,
nwords=nwords, key_initialization=key_initialization,
nrounds=nrounds, input_initilization=input_initilization,
compare=compare, l=l, name = self.name, cast=cast)
# Write file
return res
class Philox(Rand):
name = 'philox'
wordsize_nwords_nrounds = {32: {2: [10],
4: [7, 10]},
64: {2: [6, 10],
4: [7, 10]}}
mullohi='''
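/* mulhilo32/mulhilo64 below compute the full-width product of two packs and
   return its low and high halves; each #if block selects one of two
   equivalent implementations. */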
#if 1
void mulhilo32(pack<u32> a, pack<u32> b, pack<u32> *low, pack<u32> *high) {
nsimd::packx2<u64> a64 = nsimd::upcvt(nsimd::packx2<u64>(), a);
nsimd::packx2<u64> b64 = nsimd::upcvt(nsimd::packx2<u64>(), b);
nsimd::packx2<u64> product;
product.v0 = a64.v0 * b64.v0;
product.v1 = a64.v1 * b64.v1;
*high =
nsimd::downcvt(nsimd::pack<u32>(), product.v0 >> 32, product.v1 >> 32);
*low = nsimd::downcvt(nsimd::pack<u32>(), product.v0, product.v1);
}
#else
void mulhilo32(pack<u32> a, pack<u32> b, pack<u32> *low, pack<u32> *high) {
nsimd::pack<u32> ah = nsimd::shr(a, 16);
nsimd::pack<u32> bh = nsimd::shr(b, 16);
nsimd::pack<u32> al = nsimd::shr(nsimd::shl(a, 16), 16);
nsimd::pack<u32> bl = nsimd::shr(nsimd::shl(b, 16), 16);
nsimd::pack<u32> ahbh = ah * bh;
nsimd::pack<u32> ahbl = ah * bl;
nsimd::pack<u32> albh = al * bh;
nsimd::pack<u32> albl = al * bl;
nsimd::pack<u32> tmp1 = nsimd::shl(albh, 16);
nsimd::pack<u32> tmp2 = nsimd::shl(ahbl, 16);
nsimd::pack<u32> tmp3 = tmp1 + tmp2;
nsimd::pack<u32> _1 = nsimd::set1(nsimd::pack<u32>(), 1u);
nsimd::pack<u32> _0 = nsimd::set1(nsimd::pack<u32>(), 0u);
nsimd::pack<u32> carry =
nsimd::if_else1((tmp3 < tmp1) || (tmp3 < tmp2), _1, _0);
*low = tmp3 + albl;
carry = carry + nsimd::if_else1((*low < tmp3) || (*low < albl), _1, _0);
*high = ahbh + nsimd::shr(albh, 16) + nsimd::shr(ahbl, 16) + carry;
}
#endif
#if 0
void mulhilo64(pack<u64> a, pack<u64> b, pack<u64> *low, pack<u64> *high) {
u64 a_buf[8];
u64 b_buf[8];
u64 low_buf[8];
u64 high_buf[8];
nsimd::storeu(a_buf, a);
nsimd::storeu(b_buf, b);
for (int i = 0; i < nsimd::len(u64()); ++i) {
__uint128_t product = ((__uint128_t)a_buf[i]) * ((__uint128_t)b_buf[i]);
high_buf[i] = (u64)(product >> 64);
low_buf[i] = (u64)product;
}
*high = nsimd::loadu(high_buf, u64());
*low = nsimd::loadu(low_buf, u64());
}
#else
void mulhilo64(pack<u64> a, pack<u64> b, pack<u64> *low, pack<u64> *high) {
nsimd::pack<u64> ah = nsimd::shr(a, 32);
nsimd::pack<u64> bh = nsimd::shr(b, 32);
nsimd::pack<u64> al = nsimd::shr(nsimd::shl(a, 32), 32);
nsimd::pack<u64> bl = nsimd::shr(nsimd::shl(b, 32), 32);
nsimd::pack<u64> ahbh = ah * bh;
nsimd::pack<u64> ahbl = ah * bl;
nsimd::pack<u64> albh = al * bh;
nsimd::pack<u64> albl = al * bl;
nsimd::pack<u64> tmp1 = nsimd::shl(albh, 32);
nsimd::pack<u64> tmp2 = nsimd::shl(ahbl, 32);
nsimd::pack<u64> tmp3 = tmp1 + tmp2;
nsimd::pack<u64> _1 = nsimd::set1(nsimd::pack<u64>(), (u64)1);
nsimd::pack<u64> _0 = nsimd::set1(nsimd::pack<u64>(), (u64)0);
nsimd::pack<u64> carry =
nsimd::if_else1((tmp3 < tmp1) || (tmp3 < tmp2), _1, _0);
*low = tmp3 + albl;
carry = carry + nsimd::if_else1((*low < tmp3) || (*low < albl), _1, _0);
*high = ahbh + nsimd::shr(albh, 32) + nsimd::shr(ahbl, 32) + carry;
}
#endif
'''
def gen_signature(self, nwords, word_size, nrounds):
return ('nsimd::packx{nwords}<u{word_size}> {fun_name}' \
'(nsimd::packx{nwords}<u{word_size}> in, ' \
'nsimd::packx{key_size}<u{word_size}> key)'). \
format(nwords = nwords, word_size = word_size,
fun_name = self.gen_function_name(nwords, word_size,
nrounds),
key_size = self.get_key_size(nwords))
def get_key_size(self, nwords):
return int(nwords/2)
def gen_func(self, opts, nrounds, word_size, nwords):
if nwords == 2:
bump_keys_init = \
'nsimd::pack<u{word_size}> bump = ' \
'nsimd::set1(nsimd::pack<u{word_size}>(), {bump});'.\
format(word_size=word_size, bump = '(u64)0x9E3779B97F4A7C15ULL' \
if word_size == 64 else '(u32)0x9E3779B9U')
bump_keys = 'key.v0 = key.v0 + bump;'
round_init = '''
nsimd::pack<u{word_size}> mul =
nsimd::set1(nsimd::pack<u{word_size}>(), {mul});
nsimd::pack<u{word_size}> high, low;'''. \
format(word_size=word_size, mul='(u64)0xD2B74407B1CE6E93ULL' \
if word_size == 64 else '(u32)0xD256D193U')
round='''
mulhilo{word_size}(mul, in.v0, &low, &high);
in.v0 = high ^ key.v0 ^ in.v1;
in.v1 = low;
'''.format(word_size=word_size)
elif nwords == 4:
bump_keys_init = '''
nsimd::pack<u{word_size}> bump0 =
nsimd::set1(nsimd::pack<u{word_size}>(), {bump0});
nsimd::pack<u{word_size}> bump1 =
nsimd::set1(nsimd::pack<u{word_size}>(), {bump1});'''.\
format(word_size=word_size,
bump0 = '(u64)0x9E3779B97F4A7C15ULL' \
if word_size == 64 else '(u32)0x9E3779B9U',
bump1 = '(u64)0xBB67AE8584CAA73BULL' \
if word_size == 64 else '(u32)0xBB67AE85U')
bump_keys = 'key.v0 = key.v0 + bump0;\nkey.v1 = key.v1 + bump1;'
round_init = '''
nsimd::pack<u{word_size}> mul0 =
nsimd::set1(nsimd::pack<u{word_size}>(), {mul0});
nsimd::pack<u{word_size}> mul1 =
nsimd::set1(nsimd::pack<u{word_size}>(), {mul1});
nsimd::pack<u{word_size}> low0, high0, low1, high1;
'''.format(word_size=word_size,
mul0='(u64)0xD2E7470EE14C6C93ULL' \
if word_size == 64 else '(u32)0xD2511F53U',
mul1='(u64)0xCA5A826395121157ULL' \
if word_size == 64 else '(u32)0xCD9E8D57U')
round='''
mulhilo{word_size}(mul0, in.v0, &low0, &high0);
mulhilo{word_size}(mul1, in.v2, &low1, &high1);
in.v0 = high1 ^ key.v0 ^ in.v1;
in.v1 = low1;
in.v2 = high0 ^ key.v1 ^ in.v3;
in.v3 = low0;'''.format(word_size=word_size)
res = self.gen_signature (nwords, word_size, nrounds)
res += ' {{ nsimd::packx{}<u{}> out;'.format(nwords, word_size)
res += bump_keys_init
res += round_init
# Round 0:
res += round;
for i in range(1, nrounds):
res += bump_keys
res += round
res+='''
return in;
}
'''
return res
def generate(self, opts):
res = self.mullohi
for word_size, nwords_nrounds in self.wordsize_nwords_nrounds.items():
for nwords, list_nrounds in nwords_nrounds.items():
for nrounds in list_nrounds:
res += self.gen_func(opts, nrounds, word_size, nwords)
return res
class ThreeFry(Rand):
name = 'threefry'
enums='''
enum enum_threefry32x2_rotations {
Rot_32x2_0 = 13,
Rot_32x2_1 = 15,
Rot_32x2_2 = 26,
Rot_32x2_3 = 6,
Rot_32x2_4 = 17,
Rot_32x2_5 = 29,
Rot_32x2_6 = 16,
Rot_32x2_7 = 24
};
enum enum_threefry32x4_rotations {
Rot_32x4_0_0 = 10,
Rot_32x4_0_2 = 26,
Rot_32x4_1_0 = 11,
Rot_32x4_1_2 = 21,
Rot_32x4_2_0 = 13,
Rot_32x4_2_2 = 27,
Rot_32x4_3_0 = 23,
Rot_32x4_3_2 = 5,
Rot_32x4_4_0 = 6,
Rot_32x4_4_2 = 20,
Rot_32x4_5_0 = 17,
Rot_32x4_5_2 = 11,
Rot_32x4_6_0 = 25,
Rot_32x4_6_2 = 10,
Rot_32x4_7_0 = 18,
Rot_32x4_7_2 = 20
};
enum enum_threefry64x2_rotations {
Rot_64x2_0 = 16,
Rot_64x2_1 = 42,
Rot_64x2_2 = 12,
Rot_64x2_3 = 31,
Rot_64x2_4 = 16,
Rot_64x2_5 = 32,
Rot_64x2_6 = 24,
Rot_64x2_7 = 21
};
enum enum_threefry64x4_rotations {
Rot_64x4_0_0 = 14,
Rot_64x4_0_2 = 16,
Rot_64x4_1_0 = 52,
Rot_64x4_1_2 = 57,
Rot_64x4_2_0 = 23,
Rot_64x4_2_2 = 40,
Rot_64x4_3_0 = 5,
Rot_64x4_3_2 = 37,
Rot_64x4_4_0 = 25,
Rot_64x4_4_2 = 33,
Rot_64x4_5_0 = 46,
Rot_64x4_5_2 = 12,
Rot_64x4_6_0 = 58,
Rot_64x4_6_2 = 22,
Rot_64x4_7_0 = 32,
Rot_64x4_7_2 = 32
};
'''
    # The following macros should not be changed to functions: GCC can't inline them
rotations='''
#define SHIFT_MOD_32(x, N) ((x << (N & 31)) | (x >> ((32 - N) & 31)))
#define SHIFT_MOD_64(x, N) ((x << (N & 63)) | (x >> ((64 - N) & 63)))
'''
undef_macro='''
#undef SHIFT_MOD_32
#undef SHIFT_MOD_64
'''
wordsize_nwords_nrounds = {32: {2: [12, 20, 32],
4: [12, 20, 72]},
64: {2: [13, 20, 32],
4: [12, 20, 72]}}
def gen_signature(self, nwords, word_size, nrounds):
return '''nsimd::packx{nwords}<u{word_size}> \
{fun_name} \
(nsimd::packx{nwords}<u{word_size}> in, \
nsimd::packx{nwords}<u{word_size}> key)'''. \
format(nwords=nwords, word_size = word_size,
fun_name=self.gen_function_name(nwords, word_size, nrounds))
def get_key_size(self, nwords):
return nwords
def gen_body(self, opts, nrounds, word_size, nwords):
if word_size == 32:
initialize_keys = '''nsimd::pack<u32> ks{nwords} =
nsimd::set1(nsimd::pack<u32>(), 0x1BD11BDAU);'''. \
format(nwords=nwords)
elif word_size == 64:
initialize_keys = '''nsimd::pack<u64> ks{nwords} =
nsimd::set1(nsimd::pack<u64>(), (u64)0x1BD11BDAA9FC1A22ULL);'''. \
format(nwords=nwords)
res = self.gen_signature(nwords, word_size, nrounds)
res += ' {{ nsimd::packx{}<u{}> out;'.format(nwords, word_size)
res += initialize_keys
initialisation_keys = '''
nsimd::pack<u{word_size}> ks{i};
ks{i} = key.v{i};
out.v{i} = in.v{i};
ks{nwords} = ks{nwords} ^ key.v{i};
out.v{i} = out.v{i} + key.v{i};
'''
for i in range(0,nwords):
res += initialisation_keys.format(i=i, nwords=nwords,
word_size=word_size)
for i in range(0, nrounds):
if nwords == 4:
indexes= [1 if i%2==0 else 3, 1 if i%2==1 else 3]
res += '''
out.v0 = out.v0 + out.v{index0};
out.v{index0} = SHIFT_MOD_{word_size}(out.v{index0},
Rot_{word_size}x{nwords}_{i_mod}_0);
out.v{index0} = out.v{index0} ^ out.v0;
out.v2 = out.v2 + out.v{index1};
out.v{index1} = SHIFT_MOD_{word_size}(out.v{index1},
Rot_{word_size}x{nwords}_{i_mod}_2);
out.v{index1} = out.v{index1} ^ out.v2;
'''.format(index0=indexes[0], index1=indexes[1], i_mod=i%8,
word_size=word_size, nwords=nwords)
elif nwords == 2:
res += '''
out.v0 = out.v0 + out.v1;
out.v1 = SHIFT_MOD_{word_size}(out.v1,
Rot_{word_size}x{nwords}_{i_mod});
out.v1 = out.v1 ^ out.v0;'''. \
format(i_mod=i % 8, word_size=word_size, nwords=nwords)
#if (i % nwords) == nwords - 1:
if (i % 4) == 3:
d = int(i / 4 + 1)
res += '\n'
for j in range(0, nwords):
res += 'out.v{j} = out.v{j} + ks{calc};\n'. \
format(j=j, calc=str(int((d+j)%(nwords+1))))
res += 'out.v{n} = out.v{n} + ' \
'nsimd::pack<u{word_size}>({d});\n'. \
format(d=d, n=nwords-1, word_size=word_size)
res+='''
return out;
}
'''
return res
def generate(self, opts):
res = ''
res += self.enums
res += self.rotations
for word_size, nwords_nrounds in self.wordsize_nwords_nrounds.items():
for nwords, list_nrounds in nwords_nrounds.items():
for nrounds in list_nrounds:
res += self.gen_body(opts, nrounds, word_size, nwords)
res += self.undef_macro
return res
def gen_functions(opts):
## Write source files
#dirname = os.path.join(opts.include_dir, 'modules', 'random')
#common.mkdir_p(dirname)
#filename = os.path.join(dirname, 'functions.cpp')
#print(filename)
#with common.open_utf8(opts, filename) as out:
# out.write('#include "functions.hpp"\n')
# out.write('{}\n\n'.format(common.hbar))
# out.write(gen(opts))
# out.write('#endif\n')
#common.clang_format(opts, filename)
# Write headers
dirname = os.path.join(opts.include_dir, 'modules', 'random')
common.mkdir_p(dirname)
filename = os.path.join(dirname, 'functions.hpp')
with common.open_utf8(opts, filename) as out:
out.write(
'''#ifndef NSIMD_MODULES_RANDOM_FUNCTIONS_HPP
#define NSIMD_MODULES_RANDOM_FUNCTIONS_HPP
#include <nsimd/nsimd.h>
#include <nsimd/cxx_adv_api.hpp>
#include <nsimd/cxx_adv_api_functions.hpp>
#ifdef NSIMD_LONGLONG_IS_EXTENSION
#if defined(NSIMD_IS_GCC)
/* Not emitting the warning -Wlong-long is not possible */
/* with GCC <= 12. It is a bug. A workaround is to tell GCC */
/* to consider this header file as a system header file so */
/* that all warnings are not emitted. This is not satisfying */
/* but necessary for the moment. */
#pragma GCC system_header
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wlong-long"
#elif defined(NSIMD_IS_CLANG)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wlong-long"
#endif
#endif
namespace nsimd {
namespace random {
''')
out.write('{}\n\n'.format(common.hbar))
for func in rand_functions:
out.write(func.gen_headers(opts))
out.write(func.generate(opts))
out.write(
'''#ifdef NSIMD_LONGLONG_IS_EXTENSION
#if defined(NSIMD_IS_GCC)
#pragma GCC diagnostic pop
#elif defined(NSIMD_IS_CLANG)
#pragma clang diagnostic pop
#endif
#endif
} // namespace random
} // namespace nsimd
#endif
''')
common.clang_format(opts, filename)
def gen_tests(opts):
for func in rand_functions:
for word_size, nwords_nrounds in func.wordsize_nwords_nrounds.items():
for nwords, list_nrounds in nwords_nrounds.items():
for nrounds in list_nrounds:
# Write headers
dirname = os.path.join(opts.tests_dir, 'modules', 'random')
common.mkdir_p(dirname)
filename = os.path.join(dirname, '{}.cpp'. \
format(func.gen_function_name(nwords, word_size,
nrounds)))
with common.open_utf8(opts, filename) as out:
out.write(func.gen_tests(opts, nrounds, word_size,
nwords))
common.clang_format(opts, filename)
# -----------------------------------------------------------------------------
def name():
return 'Random number generators'
def desc():
return \
    'This module defines functions that generate pseudorandom numbers using ' \
    'algorithms described in Parallel Random Numbers: As Easy as 1,2,3, by ' \
'<NAME>, <NAME>, <NAME> and <NAME>.'
def gen_doc(opts):
api = ''
for func in rand_functions:
for word_size, nwords_nrounds in func.wordsize_nwords_nrounds.items():
for nwords, list_nrounds in nwords_nrounds.items():
for nrounds in list_nrounds:
api += '- `' + func.gen_signature(nwords, word_size,
nrounds) + '`; \n'
api += ' Returns a random number using the ' \
'{func_name} generator\n\n'. \
format(func_name=func.name)
res = '''
# NSIMD Random module overview
{desc}
Two different algorithms are proposed: Threefry and Philox. Both should give
high quality random numbers.
Threefry is quicker on CPU, while Philox is best suited to GPU.
Both algorithms are counter-based pseudorandom number generators, meaning that
they need two parameters:
- a key: each key generates a unique sequence,
- a counter: which selects the different numbers within the sequence.
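For example (illustrative only), a typical call to one of the generated
functions looks like
`nsimd::packx4<u32> out = nsimd::random::philox_4x32_10(counter, key);`
where `counter` is a `nsimd::packx4<u32>` holding the counter values and
`key` a `nsimd::packx2<u32>` holding the key.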
# NSIMD Random API reference
{api}
'''.format(desc = desc(), api=api)
filename = common.get_markdown_file(opts, 'overview', 'random')
if not common.can_create_filename(opts, filename):
return
with common.open_utf8(opts, filename) as fout:
fout.write(res)
def doc_menu():
return dict()
# -----------------------------------------------------------------------------
def doit(opts):
common.myprint(opts, 'Generating module random')
if opts.library:
gen_functions(opts)
if opts.tests:
gen_tests(opts)
if opts.doc:
gen_doc(opts)
|
setup.py | BlackLight/platypush | 228 | 12614365 | <gh_stars>100-1000
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
def path(fname=''):
return os.path.abspath(os.path.join(os.path.dirname(__file__), fname))
def readfile(fname):
with open(path(fname)) as f:
return f.read()
# noinspection PyShadowingBuiltins
def pkg_files(dir):
paths = []
# noinspection PyShadowingNames
for (path, dirs, files) in os.walk(dir):
for file in files:
paths.append(os.path.join('..', path, file))
return paths
plugins = pkg_files('platypush/plugins')
backend = pkg_files('platypush/backend')
setup(
name="platypush",
version="0.22.4",
author="<NAME>",
author_email="<EMAIL>",
description="Platypush service",
license="MIT",
python_requires='>= 3.6',
keywords="home-automation automation iot mqtt websockets redis dashboard notifications",
url="https://platypush.tech",
packages=find_packages(),
include_package_data=True,
entry_points={
'console_scripts': [
'platypush=platypush:main',
'platydock=platypush.platydock:main',
],
},
scripts=['bin/platyvenv'],
long_description=readfile('README.md'),
long_description_content_type='text/markdown',
classifiers=[
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
"Development Status :: 4 - Beta",
],
install_requires=[
'pyyaml',
'redis',
'requests',
'croniter',
'sqlalchemy',
'websockets',
'websocket-client',
'wheel',
'zeroconf>=0.27.0',
'tz',
'python-dateutil',
'cryptography',
'pyjwt',
'marshmallow',
'frozendict',
],
extras_require={
# Support for thread custom name
'threadname': ['python-prctl'],
# Support for Kafka backend and plugin
'kafka': ['kafka-python'],
# Support for Pushbullet backend and plugin
'pushbullet': ['pushbullet.py @ https://github.com/rbrcsk/pushbullet.py/tarball/master'],
# Support for HTTP backend
'http': ['flask', 'bcrypt', 'python-magic'],
# Support for uWSGI HTTP backend
'uwsgi': ['flask', 'uwsgi', 'bcrypt', 'python-magic'],
# Support for MQTT backends
'mqtt': ['paho-mqtt'],
# Support for RSS feeds parser
'rss': ['feedparser'],
# Support for PDF generation
'pdf': ['weasyprint'],
# Support for Philips Hue plugin
'hue': ['phue'],
# Support for MPD/Mopidy music server plugin and backend
'mpd': ['python-mpd2'],
# Support for Google text2speech plugin
'google-tts': ['oauth2client', 'google-api-python-client', 'google-cloud-texttospeech'],
# Support for OMXPlayer plugin
'omxplayer': ['omxplayer-wrapper'],
# Support for YouTube
'youtube': ['youtube-dl'],
# Support for torrents download
'torrent': ['python-libtorrent-bin'],
# Generic support for cameras
'camera': ['numpy', 'Pillow'],
# Support for RaspberryPi camera
'picamera': ['picamera', 'numpy', 'Pillow'],
# Support for inotify file monitors
'inotify': ['inotify'],
# Support for Google Assistant
'google-assistant-legacy': ['google-assistant-library'],
'google-assistant': ['google-assistant-sdk[samples]'],
# Support for the Google APIs
'google': ['oauth2client', 'google-api-python-client'],
# Support for Last.FM scrobbler plugin
'lastfm': ['pylast'],
# Support for custom hotword detection
'hotword': ['snowboy'],
'snowboy': ['snowboy'],
# Support for real-time MIDI events
'midi': ['rtmidi'],
# Support for RaspberryPi GPIO
'rpi-gpio': ['RPi.GPIO'],
# Support for MCP3008 analog-to-digital converter plugin
'mcp3008': ['adafruit-mcp3008'],
# Support for smart cards detection
'scard': ['pyscard'],
# Support for serial port plugin
'serial': ['pyserial'],
# Support for ICal calendars
'ical': ['icalendar'],
# Support for joystick backend
'joystick': ['inputs'],
# Support for Kodi plugin
'kodi': ['kodi-json'],
# Support for Plex plugin
'plex': ['plexapi'],
# Support for Chromecast plugin
'chromecast': ['pychromecast'],
# Support for sound devices
'sound': ['sounddevice', 'soundfile', 'numpy'],
# Support for web media subtitles
'subtitles': [
'webvtt-py',
'python-opensubtitles @ https://github.com/agonzalezro/python-opensubtitles/tarball/master'],
# Support for mpv player plugin
'mpv': ['python-mpv'],
# Support for NFC tags
'nfc': ['nfcpy>=1.0', 'ndeflib'],
# Support for enviropHAT
'envirophat': ['envirophat'],
# Support for GPS
'gps': ['gps'],
# Support for BME280 environment sensor
'bme280': ['pimoroni-bme280'],
# Support for LTR559 light/proximity sensor
'ltr559': ['ltr559'],
# Support for VL53L1X laser ranger/distance sensor
'vl53l1x': ['smbus2', 'vl53l1x'],
# Support for Dropbox integration
'dropbox': ['dropbox'],
# Support for Leap Motion backend
'leap': ['leap-sdk @ https://github.com/BlackLight/leap-sdk-python3/tarball/master'],
# Support for Flic buttons
'flic': ['flic @ https://github.com/50ButtonsEach/fliclib-linux-hci/tarball/master'],
# Support for Alexa/Echo plugin
        'alexa': ['avs @ https://github.com/BlackLight/avs/tarball/master'],
# Support for bluetooth devices
'bluetooth': ['pybluez', 'gattlib',
'pyobex @ https://github.com/BlackLight/PyOBEX/tarball/master'],
# Support for TP-Link devices
'tplink': ['pyHS100'],
# Support for PMW3901 2-Dimensional Optical Flow Sensor
'pmw3901': ['pmw3901'],
# Support for MLX90640 thermal camera
'mlx90640': ['Pillow'],
# Support for machine learning models and cameras over OpenCV
'cv': ['opencv-python', 'numpy', 'Pillow'],
# Support for the generation of HTML documentation from docstring
'htmldoc': ['docutils'],
# Support for Node-RED integration
'nodered': ['pynodered'],
# Support for Todoist integration
'todoist': ['todoist-python'],
# Support for Trello integration
'trello': ['py-trello'],
# Support for Google Pub/Sub
'google-pubsub': ['google-cloud-pubsub'],
# Support for Google Translate
'google-translate': ['google-cloud-translate'],
# Support for keyboard/mouse plugin
'inputs': ['pyuserinput'],
# Support for Buienradar weather forecast
'buienradar': ['buienradar'],
# Support for Telegram integration
'telegram': ['python-telegram-bot'],
# Support for Arduino integration
'arduino': ['pyserial', 'pyfirmata2'],
# Support for CUPS printers management
'cups': ['pycups'],
# Support for Graphite integration
'graphite': ['graphyte'],
# Support for CPU and memory monitoring and info
'sys': ['py-cpuinfo', 'psutil'],
# Support for nmap integration
'nmap': ['python-nmap'],
# Support for zigbee2mqtt
'zigbee': ['paho-mqtt'],
# Support for Z-Wave
'zwave': ['python-openzwave'],
# Support for Mozilla DeepSpeech speech-to-text engine
'deepspeech': ['deepspeech', 'numpy','sounddevice'],
# Support for PicoVoice hotword detection engine
'picovoice-hotword': ['pvporcupine'],
# Support for PicoVoice speech-to-text engine
'picovoice-speech': ['pvcheetah @ git+https://github.com/BlackLight/cheetah'],
# Support for OTP (One-Time Password) generation
'otp': ['pyotp'],
# Support for Linode integration
'linode': ['linode_api4'],
# Support for QR codes
'qrcode': ['numpy','qrcode[pil]', 'Pillow', 'pyzbar'],
# Support for Tensorflow
'tensorflow': ['numpy', 'tensorflow>=2.0', 'keras', 'pandas'],
# Support for Samsung TizenOS-based smart TVs
'samsungtv': ['samsungtvws'],
# Support for SSH integration
'ssh': ['paramiko'],
# Support for clipboard integration
'clipboard': ['pyperclip'],
# Support for luma.oled display drivers
'luma-oled': ['luma.oled @ git+https://github.com/rm-hull/luma.oled'],
# Support for DBus integration
'dbus': ['dbus-python'],
# Support for Twilio integration
'twilio': ['twilio'],
# Support for Github integration
'github': ['pytz'],
# Support for DHT11/DHT22/AM2302 temperature/humidity sensors
'dht': ['Adafruit_Python_DHT @ git+https://github.com/adafruit/Adafruit_Python_DHT'],
# Support for LCD display integration
'lcd': ['RPi.GPIO', 'RPLCD'],
# Support for IMAP mail integration
'imap': ['imapclient'],
# Support for NextCloud integration
'nextcloud': ['nextcloud-api-wrapper'],
# Support for VLC integration
'vlc': ['python-vlc'],
# Support for SmartThings integration
'smartthings': ['pysmartthings', 'aiohttp'],
# Support for file.monitor backend
'filemonitor': ['watchdog'],
# Support for Adafruit PCA9685 PWM controller
'pca9685': ['adafruit-python-shell', 'adafruit-circuitpython-pca9685'],
# Support for ngrok integration
'ngrok': ['pyngrok'],
},
)
|
recommenders/datasets/amazon_reviews.py | aeroabir/recommenders | 10,147 | 12614373 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import re
import shutil
import warnings
import pandas as pd
import gzip
import random
import logging
import _pickle as cPickle
from recommenders.utils.constants import SEED
from recommenders.datasets.download_utils import maybe_download, download_path
random.seed(SEED)
logger = logging.getLogger()
def data_preprocessing(
reviews_file,
meta_file,
train_file,
valid_file,
test_file,
user_vocab,
item_vocab,
cate_vocab,
sample_rate=0.01,
valid_num_ngs=4,
test_num_ngs=9,
is_history_expanding=True,
):
"""Create data for training, validation and testing from original dataset
Args:
reviews_file (str): Reviews dataset downloaded from former operations.
meta_file (str): Meta dataset downloaded from former operations.
"""
reviews_output = _reviews_preprocessing(reviews_file)
meta_output = _meta_preprocessing(meta_file)
instance_output = _create_instance(reviews_output, meta_output)
_create_item2cate(instance_output)
sampled_instance_file = _get_sampled_data(instance_output, sample_rate=sample_rate)
preprocessed_output = _data_processing(sampled_instance_file)
if is_history_expanding:
_data_generating(preprocessed_output, train_file, valid_file, test_file)
else:
_data_generating_no_history_expanding(
preprocessed_output, train_file, valid_file, test_file
)
_create_vocab(train_file, user_vocab, item_vocab, cate_vocab)
_negative_sampling_offline(
sampled_instance_file, valid_file, test_file, valid_num_ngs, test_num_ngs
)
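# Illustrative call (the file names below are placeholders, not part of the module):
#
#   data_preprocessing(
#       "reviews.json", "meta.json",
#       "train.tsv", "valid.tsv", "test.tsv",
#       "user_vocab.pkl", "item_vocab.pkl", "cate_vocab.pkl",
#       sample_rate=0.01, valid_num_ngs=4, test_num_ngs=9,
#   )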
def _create_vocab(train_file, user_vocab, item_vocab, cate_vocab):
f_train = open(train_file, "r")
user_dict = {}
item_dict = {}
cat_dict = {}
logger.info("vocab generating...")
for line in f_train:
arr = line.strip("\n").split("\t")
uid = arr[1]
mid = arr[2]
cat = arr[3]
mid_list = arr[5]
cat_list = arr[6]
if uid not in user_dict:
user_dict[uid] = 0
user_dict[uid] += 1
if mid not in item_dict:
item_dict[mid] = 0
item_dict[mid] += 1
if cat not in cat_dict:
cat_dict[cat] = 0
cat_dict[cat] += 1
if len(mid_list) == 0:
continue
for m in mid_list.split(","):
if m not in item_dict:
item_dict[m] = 0
item_dict[m] += 1
for c in cat_list.split(","):
if c not in cat_dict:
cat_dict[c] = 0
cat_dict[c] += 1
sorted_user_dict = sorted(user_dict.items(), key=lambda x: x[1], reverse=True)
sorted_item_dict = sorted(item_dict.items(), key=lambda x: x[1], reverse=True)
sorted_cat_dict = sorted(cat_dict.items(), key=lambda x: x[1], reverse=True)
uid_voc = {}
index = 0
for key, value in sorted_user_dict:
uid_voc[key] = index
index += 1
mid_voc = {}
mid_voc["default_mid"] = 0
index = 1
for key, value in sorted_item_dict:
mid_voc[key] = index
index += 1
cat_voc = {}
cat_voc["default_cat"] = 0
index = 1
for key, value in sorted_cat_dict:
cat_voc[key] = index
index += 1
cPickle.dump(uid_voc, open(user_vocab, "wb"))
cPickle.dump(mid_voc, open(item_vocab, "wb"))
cPickle.dump(cat_voc, open(cate_vocab, "wb"))
def _negative_sampling_offline(
instance_input_file, valid_file, test_file, valid_neg_nums=4, test_neg_nums=49
):
columns = ["label", "user_id", "item_id", "timestamp", "cate_id"]
ns_df = pd.read_csv(instance_input_file, sep="\t", names=columns)
items_with_popular = list(ns_df["item_id"])
global item2cate
# valid negative sampling
logger.info("start valid negative sampling")
with open(valid_file, "r") as f:
valid_lines = f.readlines()
write_valid = open(valid_file, "w")
for line in valid_lines:
write_valid.write(line)
words = line.strip().split("\t")
positive_item = words[2]
count = 0
neg_items = set()
while count < valid_neg_nums:
neg_item = random.choice(items_with_popular)
if neg_item == positive_item or neg_item in neg_items:
continue
count += 1
neg_items.add(neg_item)
words[0] = "0"
words[2] = neg_item
words[3] = item2cate[neg_item]
write_valid.write("\t".join(words) + "\n")
# test negative sampling
logger.info("start test negative sampling")
with open(test_file, "r") as f:
test_lines = f.readlines()
write_test = open(test_file, "w")
for line in test_lines:
write_test.write(line)
words = line.strip().split("\t")
positive_item = words[2]
count = 0
neg_items = set()
while count < test_neg_nums:
neg_item = random.choice(items_with_popular)
if neg_item == positive_item or neg_item in neg_items:
continue
count += 1
neg_items.add(neg_item)
words[0] = "0"
words[2] = neg_item
words[3] = item2cate[neg_item]
write_test.write("\t".join(words) + "\n")
def _data_generating(input_file, train_file, valid_file, test_file, min_sequence=1):
"""produce train, valid and test file from processed_output file
Each user's behavior sequence will be unfolded and produce multiple lines in trian file.
Like, user's behavior sequence: 12345, and this function will write into train file:
1, 12, 123, 1234, 12345
"""
f_input = open(input_file, "r")
f_train = open(train_file, "w")
f_valid = open(valid_file, "w")
f_test = open(test_file, "w")
logger.info("data generating...")
last_user_id = None
for line in f_input:
line_split = line.strip().split("\t")
tfile = line_split[0]
label = int(line_split[1])
user_id = line_split[2]
movie_id = line_split[3]
date_time = line_split[4]
category = line_split[5]
if tfile == "train":
fo = f_train
elif tfile == "valid":
fo = f_valid
elif tfile == "test":
fo = f_test
if user_id != last_user_id:
movie_id_list = []
cate_list = []
dt_list = []
else:
history_clk_num = len(movie_id_list)
cat_str = ""
mid_str = ""
dt_str = ""
for c1 in cate_list:
cat_str += c1 + ","
for mid in movie_id_list:
mid_str += mid + ","
for dt_time in dt_list:
dt_str += dt_time + ","
if len(cat_str) > 0:
cat_str = cat_str[:-1]
if len(mid_str) > 0:
mid_str = mid_str[:-1]
if len(dt_str) > 0:
dt_str = dt_str[:-1]
if history_clk_num >= min_sequence:
fo.write(
line_split[1]
+ "\t"
+ user_id
+ "\t"
+ movie_id
+ "\t"
+ category
+ "\t"
+ date_time
+ "\t"
+ mid_str
+ "\t"
+ cat_str
+ "\t"
+ dt_str
+ "\n"
)
last_user_id = user_id
if label:
movie_id_list.append(movie_id)
cate_list.append(category)
dt_list.append(date_time)
def _data_generating_no_history_expanding(
input_file, train_file, valid_file, test_file, min_sequence=1
):
"""Produce train, valid and test file from processed_output file
Each user's behavior sequence will only produce one line in train file.
Like, user's behavior sequence: 12345, and this function will write into train file: 12345
"""
f_input = open(input_file, "r")
f_train = open(train_file, "w")
f_valid = open(valid_file, "w")
f_test = open(test_file, "w")
logger.info("data generating...")
last_user_id = None
last_movie_id = None
last_category = None
last_datetime = None
last_tfile = None
for line in f_input:
line_split = line.strip().split("\t")
tfile = line_split[0]
label = int(line_split[1])
user_id = line_split[2]
movie_id = line_split[3]
date_time = line_split[4]
category = line_split[5]
if last_tfile == "train":
fo = f_train
elif last_tfile == "valid":
fo = f_valid
elif last_tfile == "test":
fo = f_test
if user_id != last_user_id or tfile == "valid" or tfile == "test":
if last_user_id is not None:
history_clk_num = len(movie_id_list)
cat_str = ""
mid_str = ""
dt_str = ""
for c1 in cate_list[:-1]:
cat_str += c1 + ","
for mid in movie_id_list[:-1]:
mid_str += mid + ","
for dt_time in dt_list[:-1]:
dt_str += dt_time + ","
if len(cat_str) > 0:
cat_str = cat_str[:-1]
if len(mid_str) > 0:
mid_str = mid_str[:-1]
if len(dt_str) > 0:
dt_str = dt_str[:-1]
if history_clk_num > min_sequence:
fo.write(
line_split[1]
+ "\t"
+ last_user_id
+ "\t"
+ last_movie_id
+ "\t"
+ last_category
+ "\t"
+ last_datetime
+ "\t"
+ mid_str
+ "\t"
+ cat_str
+ "\t"
+ dt_str
+ "\n"
)
if tfile == "train" or last_user_id == None:
movie_id_list = []
cate_list = []
dt_list = []
last_user_id = user_id
last_movie_id = movie_id
last_category = category
last_datetime = date_time
last_tfile = tfile
if label:
movie_id_list.append(movie_id)
cate_list.append(category)
dt_list.append(date_time)
def _create_item2cate(instance_file):
logger.info("creating item2cate dict")
global item2cate
instance_df = pd.read_csv(
instance_file,
sep="\t",
names=["label", "user_id", "item_id", "timestamp", "cate_id"],
)
item2cate = instance_df.set_index("item_id")["cate_id"].to_dict()
def _get_sampled_data(instance_file, sample_rate):
logger.info("getting sampled data...")
global item2cate
output_file = instance_file + "_" + str(sample_rate)
columns = ["label", "user_id", "item_id", "timestamp", "cate_id"]
ns_df = pd.read_csv(instance_file, sep="\t", names=columns)
items_num = ns_df["item_id"].nunique()
items_with_popular = list(ns_df["item_id"])
items_sample, count = set(), 0
while count < int(items_num * sample_rate):
random_item = random.choice(items_with_popular)
if random_item not in items_sample:
items_sample.add(random_item)
count += 1
ns_df_sample = ns_df[ns_df["item_id"].isin(items_sample)]
ns_df_sample.to_csv(output_file, sep="\t", index=None, header=None)
return output_file
def _meta_preprocessing(meta_readfile):
logger.info("start meta preprocessing...")
meta_writefile = meta_readfile + "_output"
meta_r = open(meta_readfile, "r")
meta_w = open(meta_writefile, "w")
for line in meta_r:
line_new = eval(line)
meta_w.write(line_new["asin"] + "\t" + line_new["categories"][0][-1] + "\n")
meta_r.close()
meta_w.close()
return meta_writefile
def _reviews_preprocessing(reviews_readfile):
logger.info("start reviews preprocessing...")
reviews_writefile = reviews_readfile + "_output"
reviews_r = open(reviews_readfile, "r")
reviews_w = open(reviews_writefile, "w")
for line in reviews_r:
line_new = eval(line.strip())
reviews_w.write(
str(line_new["reviewerID"])
+ "\t"
+ str(line_new["asin"])
+ "\t"
+ str(line_new["unixReviewTime"])
+ "\n"
)
reviews_r.close()
reviews_w.close()
return reviews_writefile
def _create_instance(reviews_file, meta_file):
logger.info("start create instances...")
dirs, _ = os.path.split(reviews_file)
output_file = os.path.join(dirs, "instance_output")
f_reviews = open(reviews_file, "r")
user_dict = {}
item_list = []
for line in f_reviews:
line = line.strip()
reviews_things = line.split("\t")
if reviews_things[0] not in user_dict:
user_dict[reviews_things[0]] = []
user_dict[reviews_things[0]].append((line, float(reviews_things[-1])))
item_list.append(reviews_things[1])
f_meta = open(meta_file, "r")
meta_dict = {}
for line in f_meta:
line = line.strip()
meta_things = line.split("\t")
if meta_things[0] not in meta_dict:
meta_dict[meta_things[0]] = meta_things[1]
f_output = open(output_file, "w")
for user_behavior in user_dict:
sorted_user_behavior = sorted(user_dict[user_behavior], key=lambda x: x[1])
for line, _ in sorted_user_behavior:
user_things = line.split("\t")
asin = user_things[1]
if asin in meta_dict:
f_output.write("1" + "\t" + line + "\t" + meta_dict[asin] + "\n")
else:
f_output.write("1" + "\t" + line + "\t" + "default_cat" + "\n")
f_reviews.close()
f_meta.close()
f_output.close()
return output_file
def _data_processing(input_file):
logger.info("start data processing...")
dirs, _ = os.path.split(input_file)
output_file = os.path.join(dirs, "preprocessed_output")
f_input = open(input_file, "r")
f_output = open(output_file, "w")
user_count = {}
for line in f_input:
line = line.strip()
user = line.split("\t")[1]
if user not in user_count:
user_count[user] = 0
user_count[user] += 1
f_input.seek(0)
i = 0
last_user = None
for line in f_input:
line = line.strip()
user = line.split("\t")[1]
if user == last_user:
if i < user_count[user] - 2:
f_output.write("train" + "\t" + line + "\n")
elif i < user_count[user] - 1:
f_output.write("valid" + "\t" + line + "\n")
else:
f_output.write("test" + "\t" + line + "\n")
else:
last_user = user
i = 0
if i < user_count[user] - 2:
f_output.write("train" + "\t" + line + "\n")
elif i < user_count[user] - 1:
f_output.write("valid" + "\t" + line + "\n")
else:
f_output.write("test" + "\t" + line + "\n")
i += 1
return output_file
def download_and_extract(name, dest_path):
"""Downloads and extracts Amazon reviews and meta datafiles if they don’t already exist
Args:
name (str): Category of reviews.
dest_path (str): File path for the downloaded file.
Returns:
str: File path for the extracted file.
"""
dirs, _ = os.path.split(dest_path)
if not os.path.exists(dirs):
os.makedirs(dirs)
file_path = os.path.join(dirs, name)
if not os.path.exists(file_path):
_download_reviews(name, dest_path)
_extract_reviews(file_path, dest_path)
return file_path
def _download_reviews(name, dest_path):
"""Downloads Amazon reviews datafile.
Args:
name (str): Category of reviews
dest_path (str): File path for the downloaded file
"""
url = (
"http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/"
+ name
+ ".gz"
)
dirs, file = os.path.split(dest_path)
maybe_download(url, file + ".gz", work_directory=dirs)
def _extract_reviews(file_path, zip_path):
"""Extract Amazon reviews and meta datafiles from the raw zip files.
    To extract all files, use ZipFile's extractall(path) instead.
Args:
file_path (str): Destination path for datafile
zip_path (str): zipfile path
"""
with gzip.open(zip_path + ".gz", "rb") as zf, open(file_path, "wb") as f:
shutil.copyfileobj(zf, f)
|
salt/states/netusers.py | tomdoherty/salt | 9,425 | 12614383 | <filename>salt/states/netusers.py
"""
Network Users
=============
Manage the users configuration on network devices via the NAPALM proxy.
:codeauthor: <NAME> <<EMAIL>>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`NAPALM proxy minion <salt.proxy.napalm>`
- :mod:`Users configuration management module <salt.modules.napalm_users>`
.. versionadded:: 2016.11.0
"""
import copy
import logging
import salt.utils.json
import salt.utils.napalm
log = logging.getLogger(__name__)
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = "netusers"
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
"""
NAPALM library must be installed for this module to work and run in a (proxy) minion.
"""
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _retrieve_users():
"""Retrieves configured users"""
return __salt__["users.config"]()
def _ordered_dict_to_dict(probes):
"""."""
return salt.utils.json.loads(salt.utils.json.dumps(probes))
def _expand_users(device_users, common_users):
"""Creates a longer list of accepted users on the device."""
expected_users = copy.deepcopy(common_users)
expected_users.update(device_users)
return expected_users
def _check_users(users):
"""Checks if the input dictionary of users is valid."""
messg = ""
valid = True
for user, user_details in users.items():
if not user_details:
valid = False
messg += "Please provide details for username {user}.\n".format(user=user)
continue
if not (
isinstance(user_details.get("level"), int)
or 0 <= user_details.get("level") <= 15
):
# warn!
messg += (
"Level must be a integer between 0 and 15 for username {user}. Will"
" assume 0.\n".format(user=user)
)
return valid, messg
def _compute_diff(configured, expected):
"""Computes the differences between the actual config and the expected config"""
diff = {"add": {}, "update": {}, "remove": {}}
configured_users = set(configured.keys())
expected_users = set(expected.keys())
add_usernames = expected_users - configured_users
remove_usernames = configured_users - expected_users
common_usernames = expected_users & configured_users
add = {username: expected.get(username) for username in add_usernames}
remove = {username: configured.get(username) for username in remove_usernames}
update = {}
for username in common_usernames:
user_configuration = configured.get(username)
user_expected = expected.get(username)
if user_configuration == user_expected:
continue
update[username] = {}
for field, field_value in user_expected.items():
if user_configuration.get(field) != field_value:
update[username][field] = field_value
diff.update({"add": add, "update": update, "remove": remove})
return diff
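# Example (illustrative): with configured = {"admin": {"level": 10}} and
# expected = {"admin": {"level": 15}, "guest": {"level": 1}}, the function returns
# {"add": {"guest": {"level": 1}}, "update": {"admin": {"level": 15}}, "remove": {}}.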
def _set_users(users):
"""Calls users.set_users."""
return __salt__["users.set_users"](users, commit=False)
def _update_users(users):
"""Calls users.set_users."""
return __salt__["users.set_users"](users, commit=False)
def _delete_users(users):
"""Calls users.delete_users."""
return __salt__["users.delete_users"](users, commit=False)
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def managed(name, users=None, defaults=None):
"""
Manages the configuration of the users on the device, as specified in the state SLS file. Users not defined in that
file will be removed whilst users not configured on the device, will be added.
SLS Example:
.. code-block:: yaml
netusers_example:
netusers.managed:
- users:
admin:
level: 15
password: <PASSWORD>
sshkeys: []
restricted:
level: 1
password: <PASSWORD>
martin:
level: 15
password: ''
sshkeys:
- <KEY>
jonathan:
level: 15
password: ''
sshkeys:
- <KEY>
CLI Example:
.. code-block:: bash
salt 'edge01.kix01' state.sls router.users
Output example (raw python - can be reused in other modules):
.. code-block:: python
{
'netusers_|-netusers_example_|-netusers_example_|-managed': {
'comment': 'Configuration updated!',
'name': 'netusers_example',
'start_time': '10:57:08.678811',
'__id__': 'netusers_example',
'duration': 1620.982,
'__run_num__': 0,
'changes': {
'updated': {
'admin': {
'level': 15
},
'restricted': {
'level': 1
},
'martin': {
'sshkeys': [
'<KEY>'
]
}
},
'added': {
'jonathan': {
'password': '',
'sshkeys': [
'ssh-rsa A<KEY>'
],
'level': 15
}
},
'removed': {
}
},
'result': True
}
}
CLI Output:
.. code-block:: bash
edge01.kix01:
----------
ID: netusers_example
Function: netusers.managed
Result: True
Comment: Configuration updated!
Started: 11:03:31.957725
Duration: 1220.435 ms
Changes:
----------
added:
----------
jonathan:
----------
level:
15
password:
sshkeys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgG
R9zPkHGZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qao
qwpLB15GwLfEXBx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006
xeHh7rv7HtXF6zH3WIdUhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9
xZBq6DBb+sESMS4s7nFcsruMoedb+BAc3aww0naeWpogjSt+We7y2N
removed:
----------
updated:
----------
martin:
----------
sshkeys:
- ssh-dss A<KEY>
vwWHh0wJPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZ
KtCjO8LhbWCa+X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87
Oz1nKsKuNzm2csoUQlJtrmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwx
M570s35Of/vV0zoOccj753sXnpvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t
+wAAAIBURwSPZVElXe+9a43sF6M4ysT7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac
81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v
+zvMmv8KvQgHg
admin:
----------
level:
15
restricted:
----------
level:
1
Summary for edge01.kix01
------------
Succeeded: 1 (changed=1)
Failed: 0
------------
Total states run: 1
Total run time: 1.220 s
"""
result = False
comment = ""
changes = {}
ret = {"name": name, "changes": changes, "result": result, "comment": comment}
users = _ordered_dict_to_dict(users)
defaults = _ordered_dict_to_dict(defaults)
expected_users = _expand_users(users, defaults)
valid, message = _check_users(expected_users)
if not valid: # check and clean
ret["comment"] = "Please provide a valid configuration: {error}".format(
error=message
)
return ret
# ----- Retrieve existing users configuration and determine differences ------------------------------------------->
users_output = _retrieve_users()
if not users_output.get("result"):
ret["comment"] = "Cannot retrieve users from the device: {reason}".format(
reason=users_output.get("comment")
)
return ret
configured_users = users_output.get("out", {})
if configured_users == expected_users:
ret.update({"comment": "Users already configured as needed.", "result": True})
return ret
diff = _compute_diff(configured_users, expected_users)
users_to_add = diff.get("add", {})
users_to_update = diff.get("update", {})
users_to_remove = diff.get("remove", {})
changes = {
"added": users_to_add,
"updated": users_to_update,
"removed": users_to_remove,
}
ret.update({"changes": changes})
if __opts__["test"] is True:
ret.update(
{"result": None, "comment": "Testing mode: configuration was not changed!"}
)
return ret
# <---- Retrieve existing NTP peers and determine peers to be added/removed --------------------------------------->
# ----- Call _set_users and _delete_users as needed --------------------------------------------------------------->
expected_config_change = False
successfully_changed = True
if users_to_add:
_set = _set_users(users_to_add)
if _set.get("result"):
expected_config_change = True
else: # something went wrong...
successfully_changed = False
comment += "Cannot configure new users: {reason}".format(
reason=_set.get("comment")
)
if users_to_update:
_update = _update_users(users_to_update)
if _update.get("result"):
expected_config_change = True
else: # something went wrong...
successfully_changed = False
comment += "Cannot update the users configuration: {reason}".format(
reason=_update.get("comment")
)
if users_to_remove:
_delete = _delete_users(users_to_remove)
if _delete.get("result"):
expected_config_change = True
else: # something went wrong...
successfully_changed = False
comment += "Cannot remove users: {reason}".format(
reason=_delete.get("comment")
)
# <---- Call _set_users and _delete_users as needed ----------------------------------------------------------------
# ----- Try to commit changes ------------------------------------------------------------------------------------->
if expected_config_change and successfully_changed:
config_result, config_comment = __salt__["net.config_control"]()
result = config_result
comment += config_comment
# <---- Try to commit changes --------------------------------------------------------------------------------------
if expected_config_change and result and not comment:
comment = "Configuration updated!"
ret.update({"result": result, "comment": comment})
return ret
|
python/solver.py | motus/neurosat | 226 | 12614387 | <reponame>motus/neurosat<filename>python/solver.py
# Copyright 2018 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import PyMiniSolvers.minisolvers as minisolvers
def solve_sat(n_vars, iclauses):
solver = minisolvers.MinisatSolver()
for i in range(n_vars): solver.new_var(dvar=True)
for iclause in iclauses: solver.add_clause(iclause)
is_sat = solver.solve()
stats = solver.get_stats()
return is_sat, stats
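# Illustrative usage (not part of the original module):
#
#   iclauses = [[1, 2], [-1, 2], [-2]]   # (x1 | x2) & (~x1 | x2) & (~x2) -> unsatisfiable
#   is_sat, stats = solve_sat(2, iclauses)
#   assert is_sat is False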
|
locations/spiders/walmart.py | davidchiles/alltheplaces | 297 | 12614413 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import scrapy
import json
import re
from collections import defaultdict
from locations.items import GeojsonPointItem
class WalmartSpider(scrapy.Spider):
name = "walmart"
item_attributes = {'brand': "Walmart", 'brand_wikidata': "Q483551"}
allowed_domains = ["walmart.com"]
start_urls = (
'https://www.walmart.com/sitemap_store_main.xml',
)
retries = defaultdict(int)
def store_hours(self, store_hours):
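        """Return the store's opening hours as '; '-joined
        "<startDay>-<endDay> <startHr>-<endHr>" entries, '24/7' when the store
        is open around the clock, or None when no combined hours are listed."""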
if store_hours.get('operationalHours').get('open24Hours') is True:
return u'24/7'
elif not store_hours.get('operationalHoursCombined'):
return None
else:
op_hours = store_hours.get('operationalHoursCombined')
open_hours = []
for op_hour in op_hours:
if op_hour.get('dailyHours').get('closed') is True:
continue
if op_hour.get('dailyHours').get('openFullDay') is True:
start_hr = '00:00'
end_hr = '24:00'
else:
start_hr = op_hour.get('dailyHours').get('startHr')
end_hr = op_hour.get('dailyHours').get('endHr')
start_day = op_hour.get('startDayName')
end_day = op_hour.get('endDayName')
if end_day is None:
end_day = ''
hours = start_day+'-'+end_day+' '+start_hr+'-'+end_hr
open_hours.append(hours)
hours_combined = '; '.join(open_hours)
return hours_combined
def parse(self, response):
response.selector.remove_namespaces()
for u in response.xpath('//loc/text()').extract():
if u.endswith('/details'):
yield scrapy.Request(u.strip(), callback=self.parse_store)
def parse_store(self, response):
script = response.xpath(
"//script[contains(.,'__WML_REDUX_INITIAL_STATE__ = ')]").extract_first()
# In rare cases will hit page before script tag loads with content
if script is None:
if self.retries.get(response.url, 0) <= 2:
self.retries[response.url] += 1
# Try again
                yield scrapy.Request(response.url, callback=self.parse_store)
                # Stop here so the retried request is parsed on its own;
                # otherwise the regex below would run against script=None.
                return
else:
raise Exception('Retried too many times')
script_content = re.search(r'window.__WML_REDUX_INITIAL_STATE__ = (.*);</script>', script,
flags=re.IGNORECASE | re.DOTALL).group(1)
store_data = json.loads(script_content).get('store')
services = store_data['primaryServices'] + \
store_data['secondaryServices']
yield GeojsonPointItem(
lat=store_data.get('geoPoint').get('latitude'),
lon=store_data.get('geoPoint').get('longitude'),
ref=store_data.get('id'),
phone=store_data.get('phone'),
name=store_data.get('displayName'),
opening_hours=self.store_hours(store_data),
addr_full=store_data.get('address').get('streetAddress'),
city=store_data.get('address').get('city'),
state=store_data.get('address').get('state'),
postcode=store_data.get('address').get('postalCode'),
website=store_data.get('detailsPageURL'),
extras={
'amenity:fuel': any(s['name'] == 'GAS_STATION' and s['active'] for s in services),
'shop': 'department_store' if store_data['storeType']['id'] == 2 else 'supermarket'
})
|
src/openfermion/circuits/vpe_circuits.py | bene1337/OpenFermion | 1,291 | 12614475 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Circuit generation functions for verified phase estimation (2010.02538)"""
from typing import Sequence, Optional
import numpy
import cirq
def vpe_single_circuit(qubits: Sequence[cirq.Qid], prep: cirq.Circuit,
evolve: cirq.Circuit, initial_rotation: cirq.Gate,
final_rotation: cirq.Gate) -> cirq.Circuit:
"""
Combines the different parts that make up a VPE circuit
The protocol for VPE requires combining preparation, evolution, and
measurement circuits for different values of time in order to estimate
the phase function. This function takes these parts and combines them.
Note that we need not specify the time of evolution as this is contained
already within evolve.
Arguments:
prep [cirq.Circuit] -- The circuit to prepare the initial state
(|psi_s>+|psi_r>) from |0>+|1>
evolve [cirq.Circuit] -- The circuit to evolve for time t
initial_rotation [cirq.Gate] -- The initial rotation on the target qubit
(Note that the gate should already be targeting the qubit)
final_rotation [cirq.Gate] -- The final rotation on the target qubit
(Note that the gate should already be targeting the qubit)
"""
circuit = cirq.Circuit()
circuit.append(initial_rotation)
circuit.append(prep)
circuit.append(evolve)
circuit.append(cirq.inverse(prep))
circuit.append(final_rotation)
circuit.append(cirq.measure(*qubits, key='msmt'))
return circuit
# Turning off yapf here as its formatting suggestion is bad.
# yapf: disable
standard_vpe_rotation_set = [
[0.25, cirq.ry(numpy.pi / 2), cirq.ry(-numpy.pi / 2)],
[-0.25, cirq.ry(numpy.pi / 2), cirq.ry(numpy.pi / 2)],
[-0.25j, cirq.ry(numpy.pi / 2), cirq.rx(-numpy.pi / 2)],
[0.25j, cirq.ry(numpy.pi / 2), cirq.rx(numpy.pi / 2)],
[0.25, cirq.rx(numpy.pi / 2), cirq.rx(-numpy.pi / 2)],
[-0.25, cirq.rx(numpy.pi / 2), cirq.rx(numpy.pi / 2)],
[0.25j, cirq.rx(numpy.pi / 2), cirq.ry(-numpy.pi / 2)],
[-0.25j, cirq.rx(numpy.pi / 2), cirq.ry(numpy.pi / 2)],
]
# yapf: enable
def vpe_circuits_single_timestep(qubits: Sequence[cirq.Qid],
prep: cirq.Circuit,
evolve: cirq.Circuit,
target_qubit: cirq.Qid,
rotation_set: Optional[Sequence] = None
) -> Sequence[cirq.Circuit]:
"""Prepares the circuits to perform VPE at a fixed time
Puts together the set of pre- and post-rotations to implement
    VPE for a given state preparation and time evolution.
Arguments:
prep [cirq.Circuit] -- The circuit to prepare the target state
(|psi_s>+|psi_r>) from |0>+|1>
evolve [cirq.Circuit] -- The circuit to evolve for time t
target_qubit [cirq.Qid] -- The qubit on which the phase
function is encoded
rotation_set [Sequence] -- A set of initial and final rotations for the
target qubit. We average the phase function estimation over multiple
such rotations to cancel out readout noise, final T1 decay, etc.
The standard rotation set is typically sufficient for these
            purposes. The first element of each rotation set entry is the
            multiplier used to reconstruct the phase function; it is not
            needed by this function.
If rotation_set is set to None, the 'standard rotation set' of all
possible X and Y rotations before and after the circuit is used.
"""
if rotation_set is None:
rotation_set = standard_vpe_rotation_set
circuits = [
vpe_single_circuit(qubits, prep, evolve, rdata[1].on(target_qubit),
rdata[2].on(target_qubit)) for rdata in rotation_set
]
return circuits
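# Hedged usage sketch (not part of the original module): builds the eight VPE
# circuits for a toy two-qubit system. The preparation and evolution circuits
# below are placeholders chosen only to keep the example self-contained; a
# real study would use the Trotterized evolution of the target Hamiltonian.
if __name__ == '__main__':
    example_qubits = cirq.LineQubit.range(2)
    example_prep = cirq.Circuit([cirq.CNOT(example_qubits[0], example_qubits[1])])
    example_evolve = cirq.Circuit([cirq.rz(0.1).on(example_qubits[1])])
    example_circuits = vpe_circuits_single_timestep(
        example_qubits, example_prep, example_evolve, example_qubits[0])
    assert len(example_circuits) == len(standard_vpe_rotation_set)
    print(example_circuits[0])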
|
falcon_kit/mains/tasks.py | peterjc/FALCON | 216 | 12614477 | <reponame>peterjc/FALCON
"""Executable tasks.
To be called by pbsmrtpipe.
pypeFLOW uses its own adaptors instead.
"""
from __future__ import absolute_import
from __future__ import print_function
from .. import run_support as support
import sys
def help():
print("""
Usage:
falcon-task [task] <[task-args]>
tasks:
make-fofn-abs
""")
sys.exit(2)
def main_make_fofn_abs(i_fofn_fn, o_fofn_fn):
support.make_fofn_abs(i_fofn_fn, o_fofn_fn)
def main(argv=sys.argv):
if len(argv) < 2 or argv[1].startswith('-'):
help()
task = argv[1]
tasks = {
'make-fofn-abs': main_make_fofn_abs,
}
return tasks[task](*argv[2:])
if __name__ == "__main__":
main(sys.argv)
|
python/190 Reverse Bits.py | allandproust/leetcode-share | 156 | 12614498 | <reponame>allandproust/leetcode-share
'''
Reverse bits of a given 32 bits unsigned integer.
For example, given input 43261596 (represented in binary as 00000010100101000001111010011100), return 964176192 (represented in binary as 00111001011110000010100101000000).
Follow up:
If this function is called many times, how would you optimize it?
'''
class Solution(object):
def reverseBits(self, n):
"""
:type n: int
:rtype: int
"""
        # Swap adjacent bits, then adjacent 2-bit pairs, then nibbles, so each
        # byte ends up bit-reversed in place.
        n = ((n & 0x55555555) << 1) | ((n >> 1) & 0x55555555)
        n = ((n & 0x33333333) << 2) | ((n >> 2) & 0x33333333)
        n = ((n & 0x0f0f0f0f) << 4) | ((n >> 4) & 0x0f0f0f0f)
        # Finally reverse the byte order of the 32-bit word.
        n = (n << 24) | ((n & 0xff00) << 8) | ((n >> 8) & 0xff00) | (n >> 24)
        # Mask back to 32 bits, since Python integers are unbounded.
        return n & 0xffffffff
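# Hedged follow-up sketch (not part of the original solution): if the function
# is called many times, a precomputed 256-entry byte-reversal table reduces the
# work to four lookups. The helper below is an illustrative addition, not part
# of the LeetCode interface; reverse_bits_cached(43261596) == 964176192.
_BYTE_REV = [int('{:08b}'.format(b)[::-1], 2) for b in range(256)]
def reverse_bits_cached(n):
    return ((_BYTE_REV[n & 0xff] << 24) |
            (_BYTE_REV[(n >> 8) & 0xff] << 16) |
            (_BYTE_REV[(n >> 16) & 0xff] << 8) |
            _BYTE_REV[(n >> 24) & 0xff])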
if __name__ == "__main__":
assert Solution().reverseBits(43261596) == 964176192 |
openrec/tf1/legacy/recommenders/cdl.py | pbaiz/openrec | 399 | 12614509 | <reponame>pbaiz/openrec
from openrec.tf1.legacy.recommenders import PMF
from openrec.tf1.legacy.modules.extractions import SDAE
from openrec.tf1.legacy.modules.fusions import Average
class CDL(PMF):
def __init__(self, batch_size, max_user, max_item, dim_embed, item_f, dims, dropout=None, test_batch_size=None,
item_serving_size=None, l2_reg=None, l2_reg_mlp=None, l2_reconst=None, opt='SGD',
sess_config=None):
self._item_f = item_f
self._dims = dims
self._dropout = dropout
self._l2_reg_mlp = l2_reg_mlp
self._l2_reconst = l2_reconst
super(CDL, self).__init__(batch_size=batch_size, max_user=max_user, max_item=max_item, dim_embed=dim_embed,
l2_reg=l2_reg, test_batch_size=test_batch_size, opt=opt, sess_config=sess_config)
def _build_item_inputs(self, train=True):
super(CDL, self)._build_item_inputs(train)
if train:
self._add_input(name='item_feature', dtype='float32', shape=[self._batch_size, self._item_f.shape[1]])
else:
self._add_input(name='item_id', dtype='int32', shape=[None], train=False)
self._add_input(name='item_feature', dtype='float32', shape=[None, self._item_f.shape[1]], train=False)
def _input_mappings(self, batch_data, train):
default_input_map = super(CDL, self)._input_mappings(batch_data=batch_data, train=train)
if train:
default_input_map[self._get_input('item_feature')] = self._item_f[batch_data['item_id_input']]
else:
default_input_map[self._get_input('item_id', train=False)] = batch_data['item_id_input']
default_input_map[self._get_input('item_feature', train=False)] = self._item_f[batch_data['item_id_input']]
return default_input_map
def _build_item_extractions(self, train=True):
super(CDL, self)._build_item_extractions(train)
self._add_module('item_f',
SDAE(in_tensor=self._get_input('item_feature', train=train), dims=self._dims, l2_reg=self._l2_reg_mlp,
l2_reconst=self._l2_reconst, dropout=self._dropout, scope='AutoEncoder', reuse=False),
train=train)
def _build_default_fusions(self, train=True):
self._add_module('item_vec',
Average(scope='item_average', reuse=not train, module_list=[self._get_module('item_vec', train=train),
self._get_module('item_f', train=train)], weight=2.0),
train=train)
|
notifications/rest/users/remove-user-from-segment/remove-user-from-segment.6.x.py | Tshisuaka/api-snippets | 234 | 12614535 | #!/usr/bin/env python
# Install the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# To set up environmental variables, see http://twil.io/secure
ACCOUNT_SID = os.environ['TWILIO_ACCOUNT_SID']
AUTH_TOKEN = os.environ['TWILIO_AUTH_TOKEN']
client = Client(ACCOUNT_SID, AUTH_TOKEN)
client.notify.services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
.users('User0001') \
.segment_memberships('premium').delete()
|
alembic/versions/2020-04-27_8b536bc5d716_add_skipped_enum_value.py | fake-name/ReadableWebProxy | 193 | 12614536 | <gh_stars>100-1000
"""Add Skipped enum value
Revision ID: 8b536bc5d716
Revises: <KEY>
Create Date: 2020-04-27 12:12:47.075110
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute("COMMIT")
op.execute("ALTER TYPE dlstate_enum ADD VALUE 'skipped';")
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
### end Alembic commands ###
pass |
tools/android/loading/sandwich_swr.py | google-ar/chromium | 2,151 | 12614561 | <reponame>google-ar/chromium
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" This module implements the Stale-While-Revalidate performance improvement
experiment on third parties' resources.
The top level operations of the experiment are:
1. Record WPR archive;
2. Create a patched WPR archive so that all resource are getting cached;
3. Record original cache using the patched WPR archive;
4. Setup the benchmark producing the list of URL to enable SWR in a JSON file;
5. Create the benchmark cache by:
- Remove No-Store resources;
- Adding the SWR header on resources that are experimentally required to
have it;
- Patch SWR header on resources that already had it to make sure the
the SWR freshness is not out of date;
- And restore all other headers so that response headers such as
Set-Cookie are still in the cache to avoid entropy caused by
different cookie values.
6. Run the benchmark;
7. Extract metrics into CSV files.
"""
import csv
import json
import logging
import os
import shutil
from urlparse import urlparse
import chrome_cache
import common_util
import loading_trace
import request_track
import sandwich_metrics
import sandwich_runner
import sandwich_utils
import task_manager
import wpr_backend
def _ExtractRegexMatchingUrls(urls, domain_regexes):
urls_to_enable = set()
for url in urls:
if url in urls_to_enable:
continue
parsed_url = urlparse(url)
for domain_regex in domain_regexes:
if domain_regex.search(parsed_url.netloc):
urls_to_enable.add(url)
break
return urls_to_enable
def _BuildBenchmarkCache(
original_wpr_trace_path, urls_to_enable_swr,
original_cache_trace_path, original_cache_archive_path,
cache_archive_dest_path):
  # Load the trace that was generated when recording the original WPR archive.
logging.info('loading %s', original_wpr_trace_path)
trace = loading_trace.LoadingTrace.FromJsonFile(original_wpr_trace_path)
# Lists URLs that should not be in the cache or already have SWR headers.
urls_should_not_be_cached = set()
urls_already_with_swr = set()
for request in trace.request_track.GetEvents():
caching_policy = request_track.CachingPolicy(request)
if not caching_policy.IsCacheable():
urls_should_not_be_cached.add(request.url)
elif caching_policy.GetFreshnessLifetimes()[1] > 0:
urls_already_with_swr.add(request.url)
  # Traces are fat; drop this one to free memory before loading the next one
  # in this scope.
del trace
# Load trace that was generated at original cache creation.
logging.info('loading %s', original_cache_trace_path)
trace = loading_trace.LoadingTrace.FromJsonFile(original_cache_trace_path)
# Create cache contents.
delete_count = 0
swr_patch_count = 0
originaly_swr_patch_count = 0
noswr_patch_count = 0
with common_util.TemporaryDirectory(prefix='sandwich_tmp') as tmp_path:
cache_path = os.path.join(tmp_path, 'cache')
chrome_cache.UnzipDirectoryContent(original_cache_archive_path, cache_path)
cache_backend = chrome_cache.CacheBackend(cache_path, 'simple')
cache_keys = set(cache_backend.ListKeys())
for request in trace.request_track.GetEvents():
if request.url not in cache_keys:
continue
if request.url in urls_should_not_be_cached:
cache_backend.DeleteKey(request.url)
delete_count += 1
continue
if not request.HasReceivedResponse():
continue
if request.url in urls_to_enable_swr:
request.SetHTTPResponseHeader(
'cache-control', 'max-age=0,stale-while-revalidate=315360000')
request.SetHTTPResponseHeader(
'last-modified', 'Thu, 23 Jun 2016 11:30:00 GMT')
swr_patch_count += 1
elif request.url in urls_already_with_swr:
# Force to use SWR on resources that originally attempted to use it.
request.SetHTTPResponseHeader(
'cache-control', 'max-age=0,stale-while-revalidate=315360000')
# The resource originally had SWR enabled therefore we don't
# Last-Modified to repro exactly the performance impact in case these
# headers were not set properly causing an invalidation instead of a
# revalidation.
originaly_swr_patch_count += 1
else:
# Force synchronous revalidation.
request.SetHTTPResponseHeader('cache-control', 'max-age=0')
noswr_patch_count += 1
raw_headers = request.GetRawResponseHeaders()
cache_backend.UpdateRawResponseHeaders(request.url, raw_headers)
chrome_cache.ZipDirectoryContent(cache_path, cache_archive_dest_path)
logging.info('patched %d cached resources with forced SWR', swr_patch_count)
logging.info('patched %d cached resources with original SWR',
originaly_swr_patch_count)
logging.info('patched %d cached resources without SWR', noswr_patch_count)
logging.info('deleted %d cached resources', delete_count)
def _ProcessRunOutputDir(benchmark_setup, runner_output_dir):
"""Process benchmark's run output directory.
Args:
    benchmark_setup: The deserialized benchmark setup JSON produced by the
        SetupBenchmark task.
runner_output_dir: Same as for SandwichRunner.output_dir
Returns:
List of dictionary.
"""
run_metrics_list = []
for repeat_id, repeat_dir in sandwich_runner.WalkRepeatedRuns(
runner_output_dir):
trace_path = os.path.join(repeat_dir, sandwich_runner.TRACE_FILENAME)
logging.info('processing trace: %s', trace_path)
trace = loading_trace.LoadingTrace.FromJsonFile(trace_path)
served_from_cache_urls = sandwich_utils.ListUrlRequests(
trace, sandwich_utils.RequestOutcome.ServedFromCache)
matching_subresource_count_used_from_cache = (
served_from_cache_urls.intersection(
set(benchmark_setup['urls_to_enable_swr'])))
run_metrics = {
'url': trace.url,
'repeat_id': repeat_id,
'benchmark_name': benchmark_setup['benchmark_name'],
'cache_recording.subresource_count':
len(benchmark_setup['effective_subresource_urls']),
'cache_recording.matching_subresource_count':
len(benchmark_setup['urls_to_enable_swr']),
'benchmark.matching_subresource_count_used_from_cache':
len(matching_subresource_count_used_from_cache)
}
run_metrics.update(
sandwich_metrics.ExtractCommonMetricsFromRepeatDirectory(
repeat_dir, trace))
run_metrics_list.append(run_metrics)
return run_metrics_list
class StaleWhileRevalidateBenchmarkBuilder(task_manager.Builder):
"""A builder for a graph of tasks for Stale-While-Revalidate study benchmarks.
"""
def __init__(self, common_builder):
task_manager.Builder.__init__(self,
common_builder.output_directory,
common_builder.output_subdirectory)
self._common_builder = common_builder
self._patched_wpr_path = None
self._original_cache_task = None
self._original_cache_trace_path = None
self._PopulateCommonPipelines()
def _PopulateCommonPipelines(self):
"""Creates necessary tasks to produce initial cache archives.
Here is the full dependency tree for the returned task:
depends on: common/original-cache.zip
depends on: common/webpages-patched.wpr
depends on: common/webpages.wpr
"""
@self.RegisterTask('common/webpages-patched.wpr',
dependencies=[self._common_builder.original_wpr_task])
def BuildPatchedWpr():
shutil.copyfile(
self._common_builder.original_wpr_task.path, BuildPatchedWpr.path)
wpr_archive = wpr_backend.WprArchiveBackend(BuildPatchedWpr.path)
wpr_url_entries = wpr_archive.ListUrlEntries()
for wpr_url_entry in wpr_url_entries:
sandwich_utils.PatchWprEntryToBeCached(wpr_url_entry)
logging.info('number of patched entries: %d', len(wpr_url_entries))
wpr_archive.Persist()
@self.RegisterTask('common/original-cache.zip',
dependencies=[BuildPatchedWpr])
def BuildOriginalCache():
runner = self._common_builder.CreateSandwichRunner()
runner.wpr_archive_path = BuildPatchedWpr.path
runner.cache_archive_path = BuildOriginalCache.path
runner.cache_operation = sandwich_runner.CacheOperation.SAVE
runner.output_dir = BuildOriginalCache.run_path
runner.Run()
BuildOriginalCache.run_path = BuildOriginalCache.path[:-4] + '-run'
self._original_cache_trace_path = os.path.join(
BuildOriginalCache.run_path, '0', sandwich_runner.TRACE_FILENAME)
self._patched_wpr_path = BuildPatchedWpr.path
self._original_cache_task = BuildOriginalCache
def PopulateBenchmark(self, benchmark_name, domain_regexes,
transformer_list_name, transformer_list):
"""Populate benchmarking tasks.
Args:
benchmark_name: Name of the benchmark.
domain_regexes: Compiled regexes of domains to enable SWR.
transformer_list_name: A string describing the transformers, will be used
in Task names (prefer names without spaces and special characters).
transformer_list: An ordered list of function that takes an instance of
SandwichRunner as parameter, would be applied immediately before
SandwichRunner.Run() in the given order.
    Here is the full dependency tree added for the returned task:
<transformer_list_name>/<benchmark_name>-metrics.csv
depends on: <transformer_list_name>/<benchmark_name>-run/
depends on: common/<benchmark_name>-cache.zip
depends on: common/<benchmark_name>-setup.json
depends on: common/patched-cache.zip
"""
additional_column_names = [
'url',
'repeat_id',
'benchmark_name',
# Number of resources of the page.
'cache_recording.subresource_count',
        # Number of resources matching at least one domain regex, to give an
        # idea in the CSV of how much the threshold influences additional SWR
        # use.
'cache_recording.matching_subresource_count',
# Number of resources fetched from cache matching at least one domain
# regex, to give an actual idea if it is possible to have performance
# improvement on the web page (or not because only XHR), but also tells
# if the page loading time should see a performance improvement or not
        # compared with different thresholds.
'benchmark.matching_subresource_count_used_from_cache']
shared_task_prefix = os.path.join('common', benchmark_name)
task_prefix = os.path.join(transformer_list_name, benchmark_name)
@self.RegisterTask(shared_task_prefix + '-setup.json', merge=True,
dependencies=[self._original_cache_task])
def SetupBenchmark():
logging.info('loading %s', self._original_cache_trace_path)
trace = loading_trace.LoadingTrace.FromJsonFile(
self._original_cache_trace_path)
logging.info('generating %s', SetupBenchmark.path)
effective_subresource_urls = sandwich_utils.ListUrlRequests(
trace, sandwich_utils.RequestOutcome.All)
urls_to_enable_swr = _ExtractRegexMatchingUrls(
effective_subresource_urls, domain_regexes)
logging.info(
'count of urls to enable SWR: %s', len(urls_to_enable_swr))
with open(SetupBenchmark.path, 'w') as output:
json.dump({
'benchmark_name': benchmark_name,
'urls_to_enable_swr': [url for url in urls_to_enable_swr],
'effective_subresource_urls':
[url for url in effective_subresource_urls]
}, output)
@self.RegisterTask(shared_task_prefix + '-cache.zip', merge=True,
dependencies=[SetupBenchmark])
def BuildBenchmarkCacheArchive():
benchmark_setup = json.load(open(SetupBenchmark.path))
_BuildBenchmarkCache(
original_wpr_trace_path=(
self._common_builder.original_wpr_recording_trace_path),
urls_to_enable_swr=set(benchmark_setup['urls_to_enable_swr']),
original_cache_trace_path=self._original_cache_trace_path,
original_cache_archive_path=self._original_cache_task.path,
cache_archive_dest_path=BuildBenchmarkCacheArchive.path)
@self.RegisterTask(task_prefix + '-run/', [BuildBenchmarkCacheArchive])
def RunBenchmark():
runner = self._common_builder.CreateSandwichRunner()
for transformer in transformer_list:
transformer(runner)
runner.wpr_archive_path = self._patched_wpr_path
runner.wpr_out_log_path = os.path.join(
RunBenchmark.path, sandwich_runner.WPR_LOG_FILENAME)
runner.cache_archive_path = BuildBenchmarkCacheArchive.path
runner.cache_operation = sandwich_runner.CacheOperation.PUSH
runner.output_dir = RunBenchmark.path
runner.chrome_args.append('--enable-features=StaleWhileRevalidate2')
runner.Run()
@self.RegisterTask(task_prefix + '-metrics.csv', [RunBenchmark])
def ExtractMetrics():
benchmark_setup = json.load(open(SetupBenchmark.path))
run_metrics_list = _ProcessRunOutputDir(
benchmark_setup, RunBenchmark.path)
run_metrics_list.sort(key=lambda e: e['repeat_id'])
with open(ExtractMetrics.path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=(additional_column_names +
sandwich_metrics.COMMON_CSV_COLUMN_NAMES))
writer.writeheader()
for run_metrics in run_metrics_list:
writer.writerow(run_metrics)
self._common_builder.default_final_tasks.append(ExtractMetrics)
|
third_party/cython/src/Cython/Compiler/Tests/TestMemView.py | domenic/mojo | 652 | 12614595 | from Cython.TestUtils import CythonTest
import Cython.Compiler.Errors as Errors
from Cython.Compiler.Nodes import *
from Cython.Compiler.ParseTreeTransforms import *
from Cython.Compiler.Buffer import *
class TestMemviewParsing(CythonTest):
def parse(self, s):
return self.should_not_fail(lambda: self.fragment(s)).root
def not_parseable(self, expected_error, s):
e = self.should_fail(lambda: self.fragment(s), Errors.CompileError)
self.assertEqual(expected_error, e.message_only)
def test_default_1dim(self):
self.parse(u"cdef int[:] x")
self.parse(u"cdef short int[:] x")
def test_default_ndim(self):
self.parse(u"cdef int[:,:,:,:,:] x")
self.parse(u"cdef unsigned long int[:,:,:,:,:] x")
self.parse(u"cdef unsigned int[:,:,:,:,:] x")
def test_zero_offset(self):
self.parse(u"cdef long double[0:] x")
self.parse(u"cdef int[0:] x")
def test_zero_offset_ndim(self):
self.parse(u"cdef int[0:,0:,0:,0:] x")
def test_def_arg(self):
self.parse(u"def foo(int[:,:] x): pass")
def test_cdef_arg(self):
self.parse(u"cdef foo(int[:,:] x): pass")
def test_general_slice(self):
self.parse(u'cdef float[::ptr, ::direct & contig, 0::full & strided] x')
def test_non_slice_memview(self):
self.not_parseable(u"An axis specification in memoryview declaration does not have a ':'.",
u"cdef double[:foo, bar] x")
self.not_parseable(u"An axis specification in memoryview declaration does not have a ':'.",
u"cdef double[0:foo, bar] x")
def test_basic(self):
t = self.parse(u"cdef int[:] x")
memv_node = t.stats[0].base_type
self.assert_(isinstance(memv_node, MemoryViewSliceTypeNode))
# we also test other similar declarations (buffers, anonymous C arrays)
# since the parsing has to distinguish between them.
def disable_test_no_buf_arg(self): # TODO
self.not_parseable(u"Expected ']'",
u"cdef extern foo(object[int, ndim=2])")
def disable_test_parse_sizeof(self): # TODO
self.parse(u"sizeof(int[NN])")
self.parse(u"sizeof(int[])")
self.parse(u"sizeof(int[][NN])")
self.not_parseable(u"Expected an identifier or literal",
u"sizeof(int[:NN])")
self.not_parseable(u"Expected ']'",
u"sizeof(foo[dtype=bar]")
if __name__ == '__main__':
import unittest
unittest.main()
|
openid/extensions/ax.py | cjwatson/python-openid | 176 | 12614599 | """Implements the OpenID Attribute Exchange specification, version 1.0.
@since: 2.1.0
"""
from __future__ import unicode_literals
import six
from openid import extension
from openid.message import OPENID_NS, NamespaceMap
from openid.oidutil import force_text, string_to_text
from openid.server.trustroot import TrustRoot
__all__ = [
'AttrInfo',
'FetchRequest',
'FetchResponse',
'StoreRequest',
'StoreResponse',
]
# Use this as the 'count' value for an attribute in a FetchRequest to
# ask for as many values as the OP can provide.
UNLIMITED_VALUES = "unlimited"
# Minimum supported alias length in characters. Here for
# completeness.
MINIMUM_SUPPORTED_ALIAS_LENGTH = 32
def checkAlias(alias):
"""
Check an alias for invalid characters; raise AXError if any are
found. Return None if the alias is valid.
"""
if ',' in alias:
raise AXError("Alias %r must not contain comma" % (alias,))
if '.' in alias:
raise AXError("Alias %r must not contain period" % (alias,))
class AXError(ValueError):
"""Results from data that does not meet the attribute exchange 1.0
specification"""
class NotAXMessage(AXError):
"""Raised when there is no Attribute Exchange mode in the message."""
def __repr__(self):
return self.__class__.__name__
def __str__(self):
return self.__class__.__name__
class AXMessage(extension.Extension):
"""Abstract class containing common code for attribute exchange messages
@cvar ns_alias: The preferred namespace alias for attribute
exchange messages
@cvar mode: The type of this attribute exchange message. This must
be overridden in subclasses.
"""
ns_alias = 'ax'
mode = None
ns_uri = 'http://openid.net/srv/ax/1.0'
def _checkMode(self, ax_args):
"""Raise an exception if the mode in the attribute exchange
arguments does not match what is expected for this class.
@raises NotAXMessage: When there is no mode value in ax_args at all.
@raises AXError: When mode does not match.
"""
mode = ax_args.get('mode')
if mode != self.mode:
if not mode:
raise NotAXMessage()
else:
raise AXError(
'Expected mode %r; got %r' % (self.mode, mode))
def _newArgs(self):
"""Return a set of attribute exchange arguments containing the
basic information that must be in every attribute exchange
message.
"""
return {'mode': self.mode}
class AttrInfo(object):
"""Represents a single attribute in an attribute exchange
request. This should be added to an AXRequest object in order to
request the attribute.
@ivar required: Whether the attribute will be marked as required
when presented to the subject of the attribute exchange
request.
@type required: bool
@ivar count: How many values of this type to request from the
subject. Defaults to one.
@type count: int
@ivar type_uri: The identifier that determines what the attribute
represents and how it is serialized. For example, one type URI
representing dates could represent a Unix timestamp in base 10
and another could represent a human-readable string.
@type type_uri: six.text_type
@ivar alias: The name that should be given to this alias in the
request. If it is not supplied, a generic name will be
assigned. For example, if you want to call a Unix timestamp
value 'tstamp', set its alias to that value. If two attributes
in the same message request to use the same alias, the request
will fail to be generated.
@type alias: six.text_type or NoneType
"""
def __init__(self, type_uri, count=1, required=False, alias=None):
self.required = required
self.count = count
self.type_uri = type_uri
self.alias = alias
if self.alias is not None:
checkAlias(self.alias)
def wantsUnlimitedValues(self):
"""
When processing a request for this attribute, the OP should
call this method to determine whether all available attribute
values were requested. If self.count == UNLIMITED_VALUES,
this returns True. Otherwise this returns False, in which
case self.count is an integer.
"""
return self.count == UNLIMITED_VALUES
def toTypeURIs(namespace_map, alias_list_s):
"""Given a namespace mapping and a string containing a
comma-separated list of namespace aliases, return a list of type
URIs that correspond to those aliases.
@param namespace_map: The mapping from namespace URI to alias
@type namespace_map: openid.message.NamespaceMap
@param alias_list_s: The string containing the comma-separated
list of aliases. May also be None for convenience.
@type alias_list_s: Optional[six.text_type], six.binary_type is deprecated
@returns: The list of namespace URIs that corresponds to the
supplied list of aliases. If the string was zero-length or
None, an empty list will be returned.
@raise KeyError: If an alias is present in the list of aliases but
is not present in the namespace map.
"""
uris = []
if alias_list_s:
alias_list_s = string_to_text(alias_list_s,
"Binary values for alias_list_s are deprecated. Use text input instead.")
for alias in alias_list_s.split(','):
type_uri = namespace_map.getNamespaceURI(alias)
if type_uri is None:
raise KeyError(
'No type is defined for attribute name %r' % (alias,))
else:
uris.append(type_uri)
return uris
class FetchRequest(AXMessage):
"""An attribute exchange 'fetch_request' message. This message is
sent by a relying party when it wishes to obtain attributes about
the subject of an OpenID authentication request.
@ivar requested_attributes: The attributes that have been
requested thus far, indexed by the type URI.
@type requested_attributes: Dict[six.text_type, AttrInfo]
@ivar update_url: A URL that will accept responses for this
attribute exchange request, even in the absence of the user
who made this request.
"""
mode = 'fetch_request'
def __init__(self, update_url=None):
AXMessage.__init__(self)
self.requested_attributes = {}
self.update_url = update_url
def add(self, attribute):
"""Add an attribute to this attribute exchange request.
@param attribute: The attribute that is being requested
@type attribute: C{L{AttrInfo}}
@returns: None
@raise KeyError: when the requested attribute is already
present in this fetch request.
"""
if attribute.type_uri in self.requested_attributes:
raise KeyError('The attribute %r has already been requested'
% (attribute.type_uri,))
self.requested_attributes[attribute.type_uri] = attribute
def getExtensionArgs(self):
"""Get the serialized form of this attribute fetch request.
@returns: The fetch request message parameters
@rtype: Dict[six.text_type, six.text_type]
"""
aliases = NamespaceMap()
required = []
if_available = []
ax_args = self._newArgs()
for type_uri, attribute in six.iteritems(self.requested_attributes):
if attribute.alias is None:
alias = aliases.add(type_uri)
else:
# This will raise an exception when the second
# attribute with the same alias is added. I think it
# would be better to complain at the time that the
# attribute is added to this object so that the code
# that is adding it is identified in the stack trace,
# but it's more work to do so, and it won't be 100%
# accurate anyway, since the attributes are
# mutable. So for now, just live with the fact that
# we'll learn about the error later.
#
# The other possible approach is to hide the error and
# generate a new alias on the fly. I think that would
# probably be bad.
alias = aliases.addAlias(type_uri, attribute.alias)
if attribute.required:
required.append(alias)
else:
if_available.append(alias)
if attribute.count != 1:
ax_args['count.' + alias] = six.text_type(attribute.count)
ax_args['type.' + alias] = type_uri
if required:
ax_args['required'] = ','.join(required)
if if_available:
ax_args['if_available'] = ','.join(if_available)
return ax_args
def getRequiredAttrs(self):
"""Get the type URIs for all attributes that have been marked
as required.
@returns: A list of the type URIs for attributes that have
been marked as required.
@rtype: List[six.text_type]
"""
required = []
for type_uri, attribute in six.iteritems(self.requested_attributes):
if attribute.required:
required.append(type_uri)
return required
@classmethod
def fromOpenIDRequest(cls, openid_request):
"""Extract a FetchRequest from an OpenID message
@param openid_request: The OpenID authentication request
containing the attribute fetch request
@type openid_request: C{L{openid.server.server.CheckIDRequest}}
@rtype: C{L{FetchRequest}} or C{None}
@returns: The FetchRequest extracted from the message or None, if
the message contained no AX extension.
@raises KeyError: if the AuthRequest is not consistent in its use
of namespace aliases.
@raises AXError: When parseExtensionArgs would raise same.
@see: L{parseExtensionArgs}
"""
message = openid_request.message
ax_args = message.getArgs(cls.ns_uri)
self = cls()
try:
self.parseExtensionArgs(ax_args)
except NotAXMessage:
return None
if self.update_url:
# Update URL must match the openid.realm of the underlying
# OpenID 2 message.
realm = message.getArg(OPENID_NS, 'realm',
message.getArg(OPENID_NS, 'return_to'))
if not realm:
raise AXError("Cannot validate update_url %r against absent realm" % self.update_url)
tr = TrustRoot.parse(realm)
if not tr.validateURL(self.update_url):
raise AXError("Update URL %r failed validation against realm %r" %
(self.update_url, realm,))
return self
def parseExtensionArgs(self, ax_args):
"""Given attribute exchange arguments, populate this FetchRequest.
@param ax_args: Attribute Exchange arguments from the request.
As returned from L{Message.getArgs<openid.message.Message.getArgs>}.
@type ax_args: dict
@raises KeyError: if the message is not consistent in its use
of namespace aliases.
@raises NotAXMessage: If ax_args does not include an Attribute Exchange
mode.
@raises AXError: If the data to be parsed does not follow the
attribute exchange specification. At least when
'if_available' or 'required' is not specified for a
particular attribute type.
"""
# Raises an exception if the mode is not the expected value
self._checkMode(ax_args)
aliases = NamespaceMap()
for key, value in six.iteritems(ax_args):
if key.startswith('type.'):
alias = key[5:]
type_uri = value
aliases.addAlias(type_uri, alias)
count_key = 'count.' + alias
count_s = ax_args.get(count_key)
if count_s:
try:
count = int(count_s)
if count <= 0:
raise AXError("Count %r must be greater than zero, got %r" % (count_key, count_s,))
except ValueError:
if count_s != UNLIMITED_VALUES:
raise AXError("Invalid count value for %r: %r" % (count_key, count_s,))
count = count_s
else:
count = 1
self.add(AttrInfo(type_uri, alias=alias, count=count))
required = toTypeURIs(aliases, ax_args.get('required'))
for type_uri in required:
self.requested_attributes[type_uri].required = True
if_available = toTypeURIs(aliases, ax_args.get('if_available'))
all_type_uris = required + if_available
for type_uri in aliases.iterNamespaceURIs():
if type_uri not in all_type_uris:
raise AXError(
'Type URI %r was in the request but not '
'present in "required" or "if_available"' % (type_uri,))
self.update_url = ax_args.get('update_url')
def iterAttrs(self):
"""Iterate over the AttrInfo objects that are
contained in this fetch_request.
"""
return six.itervalues(self.requested_attributes)
def __iter__(self):
"""Iterate over the attribute type URIs in this fetch_request
"""
return iter(self.requested_attributes)
def has_key(self, type_uri):
"""Is the given type URI present in this fetch_request?
"""
return type_uri in self.requested_attributes
__contains__ = has_key
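def _example_fetch_request():  # pragma: no cover
    """Hedged usage sketch, not part of the library API.
    Shows how a relying party might assemble a fetch_request for a single
    e-mail attribute. The axschema.org type URI is the conventional
    identifier for e-mail and is used here purely as an illustration.
    """
    fetch = FetchRequest()
    fetch.add(AttrInfo('http://axschema.org/contact/email',
                       required=True, alias='email'))
    return fetch.getExtensionArgs()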
class AXKeyValueMessage(AXMessage):
"""An abstract class that implements a message that has attribute
keys and values. It contains the common code between
fetch_response and store_request.
"""
def __init__(self):
AXMessage.__init__(self)
self.data = {}
def addValue(self, type_uri, value):
"""Add a single value for the given attribute type to the
message. If there are already values specified for this type,
this value will be sent in addition to the values already
specified.
@param type_uri: The URI for the attribute
        @param value: The value to add to the response to the relying party for this attribute. If the value is not
            text, it will be converted.
@type value: Any
@returns: None
"""
try:
values = self.data[type_uri]
except KeyError:
values = self.data[type_uri] = []
values.append(force_text(value))
def setValues(self, type_uri, values):
"""Set the values for the given attribute type. This replaces
any values that have already been set for this attribute.
@param type_uri: The URI for the attribute
        @param values: A list of values to send for this attribute. Values that are not text will be converted.
@type values: List[Any]
"""
self.data[type_uri] = [force_text(v) for v in values]
def _getExtensionKVArgs(self, aliases=None):
"""Get the extension arguments for the key/value pairs
contained in this message.
@param aliases: An alias mapping. Set to None if you don't
care about the aliases for this request.
"""
if aliases is None:
aliases = NamespaceMap()
ax_args = {}
for type_uri, values in six.iteritems(self.data):
alias = aliases.add(type_uri)
ax_args['type.' + alias] = type_uri
ax_args['count.' + alias] = six.text_type(len(values))
for i, value in enumerate(values):
key = 'value.%s.%d' % (alias, i + 1)
ax_args[key] = value
return ax_args
def parseExtensionArgs(self, ax_args):
"""Parse attribute exchange key/value arguments into this
object.
@param ax_args: The attribute exchange fetch_response
arguments, with namespacing removed.
@type ax_args: Dict[six.text_type, six.text_type]
@returns: None
@raises ValueError: If the message has bad values for
particular fields
@raises KeyError: If the namespace mapping is bad or required
arguments are missing
"""
self._checkMode(ax_args)
aliases = NamespaceMap()
for key, value in six.iteritems(ax_args):
if key.startswith('type.'):
type_uri = value
alias = key[5:]
checkAlias(alias)
aliases.addAlias(type_uri, alias)
for type_uri, alias in aliases.items():
try:
count_s = ax_args['count.' + alias]
except KeyError:
value = ax_args['value.' + alias]
if value == '':
values = []
else:
values = [value]
else:
count = int(count_s)
values = []
for i in range(1, count + 1):
value_key = 'value.%s.%d' % (alias, i)
value = ax_args[value_key]
values.append(value)
self.data[type_uri] = values
def getSingle(self, type_uri, default=None):
"""Get a single value for an attribute. If no value was sent
for this attribute, use the supplied default. If there is more
than one value for this attribute, this method will fail.
@param type_uri: The URI for the attribute
@type type_uri: six.text_type, six.binary_type is deprecated
@param default: The value to return if the attribute was not
sent in the fetch_response.
@returns: The value of the attribute in the fetch_response
message, or the default supplied
@rtype: six.text_type or NoneType
@raises ValueError: If there is more than one value for this
parameter in the fetch_response message.
@raises KeyError: If the attribute was not sent in this response
"""
type_uri = string_to_text(type_uri, "Binary values for type_uri are deprecated. Use text input instead.")
values = self.data.get(type_uri)
if not values:
return default
elif len(values) == 1:
return values[0]
else:
raise AXError(
'More than one value present for %r' % (type_uri,))
def get(self, type_uri):
"""Get the list of values for this attribute in the
fetch_response.
XXX: what to do if the values are not present? default
parameter? this is funny because it's always supposed to
return a list, so the default may break that, though it's
provided by the user's code, so it might be okay. If no
default is supplied, should the return be None or []?
@param type_uri: The URI of the attribute
@returns: The list of values for this attribute in the
response. May be an empty list.
@rtype: List[six.text_type]
@raises KeyError: If the attribute was not sent in the response
"""
return self.data[type_uri]
def count(self, type_uri):
"""Get the number of responses for a particular attribute in
this fetch_response message.
@param type_uri: The URI of the attribute
@returns: The number of values sent for this attribute
@raises KeyError: If the attribute was not sent in the
response. KeyError will not be raised if the number of
values was zero.
"""
return len(self.get(type_uri))
class FetchResponse(AXKeyValueMessage):
"""A fetch_response attribute exchange message
"""
mode = 'fetch_response'
def __init__(self, request=None, update_url=None):
"""
@param request: When supplied, I will use namespace aliases
that match those in this request. I will also check to
make sure I do not respond with attributes that were not
requested.
@type request: L{FetchRequest}
@param update_url: By default, C{update_url} is taken from the
request. But if you do not supply the request, you may set
the C{update_url} here.
@type update_url: Optional[six.text_type], six.binary_type is deprecated
"""
AXKeyValueMessage.__init__(self)
if update_url is not None:
update_url = string_to_text(update_url,
"Binary values for update_url are deprecated. Use text input instead.")
self.update_url = update_url
self.request = request
def getExtensionArgs(self):
"""Serialize this object into arguments in the attribute
exchange namespace
@returns: The dictionary of unqualified attribute exchange
arguments that represent this fetch_response.
@rtype: Dict[six.text_type, six.text_type]
"""
aliases = NamespaceMap()
zero_value_types = []
if self.request is not None:
# Validate the data in the context of the request (the
# same attributes should be present in each, and the
# counts in the response must be no more than the counts
# in the request)
for type_uri in self.data:
if type_uri not in self.request:
raise KeyError(
'Response attribute not present in request: %r'
% (type_uri,))
for attr_info in self.request.iterAttrs():
# Copy the aliases from the request so that reading
# the response in light of the request is easier
if attr_info.alias is None:
aliases.add(attr_info.type_uri)
else:
aliases.addAlias(attr_info.type_uri, attr_info.alias)
try:
values = self.data[attr_info.type_uri]
except KeyError:
values = []
zero_value_types.append(attr_info)
if (attr_info.count != UNLIMITED_VALUES) and (attr_info.count < len(values)):
raise AXError(
'More than the number of requested values were '
'specified for %r' % (attr_info.type_uri,))
kv_args = self._getExtensionKVArgs(aliases)
# Add the KV args into the response with the args that are
# unique to the fetch_response
ax_args = self._newArgs()
# For each requested attribute, put its type/alias and count
# into the response even if no data were returned.
for attr_info in zero_value_types:
alias = aliases.getAlias(attr_info.type_uri)
kv_args['type.' + alias] = attr_info.type_uri
kv_args['count.' + alias] = '0'
update_url = ((self.request and self.request.update_url) or self.update_url)
if update_url:
ax_args['update_url'] = update_url
ax_args.update(kv_args)
return ax_args
def parseExtensionArgs(self, ax_args):
"""@see: {Extension.parseExtensionArgs<openid.extension.Extension.parseExtensionArgs>}"""
super(FetchResponse, self).parseExtensionArgs(ax_args)
self.update_url = ax_args.get('update_url')
@classmethod
def fromSuccessResponse(cls, success_response, signed=True):
"""Construct a FetchResponse object from an OpenID library
SuccessResponse object.
@param success_response: A successful id_res response object
@type success_response: openid.consumer.consumer.SuccessResponse
@param signed: Whether non-signed args should be
            processed. If True (the default), only signed arguments
            will be processed.
@type signed: bool
@returns: A FetchResponse containing the data from the OpenID
message, or None if the SuccessResponse did not contain AX
extension data.
@raises AXError: when the AX data cannot be parsed.
"""
self = cls()
ax_args = success_response.extensionResponse(self.ns_uri, signed)
try:
self.parseExtensionArgs(ax_args)
except NotAXMessage:
return None
else:
return self
class StoreRequest(AXKeyValueMessage):
"""A store request attribute exchange message representation
"""
mode = 'store_request'
def __init__(self, aliases=None):
"""
@param aliases: The namespace aliases to use when making this
store request. Leave as None to use defaults.
"""
super(StoreRequest, self).__init__()
self.aliases = aliases
def getExtensionArgs(self):
"""
@see: L{Extension.getExtensionArgs<openid.extension.Extension.getExtensionArgs>}
"""
ax_args = self._newArgs()
kv_args = self._getExtensionKVArgs(self.aliases)
ax_args.update(kv_args)
return ax_args
@classmethod
def fromOpenIDRequest(cls, openid_request):
"""Extract a StoreRequest from an OpenID message
@param openid_request: The OpenID authentication request
containing the attribute fetch request
@type openid_request: C{L{openid.server.server.CheckIDRequest}}
@rtype: C{L{StoreRequest}} or C{None}
@returns: The StoreRequest extracted from the message or None, if
the message contained no AX extension.
@raises KeyError: if the AuthRequest is not consistent in its use
of namespace aliases.
@raises AXError: When parseExtensionArgs would raise same.
@see: L{parseExtensionArgs}
"""
message = openid_request.message
ax_args = message.getArgs(cls.ns_uri)
self = cls()
try:
self.parseExtensionArgs(ax_args)
except NotAXMessage:
return None
return self
class StoreResponse(AXMessage):
"""An indication that the store request was processed along with
this OpenID transaction.
"""
SUCCESS_MODE = 'store_response_success'
FAILURE_MODE = 'store_response_failure'
def __init__(self, succeeded=True, error_message=None):
AXMessage.__init__(self)
if succeeded and error_message is not None:
raise AXError('An error message may only be included in a failing fetch response')
if succeeded:
self.mode = self.SUCCESS_MODE
else:
self.mode = self.FAILURE_MODE
self.error_message = error_message
def succeeded(self):
"""Was this response a success response?"""
return self.mode == self.SUCCESS_MODE
def getExtensionArgs(self):
"""@see: {Extension.getExtensionArgs<openid.extension.Extension.getExtensionArgs>}"""
ax_args = self._newArgs()
if not self.succeeded() and self.error_message:
ax_args['error'] = self.error_message
return ax_args
@classmethod
def fromSuccessResponse(cls, success_response, signed=True):
"""Construct a StoreResponse object from an OpenID library
SuccessResponse object.
@param success_response: A successful id_res response object
@type success_response: openid.consumer.consumer.SuccessResponse
@param signed: Whether non-signed args should be
            processed. If True (the default), only signed arguments
            will be processed.
@type signed: bool
@returns: A StoreResponse containing the data from the OpenID
message, or None if the SuccessResponse did not contain AX
extension data.
@raises AXError: when the AX data cannot be parsed.
"""
self = cls()
ax_args = success_response.extensionResponse(self.ns_uri, signed)
try:
self.parseExtensionArgs(ax_args)
except NotAXMessage:
return None
else:
return self
|
tests/ted.py | VickyZengg/MediaSDK | 782 | 12614612 | <filename>tests/ted.py
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2020 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import sys
import argparse
from pathlib import Path
from ted import discover
if __name__ == '__main__':
    print('Intel(R) Media SDK Open Source Test Driver')
print('Copyright (c) Intel Corporation\n')
parser = argparse.ArgumentParser()
parser.add_argument(
'test',
nargs='*',
help='tests to run, if not specified all tests will be executed'
)
parser.add_argument(
'--device', action='store', default='/dev/dri/renderD128',
help='provide device on which to run tests (default: /dev/dri/renderD128)'
)
args = parser.parse_args()
base_dir = Path(__file__).parent.absolute()
try:
print("Setting up test environment...")
cfg = discover.config(base_dir)
except Exception as ex:
msg = "Can't load configuration - {}".format(ex)
sys.exit(msg)
test_re = None
if args.test:
test_re = re.compile('|'.join(args.test))
tests_to_run = []
print("Disovering tests...")
for test in discover.tests(base_dir, cfg, args):
if test_re and not test_re.search(test.name):
print(' {} - skipped'.format(test.name))
continue
tests_to_run.append(test)
print(' {} - {} cases'.format(test.name, len(test.cases)))
if not tests_to_run:
sys.exit("Nothing to run")
n = len(tests_to_run)
print("\nRunning {} test{}...".format(n, 's' if n > 1 else ''))
results = []
total = passed = 0
for test in tests_to_run:
print(' {}'.format(test.name))
total_, passed_, details = test.run()
results.append(details)
total += total_
passed += passed_
print("\n{} of {} cases passed".format(passed, total))
# return code is number of failed cases
sys.exit(total - passed)
|
malib/algorithm/ddpg/__init__.py | ReinholdM/play_football_with_human | 258 | 12614618 | from .policy import DDPG
from .trainer import DDPGTrainer
from .loss import DDPGLoss
NAME = "DDPG"
LOSS = DDPGLoss
TRAINER = DDPGTrainer
POLICY = DDPG
TRAINING_CONFIG = {
"update_interval": 1,
"batch_size": 1024,
"tau": 0.01,
"optimizer": "Adam",
"actor_lr": 1e-2,
"critic_lr": 1e-2,
"grad_norm_clipping": 0.5,
}
|
recipes/Python/578159_State_tree_/recipe-578159.py | tdiprima/code | 2,023 | 12614636 | """
A 'state tree' is (as its name implies) an object that represents a tree of
states. The tree is built by having an initial state (the 'root') and a
rule whereby child states can be reached from a parent state.
State trees are useful, for example, in solving puzzles where there are a
fixed set of moves from any given position, and a goal position which is
the solution. A couple of example puzzles are given below.
"""
from collections import deque
class StateTree(object):
"""
Representation of a tree of states.
States must be hashable (i.e., usable as dictionary keys---for example,
tuples). The initial(), reachable() and endcondtion() methods must be
subclassed. After that, the findpath() method will return a list of
states from the initial to an end state.
"""
def initial(self):
"Return the initial state."
raise NotImplementedError
def reachable(self, state):
"Yield states reachable from a given state."
raise NotImplementedError
def endcondition(self, state):
"Return whether state satisfies the end condition."
raise NotImplementedError
def findpath(self):
"Find and return shortest path from initial to end state."
# Get the initial state.
state = self.initial()
# Mapping of state to its parent, or None if initial.
parentmap = {state: None}
# Queue of states to examine.
queue = deque([state])
# Process each state in the queue.
while len(queue) > 0:
state = queue.popleft()
# Examine each new reachable state.
for child in self.reachable(state):
if child not in parentmap:
parentmap[child] = state
queue.append(child)
# If this state satisfies the end condition, return it.
if self.endcondition(state):
states = [state]
while parentmap[state] is not None:
state = parentmap[state]
states.insert(0, state)
return states
class BottlePuzzle(StateTree):
"""
There are three bottles, with capacities of 3, 5 and 8 litres. You can
pour the contents of one bottle into another until it's empty or the
other is full. How do you measure out 4 litres?
"""
# Bottle sizes.
sizes = (3, 5, 8)
# Possible pourings (from -> to).
pourings = ((0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1))
def initial(self):
return (0, 0, 8)
def reachable(self, state):
for a, b in self.pourings:
bottles = list(state)
# Get the amount poured from bottle A to bottle B.
poured = min(self.sizes[b] - bottles[b], bottles[a])
# If some was poured, yield the new state.
if poured > 0:
bottles[a] -= poured
bottles[b] += poured
yield tuple(bottles)
def endcondition(self, state):
return 4 in state
class FrogsAndToads(StateTree):
"""
The classic frogs-and-toads puzzle. An equal number of frogs and toads
face each other on a log, with a gap between them, and need to swap
places. Toads only move right, frogs only move left. A thing can move
into the gap, or jump over a different thing into the gap. How do they
do it?
"""
def initial(self):
return ('T', 'T', 'T', ' ', 'F', 'F', 'F')
def reachable(self, state):
for thing, move in (('T', 1), ('T', 2), ('F', -1), ('F', -2)):
pos = list(state)
# Find where the empty space is.
space = pos.index(' ')
# Find start position of the thing to move.
start = space - move
# If start position is out of bounds, or not of the right type,
# disallow it.
if not 0 <= start < len(pos) or pos[start] != thing:
continue
# If it's a jump, and the jumped thing isn't a different kind,
# disallow it.
if abs(move) == 2 and pos[(space + start) / 2] == thing:
continue
# Do the move and yield it.
pos[start], pos[space] = pos[space], pos[start]
yield tuple(pos)
def endcondition(self, state):
return state == ('F', 'F', 'F', ' ', 'T', 'T', 'T')
if __name__ == "__main__":
for puzzle in FrogsAndToads, BottlePuzzle:
print
print puzzle.__name__
print
for state in puzzle().findpath():
print " ", state
|
test/test_hex_binary.py | ROZBEH/rdflib | 1,424 | 12614696 | # -*- coding: utf-8 -*-
import unittest
import binascii
from rdflib import Literal, XSD
class HexBinaryTestCase(unittest.TestCase):
def test_int(self):
self._test_integer(5)
self._test_integer(3452)
self._test_integer(4886)
def _test_integer(self, i):
hex_i = format(i, "x")
        # Make sure it has an even length (a whole number of bytes)
len_hex_i = len(hex_i)
hex_i = hex_i.zfill(len_hex_i + len_hex_i % 2)
l = Literal(hex_i, datatype=XSD.hexBinary)
bin_i = l.toPython()
self.assertEqual(int(binascii.hexlify(bin_i), 16), i)
self.assertEqual(str(l), hex_i)
self.assertEqual(int(hex_i, 16), i)
self.assertEqual(int(l, 16), i)
self.assertEqual(int(str(l), 16), i)
def test_unicode(self):
str1 = "Test utf-8 string éàë"
# u hexstring
hex_str1 = binascii.hexlify(str1.encode("utf-8")).decode()
l1 = Literal(hex_str1, datatype=XSD.hexBinary)
b_str1 = l1.toPython()
self.assertEqual(b_str1.decode("utf-8"), str1)
self.assertEqual(str(l1), hex_str1)
# b hexstring
hex_str1b = binascii.hexlify(str1.encode("utf-8"))
l1b = Literal(hex_str1b, datatype=XSD.hexBinary)
b_str1b = l1b.toPython()
self.assertEqual(b_str1, b_str1b)
self.assertEqual(b_str1b.decode("utf-8"), str1)
self.assertEqual(str(l1b), hex_str1)
if __name__ == "__main__":
unittest.main()
|
goodsspecs/urls.py | wh8983298/GreaterWMS | 1,063 | 12614701 | from django.urls import path, re_path
from . import views
urlpatterns = [
path(r'', views.APIViewSet.as_view({"get": "list", "post": "create"}), name="goodsspecs"),
re_path(r'^(?P<pk>\d+)/$', views.APIViewSet.as_view({
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
}), name="goodsspecs_1")
]
|
calico/tests/test_e2e.py | davidlrosenblum/integrations-extras | 158 | 12614727 | <gh_stars>100-1000
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.dev.utils import get_metadata_metrics
from . import common
@pytest.mark.e2e
def test_check_ok(dd_agent_check):
aggregator = dd_agent_check(rate=True)
metrics = common.FORMATTED_EXTRA_METRICS
for metric in metrics:
aggregator.assert_metric(metric)
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
|
front-end/qemu-2.3/scripts/tracetool/transform.py | zheli-1/crete-dev | 473 | 12614797 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Type-transformation rules.
"""
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "Copyright 2012-2014, <NAME> <<EMAIL>>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def _transform_type(type_, trans):
if isinstance(trans, str):
return trans
elif isinstance(trans, dict):
if type_ in trans:
return _transform_type(type_, trans[type_])
elif None in trans:
return _transform_type(type_, trans[None])
else:
return type_
elif callable(trans):
return trans(type_)
else:
raise ValueError("Invalid type transformation rule: %s" % trans)
def transform_type(type_, *trans):
"""Return a new type transformed according to the given rules.
Applies each of the transformation rules in trans in order.
If an element of trans is a string, return it.
If an element of trans is a function, call it with type_ as its only
argument.
If an element of trans is a dict, search type_ in its keys. If type_ is
a key, use the value as a transformation rule for type_. Otherwise, if
None is a key use the value as a transformation rule for type_.
Otherwise, return type_.
Parameters
----------
type_ : str
Type to transform.
trans : list of function or dict
Type transformation rules.
"""
if len(trans) == 0:
raise ValueError
res = type_
for t in trans:
res = _transform_type(res, t)
return res
##################################################
# tcg -> host
def _tcg_2_host(type_):
if type_ == "TCGv":
# force a fixed-size type (target-independent)
return "uint64_t"
else:
return type_
TCG_2_HOST = {
"TCGv_i32": "uint32_t",
"TCGv_i64": "uint64_t",
"TCGv_ptr": "void *",
None: _tcg_2_host,
}
##################################################
# host -> host compatible with tcg sizes
HOST_2_TCG_COMPAT = {
"uint8_t": "uint32_t",
}
##################################################
# host/tcg -> tcg
def _host_2_tcg(type_):
if type_.startswith("TCGv"):
return type_
raise ValueError("Don't know how to translate '%s' into a TCG type\n" % type_)
HOST_2_TCG = {
"uint32_t": "TCGv_i32",
"uint64_t": "TCGv_i64",
"void *" : "TCGv_ptr",
None: _host_2_tcg,
}
##################################################
# tcg -> tcg helper definition
def _tcg_2_helper_def(type_):
if type_ == "TCGv":
return "target_ulong"
else:
return type_
TCG_2_TCG_HELPER_DEF = {
"TCGv_i32": "uint32_t",
"TCGv_i64": "uint64_t",
"TCGv_ptr": "void *",
None: _tcg_2_helper_def,
}
##################################################
# tcg -> tcg helper declaration
def _tcg_2_tcg_helper_decl_error(type_):
raise ValueError("Don't know how to translate type '%s' into a TCG helper declaration type\n" % type_)
TCG_2_TCG_HELPER_DECL = {
"TCGv" : "tl",
"TCGv_ptr": "ptr",
"TCGv_i32": "i32",
"TCGv_i64": "i64",
None: _tcg_2_tcg_helper_decl_error,
}
##################################################
# host/tcg -> tcg temporal constant allocation
def _host_2_tcg_tmp_new(type_):
if type_.startswith("TCGv"):
return "tcg_temp_new_nop"
raise ValueError("Don't know how to translate type '%s' into a TCG temporal allocation" % type_)
HOST_2_TCG_TMP_NEW = {
"uint32_t": "tcg_const_i32",
"uint64_t": "tcg_const_i64",
"void *" : "tcg_const_ptr",
None: _host_2_tcg_tmp_new,
}
##################################################
# host/tcg -> tcg temporal constant deallocation
def _host_2_tcg_tmp_free(type_):
if type_.startswith("TCGv"):
return "tcg_temp_free_nop"
raise ValueError("Don't know how to translate type '%s' into a TCG temporal deallocation" % type_)
HOST_2_TCG_TMP_FREE = {
"uint32_t": "tcg_temp_free_i32",
"uint64_t": "tcg_temp_free_i64",
"void *" : "tcg_temp_free_ptr",
None: _host_2_tcg_tmp_free,
}
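# Minimal illustrative self-check (not part of the original module): exercises
# transform_type() against the tables defined above when this file is run
# directly; rules are applied left to right, as documented.
if __name__ == "__main__":
    # A dict rule maps a known key directly.
    assert transform_type("TCGv_i32", TCG_2_HOST) == "uint32_t"
    # An unknown key falls back to the None entry, here a callable.
    assert transform_type("TCGv", TCG_2_HOST) == "uint64_t"
    # Rules compose: uint8_t -> uint32_t (compat) -> TCGv_i32.
    assert transform_type("uint8_t", HOST_2_TCG_COMPAT, HOST_2_TCG) == "TCGv_i32"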
|
tests/conftest.py | den4uk/pipenv | 18,636 | 12614823 | import pytest
@pytest.fixture()
def project():
from pipenv.project import Project
return Project()
|
1000-1100q/1041.py | rampup01/Leetcode | 990 | 12614857 | <reponame>rampup01/Leetcode<gh_stars>100-1000
'''
On an infinite plane, a robot initially stands at (0, 0) and faces north. The robot can receive one of three instructions:
"G": go straight 1 unit;
"L": turn 90 degrees to the left;
"R": turn 90 degress to the right.
The robot performs the instructions given in order, and repeats them forever.
Return true if and only if there exists a circle in the plane such that the robot never leaves the circle.
Example 1:
Input: "GGLLGG"
Output: true
Explanation:
The robot moves from (0,0) to (0,2), turns 180 degrees, and then returns to (0,0).
When repeating these instructions, the robot remains in the circle of radius 2 centered at the origin.
Example 2:
Input: "GG"
Output: false
Explanation:
The robot moves north indefinitely.
Example 3:
Input: "GL"
Output: true
Explanation:
The robot moves from (0, 0) -> (0, 1) -> (-1, 1) -> (-1, 0) -> (0, 0) -> ...
Note:
1 <= instructions.length <= 100
instructions[i] is in {'G', 'L', 'R'}
'''
class Solution(object):
def isRobotBounded(self, instructions):
"""
:type instructions: str
:rtype: bool
"""
        start_x, start_y = 0, 0
        direct = 0
        moves = [[0, 1], [-1, 0], [0, -1], [1, 0]]
        # Repeat the instruction string 4 times: any net rotation per pass
        # cancels out after 4 passes, so the robot is bounded in a circle
        # if and only if it ends up back at the origin.
        instructions = instructions*4
        for instruction in instructions:
            if instruction == 'G':
                start_x += moves[direct][0]
                start_y += moves[direct][1]
            elif instruction == 'L':
                direct = (direct+1)%4
            elif instruction == 'R':
                direct = (direct+3)%4
        # Check only after all four passes; checking mid-way would falsely
        # report True for unbounded paths that merely pass through the origin.
        return start_x == 0 and start_y == 0
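# Quick sanity checks (illustrative, matching the examples in the docstring):
#   Solution().isRobotBounded("GGLLGG") -> True
#   Solution().isRobotBounded("GG")     -> False
#   Solution().isRobotBounded("GL")     -> True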
|
torch/utils/data/datapipes/map/__init__.py | vuanvin/pytorch | 183 | 12614858 | # Functional DataPipe
from torch.utils.data.datapipes.map.callable import MapperMapDataPipe as Mapper
from torch.utils.data.datapipes.map.combinatorics import ShufflerMapDataPipe as Shuffler
from torch.utils.data.datapipes.map.combining import (
ConcaterMapDataPipe as Concater,
ZipperMapDataPipe as Zipper
)
from torch.utils.data.datapipes.map.grouping import (
BatcherMapDataPipe as Batcher
)
from torch.utils.data.datapipes.map.utils import SequenceWrapperMapDataPipe as SequenceWrapper
__all__ = ['Batcher', 'Concater', 'Mapper', 'SequenceWrapper', 'Shuffler', 'Zipper']
# Please keep this list sorted
assert __all__ == sorted(__all__)
|
view_model.py | rehohoho/coiltraine | 204 | 12614884 | import argparse
import sys
import os
import glob
import torch
# Import one of the two CARLA interfaces depending on the requested version
from drive import CoILAgent
from configs import g_conf, merge_with_yaml, set_type_of_process
# Control for CARLA 9
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-cv',
'--carla-version',
dest='carla_version',
default='0.9',
type=str
)
argparser.add_argument(
'-f',
'--folder',
type=str
)
argparser.add_argument(
'-e',
'--exp',
type=str
)
argparser.add_argument(
'--host',
metavar='H',
default='127.0.0.1',
help='IP of the host server (default: 127.0.0.1)')
argparser.add_argument(
'-p', '--port',
metavar='P',
default=2000,
type=int,
help='TCP port to listen to (default: 2000)'
)
argparser.add_argument(
'-cp', '--checkpoint',
metavar='P',
default=100000,
type=int,
help='The checkpoint used for the model visualization'
)
argparser.add_argument(
'--res',
metavar='WIDTHxHEIGHT',
default='1280x720',
help='window resolution (default: 1280x720)'
)
argparser.add_argument(
'-o', '--output_folder',
metavar='P',
default=None,
type=str,
help='The folder to store images received by the network and its activations'
)
args = argparser.parse_args()
args.width, args.height = [int(x) for x in args.res.split('x')]
merge_with_yaml(os.path.join('configs', args.folder, args.exp + '.yaml'))
checkpoint = torch.load(os.path.join('_logs', args.folder, args.exp
, 'checkpoints', str(args.checkpoint) + '.pth'))
agent = CoILAgent(checkpoint, '_', args.carla_version)
# Decide the version
if args.carla_version == '0.9':
try:
sys.path.append(glob.glob('**/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import model_view.carla09interface as carla09interface
carla09interface.game_loop(args, agent)
else:
import model_view.carla08interface as carla08interface
carla08interface.game_loop(args, agent)
|
Tests/t_edit.py | reqa/python-ldap | 299 | 12614920 | <reponame>reqa/python-ldap
import os
import unittest
# Switch off processing .ldaprc or ldap.conf before importing _ldap
os.environ['LDAPNOINIT'] = '1'
import ldap
from ldap.ldapobject import LDAPObject
from slapdtest import SlapdTestCase
class EditionTests(SlapdTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
base = cls.server.suffix
suffix_dc = base.split(',')[0][3:]
# insert some Foo* objects via ldapadd
cls.server.ldapadd("\n".join([
'dn: '+cls.server.suffix,
'objectClass: dcObject',
'objectClass: organization',
'dc: '+suffix_dc,
'o: '+suffix_dc,
'',
'dn: '+cls.server.root_dn,
'objectClass: applicationProcess',
'cn: '+cls.server.root_cn,
'',
"dn: cn=Foo1,"+base,
"objectClass: organizationalRole",
"cn: Foo1",
"",
"dn: cn=Foo2,"+base,
"objectClass: organizationalRole",
"cn: Foo2",
"",
"dn: cn=Foo3,"+base,
"objectClass: organizationalRole",
"cn: Foo3",
"",
"dn: ou=Container,"+base,
"objectClass: organizationalUnit",
"ou: Container",
"",
"dn: cn=Foo4,ou=Container,"+base,
"objectClass: organizationalRole",
"cn: Foo4",
"",
])+"\n")
def setUp(self):
self.ldap = LDAPObject(self.server.ldap_uri, bytes_mode=False)
self.ldap.protocol_version = 3
self.ldap.set_option(ldap.OPT_REFERRALS, 0)
self.ldap.simple_bind_s(
self.server.root_dn,
self.server.root_pw
)
def tearDown(self):
self.ldap.unbind()
def test_add_object(self):
base = self.server.suffix
dn = "cn=Added,ou=Container," + base
self.ldap.add_ext_s(dn, [
("objectClass", [b'organizationalRole']),
("cn", [b'Added']),
])
# Lookup the object
result = self.ldap.search_s(base, ldap.SCOPE_SUBTREE, '(cn=Added)', ['*'])
self.assertEqual(result, [
("cn=Added,ou=Container," + base,
{'cn': [b'Added'], 'objectClass': [b'organizationalRole']}),
])
# Delete object
self.ldap.delete_s(dn)
result = self.ldap.search_s(
base, ldap.SCOPE_SUBTREE, '(cn=Added)', ['*']
)
self.assertEqual(result, [])
if __name__ == '__main__':
unittest.main()
|
capstone/capdb/migrations/0099_auto_20200327_1936.py | rachelaus/capstone | 134 | 12614931 | <gh_stars>100-1000
# Generated by Django 2.2.11 on 2020-03-27 19:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('capdb', '0098_auto_20200325_1547'),
]
operations = [
migrations.AlterField(
model_name='extractedcitation',
name='page_number_original',
field=models.IntegerField(blank=True, null=True),
),
]
|
tests/test_image_prompts.py | ProjectsStartUp/ru-dalle | 1,134 | 12614944 | # -*- coding: utf-8 -*-
import pytest
from rudalle.image_prompts import ImagePrompts
@pytest.mark.parametrize('borders, crop_first', [
({'up': 4, 'right': 0, 'left': 0, 'down': 0}, False),
({'up': 4, 'right': 0, 'left': 0, 'down': 0}, True),
({'up': 4, 'right': 3, 'left': 3, 'down': 3}, False)
])
def test_image_prompts(sample_image, vae, borders, crop_first):
img = sample_image.copy()
img = img.resize((256, 256))
image_prompt = ImagePrompts(img, borders, vae, crop_first=crop_first)
assert image_prompt.image_prompts.shape[1] == 32 * 32
assert len(image_prompt.image_prompts_idx) == (borders['up'] + borders['down']) * 32 \
+ (borders['left'] + borders['right']) * (32 - borders['up'] - borders['down'])
|
tensorflow/python/data/experimental/kernel_tests/checkpoint_input_pipeline_hook_test.py | EricRemmerswaal/tensorflow | 190,993 | 12614949 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for experimental iterator_ops."""
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import iterator_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow_estimator.python.estimator import estimator
from tensorflow_estimator.python.estimator import model_fn
# TODO(b/123904664)
class CheckpointInputPipelineHookTest(test.TestCase, parameterized.TestCase):
@staticmethod
def _model_fn(features, labels, mode, config):
del labels
del mode
del config
global_step = training_util.get_or_create_global_step()
update_global_step_op = global_step.assign_add(1)
latest_feature = variables.VariableV1(
0, name='latest_feature', dtype=dtypes.int64)
store_latest_feature_op = latest_feature.assign(features)
ops.add_to_collection('my_vars', global_step)
ops.add_to_collection('my_vars', latest_feature)
return model_fn.EstimatorSpec(
mode='train',
train_op=control_flow_ops.group(
[update_global_step_op, store_latest_feature_op]),
loss=constant_op.constant(2.0))
def _read_vars(self, model_dir):
"""Returns (global_step, latest_feature)."""
with ops.Graph().as_default() as g:
ckpt_path = checkpoint_management.latest_checkpoint(model_dir)
meta_filename = ckpt_path + '.meta'
saver_lib.import_meta_graph(meta_filename)
saver = saver_lib.Saver()
with self.session(graph=g) as sess:
saver.restore(sess, ckpt_path)
return sess.run(ops.get_collection('my_vars'))
def _build_iterator_saver_hook(self, est):
return iterator_ops.CheckpointInputPipelineHook(est)
@combinations.generate(test_base.v1_only_combinations())
def testReturnDatasetFromInputFn(self):
def _input_fn():
return dataset_ops.Dataset.range(10)
est = estimator.Estimator(model_fn=self._model_fn)
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))
@combinations.generate(test_base.v1_only_combinations())
def testBuildIteratorInInputFn(self):
def _input_fn():
ds = dataset_ops.Dataset.range(10)
iterator = ds.make_one_shot_iterator()
return iterator.get_next()
est = estimator.Estimator(model_fn=self._model_fn)
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))
@combinations.generate(test_base.v1_only_combinations())
def testDoNotRestore(self):
def _input_fn():
return dataset_ops.Dataset.range(10)
est = estimator.Estimator(model_fn=self._model_fn)
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))
# Hook not provided, input pipeline was not restored.
est.train(_input_fn, steps=2)
self.assertSequenceEqual(self._read_vars(est.model_dir), (6, 1))
@combinations.generate(test_base.v1_only_combinations())
def testRaiseErrorIfNoIterator(self):
def _input_fn():
return constant_op.constant(1, dtype=dtypes.int64)
est = estimator.Estimator(model_fn=self._model_fn)
with self.assertRaises(ValueError):
est.train(
_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
if __name__ == '__main__':
test.main()
|
GeneratorInterface/Herwig7Interface/test/Herwig7_Standalone_DYLO_cfg.py | ckamtsikis/cmssw | 852 | 12614966 | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: GeneratorInterface/Herwig7Interface/python/Herwig7_Standalone_DYLO_cff.py --eventcontent RAWSIM --datatier GEN --conditions auto:run2_mc --step GEN --python_filename Herwig7_Standalone_DYLO_cfg.py --no_exec -n 100
import FWCore.ParameterSet.Config as cms
process = cms.Process('GEN')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic50ns13TeVCollision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100),
output = cms.optional.untracked.allowed(cms.int32,cms.PSet)
)
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
FailPath = cms.untracked.vstring(),
IgnoreCompletely = cms.untracked.vstring(),
Rethrow = cms.untracked.vstring(),
SkipEvent = cms.untracked.vstring(),
allowUnscheduled = cms.obsolete.untracked.bool,
canDeleteEarly = cms.untracked.vstring(),
emptyRunLumiMode = cms.obsolete.untracked.string,
eventSetup = cms.untracked.PSet(
forceNumberOfConcurrentIOVs = cms.untracked.PSet(
allowAnyLabel_=cms.required.untracked.uint32
),
numberOfConcurrentIOVs = cms.untracked.uint32(1)
),
fileMode = cms.untracked.string('FULLMERGE'),
forceEventSetupCacheClearOnNewRun = cms.untracked.bool(False),
makeTriggerResults = cms.obsolete.untracked.bool,
numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1),
numberOfConcurrentRuns = cms.untracked.uint32(1),
numberOfStreams = cms.untracked.uint32(0),
numberOfThreads = cms.untracked.uint32(1),
printDependencies = cms.untracked.bool(False),
sizeOfStackForThreadsInKB = cms.optional.untracked.uint32,
throwIfIllegalParameter = cms.untracked.bool(True),
wantSummary = cms.untracked.bool(False)
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('GeneratorInterface/Herwig7Interface/python/Herwig7_Standalone_DYLO_cff.py nevts:100'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.RAWSIMoutput = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
),
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(1),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(20971520),
fileName = cms.untracked.string('Herwig7_Standalone_DYLO_cff_py_GEN.root'),
outputCommands = process.RAWSIMEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
process.generator = cms.EDFilter("Herwig7GeneratorFilter",
configFiles = cms.vstring(),
crossSection = cms.untracked.double(-1),
dataLocation = cms.string('${HERWIGPATH:-6}'),
eventHandlers = cms.string('/Herwig/EventHandlers'),
filterEfficiency = cms.untracked.double(1.0),
generatorModule = cms.string('/Herwig/Generators/EventGenerator'),
herwig7CH3AlphaS = cms.vstring(
'cd /Herwig/Shower',
'set AlphaQCD:AlphaIn 0.118',
'cd /'
),
herwig7CH3MPISettings = cms.vstring(
'set /Herwig/Hadronization/ColourReconnector:ReconnectionProbability 0.4712',
'set /Herwig/UnderlyingEvent/MPIHandler:pTmin0 3.04',
'set /Herwig/UnderlyingEvent/MPIHandler:InvRadius 1.284',
'set /Herwig/UnderlyingEvent/MPIHandler:Power 0.1362'
),
herwig7CH3PDF = cms.vstring(
'cd /Herwig/Partons',
'create ThePEG::LHAPDF PDFSet_nnlo ThePEGLHAPDF.so',
'set PDFSet_nnlo:PDFName NNPDF31_nnlo_as_0118.LHgrid',
'set PDFSet_nnlo:RemnantHandler HadronRemnants',
'set /Herwig/Particles/p+:PDF PDFSet_nnlo',
'set /Herwig/Particles/pbar-:PDF PDFSet_nnlo',
'set /Herwig/Partons/PPExtractor:FirstPDF PDFSet_nnlo',
'set /Herwig/Partons/PPExtractor:SecondPDF PDFSet_nnlo',
'set /Herwig/Shower/ShowerHandler:PDFA PDFSet_nnlo',
'set /Herwig/Shower/ShowerHandler:PDFB PDFSet_nnlo',
'create ThePEG::LHAPDF PDFSet_lo ThePEGLHAPDF.so',
'set PDFSet_lo:PDFName NNPDF31_lo_as_0130.LHgrid',
'set PDFSet_lo:RemnantHandler HadronRemnants',
'set /Herwig/Shower/ShowerHandler:PDFARemnant PDFSet_lo',
'set /Herwig/Shower/ShowerHandler:PDFBRemnant PDFSet_lo',
'set /Herwig/Partons/MPIExtractor:FirstPDF PDFSet_lo',
'set /Herwig/Partons/MPIExtractor:SecondPDF PDFSet_lo',
'cd /'
),
herwig7StableParticlesForDetector = cms.vstring(
'set /Herwig/Decays/DecayHandler:MaxLifeTime 10*mm',
'set /Herwig/Decays/DecayHandler:LifeTimeOption Average'
),
parameterSets = cms.vstring(
'herwig7CH3PDF',
'herwig7CH3AlphaS',
'herwig7StableParticlesForDetector',
'pptoll'
),
pptoll = cms.vstring(
'read snippets/PPCollider.in',
'cd /Herwig/Generators',
'set EventGenerator:EventHandler:LuminosityFunction:Energy 13000.0',
'cd /Herwig/MatrixElements/',
'insert SubProcess:MatrixElements[0] MEqq2gZ2ff'
),
repository = cms.string('${HERWIGPATH}/HerwigDefaults.rpo'),
run = cms.string('InterfaceMatchboxTest')
)
process.ProductionFilterSequence = cms.Sequence(process.generator)
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,process.genfiltersummary_step,process.endjob_step,process.RAWSIMoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# filter all path with the production filter sequence
for path in process.paths:
getattr(process,path).insert(0, process.ProductionFilterSequence)
# Customisation from command line
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
|
examples/20_basic/example_multioutput_regression.py | psaks/auto-sklearn | 6,390 | 12614971 | <reponame>psaks/auto-sklearn
# -*- encoding: utf-8 -*-
"""
=======================
Multi-output Regression
=======================
The following example shows how to fit a multioutput regression model with
*auto-sklearn*.
"""
import numpy as np
from sklearn.datasets import make_regression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from autosklearn.regression import AutoSklearnRegressor
############################################################################
# Data Loading
# ============
X, y = make_regression(n_samples=1000, n_features=10, n_informative=5, n_targets=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
############################################################################
# Build and fit a regressor
# =========================
automl = AutoSklearnRegressor(
time_left_for_this_task=120,
per_run_time_limit=30,
tmp_folder='/tmp/autosklearn_multioutput_regression_example_tmp',
)
automl.fit(X_train, y_train, dataset_name='synthetic')
############################################################################
# View the models found by auto-sklearn
# =====================================
print(automl.leaderboard())
############################################################################
# Print the final ensemble constructed by auto-sklearn
# ====================================================
print(automl.show_models())
###########################################################################
# Get the Score of the final ensemble
# ===================================
predictions = automl.predict(X_test)
print("R2 score:", r2_score(y_test, predictions))
###########################################################################
# Get the configuration space
# ===========================
# The configuration space is reduced, i.e. no SVM.
print(automl.get_configuration_space(X_train, y_train))
|
flexget/plugins/output/mock_output.py | Jeremiad/Flexget | 1,322 | 12614997 | <filename>flexget/plugins/output/mock_output.py
from loguru import logger
from flexget import plugin
from flexget.event import event
logger = logger.bind(name='mock_output')
class MockOutput:
"""
Debugging plugin which records a copy of all accepted entries into a list stored in `mock_output` attribute
of the task.
"""
schema = {'type': 'boolean'}
def on_task_start(self, task, config):
task.mock_output = []
def on_task_output(self, task, config):
task.mock_output.extend(e.copy() for e in task.all_entries if e.accepted)
def on_task_exit(self, task, config):
logger.verbose(
'The following titles were output during this task run: {}',
', '.join(e['title'] for e in task.mock_output),
)
@event('plugin.register')
def register_plugin():
plugin.register(MockOutput, 'mock_output', debug=True, api_ver=2)
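# Illustrative use (based on the hooks above): enable the plugin in a task with
# `mock_output: yes`; after the task runs, copies of its accepted entries are
# available on `task.mock_output` for inspection, e.g. in tests.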
|
what_is_the_property/demo2.py | NightmareQAQ/python-notes | 106 | 12615004 | <reponame>NightmareQAQ/python-notes<filename>what_is_the_property/demo2.py<gh_stars>100-1000
class A:
def __init__(self, content):
self._content = content
@property
def content(self):
if not hasattr(self, '_content'):
return "content not exists"
return self._content
@content.setter
def content(self, value):
self._content = value
@content.deleter
def content(self):
del self._content
if __name__ == "__main__":
a = A('hello')
print('content:', a.content) # automatically calls getter
a.content = 'world' # automatically calls setter
print('content:', a.content)
del a.content # automatically calls deleter
print('content:', a.content) # content not exists
|
bench/compress_normal.py | jmswaney/zarr | 131 | 12615014 | <gh_stars>100-1000
import numpy as np
import sys
sys.path.insert(0, '..')
import zarr
import line_profiler
import timeit
from zarr import blosc
# setup
a = np.random.normal(2000, 1000, size=200000000).astype('u2')
z = zarr.empty_like(a, chunks=1000000, compression='blosc', compression_opts=dict(cname='lz4', clevel=5, shuffle=2))
print(z)
print('*' * 79)
# time
t = timeit.repeat('z[:] = a', repeat=10, number=1, globals=globals())
print(t)
print(min(t))
print(z)
# profile
profile = line_profiler.LineProfiler(blosc.compress)
profile.run('z[:] = a')
profile.print_stats()
print('*' * 79)
# time
t = timeit.repeat('z[:]', repeat=10, number=1, globals=globals())
print(t)
print(min(t))
# profile
profile = line_profiler.LineProfiler(blosc.decompress)
profile.run('z[:]')
profile.print_stats()
|
modules/pymol/experimenting.py | dualword/pymol-open-source | 636 | 12615018 | <reponame>dualword/pymol-open-source
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* Copyright (c) Schrodinger, LLC.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
if True:
from . import selector
from .cmd import _cmd,lock,unlock,Shortcut,QuietException, \
DEFAULT_ERROR, DEFAULT_SUCCESS, _raising, is_ok, is_error
cmd = __import__("sys").modules["pymol.cmd"]
import threading
import pymol
import string
def get_bond_print(obj,max_bond,max_type,_self=cmd):
r = DEFAULT_ERROR
try:
_self.lock(_self)
r = _cmd.get_bond_print(_self._COb,str(obj),int(max_bond),int(max_type))
finally:
_self.unlock(r,_self)
if _self._raising(r,_self): raise pymol.CmdException
return r
def spheroid(object="",average=0,_self=cmd): # EXPERIMENTAL
'''
DESCRIPTION
"spheroid" averages trajectory frames together to create
an ellipsoid-like approximation of the actual anisotropic
motion exhibited by the atom over a series of trajectory frames.
USAGE
spheroid object,average
average = number of states to average for each resulting spheroid state
'''
print("Warning: 'spheroid' is experimental, incomplete, and unstable.")
with _self.lockcm:
r = _cmd.spheroid(_self._COb,str(object),int(average))
return r
def mem(_self=cmd):
'''
DESCRIPTION
"mem" Dumps current memory state to standard output. This is a
debugging feature, not an official part of the API.
'''
r = DEFAULT_ERROR
try:
_self.lock(_self)
r = _cmd.mem(_self._COb)
finally:
_self.unlock(r,_self)
if _self._raising(r,_self): raise pymol.CmdException
return r
def check(selection=None, preserve=0):
'''
DESCRIPTION
"check" is unsupported command that may eventually have something
to do with assigning forcefield parameters to a selection of
atoms.
'''
# This function relies on code that is not currently part of PyMOL/ChemPy
# NOTE: the realtime module relies on code that is not yet part of PyMOL/ChemPy
from chempy.tinker import realtime
if selection is None:
arg = cmd.get_names("objects")
arg = arg[0:1]
if arg:
if len(arg):
selection = arg
if selection is not None:
selection = selector.process(selection)
realtime.assign("("+selection+")",int(preserve))
realtime.setup("("+selection+")")
def fast_minimize(*args, **kwargs):
'''
DESCRIPTION
"fast_minimize" is an unsupported nonfunctional command that may
    eventually have something to do with performing a quick clean-up of the
    molecular structure.
'''
kwargs['_setup'] = 0
return minimize(*args, **kwargs)
def minimize(sele='', iter=500, grad=0.01, interval=50, _setup=1, _self=cmd):
'''
DESCRIPTION
"fast_minimize" is an unsupported nonfunctional command that may
eventually have something to do with minimization.
'''
from chempy.tinker import realtime
if not sele:
names = _self.get_names("objects")
if not names:
return
sele = names[0]
sele = '(' + sele + ')'
if not int(_setup) or realtime.setup(sele):
_self.async_(realtime.mini, int(iter), float(grad), int(interval), sele)
else:
print(" minimize: missing parameters, can't continue")
def dump(fnam, obj, state=1, quiet=1, _self=cmd):
'''
DESCRIPTION
The dump command writes the geometry of an isosurface, isomesh,
isodot, or map object to a simple text file. Each line contains one
vertex in case of representations, or one grid point in case of a map.
For surface objects, XYZ coordinates and the normal are exported.
Three lines make one triangle (like GL_TRIANGLES).
For mesh objects, XYZ coordinates are exported (no normals).
The vertices form line strips (like GL_LINE_STRIP), a blank
line starts a new strip.
For dot objects, XYZ coordinates are exported.
For map objects, XYZ coordinates and the value at the point are
exported. This forms a grid map.
USAGE
dump filename, object, state=1, quiet=1
ARGUMENTS
filename = str: file that will be written
object = str: object name
EXAMPLE
fetch 1ubq, mymap, type=2fofc, async=0
dump gridmap.txt, mymap
isosurface mysurface, mymap
dump surfacegeometry.txt, mysurface
isomesh mymesh, mymap
dump meshgeometry.txt, mymesh
isodot mydot, mymap, quiet=1
dump dotgeometry.txt, mydot
SEE ALSO
COLLADA export
'''
r = DEFAULT_ERROR
try:
_self.lock(_self)
r = _cmd.dump(_self._COb, str(fnam), obj, int(state) - 1, int(quiet))
finally:
_self.unlock(r,_self)
if _self._raising(r,_self): raise pymol.CmdException
return r
def dummy(*arg):
return None
def test(group=0,index=0,_self=cmd): # generic test routine for development
'''
DESCRIPTION
"dump" is an unsupported internal command.
'''
r = DEFAULT_ERROR
try:
_self.lock(_self)
r=_cmd.test(_self._COb,int(group),int(index))
finally:
_self.unlock(r,_self)
if _self._raising(r,_self): raise pymol.CmdException
return r
def load_coords(model, oname, state=1): # UNSUPPORTED
'''
WARNING: buggy argument list, state gets decremented twice!
'''
return pymol.importing.load_coordset(model, oname, int(state)-1)
def focal_blur(aperture=2.0, samples=10, ray=0, filename='', quiet=1, _self=cmd):
'''
DESCRIPTION
Creates fancy figures by introducing a focal blur to the image.
The object at the origin will be in focus.
USAGE
focal_blur [ aperture [, samples [, ray [, filename ]]]]
ARGUMENTS
aperture = float: aperture angle in degrees {default: 2.0}
samples = int: number of images for averaging {default: 10}
ray = 0/1: {default: 0}
filename = str: write image to file {default: temporary}
AUTHORS
<NAME>, <NAME> and <NAME>
EXAMPLES
focal_blur 3.0, 50
'''
raise pymol.IncentiveOnlyException()
def callout(name, label, pos='', screen='auto', state=-1, color='front',
quiet=1, _self=cmd):
'''
DESCRIPTION
Create a new screen-stabilized callout object.
ARGUMENTS
name = str: object name
label = str: label text
pos = str or list: anchor in model space as 3-float coord list or atom
selection. If empty, don't draw an arrow. {default: }
screen = str or list: position on screen as 2-float list between [-1,-1]
(lower left) and [1,1] (upper right) or "auto" for smart placement.
{default: auto}
'''
raise pymol.IncentiveOnlyException()
def desaturate(selection="all", a=0.5, quiet=1, _self=cmd):
'''
DESCRIPTION
Desaturate the colors in the given selection.
ARGUMENTS
selection = str: atom selection {default: all}
a = float [0..1]: desaturation factor {default: 0.5}
'''
raise pymol.IncentiveOnlyException()
|
edward/criticisms/ppc.py | zhangyewu/edward | 5,200 | 12615026 | <filename>edward/criticisms/ppc.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
import tensorflow as tf
from edward.models import RandomVariable
from edward.util import check_data, check_latent_vars, get_session
def ppc(T, data, latent_vars=None, n_samples=100):
"""Posterior predictive check
[@rubin1984bayesianly; @meng1994posterior; @gelman1996posterior].
PPC's form an empirical distribution for the predictive discrepancy,
$p(T\mid x) = \int p(T(x^{\\text{rep}})\mid z) p(z\mid x) dz$
by drawing replicated data sets $x^{\\text{rep}}$ and
calculating $T(x^{\\text{rep}})$ for each data set. Then it
compares it to $T(x)$.
If `data` is inputted with the prior predictive distribution, then
it is a prior predictive check [@box1980sampling].
Args:
T: function.
Discrepancy function, which takes a dictionary of data and
dictionary of latent variables as input and outputs a `tf.Tensor`.
data: dict.
Data to compare to. It binds observed variables (of type
`RandomVariable` or `tf.Tensor`) to their realizations (of
type `tf.Tensor`). It can also bind placeholders (of type
`tf.Tensor`) used in the model to their realizations.
latent_vars: dict.
Collection of random variables (of type `RandomVariable` or
`tf.Tensor`) binded to their inferred posterior. This argument
is used when the discrepancy is a function of latent variables.
n_samples: int.
Number of replicated data sets.
Returns:
list of np.ndarray.
List containing the reference distribution, which is a NumPy array
with `n_samples` elements,
$(T(x^{{\\text{rep}},1}, z^{1}), ...,
T(x^{\\text{rep,nsamples}}, z^{\\text{nsamples}}))$
and the realized discrepancy, which is a NumPy array with
`n_samples` elements,
$(T(x, z^{1}), ..., T(x, z^{\\text{nsamples}})).$
#### Examples
```python
# build posterior predictive after inference:
# it is parameterized by a posterior sample
x_post = ed.copy(x, {z: qz, beta: qbeta})
# posterior predictive check
# T is a user-defined function of data, T(data)
T = lambda xs, zs: tf.reduce_mean(xs[x_post])
ed.ppc(T, data={x_post: x_train})
# in general T is a discrepancy function of the data (both response and
# covariates) and latent variables, T(data, latent_vars)
T = lambda xs, zs: tf.reduce_mean(zs[z])
ed.ppc(T, data={y_post: y_train, x_ph: x_train},
latent_vars={z: qz, beta: qbeta})
# prior predictive check
# run ppc on original x
ed.ppc(T, data={x: x_train})
```
"""
sess = get_session()
if not callable(T):
raise TypeError("T must be a callable function.")
check_data(data)
if latent_vars is None:
latent_vars = {}
check_latent_vars(latent_vars)
if not isinstance(n_samples, int):
raise TypeError("n_samples must have type int.")
# Build replicated latent variables.
zrep = {key: tf.convert_to_tensor(value)
for key, value in six.iteritems(latent_vars)}
# Build replicated data.
xrep = {x: (x.value() if isinstance(x, RandomVariable) else obs)
for x, obs in six.iteritems(data)}
# Create feed_dict for data placeholders that the model conditions
# on; it is necessary for all session runs.
feed_dict = {key: value for key, value in six.iteritems(data)
if isinstance(key, tf.Tensor) and "Placeholder" in key.op.type}
# Calculate discrepancy over many replicated data sets and latent
# variables.
Trep = T(xrep, zrep)
Tobs = T(data, zrep)
Treps = []
Ts = []
for _ in range(n_samples):
# Take a forward pass (session run) to get new samples for
# each calculation of the discrepancy.
# Alternatively, we could unroll the graph by registering this
# operation `n_samples` times, each for different parent nodes
# representing `xrep` and `zrep`. But it's expensive.
Treps += [sess.run(Trep, feed_dict)]
Ts += [sess.run(Tobs, feed_dict)]
return [np.stack(Treps), np.stack(Ts)]
|
neutron/tests/unit/extensions/test_security_groups_normalized_cidr.py | congnt95/neutron | 1,080 | 12615040 | <gh_stars>1000+
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import security_groups_normalized_cidr
import webob.exc
from neutron.tests.unit.extensions import test_securitygroup
DB_PLUGIN_KLASS = (
'neutron.tests.unit.extensions.test_security_groups_normalized_cidr.'
'TestPlugin')
class SecurityGroupNormalizedCidrTestExtManager(
test_securitygroup.SecurityGroupTestExtensionManager):
def get_resources(self):
self.update_attributes_map(
security_groups_normalized_cidr.RESOURCE_ATTRIBUTE_MAP)
return super(
SecurityGroupNormalizedCidrTestExtManager, self).get_resources()
class TestPlugin(test_securitygroup.SecurityGroupTestPlugin):
supported_extension_aliases = ['security-group',
security_groups_normalized_cidr.ALIAS]
class TestSecurityGroupsNormalizedCidr(
test_securitygroup.SecurityGroupDBTestCase):
def setUp(self):
super(TestSecurityGroupsNormalizedCidr, self).setUp(
plugin=DB_PLUGIN_KLASS,
ext_mgr=SecurityGroupNormalizedCidrTestExtManager())
def test_create_security_group_rule_with_not_normalized_cidr(self):
name = 'webservers'
description = 'my webservers'
remote_prefixes = ['10.0.0.120/24', '10.0.0.200/24']
with self.security_group(name, description) as sg:
sg_id = sg['security_group']['id']
for remote_ip_prefix in remote_prefixes:
rule = self._build_security_group_rule(
sg_id,
'ingress', 'tcp',
remote_ip_prefix=remote_ip_prefix)
res = self._create_security_group_rule(self.fmt, rule)
self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
res_sg = self.deserialize(self.fmt, res)
self.assertEqual(
'10.0.0.0/24',
res_sg['security_group_rule']['normalized_cidr']
)
self.assertEqual(
remote_ip_prefix,
res_sg['security_group_rule']['remote_ip_prefix']
)
|
rosplan_rqt/src/rosplan_rqt/action_config.py | kvnptl/ROSPlan | 278 | 12615069 | <gh_stars>100-1000
from qt_gui.plugin import Plugin
from .action_config_widget import ActionConfigWidget  # widget class defined in action_config_widget.py
class ActionConfig(Plugin):
def __init__(self, context):
super(ActionConfig, self).__init__(context)
# Give QObjects reasonable names
self.setObjectName('ActionConfig')
# Create QWidget
self._widget = ActionConfigWidget()
if context.serial_number() > 1:
self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number()))
# Add widget to the user interface
context.add_widget(self._widget)
# Process standalone plugin command-line arguments
from argparse import ArgumentParser
parser = ArgumentParser()
# Add argument(s) to the parser.
parser.add_argument("-q", "--quiet", action="store_true",
dest="quiet",
help="Put plugin in silent mode")
args, unknowns = parser.parse_known_args(context.argv())
if not args.quiet:
print 'arguments: ', args
print 'unknowns: ', unknowns
def shutdown_plugin(self):
# TODO unregister all publishers here
pass
def save_settings(self, plugin_settings, instance_settings):
# TODO save intrinsic configuration, usually using:
# instance_settings.set_value(k, v)
pass
def restore_settings(self, plugin_settings, instance_settings):
# TODO restore intrinsic configuration, usually using:
# v = instance_settings.value(k)
pass
#def trigger_configuration(self):
# Comment in to signal that the plugin has a way to configure
# This will enable a setting button (gear icon) in each dock widget title bar
# Usually used to open a modal configuration dialog
|
runtime/musl-lkl/lkl/scripts/kconfig/tests/choice_value_with_m_dep/__init__.py | dme26/intravisor | 1,144 | 12615079 | <filename>runtime/musl-lkl/lkl/scripts/kconfig/tests/choice_value_with_m_dep/__init__.py<gh_stars>1000+
"""
Hide tristate choice values with mod dependency in y choice.
If tristate choice values depend on symbols set to 'm', they should be
hidden when the choice containing them is changed from 'm' to 'y'
(i.e. exclusive choice).
Related Linux commit: fa64e5f6a35efd5e77d639125d973077ca506074
"""
def test(conf):
assert conf.oldaskconfig('config', 'y') == 0
assert conf.config_contains('expected_config')
assert conf.stdout_contains('expected_stdout')
|
tests/test_chi_ssa_32.py | MAYANK25402/city-scrapers | 255 | 12615088 | from os.path import dirname, join
from city_scrapers_core.constants import COMMISSION, TENTATIVE
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.chi_ssa_32 import ChiSsa32Spider
test_response = file_response(
join(dirname(__file__), "files", "chi_ssa_32.json"),
url="https://auburngresham.wixsite.com/ssa32/calendar",
)
spider = ChiSsa32Spider()
freezer = freeze_time("2019-11-09")
freezer.start()
parsed_items = [item for item in spider.parse(test_response)]
freezer.stop()
def test_title():
assert parsed_items[0]["title"] == "SSA #69 Commissioners"
def test_description():
assert parsed_items[0]["description"] == (
" SSA #69 Commissioners Meeting:\n"
" Corresponding Dates: \n"
"February 12, 2019; July 23, 2019; and November 12, 2019\n"
" Location: 7901 S. Racine, Chicago, IL 60620\n"
" Time: 8:30 am to 10:00 am"
)
def test_start():
assert str(parsed_items[0]["start"]) == "2019-11-12 09:30:00"
def test_end():
assert str(parsed_items[0]["end"]) == "2019-11-12 11:00:00"
def test_time_notes():
assert parsed_items[0]["time_notes"] == ""
def test_id():
assert parsed_items[0]["id"] == "chi_ssa_32/201911120930/x/ssa_69_commissioners"
def test_status():
assert parsed_items[0]["status"] == TENTATIVE
def test_location():
assert parsed_items[0]["location"] == {
"name": "",
"address": "7901 S Racine Ave, Chicago, IL 60620, USA",
}
def test_source():
url_p = "https://www.google.com/calendar/event?eid="
url_s1 = "MWQxMDU2cDhmZmVjZjBmN2JqZHRuMmtncDEgZ2FnZGNjaGljYWdvQG0&ctz=GMT-05:00"
assert parsed_items[0]["source"] == url_p + url_s1
def test_links():
assert parsed_items[0]["links"] == []
def test_classification():
assert parsed_items[0]["classification"] == COMMISSION
def test_all_day():
assert parsed_items[0]["all_day"] is False
|
airflow/deploy/gcp_util.py | ajtrexler/tensorflow-recommendation-wals | 166 | 12615099 | <reponame>ajtrexler/tensorflow-recommendation-wals<gh_stars>100-1000
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for interacting with Google Cloud Platform APIs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import time
from apiclient import discovery
import apiclient.errors
# Cap exponential backoff for polling APIs
MAX_POLL_INTERVAL_SECS = 15
def empty_gcs_bucket(bucket_name, credentials):
"""Attempts to delete all objects in a bucket.
If concurrent object creations occur while the bucket is being
emptied, those objects may not be deleted and may cause bucket
deletion to fail.
Args:
bucket_name: a string specifying the bucket to empty
credentials: oauth2client.Credentials to be used for
authentication
"""
logging.info("Emptying GCS bucket: %s", bucket_name)
service = discovery.build('storage', 'v1', credentials=credentials)
response = service.objects().list(bucket=bucket_name).execute()
_delete_resources(bucket_name, response.get('items', []), credentials)
while 'nextPageToken' in response:
response = service.objects().list(
bucket=bucket_name, pageToken=response['nextPageToken']).execute()
_delete_resources(bucket_name, response.get('items', []), credentials)
def _delete_resources(bucket_name, resources, credentials):
"""Deletes the specified resources from the given bucket.
Resources are represented as described in
https://cloud.google.com/storage/docs/json_api/v1/objects#resource
Args:
bucket_name: a string specifying the bucket from which to
delete
resources: a list of resources
credentials: oauth2client.Credentials to be used for
authentication
"""
logging.info("Deleting %s resources.", len(resources))
service = discovery.build('storage', 'v1', credentials=credentials)
for r in resources:
try:
service.objects().delete(
bucket=bucket_name,
object=r['name']).execute()
except apiclient.errors.HttpError as e:
logging.warning('Error deleting %s: %s', r, e)
def create_gcs_bucket(bucket_name, location, project, credentials):
"""Attempts to create a Google Cloud Storage bucket.
Args:
bucket_name: a string specifying the name of the bucket to
create
location: a string specifying the location where the bucket
should be allocated. See
https://cloud.google.com/storage/docs/bucket-locations
for an authoritative list of values.
project: a string specifying the GCP project in which to create
the bucket
credentials: oauth2client.Credentials to be used for
authentication
Returns:
True if a bucket named bucket_name was successfully created, False
otherwise. Note that False will be returned if there was already a
bucket with the provided bucket_name.
"""
service = discovery.build('storage', 'v1', credentials=credentials)
body = {'name': bucket_name, 'location': location}
try:
service.buckets().insert(project=project, body=body).execute()
logging.info('Created GCS bucket gs://%s', bucket_name)
return True
except apiclient.errors.HttpError as e:
logging.warn('Failed to create GCS bucket gs://%s. %s', bucket_name, e)
return False
def delete_gcs_bucket(bucket_name, credentials, force=False):
"""Attempts to delete a Google Cloud Storage bucket.
The REST API doesn't allow for deletion of non-empty buckets;
use force=True to attempt to empty the bucket prior to deletion.
If concurrent object creations occur while the bucket is being
emptied, those objects may not be deleted and may cause bucket
deletion to fail.
Args:
bucket_name: a string specifying the name of the bucket to
delete
credentials: oauth2client.Credentials to be used for
authentication
force: a boolean specifying whether or not to attempt to empty
the bucket prior to deletion.
Returns:
True if a bucket named bucket_name was successfully deleted, False
otherwise.
"""
if force:
empty_gcs_bucket(bucket_name, credentials)
service = discovery.build('storage', 'v1', credentials=credentials)
try:
resp = service.buckets().delete(bucket=bucket_name).execute()
# An empty response indicates a successful deletion.
# https://cloud.google.com/storage/docs/json_api/v1/buckets/delete
return not bool(resp)
except apiclient.errors.HttpError as e:
logging.warn('Error deleting GCS bucket %s: %s', bucket_name, e)
return False
def create_gke_cluster(cluster_name, project, zone, credentials):
"""Tries to create a GKE cluster.
TODO(wwlian): Expose more of the node pool's configuration as
needed.
Args:
cluster_name: string specifying the desired cluster name
project: a string specifying the GCP project in which to create
the cluster
zone: string specifying the GCE zone in which to create the
cluster
credentials: oauth2client.Credentials to be used for
authentication
Returns:
True if a new cluster with the provided name has been created,
False otherwise. Note that False will be returned if a cluster
with the provided cluster_name already existed.
"""
service = discovery.build('container', 'v1', credentials=credentials)
cluster_body = {
'name': cluster_name,
'zone': zone,
'network': 'default',
'loggingService': 'logging.googleapis.com',
'monitoringService': 'none',
'subnetwork': 'default',
'nodePools': [{
'initialNodeCount': 3,
'config': {
'machineType': 'n1-standard-1',
'imageType': 'GCI',
'diskSizeGb': 100,
'oauthScopes': [
'https://www.googleapis.com/auth/compute',
'https://www.googleapis.com/auth/devstorage.read_write',
'https://www.googleapis.com/auth/sqlservice.admin',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/servicecontrol',
'https://www.googleapis.com/auth/service.management.'
'readonly',
'https://www.googleapis.com/auth/trace.append',
'https://www.googleapis.com/auth/source.read_only',
'https://www.googleapis.com/auth/cloud-platform'
]
},
'autoscaling': {
'enabled': False
},
'management': {
'autoUpgrade': False,
'autoRepair': False,
'upgradeOptions': {}
},
'name': 'default-pool'
}],
'masterAuth': {
'username': 'admin'
}
}
request = service.projects().zones().clusters().create(
projectId=project, zone=zone, body={'cluster': cluster_body})
logging.info('Waiting for GKE cluster creation: %s', cluster_name)
if not _wait_for_operation(request,
_gke_op_poller_factory(service, project, zone)):
logging.warn('GKE cluster creation failed: %s', cluster_name)
return False
# Verify creation by tring to retrieve cluster info.
request = service.projects().zones().clusters().get(
projectId=project, zone=zone, clusterId=cluster_name)
try:
request.execute()
logging.info('Created GKE cluster: %s', cluster_name)
return True
except apiclient.errors.HttpError as e:
logging.warn(str(e))
return False
def delete_gke_cluster(cluster_name, project, zone, credentials):
"""Attempts to delete a GKE cluster.
Args:
cluster_name: A string specifying the cluster to delete
project: a string specifying the GCP project in which the
cluster resides
zone: The zone from which to delete the cluster
credentials: oauth2client.Credentials to be used for
authentication
Returns:
True if the specified cluster was successfully deleted from the
specified zone; False otherwise.
"""
service = discovery.build('container', 'v1', credentials=credentials)
# If the cluster is in the process of being provisioned, we have to wait
# until it is up and running before we can initiate deletion.
request = service.projects().zones().clusters().get(
projectId=project, zone=zone, clusterId=cluster_name)
while True:
try:
cluster = request.execute()
except apiclient.errors.HttpError:
# No such cluster; this will get caught when we try to delete
# it.
break
if cluster['status'] == 'RUNNING':
break
request = service.projects().zones().clusters().delete(
projectId=project, zone=zone, clusterId=cluster_name)
if not _wait_for_operation(
request, _gke_op_poller_factory(service, project, zone)):
return False
# Verify deletion by tring to retrieve cluster info.
request = service.projects().zones().clusters().get(
projectId=project, zone=zone, clusterId=cluster_name)
try:
request.execute()
return False
except apiclient.errors.HttpError as e:
return e.resp['status'] == '404'
def create_sql_instance(instance_name, db_region, db_tier, project,
credentials):
"""Creates a Cloud SQL instance and sets its root password.
If the instance already exists, the creation step is skipped, but
the root password will still be reset.
Args:
instance_name: A string specifying the name for the new instance
db_region: A string specifying the region in which the instance
should be created
db_tier: A string specifying the database tier to create. For a
list of valid tiers and the regions in which they are
available, use 'gcloud sql tiers list'.
project: a string specifying the GCP project in which to create
the instance credentials: oauth2client.Credentials to be
used for authentication
Returns:
True if the Cloud SQL instance was successfully created, and its
root password was successfully set; False otherwise.
"""
service = discovery.build('sqladmin', 'v1beta4', credentials=credentials)
request = service.instances().insert(
project=project,
body={
'name': instance_name,
'region': db_region,
'settings': {
'tier': db_tier,
'activationPolicy': 'ALWAYS'
}
}
)
logging.info('Waiting for Cloud SQL instance creation: %s', instance_name)
if not _wait_for_operation(request,
_cloud_sql_op_poller_factory(service, project)):
return False
# Verify creation by tring to retrieve instance info.
request = service.instances().get(project=project,
instance=instance_name)
try:
request.execute()
return True
except apiclient.errors.HttpError:
return False
def set_sql_root_password(root_pw, instance_name, project, credentials):
"""Attempts to set the root SQL password in a Cloud SQL instance.
Args:
root_pw: A string specifying the root password to set in the
Cloud SQL instance.
instance_name: A string specifying the name of the Cloud SQL
instance
project: a string specifying the GCP project in which to create
the instance
credentials: oauth2client.Credentials to be used for
authentication
Returns:
True if the instance's root password was successfully set; False
otherwise.
"""
service = discovery.build('sqladmin', 'v1beta4', credentials=credentials)
request = service.users().update(
project=project, instance=instance_name, host='%', name='root',
        body={'password': root_pw})
logging.info('Waiting for Cloud SQL root password set: %s', instance_name)
return _wait_for_operation(request,
_cloud_sql_op_poller_factory(service, project))
def delete_sql_instance(instance_name, project, credentials):
"""Attempts to delete a Google Cloud SQL instance.
Args:
instance_name: A string specifying the name for the new instance
project: a string specifying the GCP project in which the
instance resides
credentials: oauth2client.Credentials to be used for
authentication
Returns:
True if this attempt to delete the instance succeeded, False
otherwise. Note that this means that this function may return
False if the instance did not exist in the first place or was
        deleted concurrently.
"""
service = discovery.build('sqladmin', 'v1beta4', credentials=credentials)
# If the instance is in the process of being provisioned, we have to
# wait until it is up and running before we can initiate deletion.
request = service.instances().get(project=project, instance=instance_name)
while True:
try:
instance = request.execute()
except apiclient.errors.HttpError:
# No such instance; this will get caught when we try to delete
# it.
break
if instance['state'] == 'RUNNABLE':
break
request = service.instances().delete(project=project,
instance=instance_name)
if not _wait_for_operation(
request, _cloud_sql_op_poller_factory(service, project)):
return False
    # Verify deletion by trying to retrieve instance info.
request = service.instances().get(project=project,
instance=instance_name)
try:
request.execute()
return False
except apiclient.errors.HttpError as e:
return e.resp['status'] == '404'
def _wait_for_operation(request, op_poller):
"""Executes a request and waits for its operation to finish.
Args:
        request: An apiclient.http.HttpRequest whose response is expected
to be an Operation.
op_poller: A function whose first argument is expected to be an
Operation. When called on an operation, op_poller should
poll the API and return an updated version of the same
Operation.
Returns:
True if request executed without raising an HttpError, False
otherwise
"""
try:
logging.debug('Executing synchronous request: %s', request.to_json())
start_time = time.time()
op = request.execute()
except apiclient.errors.HttpError as e:
logging.warn(str(e))
return False
poll_interval_secs = 1
while op['status'] != 'DONE':
time.sleep(poll_interval_secs)
logging.debug('Polling Operation: %s', op)
op = op_poller(op)
# Exponential backoff up to maximum.
poll_interval_secs = min(MAX_POLL_INTERVAL_SECS,
2 * poll_interval_secs)
duration = time.time() - start_time
logging.debug('Operation completed in %s seconds: %s',
duration, request.to_json())
return True
def _cloud_sql_op_poller_factory(service, project):
"""Creates a function that polls a Cloud SQL operation.
The value returned by a call to this function can be provided as the
op_poller argument to _wait_for_operation.
Args:
        service: an apiclient.discovery.Resource object for interacting
with the Cloud SQL API. This is usually the same object used
to create the request that spawned the operation that will
be waited on.
project: a string specifying the GCP project in which the
operation will be executing
Returns:
a function that can be used as the second argument to
_wait_for_operation.
"""
def op_poller(op):
return (service.operations()
.get(project=project, operation=op['name']).execute())
return op_poller
def _gke_op_poller_factory(service, project, zone):
"""Creates a function that polls a GKE operation.
The value returned by a call to this function can be provided as the
op_poller argument to _wait_for_operation.
Args:
        service: an apiclient.discovery.Resource object for interacting
with the GKE API. This is usually the same object used to
create the request that spawned the operation that will be
waited on.
project: a string specifying the GCP project in which the
operation will be executing
zone: a string specifying the GCE zone in which the operation
will be running
Returns:
a function that can be used as the second argument to
_wait_for_operation.
"""
def op_poller(op):
return (service.projects().zones().operations()
.get(projectId=project, zone=zone, operationId=op['name'])
.execute())
return op_poller
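# Illustrative sketch, not part of the original module: how a poller factory
# above is combined with _wait_for_operation. The project, zone and cluster
# names are placeholders; a real call needs valid credentials and resources.
def _example_wait_for_cluster_deletion(credentials):
    """Hypothetical helper showing the _wait_for_operation pattern."""
    service = discovery.build('container', 'v1', credentials=credentials)
    request = service.projects().zones().clusters().delete(
        projectId='example-project', zone='us-central1-a',
        clusterId='example-cluster')
    # The poller re-reads the Operation by name until its status is 'DONE',
    # with exponential backoff between polls (see _wait_for_operation).
    return _wait_for_operation(
        request, _gke_op_poller_factory(service, 'example-project',
                                        'us-central1-a'))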
|
imaging/ml/toolkit/hcls_imaging_ml_toolkit/dicom_builder.py | rczhang/healthcare | 310 | 12615121 | <filename>imaging/ml/toolkit/hcls_imaging_ml_toolkit/dicom_builder.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for building Basic Text DICOM Structured Reports."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from typing import Any, Dict, Text
import uuid
from absl import logging
import numpy as np
from hcls_imaging_ml_toolkit import dicom_json
from hcls_imaging_ml_toolkit import dicom_web
from hcls_imaging_ml_toolkit import tag_values
from hcls_imaging_ml_toolkit import tags
# Default UUID prefix used to generate UIDs of DICOM objects created by this
# module.
DEFAULT_UUID_PREFIX = '1.3.6.1.4.1.11129.5.3'
# DICOM header preamble is 128-byte long.
_PREAMBLE_LENGTH = 128
# Little Endian Transfer Syntax.
_IMPLICIT_VR_LITTLE_ENDIAN = '1.2.840.10008.1.2'
_EXPLICIT_VR_LITTLE_ENDIAN = '1.2.840.10008.1.2.1'
# Accepted character set.
_ISO_CHARACTER_SET = 'ISO_IR 192'
class DicomBuilder(object):
"""Can be used to create DICOM objects."""
def __init__(self, uid_prefix: Text = DEFAULT_UUID_PREFIX) -> None:
"""Inits DicomBuilder with passed args.
Args:
uid_prefix: UID string that is used as the prefix for all generated UIDs.
"""
self._uid_prefix = uid_prefix
def BuildJsonSR(
self, report_text: Text,
metadata_json: Dict[Text, Any]) -> dicom_json.ObjectWithBulkData:
"""Builds and returns a Basic Text DICOM JSON Structured Report instance.
This function will create a new DICOM series.
Args:
report_text: Text string to use for the Basic Text DICOM SR.
metadata_json: Dict of tags (including study-level information) to add.
Returns:
DICOM JSON Object containing the Structured Report.
"""
# Dicom StowJsonRs expects a list with DICOM JSON as elements.
# Add study level tags to the SR.
dataset = metadata_json.copy()
series_uid = self.GenerateUID()
instance_uid = self.GenerateUID()
dicom_json.Insert(dataset, tags.SOP_CLASS_UID,
tag_values.BASIC_TEXT_SR_CUID)
dicom_json.Insert(dataset, tags.MODALITY, tag_values.SR_MODALITY)
dicom_json.Insert(dataset, tags.SERIES_INSTANCE_UID, series_uid)
dicom_json.Insert(dataset, tags.SPECIFIC_CHARACTER_SET, _ISO_CHARACTER_SET)
logging.log(
logging.INFO,
'Creating DICOM JSON SR with Series UID: %s and Instance UID: %s',
series_uid, instance_uid)
dicom_json.Insert(dataset, tags.SOP_INSTANCE_UID, instance_uid)
content_dataset = {}
dicom_json.Insert(content_dataset, tags.RELATIONSHIP_TYPE, 'CONTAINS')
dicom_json.Insert(content_dataset, tags.VALUE_TYPE, 'TEXT')
dicom_json.Insert(content_dataset, tags.TEXT_VALUE, report_text)
dicom_json.Insert(dataset, tags.CONTENT_SEQUENCE, content_dataset)
dicom_json.Insert(dataset, tags.TRANSFER_SYNTAX_UID,
_IMPLICIT_VR_LITTLE_ENDIAN)
dicom_json.Insert(dataset, tags.MEDIA_STORAGE_SOP_CLASS_UID,
tag_values.BASIC_TEXT_SR_CUID)
dicom_json.Insert(dataset, tags.MEDIA_STORAGE_SOP_INSTANCE_UID,
instance_uid)
return dicom_json.ObjectWithBulkData(dataset)
def BuildJsonSC(self, image_array: np.ndarray, metadata_json: Dict[Text, Any],
series_uid: Text) -> dicom_json.ObjectWithBulkData:
"""Builds and returns a DICOM Secondary Capture.
Args:
image_array: Image array (RGB) to embed in DICOM instance.
metadata_json: Dict of tags (including study-level information) to add.
series_uid: UID of the series to create the SC in.
Returns:
DICOM JSON Object containing JSON and bulk data of the Secondary Capture.
"""
# Copy over any study and instance level tags.
instance_uid = self.GenerateUID()
metadata_json = metadata_json.copy()
dicom_json.Insert(metadata_json, tags.SOP_CLASS_UID,
tag_values.SECONDARY_CAPTURE_CUID)
dicom_json.Insert(metadata_json, tags.MODALITY, tag_values.OT_MODALITY)
dicom_json.Insert(metadata_json, tags.SERIES_INSTANCE_UID, series_uid)
dicom_json.Insert(metadata_json, tags.SPECIFIC_CHARACTER_SET,
_ISO_CHARACTER_SET)
dicom_json.Insert(metadata_json, tags.SOP_INSTANCE_UID, instance_uid)
dicom_json.Insert(metadata_json, tags.TRANSFER_SYNTAX_UID,
_IMPLICIT_VR_LITTLE_ENDIAN)
dicom_json.Insert(metadata_json, tags.MEDIA_STORAGE_SOP_CLASS_UID,
tag_values.SECONDARY_CAPTURE_CUID)
dicom_json.Insert(metadata_json, tags.MEDIA_STORAGE_SOP_INSTANCE_UID,
instance_uid)
    # Ensure the URI is unique.
study_uid = dicom_json.GetValue(metadata_json, tags.STUDY_INSTANCE_UID)
uri = '{}/{}/{}'.format(study_uid, series_uid, instance_uid)
metadata_json[tags.PIXEL_DATA.number] = {
'vr': tags.PIXEL_DATA.vr,
'BulkDataURI': uri
}
dicom_json.Insert(metadata_json, tags.PHOTOMETRIC_INTERPRETATION, 'RGB')
dicom_json.Insert(metadata_json, tags.SAMPLES_PER_PIXEL, 3)
# Indicates we store pixel data as R1,G1,B1,R2,G2,B2...
dicom_json.Insert(metadata_json, tags.PLANAR_CONFIGURATION, 0)
dicom_json.Insert(metadata_json, tags.ROWS, image_array.shape[0])
dicom_json.Insert(metadata_json, tags.COLUMNS, image_array.shape[1])
dicom_json.Insert(metadata_json, tags.BITS_ALLOCATED, 8)
dicom_json.Insert(metadata_json, tags.BITS_STORED, 8)
dicom_json.Insert(metadata_json, tags.HIGH_BIT, 7)
dicom_json.Insert(metadata_json, tags.PIXEL_REPRESENTATION, 0)
bulkdata = dicom_web.DicomBulkData(
uri=uri,
data=image_array.tobytes(),
content_type='application/octet-stream')
return dicom_json.ObjectWithBulkData(metadata_json, [bulkdata])
def BuildJsonInstanceFromPng(
self, image: bytes, sop_class_uid: Text) -> dicom_json.ObjectWithBulkData:
"""Builds and returns a DICOM instance from a PNG.
    This function will create a new DICOM study and series. Converts all
    incoming images to grayscale.
Args:
image: Image bytes of DICOM instance.
sop_class_uid: UID of the SOP class for DICOM instance.
Returns:
DICOM JSON Object containing JSON and bulk data of the Secondary Capture.
"""
study_uid = self.GenerateUID()
series_uid = self.GenerateUID()
instance_uid = self.GenerateUID()
metadata_json = {}
dicom_json.Insert(metadata_json, tags.PLANAR_CONFIGURATION, 0)
# Converts colored images to grayscale.
dicom_json.Insert(metadata_json, tags.PHOTOMETRIC_INTERPRETATION,
'MONOCHROME2')
dicom_json.Insert(metadata_json, tags.SOP_CLASS_UID, sop_class_uid)
dicom_json.Insert(metadata_json, tags.STUDY_INSTANCE_UID, study_uid)
dicom_json.Insert(metadata_json, tags.SERIES_INSTANCE_UID, series_uid)
dicom_json.Insert(metadata_json, tags.SPECIFIC_CHARACTER_SET,
_ISO_CHARACTER_SET)
dicom_json.Insert(metadata_json, tags.SOP_INSTANCE_UID, instance_uid)
dicom_json.Insert(metadata_json, tags.TRANSFER_SYNTAX_UID,
_EXPLICIT_VR_LITTLE_ENDIAN)
dicom_json.Insert(metadata_json, tags.MEDIA_STORAGE_SOP_CLASS_UID,
sop_class_uid)
dicom_json.Insert(metadata_json, tags.MEDIA_STORAGE_SOP_INSTANCE_UID,
instance_uid)
    # Ensure the URI is unique.
uri = '{}/{}/{}'.format(study_uid, series_uid, instance_uid)
metadata_json[tags.PIXEL_DATA.number] = {
'vr': tags.PIXEL_DATA.vr,
'BulkDataURI': uri
}
bulkdata = dicom_web.DicomBulkData(
uri=uri, data=image, content_type='image/png; transfer-syntax=""')
return dicom_json.ObjectWithBulkData(metadata_json, [bulkdata])
def GenerateUID(self) -> Text:
"""Generates a random DICOM UUID with the prefix DicomBuilder was constructed with.
Returns:
Unique UID starting with |self._uid_prefix|.
"""
# Generates a unique UID using the Process ID, Host ID and current time.
    # Uses a period as the separator and combines the generated UUID with the
# |self._uid_prefix| prefix.
# Example: 1.3.6.1.4.1.11129.5.3.268914880332007.160162.47.376673
uuid_components = [
self._uid_prefix,
uuid.getnode(),
abs(os.getpid()),
datetime.datetime.today().second,
datetime.datetime.today().microsecond
]
generated_uuid = '.'.join(
str(uuid_component) for uuid_component in uuid_components)
return generated_uuid
def UIDStartsWith(uid: Text, prefix: Text) -> bool:
"""Determines if the uid starts with |prefix|.
Args:
uid: Text string representing a UID.
prefix: Text string representing the prefix of the UID which is being
tested.
Returns:
True if |uid| starts with |prefix| false otherwise.
"""
return uid.find(prefix + '.') == 0
def IsToolkitUID(uid: Text) -> bool:
"""Determines if the uid was generated by the toolkit.
Args:
uid: Text string representing a UID.
Returns:
True if |uid| starts with |DEFAULT_UUID_PREFIX| false otherwise.
"""
return UIDStartsWith(uid, DEFAULT_UUID_PREFIX)
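# Illustrative sketch, not part of the original module: a typical way to use
# DicomBuilder to create a Basic Text SR for an existing study. The study UID
# and report text below are placeholders rather than values from real data.
def _ExampleBuildTextSR() -> dicom_json.ObjectWithBulkData:
  """Hypothetical helper showing BuildJsonSR usage."""
  builder = DicomBuilder()
  study_level_tags = {}
  dicom_json.Insert(study_level_tags, tags.STUDY_INSTANCE_UID,
                    '1.2.3.4.5.6.7.8.9')
  # BuildJsonSR copies the study-level tags and adds fresh series and
  # instance UIDs generated with the builder's UID prefix.
  return builder.BuildJsonSR('Example finding: no abnormality detected.',
                             study_level_tags)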
|
go/private/actions/archive.bzl | aignas/rules_go | 1,099 | 12615127 | <filename>go/private/actions/archive.bzl
# Copyright 2014 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//go/private:common.bzl",
"as_tuple",
"split_srcs",
)
load(
"//go/private:mode.bzl",
"LINKMODE_C_ARCHIVE",
"LINKMODE_C_SHARED",
"mode_string",
)
load(
"//go/private:providers.bzl",
"GoArchive",
"GoArchiveData",
"effective_importpath_pkgpath",
"get_archive",
)
load(
"//go/private/rules:cgo.bzl",
"cgo_configure",
)
load(
"//go/private/actions:compilepkg.bzl",
"emit_compilepkg",
)
def emit_archive(go, source = None, _recompile_suffix = ""):
"""See go/toolchains.rst#archive for full documentation."""
if source == None:
fail("source is a required parameter")
split = split_srcs(source.srcs)
testfilter = getattr(source.library, "testfilter", None)
pre_ext = ""
if go.mode.link == LINKMODE_C_ARCHIVE:
pre_ext = "_" # avoid collision with go_binary output file with .a extension
elif testfilter == "exclude":
pre_ext = ".internal"
elif testfilter == "only":
pre_ext = ".external"
if _recompile_suffix:
pre_ext += _recompile_suffix
out_lib = go.declare_file(go, name = source.library.name, ext = pre_ext + ".a")
# store __.PKGDEF and nogo facts in .x
out_export = go.declare_file(go, name = source.library.name, ext = pre_ext + ".x")
out_cgo_export_h = None # set if cgo used in c-shared or c-archive mode
direct = [get_archive(dep) for dep in source.deps]
runfiles = source.runfiles
data_files = runfiles.files
for a in direct:
runfiles = runfiles.merge(a.runfiles)
if a.source.mode != go.mode:
fail("Archive mode does not match {} is {} expected {}".format(a.data.label, mode_string(a.source.mode), mode_string(go.mode)))
importmap = "main" if source.library.is_main else source.library.importmap
importpath, _ = effective_importpath_pkgpath(source.library)
if source.cgo and not go.mode.pure:
# TODO(jayconrod): do we need to do full Bourne tokenization here?
cppopts = [f for fs in source.cppopts for f in fs.split(" ")]
copts = [f for fs in source.copts for f in fs.split(" ")]
cxxopts = [f for fs in source.cxxopts for f in fs.split(" ")]
clinkopts = [f for fs in source.clinkopts for f in fs.split(" ")]
cgo = cgo_configure(
go,
srcs = split.go + split.c + split.asm + split.cxx + split.objc + split.headers,
cdeps = source.cdeps,
cppopts = cppopts,
copts = copts,
cxxopts = cxxopts,
clinkopts = clinkopts,
)
if go.mode.link in (LINKMODE_C_SHARED, LINKMODE_C_ARCHIVE):
out_cgo_export_h = go.declare_file(go, path = "_cgo_install.h")
cgo_deps = cgo.deps
runfiles = runfiles.merge(cgo.runfiles)
emit_compilepkg(
go,
sources = split.go + split.c + split.asm + split.cxx + split.objc + split.headers,
cover = source.cover,
embedsrcs = source.embedsrcs,
importpath = importpath,
importmap = importmap,
archives = direct,
out_lib = out_lib,
out_export = out_export,
out_cgo_export_h = out_cgo_export_h,
gc_goopts = source.gc_goopts,
cgo = True,
cgo_inputs = cgo.inputs,
cppopts = cgo.cppopts,
copts = cgo.copts,
cxxopts = cgo.cxxopts,
objcopts = cgo.objcopts,
objcxxopts = cgo.objcxxopts,
clinkopts = cgo.clinkopts,
testfilter = testfilter,
)
else:
cgo_deps = depset()
emit_compilepkg(
go,
sources = split.go + split.c + split.asm + split.cxx + split.objc + split.headers,
cover = source.cover,
embedsrcs = source.embedsrcs,
importpath = importpath,
importmap = importmap,
archives = direct,
out_lib = out_lib,
out_export = out_export,
gc_goopts = source.gc_goopts,
cgo = False,
testfilter = testfilter,
)
data = GoArchiveData(
# TODO(#2578): reconsider the provider API. There's a lot of redundant
# information here. Some fields are tuples instead of lists or dicts
# since GoArchiveData is stored in a depset, and no value in a depset
# may be mutable. For now, new copied fields are private (named with
# a leading underscore) since they may change in the future.
# GoLibrary fields
name = source.library.name,
label = source.library.label,
importpath = source.library.importpath,
importmap = source.library.importmap,
importpath_aliases = source.library.importpath_aliases,
pathtype = source.library.pathtype,
# GoSource fields
srcs = as_tuple(source.srcs),
orig_srcs = as_tuple(source.orig_srcs),
_orig_src_map = tuple([source.orig_src_map.get(src, src) for src in source.srcs]),
_cover = as_tuple(source.cover),
_embedsrcs = as_tuple(source.embedsrcs),
_x_defs = tuple(source.x_defs.items()),
_gc_goopts = as_tuple(source.gc_goopts),
_cgo = source.cgo,
_cdeps = as_tuple(source.cdeps),
_cppopts = as_tuple(source.cppopts),
_copts = as_tuple(source.copts),
_cxxopts = as_tuple(source.cxxopts),
_clinkopts = as_tuple(source.clinkopts),
_cgo_exports = as_tuple(source.cgo_exports),
# Information on dependencies
_dep_labels = tuple([d.data.label for d in direct]),
_dep_importmaps = tuple([d.data.importmap for d in direct]),
# Information needed by dependents
file = out_lib,
export_file = out_export,
data_files = as_tuple(data_files),
_cgo_deps = as_tuple(cgo_deps),
)
x_defs = dict(source.x_defs)
for a in direct:
x_defs.update(a.x_defs)
cgo_exports_direct = list(source.cgo_exports)
# Ensure that the _cgo_export.h of the current target comes first when cgo_exports is iterated
# by prepending it and specifying the order explicitly. This is required as the CcInfo attached
# to the archive only exposes a single header rather than combining all headers.
if out_cgo_export_h:
cgo_exports_direct.insert(0, out_cgo_export_h)
cgo_exports = depset(direct = cgo_exports_direct, transitive = [a.cgo_exports for a in direct], order = "preorder")
return GoArchive(
source = source,
data = data,
direct = direct,
libs = depset(direct = [out_lib], transitive = [a.libs for a in direct]),
transitive = depset([data], transitive = [a.transitive for a in direct]),
x_defs = x_defs,
cgo_deps = depset(transitive = [cgo_deps] + [a.cgo_deps for a in direct]),
cgo_exports = cgo_exports,
runfiles = runfiles,
mode = go.mode,
)
|
tests/clpy_tests/linalg_tests/test_eigenvalue.py | fixstars/clpy | 142 | 12615148 | <reponame>fixstars/clpy
import unittest
import numpy
import clpy
from clpy import backend
from clpy import testing
@testing.parameterize(*testing.product({
'UPLO': ['U', 'L'],
}))
@unittest.skipUnless(
backend.cusolver_enabled, 'Only cusolver in CUDA 8.0 is supported')
@testing.gpu
class TestEigenvalue(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.for_all_dtypes(no_float16=True, no_complex=True)
@testing.numpy_clpy_allclose(rtol=1e-3, atol=1e-4)
def test_eigh(self, xp, dtype):
a = xp.array([[1, 0, 3], [0, 5, 0], [7, 0, 9]], dtype)
w, v = xp.linalg.eigh(a, UPLO=self.UPLO)
        # Order of eigenvalues is not defined.
# They must be sorted to compare them.
if xp is numpy:
inds = numpy.argsort(w)
else:
inds = clpy.array(numpy.argsort(w.get()))
w = w[inds]
v = v[inds]
return xp.concatenate([w[None], v])
def test_eigh_float16(self):
        # NumPy's eigh does not support float16
a = clpy.array([[1, 0, 3], [0, 5, 0], [7, 0, 9]], 'e')
w, v = clpy.linalg.eigh(a, UPLO=self.UPLO)
self.assertEqual(w.dtype, numpy.float16)
self.assertEqual(v.dtype, numpy.float16)
na = numpy.array([[1, 0, 3], [0, 5, 0], [7, 0, 9]], 'f')
nw, nv = numpy.linalg.eigh(na, UPLO=self.UPLO)
testing.assert_allclose(w, nw, rtol=1e-3, atol=1e-4)
testing.assert_allclose(v, nv, rtol=1e-3, atol=1e-4)
@testing.for_all_dtypes(no_float16=True, no_complex=True)
@testing.numpy_clpy_allclose(rtol=1e-3, atol=1e-4)
def test_eigvalsh(self, xp, dtype):
a = xp.array([[1, 0, 3], [0, 5, 0], [7, 0, 9]], dtype)
w = xp.linalg.eigvalsh(a, UPLO=self.UPLO)
        # Order of eigenvalues is not defined.
# They must be sorted to compare them.
if xp is numpy:
inds = numpy.argsort(w)
else:
inds = clpy.array(numpy.argsort(w.get()))
w = w[inds]
return w
|
example_configs/text2text/en-de/en-de-convs2s-8-gpu.py | gioannides/OpenSeq2Seq | 1,459 | 12615172 | <filename>example_configs/text2text/en-de/en-de-convs2s-8-gpu.py
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from open_seq2seq.models import Text2Text
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.data.text2text.tokenizer import EOS_ID
from open_seq2seq.encoders import ConvS2SEncoder
from open_seq2seq.decoders import ConvS2SDecoder
from open_seq2seq.losses import BasicSequenceLoss
from open_seq2seq.optimizers.lr_policies import transformer_policy
from open_seq2seq.parts.convs2s.utils import gated_linear_units
import math
"""
This configuration file describes a variant of ConvS2S model from
https://arxiv.org/pdf/1705.03122
"""
# REPLACE THIS TO THE PATH WITH YOUR WMT DATA
data_root = "[REPLACE THIS TO THE PATH WITH YOUR WMT DATA]"
base_model = Text2Text
num_layers = 15
d_model = 512
hidden_before_last = 512
conv_act = gated_linear_units
normalization_type = "weight_norm"
scaling_factor = math.sqrt(0.5)
max_length = 64
base_params = {
"use_horovod": True,
"num_gpus": 1, # Use 8 horovod workers to train on 8 GPUs
    # max_steps is set for 35 epochs on 8 GPUs with a batch size of 64;
    # 4.5M is the size of the dataset
"max_steps": 310000,
"batch_size_per_gpu": 64,
"save_summaries_steps": 100,
"print_loss_steps": 100,
"print_samples_steps": 100,
"eval_steps": 4000,
"save_checkpoint_steps": 4000,
"logdir": "ConvSeq2Seq-8GPUs-FP32",
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": transformer_policy,
"lr_policy_params": {
"learning_rate": 9,
"max_lr": 1e-3,
"warmup_steps": 4000,
"d_model": d_model,
},
"max_grad_norm": 0.1,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"dtype": tf.float32, # to enable mixed precision, comment this line and uncomment two below lines
#"dtype": "mixed",
#"loss_scaling": "Backoff",
"encoder": ConvS2SEncoder,
"encoder_params": {
"src_emb_size": d_model,
"pad_embeddings_2_eight": True,
"att_layer_num": num_layers,
# original ConvS2S paper
#"conv_nchannels_kwidth": [(512, 3)]*10 + [(768, 3)]*3 + [(2048, 1)]*2,
# fairseq config
"conv_nchannels_kwidth": [(512, 3)]*9 + [(1024, 3)]*4 + [(2048, 1)]*2,
"embedding_dropout_keep_prob": 0.8,
"hidden_dropout_keep_prob": 0.8,
"max_input_length": max_length,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
"conv_activation": conv_act,
'normalization_type': normalization_type,
"scaling_factor": scaling_factor,
},
"decoder": ConvS2SDecoder,
"decoder_params": {
"shared_embed": True,
"tgt_emb_size": d_model,
"pad_embeddings_2_eight": True,
"out_emb_size": hidden_before_last,
"pos_embed": False,
# original ConvS2S paper
#"conv_nchannels_kwidth": [(512, 3)]*10 + [(768, 3)]*3 + [(2048, 1)]*2,
# fairseq config
"conv_nchannels_kwidth": [(512, 3)]*9 + [(1024, 3)]*4 + [(2048, 1)]*2,
"embedding_dropout_keep_prob": 0.8,
"hidden_dropout_keep_prob": 0.8,
"out_dropout_keep_prob": 0.8,
"max_input_length": max_length,
"extra_decode_length": 56,
"beam_size": 5,
"alpha": 0.6,
"EOS_ID": EOS_ID,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
"conv_activation": conv_act,
'normalization_type': normalization_type,
"scaling_factor": scaling_factor,
},
"loss": BasicSequenceLoss,
"loss_params": {
"offset_target_by_one": True,
"average_across_timestep": True,
"do_mask": True
}
}
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": True,
"src_vocab_file": data_root + "m_common.vocab",
"tgt_vocab_file": data_root + "m_common.vocab",
"source_file": data_root + "train.clean.en.shuffled.BPE_common.32K.tok",
"target_file": data_root + "train.clean.de.shuffled.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 2,
"max_length": max_length,
},
}
eval_params = {
"batch_size_per_gpu": 64,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt13-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt13-en-de.ref.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": True,
"max_length": max_length,
"prefetch_buffer_size": 1,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": False,
"max_length": max_length*2,
"prefetch_buffer_size": 1,
},
} |
TradzQAI/API/cbpro/websocket_client.py | kkuette/AI_project | 164 | 12615173 | <reponame>kkuette/AI_project
# cbpro/WebsocketClient.py
# original author: <NAME>
# mongo "support" added by <NAME>
#
#
# Template object to receive messages from the Coinbase Websocket Feed
from __future__ import print_function
import json
import base64
import hmac
import hashlib
import time
from threading import Thread
from websocket import create_connection, WebSocketConnectionClosedException
from pymongo import MongoClient
from .cbpro_auth import get_auth_headers
class WebsocketClient(object):
def __init__(self, url="wss://ws-feed.pro.coinbase.com", products=None, message_type="subscribe", mongo_collection=None,
should_print=True, auth=False, api_key="", api_secret="", api_passphrase="", channels=None):
self.url = url
self.products = products
self.channels = channels
self.type = message_type
self.stop = True
self.error = None
self.ws = None
self.thread = None
self.auth = auth
self.api_key = api_key
self.api_secret = api_secret
self.api_passphrase = api_passphrase
self.should_print = should_print
self.mongo_collection = mongo_collection
def start(self):
def _go():
self._connect()
self._listen()
self._disconnect()
self.stop = False
self.on_open()
self.thread = Thread(target=_go)
self.thread.start()
def _connect(self):
if self.products is None:
self.products = ["BTC-USD"]
elif not isinstance(self.products, list):
self.products = [self.products]
if self.url[-1] == "/":
self.url = self.url[:-1]
if self.channels is None:
sub_params = {'type': 'subscribe', 'product_ids': self.products}
else:
sub_params = {'type': 'subscribe', 'product_ids': self.products, 'channels': self.channels}
if self.auth:
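            # Added note: the authenticated feed reuses the REST signing
            # scheme; the prehash string below corresponds to a GET of
            # /users/self/verify, and the resulting CB-ACCESS-* headers are
            # copied into the subscribe payload as plain fields.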
timestamp = str(time.time())
message = timestamp + 'GET' + '/users/self/verify'
auth_headers = get_auth_headers(timestamp, message, self.api_key, self.api_secret, self.api_passphrase)
sub_params['signature'] = auth_headers['CB-ACCESS-SIGN']
sub_params['key'] = auth_headers['CB-ACCESS-KEY']
sub_params['passphrase'] = auth_headers['CB-ACCESS-PASSPHRASE']
sub_params['timestamp'] = auth_headers['CB-ACCESS-TIMESTAMP']
self.ws = create_connection(self.url)
self.ws.send(json.dumps(sub_params))
def _listen(self):
while not self.stop:
try:
'''
start_t = 0
if time.time() - start_t >= 30:
# Set a 30 second ping to keep connection alive
self.ws.ping("keepalive")
start_t = time.time()
'''
data = self.ws.recv()
self.t_time = time.time()
msg = json.loads(data)
except ValueError as e:
self.on_error(e)
except Exception as e:
self.on_error(e)
else:
self.on_message(msg)
def _disconnect(self):
try:
if self.ws:
self.ws.close()
except WebSocketConnectionClosedException as e:
pass
self.on_close()
def close(self):
self.stop = True
self.thread.join()
def on_open(self):
if self.should_print:
print("-- Subscribed! --\n")
def on_close(self):
if self.should_print:
print("\n-- Socket Closed --")
def on_message(self, msg):
if self.should_print:
print(msg)
if self.mongo_collection: # dump JSON to given mongo collection
self.mongo_collection.insert_one(msg)
def on_error(self, e, data=None):
self.error = e
self.stop = True
print('{} - data: {}'.format(e, data))
if __name__ == "__main__":
import sys
import cbpro
import time
class MyWebsocketClient(cbpro.WebsocketClient):
def on_open(self):
self.url = "wss://ws-feed.pro.coinbase.com/"
self.products = ["BTC-USD", "ETH-USD"]
self.message_count = 0
print("Let's count the messages!")
def on_message(self, msg):
print(json.dumps(msg, indent=4, sort_keys=True))
self.message_count += 1
def on_close(self):
print("-- Goodbye! --")
wsClient = MyWebsocketClient()
wsClient.start()
print(wsClient.url, wsClient.products)
try:
while True:
print("\nMessageCount =", "%i \n" % wsClient.message_count)
time.sleep(1)
except KeyboardInterrupt:
wsClient.close()
if wsClient.error:
sys.exit(1)
else:
sys.exit(0)
|
fireant/tests/test_fireant.py | mikeengland/fireant | 122 | 12615181 | <gh_stars>100-1000
from unittest import TestCase
import fireant
class APITests(TestCase):
def test_package_exports_databases(self):
with self.subTest("base class"):
self.assertIn("Database", vars(fireant))
for db in ("MySQL", "Vertica", "Redshift", "PostgreSQL", "MSSQL", "Snowflake"):
with self.subTest(db):
self.assertIn(db + "Database", vars(fireant))
def test_package_exports_dataset(self):
self.assertIn("DataSet", vars(fireant))
for element in ("Join", "Field", "DataType"):
with self.subTest(element):
self.assertIn(element, vars(fireant))
def test_package_exports_intervals(self):
for element in (
"hour",
"day",
"week",
"month",
"quarter",
"year",
"NumericInterval",
):
with self.subTest(element):
self.assertIn(element, vars(fireant))
def test_package_exports_references(self):
for element in (
"DayOverDay",
"WeekOverWeek",
"MonthOverMonth",
"QuarterOverQuarter",
"YearOverYear",
"DaysOverDays",
"WeeksOverWeeks",
"MonthsOverMonths",
"QuartersOverQuarters",
"YearsOverYears",
):
with self.subTest(element):
self.assertIn(element, vars(fireant))
def test_package_exports_modifiers(self):
for element in ("Rollup", "OmitFromRollup", "ResultSet"):
with self.subTest(element):
self.assertIn(element, vars(fireant))
def test_package_exports_operations(self):
self.assertIn("Operation", vars(fireant))
for element in ("CumSum", "CumMean", "CumProd", "RollingMean", "Share"):
with self.subTest(element):
self.assertIn(element, vars(fireant))
def test_package_exports_exceptions(self):
for element in ("DataSetException", "DataSetFilterException"):
with self.subTest(element):
self.assertIn(element, vars(fireant))
|
seg_models/image_reader.py | zhusiling/AAF-TF | 261 | 12615182 | # Copyright 2016 <NAME>
import numpy as np
import tensorflow as tf
def image_scaling(img, label):
"""Randomly scales the images between 0.5 to 1.5 times the original size.
Args:
img: A tensor of size [batch_size, height_in, width_in, channels]
label: A tensor of size [batch_size, height_in, width_in]
Returns:
A tensor of size [batch_size, height_out, width_out, channels], and another
tensor of size [batch_size, height_out, width_out]
"""
scale = tf.random_uniform(
[1], minval=0.5, maxval=2.0, dtype=tf.float32, seed=None)
h_new = tf.to_int32(tf.to_float(tf.shape(img)[0]) * scale)
w_new = tf.to_int32(tf.to_float(tf.shape(img)[1]) * scale)
new_shape = tf.squeeze(tf.stack([h_new, w_new]), squeeze_dims=[1])
img = tf.image.resize_images(img, new_shape)
# Rescale labels by nearest neighbor sampling.
label = tf.image.resize_nearest_neighbor(
tf.expand_dims(label, 0), new_shape)
label = tf.squeeze(label, squeeze_dims=[0])
return img, label
def image_mirroring(img, label):
"""Randomly horizontally mirrors the images and their labels.
Args:
img: A tensor of size [batch_size, height_in, width_in, channels]
label: A tensor of size [batch_size, height_in, width_in]
Returns:
A tensor of size [batch_size, height_in, width_in, channels], and another
tensor of size [batch_size, height_in, width_in]
"""
distort_left_right_random = tf.random_uniform(
[1], 0, 1.0, dtype=tf.float32)
distort_left_right_random = distort_left_right_random[0]
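    # Added note: the constants stacked below keep axes 0 (height) and 2
    # (channels) above the 0.5 threshold, so boolean_mask can only ever
    # select axis 1 (width); with probability ~0.5 the image and label are
    # therefore flipped horizontally.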
mirror = tf.less(tf.stack([1.0, distort_left_right_random, 1.0]), 0.5)
mirror = tf.boolean_mask([0, 1, 2], mirror)
img = tf.reverse(img, mirror)
label = tf.reverse(label, mirror)
return img, label
def crop_and_pad_image_and_labels(image,
label,
crop_h,
crop_w,
ignore_label=255,
random_crop=True):
"""Randomly crops and pads the images and their labels.
Args:
img: A tensor of size [batch_size, height_in, width_in, channels]
label: A tensor of size [batch_size, height_in, width_in]
crop_h: A number indicating the height of output data.
crop_w: A number indicating the width of output data.
ignore_label: A number indicating the indices of ignored label.
random_crop: enable/disable random_crop for random cropping.
Returns:
A tensor of size [batch_size, height_out, width_out, channels], and another
tensor of size [batch_size, height_out, width_out, 1]
"""
# Needs to be subtracted and later added due to 0 padding.
label = tf.cast(label, dtype=tf.float32)
label = label - ignore_label
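    # Worked example (added): with ignore_label=255 a real label value v is
    # shifted to v - 255 here; padded pixels are filled with 0 below and end
    # up as 0 + 255 = 255 (ignored) once ignore_label is added back after
    # cropping.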
# Concatenate images with labels, which makes random cropping easier.
combined = tf.concat(axis=2, values=[image, label])
image_shape = tf.shape(image)
combined_pad = tf.image.pad_to_bounding_box(
combined,
0,
0,
tf.maximum(crop_h, image_shape[0]),
tf.maximum(crop_w, image_shape[1]))
last_image_dim = tf.shape(image)[-1]
last_label_dim = tf.shape(label)[-1]
if random_crop:
        combined_crop = tf.random_crop(combined_pad, [crop_h, crop_w, 4])
else:
combined_crop = tf.image.resize_image_with_crop_or_pad(
combined_pad,
crop_h,
crop_w)
img_crop = combined_crop[:, :, :last_image_dim]
label_crop = combined_crop[:, :, last_image_dim:]
label_crop = label_crop + ignore_label
label_crop = tf.cast(label_crop, dtype=tf.uint8)
# Set static shape so that tensorflow knows shape at running.
img_crop.set_shape((crop_h, crop_w, 3))
    label_crop.set_shape((crop_h, crop_w, 1))
return img_crop, label_crop
def read_labeled_image_list(data_dir, data_list):
"""Reads txt file containing paths to images and ground truth masks.
Args:
data_dir: A string indicating the path to the root directory of images
and masks.
data_list: A string indicating the path to the file with lines of the form
'/path/to/image /path/to/label'.
Returns:
Two lists with all file names for images and masks, respectively.
"""
f = open(data_list, 'r')
images = []
masks = []
for line in f:
try:
image, mask = line.strip("\n").split(' ')
except ValueError: # Adhoc for test.
image = mask = line.strip("\n")
images.append(data_dir + image)
masks.append(data_dir + mask)
return images, masks
def read_images_from_disk(input_queue,
input_size,
random_scale,
random_mirror,
random_crop,
ignore_label,
img_mean):
"""Reads one image and its corresponding label and perform pre-processing.
Args:
input_queue: A tensorflow queue with paths to the image and its mask.
input_size: A tuple with entries of height and width. If None, return
images of original size.
random_scale: enable/disable random_scale for randomly scaling images
and their labels.
random_mirror: enable/disable random_mirror for randomly and horizontally
flipping images and their labels.
ignore_label: A number indicating the index of label to ignore.
img_mean: A vector indicating the mean colour values of RGB channels.
Returns:
Two tensors: the decoded image and its mask.
"""
img_contents = tf.read_file(input_queue[0])
label_contents = tf.read_file(input_queue[1])
img = tf.image.decode_jpeg(img_contents, channels=3)
img = tf.cast(img, dtype=tf.float32)
# Extract mean.
img -= img_mean
label = tf.image.decode_png(label_contents, channels=1)
if input_size is not None:
h, w = input_size
# Randomly scale the images and labels.
if random_scale:
img, label = image_scaling(img, label)
# Randomly mirror the images and labels.
if random_mirror:
img, label = image_mirroring(img, label)
# Randomly crops the images and labels.
img, label = crop_and_pad_image_and_labels(
img, label, h, w, ignore_label, random_crop
)
return img, label
class ImageReader(object):
"""
Generic ImageReader which reads images and corresponding
segmentation masks from the disk, and enqueues them into
a TensorFlow queue.
"""
def __init__(self, data_dir, data_list, input_size,
random_scale, random_mirror, random_crop,
ignore_label, img_mean):
"""
Initialise an ImageReader.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form
'/path/to/image /path/to/mask'.
input_size: a tuple with (height, width) values, to which all the
images will be resized.
random_scale: whether to randomly scale the images.
random_mirror: whether to randomly mirror the images.
ignore_label: index of label to ignore during the training.
img_mean: vector of mean colour values.
Returns:
A tensor of size [batch_size, height_out, width_out, channels], and
another tensor of size [batch_size, height_out, width_out]
"""
self.data_dir = data_dir
self.data_list = data_list
self.input_size = input_size
self.image_list, self.label_list = read_labeled_image_list(
self.data_dir, self.data_list)
self.images = tf.convert_to_tensor(self.image_list, dtype=tf.string)
self.labels = tf.convert_to_tensor(self.label_list, dtype=tf.string)
self.queue = tf.train.slice_input_producer(
[self.images, self.labels],
shuffle=input_size is not None) # not shuffling if it is val
self.image, self.label = read_images_from_disk(
self.queue,
self.input_size,
random_scale,
random_mirror,
random_crop,
ignore_label,
img_mean)
def dequeue(self, num_elements):
"""Packs images and labels into a batch.
Args:
num_elements: A number indicating the batch size.
Returns:
A tensor of size [batch_size, height_out, width_out, 3], and
another tensor of size [batch_size, height_out, width_out, 1]
"""
image_batch, label_batch = tf.train.batch(
[self.image, self.label],
num_elements,
num_threads=2)
return image_batch, label_batch
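def _example_reader_usage():
    """Illustrative sketch, not part of the original file, of ImageReader use.
    The paths, crop size and channel mean below are placeholders; img_mean in
    particular must match the dataset that is actually being read.
    """
    img_mean = np.array((104.00698793, 116.66876762, 122.67891434),
                        dtype=np.float32)
    reader = ImageReader(
        data_dir='/path/to/dataset',
        data_list='/path/to/train_list.txt',
        input_size=(321, 321),
        random_scale=True,
        random_mirror=True,
        random_crop=True,
        ignore_label=255,
        img_mean=img_mean)
    # Each dequeue yields a [10, 321, 321, 3] image batch and a
    # [10, 321, 321, 1] label batch.
    return reader.dequeue(10)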
|
tests/modules/imported/alias_classes.py | MoonStarCZW/py2rb | 124 | 12615222 | <reponame>MoonStarCZW/py2rb
class spam:
def __init__(self):
self.msgtxt = "this is spam"
def msg(self):
print(self.msgtxt)
if __name__ == '__main__':
s = spam()
s.msg()
|
pyopencl/_mymako.py | kanderso-nrel/pyopencl | 569 | 12615259 | try:
import mako.template # noqa
except ImportError:
raise ImportError(
"Some of PyOpenCL's facilities require the Mako templating engine.\n"
"You or a piece of software you have used has tried to call such a\n"
"part of PyOpenCL, but there was a problem importing Mako.\n\n"
"You may install mako now by typing one of:\n"
"- easy_install Mako\n"
"- pip install Mako\n"
"- aptitude install python-mako\n"
"\nor whatever else is appropriate for your system.")
from mako import * # noqa
|
tests/licensedcode/test_match.py | jimjag/scancode-toolkit | 1,511 | 12615286 | # -*- coding: utf-8 -*-
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import os
from commoncode.testcase import FileBasedTesting
from licensedcode import cache
from licensedcode import index
from licensedcode import models
from licensedcode.index import LicenseIndex
from licensedcode.match import filter_contained_matches
from licensedcode.match import filter_matches_missing_key_phrases
from licensedcode.match import filter_overlapping_matches
from licensedcode.match import get_full_matched_text
from licensedcode.match import get_matching_regions
from licensedcode.match import LicenseMatch
from licensedcode.match import merge_matches
from licensedcode.match import reportable_tokens
from licensedcode.match import restore_non_overlapping
from licensedcode.match import tokenize_matched_text
from licensedcode.match import Token
from licensedcode.models import Rule
from licensedcode.models import load_rules
from licensedcode.query import Query
from licensedcode.spans import Span
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
class TestLicenseMatchBasic(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def test_LicenseMatch_equality(self):
r1 = Rule(stored_text='r1', license_expression='apache-2.0 OR gpl')
m1_r1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
m2_r1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
assert m1_r1 == m2_r1
assert not (m1_r1 != m2_r1)
r2 = Rule(stored_text='r1', license_expression='apache-2.0 OR gpl')
m3_r2 = LicenseMatch(rule=r2, qspan=Span(0, 2), ispan=Span(0, 2))
assert r1 == r2
assert m1_r1 == m3_r2
def test_LicenseMatch_equality_2(self):
r1 = Rule(stored_text='r1', license_expression='apache-2.0 OR gpl')
m1_r1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
r2 = Rule(stored_text='r2', license_expression='gpl OR apache-2.0')
m2_r2 = LicenseMatch(rule=r2, qspan=Span(0, 2), ispan=Span(0, 2))
assert r1.licensing is r2.licensing
assert r1 != r2
assert r1.license_expression != r2.license_expression
assert r1.license_expression_object == r2.license_expression_object
assert str(r1.license_expression_object.simplify()) == str(r2.license_expression_object.simplify())
assert m1_r1 == m2_r2
assert not (m1_r1 != m2_r2)
assert r2.same_licensing(r2)
assert m1_r1.qspan == m2_r2.qspan
assert m1_r1.ispan == m2_r2.ispan
r3 = Rule(stored_text='r3', license_expression='gpl OR apache-2.0')
m3_r3 = LicenseMatch(rule=r3, qspan=Span(0, 2), ispan=Span(0, 3))
assert m2_r2 != m3_r3
r4 = Rule(stored_text='r3', license_expression='gpl1 OR apache-2.0')
m4_r4 = LicenseMatch(rule=r4, qspan=Span(0, 2), ispan=Span(0, 3))
assert m3_r3 != m4_r4
def test_LicenseMatch_not_equal(self):
r1 = Rule(text_file='r1', license_expression='apache-1.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
r2 = Rule(text_file='r2', license_expression='gpl OR apache-2.0')
m2 = LicenseMatch(rule=r2, qspan=Span(0, 2), ispan=Span(0, 2))
assert m1 != m2
r3 = Rule(text_file='r3', license_expression='apache-1.0 OR gpl')
m3 = LicenseMatch(rule=r3, qspan=Span(0, 2), ispan=Span(0, 2))
assert m1 == m3
r4 = Rule(text_file='r4', license_expression='apache-1.0 OR gpl')
m4 = LicenseMatch(rule=r4, qspan=Span(1, 2), ispan=Span(1, 2))
assert not m1 == m4
def test_LicenseMatch_equals(self):
rule = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=rule, matcher='chunk1', qspan=Span(0, 7), ispan=Span(0, 7), start_line=1, end_line=1)
m2 = LicenseMatch(rule=rule, matcher='chunk2', qspan=Span(0, 7), ispan=Span(0, 7), start_line=1, end_line=1)
assert m1 == m2
m3 = LicenseMatch(rule=rule, matcher='chunk3', qspan=Span(16, 23), ispan=Span(0, 7), start_line=3, end_line=3)
assert m1 != m3
def test_LicenseMatch_comparisons(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
contained1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
contained2 = LicenseMatch(rule=r1, qspan=Span(1, 4), ispan=Span(1, 4))
same_span1 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
same_span2 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
before_after = LicenseMatch(rule=r1, qspan=Span(8, 9), ispan=Span(8, 9))
touching = LicenseMatch(rule=r1, qspan=Span(7, 7), ispan=Span(7, 7))
overlapping = LicenseMatch(rule=r1, qspan=Span(4, 7), ispan=Span(4, 7))
assert same_span1 == same_span2
assert same_span1 in same_span2
assert same_span1.overlap(same_span2)
assert same_span2.overlap(same_span1)
assert contained1 not in same_span1
assert same_span1 not in contained1
assert contained1.overlap(same_span2)
assert contained1.surround(contained2)
assert contained2 in same_span2
assert contained2 in contained1
assert contained2.overlap(overlapping)
assert overlapping.overlap(contained2)
assert overlapping.overlap(same_span1)
assert not overlapping.overlap(before_after)
assert before_after.is_after(same_span1)
assert before_after.is_after(touching)
assert before_after.is_after(contained1)
def test_combine_raise_TypeError_for_matches_of_different_rules(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl2')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
m2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6))
try:
m1.combine(m2)
except TypeError:
pass
def test_combine_matches_with_same_rules(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
m2 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
match = m1.combine(m2)
assert match.qspan == Span(0, 6)
assert match.ispan == Span(0, 6)
def test_combine_matches_cannot_combine_matches_with_same_licensing_and_different_rules(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
m2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6))
try:
m1.combine(m2)
self.fail('Should fail')
except TypeError:
pass
def test_LicenseMatch_small(self):
r1_text = u'licensed under the GPL, licensed under the GPL distribute extent of law'
small_rule = Rule(text_file='small_rule', license_expression='apache-1.1', stored_text=r1_text)
r2_text = u'licensed under the GPL, licensed under the GPL re distribute extent of law' * 10
long_rule = Rule(text_file='long_rule', license_expression='apache-1.1', stored_text=r2_text)
_idx = index.LicenseIndex([small_rule, long_rule])
test = LicenseMatch(rule=small_rule, qspan=Span(0, 10), ispan=Span(0, 10), hispan=Span(12))
assert test.is_small()
test = LicenseMatch(rule=small_rule, qspan=Span(0, 10), ispan=Span(0, 10), hispan=Span(11, 12))
assert test.is_small()
test = LicenseMatch(rule=small_rule, qspan=Span(10, 11, 12), ispan=Span(10, 11, 12), hispan=Span(11, 12))
assert test.is_small()
test = LicenseMatch(rule=small_rule, qspan=Span(1, 6), ispan=Span(1, 6))
assert test.is_small()
test = LicenseMatch(rule=long_rule, qspan=Span(0, 10), ispan=Span(0, 10), hispan=Span(12))
assert test.is_small()
test = LicenseMatch(rule=long_rule, qspan=Span(5, 10), ispan=Span(5, 10), hispan=Span(5, 6))
assert test.is_small()
test = LicenseMatch(rule=small_rule, qspan=Span(1, 10), ispan=Span(1, 10), hispan=Span(3, 6))
assert not test.is_small()
def test_LicenseMatch_score_is_not_100_with_aho_match_and_extra_unknown_token_hash_match(self):
text = (
'this file is licensed under the GPL license version2 only '
'or any other version. You can redistribute this file under '
'this or any other license.')
r1 = Rule(text_file='r1', license_expression='apache-1.1', stored_text=text)
idx = index.LicenseIndex([r1])
querys = (
'this file is licensed under the GPL license version2 only '
+' big ' +
'or any other version. You can redistribute this file under '
'this or any other license.')
match = idx.match(query_string=querys)[0]
assert match.score() < 100
def test_LicenseMatch_score_is_not_100_with_aho_match_and_extra_unknown_token_seq_match(self):
text = (
'this file is licensed under the GPL license version2 only '
'or any other version. You can redistribute this file under '
'this or any other license.')
r1 = Rule(text_file='r1', license_expression='apache-1.1', stored_text=text)
idx = index.LicenseIndex([r1])
querys = (
'this file is licensed under the GPL license version2 only '
+' is ' +
'or any other version. You can redistribute this file under '
'this or any other license.')
match = idx.match(query_string=querys)[0]
assert match.score() < 100
def test_LicenseMatch_score_is_not_100_with_aho_match_and_extra_unknown_token_aho_match(self):
text = (
'this file is licensed under the GPL license version2 only '
'or any other version. You can redistribute this file under '
'this or any other license.')
r1 = Rule(text_file='r1', license_expression='apache-1.1', stored_text=text)
idx = index.LicenseIndex([r1])
querys = (
'this this file is licensed under the GPL license version2 only '
+' big ' +
'or any other version. You can redistribute this file under '
'this or any other license. that')
match = idx.match(query_string=querys)[0]
assert match.score() < 100
def test_LicenseMatch_matches_only_when_all_key_phrases_are_present(self):
text_r1 = (
'License '
'Distributed under the {{MIT License}}. See LICENSE for {{more information}}.'
'You can redistribute this file under this or any other license.')
r1 = Rule(text_file='r1', license_expression='mit', stored_text=text_r1)
text_r2 = (
'License '
'Distributed under the {{GPL License}} License. See LICENSE for {{more information}}.'
'You can redistribute this file under this or any other license.')
r2 = Rule(text_file='r2', license_expression='gpl', stored_text=text_r2)
idx = index.LicenseIndex([r1, r2])
querys = (
'License '
'Distributed under the Apache License. See LICENSE for more information.'
'You can redistribute this file under this or any other license.')
matches = idx.match(query_string=querys)
assert not matches
def test_LicenseMatch_matches_only_when_all_key_phrases_are_present_in_order(self):
text_r1 = (
'License '
'Distributed under the {{MIT License}}. See LICENSE for more information. '
'{{You can redistribute this file}} under this or any other license. '
)
r1 = Rule(text_file='r1', license_expression='mit', stored_text=text_r1)
text_r2 = 'Foo bar'
r2 = Rule(text_file='r2', license_expression='gpl', stored_text=text_r2)
idx = index.LicenseIndex([r1, r2])
querys = (
'License '
'Distributed under the License MIT. See LICENSE for more information. '
'You can redistribute this file under this or any other license. '
' and otherwise foo bar'
)
matches = idx.match(query_string=querys)
assert len(matches) == 1
assert matches[0].rule == r2
def test_LicenseMatch_matches_only_when_key_phrases_are_uninterrupted_by_unknown(self):
text_r1 = (
'License '
'Distributed under the {{MIT License}}. See LICENSE for more information.'
'You can redistribute this file under this or any other license.')
r1 = Rule(text_file='r1', license_expression='mit', stored_text=text_r1)
text_r2 = (
'License '
'Distributed under the BSD License. See LICENSE for more information.'
'You can redistribute this file under this or any other license.')
r2 = Rule(text_file='r2', license_expression='gpl', stored_text=text_r2)
idx = index.LicenseIndex([r1, r2])
querys = (
'See LICENSE for more information, and also you can redistribute this file under this or any other license.'
'License '
'Distributed under the MIT, foobar License. See LICENSE or website for more information.'
'You can redistribute this file under this or any other license.'
)
matches = idx.match(query_string=querys)
assert len(matches) == 1
assert matches[0].rule == r2
def test_LicenseMatch_matches_only_when_key_phrases_are_uninterrupted_by_stopword(self):
text_r1 = (
'License '
'Distributed under the {{MIT License}}. See LICENSE for more information.'
'You can redistribute this file under this or any other license.')
r1 = Rule(text_file='r1', license_expression='mit', stored_text=text_r1)
text_r2 = (
'License '
'Distributed under the BSD License. See LICENSE for more information.'
'You can redistribute this file under this or any other license.')
r2 = Rule(text_file='r2', license_expression='gpl', stored_text=text_r2)
idx = index.LicenseIndex([r1, r2])
querys = (
'See LICENSE for more information, and also you can redistribute this file under this or any other license.'
'License '
'Distributed under the MIT, a License. See LICENSE or website for more information.'
# ^ stopword ^
'You can redistribute this file under this or any other license.'
)
matches = idx.match(query_string=querys)
assert len(matches) == 1
assert matches[0].rule == r2
def test_LicenseMatch_matches_key_phrases_aho_with_exact_match_selects_key_phrase_match(self):
text_r1 = (
'License '
'Distributed under the {{MIT License}}. See LICENSE for more information.'
)
r1 = Rule(text_file='r1', license_expression='mit', stored_text=text_r1)
text_r2 = (
'License '
'Distributed under the {{BSD License}}. See LICENSE for more information.'
'You can redistribute this file under this or any other license.')
r2 = Rule(text_file='r2', license_expression='bsd', stored_text=text_r2)
idx = index.LicenseIndex([r1, r2])
querys = (
'License '
'Distributed under the MIT License. See LICENSE for more information.'
'You can redistribute this file under this or any other license.'
)
matches = idx.match(query_string=querys, _skip_hash_match=True)
assert len(matches) == 1
assert matches[0].rule == r1
def test_LicenseMatch_matches_only_when_key_phrase_is_uninterrupted(self):
text_r1 = (
'licensed under the '
'{{Creative Commons Attribution 4.0 License}} '
'(the "License"); '
' this is a license with has several interesting characteristics '
)
r1 = Rule(text_file='r1', license_expression='keyphrase', stored_text=text_r1)
text_r2 = (
'licensed under the '
'Creative Commons Attribution 4.0 License '
'(the "License"); '
' this is a license that has several interesting characteristics also '
)
r2 = Rule(text_file='r2', license_expression='plain', stored_text=text_r2)
legalese = set(['licensed', 'license', 'attribution', ])
idx = index.LicenseIndex([r1, r2], _legalese=legalese)
assert r1.key_phrase_spans == [Span(3, 8)]
assert r2.key_phrase_spans == []
# NonCommercial and ShareAlike are "unknown" words here
# therefore we should match r2 as as a sequence and not r1 because the
# key phrase are interrupted
querys = (
'This work is '
# 0 UW 1
'licensed under the '
# 2 3 4
'Creative Commons Attribution-Share Alike 4.0 License '
# 5 6 7 UW UW 8 9 10
'(the "License"). '
# 11 12
'this is a license that has several interesting characteristics FOO'
# 13 14 SW 15 16 17 18 19 20 UW 21
)
matches = idx.match(query_string=querys)
assert len(matches) == 1
match = matches[0]
assert match.query.unknowns_by_pos == {0: 1, 7: 2, 20: 1}
assert match.qspan == Span(2, 20)
itokens = [idx.tokens_by_tid[i] for i in match.itokens(idx)]
assert itokens == [
'licensed',
'under',
'the',
'creative',
'commons',
'attribution',
'4',
'0',
'license',
'the',
'license',
'this',
'is',
'license',
'that',
'has',
'several',
'interesting',
'characteristics',
]
assert match.rule == r2
class TestMergeMatches(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def test_merge_does_merge_non_contiguous_matches_in_sequence(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
m2 = LicenseMatch(rule=r1, qspan=Span(4, 6), ispan=Span(4, 6))
m5 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
results = merge_matches([m1, m2, m5])
assert results == [LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))]
def test_merge_does_not_merge_overlapping_matches_of_different_rules_with_different_licensing(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl2')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
m2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6))
results = merge_matches([m1, m2])
assert results == [m1, m2]
def test_merge_does_merge_overlapping_matches_of_same_rules_if_in_sequence(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
m2 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
results = merge_matches([m1, m2])
assert results == [LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))]
def test_merge_does_not_merge_overlapping_matches_of_same_rules_if_in_sequence_with_gaps(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
r1.length = 50
m1 = LicenseMatch(rule=r1, qspan=Span(1, 3), ispan=Span(1, 3))
m2 = LicenseMatch(rule=r1, qspan=Span(14, 20), ispan=Span(4, 10))
expected = [LicenseMatch(rule=r1, qspan=Span(1, 3) | Span(14, 20), ispan=Span(1, 10))]
results = merge_matches([m1, m2])
assert results == expected
def test_merge_does_not_merge_overlapping_matches_of_same_rules_if_in_sequence_with_gaps_for_long_match(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
r1.length = 20
m1 = LicenseMatch(rule=r1, qspan=Span(1, 10), ispan=Span(1, 10))
m2 = LicenseMatch(rule=r1, qspan=Span(14, 20), ispan=Span(14, 20))
expected = [LicenseMatch(rule=r1, qspan=Span(1, 10) | Span(14, 20), ispan=Span(1, 10) | Span(14, 20))]
results = merge_matches([m1, m2])
assert results == expected
def test_merge_does_not_merge_overlapping_matches_of_same_rules_if_in_not_sequence(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(1, 3), ispan=Span(1, 3))
m2 = LicenseMatch(rule=r1, qspan=Span(14, 20), ispan=Span(1, 3))
matches = merge_matches([m1, m2])
assert sorted(matches) == sorted([m1, m2])
def test_merge_does_not_merge_contained_matches_of_different_rules_with_same_licensing(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
m2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6))
matches = merge_matches([m1, m2])
assert sorted(matches) == sorted([m1, m2])
def test_files_does_filter_contained_matches_of_different_rules_with_same_licensing(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
m2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6))
matches, discarded = filter_contained_matches([m1, m2])
assert matches == [m2]
assert discarded == [m1]
def test_merge_does_not_merge_overlapping_matches_with_same_licensings(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
overlap = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
same_span1 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
same_span2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6))
result = merge_matches([overlap, same_span1, same_span2])
expected = [
LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6)),
LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6)),
]
assert sorted(result) == sorted(expected)
def test_filter_contained_matches_only_filter_contained_matches_with_same_licensings(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
overlap = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
same_span1 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
same_span2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6))
matches, discarded = filter_contained_matches([overlap, same_span1, same_span2])
assert matches == [overlap, same_span1]
assert discarded
def test_filter_overlapping_matches_does_filter_overlapping_matches_with_same_licensings(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
overlap = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
same_span1 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
same_span2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6))
matches, discarded = filter_overlapping_matches([overlap, same_span1, same_span2])
assert matches == [overlap]
assert discarded
def test_filter_contained_matches_prefers_longer_overlapping_matches(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
overlap = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
same_span1 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
same_span2 = LicenseMatch(rule=r2, qspan=Span(1, 8), ispan=Span(1, 8))
matches, discarded = filter_contained_matches([overlap, same_span1, same_span2])
assert matches == [overlap, same_span2]
assert discarded
def test_filter_overlapping_matches_prefers_longer_overlapping_matches(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
overlap = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
same_span1 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
same_span2 = LicenseMatch(rule=r2, qspan=Span(1, 8), ispan=Span(1, 8))
matches, discarded = filter_overlapping_matches([overlap, same_span1, same_span2])
assert matches == [same_span2]
assert discarded
def test_merge_contiguous_touching_matches_in_sequence(self):
r1 = Rule(stored_text='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
m2 = LicenseMatch(rule=r1, qspan=Span(3, 6), ispan=Span(3, 6))
result = merge_matches([m1, m2])
match = result[0]
assert match == LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))
def test_merge_contiguous_contained_matches(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
m2 = LicenseMatch(rule=r1, qspan=Span(3, 6), ispan=Span(3, 6))
m5 = LicenseMatch(rule=r1, qspan=Span(7, 8), ispan=Span(7, 8))
result = merge_matches([m1, m2, m5])
assert result == [LicenseMatch(rule=r1, qspan=Span(0, 8), ispan=Span(0, 8))]
def test_merge_should_not_merge_repeated_matches_out_of_sequence(self):
rule = Rule(text_file='gpl-2.0_49.RULE', license_expression=u'gpl-2.0')
rule.rid = 2615
m1 = LicenseMatch(rule=rule, matcher='chunk1', qspan=Span(0, 7), ispan=Span(0, 7))
m2 = LicenseMatch(rule=rule, matcher='chunk2', qspan=Span(8, 15), ispan=Span(0, 7))
m3 = LicenseMatch(rule=rule, matcher='chunk3', qspan=Span(16, 23), ispan=Span(0, 7))
result = merge_matches([m1, m2, m3])
assert result == [m1, m2, m3]
def test_merge_merges_contained_and_overlapping_match(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
contained = LicenseMatch(rule=r1, qspan=Span(1, 4), ispan=Span(1, 4))
overlapping = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
assert contained in overlapping
assert contained in m1
result = merge_matches([m1, contained, overlapping])
expected = [LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))]
assert result == expected
def test_merge_does_not_merge_multiple_contained_matches_across_rules(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
contained1 = LicenseMatch(rule=r2, qspan=Span(1, 2), ispan=Span(1, 2))
r3 = Rule(text_file='r3', license_expression='apache-2.0 OR gpl')
contained2 = LicenseMatch(rule=r3, qspan=Span(3, 4), ispan=Span(3, 4))
r5 = Rule(text_file='r5', license_expression='apache-2.0 OR gpl')
m5 = LicenseMatch(rule=r5, qspan=Span(1, 6), ispan=Span(1, 6))
result = merge_matches([m1, contained1, contained2, m5])
assert sorted(result) == sorted([m1, contained1, contained2, m5])
def test_filter_contained_matches_does_filter_across_rules(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
contained1 = LicenseMatch(rule=r2, qspan=Span(1, 2), ispan=Span(1, 2))
r3 = Rule(text_file='r3', license_expression='apache-2.0 OR gpl')
contained2 = LicenseMatch(rule=r3, qspan=Span(3, 4), ispan=Span(3, 4))
r5 = Rule(text_file='r5', license_expression='apache-2.0 OR gpl')
m5 = LicenseMatch(rule=r5, qspan=Span(1, 6), ispan=Span(1, 6))
result, _discarded = filter_contained_matches([m1, contained1, contained2, m5])
assert result == [m1, m5]
def test_filter_overlapping_matches_does_not_filter_multiple_contained_matches_across_rules(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
contained1 = LicenseMatch(rule=r2, qspan=Span(1, 2), ispan=Span(1, 2))
r3 = Rule(text_file='r3', license_expression='apache-2.0 OR gpl')
contained2 = LicenseMatch(rule=r3, qspan=Span(3, 4), ispan=Span(3, 4))
r5 = Rule(text_file='r5', license_expression='apache-2.0 OR gpl')
m5 = LicenseMatch(rule=r5, qspan=Span(1, 6), ispan=Span(1, 6))
result, _discarded = filter_overlapping_matches([m1, contained1, contained2, m5])
assert result == [m1]
def test_filter_contained_matches_filters_multiple_contained_matches(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
contained1 = LicenseMatch(rule=r2, qspan=Span(1, 2), ispan=Span(1, 2))
r3 = Rule(text_file='r3', license_expression='apache-2.0 OR gpl')
contained2 = LicenseMatch(rule=r3, qspan=Span(3, 4), ispan=Span(3, 4))
r5 = Rule(text_file='r5', license_expression='apache-2.0 OR gpl')
m5 = LicenseMatch(rule=r5, qspan=Span(1, 6), ispan=Span(1, 6))
matches, discarded = filter_contained_matches([m1, contained1, contained2, m5])
assert matches == [m1, m5]
assert sorted(discarded) == sorted([contained1, contained2, ])
def test_filter_overlapping_matches_filters_multiple_contained_matches(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
contained1 = LicenseMatch(rule=r2, qspan=Span(1, 2), ispan=Span(1, 2))
r3 = Rule(text_file='r3', license_expression='apache-2.0 OR gpl')
contained2 = LicenseMatch(rule=r3, qspan=Span(3, 4), ispan=Span(3, 4))
r5 = Rule(text_file='r5', license_expression='apache-2.0 OR gpl')
m5 = LicenseMatch(rule=r5, qspan=Span(1, 6), ispan=Span(1, 6))
matches, discarded = filter_overlapping_matches([m1, contained1, contained2, m5])
assert matches == [m1]
assert sorted(discarded) == sorted([m5, contained1, contained2, ])
def test_merge_does_not_merge_matches_with_same_spans_if_licenses_are_identical_but_rule_differ(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
m5 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
r2 = Rule(text_file='r2', license_expression='apache-2.0')
m2 = LicenseMatch(rule=r2, qspan=Span(0, 2), ispan=Span(0, 2))
matches = merge_matches([m1, m2, m5])
assert sorted(matches) == sorted([LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6)), m2])
def test_filter_contained_matches_filters_matches_with_same_spans_if_licenses_are_identical_but_rule_differ(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
m5 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
r2 = Rule(text_file='r2', license_expression='apache-2.0')
m2 = LicenseMatch(rule=r2, qspan=Span(0, 2), ispan=Span(0, 2))
matches, discarded = filter_contained_matches([m1, m2, m5])
assert matches == [m1, m5]
assert discarded
def test_filter_overlapping_matches_filters_matches_with_same_spans_if_licenses_are_identical_but_rule_differ(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
m5 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
r2 = Rule(text_file='r2', license_expression='apache-2.0')
m2 = LicenseMatch(rule=r2, qspan=Span(0, 2), ispan=Span(0, 2))
matches, discarded = filter_overlapping_matches([m1, m2, m5])
assert matches == [m5]
assert discarded
def test_merge_then_filter_matches_with_same_spans_if_licenses_are_identical_but_rule_differ(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
m5 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
r2 = Rule(text_file='r2', license_expression='apache-2.0')
m2 = LicenseMatch(rule=r2, qspan=Span(0, 2), ispan=Span(0, 2))
matches = merge_matches([m1, m2, m5])
matches, discarded = filter_contained_matches(matches)
assert matches == [LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))]
assert discarded
def test_merge_overlapping_matches(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
m2 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
matches = merge_matches([m1, m2])
assert matches == [LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))]
def test_merge_does_not_merges_matches_with_same_spans_if_licenses_are_the_same_but_have_different_licenses_ordering(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
m5 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
r2 = Rule(text_file='r2', license_expression='gpl OR apache-2.0')
m2 = LicenseMatch(rule=r2, qspan=Span(0, 2), ispan=Span(0, 2))
result = merge_matches([m1, m2, m5])
assert sorted(result) == sorted([LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6)), m2])
def test_merge_does_not_merges_matches_with_same_spans_if_rules_are_different(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
m5 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
m2 = LicenseMatch(rule=r2, qspan=Span(0, 2), ispan=Span(0, 2))
result = merge_matches([m1, m2, m5])
assert sorted(result) == sorted([LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6)), m2])
def test_merge_merges_duplicate_matches(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 8), ispan=Span(0, 8))
m2 = LicenseMatch(rule=r1, qspan=Span(0, 8), ispan=Span(0, 8))
matches = merge_matches([m1, m2])
assert (matches == [m1]) or (matches == [m2])
def test_merge_does_not_merge_overlapping_matches_in_sequence_with_assymetric_overlap(self):
r1 = Rule(text_file='r1', license_expression=u'lgpl-2.0-plus')
# ---> merge_matches: current: LicenseMatch<'3-seq', lines=(9, 28), 'lgpl-2.0-plus_9.RULE', u'lgpl-2.0-plus', choice=False, score=87.5, len=126, ilen=126, hilen=20, rlen=144, qreg=(50, 200), ireg=(5, 142), qspan=Span(50, 90)|Span(92, 142)|Span(151, 182)|Span(199, 200), ispan=Span(5, 21)|Span(23, 46)|Span(48, 77)|Span(79, 93)|Span(95, 100)|Span(108, 128)|Span(130, 142), hispan=Span(10)|Span(14)|Span(18)|Span(24)|Span(27)|Span(52)|Span(57)|Span(61)|Span(65, 66)|Span(68)|Span(70)|Span(80)|Span(88)|Span(96)|Span(111)|Span(113)|Span(115)|Span(131)|Span(141)>
# ---> merge_matches: next: LicenseMatch<'2-aho', lines=(28, 44), 'lgpl-2.0-plus_9.RULE', u'lgpl-2.0-plus', choice=False, score=100.0, len=144, ilen=144, hilen=21, rlen=144, qreg=(198, 341), ireg=(0, 143), qspan=Span(198, 341), ispan=Span(0, 143), hispan=Span(1)|Span(10)|Span(14)|Span(18)|Span(24)|Span(27)|Span(52)|Span(57)|Span(61)|Span(65, 66)|Span(68)|Span(70)|Span(80)|Span(88)|Span(96)|Span(111)|Span(113)|Span(115)|Span(131)|Span(141)>
# ---> ###merge_matches: next overlaps in sequence current, merged as new: LicenseMatch<'3-seq 2-aho', lines=(9, 44), 'lgpl-2.0-plus_9.RULE', u'lgpl-2.0-plus', choice=False, score=100.0, len=268, hilen=21, rlen=144, qreg=(50, 341), ireg=(0, 143), qspan=Span(50, 90)|Span(92, 142)|Span(151, 182)|Span(198, 341), ispan=Span(0, 143), his
# ---> merge_matches: current: len=126, hilen=20, rlen=144, qreg=(50, 200), ireg=(5, 142)
# ---> merge_matches: next: len=144, hilen=21, rlen=144, qreg=(198, 341), ireg=(0, 143)
m1 = LicenseMatch(
rule=r1,
qspan=Span(50, 90) | Span(92, 142) | Span(151, 182) | Span(199, 200),
ispan=
Span(5, 21) | Span(23, 46) | Span(48, 77) | Span(79, 93) |
Span(95, 100) | Span(108, 128) | Span(130, 142),
hispan=
Span(10) | Span(14) | Span(18) | Span(24) | Span(27) | Span(52) |
Span(57) | Span(61) | Span(65, 66) | Span(68) | Span(70) | Span(80) |
Span(88) | Span(96) | Span(111) | Span(113) | Span(115) | Span(131) |
Span(141),
)
m2 = LicenseMatch(
rule=r1,
qspan=Span(198, 341),
ispan=Span(0, 143),
hispan=
Span(1) | Span(10) | Span(14) | Span(18) | Span(24) | Span(27) |
Span(52) | Span(57) | Span(61) | Span(65, 66) | Span(68) | Span(70) |
Span(80) | Span(88) | Span(96) | Span(111) | Span(113) | Span(115) |
Span(131) | Span(141))
matches = merge_matches([m1, m2])
assert matches == [m1, m2]
class TestLicenseMatchFilter(FileBasedTesting):
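    """
    Tests for the match filters as exercised below: filter_contained_matches()
    discards matches whose qspan is contained in another match,
    filter_overlapping_matches() is stricter and also discards large overlaps,
    restore_non_overlapping() brings back discarded matches that do not
    overlap the kept ones, filter_matches_missing_key_phrases() enforces key
    phrases, and get_matching_regions() groups matches into regions.
    """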
test_data_dir = TEST_DATA_DIR
def test_filter_contained_matches_matches_filters_multiple_nested_contained_matches_and_large_overlapping(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
large_overlap = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
contained = LicenseMatch(rule=r1, qspan=Span(1, 4), ispan=Span(1, 4))
in_contained = LicenseMatch(rule=r1, qspan=Span(2, 3), ispan=Span(2, 3))
result, discarded = filter_contained_matches([m1, contained, in_contained, large_overlap])
assert result == [m1, large_overlap]
assert discarded == [contained, in_contained]
def test_filter_overlapping_matches_matches_filters_multiple_nested_contained_matches_and_large_overlapping(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
large_overlap = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
contained = LicenseMatch(rule=r1, qspan=Span(1, 4), ispan=Span(1, 4))
in_contained = LicenseMatch(rule=r1, qspan=Span(2, 3), ispan=Span(2, 3))
result, discarded = filter_overlapping_matches([m1, contained, in_contained, large_overlap])
assert result == [m1]
assert discarded
def test_filter_matches_filters_non_contiguous_or_overlapping__but_contained_matches(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(1, 2), ispan=Span(1, 2))
m2 = LicenseMatch(rule=r1, qspan=Span(3, 6), ispan=Span(3, 6))
m3 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
m4 = LicenseMatch(rule=r1, qspan=Span(0, 7), ispan=Span(0, 7))
m5 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6))
result, discarded = filter_contained_matches([m1, m2, m3, m4, m5])
assert result == [m4]
assert discarded
def test_filter_matches_filters_non_contiguous_or_overlapping_contained_matches_with_touching_boundaries(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
r2 = Rule(text_file='r2', license_expression='apache-2.0 OR gpl')
m2 = LicenseMatch(rule=r2, qspan=Span(3, 7), ispan=Span(3, 7))
r3 = Rule(text_file='r3', license_expression='apache-2.0 OR gpl')
m3 = LicenseMatch(rule=r3, qspan=Span(0, 6), ispan=Span(0, 6))
r6 = Rule(text_file='r6', license_expression='apache-2.0 OR gpl')
m6 = LicenseMatch(rule=r6, qspan=Span(1, 7), ispan=Span(1, 7))
r5 = Rule(text_file='r5', license_expression='apache-2.0 OR gpl')
m5 = LicenseMatch(rule=r5, qspan=Span(1, 6), ispan=Span(1, 6))
r4 = Rule(text_file='r4', license_expression='apache-2.0 OR gpl')
m4 = LicenseMatch(rule=r4, qspan=Span(0, 7), ispan=Span(0, 7))
result, discarded = filter_contained_matches([m1, m2, m3, m4, m5, m6])
assert result == [m4]
assert discarded
def test_filter_contained_matches_matches_does_filter_matches_with_contained_spans_if_licenses_are_different(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
r2 = Rule(text_file='r2', license_expression='apache-2.0')
m2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6))
r3 = Rule(text_file='r3', license_expression='apache-1.1')
m3 = LicenseMatch(rule=r3, qspan=Span(0, 2), ispan=Span(0, 2))
matches, discarded = filter_contained_matches([m1, m2, m3])
assert matches == [m1, m2]
assert discarded
def test_filter_overlapping_matches_matches_does_filter_matches_with_contained_spans_if_licenses_are_different(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
r2 = Rule(text_file='r2', license_expression='apache-2.0')
m2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6))
r3 = Rule(text_file='r3', license_expression='apache-1.1')
m3 = LicenseMatch(rule=r3, qspan=Span(0, 2), ispan=Span(0, 2))
matches, discarded = filter_overlapping_matches([m1, m2, m3])
assert matches == [m2]
assert discarded
def test_filter_overlapping_matches_matches_filters_matches_with_medium_overlap_only_if_license_are_the_same(self):
r1 = Rule(text_file='r1', license_expression='apache-1.1')
m1 = LicenseMatch(rule=r1, qspan=Span(0, 10), ispan=Span(0, 10))
m2 = LicenseMatch(rule=r1, qspan=Span(3, 11), ispan=Span(3, 11))
r2 = Rule(text_file='r2', license_expression='gpl OR apache-2.0')
m3 = LicenseMatch(rule=r2, qspan=Span(7, 15), ispan=Span(7, 15))
result, discarded = filter_overlapping_matches([m1, m2, m3])
assert sorted(result) == sorted([m1, m3])
assert discarded
def test_filter_matches_handles_interlaced_matches_with_overlap_and_same_license(self):
rule_dir = self.get_test_loc('match_filter/rules')
idx = index.LicenseIndex(load_rules(rule_dir))
rules = {r.identifier: r for r in idx.rules_by_rid}
query_loc = self.get_test_loc('match_filter/query')
matches = idx.match(location=query_loc)
expected = [
# filtered: LicenseMatch(matcher='3-seq', rule=rules['rule1.RULE'], qspan=Span(4, 47) | Span(50, 59), ispan=Span(1, 53)),
LicenseMatch(matcher='2-aho', rule=rules['rule2.RULE'], qspan=Span(24, 85), ispan=Span(0, 61)),
]
assert matches == expected
def test_filter_contained_matches_matches_filters_matches_does_not_discard_non_overlapping(self):
r1 = Rule(text_file='r1', license_expression='apache-1.1')
r2 = Rule(text_file='r2', license_expression='gpl OR apache-2.0')
r3 = Rule(text_file='r3', license_expression='gpl')
# we have these matches
# 1. ABC
# 2. ABCDEDFG
# 3. DEFCGJLJLJKLJJLKJLJJJLJLJLJJL
        # we do not want 1. to be discarded in the final results
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
m2 = LicenseMatch(rule=r2, qspan=Span(0, 40), ispan=Span(0, 40))
m3 = LicenseMatch(rule=r3, qspan=Span(6, 120), ispan=Span(6, 120))
result, discarded = filter_contained_matches([m2, m1, m3])
assert result == [m2, m3]
assert discarded == [m1]
def test_filter_overlapping_matches_matches_filters_matches_does_not_discard_non_overlapping(self):
r1 = Rule(text_file='r1', license_expression='apache-1.1')
r2 = Rule(text_file='r2', license_expression='gpl OR apache-2.0')
r3 = Rule(text_file='r3', license_expression='gpl')
# we have these matches
# 1. ABC
# 2. ABCDEDFG
# 3. DEFCGJLJLJKLJJLKJLJJJLJLJLJJL
        # we do not want 1. to be discarded in the final results
m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5))
m2 = LicenseMatch(rule=r2, qspan=Span(0, 40), ispan=Span(0, 40))
m3 = LicenseMatch(rule=r3, qspan=Span(6, 120), ispan=Span(6, 120))
result, discarded = filter_overlapping_matches([m2, m1, m3])
assert result == [m3]
assert discarded == [m1, m2]
result, discarded = restore_non_overlapping(result, discarded)
assert result == [m1]
assert discarded == [m2]
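    # The tests below exercise filter_matches_missing_key_phrases(): a match
    # is kept only when every key phrase Span of its rule is fully present and
    # uninterrupted in the match, and is discarded when the key phrase region
    # is only partially matched or is broken by unknown words, stopwords or
    # gaps in the qspan.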
def test_filter_key_phrases_keeps_matches_where_key_phrase_spans_is_fully_container_in_ispan(self):
idx = index.LicenseIndex()
query = Query(query_string="Lorum ipsum", idx=idx)
r1 = Rule(text_file='r1', license_expression='apache-1.1', key_phrase_spans=[Span(2, 4)])
match_key_phrase_fully_contained = LicenseMatch(rule=r1, query=query, qspan=Span(0, 5), ispan=Span(0, 5))
match_key_phrase_fully_outside = LicenseMatch(rule=r1, query=query, qspan=Span(5, 8), ispan=Span(5, 8))
match_key_phrase_partially_contained = LicenseMatch(rule=r1, query=query, qspan=Span(0, 3), ispan=Span(0, 2))
match_key_phrase_fully_containing = LicenseMatch(rule=r1, query=query, qspan=Span(3), ispan=Span(3))
kept, discarded = filter_matches_missing_key_phrases([
match_key_phrase_fully_contained,
match_key_phrase_fully_outside,
match_key_phrase_partially_contained,
match_key_phrase_fully_containing
])
assert kept == [
match_key_phrase_fully_contained
]
assert discarded == [
match_key_phrase_fully_outside,
match_key_phrase_partially_contained,
match_key_phrase_fully_containing
]
def test_filter_key_phrases_discards_matches_where_qspan_intersects_with_unknown_or_stopwords(self):
idx = index.LicenseIndex()
query = Query(query_string="Lorum ipsum", idx=idx)
query.unknowns_by_pos = {12: 1}
query.stopwords_by_pos = {23: 1}
r1 = Rule(text_file='r1', license_expression='apache-1.1', key_phrase_spans=[Span(2, 4)])
match_key_phrase_fully_contained = LicenseMatch(rule=r1, query=query, qspan=Span(0, 5), ispan=Span(0, 5))
match_qspan_intersects_with_unknowns = LicenseMatch(rule=r1, query=query, qspan=Span(10, 15), ispan=Span(0, 5))
match_qspan_intersects_with_stopwords = LicenseMatch(rule=r1, query=query, qspan=Span(20, 25), ispan=Span(0, 5))
kept, discarded = filter_matches_missing_key_phrases([
match_key_phrase_fully_contained,
match_qspan_intersects_with_unknowns,
match_qspan_intersects_with_stopwords,
])
assert kept == [
match_key_phrase_fully_contained
]
assert discarded == [
match_qspan_intersects_with_unknowns,
match_qspan_intersects_with_stopwords
]
def test_filter_key_phrases_discards_matches_where_key_phrase_is_interruped_in_qspan(self):
idx = index.LicenseIndex()
query = Query(query_string="Lorum ipsum", idx=idx)
query.unknowns_by_pos = {}
r1 = Rule(
text_file='r1',
license_expression='apache-1.1',
key_phrase_spans=[Span(12, 14)],
)
qspan_ispan_same_pos = LicenseMatch(
rule=r1, query=query,
qspan=Span(10, 15), ispan=Span(10, 15)
)
qspan_with_offset = LicenseMatch(
rule=r1, query=query,
qspan=Span(20, 25), ispan=Span(10, 15)
)
qspan_non_contiguous = LicenseMatch(
rule=r1, query=query,
qspan=Span([20, 21, 22, 23, 25]), ispan=Span(10, 15)
)
kept, discarded = filter_matches_missing_key_phrases([
qspan_ispan_same_pos,
qspan_with_offset,
qspan_non_contiguous
])
assert kept == [
qspan_ispan_same_pos,
qspan_with_offset
]
assert discarded == [
qspan_non_contiguous,
]
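    # The tests below probe get_matching_regions() and the word-count and
    # line-count gap thresholds it uses to decide whether matches fall into a
    # single region or into separate regions.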
def test_get_matching_regions_15_words(self):
rule_dir = self.get_test_loc('match_regions/rules')
idx = index.LicenseIndex(load_rules(rule_dir))
query_string = '''GPLv2
This source code is licensed under the MIT
GPLv2
under both the GPLv2 and Apache 2.0 License
the under both the under both the under both the under both the under both
GPL v2 license
This source code is licensed under the MIT
'''
matches = idx.match(query_string=query_string)
matched_rules = [m.rule.identifier for m in matches]
expected_rules = [
'gpl-2.0_bare_single_word.RULE',
'mit_101.RULE',
'gpl-2.0_bare_single_word.RULE',
'gpl-2.0_or_apache-2.0_2.RULE',
'gpl-2.0_bare_single_word2.RULE',
'mit_101.RULE',
]
assert matched_rules == expected_rules
regions = get_matching_regions(matches)
expected_regions = [Span(0, 18), Span(34, 44)]
assert regions == expected_regions
assert matches[0].qspan in regions[0]
assert matches[1].qspan in regions[0]
assert matches[2].qspan in regions[0]
assert matches[3].qspan in regions[0]
assert matches[4].qspan in regions[1]
assert matches[5].qspan in regions[1]
def test_get_matching_regions_10_words_are_not_enough(self):
rule_dir = self.get_test_loc('match_regions/rules')
idx = index.LicenseIndex(load_rules(rule_dir))
query_string = '''GPLv2
This source code is licensed under the MIT
GPLv2
under both the GPLv2 and Apache 2.0 License
the under both the under foo bar both the under
GPL v2 license
This source code is licensed under the MIT
'''
matches = idx.match(query_string=query_string)
matched_rules = [m.rule.identifier for m in matches]
expected_rules = [
'gpl-2.0_bare_single_word.RULE',
'mit_101.RULE',
'gpl-2.0_bare_single_word.RULE',
'gpl-2.0_or_apache-2.0_2.RULE',
'gpl-2.0_bare_single_word2.RULE',
'mit_101.RULE',
]
assert matched_rules == expected_rules
regions = get_matching_regions(matches)
expected_regions = [Span(0, 37)]
assert regions == expected_regions
def test_get_matching_regions_11_words_are_enough(self):
rule_dir = self.get_test_loc('match_regions/rules')
idx = index.LicenseIndex(load_rules(rule_dir))
query_string = '''GPLv2
This source code is licensed under the MIT
GPLv2
under both the GPLv2 and Apache 2.0 License
the under both the under both the under both the under
GPL v2 license
This source code is licensed under the MIT
'''
matches = idx.match(query_string=query_string)
matched_rules = [m.rule.identifier for m in matches]
expected_rules = [
'gpl-2.0_bare_single_word.RULE',
'mit_101.RULE',
'gpl-2.0_bare_single_word.RULE',
'gpl-2.0_or_apache-2.0_2.RULE',
'gpl-2.0_bare_single_word2.RULE',
'mit_101.RULE',
]
assert matched_rules == expected_rules
regions = get_matching_regions(matches)
expected_regions = [Span(0, 18), Span(30, 40)]
assert regions == expected_regions
assert matches[0].qspan in regions[0]
assert matches[1].qspan in regions[0]
assert matches[2].qspan in regions[0]
assert matches[3].qspan in regions[0]
assert matches[4].qspan in regions[1]
assert matches[5].qspan in regions[1]
def test_get_matching_regions_2_lines_are_not_enough(self):
rule_dir = self.get_test_loc('match_regions/rules')
idx = index.LicenseIndex(load_rules(rule_dir))
query_string = '''GPLv2
This source code is licensed under the MIT
GPLv2
under both the GPLv2 and Apache 2.0 License
one
two
GPL v2 license
This source code is licensed under the MIT
'''
matches = idx.match(query_string=query_string)
matched_rules = [m.rule.identifier for m in matches]
expected_rules = [
'gpl-2.0_bare_single_word.RULE',
'mit_101.RULE',
'gpl-2.0_bare_single_word.RULE',
'gpl-2.0_or_apache-2.0_2.RULE',
'gpl-2.0_bare_single_word2.RULE',
'mit_101.RULE',
]
assert matched_rules == expected_rules
regions = get_matching_regions(matches)
expected_regions = [Span(0, 29)]
assert regions == expected_regions
def test_get_matching_regions_2_lines_with_10_words_are_enough(self):
rule_dir = self.get_test_loc('match_regions/rules')
idx = index.LicenseIndex(load_rules(rule_dir))
query_string = '''GPLv2
This source code is licensed under the MIT
GPLv2
under both the GPLv2 and Apache 2.0 License
one
two three four five six seven eight nine ten
GPL v2 license
This source code is licensed under the MIT
'''
matches = idx.match(query_string=query_string)
matched_rules = [m.rule.identifier for m in matches]
expected_rules = [
'gpl-2.0_bare_single_word.RULE',
'mit_101.RULE',
'gpl-2.0_bare_single_word.RULE',
'gpl-2.0_or_apache-2.0_2.RULE',
'gpl-2.0_bare_single_word2.RULE',
'mit_101.RULE',
]
assert matched_rules == expected_rules
regions = get_matching_regions(matches)
expected_regions = [Span(0, 29)]
assert regions == expected_regions
def test_get_matching_regions_3_lines_enough(self):
rule_dir = self.get_test_loc('match_regions/rules')
idx = index.LicenseIndex(load_rules(rule_dir))
query_string = '''GPLv2
This source code is licensed under the MIT
GPLv2
under both the GPLv2 and Apache 2.0 License
one
two
three
GPL v2 license
This source code is licensed under the MIT
'''
matches = idx.match(query_string=query_string)
matched_rules = [m.rule.identifier for m in matches]
expected_rules = [
'gpl-2.0_bare_single_word.RULE',
'mit_101.RULE',
'gpl-2.0_bare_single_word.RULE',
'gpl-2.0_or_apache-2.0_2.RULE',
'gpl-2.0_bare_single_word2.RULE',
'mit_101.RULE',
]
assert matched_rules == expected_rules
regions = get_matching_regions(matches)
expected_regions = [Span(0, 18), Span(19, 29)]
assert regions == expected_regions
assert matches[0].qspan in regions[0]
assert matches[1].qspan in regions[0]
assert matches[2].qspan in regions[0]
assert matches[3].qspan in regions[0]
assert matches[4].qspan in regions[1]
assert matches[5].qspan in regions[1]
class TestLicenseMatchScore(FileBasedTesting):
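    """
    Tests for LicenseMatch.score() as exercised below: the score combines the
    stored rule relevance with how much of the rule is matched and how densely
    the match covers its query region, e.g. 42 matched tokens spread over a
    52-position region at relevance 100 score 42/52 * 100 == 80.77, and 3 of 6
    rule tokens at relevance 50 score 25.
    """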
test_data_dir = TEST_DATA_DIR
def test_LicenseMatch_score_100(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
r1.relevance = 100
r1.length = 3
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
assert m1.score() == 100
def test_LicenseMatch_score_50(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
r1.relevance = 50
r1.length = 3
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
assert m1.score() == 50
def test_LicenseMatch_score_25_with_stored_relevance(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
r1.relevance = 50
r1.length = 6
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
# NB we do not have a query here
assert m1.score() == 25
def test_LicenseMatch_score_0(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
r1.relevance = 0
r1.length = 6
m1 = LicenseMatch(rule=r1, qspan=Span(), ispan=Span())
assert m1.score() == 0
def test_LicenseMatch_score_0_relevance(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
r1.relevance = 0
r1.length = 6
m1 = LicenseMatch(rule=r1, qspan=Span(0, 2), ispan=Span(0, 2))
assert m1.score() == 0
def test_LicenseMatch_score_100_contiguous(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
r1.relevance = 100
r1.length = 42
m1 = LicenseMatch(rule=r1, qspan=Span(0, 41), ispan=Span(0, 41))
assert m1.score() == 100
def test_LicenseMatch_score_100_non_contiguous(self):
r1 = Rule(text_file='r1', license_expression='apache-2.0')
r1.relevance = 100
r1.length = 42
m1 = LicenseMatch(rule=r1, qspan=Span(0, 19) | Span(30, 51), ispan=Span(0, 41))
assert m1.score() == 80.77
def test_LicenseMatch_stopwords_are_treated_as_unknown_2484(self):
rules_dir = self.get_test_loc('stopwords/index/rules')
lics_dir = self.get_test_loc('stopwords/index/licenses')
rules = models.get_rules(licenses_data_dir=lics_dir, rules_data_dir=rules_dir)
idx = LicenseIndex(rules)
query_location = self.get_test_loc('stopwords/query.txt')
matches = idx.match(location=query_location)
results = [m.rule.identifier for m in matches]
assert results == ['gpl-1.0.bare.RULE', 'gpl-1.0.bare.RULE', 'gpl-1.0.bare.RULE']
class TestCollectLicenseMatchTexts(FileBasedTesting):
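    """
    Tests for collecting matched texts as exercised below:
    get_full_matched_text() and matched_text() return the query text covered
    by a match, optionally highlighting non-matched tokens or returning whole
    lines, while tokenize_matched_text() and reportable_tokens() expose the
    underlying Token stream used to build that text.
    """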
test_data_dir = TEST_DATA_DIR
def test_get_full_matched_text_base(self):
rule_text = u'''
Copyright [[some copyright]]
THIS IS FROM [[THE CODEHAUS]] AND CONTRIBUTORS
IN NO EVENT SHALL [[THE CODEHAUS]] OR ITS CONTRIBUTORS BE LIABLE
EVEN IF ADVISED OF THE [[POSSIBILITY OF SUCH]] DAMAGE
'''
rule = Rule(stored_text=rule_text, license_expression='test')
idx = index.LicenseIndex([rule])
querys = u'''
foobar 45 . Copyright 2003 (C) James. All Rights Reserved.
THIS IS FROM THE CODEHAUS AND CONTRIBUTORS
IN NO EVENT SHALL THE best CODEHAUS OR ITS CONTRIBUTORS BE LIABLE
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. chabada DAMAGE 12 ABC dasdasda .
'''
result = idx.match(query_string=querys)
assert len(result) == 1
match = result[0]
# Note that there is a trailing space in that string
expected = u"""Copyright [2003] ([C]) [James]. [All] [Rights] [Reserved].
THIS IS FROM THE CODEHAUS AND CONTRIBUTORS
IN NO EVENT SHALL THE [best] CODEHAUS OR ITS CONTRIBUTORS BE LIABLE
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """
matched_text = u''.join(
get_full_matched_text(match, query_string=querys, idx=idx, _usecache=False))
assert matched_text == expected
expected_nh = u"""Copyright 2003 (C) James. All Rights Reserved.
THIS IS FROM THE CODEHAUS AND CONTRIBUTORS
IN NO EVENT SHALL THE best CODEHAUS OR ITS CONTRIBUTORS BE LIABLE
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """
matched_text_nh = u''.join(
get_full_matched_text(
match, query_string=querys, idx=idx, _usecache=False, highlight=False))
assert matched_text_nh == expected_nh
expected_origin_text = u"""Copyright 2003 (C) James. All Rights Reserved.
THIS IS FROM THE CODEHAUS AND CONTRIBUTORS
IN NO EVENT SHALL THE best CODEHAUS OR ITS CONTRIBUTORS BE LIABLE
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """
origin_matched_text = u''.join(get_full_matched_text(
match,
query_string=querys,
idx=idx,
highlight_not_matched=u'%s',
))
assert origin_matched_text == expected_origin_text
def test_get_full_matched_text(self):
rule_text = u'''
Copyright [[some copyright]]
THIS IS FROM [[THE CODEHAUS]] AND CONTRIBUTORS
IN NO EVENT SHALL [[THE CODEHAUS]] OR ITS CONTRIBUTORS BE LIABLE
EVEN IF ADVISED OF THE [[POSSIBILITY OF SUCH]] DAMAGE
'''
rule = Rule(stored_text=rule_text, license_expression='test')
idx = index.LicenseIndex([rule])
querys = u'''
foobar 45 Copyright 2003 (C) James. All Rights Reserved.
THIS IS FROM THE CODEHAUS AND CONTRIBUTORS
IN NO EVENT SHALL THE best CODEHAUS OR ITS CONTRIBUTORS BE LIABLE
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. chabada DAMAGE 12 ABC
'''
result = idx.match(query_string=querys)
assert len(result) == 1
match = result[0]
# Note that there is a trailing space in that string
expected = u"""Copyright [2003] ([C]) [James]. [All] [Rights] [Reserved].
THIS IS FROM THE CODEHAUS AND CONTRIBUTORS
IN NO EVENT SHALL THE [best] CODEHAUS OR ITS CONTRIBUTORS BE LIABLE
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """
matched_text = u''.join(get_full_matched_text(match, query_string=querys, idx=idx, _usecache=False))
assert matched_text == expected
# the text is finally rstripped
matched_text = match.matched_text(_usecache=False)
assert matched_text == expected.rstrip()
# test again using some HTML with tags
# Note that there is a trailing space in that string
expected = u"""Copyright <br>2003</br> (<br>C</br>) <br>James</br>. <br>All</br> <br>Rights</br> <br>Reserved</br>.
THIS IS FROM THE CODEHAUS AND CONTRIBUTORS
IN NO EVENT SHALL THE <br>best</br> CODEHAUS OR ITS CONTRIBUTORS BE LIABLE
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """
matched_text = u''.join(get_full_matched_text(
match, query_string=querys, idx=idx, highlight_not_matched=u'<br>%s</br>', _usecache=False))
assert matched_text == expected
# test again using whole_lines
expected = u""" foobar 45 Copyright 2003 (C) James. All Rights Reserved.
THIS IS FROM THE CODEHAUS AND CONTRIBUTORS
IN NO EVENT SHALL THE best CODEHAUS OR ITS CONTRIBUTORS BE LIABLE
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. chabada DAMAGE 12 ABC\n"""
matched_text = u''.join(get_full_matched_text(
match, query_string=querys, idx=idx, highlight_not_matched=u'%s', whole_lines=True))
assert matched_text == expected
def test_get_full_matched_text_does_not_munge_underscore(self):
rule_text = 'MODULE_LICENSE_GPL'
rule = Rule(stored_text=rule_text, license_expression='test')
idx = index.LicenseIndex([rule])
querys = 'MODULE_LICENSE_GPL'
result = idx.match(query_string=querys)
assert len(result) == 1
match = result[0]
expected = 'MODULE_LICENSE_GPL'
matched_text = u''.join(get_full_matched_text(match, query_string=querys, idx=idx, _usecache=False))
assert matched_text == expected
def test_get_full_matched_text_does_not_munge_plus(self):
rule_text = 'MODULE_LICENSE_GPL+ +'
rule = Rule(stored_text=rule_text, license_expression='test')
idx = index.LicenseIndex([rule])
querys = 'MODULE_LICENSE_GPL+ +'
result = idx.match(query_string=querys)
assert len(result) == 1
match = result[0]
expected = 'MODULE_LICENSE_GPL+ +\n'
matched_text = u''.join(get_full_matched_text(match, query_string=querys, idx=idx, _usecache=False))
assert matched_text == expected
def test_tokenize_matched_text_does_cache_last_call_from_query_string_and_location(self):
dictionary = {'module': 0, 'license': 1, 'gpl+': 2}
location = None
query_string = 'the MODULE_LICENSE_GPL+ foobar'
result1 = tokenize_matched_text(location, query_string, dictionary)
result2 = tokenize_matched_text(location, query_string, dictionary)
assert result2 is result1
location = self.get_test_loc('matched_text/tokenize_matched_text_query.txt')
query_string = None
result3 = tokenize_matched_text(location, query_string, dictionary)
assert result3 is not result2
assert result3 == result2
result4 = tokenize_matched_text(location, query_string, dictionary)
assert result4 is result3
def test_tokenize_matched_text_does_return_correct_tokens(self):
querys = u'''
foobar 45 Copyright 2003 (C) James. All Rights Reserved. THIS
IS FROM THE CODEHAUS AND CONTRIBUTORS
'''
dictionary = dict(this=0, event=1, possibility=2, reserved=3, liable=5, copyright=6)
result = tokenize_matched_text(location=None, query_string=querys, dictionary=dictionary)
expected = [
Token(value=u'\n', line_num=1, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'foobar', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'45', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'Copyright', line_num=2, pos=0, is_text=True, is_matched=False, is_known=True),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'2003', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' (', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'C', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u') ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'James', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u'. ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'All', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'Rights', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'Reserved', line_num=2, pos=1, is_text=True, is_matched=False, is_known=True),
Token(value=u'. ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'THIS', line_num=2, pos=2, is_text=True, is_matched=False, is_known=True),
Token(value=u'\n', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u' ', line_num=3, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'IS', line_num=3, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=3, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'FROM', line_num=3, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=3, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'THE', line_num=3, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=3, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'CODEHAUS', line_num=3, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=3, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'AND', line_num=3, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=3, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'CONTRIBUTORS', line_num=3, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u'\n', line_num=3, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=' \n', line_num=4, pos=-1, is_text=False, is_matched=False, is_known=False)
]
assert result == expected
def test_tokenize_matched_text_does_not_crash_on_turkish_unicode(self):
querys = u'İrəli'
result = tokenize_matched_text(location=None, query_string=querys, dictionary={})
expected = [
Token(value='i', line_num=1, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value='rəli', line_num=1, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value='\n', line_num=1, pos=-1, is_text=False, is_matched=False, is_known=False),
]
assert result == expected
def test_tokenize_matched_text_behaves_like_query_tokenizer_on_turkish_unicode(self):
from licensedcode.tokenize import query_tokenizer
querys = u'İrəli'
matched_text_result = tokenize_matched_text(location=None, query_string=querys, dictionary={})
matched_text_result = [t.value for t in matched_text_result]
query_tokenizer_result = list(query_tokenizer(querys))
if matched_text_result[-1] == '\n':
matched_text_result = matched_text_result[:-1]
assert matched_text_result == query_tokenizer_result
def test_reportable_tokens_filter_tokens_does_not_strip_last_token_value(self):
tokens = [
Token(value=u'\n', line_num=1, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'foobar', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'45', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'Copyright', line_num=2, pos=0, is_text=True, is_matched=False, is_known=True),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'2003', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' (', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'C', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u') ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'James', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u'. ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'All', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'Rights', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'Reserved', line_num=2, pos=1, is_text=True, is_matched=False, is_known=True),
Token(value=u'. ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'THIS', line_num=2, pos=2, is_text=True, is_matched=False, is_known=True),
Token(value=u'\n', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u' ', line_num=3, pos=-1, is_text=False, is_matched=False, is_known=False),
]
match_qspan = Span(0, 1)
result = list(reportable_tokens(tokens, match_qspan, start_line=1, end_line=2, whole_lines=False))
expected = [
Token(value=u'Copyright', line_num=2, pos=0, is_text=True, is_matched=True, is_known=True),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'2003', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' (', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'C', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u') ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'James', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u'. ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'All', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'Rights', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'Reserved', line_num=2, pos=1, is_text=True, is_matched=True, is_known=True),
Token(value=u'. ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False)
]
assert result == expected
# test again with whole lines
match_qspan = Span(0, 1)
result = list(reportable_tokens(tokens, match_qspan, start_line=1, end_line=2, whole_lines=True))
expected = [
Token(value=u'\n', line_num=1, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'foobar', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'45', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'Copyright', line_num=2, pos=0, is_text=True, is_matched=True, is_known=True),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'2003', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' (', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'C', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u') ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'James', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u'. ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'All', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'Rights', line_num=2, pos=-1, is_text=True, is_matched=False, is_known=False),
Token(value=u' ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'Reserved', line_num=2, pos=1, is_text=True, is_matched=True, is_known=True),
Token(value=u'. ', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False),
Token(value=u'THIS', line_num=2, pos=2, is_text=True, is_matched=False, is_known=True),
Token(value=u'\n', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False)]
assert result == expected
def test_matched_text_is_collected_correctly_end2end(self):
rules_data_dir = self.get_test_loc('matched_text/index/rules')
query_location = self.get_test_loc('matched_text/query.txt')
rules = models.load_rules(rules_data_dir)
idx = LicenseIndex(rules)
results = [match.matched_text(_usecache=False) for match in idx.match(location=query_location)]
expected = [
'This source code is licensed under both the Apache 2.0 license '
'(found in the\n# LICENSE',
'This source code is licensed under [both] [the] [Apache] [2].[0] license '
'(found in the\n# LICENSE file in the root directory of this source tree)',
'GPLv2 ('
]
assert results == expected
def check_matched_texts(self, test_loc, expected_texts, whole_lines=True):
idx = cache.get_index()
test_loc = self.get_test_loc(test_loc)
matches = idx.match(location=test_loc)
matched_texts = [
m.matched_text(whole_lines=whole_lines, highlight=False, _usecache=False)
for m in matches
]
assert matched_texts == expected_texts
def test_matched_text_is_collected_correctly_end2end_for_spdx_match_whole_lines(self):
self.check_matched_texts(
test_loc='matched_text/spdx/query.txt',
expected_texts=['@REM # SPDX-License-Identifier: BSD-2-Clause-Patent'],
whole_lines=True
)
def test_matched_text_is_collected_correctly_end2end_for_spdx_match_plain(self):
self.check_matched_texts(
test_loc='matched_text/spdx/query.txt',
expected_texts=['SPDX-License-Identifier: BSD-2-Clause-Patent'],
whole_lines=False
)
def test_matched_text_is_not_truncated_with_unicode_diacritic_input_from_query(self):
idx = cache.get_index()
querys_with_diacritic_unicode = 'İ license MIT'
result = idx.match(query_string=querys_with_diacritic_unicode)
assert len(result) == 1
match = result[0]
expected = 'license MIT'
matched_text = match.matched_text(_usecache=False,)
assert matched_text == expected
def test_matched_text_is_not_truncated_with_unicode_diacritic_input_from_file(self):
idx = cache.get_index()
file_with_diacritic_unicode_location = self.get_test_loc('matched_text/unicode_text/main3.js')
result = idx.match(location=file_with_diacritic_unicode_location)
assert len(result) == 1
match = result[0]
expected = 'license MIT'
matched_text = match.matched_text(_usecache=False)
assert matched_text == expected
def test_matched_text_is_not_truncated_with_unicode_diacritic_input_from_query_whole_lines(self):
idx = cache.get_index()
querys_with_diacritic_unicode = 'İ license MIT'
result = idx.match(query_string=querys_with_diacritic_unicode)
assert len(result) == 1
match = result[0]
expected = '[İ] license MIT'
matched_text = match.matched_text(_usecache=False, whole_lines=True)
assert matched_text == expected
def test_matched_text_is_not_truncated_with_unicode_diacritic_input_with_diacritic_in_rules(self):
rule_dir = self.get_test_loc('matched_text/turkish_unicode/rules')
idx = index.LicenseIndex(load_rules(rule_dir))
query_loc = self.get_test_loc('matched_text/turkish_unicode/query')
matches = idx.match(location=query_loc)
matched_texts = [
m.matched_text(whole_lines=False, highlight=False, _usecache=False)
for m in matches
]
expected = [
'Licensed under the Apache License, Version 2.0\r\nnext_label=irəli',
'İ license MIT',
'İ license MIT',
'Licensed under the Apache License, Version 2.0\r\nnext_label=irəli',
'lİcense mit'
]
assert matched_texts == expected
def test_matched_text_is_not_truncated_with_unicode_diacritic_input_and_full_index(self):
expected = [
'Licensed under the Apache License, Version 2.0',
'license MIT',
'license MIT',
'Licensed under the Apache License, Version 2.0'
]
self.check_matched_texts(
test_loc='matched_text/turkish_unicode/query',
expected_texts=expected,
whole_lines=False
)
def test_matched_text_does_not_ignores_whole_lines_in_binary_with_small_index(self):
rule_dir = self.get_test_loc('matched_text/binary_text/rules')
idx = index.LicenseIndex(load_rules(rule_dir))
query_loc = self.get_test_loc('matched_text/binary_text/gosu')
matches = idx.match(location=query_loc)
matched_texts = [
m.matched_text(whole_lines=True, highlight=False, _usecache=False)
for m in matches
]
expected = ['{{ .Self }} license: GPL-3 (full text at https://github.com/tianon/gosu)']
assert matched_texts == expected
def test_matched_text_does_not_ignores_whole_lines_in_binary_against_full_index(self):
expected = ['{{ .Self }} license: GPL-3 (full text at https://github.com/tianon/gosu)']
self.check_matched_texts(
test_loc='matched_text/binary_text/gosu',
expected_texts=expected,
whole_lines=True,
)
def test_matched_text_is_collected_correctly_in_binary_ffmpeg_windows_whole_lines(self):
expected_texts = [
'--enable-gpl --enable-version3 --enable-dxva2 --enable-libmfx --enable-nvenc '
'--enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r '
'--enable-gnutls --enable-iconv --enable-libass --enable-libbluray '
'--enable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme '
'--enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmp3lame '
'--enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenh264 '
'--enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libsnappy '
'--enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame '
'--enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis '
'--enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 '
'--enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg '
'--enable-lzma --enable-decklink --enable-zlib',
'%sconfiguration: --enable-gpl --enable-version3 --enable-dxva2 '
'--enable-libmfx --enable-nvenc --enable-avisynth --enable-bzlib '
'--enable-fontconfig --enable-frei0r --enable-gnutls --enable-iconv '
'--enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca '
'--enable-libfreetype --enable-libgme --enable-libgsm --enable-libilbc '
'--enable-libmodplug --enable-libmp3lame --enable-libopencore-amrnb '
'--enable-libopencore-amrwb --enable-libopenh264 --enable-libopenjpeg '
'--enable-libopus --enable-librtmp --enable-libsnappy --enable-libsoxr '
'--enable-libspeex --enable-libtheora --enable-libtwolame --enable-libvidstab '
'--enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx '
'--enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 '
'--enable-libxavs --enable-libxvid --enable-libzimg --enable-lzma '
'--enable-decklink --enable-zlib',
'%s is free software; you can redistribute it and/or modify\n'
'it under the terms of the GNU General Public License as published by\n'
'the Free Software Foundation; either version 3 of the License, or\n'
'(at your option) any later version.\n'
'%s is distributed in the hope that it will be useful,\n'
'but WITHOUT ANY WARRANTY; without even the implied warranty of\n'
'MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n'
'GNU General Public License for more details.\n'
'You should have received a copy of the GNU General Public License\n'
'along with %s. If not, see <http://www.gnu.org/licenses/>.\n'
'File formats:\n'
'D. = Demuxing supported\n'
'.E = Muxing supported\n'
'%s%s %-15s %s\n'
'Devices:\n'
'Codecs:\n'
'D..... = Decoding supported\n'
'.E.... = Encoding supported\n'
'..V... = Video codec\n'
"No option name near '%s'\n"
"Unable to parse '%s': %s\n"
"Setting '%s' to value '%s'\n"
"Option '%s' not found\n"
'--enable-gpl --enable-version3 --enable-dxva2 --enable-libmfx --enable-nvenc '
'--enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r '
'--enable-gnutls --enable-iconv --enable-libass --enable-libbluray '
'--enable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme '
'--enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmp3lame '
'--enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenh264 '
'--enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libsnappy '
'--enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame '
'--enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis '
'--enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 '
'--enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg '
'--enable-lzma --enable-decklink --enable-zlib',
'--enable-gpl --enable-version3 --enable-dxva2 --enable-libmfx --enable-nvenc '
'--enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r '
'--enable-gnutls --enable-iconv --enable-libass --enable-libbluray '
'--enable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme '
'--enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmp3lame '
'--enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenh264 '
'--enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libsnappy '
'--enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame '
'--enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis '
'--enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 '
'--enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg '
'--enable-lzma --enable-decklink --enable-zlib',
'libavfilter license: GPL version 3 or later',
'--enable-gpl --enable-version3 --enable-dxva2 --enable-libmfx --enable-nvenc '
'--enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r '
'--enable-gnutls --enable-iconv --enable-libass --enable-libbluray '
'--enable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme '
'--enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmp3lame '
'--enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenh264 '
'--enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libsnappy '
'--enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame '
'--enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis '
'--enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 '
'--enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg '
'--enable-lzma --enable-decklink --enable-zlib',
'libavformat license: GPL version 3 or later',
'--enable-gpl --enable-version3 --enable-dxva2 --enable-libmfx --enable-nvenc '
'--enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r '
'--enable-gnutls --enable-iconv --enable-libass --enable-libbluray '
'--enable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme '
'--enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmp3lame '
'--enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenh264 '
'--enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libsnappy '
'--enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame '
'--enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis '
'--enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 '
'--enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg '
'--enable-lzma --enable-decklink --enable-zlib',
'libavcodec license: GPL version 3 or later',
'--enable-gpl --enable-version3 --enable-dxva2 --enable-libmfx --enable-nvenc '
'--enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r '
'--enable-gnutls --enable-iconv --enable-libass --enable-libbluray '
'--enable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme '
'--enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmp3lame '
'--enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenh264 '
'--enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libsnappy '
'--enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame '
'--enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis '
'--enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 '
'--enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg '
'--enable-lzma --enable-decklink --enable-zlib',
'libpostproc license: GPL version 3 or later',
'--enable-gpl --enable-version3 --enable-dxva2 --enable-libmfx --enable-nvenc '
'--enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r '
'--enable-gnutls --enable-iconv --enable-libass --enable-libbluray '
'--enable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme '
'--enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmp3lame '
'--enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenh264 '
'--enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libsnappy '
'--enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame '
'--enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis '
'--enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 '
'--enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg '
'--enable-lzma --enable-decklink --enable-zlib',
'libswresample license: GPL version 3 or later',
'--enable-gpl --enable-version3 --enable-dxva2 --enable-libmfx --enable-nvenc '
'--enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r '
'--enable-gnutls --enable-iconv --enable-libass --enable-libbluray '
'--enable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme '
'--enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmp3lame '
'--enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenh264 '
'--enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libsnappy '
'--enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame '
'--enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis '
'--enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 '
'--enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg '
'--enable-lzma --enable-decklink --enable-zlib',
'libswscale license: GPL version 3 or later',
'--enable-gpl --enable-version3 --enable-dxva2 --enable-libmfx --enable-nvenc '
'--enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r '
'--enable-gnutls --enable-iconv --enable-libass --enable-libbluray '
'--enable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme '
'--enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmp3lame '
'--enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenh264 '
'--enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libsnappy '
'--enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame '
'--enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis '
'--enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 '
'--enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg '
'--enable-lzma --enable-decklink --enable-zlib',
'libavutil license: GPL version 3 or later',
'This software is derived from the GNU GPL XviD codec (1.3.0).',
]
self.check_matched_texts(
test_loc='matched_text/ffmpeg/ffmpeg.exe',
expected_texts=expected_texts,
whole_lines=True
)
def test_matched_text_is_collected_correctly_in_binary_ffmpeg_windows_not_whole_lines(self):
expected_texts = [
'enable-gpl --enable-version3 --',
'enable-gpl --enable-version3 --',
'is free software; you can redistribute it and/or modify\n'
'it under the terms of the GNU General Public License as published by\n'
'the Free Software Foundation; either version 3 of the License, or\n'
'(at your option) any later version.\n'
'%s is distributed in the hope that it will be useful,\n'
'but WITHOUT ANY WARRANTY; without even the implied warranty of\n'
'MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n'
'GNU General Public License for more details.\n'
'You should have received a copy of the GNU General Public License\n'
'along with %s. If not, see <http://www.gnu.org/licenses/>.\n'
'File formats:\n'
'D. = Demuxing supported\n'
'.E = Muxing supported\n'
'%s%s %-15s %s\n'
'Devices:\n'
'Codecs:\n'
'D..... = Decoding supported\n'
'.E.... = Encoding supported\n'
'..V... = Video codec\n'
"No option name near '%s'\n"
"Unable to parse '%s': %s\n"
"Setting '%s' to value '%s'\n"
"Option '%s' not found\n"
'--enable-gpl --',
'enable-gpl --enable-version3 --',
'license: GPL version 3 or later',
'enable-gpl --enable-version3 --',
'license: GPL version 3 or later',
'enable-gpl --enable-version3 --',
'license: GPL version 3 or later',
'enable-gpl --enable-version3 --',
'license: GPL version 3 or later',
'enable-gpl --enable-version3 --',
'license: GPL version 3 or later',
'enable-gpl --enable-version3 --',
'license: GPL version 3 or later',
'enable-gpl --enable-version3 --',
'license: GPL version 3 or later',
'This software is derived from the GNU GPL XviD codec ('
]
self.check_matched_texts(
test_loc='matched_text/ffmpeg/ffmpeg.exe',
expected_texts=expected_texts,
whole_lines=False,
)
def test_matched_text_is_collected_correctly_in_binary_ffmpeg_elf_whole_lines(self):
expected_texts = [
'--prefix=/usr --extra-version=0ubuntu0.1 --build-suffix=-ffmpeg '
'--toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu '
'--incdir=/usr/include/x86_64-linux-gnu --cc=cc --cxx=g++ --enable-gpl '
'--enable-shared --disable-stripping --disable-decoder=libopenjpeg '
'--disable-decoder=libschroedinger --enable-avresample --enable-avisynth '
'--enable-gnutls --enable-ladspa --enable-libass --enable-libbluray '
'--enable-libbs2b --enable-libcaca --enable-libcdio --enable-libflite '
'--enable-libfontconfig --enable-libfreetype --enable-libfribidi '
'--enable-libgme --enable-libgsm --enable-libmodplug --enable-libmp3lame '
'--enable-libopenjpeg --enable-libopus --enable-libpulse --enable-librtmp '
'--enable-libschroedinger --enable-libshine --enable-libsnappy '
'--enable-libsoxr --enable-libspeex --enable-libssh --enable-libtheora '
'--enable-libtwolame --enable-libvorbis --enable-libvpx --enable-libwavpack '
'--enable-libwebp --enable-libx265 --enable-libxvid --enable-libzvbi '
'--enable-openal --enable-opengl --enable-x11grab --enable-libdc1394 '
'--enable-libiec61883 --enable-libzmq --enable-frei0r --enable-libx264 '
'--enable-libopencv',
'%sconfiguration: --prefix=/usr --extra-version=0ubuntu0.1 '
'--build-suffix=-ffmpeg --toolchain=hardened '
'--libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu '
'--cc=cc --cxx=g++ --enable-gpl --enable-shared --disable-stripping '
'--disable-decoder=libopenjpeg --disable-decoder=libschroedinger '
'--enable-avresample --enable-avisynth --enable-gnutls --enable-ladspa '
'--enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca '
'--enable-libcdio --enable-libflite --enable-libfontconfig '
'--enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm '
'--enable-libmodplug --enable-libmp3lame --enable-libopenjpeg '
'--enable-libopus --enable-libpulse --enable-librtmp --enable-libschroedinger '
'--enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex '
'--enable-libssh --enable-libtheora --enable-libtwolame --enable-libvorbis '
'--enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 '
'--enable-libxvid --enable-libzvbi --enable-openal --enable-opengl '
'--enable-x11grab --enable-libdc1394 --enable-libiec61883 --enable-libzmq '
'--enable-frei0r --enable-libx264 --enable-libopencv',
'%s is free software; you can redistribute it and/or modify\n'
'it under the terms of the GNU General Public License as published by\n'
'the Free Software Foundation; either version 2 of the License, or\n'
'(at your option) any later version.\n'
'%s is distributed in the hope that it will be useful,\n'
'but WITHOUT ANY WARRANTY; without even the implied warranty of\n'
'MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n'
'GNU General Public License for more details.\n'
'You should have received a copy of the GNU General Public License\n'
'along with %s; if not, write to the Free Software\n'
'Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA'
]
self.check_matched_texts(
test_loc='matched_text/ffmpeg/ffmpeg',
expected_texts=expected_texts,
whole_lines=True,
)
def test_matched_text_is_collected_correctly_in_binary_ffmpeg_static_whole_lines(self):
expected_texts = ['libswresample license: LGPL version 2.1 or later']
self.check_matched_texts(
test_loc='matched_text/ffmpeg/libavsample.lib',
expected_texts=expected_texts,
whole_lines=True,
)
|
recirq/hfvqe/analysis_test.py | Coiner1909/ReCirq | 195 | 12615315 | <gh_stars>100-1000
# Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import product
import numpy as np
import scipy as sp
from recirq.hfvqe.circuits import rhf_params_to_matrix
from recirq.hfvqe.analysis import (trace_distance, kdelta, energy_from_opdm,
fidelity_witness, fidelity,
mcweeny_purification)
from recirq.hfvqe.molecular_example import make_h6_1_3, make_h3_2_5
from recirq.hfvqe.gradient_hf import rhf_func_generator
def test_kdelta():
assert np.isclose(kdelta(1, 1), 1.)
assert np.isclose(kdelta(0, 1), 0.)
def test_trace_distance():
rho = np.arange(16).reshape((4, 4))
sigma = np.arange(16, 32).reshape((4, 4))
assert np.isclose(trace_distance(rho, rho), 0.)
assert np.isclose(trace_distance(rho, sigma), 32.0)
def test_energy_from_opdm():
"""Build test assuming sampling functions work"""
rhf_objective, molecule, parameters, obi, tbi = make_h6_1_3()
unitary, energy, _ = rhf_func_generator(rhf_objective)
parameters = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
initial_opdm = np.diag([1] * 3 + [0] * 3)
final_opdm = unitary(parameters) @ initial_opdm @ unitary(
parameters).conj().T
test_energy = energy_from_opdm(final_opdm,
constant=molecule.nuclear_repulsion,
one_body_tensor=obi,
two_body_tensor=tbi)
true_energy = energy(parameters)
assert np.allclose(test_energy, true_energy)
def test_energy_from_opdm_odd_qubit():
"""Build test assuming sampling functions work"""
rhf_objective, molecule, parameters, obi, tbi = make_h3_2_5()
unitary, energy, _ = rhf_func_generator(rhf_objective)
parameters = np.array([0.1, 0.2])
initial_opdm = np.diag([1] * 1 + [0] * 2)
print(initial_opdm)
final_opdm = unitary(parameters) @ initial_opdm @ unitary(
parameters).conj().T
test_energy = energy_from_opdm(final_opdm,
constant=molecule.nuclear_repulsion,
one_body_tensor=obi,
two_body_tensor=tbi)
true_energy = energy(parameters)
assert np.allclose(test_energy, true_energy)
def test_mcweeny():
np.random.seed(82)
opdm = np.array([[
0.766034130, -0.27166330, -0.30936072, -0.08471057, -0.04878244,
-0.01285432
],
[
-0.27166330, 0.67657015, -0.37519640, -0.02101843,
-0.03568214, -0.05034585
],
[
-0.30936072, -0.37519640, 0.55896791, 0.04267370,
-0.02258184, -0.08783738
],
[
-0.08471057, -0.02101843, 0.04267370, 0.05450848,
0.11291253, 0.17131658
],
[
-0.04878244, -0.03568214, -0.02258184, 0.11291253,
0.26821219, 0.42351185
],
[
-0.01285432, -0.05034585, -0.08783738, 0.17131658,
0.42351185, 0.67570713
]])
for i, j in product(range(6), repeat=2):
opdm[i, j] += np.random.randn() * 1.0E-3
opdm = 0.5 * (opdm + opdm.T)
pure_opdm = mcweeny_purification(opdm)
w, _ = np.linalg.eigh(pure_opdm)
assert len(np.where(w < -1.0E-9)[0]) == 0
def test_fidelity():
parameters = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
u = sp.linalg.expm(rhf_params_to_matrix(parameters, 6))
opdm = np.array([[
0.766034130, -0.27166330, -0.30936072, -0.08471057, -0.04878244,
-0.01285432
],
[
-0.27166330, 0.67657015, -0.37519640, -0.02101843,
-0.03568214, -0.05034585
],
[
-0.30936072, -0.37519640, 0.55896791, 0.04267370,
-0.02258184, -0.08783738
],
[
-0.08471057, -0.02101843, 0.04267370, 0.05450848,
0.11291253, 0.17131658
],
[
-0.04878244, -0.03568214, -0.02258184, 0.11291253,
0.26821219, 0.42351185
],
[
-0.01285432, -0.05034585, -0.08783738, 0.17131658,
0.42351185, 0.67570713
]])
assert np.isclose(fidelity(u, opdm), 1.0)
opdm += 0.1
opdm = 0.5 * (opdm + opdm.T)
assert np.isclose(fidelity(u, opdm), 0.3532702370138279)
def test_fidelity_witness():
parameters = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
u = sp.linalg.expm(rhf_params_to_matrix(parameters, 6))
omega = [1] * 3 + [0] * 3
opdm = np.array([[
0.766034130, -0.27166330, -0.30936072, -0.08471057, -0.04878244,
-0.01285432
],
[
-0.27166330, 0.67657015, -0.37519640, -0.02101843,
-0.03568214, -0.05034585
],
[
-0.30936072, -0.37519640, 0.55896791, 0.04267370,
-0.02258184, -0.08783738
],
[
-0.08471057, -0.02101843, 0.04267370, 0.05450848,
0.11291253, 0.17131658
],
[
-0.04878244, -0.03568214, -0.02258184, 0.11291253,
0.26821219, 0.42351185
],
[
-0.01285432, -0.05034585, -0.08783738, 0.17131658,
0.42351185, 0.67570713
]])
assert np.isclose(fidelity_witness(u, omega, opdm), 1.0)
opdm += 0.1
opdm = 0.5 * (opdm + opdm.T)
# higher than fidelity because of particle number breaking
assert np.isclose(fidelity_witness(u, omega, opdm), 0.7721525013371697)
|
tools/graphviz.py | chlorm-forks/gyp | 1,666 | 12615323 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
from __future__ import print_function
import collections
import json
import sys
def ParseTarget(target):
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents."""
  file = open(filename)
edges = json.load(file)
file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print('digraph D {')
print(' fontsize=8') # Used by subgraphs.
print(' node [fontsize=8]')
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print(' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name))
else:
# Group multiple nodes together in a subgraph.
print(' subgraph "cluster_%s" {' % filename)
print(' label = "%s"' % filename)
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print(' "%s" [label="%s"]' % (target, target_name))
print(' }')
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print(' "%s" -> "%s"' % (src, dst))
print('}')
def main():
if len(sys.argv) < 2:
print(__doc__, file=sys.stderr)
print(file=sys.stderr)
print('usage: %s target1 target2...' % (sys.argv[0]), file=sys.stderr)
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
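# A hedged usage sketch (paths and target names are illustrative): first write
# dump.json with gyp's dump-dependency-json generator, then pipe this script's
# output into graphviz's `dot` renderer, e.g.
#
#   python tools/graphviz.py build/all.gyp:my_target | dot -Tpng -o deps.png
#
# Target specs use the "buildfile:target[#suffix]" form parsed by ParseTarget().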
|
mmdet/core/optimizer/builder.py | deepakksingh/mmdetection | 232 | 12615339 | <reponame>deepakksingh/mmdetection<filename>mmdet/core/optimizer/builder.py
import copy
import inspect
import torch
from mmcv.utils import Registry, build_from_cfg
OPTIMIZERS = Registry('optimizer')
OPTIMIZER_BUILDERS = Registry('optimizer builder')
def register_torch_optimizers():
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module()(_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def build_optimizer_constructor(cfg):
return build_from_cfg(cfg, OPTIMIZER_BUILDERS)
def build_optimizer(model, cfg):
optimizer_cfg = copy.deepcopy(cfg)
constructor_type = optimizer_cfg.pop('constructor',
'DefaultOptimizerConstructor')
paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None)
optim_constructor = build_optimizer_constructor(
dict(
type=constructor_type,
optimizer_cfg=optimizer_cfg,
paramwise_cfg=paramwise_cfg))
optimizer = optim_constructor(model)
return optimizer
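# A hedged usage sketch (the model and hyper-parameters are illustrative, and it
# assumes DefaultOptimizerConstructor has been registered in OPTIMIZER_BUILDERS,
# which mmdet does when the optimizer package is imported):
#
#   import torch.nn as nn
#   model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU())
#   cfg = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
#   optimizer = build_optimizer(model, cfg)  # -> a torch.optim.SGD instance
#
# Any optimizer registered by register_torch_optimizers() can be selected with
# the `type` key; the `constructor` and `paramwise_cfg` keys are optional.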
|
nemo_text_processing/inverse_text_normalization/ru/taggers/decimals.py | hamjam/NeMo | 4,145 | 12615347 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SPACE, GraphFst, delete_extra_space
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class DecimalFst(GraphFst):
"""
Finite state transducer for classifying decimal
e.g. "минус три целых две десятых" -> decimal { negative: "true" integer_part: "3," fractional_part: "2" }
Args:
tn_decimal: Text normalization Decimal graph
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, tn_decimal, deterministic: bool = False):
super().__init__(name="decimal", kind="classify", deterministic=deterministic)
optional_graph_negative = pynini.closure(
pynutil.insert("negative: ") + pynini.cross("минус", "\"true\"") + delete_extra_space, 0, 1
)
graph_fractional_part = pynini.invert(tn_decimal.graph_fractional).optimize()
graph_integer_part = pynini.invert(tn_decimal.integer_part).optimize()
optional_graph_quantity = pynini.invert(tn_decimal.optional_quantity).optimize()
graph_fractional = pynutil.insert("fractional_part: \"") + graph_fractional_part + pynutil.insert("\"")
graph_integer = pynutil.insert("integer_part: \"") + graph_integer_part + pynutil.insert("\"")
optional_graph_quantity = pynutil.insert("quantity: \"") + optional_graph_quantity + pynutil.insert("\"")
optional_graph_quantity = pynini.closure(pynini.accep(NEMO_SPACE) + optional_graph_quantity, 0, 1)
self.final_graph_wo_sign = (
graph_integer + pynini.accep(NEMO_SPACE) + graph_fractional + optional_graph_quantity
)
final_graph = optional_graph_negative + self.final_graph_wo_sign
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
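# A hedged usage sketch (illustrative only; `decimal` stands for an instance of
# this class built from the Russian text-normalization decimal graph): the
# classification can be read off by composing an input with the fst and taking
# the shortest path,
#
#   lattice = pynini.escape("минус три целых две десятых") @ decimal.fst
#   print(pynini.shortestpath(lattice).string())
#
# which should yield the token form shown in the class docstring.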
|
data/grocery/prep_grocery.py | marcegeek/audio-super-res | 712 | 12615361 | """
Create masked train/test pickles of grocery sales counts for the super-resolution model.
"""
import os, argparse
import numpy as np
import h5py
import cPickle
import csv
from tqdm import tqdm
import pprint
import librosa
from scipy import interpolate
from scipy.signal import decimate
from scipy.signal import butter, lfilter
import re
f = 'grocery/train.csv'
TRAIN_PROB = 0.8
NUM_DATES = 1024
MASK_PROB = 0.1
# read data into a dictionary of item -> (date -> [list of counts])
items = {}
with open(f, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=",")
next(reader, None)
i =0
for row in tqdm(reader):
i +=1
item = row[3]
sales = row[4]
date = row[1]
if(date == '2015-12-03'): break # set this to a later date to capture more data
# note: also change row length below
if(item not in items):
items[item] = {}
if(date not in items[item]):
items[item][date] = [];
items[item][date].append(float(sales))
# average the counts per date per item and create a tensor with
# rows = items, columns = dates, and values = counts
data = []
for vals in tqdm(items.values()):
row = []
for sales in tqdm(vals.values()):
row.append(np.average(np.array(sales)))
if(len(row) >= NUM_DATES): # cut off extra dates to keep size constant
        # note: change this to change size of processed data
data.append(row[:NUM_DATES])
data = np.stack(data)
pprint.pprint(data.shape)
# split into train and test sets
trainY = data[:int(data.shape[0]*TRAIN_PROB),]
testY = data[int(data.shape[0]*TRAIN_PROB):,:]
# mask out some of the data
trainX = np.empty_like(trainY)
trainX[:] = trainY
testX = np.empty_like(testY)
testX[:] = testY
trainMask = np.random.choice([0,1],size=trainX.shape, p=[MASK_PROB, 1-MASK_PROB])
trainX = np.multiply(trainX, trainMask)
testMask = np.random.choice([0,1],size=testX.shape, p=[MASK_PROB, 1-MASK_PROB])
testX = np.multiply(testX, testMask)
# pickle the data
print trainX.shape
print trainY.shape
cPickle.dump(testX, open('grocery/grocery-test-data_'+str(MASK_PROB),'w'))
cPickle.dump(testY, open('grocery/grocery-test-label'+str(MASK_PROB),'w'))
cPickle.dump(trainX, open('grocery/grocery-train-data'+str(MASK_PROB),'w'))
cPickle.dump(trainY, open('grocery/grocery-train-label'+str(MASK_PROB),'w'))
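# Hedged read-back sketch (same Python 2 / cPickle conventions as above). Note
# that the output names concatenate MASK_PROB directly, and only the test-data
# file gets an underscore separator:
#   trainX = cPickle.load(open('grocery/grocery-train-data0.1'))
#   testX = cPickle.load(open('grocery/grocery-test-data_0.1'))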
|
suds/cache.py | slushie0/interactive-tutorials | 2,750 | 12615363 | <gh_stars>1000+
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: <NAME> ( <EMAIL> )
"""
Contains basic caching classes.
"""
import os
import suds
import tempfile
from suds.sax.parser import Parser
from suds.sax.element import Element
from datetime import datetime as dt
from datetime import timedelta
from logging import getLogger
try:
import cPickle as pickle
except:
import pickle
import shutil
log = getLogger(__name__)
class Cache:
"""
An object object cache.
"""
def get(self, id):
"""
Get a object from the cache by ID.
@param id: The object ID.
@type id: str
@return: The object, else None
@rtype: any
"""
raise Exception('not-implemented')
def getf(self, id):
"""
Get a object from the cache by ID.
@param id: The object ID.
@type id: str
@return: The object, else None
@rtype: any
"""
raise Exception('not-implemented')
def put(self, id, object):
"""
Put a object into the cache.
@param id: The object ID.
@type id: str
@param object: The object to add.
@type object: any
"""
raise Exception('not-implemented')
def putf(self, id, fp):
"""
Write a fp into the cache.
@param id: The object ID.
@type id: str
@param fp: File pointer.
@type fp: file-like object.
"""
raise Exception('not-implemented')
def purge(self, id):
"""
Purge a object from the cache by id.
@param id: A object ID.
@type id: str
"""
raise Exception('not-implemented')
def clear(self):
"""
Clear all objects from the cache.
"""
raise Exception('not-implemented')
class NoCache(Cache):
"""
The passthru object cache.
"""
def get(self, id):
return None
def getf(self, id):
return None
def put(self, id, object):
pass
def putf(self, id, fp):
pass
class FileCache(Cache):
"""
A file-based URL cache.
@cvar fnprefix: The file name prefix.
    @type fnprefix: str
@cvar remove_default_location_on_exit: Whether to remove the default cache
location on process exit (default=True).
@type remove_default_location_on_exit: bool
@ivar duration: The cached file duration which defines how
long the file will be cached.
@type duration: (unit, value)
@ivar location: The directory for the cached files.
@type location: str
"""
fnprefix = 'suds'
__default_location = None
remove_default_location_on_exit = True
units = ('months', 'weeks', 'days', 'hours', 'minutes', 'seconds')
def __init__(self, location=None, **duration):
"""
        Initialize a new FileCache instance.
If no cache location is specified, a temporary default location will be
used. Such default cache location will be shared by all FileCache
instances with no explicitly specified location within the same
process. The default cache location will be removed automatically on
process exit unless user sets the remove_default_location_on_exit
FileCache class attribute to False.
@param location: The directory for the cached files.
@type location: str
@param duration: The cached file duration which defines how
long the file will be cached. A duration=0 means forever.
The duration may be: (months|weeks|days|hours|minutes|seconds).
@type duration: {unit:value}
"""
if location is None:
location = self.__get_default_location()
self.location = location
self.duration = (None, 0)
self.setduration(**duration)
self.checkversion()
def fnsuffix(self):
"""
Get the file name suffix
@return: The suffix
@rtype: str
"""
return 'gcf'
def setduration(self, **duration):
"""
Set the caching duration which defines how long the
file will be cached.
@param duration: The cached file duration which defines how
long the file will be cached. A duration=0 means forever.
The duration may be: (months|weeks|days|hours|minutes|seconds).
@type duration: {unit:value}
"""
if len(duration) == 1:
arg = list(duration.items())[0]
if not arg[0] in self.units:
raise Exception('must be: %s' % str(self.units))
self.duration = arg
return self
def setlocation(self, location):
"""
Set the location (directory) for the cached files.
@param location: The directory for the cached files.
@type location: str
"""
self.location = location
def mktmp(self):
"""
        Make the I{location} directory if it doesn't already exist.
"""
try:
if not os.path.isdir(self.location):
os.makedirs(self.location)
except:
log.debug(self.location, exc_info=1)
return self
def put(self, id, bfr):
try:
fn = self.__fn(id)
f = self.open(fn, 'wb')
f.write(bfr)
f.close()
return bfr
except:
log.debug(id, exc_info=1)
return bfr
def putf(self, id, fp):
try:
fn = self.__fn(id)
f = self.open(fn, 'wb')
f.write(fp.read())
fp.close()
f.close()
return open(fn, 'rb')
except:
log.debug(id, exc_info=1)
return fp
def get(self, id):
try:
f = self.getf(id)
bfr = f.read()
f.close()
return bfr
except:
pass
def getf(self, id):
try:
fn = self.__fn(id)
self.validate(fn)
return self.open(fn, 'rb')
except:
pass
def validate(self, fn):
"""
Validate that the file has not expired based on the I{duration}.
@param fn: The file name.
@type fn: str
"""
if self.duration[1] < 1:
return
created = dt.fromtimestamp(os.path.getctime(fn))
d = {self.duration[0]: self.duration[1]}
expired = created+timedelta(**d)
if expired < dt.now():
log.debug('%s expired, deleted', fn)
os.remove(fn)
def clear(self):
for fn in os.listdir(self.location):
if os.path.isdir(fn):
continue
if fn.startswith(self.fnprefix):
log.debug('deleted: %s', fn)
os.remove(os.path.join(self.location, fn))
def purge(self, id):
fn = self.__fn(id)
try:
os.remove(fn)
except:
pass
def open(self, fn, *args):
"""
Open the cache file making sure the directory is created.
"""
self.mktmp()
return open(fn, *args)
def checkversion(self):
path = os.path.join(self.location, 'version')
try:
f = self.open(path, 'rt')
version = f.read()
f.close()
if version != suds.__version__:
raise Exception()
except:
self.clear()
f = self.open(path, 'wt')
f.write(suds.__version__)
f.close()
def __fn(self, id):
name = id
suffix = self.fnsuffix()
fn = '%s-%s.%s' % (self.fnprefix, name, suffix)
return os.path.join(self.location, fn)
@staticmethod
def __get_default_location():
"""
Returns the current process's default cache location folder.
The folder is determined lazily on first call.
"""
if not FileCache.__default_location:
tmp = tempfile.mkdtemp("suds-default-cache")
FileCache.__default_location = tmp
import atexit
atexit.register(FileCache.__remove_default_location)
return FileCache.__default_location
@staticmethod
def __remove_default_location():
"""
Removes the default cache location folder.
This removal may be disabled by setting the
remove_default_location_on_exit FileCache class attribute to False.
"""
if FileCache.remove_default_location_on_exit:
# We must not load shutil here on-demand as under some
# circumstances this may cause the shutil.rmtree() operation to
# fail due to not having some internal module loaded. E.g. this
# happens if you run the project's test suite using the setup.py
# test command on Python 2.4.x.
shutil.rmtree(FileCache.__default_location, ignore_errors=True)
class DocumentCache(FileCache):
"""
Provides xml document caching.
"""
def fnsuffix(self):
return 'xml'
def get(self, id):
try:
with FileCache.getf(self, id) as fp:
if fp is None:
return None
p = Parser()
return p.parse(fp)
except:
FileCache.purge(self, id)
def put(self, id, object):
if isinstance(object, Element):
FileCache.put(self, id, str(object).encode())
else:
log.warn("WARN: Given object is not an instance of Element. Skipping!")
return object
class ObjectCache(FileCache):
"""
Provides pickled object caching.
@cvar protocol: The pickling protocol.
@type protocol: int
"""
protocol = 2
def fnsuffix(self):
return 'px'
def get(self, id):
try:
with FileCache.getf(self, id) as fp:
if fp is None:
return None
else:
return pickle.load(fp)
except:
FileCache.purge(self, id)
def put(self, id, object):
bfr = pickle.dumps(object, self.protocol)
FileCache.put(self, id, bfr)
return object
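# A hedged usage sketch (the cache directory and duration are illustrative):
#
#   cache = ObjectCache('/tmp/suds-cache', days=1)
#   cache.put('wsdl-metadata', {'answer': 42})
#   assert cache.get('wsdl-metadata') == {'answer': 42}
#
# The single duration keyword must use one of FileCache.units; a value of 0
# caches forever. DocumentCache works the same way but stores parsed XML.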
|
app/lib/cli/env.py | grepleria/SnitchDNS | 152 | 12615368 | import click
from flask.cli import with_appcontext
@click.command('env', help='SnitchDNS helper to identify the running environment')
@with_appcontext
def main():
print('OK')
return True
|
handlers/monitor_report.py | ishine/loggrove | 220 | 12615391 | <filename>handlers/monitor_report.py<gh_stars>100-1000
# Created by zhouwang on 2019/7/28.
from .base import BaseRequestHandler
import json
import logging
logger = logging.getLogger()
def report_valid(func):
def _wrapper(self):
error = {}
host = self.get_argument('host', '')
counts = self.get_argument('counts', '')
if not host:
error['host'] = 'Required'
if not counts:
error['counts'] = 'Required'
else:
try:
counts = json.loads(counts)
except:
error['counts'] = 'Must JSON'
if error:
return dict(code=400, msg='Bad POST data', error=error)
self.reqdata = dict(
host=host,
counts=counts,
)
return func(self)
return _wrapper
class Handler(BaseRequestHandler):
def post(self):
response_data = self._report()
self._write(response_data)
@report_valid
def _report(self):
try:
inserts = self.reqdata['counts']
with self.transaction():
self.cursor.executemany(self.insert_sql, inserts)
except Exception as e:
logger.error('Report failed[%s]: %s' % (self.reqdata['host'], str(e)))
return dict(code=500, msg='Report failed')
else:
return dict(code=200, msg='Report successful')
insert_sql = '''
INSERT INTO
monitor_count (logfile_id, host, monitor_item_id, count, count_time)
VALUES
(%s, %s, %s, %s, %s)
'''
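# Hedged request sketch (field values are illustrative): the reporting agent is
# expected to POST form data shaped like
#
#   host=web-01
#   counts=[[1, "web-01", 2, 17, "2019-07-28 10:00:00"], ...]
#
# where each inner list follows the column order of insert_sql above
# (logfile_id, host, monitor_item_id, count, count_time) and the whole list is
# handed to cursor.executemany() inside a single transaction.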
|
mayan/apps/storage/classes.py | nattangwiwat/Mayan-EDMS-recitation | 343 | 12615397 | import logging
from io import BytesIO, StringIO
from django.core.files.base import File
from django.core.files.storage import Storage
from django.utils.deconstruct import deconstructible
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.class_mixins import AppsModuleLoaderMixin
from .literals import DEFAULT_STORAGE_BACKEND
logger = logging.getLogger(name=__name__)
class BufferedFile(File):
def __init__(self, file_object, mode, name=None):
self.file_object = file_object
self.mode = mode
if 'b' in mode:
self.stream = BytesIO()
else:
self.stream = StringIO()
self.name = name
self.stream_size = 0
def close(self):
self.file_object.close()
self.stream.close()
def flush(self):
return self.file_object.flush()
def read(self, size=None):
if size is None:
size = -1
if size == -1 or size > self.stream_size:
while True:
position = self.stream.tell()
chunk = self._get_file_object_chunk()
if chunk:
self.stream_size += len(chunk)
self.stream.write(chunk)
self.stream.seek(position)
if self.stream_size >= size and size != -1:
break
else:
break
if size:
read_size = min(size, self.stream_size)
self.stream_size -= read_size
else:
read_size = None
return self.stream.read(read_size)
class DefinedStorage(AppsModuleLoaderMixin):
_loader_module_name = 'storages'
_registry = {}
@classmethod
def get(cls, name):
return cls._registry[name]
def __init__(self, dotted_path, label, name, kwargs=None, error_message=None):
self.dotted_path = dotted_path
self.error_message = error_message
self.label = label
self.name = name
self.kwargs = kwargs or {}
self.__class__._registry[name] = self
def __eq__(self, other):
return True
def get_storage_instance(self):
try:
return self.get_storage_subclass()(**self.kwargs)
except Exception as exception:
message = self.error_message or _(
'Unable to initialize storage: %(name)s. Check the storage '
'backend dotted path and arguments.'
) % {
'name': self.name
}
logger.fatal(message)
raise TypeError(message) from exception
def get_storage_subclass(self):
"""
        Import a storage class and return a subclass that always compares
        equal, to avoid creating a new migration when the runtime storage
        class changes.
"""
try:
imported_storage_class = import_string(
dotted_path=self.dotted_path
)
except Exception as exception:
message = self.error_message or _(
'Unable to initialize storage: %(name)s. Check the storage '
'backend dotted path and arguments.'
) % {
'name': self.name
}
logger.fatal(message)
raise TypeError(message) from exception
class DynamicStorageSubclass(imported_storage_class):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
def __eq__(self, other):
return True
def deconstruct(self):
return (
'mayan.apps.storage.classes.FakeStorageSubclass', (), {}
)
return DynamicStorageSubclass
def defined_storage_proxy_method(method_name):
def inner_function(self, *args, **kwargs):
return getattr(
DefinedStorage.get(name=self.name).get_storage_instance(), method_name
)(*args, **kwargs)
return inner_function
@deconstructible
class DefinedStorageLazy:
def __init__(self, name):
self.name = name
super().__init__()
delete = defined_storage_proxy_method(method_name='delete')
exists = defined_storage_proxy_method(method_name='exists')
generate_filename = defined_storage_proxy_method(
method_name='generate_filename'
)
open = defined_storage_proxy_method(method_name='open')
path = defined_storage_proxy_method(method_name='path')
save = defined_storage_proxy_method(method_name='save')
size = defined_storage_proxy_method(method_name='size')
class FakeStorageSubclass:
"""
Placeholder class to allow serializing the real storage subclass to
support migrations.
"""
def __eq__(self, other):
return True
class PassthroughStorage(Storage):
def __init__(self, *args, **kwargs):
logger.debug(
            'initializing passthrough storage with: %s, %s', args, kwargs
)
next_storage_backend = kwargs.pop(
'next_storage_backend', DEFAULT_STORAGE_BACKEND
)
next_storage_backend_arguments = kwargs.pop(
'next_storage_backend_arguments', {}
)
self.next_storage_class = import_string(
dotted_path=next_storage_backend
)
self.next_storage_backend = self.next_storage_class(
**next_storage_backend_arguments
)
super().__init__(*args, **kwargs)
def _call_backend_method(self, method_name, kwargs):
return getattr(self.next_storage_backend, method_name)(**kwargs)
def delete(self, *args, **kwargs):
return self.next_storage_backend.delete(*args, **kwargs)
def exists(self, *args, **kwargs):
return self.next_storage_backend.exists(*args, **kwargs)
def path(self, *args, **kwargs):
return self.next_storage_backend.path(*args, **kwargs)
def size(self, *args, **kwargs):
return self.next_storage_backend.size(*args, **kwargs)
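# A hedged usage sketch (the storage name, label and location are hypothetical;
# ContentFile comes from django.core.files.base):
#
#   DefinedStorage(
#       dotted_path='django.core.files.storage.FileSystemStorage',
#       label=_('Example storage'),
#       name='example_app__storage',
#       kwargs={'location': '/tmp/example'},
#   )
#   storage = DefinedStorageLazy(name='example_app__storage')
#   storage.save(name='hello.txt', content=ContentFile(b'hello'))
#
# DefinedStorageLazy resolves the registered backend on every call, so the
# concrete storage class can change at runtime without generating migrations.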
|
site/flask/lib/python2.7/site-packages/whoosh/fields.py | theholyhades1/tartanHacks2015 | 319 | 12615407 | <reponame>theholyhades1/tartanHacks2015<gh_stars>100-1000
# Copyright 2007 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
""" Contains functions and classes related to fields.
"""
import datetime, fnmatch, re, struct, sys
from array import array
from decimal import Decimal
from whoosh import analysis, columns, formats
from whoosh.compat import u, b, PY3
from whoosh.compat import with_metaclass
from whoosh.compat import itervalues, xrange
from whoosh.compat import bytes_type, string_type, integer_types, text_type
from whoosh.system import emptybytes
from whoosh.system import pack_byte, unpack_byte
from whoosh.util.numeric import to_sortable, from_sortable
from whoosh.util.numeric import typecode_max, NaN
from whoosh.util.text import utf8encode, utf8decode
from whoosh.util.times import datetime_to_long, long_to_datetime
# Exceptions
class FieldConfigurationError(Exception):
pass
class UnknownFieldError(Exception):
pass
# Field Types
class FieldType(object):
"""Represents a field configuration.
The FieldType object supports the following attributes:
* format (formats.Format): the storage format for the field's contents.
* analyzer (analysis.Analyzer): the analyzer to use to turn text into
terms.
* vector (formats.Format): the storage format for the field's vectors
(forward index), or None if the field should not store vectors.
* scorable (boolean): whether searches against this field may be scored.
This controls whether the index stores per-document field lengths for
this field.
* stored (boolean): whether the content of this field is stored for each
document. For example, in addition to indexing the title of a document,
you usually want to store the title so it can be presented as part of
the search results.
* unique (boolean): whether this field's value is unique to each document.
For example, 'path' or 'ID'. IndexWriter.update_document() will use
fields marked as 'unique' to find the previous version of a document
being updated.
* multitoken_query is a string indicating what kind of query to use when
a "word" in a user query parses into multiple tokens. The string is
interpreted by the query parser. The strings understood by the default
query parser are "first" (use first token only), "and" (join the tokens
with an AND query), "or" (join the tokens with OR), "phrase" (join
the tokens with a phrase query), and "default" (use the query parser's
default join type).
The constructor for the base field type simply lets you supply your own
configured field format, vector format, and scorable and stored values.
Subclasses may configure some or all of this for you.
"""
analyzer = format = vector = scorable = stored = unique = None
indexed = True
multitoken_query = "default"
sortable_typecode = None
spelling = False
column_type = None
def __init__(self, format, analyzer, vector=None, scorable=False,
stored=False, unique=False, multitoken_query="default",
sortable=False):
assert isinstance(format, formats.Format)
self.format = format
self.analyzer = analyzer
self.vector = vector
self.scorable = scorable
self.stored = stored
self.unique = unique
self.multitoken_query = multitoken_query
self.set_sortable(sortable)
def __repr__(self):
temp = "%s(format=%r, vector=%r, scorable=%s, stored=%s, unique=%s)"
return temp % (self.__class__.__name__, self.format, self.vector,
self.scorable, self.stored, self.unique)
def __eq__(self, other):
return all((isinstance(other, FieldType),
(self.format == other.format),
(self.vector == other.vector),
(self.scorable == other.scorable),
(self.stored == other.stored),
(self.unique == other.unique),
(self.column_type == other.column_type)))
def __ne__(self, other):
return not(self.__eq__(other))
# Column methods
def set_sortable(self, sortable):
if sortable:
if isinstance(sortable, columns.Column):
self.column_type = sortable
else:
self.column_type = self.default_column()
else:
self.column_type = None
def default_column(self):
return columns.VarBytesColumn()
# Methods for converting input into indexing information
def index(self, value, **kwargs):
"""Returns an iterator of (btext, frequency, weight, encoded_value)
tuples for each unique word in the input value.
The default implementation uses the ``analyzer`` attribute to tokenize
the value into strings, then encodes them into bytes using UTF-8.
"""
if not self.format:
raise Exception("%s field %r cannot index without a format"
% (self.__class__.__name__, self))
if not isinstance(value, (text_type, list, tuple)):
raise ValueError("%r is not unicode or sequence" % value)
assert isinstance(self.format, formats.Format)
if "mode" not in kwargs:
kwargs["mode"] = "index"
word_values = self.format.word_values
ana = self.analyzer
for tstring, freq, wt, vbytes in word_values(value, ana, **kwargs):
yield (utf8encode(tstring)[0], freq, wt, vbytes)
def process_text(self, qstring, mode='', **kwargs):
"""Analyzes the given string and returns an iterator of token texts.
>>> field = fields.TEXT()
>>> list(field.process_text("The ides of March"))
["ides", "march"]
"""
if not self.format:
raise Exception("%s field has no format" % self)
return (t.text for t in self.tokenize(qstring, mode=mode, **kwargs))
def tokenize(self, value, **kwargs):
"""Analyzes the given string and returns an iterator of Token objects
(note: for performance reasons, actually the same token yielded over
and over with different attributes).
"""
if not self.analyzer:
raise Exception("%s field has no analyzer" % self.__class__)
return self.analyzer(value, **kwargs)
def to_bytes(self, value):
"""Returns a bytes representation of the given value, appropriate to be
written to disk. The default implementation assumes a unicode value and
encodes it using UTF-8.
"""
if isinstance(value, (list, tuple)):
value = value[0]
if not isinstance(value, bytes_type):
value = utf8encode(value)[0]
return value
def to_column_value(self, value):
"""Returns an object suitable to be inserted into the document values
column for this field. The default implementation simply calls
``self.to_bytes(value)``.
"""
return self.to_bytes(value)
def from_column_value(self, value):
return self.from_bytes(value)
def from_bytes(self, bs):
return utf8decode(bs)[0]
# Methods related to query parsing
def self_parsing(self):
"""Subclasses should override this method to return True if they want
the query parser to call the field's ``parse_query()`` method instead
of running the analyzer on text in this field. This is useful where
the field needs full control over how queries are interpreted, such
as in the numeric field type.
"""
return False
def parse_query(self, fieldname, qstring, boost=1.0):
"""When ``self_parsing()`` returns True, the query parser will call
this method to parse basic query text.
"""
raise NotImplementedError(self.__class__.__name__)
def parse_range(self, fieldname, start, end, startexcl, endexcl,
boost=1.0):
"""When ``self_parsing()`` returns True, the query parser will call
this method to parse range query text. If this method returns None
instead of a query object, the parser will fall back to parsing the
start and end terms using process_text().
"""
return None
    # Methods related to sorting
def sortable_terms(self, ixreader, fieldname):
"""Returns an iterator of the "sortable" tokens in the given reader and
field. These values can be used for sorting. The default implementation
simply returns all tokens in the field.
This can be overridden by field types such as NUMERIC where some values
in a field are not useful for sorting.
"""
return ixreader.lexicon(fieldname)
# Methods related to spelling
def separate_spelling(self):
"""Returns True if this field requires special handling of the words
that go into the field's word graph.
The default behavior is to return True if the field is "spelled" but
not indexed, or if the field is indexed but the analyzer has
morphological transformations (e.g. stemming). Exotic field types may
need to override this behavior.
This method should return False if the field does not support spelling
(i.e. the ``spelling`` attribute is False).
"""
return self.spelling and self.analyzer.has_morph()
def spellable_words(self, value):
"""Returns an iterator of each unique word (in sorted order) in the
input value, suitable for inclusion in the field's word graph.
The default behavior is to call the field analyzer with the keyword
argument ``no_morph=True``, which should make the analyzer skip any
morphological transformation filters (e.g. stemming) to preserve the
original form of the words. Exotic field types may need to override
this behavior.
"""
if isinstance(value, (list, tuple)):
words = value
else:
words = [token.text for token
in self.analyzer(value, no_morph=True)]
return iter(sorted(set(words)))
def has_morph(self):
"""Returns True if this field by default performs morphological
transformations on its terms, e.g. stemming.
"""
if self.analyzer:
return self.analyzer.has_morph()
else:
return False
# Methods related to the posting/vector formats
def supports(self, name):
"""Returns True if the underlying format supports the given posting
value type.
>>> field = TEXT()
>>> field.supports("positions")
True
>>> field.supports("characters")
False
"""
return self.format.supports(name)
def clean(self):
"""Clears any cached information in the field and any child objects.
"""
if self.format and hasattr(self.format, "clean"):
self.format.clean()
if self.vector and hasattr(self.vector, "clean"):
self.vector.clean()
# Event methods
def on_add(self, schema, fieldname):
pass
def on_remove(self, schema, fieldname):
pass
class ID(FieldType):
"""Configured field type that indexes the entire value of the field as one
token. This is useful for data you don't want to tokenize, such as the path
of a file.
"""
__inittypes__ = dict(stored=bool, unique=bool, field_boost=float)
def __init__(self, stored=False, unique=False, field_boost=1.0,
spelling=False, sortable=False, analyzer=None):
"""
:param stored: Whether the value of this field is stored with the
document.
"""
self.analyzer = analyzer or analysis.IDAnalyzer()
self.format = formats.Existence(field_boost=field_boost)
self.stored = stored
self.unique = unique
self.spelling = spelling
self.set_sortable(sortable)
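# A hedged usage sketch (field names are illustrative):
#
#   from whoosh.fields import Schema, ID, TEXT
#   schema = Schema(path=ID(stored=True, unique=True), body=TEXT)
#
# ID indexes the whole value as a single token, so it suits paths, URLs and
# primary keys; free text belongs in an analyzed type such as TEXT.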
class IDLIST(FieldType):
"""Configured field type for fields containing IDs separated by whitespace
and/or punctuation (or anything else, using the expression param).
"""
__inittypes__ = dict(stored=bool, unique=bool, expression=bool,
field_boost=float)
def __init__(self, stored=False, unique=False, expression=None,
field_boost=1.0, spelling=False):
"""
:param stored: Whether the value of this field is stored with the
document.
:param unique: Whether the value of this field is unique per-document.
:param expression: The regular expression object to use to extract
tokens. The default expression breaks tokens on CRs, LFs, tabs,
spaces, commas, and semicolons.
"""
expression = expression or re.compile(r"[^\r\n\t ,;]+")
self.analyzer = analysis.RegexAnalyzer(expression=expression)
self.format = formats.Existence(field_boost=field_boost)
self.stored = stored
self.unique = unique
self.spelling = spelling
class NUMERIC(FieldType):
"""Special field type that lets you index integer or floating point
numbers in relatively short fixed-width terms. The field converts numbers
to sortable bytes for you before indexing.
You specify the numeric type of the field (``int`` or ``float``) when you
create the ``NUMERIC`` object. The default is ``int``. For ``int``, you can
specify a size in bits (``32`` or ``64``). For both ``int`` and ``float``
you can specify a ``signed`` keyword argument (default is ``True``).
>>> schema = Schema(path=STORED, position=NUMERIC(int, 64, signed=False))
>>> ix = storage.create_index(schema)
>>> with ix.writer() as w:
... w.add_document(path="/a", position=5820402204)
...
You can also use the NUMERIC field to store Decimal instances by specifying
a type of ``int`` or ``long`` and the ``decimal_places`` keyword argument.
This simply multiplies each number by ``(10 ** decimal_places)`` before
    storing it as an integer. Of course this may throw away decimal precision
(by truncating, not rounding) and imposes the same maximum value limits as
``int``/``long``, but these may be acceptable for certain applications.
>>> from decimal import Decimal
>>> schema = Schema(path=STORED, position=NUMERIC(int, decimal_places=4))
>>> ix = storage.create_index(schema)
>>> with ix.writer() as w:
... w.add_document(path="/a", position=Decimal("123.45")
...
"""
def __init__(self, numtype=int, bits=32, stored=False, unique=False,
field_boost=1.0, decimal_places=0, shift_step=4, signed=True,
sortable=False, default=None):
"""
:param numtype: the type of numbers that can be stored in this field,
either ``int``, ``float``. If you use ``Decimal``,
use the ``decimal_places`` argument to control how many decimal
places the field will store.
:param bits: When ``numtype`` is ``int``, the number of bits to use to
store the number: 8, 16, 32, or 64.
:param stored: Whether the value of this field is stored with the
document.
:param unique: Whether the value of this field is unique per-document.
:param decimal_places: specifies the number of decimal places to save
when storing Decimal instances. If you set this, you will always
get Decimal instances back from the field.
        :param shift_step: The number of bits of precision to shift away at
each tiered indexing level. Values should generally be 1-8. Lower
values yield faster searches but take up more space. A value
of `0` means no tiered indexing.
:param signed: Whether the numbers stored in this field may be
negative.
"""
# Allow users to specify strings instead of Python types in case
# docstring isn't clear
if numtype == "int":
numtype = int
if numtype == "float":
numtype = float
# Raise an error if the user tries to use a type other than int or
# float
if numtype is Decimal:
numtype = int
if not decimal_places:
raise TypeError("To store Decimal instances, you must set the "
"decimal_places argument")
elif numtype not in (int, float):
raise TypeError("Can't use %r as a type, use int or float"
% numtype)
# Sanity check
if numtype is float and decimal_places:
raise Exception("A float type and decimal_places argument %r are "
"incompatible" % decimal_places)
intsizes = [8, 16, 32, 64]
intcodes = ["B", "H", "I", "Q"]
# Set up field configuration based on type and size
if numtype is float:
bits = 64 # Floats are converted to 64 bit ints
else:
if bits not in intsizes:
raise Exception("Invalid bits %r, use 8, 16, 32, or 64"
% bits)
# Type code for the *sortable* representation
self.sortable_typecode = intcodes[intsizes.index(bits)]
self._struct = struct.Struct(">" + self.sortable_typecode)
self.numtype = numtype
self.bits = bits
self.stored = stored
self.unique = unique
self.decimal_places = decimal_places
self.shift_step = shift_step
self.signed = signed
self.analyzer = analysis.IDAnalyzer()
self.format = formats.Existence(field_boost=field_boost)
self.min_value, self.max_value = self._min_max()
# Column configuration
if default is None:
if numtype is int:
default = typecode_max[self.sortable_typecode]
else:
default = NaN
elif not self.is_valid(default):
raise Exception("The default %r is not a valid number for this "
"field" % default)
self.default = default
self.set_sortable(sortable)
def __getstate__(self):
d = self.__dict__.copy()
if "_struct" in d:
del d["_struct"]
return d
def __setstate__(self, d):
self.__dict__.update(d)
self._struct = struct.Struct(">" + self.sortable_typecode)
if "min_value" not in d:
d["min_value"], d["max_value"] = self._min_max()
def _min_max(self):
numtype = self.numtype
bits = self.bits
signed = self.signed
# Calculate the minimum and maximum possible values for error checking
min_value = from_sortable(numtype, bits, signed, 0)
max_value = from_sortable(numtype, bits, signed, 2 ** bits - 1)
return min_value, max_value
def default_column(self):
return columns.NumericColumn(self.sortable_typecode,
default=self.default)
def is_valid(self, x):
try:
x = self.to_bytes(x)
except ValueError:
return False
except OverflowError:
return False
return True
def index(self, num, **kwargs):
# If the user gave us a list of numbers, recurse on the list
if isinstance(num, (list, tuple)):
for n in num:
for item in self.index(n):
yield item
return
# word, freq, weight, valuestring
if self.shift_step:
for shift in xrange(0, self.bits, self.shift_step):
yield (self.to_bytes(num, shift), 1, 1.0, emptybytes)
else:
yield (self.to_bytes(num), 1, 1.0, emptybytes)
def prepare_number(self, x):
if x == emptybytes or x is None:
return x
dc = self.decimal_places
if dc and isinstance(x, (string_type, Decimal)):
x = Decimal(x) * (10 ** dc)
elif isinstance(x, Decimal):
raise TypeError("Can't index a Decimal object unless you specified "
"decimal_places on the field")
try:
x = self.numtype(x)
except OverflowError:
raise ValueError("Value %r overflowed number type %r"
% (x, self.numtype))
if x < self.min_value or x > self.max_value:
raise ValueError("Numeric field value %s out of range [%s, %s]"
% (x, self.min_value, self.max_value))
return x
def unprepare_number(self, x):
dc = self.decimal_places
if dc:
s = str(x)
x = Decimal(s[:-dc] + "." + s[-dc:])
return x
def to_column_value(self, x):
if isinstance(x, (list, tuple, array)):
x = x[0]
x = self.prepare_number(x)
return to_sortable(self.numtype, self.bits, self.signed, x)
def from_column_value(self, x):
x = from_sortable(self.numtype, self.bits, self.signed, x)
return self.unprepare_number(x)
def to_bytes(self, x, shift=0):
# Try to avoid re-encoding; this sucks because on Python 2 we can't
# tell the difference between a string and encoded bytes, so we have
# to require the user use unicode when they mean string
if isinstance(x, bytes_type):
return x
if x == emptybytes or x is None:
return self.sortable_to_bytes(0)
x = self.prepare_number(x)
x = to_sortable(self.numtype, self.bits, self.signed, x)
return self.sortable_to_bytes(x, shift)
def sortable_to_bytes(self, x, shift=0):
if shift:
x >>= shift
return pack_byte(shift) + self._struct.pack(x)
def from_bytes(self, bs):
x = self._struct.unpack(bs[1:])[0]
x = from_sortable(self.numtype, self.bits, self.signed, x)
x = self.unprepare_number(x)
return x
def process_text(self, text, **kwargs):
return (self.to_bytes(text),)
def self_parsing(self):
return True
def parse_query(self, fieldname, qstring, boost=1.0):
from whoosh import query
from whoosh.qparser.common import QueryParserError
if qstring == "*":
return query.Every(fieldname, boost=boost)
if not self.is_valid(qstring):
raise QueryParserError("%r is not a valid number" % qstring)
token = self.to_bytes(qstring)
return query.Term(fieldname, token, boost=boost)
def parse_range(self, fieldname, start, end, startexcl, endexcl,
boost=1.0):
from whoosh import query
from whoosh.qparser.common import QueryParserError
if start is not None:
if not self.is_valid(start):
raise QueryParserError("Range start %r is not a valid number"
% start)
start = self.prepare_number(start)
if end is not None:
if not self.is_valid(end):
raise QueryParserError("Range end %r is not a valid number"
% end)
end = self.prepare_number(end)
return query.NumericRange(fieldname, start, end, startexcl, endexcl,
boost=boost)
def sortable_terms(self, ixreader, fieldname):
zero = b("\x00")
for token in ixreader.lexicon(fieldname):
if token[0:1] != zero:
# Only yield the full-precision values
break
yield token
class DATETIME(NUMERIC):
"""Special field type that lets you index datetime objects. The field
converts the datetime objects to sortable text for you before indexing.
Since this field is based on Python's datetime module it shares all the
limitations of that module, such as the inability to represent dates before
year 1 in the proleptic Gregorian calendar. However, since this field
stores datetimes as an integer number of microseconds, it could easily
represent a much wider range of dates if the Python datetime implementation
ever supports them.
>>> schema = Schema(path=STORED, date=DATETIME)
>>> ix = storage.create_index(schema)
>>> w = ix.writer()
>>> w.add_document(path="/a", date=datetime.now())
>>> w.commit()
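    An illustrative sketch of querying such a field (assumes the index built
    above and whoosh's standard query parser; the date is written in the
    compact form accepted by this field's parser, e.g. ``201203`` for
    March 2012):
    >>> from whoosh.qparser import QueryParser
    >>> qp = QueryParser("date", ix.schema)
    >>> q = qp.parse(u"date:201203")
    >>> with ix.searcher() as s:
    ...     results = s.search(q)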
"""
__inittypes__ = dict(stored=bool, unique=bool)
def __init__(self, stored=False, unique=False, sortable=False):
"""
:param stored: Whether the value of this field is stored with the
document.
:param unique: Whether the value of this field is unique per-document.
"""
super(DATETIME, self).__init__(int, 64, stored=stored,
unique=unique, shift_step=8,
sortable=sortable)
def prepare_datetime(self, x):
from whoosh.util.times import floor
if isinstance(x, text_type):
# For indexing, support same strings as for query parsing --
# convert unicode to datetime object
x = self._parse_datestring(x)
x = floor(x) # this makes most sense (unspecified = lowest)
if isinstance(x, datetime.datetime):
return datetime_to_long(x)
elif isinstance(x, bytes_type):
return x
else:
raise Exception("%r is not a datetime" % (x,))
def to_column_value(self, x):
if isinstance(x, bytes_type):
raise Exception("%r is not a datetime" % (x,))
if isinstance(x, (list, tuple)):
x = x[0]
return self.prepare_datetime(x)
def from_column_value(self, x):
return long_to_datetime(x)
def to_bytes(self, x, shift=0):
x = self.prepare_datetime(x)
return NUMERIC.to_bytes(self, x, shift=shift)
def from_bytes(self, bs):
x = NUMERIC.from_bytes(self, bs)
return long_to_datetime(x)
def _parse_datestring(self, qstring):
# This method parses a very simple datetime representation of the form
# YYYY[MM[DD[hh[mm[ss[uuuuuu]]]]]]
from whoosh.util.times import adatetime, fix, is_void
qstring = qstring.replace(" ", "").replace("-", "").replace(".", "")
year = month = day = hour = minute = second = microsecond = None
if len(qstring) >= 4:
year = int(qstring[:4])
if len(qstring) >= 6:
month = int(qstring[4:6])
if len(qstring) >= 8:
day = int(qstring[6:8])
if len(qstring) >= 10:
hour = int(qstring[8:10])
if len(qstring) >= 12:
minute = int(qstring[10:12])
if len(qstring) >= 14:
second = int(qstring[12:14])
if len(qstring) == 20:
microsecond = int(qstring[14:])
at = fix(adatetime(year, month, day, hour, minute, second,
microsecond))
if is_void(at):
raise Exception("%r is not a parseable date" % qstring)
return at
def parse_query(self, fieldname, qstring, boost=1.0):
from whoosh import query
from whoosh.util.times import is_ambiguous
try:
at = self._parse_datestring(qstring)
except:
e = sys.exc_info()[1]
return query.error_query(e)
if is_ambiguous(at):
startnum = datetime_to_long(at.floor())
endnum = datetime_to_long(at.ceil())
return query.NumericRange(fieldname, startnum, endnum)
else:
return query.Term(fieldname, at, boost=boost)
def parse_range(self, fieldname, start, end, startexcl, endexcl,
boost=1.0):
from whoosh import query
if start is None and end is None:
return query.Every(fieldname, boost=boost)
if start is not None:
startdt = self._parse_datestring(start).floor()
start = datetime_to_long(startdt)
if end is not None:
enddt = self._parse_datestring(end).ceil()
end = datetime_to_long(enddt)
return query.NumericRange(fieldname, start, end, boost=boost)
class BOOLEAN(FieldType):
"""Special field type that lets you index boolean values (True and False).
The field converts the boolean values to text for you before indexing.
>>> schema = Schema(path=STORED, done=BOOLEAN)
>>> ix = storage.create_index(schema)
>>> w = ix.writer()
>>> w.add_document(path="/a", done=False)
>>> w.commit()
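    An illustrative sketch of querying the field (assumes the index built
    above; the accepted true/false strings are the ones in ``trues`` and
    ``falses`` below):
    >>> from whoosh.qparser import QueryParser
    >>> qp = QueryParser("done", ix.schema)
    >>> q = qp.parse(u"done:no")
    >>> with ix.searcher() as s:
    ...     results = s.search(q)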
"""
bytestrings = (b("f"), b("t"))
trues = frozenset(u("t true yes 1").split())
falses = frozenset(u("f false no 0").split())
__inittypes__ = dict(stored=bool, field_boost=float)
def __init__(self, stored=False, field_boost=1.0):
"""
:param stored: Whether the value of this field is stored with the
document.
"""
self.stored = stored
self.field_boost = field_boost
self.format = formats.Existence(field_boost=field_boost)
def _obj_to_bool(self, x):
# We special case strings such as "true", "false", "yes", "no", but
# otherwise call bool() on the query value. This lets you pass objects
# as query values and do the right thing.
if isinstance(x, string_type) and x.lower() in self.trues:
x = True
elif isinstance(x, string_type) and x.lower() in self.falses:
x = False
else:
x = bool(x)
return x
def to_bytes(self, x):
if isinstance(x, bytes_type):
return x
elif isinstance(x, string_type):
x = x.lower() in self.trues
else:
x = bool(x)
bs = self.bytestrings[int(x)]
return bs
def index(self, bit, **kwargs):
if isinstance(bit, string_type):
bit = bit.lower() in self.trues
else:
bit = bool(bit)
# word, freq, weight, valuestring
return [(self.bytestrings[int(bit)], 1, 1.0, emptybytes)]
def self_parsing(self):
return True
def parse_query(self, fieldname, qstring, boost=1.0):
from whoosh import query
if qstring == "*":
return query.Every(fieldname, boost=boost)
return query.Term(fieldname, self._obj_to_bool(qstring), boost=boost)
class STORED(FieldType):
"""Configured field type for fields you want to store but not index.
"""
indexed = False
stored = True
def __init__(self):
pass
class COLUMN(FieldType):
"""Configured field type for fields you want to store as a per-document
value column but not index.
"""
indexed = False
stored = False
def __init__(self, columnobj=None):
if columnobj is None:
columnobj = columns.VarBytesColumn()
if not isinstance(columnobj, columns.Column):
raise TypeError("%r is not a column object" % (columnobj,))
self.column_type = columnobj
def to_bytes(self, v):
return v
def from_bytes(self, b):
return b
class KEYWORD(FieldType):
"""Configured field type for fields containing space-separated or
comma-separated keyword-like data (such as tags). The default is to not
store positional information (so phrase searching is not allowed in this
field) and to not make the field scorable.
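    For example (an illustrative sketch only):
    >>> schema = Schema(path=STORED, tags=KEYWORD(stored=True, lowercase=True,
    ...                                           commas=True))
    >>> ix = storage.create_index(schema)
    >>> w = ix.writer()
    >>> w.add_document(path="/a", tags=u"Search, Index, Whoosh")
    >>> w.commit()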
"""
__inittypes__ = dict(stored=bool, lowercase=bool, commas=bool,
scorable=bool, unique=bool, field_boost=float)
def __init__(self, stored=False, lowercase=False, commas=False,
vector=None, scorable=False, unique=False, field_boost=1.0,
spelling=False, sortable=False):
"""
:param stored: Whether to store the value of the field with the
document.
        :param commas: Whether this is a comma-separated field. If this is False
(the default), it is treated as a space-separated field.
:param scorable: Whether this field is scorable.
"""
self.analyzer = analysis.KeywordAnalyzer(lowercase=lowercase,
commas=commas)
self.format = formats.Frequency(field_boost=field_boost)
self.scorable = scorable
self.stored = stored
self.unique = unique
self.spelling = spelling
if vector:
if type(vector) is type:
vector = vector()
elif isinstance(vector, formats.Format):
pass
else:
vector = self.format
else:
vector = None
self.vector = vector
if sortable:
self.column_type = self.default_column()
class TEXT(FieldType):
"""Configured field type for text fields (for example, the body text of an
article). The default is to store positional information to allow phrase
searching. This field type is always scorable.
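    For example (an illustrative sketch; ``StemmingAnalyzer`` is assumed to be
    available from ``whoosh.analysis``):
    >>> from whoosh.analysis import StemmingAnalyzer
    >>> schema = Schema(title=TEXT(stored=True),
    ...                 body=TEXT(analyzer=StemmingAnalyzer()))
    >>> ix = storage.create_index(schema)
    >>> w = ix.writer()
    >>> w.add_document(title=u"First", body=u"Hello there everybody")
    >>> w.commit()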
"""
__inittypes__ = dict(analyzer=analysis.Analyzer, phrase=bool,
vector=object, stored=bool, field_boost=float)
def __init__(self, analyzer=None, phrase=True, chars=False, vector=None,
stored=False, field_boost=1.0, multitoken_query="default",
spelling=False, sortable=False, lang=None):
"""
:param analyzer: The analysis.Analyzer to use to index the field
contents. See the analysis module for more information. If you omit
this argument, the field uses analysis.StandardAnalyzer.
        :param phrase: Whether to store positional information to allow phrase
searching.
:param chars: Whether to store character ranges along with positions.
If this is True, "phrase" is also implied.
:param vector: A :class:`whoosh.formats.Format` object to use to store
term vectors, or ``True`` to store vectors using the same format as
the inverted index, or ``None`` or ``False`` to not store vectors.
By default, fields do not store term vectors.
:param stored: Whether to store the value of this field with the
document. Since this field type generally contains a lot of text,
you should avoid storing it with the document unless you need to,
for example to allow fast excerpts in the search results.
:param spelling: Whether to generate word graphs for this field to make
spelling suggestions much faster.
:param sortable: If True, make this field sortable using the default
column type. If you pass a :class:`whoosh.columns.Column` instance
instead of True, the field will use the given column type.
        :param lang: automatically configure a
:class:`whoosh.analysis.LanguageAnalyzer` for the given language.
This is ignored if you also specify an ``analyzer``.
"""
if analyzer:
self.analyzer = analyzer
elif lang:
self.analyzer = analysis.LanguageAnalyzer(lang)
else:
self.analyzer = analysis.StandardAnalyzer()
if chars:
formatclass = formats.Characters
elif phrase:
formatclass = formats.Positions
else:
formatclass = formats.Frequency
self.format = formatclass(field_boost=field_boost)
if vector:
if type(vector) is type:
vector = vector()
elif isinstance(vector, formats.Format):
pass
else:
vector = formatclass()
else:
vector = None
self.vector = vector
if sortable:
if isinstance(sortable, columns.Column):
self.column_type = sortable
else:
self.column_type = columns.VarBytesColumn()
else:
self.column_type = None
self.multitoken_query = multitoken_query
self.scorable = True
self.stored = stored
self.spelling = spelling
class NGRAM(FieldType):
"""Configured field that indexes text as N-grams. For example, with a field
type NGRAM(3,4), the value "hello" will be indexed as tokens
"hel", "hell", "ell", "ello", "llo". This field type chops the entire text
into N-grams, including whitespace and punctuation. See :class:`NGRAMWORDS`
for a field type that breaks the text into words first before chopping the
words into N-grams.
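    For example (an illustrative sketch only):
    >>> schema = Schema(path=STORED, content=NGRAM(minsize=3, maxsize=4))
    >>> ix = storage.create_index(schema)
    >>> w = ix.writer()
    >>> w.add_document(path="/a", content=u"hello")
    >>> w.commit()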
"""
__inittypes__ = dict(minsize=int, maxsize=int, stored=bool,
field_boost=float, queryor=bool, phrase=bool)
scorable = True
def __init__(self, minsize=2, maxsize=4, stored=False, field_boost=1.0,
queryor=False, phrase=False, sortable=False):
"""
:param minsize: The minimum length of the N-grams.
:param maxsize: The maximum length of the N-grams.
:param stored: Whether to store the value of this field with the
document. Since this field type generally contains a lot of text,
you should avoid storing it with the document unless you need to,
for example to allow fast excerpts in the search results.
:param queryor: if True, combine the N-grams with an Or query. The
default is to combine N-grams with an And query.
:param phrase: store positions on the N-grams to allow exact phrase
searching. The default is off.
"""
formatclass = formats.Frequency
if phrase:
formatclass = formats.Positions
self.analyzer = analysis.NgramAnalyzer(minsize, maxsize)
self.format = formatclass(field_boost=field_boost)
self.stored = stored
self.queryor = queryor
self.set_sortable(sortable)
def self_parsing(self):
return True
def parse_query(self, fieldname, qstring, boost=1.0):
from whoosh import query
terms = [query.Term(fieldname, g)
for g in self.process_text(qstring, mode='query')]
cls = query.Or if self.queryor else query.And
return cls(terms, boost=boost)
class NGRAMWORDS(NGRAM):
"""Configured field that chops text into words using a tokenizer,
lowercases the words, and then chops the words into N-grams.
"""
__inittypes__ = dict(minsize=int, maxsize=int, stored=bool,
field_boost=float, tokenizer=analysis.Tokenizer,
at=str, queryor=bool)
scorable = True
def __init__(self, minsize=2, maxsize=4, stored=False, field_boost=1.0,
tokenizer=None, at=None, queryor=False, sortable=False):
"""
:param minsize: The minimum length of the N-grams.
:param maxsize: The maximum length of the N-grams.
:param stored: Whether to store the value of this field with the
document. Since this field type generally contains a lot of text,
you should avoid storing it with the document unless you need to,
for example to allow fast excerpts in the search results.
:param tokenizer: an instance of :class:`whoosh.analysis.Tokenizer`
used to break the text into words.
:param at: if 'start', only takes N-grams from the start of the word.
If 'end', only takes N-grams from the end. Otherwise the default
is to take all N-grams from each word.
:param queryor: if True, combine the N-grams with an Or query. The
default is to combine N-grams with an And query.
"""
self.analyzer = analysis.NgramWordAnalyzer(minsize, maxsize, tokenizer,
at=at)
self.format = formats.Frequency(field_boost=field_boost)
self.stored = stored
self.queryor = queryor
self.set_sortable(sortable)
# Schema class
class MetaSchema(type):
def __new__(cls, name, bases, attrs):
super_new = super(MetaSchema, cls).__new__
if not any(b for b in bases if isinstance(b, MetaSchema)):
# If this isn't a subclass of MetaSchema, don't do anything special
return super_new(cls, name, bases, attrs)
# Create the class
special_attrs = {}
for key in list(attrs.keys()):
if key.startswith("__"):
special_attrs[key] = attrs.pop(key)
new_class = super_new(cls, name, bases, special_attrs)
fields = {}
for b in bases:
if hasattr(b, "_clsfields"):
fields.update(b._clsfields)
fields.update(attrs)
new_class._clsfields = fields
return new_class
def schema(self):
return Schema(**self._clsfields)
class Schema(object):
"""Represents the collection of fields in an index. Maps field names to
FieldType objects which define the behavior of each field.
Low-level parts of the index use field numbers instead of field names for
compactness. This class has several methods for converting between the
field name, field number, and field object itself.
"""
def __init__(self, **fields):
""" All keyword arguments to the constructor are treated as fieldname =
fieldtype pairs. The fieldtype can be an instantiated FieldType object,
or a FieldType sub-class (in which case the Schema will instantiate it
with the default constructor before adding it).
For example::
s = Schema(content = TEXT,
title = TEXT(stored = True),
tags = KEYWORD(stored = True))
"""
self._fields = {}
self._dyn_fields = {}
for name in sorted(fields.keys()):
self.add(name, fields[name])
def copy(self):
"""Returns a shallow copy of the schema. The field instances are not
deep copied, so they are shared between schema copies.
"""
return self.__class__(**self._fields)
def __eq__(self, other):
return (other.__class__ is self.__class__
and list(self.items()) == list(other.items()))
def __ne__(self, other):
return not(self.__eq__(other))
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.names())
def __iter__(self):
"""Returns the field objects in this schema.
"""
return iter(self._fields.values())
def __getitem__(self, name):
"""Returns the field associated with the given field name.
"""
if name in self._fields:
return self._fields[name]
for expr, fieldtype in itervalues(self._dyn_fields):
if expr.match(name):
return fieldtype
raise KeyError("No field named %r" % (name,))
def __len__(self):
"""Returns the number of fields in this schema.
"""
return len(self._fields)
def __contains__(self, fieldname):
"""Returns True if a field by the given name is in this schema.
"""
# Defined in terms of __getitem__ so that there's only one method to
# override to provide dynamic fields
try:
field = self[fieldname]
return field is not None
except KeyError:
return False
def items(self):
"""Returns a list of ("fieldname", field_object) pairs for the fields
in this schema.
"""
return sorted(self._fields.items())
def names(self, check_names=None):
"""Returns a list of the names of the fields in this schema.
:param check_names: (optional) sequence of field names to check
whether the schema accepts them as (dynamic) field names -
acceptable names will also be in the result list.
            Note: check_names may also contain static field names; these will
            not create duplicates in the result list. Unsupported names will
            not be in the result list.
"""
fieldnames = set(self._fields.keys())
if check_names is not None:
check_names = set(check_names) - fieldnames
fieldnames.update(fieldname for fieldname in check_names
if fieldname in self)
return sorted(fieldnames)
def clean(self):
for field in self:
field.clean()
def add(self, name, fieldtype, glob=False):
"""Adds a field to this schema.
:param name: The name of the field.
:param fieldtype: An instantiated fields.FieldType object, or a
FieldType subclass. If you pass an instantiated object, the schema
will use that as the field configuration for this field. If you
pass a FieldType subclass, the schema will automatically
instantiate it with the default constructor.
"""
# Check field name
if name.startswith("_"):
raise FieldConfigurationError("Field names cannot start with an "
"underscore")
if " " in name:
raise FieldConfigurationError("Field names cannot contain spaces")
if name in self._fields or (glob and name in self._dyn_fields):
raise FieldConfigurationError("Schema already has a field %r"
% name)
# If the user passed a type rather than an instantiated field object,
# instantiate it automatically
if type(fieldtype) is type:
try:
fieldtype = fieldtype()
except:
e = sys.exc_info()[1]
raise FieldConfigurationError("Error: %s instantiating field "
"%r: %r" % (e, name, fieldtype))
if not isinstance(fieldtype, FieldType):
raise FieldConfigurationError("%r is not a FieldType object"
% fieldtype)
if glob:
expr = re.compile(fnmatch.translate(name))
self._dyn_fields[name] = (expr, fieldtype)
else:
fieldtype.on_add(self, name)
self._fields[name] = fieldtype
def remove(self, fieldname):
if fieldname in self._fields:
self._fields[fieldname].on_remove(self, fieldname)
del self._fields[fieldname]
elif fieldname in self._dyn_fields:
del self._dyn_fields[fieldname]
else:
raise KeyError("No field named %r" % fieldname)
def has_vectored_fields(self):
"""Returns True if any of the fields in this schema store term vectors.
"""
return any(ftype.vector for ftype in self)
def has_scorable_fields(self):
return any(ftype.scorable for ftype in self)
def stored_names(self):
"""Returns a list of the names of fields that are stored.
"""
return [name for name, field in self.items() if field.stored]
def scorable_names(self):
"""Returns a list of the names of fields that store field
lengths.
"""
return [name for name, field in self.items() if field.scorable]
def vector_names(self):
"""Returns a list of the names of fields that store vectors.
"""
return [name for name, field in self.items() if field.vector]
def separate_spelling_names(self):
"""Returns a list of the names of fields that require special handling
for generating spelling graphs... either because they store graphs but
aren't indexed, or because the analyzer is stemmed.
"""
return [name for name, field in self.items()
if field.spelling and field.separate_spelling()]
class SchemaClass(with_metaclass(MetaSchema, Schema)):
"""Allows you to define a schema using declarative syntax, similar to
Django models::
class MySchema(SchemaClass):
path = ID
date = DATETIME
content = TEXT
You can use inheritance to share common fields between schemas::
class Parent(SchemaClass):
path = ID(stored=True)
date = DATETIME
class Child1(Parent):
            content = TEXT(phrase=False)
class Child2(Parent):
tags = KEYWORD
This class overrides ``__new__`` so instantiating your sub-class always
results in an instance of ``Schema``.
>>> class MySchema(SchemaClass):
... title = TEXT(stored=True)
... content = TEXT
...
>>> s = MySchema()
>>> type(s)
<class 'whoosh.fields.Schema'>
"""
def __new__(cls, *args, **kwargs):
obj = super(Schema, cls).__new__(Schema)
kw = getattr(cls, "_clsfields", {})
kw.update(kwargs)
obj.__init__(*args, **kw)
return obj
def ensure_schema(schema):
if isinstance(schema, type) and issubclass(schema, Schema):
schema = schema.schema()
if not isinstance(schema, Schema):
raise FieldConfigurationError("%r is not a Schema" % schema)
return schema
def merge_fielddict(d1, d2):
keyset = set(d1.keys()) | set(d2.keys())
out = {}
for name in keyset:
field1 = d1.get(name)
field2 = d2.get(name)
if field1 and field2 and field1 != field2:
raise Exception("Inconsistent field %r: %r != %r"
% (name, field1, field2))
out[name] = field1 or field2
return out
def merge_schema(s1, s2):
schema = Schema()
schema._fields = merge_fielddict(s1._fields, s2._fields)
schema._dyn_fields = merge_fielddict(s1._dyn_fields, s2._dyn_fields)
return schema
def merge_schemas(schemas):
schema = schemas[0]
for i in xrange(1, len(schemas)):
schema = merge_schema(schema, schemas[i])
return schema
|
tests/helpers/test_storage.py | MrDelik/core | 30,023 | 12615411 | <reponame>MrDelik/core
"""Tests for the storage helper."""
import asyncio
from datetime import timedelta
import json
from unittest.mock import Mock, patch
import pytest
from homeassistant.const import (
EVENT_HOMEASSISTANT_FINAL_WRITE,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import CoreState
from homeassistant.helpers import storage
from homeassistant.util import dt
from tests.common import async_fire_time_changed
MOCK_VERSION = 1
MOCK_VERSION_2 = 2
MOCK_MINOR_VERSION_1 = 1
MOCK_MINOR_VERSION_2 = 2
MOCK_KEY = "storage-test"
MOCK_DATA = {"hello": "world"}
MOCK_DATA2 = {"goodbye": "cruel world"}
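# The tests below exercise the storage.Store API; the pattern they rely on
# looks roughly like this (an illustrative sketch, not an executed test):
#
#     store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
#     await store.async_save({"hello": "world"})     # immediate, awaited write
#     data = await store.async_load()                # read (and migrate) data
#     store.async_delay_save(lambda: data, 1)        # schedule a delayed write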
@pytest.fixture
def store(hass):
"""Fixture of a store that prevents writing on Home Assistant stop."""
return storage.Store(hass, MOCK_VERSION, MOCK_KEY)
@pytest.fixture
def store_v_1_1(hass):
"""Fixture of a store that prevents writing on Home Assistant stop."""
return storage.Store(
hass, MOCK_VERSION, MOCK_KEY, minor_version=MOCK_MINOR_VERSION_1
)
@pytest.fixture
def store_v_1_2(hass):
"""Fixture of a store that prevents writing on Home Assistant stop."""
return storage.Store(
hass, MOCK_VERSION, MOCK_KEY, minor_version=MOCK_MINOR_VERSION_2
)
@pytest.fixture
def store_v_2_1(hass):
"""Fixture of a store that prevents writing on Home Assistant stop."""
return storage.Store(
hass, MOCK_VERSION_2, MOCK_KEY, minor_version=MOCK_MINOR_VERSION_1
)
async def test_loading(hass, store):
"""Test we can save and load data."""
await store.async_save(MOCK_DATA)
data = await store.async_load()
assert data == MOCK_DATA
async def test_custom_encoder(hass):
"""Test we can save and load data."""
class JSONEncoder(json.JSONEncoder):
"""Mock JSON encoder."""
def default(self, o):
"""Mock JSON encode method."""
return "9"
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY, encoder=JSONEncoder)
with pytest.raises(ValueError):
await store.async_save(Mock())
await store.async_save(object())
data = await store.async_load()
assert data == "9"
async def test_loading_non_existing(hass, store):
"""Test we can save and load data."""
with patch("homeassistant.util.json.open", side_effect=FileNotFoundError):
data = await store.async_load()
assert data is None
async def test_loading_parallel(hass, store, hass_storage, caplog):
"""Test we can save and load data."""
hass_storage[store.key] = {"version": MOCK_VERSION, "data": MOCK_DATA}
results = await asyncio.gather(store.async_load(), store.async_load())
assert results[0] == MOCK_DATA
assert results[0] is results[1]
assert caplog.text.count(f"Loading data for {store.key}")
async def test_saving_with_delay(hass, store, hass_storage):
"""Test saving data after a delay."""
store.async_delay_save(lambda: MOCK_DATA, 1)
assert store.key not in hass_storage
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": MOCK_DATA,
}
async def test_saving_on_final_write(hass, hass_storage):
"""Test delayed saves trigger when we quit Home Assistant."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
store.async_delay_save(lambda: MOCK_DATA, 5)
assert store.key not in hass_storage
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
hass.state = CoreState.stopping
await hass.async_block_till_done()
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert store.key not in hass_storage
hass.bus.async_fire(EVENT_HOMEASSISTANT_FINAL_WRITE)
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": MOCK_DATA,
}
async def test_not_delayed_saving_while_stopping(hass, hass_storage):
"""Test delayed saves don't write after the stop event has fired."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
hass.state = CoreState.stopping
store.async_delay_save(lambda: MOCK_DATA, 1)
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=2))
await hass.async_block_till_done()
assert store.key not in hass_storage
async def test_not_delayed_saving_after_stopping(hass, hass_storage):
"""Test delayed saves don't write after stop if issued before stopping Home Assistant."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
store.async_delay_save(lambda: MOCK_DATA, 10)
assert store.key not in hass_storage
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
hass.state = CoreState.stopping
await hass.async_block_till_done()
assert store.key not in hass_storage
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=15))
await hass.async_block_till_done()
assert store.key not in hass_storage
async def test_not_saving_while_stopping(hass, hass_storage):
"""Test saves don't write when stopping Home Assistant."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
hass.state = CoreState.stopping
await store.async_save(MOCK_DATA)
assert store.key not in hass_storage
async def test_loading_while_delay(hass, store, hass_storage):
"""Test we load new data even if not written yet."""
await store.async_save({"delay": "no"})
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
store.async_delay_save(lambda: {"delay": "yes"}, 1)
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
data = await store.async_load()
assert data == {"delay": "yes"}
async def test_writing_while_writing_delay(hass, store, hass_storage):
"""Test a write while a write with delay is active."""
store.async_delay_save(lambda: {"delay": "yes"}, 1)
assert store.key not in hass_storage
await store.async_save({"delay": "no"})
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
data = await store.async_load()
assert data == {"delay": "no"}
async def test_multiple_delay_save_calls(hass, store, hass_storage):
"""Test a write while a write with changing delays."""
store.async_delay_save(lambda: {"delay": "yes"}, 1)
store.async_delay_save(lambda: {"delay": "yes"}, 2)
store.async_delay_save(lambda: {"delay": "yes"}, 3)
assert store.key not in hass_storage
await store.async_save({"delay": "no"})
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
data = await store.async_load()
assert data == {"delay": "no"}
async def test_multiple_save_calls(hass, store, hass_storage):
"""Test multiple write tasks."""
assert store.key not in hass_storage
tasks = [store.async_save({"savecount": savecount}) for savecount in range(6)]
await asyncio.gather(*tasks)
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"savecount": 5},
}
data = await store.async_load()
assert data == {"savecount": 5}
async def test_migrator_no_existing_config(hass, store, hass_storage):
"""Test migrator with no existing config."""
with patch("os.path.isfile", return_value=False), patch.object(
store, "async_load", return_value={"cur": "config"}
):
data = await storage.async_migrator(hass, "old-path", store)
assert data == {"cur": "config"}
assert store.key not in hass_storage
async def test_migrator_existing_config(hass, store, hass_storage):
"""Test migrating existing config."""
with patch("os.path.isfile", return_value=True), patch("os.remove") as mock_remove:
data = await storage.async_migrator(
hass, "old-path", store, old_conf_load_func=lambda _: {"old": "config"}
)
assert len(mock_remove.mock_calls) == 1
assert data == {"old": "config"}
assert hass_storage[store.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"minor_version": 1,
"data": data,
}
async def test_migrator_transforming_config(hass, store, hass_storage):
"""Test migrating config to new format."""
async def old_conf_migrate_func(old_config):
"""Migrate old config to new format."""
return {"new": old_config["old"]}
with patch("os.path.isfile", return_value=True), patch("os.remove") as mock_remove:
data = await storage.async_migrator(
hass,
"old-path",
store,
old_conf_migrate_func=old_conf_migrate_func,
old_conf_load_func=lambda _: {"old": "config"},
)
assert len(mock_remove.mock_calls) == 1
assert data == {"new": "config"}
assert hass_storage[store.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"minor_version": 1,
"data": data,
}
async def test_minor_version_default(hass, store, hass_storage):
"""Test minor version default."""
await store.async_save(MOCK_DATA)
assert hass_storage[store.key]["minor_version"] == 1
async def test_minor_version(hass, store_v_1_2, hass_storage):
"""Test minor version."""
await store_v_1_2.async_save(MOCK_DATA)
assert hass_storage[store_v_1_2.key]["minor_version"] == MOCK_MINOR_VERSION_2
async def test_migrate_major_not_implemented_raises(hass, store, store_v_2_1):
"""Test migrating between major versions fails if not implemented."""
await store_v_2_1.async_save(MOCK_DATA)
with pytest.raises(NotImplementedError):
await store.async_load()
async def test_migrate_minor_not_implemented(
hass, hass_storage, store_v_1_1, store_v_1_2
):
"""Test migrating between minor versions does not fail if not implemented."""
assert store_v_1_1.key == store_v_1_2.key
await store_v_1_1.async_save(MOCK_DATA)
assert hass_storage[store_v_1_1.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"minor_version": MOCK_MINOR_VERSION_1,
"data": MOCK_DATA,
}
data = await store_v_1_2.async_load()
assert hass_storage[store_v_1_1.key]["data"] == data
await store_v_1_2.async_save(MOCK_DATA)
assert hass_storage[store_v_1_2.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"minor_version": MOCK_MINOR_VERSION_2,
"data": MOCK_DATA,
}
async def test_migration(hass, hass_storage, store_v_1_2):
"""Test migration."""
calls = 0
class CustomStore(storage.Store):
async def _async_migrate_func(
self, old_major_version, old_minor_version, old_data: dict
):
nonlocal calls
calls += 1
assert old_major_version == store_v_1_2.version
assert old_minor_version == store_v_1_2.minor_version
return old_data
await store_v_1_2.async_save(MOCK_DATA)
assert hass_storage[store_v_1_2.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"minor_version": MOCK_MINOR_VERSION_2,
"data": MOCK_DATA,
}
assert calls == 0
legacy_store = CustomStore(hass, 2, store_v_1_2.key, minor_version=1)
data = await legacy_store.async_load()
assert calls == 1
assert hass_storage[store_v_1_2.key]["data"] == data
await legacy_store.async_save(MOCK_DATA)
assert hass_storage[legacy_store.key] == {
"key": MOCK_KEY,
"version": 2,
"minor_version": 1,
"data": MOCK_DATA,
}
async def test_legacy_migration(hass, hass_storage, store_v_1_2):
"""Test legacy migration method signature."""
calls = 0
class LegacyStore(storage.Store):
async def _async_migrate_func(self, old_version, old_data: dict):
nonlocal calls
calls += 1
assert old_version == store_v_1_2.version
return old_data
await store_v_1_2.async_save(MOCK_DATA)
assert hass_storage[store_v_1_2.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"minor_version": MOCK_MINOR_VERSION_2,
"data": MOCK_DATA,
}
assert calls == 0
legacy_store = LegacyStore(hass, 2, store_v_1_2.key, minor_version=1)
data = await legacy_store.async_load()
assert calls == 1
assert hass_storage[store_v_1_2.key]["data"] == data
await legacy_store.async_save(MOCK_DATA)
assert hass_storage[legacy_store.key] == {
"key": MOCK_KEY,
"version": 2,
"minor_version": 1,
"data": MOCK_DATA,
}
async def test_changing_delayed_written_data(hass, store, hass_storage):
"""Test changing data that is written with delay."""
data_to_store = {"hello": "world"}
store.async_delay_save(lambda: data_to_store, 1)
assert store.key not in hass_storage
loaded_data = await store.async_load()
assert loaded_data == data_to_store
assert loaded_data is not data_to_store
loaded_data["hello"] = "earth"
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"hello": "world"},
}
|
scripts/extensions/imgliveuploader/app.py | AIllIll/hyperface-new | 184 | 12615428 | # -*- coding: utf-8 -*-
import base64
import cv2
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
import multiprocessing
import numpy as np
import os
# logging
from logging import getLogger, NullHandler, CRITICAL
logger = getLogger(__name__)
logger.addHandler(NullHandler())
# disable werkzeug logger
werkzeug_logger = getLogger('werkzeug')
werkzeug_logger.setLevel(CRITICAL)
# disable engineio logger
engineio_logger = getLogger('engineio')
engineio_logger.setLevel(CRITICAL)
# disable socketio logger
socketio_logger = getLogger('socketio')
socketio_logger.setLevel(CRITICAL)
IO_NAMESPACE = '/liveuploader'
ASYNC_MODE = 'eventlet'
def decodeimg(img):
'''decode from jpg/png base64 string image'''
try:
img = img[img.find(',') + 1:]
img = base64.decodestring(img.encode('ascii'))
img = np.fromstring(img, dtype=np.uint8)
img = cv2.imdecode(img, 1)
return img
except Exception:
logger.error('Failed to decodeimg()')
return None
def encodeimg(img, ext='.jpeg'):
try:
ret, img = cv2.imencode(ext, img)
if not ret:
raise
img = img.tostring()
img = base64.encodestring(img)
img = 'data:image/jpeg;base64,' + img.decode('ascii')
return img
except Exception:
logger.error('Failed to encodeimg()')
return None
def encodeImgElement(data, key):
try:
img = encodeimg(data[key])
if img is None:
raise Exception()
data[key] = img
except KeyError:
logger.error('No image data (key: %s)' % key)
except:
logger.error('Invalid image data (key: %s)' % key)
try:
data.pop(key)
except:
pass
def rotateImg(img, deg):
h, w = img.shape[:2]
M = cv2.getRotationMatrix2D((w / 2, h / 2), deg, 1.0)
rotated_img = cv2.warpAffine(img, M, (w, h))
return rotated_img
def rotateImgElement(data, key, deg):
try:
img = rotateImg(data[key], deg)
if img is None:
raise Exception()
data[key] = img
except KeyError:
logger.error('No image data (key: %s)' % key)
except:
logger.error('Invalid image data (key: %s)' % key)
try:
data.pop(key)
except:
pass
def new_server(request_queue, response_queue, stop_page, port, secret_key):
# create server
app = Flask(__name__, static_url_path='/static')
app.config['SECRET_KEY'] = secret_key
socketio = SocketIO(app, async_mode=ASYNC_MODE,
logger=False, engineio_logger=False)
# rooting
@app.route('/')
def __index():
logger.info('Render uploader page')
return render_template('index.html', script="index.js")
if stop_page:
@app.route('/stop')
def __stop():
socketio.stop()
logger.info('Server stop request')
return 'This server is stopped'
@socketio.on('connect', namespace=IO_NAMESPACE)
def __on_upload_connect():
logger.info('New live uploader connection is established')
@socketio.on('disconnect', namespace=IO_NAMESPACE)
def __on_upload_disconnect():
logger.info('Live uploader connection is closed')
@socketio.on('upload_img', namespace=IO_NAMESPACE)
def __on_upload_image(data):
logger.debug('New image is received')
# check need to output
if request_queue is None:
return
# decode from jpeg base64 string
try:
img = data['img']
except KeyError:
logger.error('Invalid data type')
return
img = decodeimg(img)
if img is None:
return
# Rotate 180
if 'rotate' in data and data['rotate']:
img = rotateImg(img, 180)
# put into output queue
request_queue.put(img)
# emit response
if response_queue is not None:
# wait for response
resp_data = response_queue.get()
# Rotate 180
if 'rotate' in data and data['rotate']:
rotateImgElement(resp_data, key='img', deg=180)
# encode image
encodeImgElement(resp_data, key='img')
# emit
logger.debug('Emit response')
emit('response', resp_data, namespace=IO_NAMESPACE)
# start server
logger.info('Start server on port %d' % port)
socketio.run(app, host='0.0.0.0', port=port, debug=False, log_output=False)
logger.info('Stop server on port %d' % port)
def start(request_queue, response_queue=None, stop_page=True, port=5000,
secret_key=os.urandom(24)):
'''Start new image uploading server on `port`.
This function create new daemon process and start it.
arguments:
* request_queue (multiprocessing.Queue): output queue.
It returns a image (np.ndarray).
* response_queue (multiprocessing.Queue): input queue.
The input type is dict and it can contain
'img': (np.ndarray), 'msg': (str).
* stop_page (bool): enable server stop page "/stop".
* port (int): server port
If there are no need to use IO, set corresponding queues to `None`.
'''
process = multiprocessing.Process(target=new_server,
args=(request_queue, response_queue,
stop_page, port, secret_key))
process.daemon = True
process.start()
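# Minimal usage sketch (an assumption about how this module is driven, based on
# the start() docstring above; not part of the original extension): run the
# uploader server and echo each uploaded frame back to the browser.
if __name__ == '__main__':
    req_q = multiprocessing.Queue()
    resp_q = multiprocessing.Queue()
    start(req_q, resp_q, stop_page=True, port=5000)
    while True:
        frame = req_q.get()  # np.ndarray pushed by __on_upload_image()
        resp_q.put({'img': frame,
                    'msg': 'received %dx%d frame' % (frame.shape[1],
                                                     frame.shape[0])})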
|
MS14-068/pykek/kek/krb5.py | shellfarmer/windows-kernel-exploits | 6,550 | 12615446 | <reponame>shellfarmer/windows-kernel-exploits
#!/usr/bin/python
# Author
# ------
# <NAME>
# Contact : sylvain dot monne at solucom dot fr
# http://twitter.com/bidord
from socket import socket
from pyasn1.type.univ import Integer, Sequence, SequenceOf, OctetString, BitString, Boolean
from pyasn1.type.char import GeneralString
from pyasn1.type.useful import GeneralizedTime
from pyasn1.type.tag import Tag, tagClassContext, tagClassApplication, tagFormatSimple
from pyasn1.type.namedtype import NamedTypes, NamedType, OptionalNamedType
from pyasn1.codec.der.encoder import encode
from pyasn1.codec.der.decoder import decode
from crypto import encrypt, decrypt, checksum, RC4_HMAC, RSA_MD5
from util import epoch2gt
from struct import pack, unpack
NT_UNKNOWN = 0
NT_PRINCIPAL = 1
NT_SRV_INST = 2
NT_SRV_HST = 3
NT_SRV_XHST = 4
NT_UID = 5
NT_X500_PRINCIPAL = 6
NT_SMTP_NAME = 7
NT_ENTERPRISE = 10
AD_IF_RELEVANT = 1
AD_WIN2K_PAC = 128
def _c(n, t):
return t.clone(tagSet=t.tagSet + Tag(tagClassContext, tagFormatSimple, n))
def _v(n, t):
return t.clone(tagSet=t.tagSet + Tag(tagClassContext, tagFormatSimple, n), cloneValueFlag=True)
def application(n):
return Sequence.tagSet + Tag(tagClassApplication, tagFormatSimple, n)
class Microseconds(Integer): pass
class KerberosString(GeneralString): pass
class Realm(KerberosString): pass
class PrincipalName(Sequence):
componentType = NamedTypes(
NamedType('name-type', _c(0, Integer())),
NamedType('name-string', _c(1, SequenceOf(componentType=KerberosString()))))
class KerberosTime(GeneralizedTime): pass
class HostAddress(Sequence):
componentType = NamedTypes(
NamedType('addr-type', _c(0, Integer())),
NamedType('address', _c(1, OctetString())))
class HostAddresses(SequenceOf):
componentType = HostAddress()
class AuthorizationData(SequenceOf):
componentType = Sequence(componentType=NamedTypes(
NamedType('ad-type', _c(0, Integer())),
NamedType('ad-data', _c(1, OctetString()))))
class PAData(Sequence):
componentType = NamedTypes(
NamedType('padata-type', _c(1, Integer())),
NamedType('padata-value', _c(2, OctetString())))
class KerberosFlags(BitString): pass
class EncryptedData(Sequence):
componentType = NamedTypes(
NamedType('etype', _c(0, Integer())),
OptionalNamedType('kvno', _c(1, Integer())),
NamedType('cipher', _c(2, OctetString())))
class EncryptionKey(Sequence):
componentType = NamedTypes(
NamedType('keytype', _c(0, Integer())),
NamedType('keyvalue', _c(1, OctetString())))
class CheckSum(Sequence):
componentType = NamedTypes(
NamedType('cksumtype', _c(0, Integer())),
NamedType('checksum', _c(1, OctetString())))
class Ticket(Sequence):
tagSet = application(1)
componentType = NamedTypes(
NamedType('tkt-vno', _c(0, Integer())),
NamedType('realm', _c(1, Realm())),
NamedType('sname', _c(2, PrincipalName())),
NamedType('enc-part', _c(3, EncryptedData())))
class APOptions(KerberosFlags): pass
class APReq(Sequence):
tagSet = application(14)
componentType = NamedTypes(
NamedType('pvno', _c(0, Integer())),
NamedType('msg-type', _c(1, Integer())),
NamedType('ap-options', _c(2, APOptions())),
NamedType('ticket', _c(3, Ticket())),
NamedType('authenticator', _c(4, EncryptedData())))
class Authenticator(Sequence):
tagSet = application(2)
componentType = NamedTypes(
NamedType('authenticator-vno', _c(0, Integer())),
NamedType('crealm', _c(1, Realm())),
NamedType('cname', _c(2, PrincipalName())),
OptionalNamedType('cksum', _c(3, CheckSum())),
NamedType('cusec', _c(4, Microseconds())),
NamedType('ctime', _c(5, KerberosTime())),
OptionalNamedType('subkey', _c(6, EncryptionKey())),
OptionalNamedType('seq-number', _c(7, Integer())),
OptionalNamedType('authorization-data', _c(8, AuthorizationData())))
class KDCOptions(KerberosFlags): pass
class KdcReqBody(Sequence):
componentType = NamedTypes(
NamedType('kdc-options', _c(0, KDCOptions())),
OptionalNamedType('cname', _c(1, PrincipalName())),
NamedType('realm', _c(2, Realm())),
OptionalNamedType('sname', _c(3, PrincipalName())),
OptionalNamedType('from', _c(4, KerberosTime())),
NamedType('till', _c(5, KerberosTime())),
OptionalNamedType('rtime', _c(6, KerberosTime())),
NamedType('nonce', _c(7, Integer())),
NamedType('etype', _c(8, SequenceOf(componentType=Integer()))),
OptionalNamedType('addresses', _c(9, HostAddresses())),
OptionalNamedType('enc-authorization-data', _c(10, EncryptedData())),
OptionalNamedType('additional-tickets', _c(11, SequenceOf(componentType=Ticket()))))
class KdcReq(Sequence):
componentType = NamedTypes(
NamedType('pvno', _c(1, Integer())),
NamedType('msg-type', _c(2, Integer())),
NamedType('padata', _c(3, SequenceOf(componentType=PAData()))),
NamedType('req-body', _c(4, KdcReqBody())))
class TicketFlags(KerberosFlags): pass
class AsReq(KdcReq):
tagSet = application(10)
class TgsReq(KdcReq):
tagSet = application(12)
class KdcRep(Sequence):
componentType = NamedTypes(
NamedType('pvno', _c(0, Integer())),
NamedType('msg-type', _c(1, Integer())),
OptionalNamedType('padata', _c(2, SequenceOf(componentType=PAData()))),
NamedType('crealm', _c(3, Realm())),
NamedType('cname', _c(4, PrincipalName())),
NamedType('ticket', _c(5, Ticket())),
NamedType('enc-part', _c(6, EncryptedData())))
class AsRep(KdcRep):
tagSet = application(11)
class TgsRep(KdcRep):
tagSet = application(13)
class LastReq(SequenceOf):
componentType = Sequence(componentType=NamedTypes(
NamedType('lr-type', _c(0, Integer())),
NamedType('lr-value', _c(1, KerberosTime()))))
class PaEncTimestamp(EncryptedData): pass
class PaEncTsEnc(Sequence):
componentType = NamedTypes(
NamedType('patimestamp', _c(0, KerberosTime())),
NamedType('pausec', _c(1, Microseconds())))
class EncKDCRepPart(Sequence):
componentType = NamedTypes(
NamedType('key', _c(0, EncryptionKey())),
NamedType('last-req', _c(1, LastReq())),
NamedType('nonce', _c(2, Integer())),
OptionalNamedType('key-expiration', _c(3, KerberosTime())),
NamedType('flags', _c(4, TicketFlags())),
NamedType('authtime', _c(5, KerberosTime())),
OptionalNamedType('starttime', _c(6, KerberosTime())),
NamedType('endtime', _c(7, KerberosTime())),
OptionalNamedType('renew-till', _c(8, KerberosTime())),
NamedType('srealm', _c(9, Realm())),
NamedType('sname', _c(10, PrincipalName())),
OptionalNamedType('caddr', _c(11, HostAddresses())))
class EncASRepPart(EncKDCRepPart):
tagSet = application(25)
class EncTGSRepPart(EncKDCRepPart):
tagSet = application(26)
class TransitedEncoding(Sequence):
componentType = NamedTypes(
NamedType('tr-type', _c(0, Integer())),
NamedType('contents', _c(1, OctetString())))
class EncTicketPart(Sequence):
tagSet = application(3)
componentType = NamedTypes(
NamedType('flags', _c(0, TicketFlags())),
NamedType('key', _c(1, EncryptionKey())),
NamedType('crealm', _c(2, Realm())),
NamedType('cname', _c(3, PrincipalName())),
NamedType('transited', _c(4, TransitedEncoding())),
NamedType('authtime', _c(5, KerberosTime())),
OptionalNamedType('starttime', _c(6, KerberosTime())),
NamedType('endtime', _c(7, KerberosTime())),
OptionalNamedType('renew-till', _c(8, KerberosTime())),
OptionalNamedType('caddr', _c(9, HostAddresses())),
OptionalNamedType('authorization-data', _c(10, AuthorizationData())))
class KerbPaPacRequest(Sequence):
componentType = NamedTypes(
NamedType('include-pac', _c(0, Boolean())))
def build_req_body(realm, service, host, nonce, cname=None, authorization_data=None, etype=RC4_HMAC):
req_body = KdcReqBody()
# (Forwardable, Proxiable, Renewable, Canonicalize)
req_body['kdc-options'] = "'01010000100000000000000000000000'B"
if cname is not None:
req_body['cname'] = None
req_body['cname']['name-type'] = NT_PRINCIPAL
req_body['cname']['name-string'] = None
req_body['cname']['name-string'][0] = cname
req_body['realm'] = realm
req_body['sname'] = None
req_body['sname']['name-type'] = NT_PRINCIPAL
req_body['sname']['name-string'] = None
req_body['sname']['name-string'][0] = service
req_body['sname']['name-string'][1] = host
req_body['from'] = '19700101000000Z'
req_body['till'] = '19700101000000Z'
req_body['rtime'] = '19700101000000Z'
req_body['nonce'] = nonce
req_body['etype'] = None
req_body['etype'][0] = etype
if authorization_data is not None:
req_body['enc-authorization-data'] = None
req_body['enc-authorization-data']['etype'] = authorization_data[0]
req_body['enc-authorization-data']['cipher'] = authorization_data[1]
return req_body
def build_authenticator(realm, name, chksum, subkey, current_time, authorization_data=None):
auth = Authenticator()
auth['authenticator-vno'] = 5
auth['crealm'] = realm
auth['cname'] = None
auth['cname']['name-type'] = NT_PRINCIPAL
auth['cname']['name-string'] = None
auth['cname']['name-string'][0] = name
auth['cksum'] = None
auth['cksum']['cksumtype'] = chksum[0]
auth['cksum']['checksum'] = chksum[1]
gt, ms = epoch2gt(current_time, microseconds=True)
auth['cusec'] = ms
auth['ctime'] = gt
auth['subkey'] = None
auth['subkey']['keytype'] = subkey[0]
auth['subkey']['keyvalue'] = subkey[1]
if authorization_data is not None:
auth['authorization-data'] = _v(8, authorization_data)
return auth
def build_ap_req(ticket, key, msg_type, authenticator):
enc_auth = encrypt(key[0], key[1], msg_type, encode(authenticator))
ap_req = APReq()
ap_req['pvno'] = 5
ap_req['msg-type'] = 14
ap_req['ap-options'] = "'00000000000000000000000000000000'B"
ap_req['ticket'] = _v(3, ticket)
ap_req['authenticator'] = None
ap_req['authenticator']['etype'] = key[0]
ap_req['authenticator']['cipher'] = enc_auth
return ap_req
def build_tgs_req(target_realm, target_service, target_host,
user_realm, user_name, tgt, session_key, subkey,
nonce, current_time, authorization_data=None, pac_request=None):
if authorization_data is not None:
ad1 = AuthorizationData()
ad1[0] = None
ad1[0]['ad-type'] = authorization_data[0]
ad1[0]['ad-data'] = authorization_data[1]
ad = AuthorizationData()
ad[0] = None
ad[0]['ad-type'] = AD_IF_RELEVANT
ad[0]['ad-data'] = encode(ad1)
enc_ad = (subkey[0], encrypt(subkey[0], subkey[1], 5, encode(ad)))
else:
ad = None
enc_ad = None
req_body = build_req_body(target_realm, target_service, target_host, nonce, authorization_data=enc_ad)
chksum = (RSA_MD5, checksum(RSA_MD5, encode(req_body)))
authenticator = build_authenticator(user_realm, user_name, chksum, subkey, current_time)#, ad)
ap_req = build_ap_req(tgt, session_key, 7, authenticator)
tgs_req = TgsReq()
tgs_req['pvno'] = 5
tgs_req['msg-type'] = 12
tgs_req['padata'] = None
tgs_req['padata'][0] = None
tgs_req['padata'][0]['padata-type'] = 1
tgs_req['padata'][0]['padata-value'] = encode(ap_req)
if pac_request is not None:
pa_pac_request = KerbPaPacRequest()
pa_pac_request['include-pac'] = pac_request
tgs_req['padata'][1] = None
tgs_req['padata'][1]['padata-type'] = 128
tgs_req['padata'][1]['padata-value'] = encode(pa_pac_request)
tgs_req['req-body'] = _v(4, req_body)
return tgs_req
def build_pa_enc_timestamp(current_time, key):
gt, ms = epoch2gt(current_time, microseconds=True)
pa_ts_enc = PaEncTsEnc()
pa_ts_enc['patimestamp'] = gt
pa_ts_enc['pausec'] = ms
pa_ts = PaEncTimestamp()
pa_ts['etype'] = key[0]
pa_ts['cipher'] = encrypt(key[0], key[1], 1, encode(pa_ts_enc))
return pa_ts
def build_as_req(target_realm, user_name, key, current_time, nonce, pac_request=None):
req_body = build_req_body(target_realm, 'krbtgt', target_realm, nonce, cname=user_name)
pa_ts = build_pa_enc_timestamp(current_time, key)
as_req = AsReq()
as_req['pvno'] = 5
as_req['msg-type'] = 10
as_req['padata'] = None
as_req['padata'][0] = None
as_req['padata'][0]['padata-type'] = 2
as_req['padata'][0]['padata-value'] = encode(pa_ts)
if pac_request is not None:
pa_pac_request = KerbPaPacRequest()
pa_pac_request['include-pac'] = pac_request
as_req['padata'][1] = None
as_req['padata'][1]['padata-type'] = 128
as_req['padata'][1]['padata-value'] = encode(pa_pac_request)
as_req['req-body'] = _v(4, req_body)
return as_req
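# Transport note: the KDC exchange below uses plain TCP framing -- each message
# is prefixed with its length as a 4-byte big-endian integer (the '>I'
# pack/unpack calls), and recv_rep() keeps reading until that many bytes arrive.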
def send_req(req, kdc, port=88):
data = encode(req)
data = pack('>I', len(data)) + data
sock = socket()
sock.connect((kdc, port))
sock.send(data)
return sock
def recv_rep(sock):
data = ''
datalen = None
while True:
rep = sock.recv(8192)
if not rep:
sock.close()
raise IOError('Connection error')
data += rep
if len(rep) >= 4:
if datalen is None:
datalen = unpack('>I', rep[:4])[0]
if len(data) >= 4 + datalen:
sock.close()
return data[4:4 + datalen]
def _decrypt_rep(data, key, spec, enc_spec, msg_type):
rep = decode(data, asn1Spec=spec)[0]
rep_enc = str(rep['enc-part']['cipher'])
rep_enc = decrypt(key[0], key[1], msg_type, rep_enc)
rep_enc = decode(rep_enc, asn1Spec=enc_spec)[0]
return rep, rep_enc
def decrypt_tgs_rep(data, key):
return _decrypt_rep(data, key, TgsRep(), EncTGSRepPart(), 9) # assume subkey
def decrypt_as_rep(data, key):
return _decrypt_rep(data, key, AsRep(), EncASRepPart(), 8)
def decrypt_ticket_enc_part(ticket, key):
ticket_enc = str(ticket['enc-part']['cipher'])
ticket_enc = decrypt(key[0], key[1], 2, ticket_enc)
return decode(ticket_enc, asn1Spec=EncTicketPart())[0]
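# Walk an AuthorizationData sequence, recursing into AD-IF-RELEVANT containers
# and yielding every element (including the containers themselves).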
def iter_authorization_data(ad):
if ad is None:
return
for block in ad:
yield block
if block['ad-type'] == AD_IF_RELEVANT:
for subblock in iter_authorization_data(decode(str(block['ad-data']), asn1Spec=AuthorizationData())[0]):
yield subblock
|
sp_api/api/upload/upload.py | Priyankk18k/python-amazon-sp-api | 213 | 12615472 | <filename>sp_api/api/upload/upload.py<gh_stars>100-1000
from sp_api.base import Client, sp_endpoint
from sp_api.base.helpers import create_md5, fill_query_params
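# Uploads API client: upload_document creates an upload destination for the given
# resource, passing the file's MD5 digest, the content type and the marketplace id
# as query parameters.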
class Upload(Client):
@sp_endpoint('/uploads/v1/uploadDestinations/{}', method='POST')
def upload_document(self, resource, file, content_type='application/pdf', **kwargs):
md5 = create_md5(file)
kwargs.update({
'contentMD5': md5,
'contentType': kwargs.pop('contentType', content_type),
'marketplaceIds': self.marketplace_id
})
return self._request(fill_query_params(kwargs.pop('path'), resource), params=kwargs)
|
extras/sample_armory_code.py | Manny27nyc/BitcoinArmory | 505 | 12615493 | import sys
from armoryengine.BDM import BDM_BLOCKCHAIN_READY, TheBDM
from armoryengine.ArmoryUtils import RightNow
sys.path.append('..')
sys.path.append('.')
from armoryengine import *
from math import sqrt
from time import sleep
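# Demonstration script for armoryengine: optionally creates a C++ wallet, loads the
# blockchain through TheBDM, and runs a few blockchain statistics passes
# (difficulty changes, unique addresses, SatoshiDice bet accounting).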
# Run at least one of the LoadBlockchain's if running anything after it
run_WalletCreate = False
run_LoadBlockchain_Async = False
run_LoadBlockchain_Block = True
run_WalletRescan = False
run_DiffChangeList = True
run_UniqueAddresses = False
run_CumulativeSize = True
run_TrafficCamera = False
run_SatoshiDice = False
################################################################################
if run_WalletCreate:
# NOTE:
# ALL ADDRESSES THROUGHOUT EVERYTHING ARE IN 20-BYTE BINARY FORM (hash160/addr20)
# Use hash160_to_addrStr() and addrStr_to_hash160() to convert...
print '\n\nCreating a new C++ wallet, add a few addresses...'
cppWallet = Cpp.BtcWallet()
cppWallet.addAddress_1_( hex_to_binary('11b366edfc0a8b66feebae5c2e25a7b6a5d1cf31') ) # hash160 (hex)
cppWallet.addAddress_1_( addrStr_to_hash160('1EbAUHsitefy3rSECh8eK2fdAWTUbpVUDN')[1] ) # addrStr
cppWallet.addAddress_1_('\x1b~\xa7*\x85\t\x12\xb7=\xd4G\xf3\xbd\xc1\x00\xf1\x00\x8b\xde\xb0') # hash160 (bin)
print 'Addresses in this wallet:'
for i in range(cppWallet.getNumAddr()):
print '\t', hash160_to_addrStr(cppWallet.getAddrByIndex(i).getAddrStr20())
print '\n\nRegistering the wallet with the BlockDataManager & loading...'
cppWallet.registerWallet()
################################################################################
if run_LoadBlockchain_Async:
"""
By setting blocking=False, most calls to TheBDM will return immediately,
after queuing the BDM to execute the operation in the background. You have
to check back later to see when it's done. However, even when blocking is
false, any functions that return data must block so the data can be
returned. If you are in asynchronous mode, and don't want to ever wait
for anything, always check TheBDM.getState()==BDM_BLOCKCHAIN_READY before
requesting data that will force blocking.
"""
start = RightNow()
TheBDM.setBlocking(False)
TheBDM.setOnlineMode(True)
sleep(2)
print 'Waiting for blockchain loading to finish',
while not TheBDM.getState()==BDM_BLOCKCHAIN_READY:
print '.',
sys.stdout.flush()
sleep(2)
print 'Loading blockchain took %0.1f sec' % (RightNow() - start)
topBlock = TheBDM.getTopBlockHeight()
print '\n\nCurrent Top Block is:', topBlock
TheBDM.blockchain().top().pprint()
################################################################################
if run_LoadBlockchain_Block:
start = RightNow()
TheBDM.setBlocking(True)
TheBDM.setOnlineMode(True)
# The setOnlineMode should block until blockchain loading is complete
print 'Loading blockchain took %0.1f sec' % (RightNow() - start)
topBlock = TheBDM.getTopBlockHeight()
print '\n\nCurrent Top Block is:', topBlock
TheBDM.blockchain().top().pprint()
################################################################################
if run_WalletRescan:
print 'Inducing a rescan by adding a new address and requesting...'
cppWallet.addAddress_1_( hex_to_binary('0cdcd0f388a31b11ff11b1d8d7a9f978b37bc7af') )
TheBDM.scanBlockchainForTx(cppWallet)
print '\n\nBalance of this wallet:', coin2str(cppWallet.getSpendableBalance())
print 'Unspent outputs:'
   unspentTxOuts = cppWallet.getSpendableTxOutList()  # IGNOREZC isn't defined in this script
for utxo in unspentTxOuts:
utxo.pprintOneLine(topBlock)
print '\n\nTransaction history of this wallet:'
ledger = cppWallet.getTxLedger()
for le in ledger:
le.pprintOneLine()
################################################################################
if run_DiffChangeList:
print '\n\n'
print '-'*80
print 'Now for something completely different...'
start = RightNow()
print '\n\nCollect all difficulty changes...'
print 'Block'.rjust(10), 'Difficulty'.rjust(14), '\t', 'Date'
prevDiff = 0
maxDiff = hex_to_int('ff'*32)
minDiff = maxDiff
minDiffBlk = hex_to_int('ff'*32)
for h in xrange(0,topBlock+1):
header = TheBDM.blockchain().getHeaderByHeight(h)
currDiff = header.getDifficulty()
thisHash = header.getThisHash()
thisDiff = binary_to_int(thisHash);
if thisDiff < minDiff:
minDiff = thisDiff
minDiffBlk = h
if not prevDiff==currDiff:
print str(h).rjust(10),
print ('%0.1f'%currDiff).rjust(14),
print '\t', unixTimeToFormatStr(header.getTimestamp())
prevDiff = currDiff
from math import log
print 'Took %0.1f seconds to collect difficulty list' % (RightNow()-start)
print '\nBlock with the lowest difficulty:'
print ' Block Num: ', minDiffBlk
print ' Block Hash: ', int_to_hex(minDiff, 32, BIGENDIAN)
print ' Equiv Difficult:', maxDiff/(minDiff * 2**32)
print ' Equiv Diff bits:', log(maxDiff/minDiff)/log(2)
print ' Block Header (hex): '
print ' ', binary_to_hex(TheBDM.blockchain().getHeaderByHeight(minDiffBlk).serialize())
################################################################################
if run_CumulativeSize:
f = open('blksizelist.txt','w')
cumul = 0
for h in xrange(0,topBlock+1):
if h%10000 == 0:
print '\tAccumulated %d blocks' % h
header = TheBDM.blockchain().getHeaderByHeight(h)
cumul += header.getBlockSize()
if (h%2016==0) or h+1>=topBlock:
f.write('%d %d\n' % (h,cumul))
f.close()
################################################################################
if run_UniqueAddresses:
start = RightNow()
allAddr = set()
totalTxOutEver = 0
for h in xrange(0,topBlock+1):
if h%10000 == 0:
print '\tScanned %d blocks' % h
header = TheBDM.blockchain().getHeaderByHeight(h)
txList = header.getTxRefPtrList()
for txref in txList:
tx = txref.getTxCopy()
for nout in range(tx.getNumTxOut()):
txout = tx.getTxOut(nout)
if txout.isStandard():
allAddr.add(txout.getRecipientAddr())
totalTxOutEver += 1
print 'Took %0.1f seconds to count all addresses' % (RightNow()-start)
print 'There are %d unique addresses in the blockchain!' % len(allAddr)
print 'There are %d standard TxOuts in all blocks' % totalTxOutEver
################################################################################
if run_TrafficCamera:
# will fill this in later
pass
################################################################################
if run_SatoshiDice:
   print '\n\nLet\'s look at all the bets ever placed at SatoshiDice.com'
# First, get the Satoshi dice page so we can extract the addresses and payouts
import urllib
httppage = urllib.urlopen('http://www.satoshidice.com').read().split('\n')
# Given this line is part of the wager/addr table, get all the data from it
def extractLineData(line):
line2 = line.replace('<','~').replace('>','~').replace('=','~').replace(' ','~')
pcs = line2.split('~')
out = []
ltflag = False
for pc in pcs:
if pc=='lessthan':
ltflag = True
if ltflag and pc.isdigit():
out.append(pc)
ltflag = False
if pc.startswith('1dice') or pc.endswith('%') or pc.endswith('x'):
out.append(pc)
return out
# We have a webpage and a method to process the relevant lines
diceTargetMap = {}
dicePctWinMap = {}
diceWinMultMap = {}
diceLoseMultMap = {}
diceBetsMadeMap = {}
diceBetsPaidOut = {}
diceBetsMadeMapList = {}
WIN, LOSE, REFUND = 0,1,2
for line in httppage:
if 'lessthan' in line and '1dice' in line:
targ,addr,winr,mult,hous,rtrn = extractLineData(line)
diceAddr = addrStr_to_hash160(addr)[1]
diceTargetMap[diceAddr] = int(targ)
dicePctWinMap[diceAddr] = float(winr[:-1])/100.0
diceWinMultMap[diceAddr] = float(mult[:-1])
diceLoseMultMap[diceAddr] = 0.005 # looks to be a static 0.5% now, spread is all in the win mult
diceBetsMadeMap[diceAddr] = 0
diceBetsPaidOut[diceAddr] = [0, 0, 0]
diceBetsMadeMapList[diceAddr] = []
betsIn = {}
sdRecvAmt = 0
sdRtrnAmt = 0
sdFeePaid = 0
totalBets = 0
def getTxFee(tx):
btcIn, btcOut = 0,0
for i in range(tx.getNumTxIn()):
btcIn += TheBDM.getSentValue(tx.getTxIn(i))
for i in range(tx.getNumTxOut()):
btcOut += tx.getTxOut(i).getValue()
return (btcIn - btcOut)
# Approximation of a bet's variance isn't good enough for me. Assume fair
# odds, compute exactly! These are the stats for SatoshiDice.com bank acct
def computeWagerStats(amt, diceAddr):
# SD loses money on winning bets, gains money on losing bets
#afterFee = amt - 0.0005e8
#winAmt = afterFee - diceWinMultMap[diceAddr]*amt
#winPct = diceTargetMap[diceAddr] / 65536.0;
#losAmt = afterFee - diceLoseMultMap[diceAddr]*amt
#losPct = 1-winPct
# Modified calculation to produce theoretical numbers assuming better
# game design
payout = 0.97
afterFee = amt
winAmt = afterFee - payout*amt
winPct = diceTargetMap[diceAddr] / 65536.0;
losAmt = afterFee - ((1-payout)/2)*amt
losPct = 1-winPct
avg = winPct*winAmt + losPct*losAmt
var = (winPct*(winAmt-avg)**2) + (losPct*(losAmt-avg)**2)
#print amt, diceTargetMap[diceAddr], diceWinMultMap[diceAddr], diceLoseMultMap[diceAddr]
#print winAmt, winPct, losAmt, losPct
#print avg, var, sqrt(var)
#print coin2str(avg), coin2str(var), coin2str(sqrt(var))
#print '\n'
return [avg, var]
completedIn = 0.0
completedOut = 0.0
totalAvgSum = 0.0
totalVarSum = 0.0
firstSDTxPassed = False
totalSDBytes = 0
totalBCBytes = 0
totalSDTx = 0
totalBCTx = 0
fileAllBets = open('sdAllBets.txt','w')
try:
for h in xrange(175000,topBlock+1):
if h%10000 == 0:
print '\tSearched %d blocks' % h
header = TheBDM.blockchain().getHeaderByHeight(h)
txList = header.getTxRefPtrList()
for txref in txList:
tx = txref.getTxCopy()
# Check every TxOut in this transaction for SatoshiDice bets
txHash = tx.getThisHash()
if firstSDTxPassed:
totalBCBytes += tx.getSize()
totalBCTx += 1
thisIsAWager = False
for nout in range(tx.getNumTxOut()):
txout = tx.getTxOut(nout)
if txout.isStandard():
if dicePctWinMap.has_key(txout.getRecipientAddr()):
# This is a SatoshiDice bet!
firstSDTxPassed = True
# Add this to the total tx/byte count, first time
if not thisIsAWager:
totalSDBytes += tx.getSize()
totalSDTx += 1
thisIsAWager = True
totalBets += 1
diceAddr = txout.getRecipientAddr()
betAmt = txout.getValue()
betWin = betAmt * diceWinMultMap[diceAddr]
betLos = betAmt * diceLoseMultMap[diceAddr]
firstTxIn = tx.getTxIn(0)
bettorAddr = TheBDM.getSenderAddr20(firstTxIn)
## Create the serialized OutPoint, store the tx
outpointStr = txHash + int_to_binary(nout, widthBytes=4)
betsIn[outpointStr] = [betAmt, betWin, betLos, diceAddr, bettorAddr]
sdRecvAmt += betAmt
diceBetsMadeMap[diceAddr] += 1
winPct = diceTargetMap[diceAddr] / 65536.0;
losPct = 1-winPct
winMult = diceWinMultMap[diceAddr]
losMult = diceLoseMultMap[diceAddr]
fileAllBets.write('%s %d %f %f %f %f\n' % (coin2str(betAmt), diceTargetMap[diceAddr], winPct, winMult, losPct, losMult))
for nin in range(tx.getNumTxIn()):
txin = tx.getTxIn(nin)
op = txin.getOutPoint()
opStr = op.getTxHash() + int_to_binary(op.getTxOutIndex(), widthBytes=4)
returned = -1
if betsIn.has_key(opStr):
betAmt, betWin, betLos, diceAddr, addr160 = betsIn[opStr]
for nout in range(tx.getNumTxOut()):
if addr160 == tx.getTxOut(nout).getRecipientAddr():
returned = tx.getTxOut(nout).getValue()
sdRtrnAmt += returned
sdFeePaid += getTxFee(tx)
completedIn += betAmt
completedOut += returned
avg, var = computeWagerStats(betAmt, diceAddr)
totalAvgSum += avg
totalVarSum += var
diceBetsMadeMapList[diceAddr].append(betAmt)
totalSDBytes += tx.getSize()
totalSDTx += 1
break
if returned==-1:
print 'Did not find recip, failed...'
continue
else:
if returned <= betLos*1.25:
diceBetsPaidOut[diceAddr][LOSE] += 1
elif abs(returned - betAmt) < betLos/2.0:
diceBetsPaidOut[diceAddr][REFUND] += 1
else:
diceBetsPaidOut[diceAddr][WIN] += 1
del betsIn[opStr]
break
except:
raise
print 'Unaccounted-for Bets:'
i = 0
unacctBTC = 0
for key,val in betsIn.iteritems():
txid = binary_to_hex(key[:32 ])
outidx = binary_to_int(key[ 32:])
betAmt = val[0]
sdAddr = val[3]
recip1 = val[4]
#print i, hex_switchEndian(txid), '%03d'%outidx, coin2str(betAmt),
#print hash160_to_addrStr(sdAddr)[:8], hash160_to_addrStr(recip1)[:8]
i += 1
unacctBTC += betAmt
print 'Results:', unixTimeToFormatStr(RightNow())
print ''
print 'Address'.rjust(10),
print 'Target'.rjust(8),
print 'Should Win'.rjust(12), '|',
print '#Bets'.rjust(8), '|',
print 'Win'.center(16), '|',
print 'Lose'.center(16), '|',
print 'Refunds'.center(17), '|',
print 'Accounted-for'
print '-'*118
totalBets = 0
diceAddrList = []
for a160,ct in diceBetsMadeMap.iteritems():
diceAddrList.append([a160, diceTargetMap[a160]])
diceAddrList.sort(key=(lambda x: x[1]))
for a160,targ in diceAddrList:
total = diceBetsMadeMap[a160]
winners = diceBetsPaidOut[a160][WIN]
losers = diceBetsPaidOut[a160][LOSE]
refunds = diceBetsPaidOut[a160][REFUND]
total2 = winners+losers
print hash160_to_addrStr(a160)[:9].rjust(10),
print str(targ).rjust(8),
print ('%0.5f' % (targ/65536.)).rjust(12),
print '|', str(total).rjust(8),
print '|', str(winners).rjust(6), ('(%0.5f)'%(winners/float(total2))).rjust(8),
print '|', str(losers).rjust(6), ('(%0.5f)'%(losers/float(total2))).rjust(8),
print '|', str(refunds).rjust(6), ('(%0.5f)'%(refunds/float(total2))).rjust(8),
print '|', '(%0.3f)'.rjust(10) % ((winners+losers+refunds)/float(total))
totalBets += total
print '-'*118
print ' '*32, '|', str(totalBets).rjust(8), '|'
print ''
print '-'*118
print 'Total Bets Made: ', totalBets
print 'Cumulative Wagers: ', coin2str(sdRecvAmt), 'BTC'
print 'Cumulative Rewards: ', coin2str(sdRtrnAmt), 'BTC'
print 'Cumulative Fees Paid: ', coin2str(sdFeePaid), 'BTC'
print 'Cumulative Unreturned: ', coin2str(unacctBTC), 'BTC'
print '----'
print 'SD Profit/Loss From Games: ', coin2str(sdRecvAmt - sdRtrnAmt), 'BTC'
print 'SD Profit/Loss With Fees: ', coin2str(sdRecvAmt - (sdRtrnAmt + sdFeePaid)), 'BTC'
#f = open('bethist.txt','w')
#for a160,targ in diceAddrList:
#f.write(str(targ)+'\n')
#f.write(' '.join([coin2str(b) for b in diceBetsMadeMapList[a160]]))
#f.write('\n')
#f.close()
BtoMB = lambda x: float(x)/(1024*1024.)
print 'Since Satoshi Dice started, there have been:'
print 'Blockchain Tx: %d : SatoshiDice Tx: %d (%0.1f%%)' % (totalBCTx, totalSDTx, 100*float(totalSDTx)/totalBCTx)
print 'Blockchain MB: %0.1f : SatoshiDice Tx: %0.1f (%0.1f%%)' % (BtoMB(totalBCBytes), BtoMB(totalSDBytes), 100*float(totalSDBytes)/totalBCBytes)
|
plugin.video.mrknow/lib/utils/javascriptUtils.py | mrknow/filmkodi | 105 | 12615497 | <filename>plugin.video.mrknow/lib/utils/javascriptUtils.py
# -*- coding: utf-8 -*-
import re
import urllib
import base64
import unpackstd
import unpack95High
from string import join
import traceback, sys
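# String de-obfuscation helpers ported from the JavaScript found on hoster pages:
# XOR loops with small constants, keyed XOR-plus-offset decoders and half-string
# interleaving (RrRrRrRr).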
class JsFunctions:
def hp_d01(self, s):
ar=[]
os=""
for i in range(0,len(s)-1):
c = ord(s[i])
if c < 128:
c = c^2
os += chr(c)
if len(os) > 80:
ar.append(os)
os = ""
o = join(ar,'') + os
return o
def o61a2a8f(self, s):
r = "";
tmp = s.split("18267506");
s = urllib.unquote(tmp[0]);
k = urllib.unquote(tmp[1] + "511382");
for i in range(0,len(s)-1):
r += chr((int(k[i%len(k)])^ord(s[i]))+1);
return r;
def n98c4d2c(self, s):
txtArr = s.split('18234663')
s = urllib.unquote(txtArr[0])
t = urllib.unquote(txtArr[1] + '549351')
tmp=''
for i in range(0,len(s)-1):
tmp += chr((int(t[i%len(t)])^ord(s[i]))+-6)
return urllib.unquote(tmp)
def RrRrRrRr(self, teaabb):
tttmmm=""
l=len(teaabb)
www = hhhhffff = int(round(l/2))
if l<2*www:
hhhhffff -= 1
for i in range(0,hhhhffff-1):
tttmmm = tttmmm + teaabb[i] + teaabb[i+hhhhffff]
if l<2*www :
tttmmm = tttmmm + teaabb[l-1]
return tttmmm
def ew_dc(self, s):
d=''
a=[]
for i in range(0, len(s)-1):
c = ord(s[i])
if (c<128):
c = c^5
d += chr(c)
if (i+1) % 99 == 0:
a.append(d)
d=''
r = join(a,'') + d
return r
def pbbfa0(self, s):
r = ""
tmp = s.split("17753326")
s = urllib.unquote(tmp[0])
k = urllib.unquote(tmp[1] + "527117")
for i in range(0,len(s)):
r += chr((int(k[i%len(k)])^ord(s[i]))+7)
return r
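# Detects and unpacks Dean Edwards style "eval(function(p,a,c,k,e,d)...)" packed
# JavaScript via the unpackstd helper, replacing each packed blob with its source.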
class JsUnpacker:
def unpackAll(self, data):
try:
in_data=data
sPattern = '(eval\\(function\\(p,a,c,k,e,d.*)'
enc_data=re.compile(sPattern).findall(in_data)
if len(enc_data)==0:
sPattern = '(eval\\(function\\(p,a,c,k,e,r.*)'
enc_data=re.compile(sPattern).findall(in_data)
for enc_val in enc_data:
unpack_val=unpackstd.unpack(enc_val)
in_data=in_data.replace(enc_val,unpack_val)
in_data=in_data.replace('\\\'','\'')
return in_data
except:
traceback.print_exc(file=sys.stdout)
return data.replace(enc_val,'')
def containsPacked(self, data):
return 'p,a,c,k,e,d' in data or 'p,a,c,k,e,r' in data
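# Variant unpacker for packers whose decoder uses String.fromCharCode(c+29);
# re-implements the base-36/char-code substitution loop instead of using unpackstd.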
class JsUnpackerV2:
def unpackAll(self, data):
try:
in_data=data
sPattern = '(eval\\(function\\(p,a,c,k,e,d.*)'
enc_data=re.compile(sPattern).findall(in_data)
#print 'enc_data',enc_data, len(enc_data)
if len(enc_data)==0:
sPattern = '(eval\\(function\\(p,a,c,k,e,r.*)'
enc_data=re.compile(sPattern).findall(in_data)
#print 'enc_data packer...',enc_data
for enc_val in enc_data:
unpack_val=self.unpack(enc_val)
in_data=in_data.replace(enc_val,unpack_val)
return in_data
except:
traceback.print_exc(file=sys.stdout)
return data
def containsPacked(self, data):
return 'String.fromCharCode(c+29)' in data
def unpack(self,sJavascript,iteration=1, totaliterations=1 ):
aSplit = sJavascript.split("rn p}('")
p1,a1,c1,k1=('','0','0','')
ss="p1,a1,c1,k1=(\'"+aSplit[1].split(".spli")[0]+')'
exec(ss)
k1=k1.split('|')
aSplit = aSplit[1].split("))'")
e = ''
d = ''#32823
sUnpacked1 = str(self.__unpack(p1, a1, c1, k1, e, d,iteration))
if iteration>=totaliterations:
return sUnpacked1
else:
return self.unpack(sUnpacked1,iteration+1)
def __unpack(self,p, a, c, k, e, d, iteration,v=1):
while (c >= 1):
c = c -1
if (k[c]):
aa=str(self.__itoaNew(c, a))
p=re.sub('\\b' + aa +'\\b', k[c], p)# THIS IS Bloody slow!
return p
def __itoa(self,num, radix):
result = ""
if num==0: return '0'
while num > 0:
result = "0123456789abcdefghijklmnopqrstuvwxyz"[num % radix] + result
num /= radix
return result
def __itoaNew(self,cc, a):
aa="" if cc < a else self.__itoaNew(int(cc / a),a)
cc = (cc % a)
bb=chr(cc + 29) if cc> 35 else str(self.__itoa(cc,36))
return aa+bb
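# Unpacker for p,a,c,k,e,d blobs whose symbol table uses high bytes (\xa1-\xff);
# delegates the actual unpacking to the unpack95High helper.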
class JsUnpacker95High:
def unpackAll(self, data):
try:
in_data=data
sPattern = '(eval\\(function\\(p,a,c,k,e,d.*)'
enc_data=re.compile(sPattern).findall(in_data)
if len(enc_data)==0:
sPattern = '(eval\\(function\\(p,a,c,k,e,r.*)'
enc_data=re.compile(sPattern).findall(in_data)
for enc_val in enc_data:
unpack_val=unpack95High.unpack(enc_val)
in_data=in_data.replace(enc_val,unpack_val)
in_data=in_data.replace('\\\'','\'')
return in_data
except:
traceback.print_exc(file=sys.stdout)
return data.replace(enc_val,'')
def containsPacked(self, data):
return r'[\xa1-\xff]' in data
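# Decoder for the escaped "c" string / x("...") obfuscation: rebuilds the digit
# lookup array from the partially escaped "c" string, then decodes the "x" payload
# with a shifting XOR against 165.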
class JsUnIonCube:
def ionX(self, x, arrayX):
r = []
s = 0
w = 0
for d in x:
w |= int(arrayX[ord(d)-48]) << s
if (s):
r.append(chr(165 ^ w & 255))
w >>= 8
s -= 2
else:
s = 6
r = ''.join(r)
return r
def unIonALL(self,data):
in_data=data
sPattern = 'c="(.*?)";eval\\(unescape\\(".*"\\)\\);x\\("(.*?)"\\)'
undc_data=re.compile(sPattern).findall(in_data)
c = undc_data[0][0]
x = undc_data[0][1]
l = list(c)
for i in range(0, len(c), 3):
l[i]='%'
c = ''.join(l)
c = urllib.unquote_plus(c)
arrayPattern = 't=Array\\((.*?)\\)'
arrayData = re.compile(arrayPattern).findall(c)
ionArray = arrayData[0].split(',')
data=self.ionX(x,ionArray)
return data
def containsIon(self,data):
return 'eval(unescape("d="";' in data
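# Decoder for the "eval(function(w,i,s,e)...)" obfuscation: interleaves the w/i/s
# inputs, converts base-36 character pairs back to text and recurses if the result
# is wrapped in another w,i,s,e layer.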
class JsUnwiser:
def unwiseAll(self, data):
try:
in_data=data
sPattern = 'eval\\(function\\(w,i,s,e\\).*?}\\((.*?)\\)'
wise_data=re.compile(sPattern).findall(in_data)
for wise_val in wise_data:
unpack_val=self.unwise(wise_val)
#print '\nunpack_val',unpack_val
in_data=in_data.replace(wise_val,unpack_val)
return re.sub("eval\(function\(w,i,s,e\).*?join\(''\);}", "", in_data, count=1, flags=re.DOTALL)
except:
traceback.print_exc(file=sys.stdout)
return data
def containsWise(self, data):
return 'w,i,s,e' in data
def unwise(self, sJavascript):
#print 'sJavascript',sJavascript
page_value=""
try:
ss="w,i,s,e=("+sJavascript+')'
exec (ss)
page_value=self.__unpack(w,i,s,e)
except: traceback.print_exc(file=sys.stdout)
return page_value
def __unpack( self,w, i, s, e):
lIll = 0;
ll1I = 0;
Il1l = 0;
ll1l = [];
l1lI = [];
while True:
if (lIll < 5):
l1lI.append(w[lIll])
elif (lIll < len(w)):
ll1l.append(w[lIll]);
lIll+=1;
if (ll1I < 5):
l1lI.append(i[ll1I])
elif (ll1I < len(i)):
ll1l.append(i[ll1I])
ll1I+=1;
if (Il1l < 5):
l1lI.append(s[Il1l])
elif (Il1l < len(s)):
ll1l.append(s[Il1l]);
Il1l+=1;
if (len(w) + len(i) + len(s) + len(e) == len(ll1l) + len(l1lI) + len(e)):
break;
lI1l = ''.join(ll1l)#.join('');
I1lI = ''.join(l1lI)#.join('');
ll1I = 0;
l1ll = [];
for lIll in range(0,len(ll1l),2):
#print 'array i',lIll,len(ll1l)
ll11 = -1;
if ( ord(I1lI[ll1I]) % 2):
ll11 = 1;
#print 'val is ', lI1l[lIll: lIll+2]
l1ll.append(chr( int(lI1l[lIll: lIll+2], 36) - ll11));
ll1I+=1;
if (ll1I >= len(l1lI)):
ll1I = 0;
ret=''.join(l1ll)
if 'eval(function(w,i,s,e)' in ret:
ret=re.compile('eval\(function\(w,i,s,e\).*}\((.*?)\)').findall(ret)[0]
return self.unwise(ret)
else:
return ret
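# Decoder for the document.write(...parseInt...) scheme: splits the escaped payload
# on its delimiter, then XORs each character with a rotating numeric key and adds a
# fixed offset recovered from the page.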
class JsUnFunc:
def unFuncALL(self,data):
in_data = data
dec_data = ''
sPattern = r"var\s*tmp\s*=\s*s.split\(\"([^\"]+)\"\)"
kPattern = r"unescape\(tmp\[1\]\s*\+\s*\"([^\"]+)\"\)"
dataPattern = r"document.write\(\w+\(\'\'\)\s*\+\s*\'([^\']+)"
modPattern = r"charCodeAt\(i\)\)\+\s*([^\)]+)\)"
s_data = re.compile(sPattern).findall(in_data)
k_data = re.compile(kPattern).findall(in_data)
undc_data = re.compile(dataPattern).findall(in_data)
mod_data = re.compile(modPattern).findall(in_data)
sDelimiter = s_data[0]
s = undc_data[0]
tmp = urllib.unquote(s).split(sDelimiter)
k = tmp[1] + k_data[0]
mod = int(mod_data[0])
encData = tmp[0]
for i,d in enumerate(encData):
dec_data += chr((int(k[i % len(k)]) ^ ord(d)) + mod)
data = re.sub("eval\(unescape\('function.*?unescape\(''\)\);'\)\);", dec_data, in_data, count=1, flags=re.DOTALL)
return data
def cointainUnFunc(self,data):
return 'String.fromCharCode((parseInt' in data
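# Decoder for hex-packed scripts ("var t=..."): base16-decodes the t string, strips
# non-ASCII bytes and substitutes the result back into the page.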
class JsUnPP:
def UnPPAll(self,data):
def removeNonAscii(s): return "".join(i for i in s if ord(i)<128)
in_data = data
tPattern = r"var\s*t=['\"](\w+)['\"]\s*;\s*for"
t_data = re.compile(tPattern).findall(in_data)
for i in t_data:
out_data = removeNonAscii(str(base64.b16decode(i.upper())))
data = re.sub(r"var\s*t=\"[^}]+}", out_data, data, count=1)
return data
def containUnPP(self,data):
return 'parseInt(t.substr' in data
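# Decoder for scripts that build a URL-encoded string one character at a time
# through repeated .push('...') calls: collects the pushed characters and
# URL-unquotes the joined result.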
class JsUnPush:
def UnPush(self,data):
in_data = data
varPattern = '(var\s*\w+\s*=\s*new.*?\.push\(\'\'\);)'
var_data = re.compile(varPattern).findall(in_data)
charPattern = '\(\'([%0-9a-fA-F])\'\)'
chars = re.compile(charPattern).findall(var_data[0])
res = urllib.unquote(''.join(chars))
out_data=in_data.replace(var_data[0],res)
return out_data
def containUnPush(self,data):
return '.push(\'%\')' in data
|