filename | text
---|---|
the-stack_0_6800 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class TeamSettingsDaysOffPatch(Model):
"""TeamSettingsDaysOffPatch.
:param days_off:
:type days_off: list of :class:`DateRange <work.v4_0.models.DateRange>`
"""
_attribute_map = {
'days_off': {'key': 'daysOff', 'type': '[DateRange]'}
}
def __init__(self, days_off=None):
super(TeamSettingsDaysOffPatch, self).__init__()
self.days_off = days_off
|
the-stack_0_6802 | import elasticsearch
import datetime
node = 'Elasticsearch:80'
#node = '54.186.33.136:9200'
es = elasticsearch.Elasticsearch(node)
entry_mapping = {
'entry-type': {
'properties': {
'id': {'type': 'string'},
'created': {'type': 'date'},
'title': {'type': 'string'},
'tags': {'type': 'string', 'analyzer': 'keyword'},
'content': {'type': 'string'}
}
}
}
es.index(
index='test-index',
doc_type='test_type',
id='test_id',
body={
'title': 'test_title',
'content': 'This is the content',
},
op_type='create'
)
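# Hedged sketch (not part of the original script): entry_mapping above is
# defined but never applied.  With the pre-6.x elasticsearch-py client this
# script appears to target (string field types, doc_type/op_type arguments),
# the mapping could be registered by creating the index with it before
# indexing documents.  The index name below is an illustrative assumption.
def create_entry_index(client, index_name='entries'):
    """Create an index carrying entry_mapping, if it does not already exist."""
    if not client.indices.exists(index=index_name):
        client.indices.create(index=index_name, body={'mappings': entry_mapping})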
|
the-stack_0_6803 | import numpy as np
import torch
from .primitives import fexp, cuboid_inside_outside_function, \
inside_outside_function, points_to_cuboid_distances, \
transform_to_primitives_centric_system, deform, sq_volumes
from .regularizers import get as get_regularizer
def sampling_from_parametric_space_to_equivalent_points(
shape_params,
epsilons,
sq_sampler
):
"""
    Given the sampling steps in the parametric space, we want to get the actual
3D points.
Arguments:
----------
shape_params: Tensor with size BxMx3, containing the shape along each
axis for the M primitives
epsilons: Tensor with size BxMx2, containing the shape along the
latitude and the longitude for the M primitives
Returns:
---------
P: Tensor of size BxMxSx3 that contains S sampled points from the
surface of each primitive
N: Tensor of size BxMxSx3 that contains the normals of the S sampled
points from the surface of each primitive
"""
# Allocate memory to store the sampling steps
B = shape_params.shape[0] # batch size
M = shape_params.shape[1] # number of primitives
S = sq_sampler.n_samples
etas, omegas = sq_sampler.sample_on_batch(
shape_params.detach().cpu().numpy(),
epsilons.detach().cpu().numpy()
)
# Make sure we don't get nan for gradients
etas[etas == 0] += 1e-6
omegas[omegas == 0] += 1e-6
# Move to tensors
etas = shape_params.new_tensor(etas)
omegas = shape_params.new_tensor(omegas)
# Make sure that all tensors have the right shape
a1 = shape_params[:, :, 0].unsqueeze(-1) # size BxMx1
a2 = shape_params[:, :, 1].unsqueeze(-1) # size BxMx1
a3 = shape_params[:, :, 2].unsqueeze(-1) # size BxMx1
e1 = epsilons[:, :, 0].unsqueeze(-1) # size BxMx1
e2 = epsilons[:, :, 1].unsqueeze(-1) # size BxMx1
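    # Parametric superquadric surface: etas is the latitude in [-pi/2, pi/2]
    # and omegas the longitude in [-pi, pi].  fexp is assumed to be the signed
    # power fexp(t, p) = sign(t) * |t|**p, which keeps the surface well defined
    # and symmetric for the non-integer exponents e1, e2.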
x = a1 * fexp(torch.cos(etas), e1) * fexp(torch.cos(omegas), e2)
y = a2 * fexp(torch.cos(etas), e1) * fexp(torch.sin(omegas), e2)
z = a3 * fexp(torch.sin(etas), e1)
# Make sure we don't get INFs
# x[torch.abs(x) <= 1e-9] = 1e-9
# y[torch.abs(y) <= 1e-9] = 1e-9
# z[torch.abs(z) <= 1e-9] = 1e-9
x = ((x > 0).float() * 2 - 1) * torch.max(torch.abs(x), x.new_tensor(1e-6))
y = ((y > 0).float() * 2 - 1) * torch.max(torch.abs(y), x.new_tensor(1e-6))
z = ((z > 0).float() * 2 - 1) * torch.max(torch.abs(z), x.new_tensor(1e-6))
# Compute the normals of the SQs
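    # Dividing by x, y, z below is an algebraic shortcut (again assuming fexp
    # is the signed power): e.g.
    #   cos(etas)**2 * cos(omegas)**2 / x
    #     = (1/a1) * fexp(cos(etas), 2 - e1) * fexp(cos(omegas), 2 - e2),
    # which is the closed-form superquadric surface normal; this is also why
    # the magnitudes of x, y, z are clamped away from zero above.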
nx = (torch.cos(etas)**2) * (torch.cos(omegas)**2) / x
ny = (torch.cos(etas)**2) * (torch.sin(omegas)**2) / y
nz = (torch.sin(etas)**2) / z
return torch.stack([x, y, z], -1), torch.stack([nx, ny, nz], -1)
def sample_uniformly_from_cubes_surface(shape_params, epsilons, sampler):
"""
    Given the sampling steps in the parametric space, we want to get the actual
3D points on the surface of the cube.
Arguments:
----------
shape_params: Tensor with size BxMx3, containing the shape along each
axis for the M primitives
Returns:
---------
P: Tensor of size BxMxSx3 that contains S sampled points from the
surface of each primitive
"""
# TODO: Make sure that this is the proper way to do this!
# Check the device of the angles and move all the tensors to that device
device = shape_params.device
# Allocate memory to store the sampling steps
B = shape_params.shape[0] # batch size
M = shape_params.shape[1] # number of primitives
S = sampler.n_samples
    N = S // 6  # samples per cube face; S is assumed to be divisible by 6
X_SQ = torch.zeros(B, M, S, 3).to(device)
for b in range(B):
for m in range(M):
x_max = shape_params[b, m, 0]
y_max = shape_params[b, m, 1]
z_max = shape_params[b, m, 2]
x_min = -x_max
y_min = -y_max
z_min = -z_max
X_SQ[b, m] = torch.stack([
torch.stack([
torch.ones((N, 1)).to(device)*x_min,
torch.rand(N, 1).to(device)*(y_max-y_min) + y_min,
torch.rand(N, 1).to(device)*(z_max-z_min) + z_min
], dim=-1).squeeze(),
torch.stack([
torch.ones((N, 1)).to(device)*x_max,
torch.rand(N, 1).to(device)*(y_max-y_min) + y_min,
torch.rand(N, 1).to(device)*(z_max-z_min) + z_min
], dim=-1).squeeze(),
torch.stack([
torch.rand(N, 1).to(device)*(x_max-x_min) + x_min,
torch.ones((N, 1)).to(device)*y_min,
torch.rand(N, 1).to(device)*(z_max-z_min) + z_min
], dim=-1).squeeze(),
torch.stack([
torch.rand(N, 1).to(device)*(x_max-x_min) + x_min,
torch.ones((N, 1)).to(device)*y_max,
torch.rand(N, 1).to(device)*(z_max-z_min) + z_min
], dim=-1).squeeze(),
torch.stack([
torch.rand(N, 1).to(device)*(x_max-x_min) + x_min,
torch.rand(N, 1).to(device)*(y_max-y_min) + y_min,
torch.ones((N, 1)).to(device)*z_min,
], dim=-1).squeeze(),
torch.stack([
torch.rand(N, 1).to(device)*(x_max-x_min) + x_min,
torch.rand(N, 1).to(device)*(y_max-y_min) + y_min,
torch.ones((N, 1)).to(device)*z_max,
], dim=-1).squeeze()
]).view(-1, 3)
normals = X_SQ.new_zeros(X_SQ.shape)
normals[:, :, 0*N:1*N, 0] = -1
normals[:, :, 1*N:2*N, 0] = 1
normals[:, :, 2*N:3*N, 1] = -1
normals[:, :, 3*N:4*N, 1] = 1
normals[:, :, 4*N:5*N, 2] = -1
normals[:, :, 5*N:6*N, 2] = 1
# make sure that X_SQ has the expected shape
assert X_SQ.shape == (B, M, S, 3)
return X_SQ, normals
def euclidean_dual_loss_pair(
y_hat_from,
y_target_from,
y_hat_to,
y_target_to,
regularizer_terms,
sampler,
options):
"""
Arguments:
----------
y_hat: List of Tensors containing the predictions of the network
y_target: Tensor with size BxNx6 with the N points from the target
object and their corresponding normals
regularizer_terms: dictionary with the various regularizers, on the
volume of the primitives, the Bernoullis etc.
sampler: An object of either CuboidSampler or EqualDistanceSampler
depending on the type of the primitive we are using
options: A dictionary with various options
Returns:
--------
the loss
"""
loss_from, debug_stats_from = euclidean_dual_loss(y_hat_from,
y_target_from,
regularizer_terms,
sampler, options)
loss_to, debug_stats_to = euclidean_dual_loss(y_hat_to,
y_target_to,
regularizer_terms,
sampler, options)
    # y_hat_from and y_hat_to contain all of the parameters needed
# to do regularization
loss = loss_from + loss_to
debug_stats = {}
for key in debug_stats_from:
assert isinstance(key, str)
debug_stats[key + "_from"] = debug_stats_from[key]
for key in debug_stats_to:
assert isinstance(key, str)
debug_stats[key + "_to"] = debug_stats_to[key]
rotations_L2 = ((y_hat_to.rotations - y_hat_from.rotations)**2).sum(-1).sum(-1)
translation_L2 = ((y_hat_to.translations - y_hat_from.translations)**2).sum(-1).sum(-1)
sizes_L2 = ((y_hat_to.sizes - y_hat_from.sizes)**2).sum(-1).sum(-1)
shapes_L2 = ((y_hat_to.shapes - y_hat_from.shapes)**2).sum(-1).sum(-1)
params_L2 = 0.0005 * (rotations_L2 + translation_L2 + sizes_L2 + shapes_L2)
debug_stats['delta_params_L2'] = params_L2
# y_hat_to - y_hat_from
prob_L1 = 0.005 * torch.abs(y_hat_to.probs - y_hat_from.probs).sum(-1).sum(-1)
debug_stats['delta_prob_L1'] = prob_L1
loss += prob_L1 + params_L2
return loss, debug_stats
def euclidean_dual_loss(
y_hat,
y_target,
regularizer_terms,
sampler,
options
):
"""
Arguments:
----------
y_hat: List of Tensors containing the predictions of the network
y_target: Tensor with size BxNx6 with the N points from the target
object and their corresponding normals
regularizer_terms: dictionary with the various regularizers, on the
volume of the primitives, the Bernoullis etc.
sampler: An object of either CuboidSampler or EqualDistanceSampler
depending on the type of the primitive we are using
options: A dictionary with various options
Returns:
--------
the loss
"""
# If use_cuboids is true then use 3D cuboids as geometric primitives. If
# use_sq is true use SQs as geometric primitives. If none of the above is
# true the default geometric primitive is cuboidal superquadrics, namely
# SQs with \epsilon_1=\epsilon_2=0.25
use_cuboids = options.get("use_cuboids", False)
use_sq = options.get("use_sq", False)
use_chamfer = options.get("use_chamfer", False)
loss_weights = options.get(
"loss_weights",
{"pcl_to_prim_weight": 1.0, "prim_to_pcl_weight": 1.0}
)
gt_normals = y_target[:, :, 3:6]
gt_points = y_target[:, :, :3]
# Make sure that everything has the right shape
assert gt_points.shape[-1] == 3
# Declare some variables
B = gt_points.shape[0] # batch size
N = gt_points.shape[1] # number of points per sample
M = y_hat[0].shape[1] # number of primitives
S = sampler.n_samples # number of points sampled from the SQ
probs = y_hat[0].view(B, M)
translations = y_hat[1].view(B, M, 3)
rotations = y_hat[2].view(B, M, 4)
shapes = y_hat[3].view(B, M, 3)
epsilons = y_hat[4].view(B, M, 2)
tapering_params = y_hat[5].view(B, M, 2)
# Transform the 3D points from world-coordinates to primitive-centric
# coordinates with size BxNxMx3
X_transformed = transform_to_primitives_centric_system(
gt_points,
translations,
rotations
)
# Based on the shape of the primitive, do the sampling either on the
# surface of the SQ or on the surface of the cuboid
if use_cuboids:
sample_points_on_surface = sample_uniformly_from_cubes_surface
else:
sample_points_on_surface =\
sampling_from_parametric_space_to_equivalent_points
# Get the coordinates of the sampled points on the surfaces of the SQs,
# with size BxMxSx3
X_SQ, normals = sample_points_on_surface(
shapes,
epsilons,
sampler
)
X_SQ = deform(X_SQ, shapes, tapering_params)
# Make the normals unit vectors
normals_norm = normals.norm(dim=-1).view(B, M, S, 1)
normals = normals / normals_norm
# Make sure that everything has the right size
assert X_SQ.shape == (B, M, S, 3)
assert normals.shape == (B, M, S, 3)
assert X_transformed.shape == (B, N, M, 3)
# Make sure that the normals are unit vectors
assert torch.sqrt(torch.sum(normals ** 2, -1)).sum() == B*M*S
# Compute the pairwise Euclidean distances between points sampled on the
# surface of the SQ (X_SQ) with points sampled on the surface of the target
# object (X_transformed)
# In the code we do everything at once, but this comment helps understand
# what we are actually doing
# t = X_transformed.permute(0, 2, 1, 3) # now X_transformed has size
# BxMxNx3
# xx_sq = X_sq.unsqueeze(3) # now xx_sq has size BxMxSx1x3
# t = t.unsqueeze(2) # now t has size BxMx1xNx3
V = (X_SQ.unsqueeze(3) - (X_transformed.permute(0, 2, 1, 3)).unsqueeze(2))
assert V.shape == (B, M, S, N, 3)
# Now we can compute the distances from every point in the surface of the
# SQ to every point on the target object transformed in every
# primitive-based coordinate system
# D = torch.sum((xx_sq - t)**2, -1) # D has size BxMxSxN
# TODO: Should I add the SQRT, now we are computing the squared distances
D = torch.sum((V)**2, -1)
assert D.shape == (B, M, S, N)
pcl_to_prim, inside, debug_stats = pcl_to_prim_loss(
[probs, translations, rotations, shapes, epsilons, tapering_params],
X_transformed,
D,
use_cuboids,
use_sq,
use_chamfer
)
assert inside is None or inside.shape == (B, N, M)
prim_to_pcl = prim_to_pcl_loss(
y_hat,
V,
normals,
inside,
D,
use_chamfer
)
# Compute any regularizer terms
regularizers = get_regularizer_term(
y_hat,
debug_stats["F"],
X_SQ,
regularizer_terms
)
reg_values = get_regularizer_weights(
regularizers,
regularizer_terms
)
debug_stats["regularizer_terms"] = reg_values
debug_stats["pcl_to_prim_loss"] = pcl_to_prim
debug_stats["prim_to_pcl_loss"] = prim_to_pcl
# Sum up the regularization terms
regs = sum(reg_values.values())
w1 = loss_weights["pcl_to_prim_weight"]
w2 = loss_weights["prim_to_pcl_weight"]
return w1 * pcl_to_prim + w2 * prim_to_pcl + regs, debug_stats
def pcl_to_prim_loss(
y_hat,
X_transformed,
D,
use_cuboids=False,
use_sq=False,
use_chamfer=False
):
"""
Arguments:
----------
y_hat: List of Tensors containing the predictions of the network
X_transformed: Tensor with size BxNxMx3 with the N points from the
target object transformed in the M primitive-centric
coordinate systems
D: Tensor of size BxMxSxN that contains the pairwise distances between
points on the surface of the SQ to the points on the target object
use_cuboids: when True use cuboids as geometric primitives
use_sq: when True use superquadrics as geometric primitives
use_chamfer: when True compute the Chamfer distance
"""
# Declare some variables
B = X_transformed.shape[0] # batch size
N = X_transformed.shape[1] # number of points per sample
M = X_transformed.shape[2] # number of primitives
shapes = y_hat[3].view(B, M, 3)
epsilons = y_hat[4].view(B, M, 2)
probs = y_hat[0]
# Get the relative position of points with respect to the SQs using the
# inside-outside function
F = shapes.new_tensor(0)
inside = None
# XXX
# if not use_chamfer: # you should still calculate the F's regardless...
if True:
if use_cuboids:
F = points_to_cuboid_distances(X_transformed, shapes)
inside = F <= 0
elif use_sq:
F = inside_outside_function(
X_transformed,
shapes,
epsilons
)
inside = F <= 1
else:
            # If neither use_sq nor use_cuboids is set, the default
# geometric primitives are cuboidal superquadrics, namely
# with \epsilon_1=\epsilon_2=0.25
F = cuboid_inside_outside_function(
X_transformed,
shapes,
epsilon=0.25
)
inside = F <= 1
D = torch.min(D, 2)[0].permute(0, 2, 1) # size BxNxM
assert D.shape == (B, N, M)
if not use_chamfer:
D[inside] = 0.0
distances, idxs = torch.sort(D, dim=-1)
# Start by computing the cumulative product
# Sort based on the indices
probs = torch.cat([
probs[i].take(idxs[i]).unsqueeze(0) for i in range(len(idxs))
])
neg_cumprod = torch.cumprod(1-probs, dim=-1)
neg_cumprod = torch.cat(
[neg_cumprod.new_ones((B, N, 1)), neg_cumprod[:, :, :-1]],
dim=-1
)
# minprob[i, j, k] is the probability that for sample i and point j the
# k-th primitive has the minimum loss
minprob = probs.mul(neg_cumprod)
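    # The loss below is then the expected distance from each target point to
    # the closest primitive that actually exists: with primitives sorted by
    # distance, minprob_k = p_k * prod_{j<k}(1 - p_j), so summing
    # distances * minprob marginalizes over which primitive is the nearest
    # existing one.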
loss = torch.einsum("ijk,ijk->", [distances, minprob])
loss = loss / B / N
# Return some debug statistics
debug_stats = {}
debug_stats["F"] = F
debug_stats["distances"] = distances
debug_stats["minprob"] = minprob
debug_stats["neg_cumprod"] = neg_cumprod
return loss, inside, debug_stats
def prim_to_pcl_loss(
y_hat,
V,
normals,
inside,
D,
use_chamfer=False
):
"""
Arguments:
----------
y_hat: List of Tensors containing the predictions of the network
    V: Tensor with size BxMxSxNx3 with the vectors from the points on SQs to
the points on the target's object surface.
normals: Tensor with size BxMxSx3 with the normals at every sampled
points on the surfaces of the M primitives
inside: A mask containing 1 if a point is inside the corresponding
shape
D: Tensor of size BxMxSxN that contains the pairwise distances between
points on the surface of the SQ to the points on the target object
"""
B = V.shape[0] # batch size
M = V.shape[1] # number of primitives
S = V.shape[2] # number of points sampled on the SQ
N = V.shape[3] # number of points sampled on the target object
probs = y_hat[0]
assert D.shape == (B, M, S, N)
# We need to compute the distance to the closest point from the target
# object for every point S
# min_D = D.min(-1)[0] # min_D has size BxMxS
if not use_chamfer:
outside = (1-inside).permute(0, 2, 1).unsqueeze(2).float()
assert outside.shape == (B, M, 1, N)
D = D + (outside*1e30)
# Compute the minimum distances D, with size BxMxS
D = D.min(-1)[0]
D[D >= 1e30] = 0.0
assert D.shape == (B, M, S)
# Compute an approximate area of the superellipsoid as if it were an
# ellipsoid
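    # This appears to implement the Knud Thomsen approximation for the surface
    # area of an ellipsoid with semi-axes (a1, a2, a3):
    #   S ~ 4*pi * (((a1*a2)**p + (a1*a3)**p + (a2*a3)**p) / 3) ** (1/p)
    # with p = 1.6 (the commonly quoted value is p ~ 1.6075), hence the
    # exponent 0.625 = 1/1.6.  The result is then normalized so that the areas
    # average to 1 over the M primitives.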
shapes = y_hat[3].view(B, M, 3)
area = 4 * np.pi * (
(shapes[:, :, 0] * shapes[:, :, 1])**1.6 / 3 +
(shapes[:, :, 0] * shapes[:, :, 2])**1.6 / 3 +
(shapes[:, :, 1] * shapes[:, :, 2])**1.6 / 3
)**0.625
area = M * area / area.sum(dim=-1, keepdim=True)
# loss = torch.einsum("ij,ij,ij->", [torch.max(D, -1)[0], probs, volumes])
# loss = torch.einsum("ij,ij,ij->", [torch.mean(D, -1), probs, volumes])
# loss = torch.einsum("ij,ij->", [torch.max(D, -1)[0], probs])
loss = torch.einsum("ij,ij,ij->", [torch.mean(D, -1), probs, area])
loss = loss / B / M
return loss
def get_regularizer_term(
parameters,
F,
X_SQ,
regularizer_terms,
transition_matrix=None
):
regularizers = [
"sparsity_regularizer",
"bernoulli_regularizer",
"entropy_bernoulli_regularizer",
"parsimony_regularizer",
"overlapping_regularizer"
]
if regularizer_terms["regularizer_type"] is None:
regularizer_terms["regularizer_type"] = []
return {
r: get_regularizer(
r if r in regularizer_terms["regularizer_type"] else "",
parameters,
F,
X_SQ,
regularizer_terms
)
for r in regularizers
}
def get_regularizer_weights(regularizers, regularizer_terms):
# Ensures that the expected number of primitives lies between a minimum and
# a maximum number of primitives.
bernoulli_reg = regularizers["bernoulli_regularizer"] *\
regularizer_terms["bernoulli_regularizer_weight"]
# Ensures that the bernoullis will be either 1.0 or 0.0 and not 0.5
entropy_bernoulli_reg = regularizers["entropy_bernoulli_regularizer"] *\
regularizer_terms["entropy_bernoulli_regularizer_weight"]
# Minimizes the expected number of primitives
parsimony_reg = regularizers["parsimony_regularizer"] *\
regularizer_terms["parsimony_regularizer_weight"]
    # Ensures that primitives do not intersect with each other using the F
# function
overlapping_reg = regularizers["overlapping_regularizer"] *\
regularizer_terms["overlapping_regularizer_weight"]
# Similar to the bernoulli_regularizer. Again we want to ensure that the
    # expected number of primitives will be between a minimum and a maximum
# number of primitives.
sparsity_reg = regularizers["sparsity_regularizer"] *\
regularizer_terms["sparsity_regularizer_weight"]
reg_values = {
"sparsity_regularizer": sparsity_reg,
"overlapping_regularizer": overlapping_reg,
"parsimony_regularizer": parsimony_reg,
"entropy_bernoulli_regularizer": entropy_bernoulli_reg,
"bernoulli_regularizer": bernoulli_reg
}
return reg_values
|
the-stack_0_6806 | """
RESTful platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.rest/
"""
import logging
import requests
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TARGET, ATTR_TITLE, ATTR_TITLE_DEFAULT, BaseNotificationService,
PLATFORM_SCHEMA)
from homeassistant.const import (CONF_RESOURCE, CONF_METHOD, CONF_NAME)
import homeassistant.helpers.config_validation as cv
CONF_MESSAGE_PARAMETER_NAME = 'message_param_name'
CONF_TARGET_PARAMETER_NAME = 'target_param_name'
CONF_TITLE_PARAMETER_NAME = 'title_param_name'
DEFAULT_MESSAGE_PARAM_NAME = 'message'
DEFAULT_METHOD = 'GET'
DEFAULT_TARGET_PARAM_NAME = None
DEFAULT_TITLE_PARAM_NAME = None
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_RESOURCE): cv.url,
vol.Optional(CONF_MESSAGE_PARAMETER_NAME,
default=DEFAULT_MESSAGE_PARAM_NAME): cv.string,
vol.Optional(CONF_METHOD, default=DEFAULT_METHOD):
vol.In(['POST', 'GET', 'POST_JSON']),
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TARGET_PARAMETER_NAME,
default=DEFAULT_TARGET_PARAM_NAME): cv.string,
vol.Optional(CONF_TITLE_PARAMETER_NAME,
default=DEFAULT_TITLE_PARAM_NAME): cv.string,
})
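# Illustrative configuration sketch (values below are assumptions, not part of
# this file) showing how the schema above maps onto configuration.yaml:
#
#   notify:
#     - platform: rest
#       name: my_rest_service
#       resource: http://192.168.1.10:8080/notify
#       method: POST_JSON
#       message_param_name: message
#       title_param_name: title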
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config, discovery_info=None):
"""Get the RESTful notification service."""
resource = config.get(CONF_RESOURCE)
method = config.get(CONF_METHOD)
message_param_name = config.get(CONF_MESSAGE_PARAMETER_NAME)
title_param_name = config.get(CONF_TITLE_PARAMETER_NAME)
target_param_name = config.get(CONF_TARGET_PARAMETER_NAME)
return RestNotificationService(
resource, method, message_param_name, title_param_name,
target_param_name)
class RestNotificationService(BaseNotificationService):
"""Implementation of a notification service for REST."""
def __init__(self, resource, method, message_param_name, title_param_name,
target_param_name):
"""Initialize the service."""
self._resource = resource
self._method = method.upper()
self._message_param_name = message_param_name
self._title_param_name = title_param_name
self._target_param_name = target_param_name
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
data = {
self._message_param_name: message
}
if self._title_param_name is not None:
data[self._title_param_name] = kwargs.get(
ATTR_TITLE, ATTR_TITLE_DEFAULT)
if self._target_param_name is not None and ATTR_TARGET in kwargs:
# Target is a list as of 0.29 and we don't want to break existing
# integrations, so just return the first target in the list.
data[self._target_param_name] = kwargs[ATTR_TARGET][0]
if self._method == 'POST':
response = requests.post(self._resource, data=data, timeout=10)
elif self._method == 'POST_JSON':
response = requests.post(self._resource, json=data, timeout=10)
else: # default GET
response = requests.get(self._resource, params=data, timeout=10)
if response.status_code not in (200, 201):
_LOGGER.exception(
"Error sending message. Response %d: %s:",
response.status_code, response.reason)
|
the-stack_0_6807 | from __future__ import absolute_import, division, print_function
import boost_adaptbx.boost.python as bp
ext = bp.import_ext("iotbx_pdb_hierarchy_ext")
from iotbx_pdb_hierarchy_ext import *
from libtbx.str_utils import show_sorted_by_counts
from libtbx.utils import Sorry, plural_s, null_out
from libtbx import Auto, dict_with_default_0, group_args
from iotbx.pdb import hy36encode, hy36decode, common_residue_names_get_class
from iotbx.pdb.amino_acid_codes import one_letter_given_three_letter
from iotbx.pdb.modified_aa_names import lookup as aa_3_as_1_mod
from iotbx.pdb.modified_rna_dna_names import lookup as na_3_as_1_mod
from iotbx.pdb.utils import all_chain_ids, all_label_asym_ids
import iotbx.cif.model
from cctbx import crystal
from cctbx.array_family import flex
import six
from six.moves import cStringIO as StringIO
from six.moves import range, zip
import collections
import operator
import warnings
import math
import sys
class pickle_import_trigger(object): pass
level_ids = ["model", "chain", "residue_group", "atom_group", "atom"]
def _show_residue_group(rg, out, prefix):
atoms = rg.atoms()
if (atoms.size() == 0):
ch = rg.parent()
if (ch is None): ch = " "
else: ch = "%s" % ch.id
print(prefix+'empty: "%s%s"' % (ch, rg.resid()), file=out)
else:
def show_atom(atom):
print(prefix+'"%s"' % atom.format_atom_record(
replace_floats_with=".*."), file=out)
if (atoms.size() <= 3):
for atom in atoms: show_atom(atom)
else:
show_atom(atoms[0])
print(prefix+'... %d atom%s not shown' % plural_s(
atoms.size()-2), file=out)
show_atom(atoms[-1])
class overall_counts(object):
def __init__(self):
self._errors = None
self._warnings = None
def show(self,
out=None,
prefix="",
flag_errors=True,
flag_warnings=True,
residue_groups_max_show=10,
duplicate_atom_labels_max_show=10):
if (out is None): out = sys.stdout
self._errors = []
self._warnings = []
def add_err(msg):
if (flag_errors): print(prefix+msg, file=out)
self._errors.append(msg.strip())
def add_warn(msg):
if (flag_warnings): print(prefix+msg, file=out)
self._warnings.append(msg.strip())
fmt = "%%%dd" % len(str(self.n_atoms))
print(prefix+"total number of:", file=out)
if (self.n_duplicate_model_ids != 0):
add_err(" ### ERROR: duplicate model ids ###")
if (self.n_empty_models != 0):
add_warn(" ### WARNING: empty model ###")
print(prefix+" models: ", fmt % self.n_models, end='', file=out)
infos = []
if (self.n_duplicate_model_ids != 0):
infos.append("%d with duplicate model id%s" % plural_s(
self.n_duplicate_model_ids))
if (self.n_empty_models != 0):
infos.append("%d empty" % self.n_empty_models)
if (len(infos) != 0): print(" (%s)" % "; ".join(infos), end='', file=out)
print(file=out)
if (self.n_duplicate_chain_ids != 0):
add_warn(" ### WARNING: duplicate chain ids ###")
if (self.n_empty_chains != 0):
add_warn(" ### WARNING: empty chain ###")
print(prefix+" chains: ", fmt % self.n_chains, end='', file=out)
infos = []
if (self.n_duplicate_chain_ids != 0):
infos.append("%d with duplicate chain id%s" % plural_s(
self.n_duplicate_chain_ids))
if (self.n_empty_chains != 0):
infos.append("%d empty" % self.n_empty_chains)
if (self.n_explicit_chain_breaks != 0):
infos.append("%d explicit chain break%s" % plural_s(
self.n_explicit_chain_breaks))
if (len(infos) != 0): print(" (%s)" % "; ".join(infos), end='', file=out)
print(file=out)
print(prefix+" alt. conf.:", fmt % self.n_alt_conf, file=out)
print(prefix+" residues: ", fmt % (
self.n_residues + self.n_residue_groups + self.n_empty_residue_groups), end='', file=out)
if (self.n_residue_groups != 0):
print(" (%d with mixed residue names)" % self.n_residue_groups, end='', file=out)
print(file=out)
if (self.n_duplicate_atom_labels != 0):
add_err(" ### ERROR: duplicate atom labels ###")
print(prefix+" atoms: ", fmt % self.n_atoms, end='', file=out)
if (self.n_duplicate_atom_labels != 0):
print(" (%d with duplicate labels)" %self.n_duplicate_atom_labels, end='', file=out)
print(file=out)
print(prefix+" anisou: ", fmt % self.n_anisou, file=out)
if (self.n_empty_residue_groups != 0):
add_warn(" ### WARNING: empty residue_group ###")
print(prefix+" empty residue_groups:", \
fmt % self.n_empty_residue_groups, file=out)
if (self.n_empty_atom_groups != 0):
add_warn(" ### WARNING: empty atom_group ###")
print(prefix+" empty atom_groups:", \
fmt % self.n_empty_atom_groups, file=out)
#
c = self.element_charge_types
print(prefix+"number of atom element+charge types:", len(c), file=out)
if (len(c) != 0):
print(prefix+"histogram of atom element+charge frequency:", file=out)
show_sorted_by_counts(c.items(), out=out, prefix=prefix+" ")
#
c = self.resname_classes
print(prefix+"residue name classes:", end='', file=out)
if (len(c) == 0): print(" None", end='', file=out)
print(file=out)
show_sorted_by_counts(c.items(), out=out, prefix=prefix+" ")
#
c = self.chain_ids
print(prefix+"number of chain ids: %d" % len(c), file=out)
if (len(c) != 0):
print(prefix+"histogram of chain id frequency:", file=out)
show_sorted_by_counts(c.items(), out=out, prefix=prefix+" ")
#
c = self.alt_conf_ids
print(prefix+"number of alt. conf. ids: %d" % len(c), file=out)
if (len(c) != 0):
print(prefix+"histogram of alt. conf. id frequency:", file=out)
show_sorted_by_counts(c.items(), out=out, prefix=prefix+" ")
#
fmt = "%%%dd" % len(str(max(
self.n_alt_conf_none,
self.n_alt_conf_pure,
self.n_alt_conf_proper,
self.n_alt_conf_improper)))
print(prefix+"residue alt. conf. situations:", file=out)
print(prefix+" pure main conf.: ", fmt%self.n_alt_conf_none, file=out)
print(prefix+" pure alt. conf.: ", fmt%self.n_alt_conf_pure, file=out)
print(prefix+" proper alt. conf.: ", fmt%self.n_alt_conf_proper, file=out)
if (self.n_alt_conf_improper != 0):
add_err(" ### ERROR: improper alt. conf. ###")
print(prefix+" improper alt. conf.:", \
fmt % self.n_alt_conf_improper, file=out)
self.show_chains_with_mix_of_proper_and_improper_alt_conf(
out=out, prefix=prefix)
#
c = self.resnames
print(prefix+"number of residue names: %d" % len(c), file=out)
if (len(c) != 0):
print(prefix+"histogram of residue name frequency:", file=out)
annotation_appearance = {
"common_amino_acid": None,
"modified_amino_acid": " modified amino acid",
"common_rna_dna": None,
"modified_rna_dna": " modified rna/dna",
"common_water": " common water",
"common_small_molecule": " common small molecule",
"common_element": " common element",
"other": " other",
'd_amino_acid' : ' D-amino acid',
'common_saccharide' : ' common saccharide',
}
show_sorted_by_counts(c.items(), out=out, prefix=prefix+" ",
annotations=[
annotation_appearance[common_residue_names_get_class(name=name)]
for name in c.keys()])
#
if (len(self.consecutive_residue_groups_with_same_resid) != 0):
add_warn("### WARNING: consecutive residue_groups with same resid ###")
self.show_consecutive_residue_groups_with_same_resid(
out=out, prefix=prefix, max_show=residue_groups_max_show)
#
if (len(self.residue_groups_with_multiple_resnames_using_same_altloc)!= 0):
add_err("### ERROR: residue group with multiple resnames using"
" same altloc ###")
self.show_residue_groups_with_multiple_resnames_using_same_altloc(
out=out, prefix=prefix, max_show=residue_groups_max_show)
#
self.show_duplicate_atom_labels(
out=out, prefix=prefix, max_show=duplicate_atom_labels_max_show)
def as_str(self,
prefix="",
residue_groups_max_show=10,
duplicate_atom_labels_max_show=10):
out = StringIO()
self.show(
out=out,
prefix=prefix,
residue_groups_max_show=residue_groups_max_show,
duplicate_atom_labels_max_show=duplicate_atom_labels_max_show)
return out.getvalue()
def errors(self):
if (self._errors is None): self.show(out=null_out())
return self._errors
def get_n_residues_of_classes(self, classes):
result = 0
for resname, count in self.resnames.items():
if common_residue_names_get_class(resname) in classes:
result += count
return result
def warnings(self):
if (self._warnings is None): self.show(out=null_out())
return self._warnings
def errors_and_warnings(self):
return self.errors() + self.warnings()
def show_improper_alt_conf(self, out=None, prefix=""):
if (self.n_alt_conf_improper == 0): return
if (out is None): out = sys.stdout
for residue_group,label in [(self.alt_conf_proper, "proper"),
(self.alt_conf_improper, "improper")]:
if (residue_group is None): continue
print(prefix+"residue with %s altloc" % label, file=out)
for ag in residue_group.atom_groups():
for atom in ag.atoms():
print(prefix+' "%s"' % atom.format_atom_record(
replace_floats_with=".*."), file=out)
def raise_improper_alt_conf_if_necessary(self):
sio = StringIO()
self.show_improper_alt_conf(out=sio)
msg = sio.getvalue()
if (len(msg) != 0): raise Sorry(msg.rstrip())
def show_chains_with_mix_of_proper_and_improper_alt_conf(self,
out=None,
prefix=""):
if (out is None): out = sys.stdout
n = self.n_chains_with_mix_of_proper_and_improper_alt_conf
print(prefix+"chains with mix of proper and improper alt. conf.:", n, file=out)
if (n != 0): prefix += " "
self.show_improper_alt_conf(out=out, prefix=prefix)
def raise_chains_with_mix_of_proper_and_improper_alt_conf_if_necessary(self):
if (self.n_chains_with_mix_of_proper_and_improper_alt_conf == 0):
return
sio = StringIO()
self.show_chains_with_mix_of_proper_and_improper_alt_conf(out=sio)
raise Sorry(sio.getvalue().rstrip())
def show_consecutive_residue_groups_with_same_resid(self,
out=None,
prefix="",
max_show=10):
cons = self.consecutive_residue_groups_with_same_resid
if (len(cons) == 0): return
if (out is None): out = sys.stdout
print(prefix+"number of consecutive residue groups with same resid: %d" % \
len(cons), file=out)
if (max_show is None): max_show = len(cons)
elif (max_show <= 0): return
delim = prefix+" "+"-"*42
prev_rg = None
for rgs in cons[:max_show]:
for next,rg in zip(["", "next "], rgs):
if ( prev_rg is not None
and prev_rg.memory_id() == rg.memory_id()): continue
elif (next == "" and prev_rg is not None):
print(delim, file=out)
prev_rg = rg
print(prefix+" %sresidue group:" % next, file=out)
_show_residue_group(rg=rg, out=out, prefix=prefix+" ")
if (len(cons) > max_show):
print(delim, file=out)
print(prefix + " ... %d remaining instance%s not shown" % \
plural_s(len(cons)-max_show), file=out)
def show_residue_groups_with_multiple_resnames_using_same_altloc(self,
out=None,
prefix="",
max_show=10):
rgs = self.residue_groups_with_multiple_resnames_using_same_altloc
if (len(rgs) == 0): return
print(prefix+"residue groups with multiple resnames using" \
" same altloc:", len(rgs), file=out)
    if (max_show is None): max_show = len(rgs)
elif (max_show <= 0): return
for rg in rgs[:max_show]:
print(prefix+" residue group:", file=out)
_show_residue_group(rg=rg, out=out, prefix=prefix+" ")
if (len(rgs) > max_show):
print(prefix + " ... %d remaining instance%s not shown" % \
plural_s(len(rgs)-max_show), file=out)
def \
raise_residue_groups_with_multiple_resnames_using_same_altloc_if_necessary(
self, max_show=10):
sio = StringIO()
self.show_residue_groups_with_multiple_resnames_using_same_altloc(
out=sio, max_show=max_show)
msg = sio.getvalue()
if (len(msg) != 0): raise Sorry(msg.rstrip())
def show_duplicate_atom_labels(self, out=None, prefix="", max_show=10):
dup = self.duplicate_atom_labels
if (len(dup) == 0): return
if (out is None): out = sys.stdout
fmt = "%%%dd" % len(str(self.n_duplicate_atom_labels))
print(prefix+"number of groups of duplicate atom labels:", \
fmt % len(dup), file=out)
print(prefix+" total number of affected atoms: ", \
fmt % self.n_duplicate_atom_labels, file=out)
if (max_show is None): max_show = len(dup)
elif (max_show <= 0): return
for atoms in dup[:max_show]:
prfx = " group "
for atom in atoms:
atom_str = atom.format_atom_record(replace_floats_with=".*.")
# replacing atom number with .*.
a_s = atom_str[:4]+ " .*." + atom_str[11:]
print(prefix+prfx+'"%s"' % a_s, file=out)
prfx = " "
if (len(dup) > max_show):
print(prefix+" ... %d remaining group%s not shown" % \
plural_s(len(dup)-max_show), file=out)
def raise_duplicate_atom_labels_if_necessary(self, max_show=10):
sio = StringIO()
self.show_duplicate_atom_labels(out=sio, max_show=max_show)
msg = sio.getvalue()
if (len(msg) != 0): raise Sorry(msg.rstrip())
class __hash_eq_mixin(object):
def __hash__(self):
return hash(self.memory_id())
def __eq__(self, other):
if (isinstance(other, self.__class__)):
return (self.memory_id() == other.memory_id())
return False
def __ne__(self, other):
return not ( self == other )
bp.inject(ext.root, __hash_eq_mixin)
@bp.inject_into(ext.root)
class _():
__doc__ = """
Root node of the PDB hierarchy object. This is returned by the method
construct_hierarchy() of the PDB/mmCIF input objects, but it may also be
  created programmatically. Note that it does not contain any reference to
crystal symmetry or source scattering information, meaning that in practice
it must often be tracked alongside an equivalent cctbx.xray.structure object.
Pickling is supported, simply by writing out and reading back the PDB-format
representation of the hierarchy.
Examples
--------
>>> hierarchy = iotbx.pdb.hierarchy.root()
"""
def __getstate__(self):
version = 2
pdb_string = StringIO()
py3out = self._as_pdb_string_cstringio( # NOTE py3out will be None in py2
cstringio=pdb_string,
append_end=True,
interleaved_conf=0,
atoms_reset_serial_first_value=None,
atom_hetatm=True,
sigatm=True,
anisou=True,
siguij=True)
if six.PY3:
pdb_string.write(py3out)
return (version, pickle_import_trigger(), self.info, pdb_string.getvalue())
def __setstate__(self, state):
assert len(state) >= 3
version = state[0]
if (version == 1): assert len(state) == 3
elif (version == 2): assert len(state) == 4
else: raise RuntimeError("Unknown version of pickled state.")
self.info = state[-2]
import iotbx.pdb
models = iotbx.pdb.input(
source_info="pickle",
lines=flex.split_lines(state[-1])).construct_hierarchy(sort_atoms=False).models()
self.pre_allocate_models(number_of_additional_models=len(models))
for model in models:
self.append_model(model=model)
def chains(self):
"""
Iterate over all chains in all models.
"""
for model in self.models():
for chain in model.chains():
yield chain
def residue_groups(self):
"""Iterate over all residue groups (by model and then chain)"""
for model in self.models():
for chain in model.chains():
for rg in chain.residue_groups():
yield rg
def atom_groups(self):
"""
Iterate over all atom groups (by model, then chain, then residue group)
"""
for model in self.models():
for chain in model.chains():
for rg in chain.residue_groups():
for ag in rg.atom_groups():
yield ag
def only_model(self):
assert self.models_size() == 1
return self.models()[0]
def only_chain(self):
return self.only_model().only_chain()
def only_residue_group(self):
return self.only_chain().only_residue_group()
def only_conformer(self):
return self.only_chain().only_conformer()
def only_atom_group(self):
return self.only_residue_group().only_atom_group()
def only_residue(self):
return self.only_conformer().only_residue()
def only_atom(self):
return self.only_atom_group().only_atom()
def overall_counts(self):
"""
Calculate basic statistics for contents of the PDB hierarchy, including
number of residues of each type.
:returns: iotbx.pdb.hierarchy.overall_counts object
"""
result = overall_counts()
self.get_overall_counts(result)
return result
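  # Usage sketch: the returned object exposes the show()/errors()/warnings()
  # methods of the overall_counts class defined above, e.g.
  #   counts = hierarchy.overall_counts()
  #   counts.show(out=sys.stdout)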
def occupancy_counts(self):
eps = 1.e-6
occ = self.atoms().extract_occ()
mean = flex.mean(occ)
negative = (occ<0).count(True)
zero_count = (flex.abs(occ)<eps).count(True)
zero_fraction = zero_count*100./occ.size()
equal_to_1_count = ((occ>(1.-eps)) & (occ<(1.+eps))).count(True)
equal_to_1_fraction = equal_to_1_count*100/occ.size()
between_0_and_1_count = ((occ>(0.+eps)) & (occ<(1.-eps))).count(True)
between_0_and_1_fraction = between_0_and_1_count*100/occ.size()
greater_than_1_count = (occ>(1.+eps)).count(True)
greater_than_1_fraction = greater_than_1_count*100./occ.size()
number_of_residues = len(list(self.residue_groups()))
number_of_alt_confs = 0
alt_loc_dist = collections.Counter()
for rg in self.residue_groups():
n_confs = len(rg.conformers())
if(n_confs > 1):
number_of_alt_confs += 1
alt_loc_dist[n_confs] += 1
return group_args(
mean = mean,
negative = negative,
zero_count = zero_count,
zero_fraction = zero_fraction,
equal_to_1_count = equal_to_1_count,
equal_to_1_fraction = equal_to_1_fraction,
between_0_and_1_count = between_0_and_1_count,
between_0_and_1_fraction = between_0_and_1_fraction,
greater_than_1_count = greater_than_1_count,
greater_than_1_fraction = greater_than_1_fraction,
alt_conf_frac = number_of_alt_confs*100/number_of_residues,
alt_loc_dist = alt_loc_dist)
def composition(self):
asc = self.atom_selection_cache()
def rc(sel_str, as_atoms=False):
sel = asc.selection(sel_str)
if(as_atoms):
return self.select(sel).atoms().size()
else:
return len(list(self.select(sel).residue_groups()))
sel_str_other = "not (water or nucleotide or protein)"
other_cnts = collections.Counter()
for rg in self.select(asc.selection(sel_str_other)).residue_groups():
for resname in rg.unique_resnames():
other_cnts[resname]+=1
return group_args(
n_atoms = self.atoms().size(),
n_chains = len(list(self.chains())),
n_protein = rc("protein"),
n_nucleotide = rc("nucleotide"),
n_water = rc("water"),
n_hd = rc(sel_str="element H or element D",as_atoms=True),
n_other = rc(sel_str_other),
other_cnts = other_cnts,
# atom counts for Table 1
n_protein_atoms = rc("protein and not (element H or element D)", as_atoms=True),
n_nucleotide_atoms = rc("nucleotide and not (element H or element D)", as_atoms=True),
n_water_atoms = rc("water", as_atoms=True),
n_other_atoms = rc(sel_str_other, as_atoms=True))
def show(self,
out=None,
prefix="",
level_id=None,
level_id_exception=ValueError):
"""
Display a summary of hierarchy contents.
"""
if (level_id == None): level_id = "atom"
try: level_no = level_ids.index(level_id)
except ValueError:
raise level_id_exception('Unknown level_id="%s"' % level_id)
if (out is None): out = sys.stdout
if (self.models_size() == 0):
print(prefix+'### WARNING: empty hierarchy ###', file=out)
model_ids = dict_with_default_0()
for model in self.models():
model_ids[model.id] += 1
for model in self.models():
chains = model.chains()
if (model_ids[model.id] != 1):
s = " ### ERROR: duplicate model id ###"
else: s = ""
print(prefix+'model id="%s"' % model.id, \
"#chains=%d%s" % (len(chains), s), file=out)
if (level_no == 0): continue
if (model.chains_size() == 0):
print(prefix+' ### WARNING: empty model ###', file=out)
model_chain_ids = dict_with_default_0()
for chain in chains:
model_chain_ids[chain.id] += 1
for chain in chains:
rgs = chain.residue_groups()
if (model_chain_ids[chain.id] != 1):
s = " ### WARNING: duplicate chain id ###"
else: s = ""
print(prefix+' chain id="%s"' % chain.id, \
"#residue_groups=%d%s" % (len(rgs), s), file=out)
if (level_no == 1): continue
if (chain.residue_groups_size() == 0):
print(prefix+' ### WARNING: empty chain ###', file=out)
suppress_chain_break = True
prev_resid = ""
for rg in rgs:
if (not rg.link_to_previous and not suppress_chain_break):
print(prefix+" ### chain break ###", file=out)
suppress_chain_break = False
ags = rg.atom_groups()
resnames = set()
for ag in rg.atom_groups():
resnames.add(ag.resname)
infos = []
if (len(resnames) > 1): infos.append("with mixed residue names")
resid = rg.resid()
if (prev_resid == resid): infos.append("same as previous resid")
prev_resid = resid
if (len(infos) != 0): s = " ### Info: %s ###" % "; ".join(infos)
else: s = ""
print(prefix+' resid="%s"' % resid, \
"#atom_groups=%d%s" % (len(ags), s), file=out)
if (level_no == 2): continue
if (rg.atom_groups_size() == 0):
print(prefix+' ### WARNING: empty residue_group ###', file=out)
for ag in ags:
atoms = ag.atoms()
print(prefix+' altloc="%s"' % ag.altloc, \
'resname="%s"' % ag.resname, \
"#atoms=%d" % len(atoms), file=out)
if (level_no == 3): continue
if (ag.atoms_size() == 0):
print(prefix+' ### WARNING: empty atom_group ###', file=out)
for atom in atoms:
print(prefix+' "%s"' % atom.name, file=out)
def as_str(self,
prefix="",
level_id=None,
level_id_exception=ValueError):
"""
Alias for show().
"""
out = StringIO()
self.show(
out=out,
prefix=prefix,
level_id=level_id,
level_id_exception=level_id_exception)
return out.getvalue()
def as_pdb_string(self,
crystal_symmetry=None,
cryst1_z=None,
write_scale_records=True,
append_end=False,
interleaved_conf=0,
atoms_reset_serial_first_value=None,
atom_hetatm=True,
sigatm=True,
anisou=True,
siguij=True,
output_break_records=True, # TODO deprecate
cstringio=None,
return_cstringio=Auto):
"""
Generate complete PDB-format string representation. External crystal
symmetry is strongly recommended if this is being output to a file.
:param crystal_symmetry: cctbx.crystal.symmetry object or equivalent (such
as an xray.structure object or Miller array)
:param write_scale_records: write fractional scaling records (SCALE) if
crystal symmetry is provided
:param anisou: write ANISOU records for anisotropic atoms
:param sigatm: write SIGATM records if applicable
:param siguij: write SIGUIJ records if applicable
:returns: Python str
"""
if (cstringio is None):
cstringio = StringIO()
if (return_cstringio is Auto):
return_cstringio = False
elif (return_cstringio is Auto):
return_cstringio = True
if (crystal_symmetry is not None or cryst1_z is not None):
from iotbx.pdb import format_cryst1_and_scale_records
print(format_cryst1_and_scale_records(
crystal_symmetry=crystal_symmetry,
cryst1_z=cryst1_z,
write_scale_records=write_scale_records), file=cstringio)
py3out = self._as_pdb_string_cstringio(
cstringio=cstringio,
append_end=append_end,
interleaved_conf=interleaved_conf,
atoms_reset_serial_first_value=atoms_reset_serial_first_value,
atom_hetatm=atom_hetatm,
sigatm=sigatm,
anisou=anisou,
siguij=siguij,
output_break_records=output_break_records)
if six.PY3:
cstringio.write(py3out)
if (return_cstringio):
return cstringio
return cstringio.getvalue()
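  # Usage sketch (object names are illustrative): a hierarchy is typically
  # written out together with the symmetry of its companion xray structure:
  #   pdb_str = hierarchy.as_pdb_string(
  #     crystal_symmetry=xray_structure.crystal_symmetry(), append_end=True)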
# MARKED_FOR_DELETION_OLEG
# REASON: This is not equivalent conversion. Hierarchy does not have a lot
# of information pdb_input and cif_input should have. Therefore this
# function should not be used at all to avoid confusion and having crippled
# input objects. Moreover, the use of mmtbx.model should eliminate the
  # need for this transformation.
# Currently used exclusively in Tom's code.
def as_pdb_input(self, crystal_symmetry=None):
"""
Generate corresponding pdb.input object.
"""
import iotbx.pdb
pdb_str = self.as_pdb_string(crystal_symmetry=crystal_symmetry)
pdb_inp = iotbx.pdb.input(
source_info="pdb_hierarchy",
lines=flex.split_lines(pdb_str))
return pdb_inp
# END_MARKED_FOR_DELETION_OLEG
def extract_xray_structure(self, crystal_symmetry=None,
min_distance_sym_equiv=None):
"""
Generate the equivalent cctbx.xray.structure object. If the crystal
symmetry is not provided, this will be placed in a P1 box. In practice it
is usually best to keep the original xray structure object around, but this
method is helpful in corner cases.
"""
if min_distance_sym_equiv is not None: # use it
return self.as_pdb_input(crystal_symmetry).xray_structure_simple(
min_distance_sym_equiv=min_distance_sym_equiv)
else: # usual just use whatever is default in xray_structure_simple
return self.as_pdb_input(crystal_symmetry).xray_structure_simple()
def adopt_xray_structure(self, xray_structure, assert_identical_id_str=True):
"""
Apply the current (refined) atomic parameters from the cctbx.xray.structure
object to the atoms in the PDB hierarchy. This will fail if the labels of
the scatterers do not match the atom labels.
"""
from cctbx import adptbx
if(self.atoms_size() != xray_structure.scatterers().size()):
raise RuntimeError("Incompatible size of hierarchy and scatterers array.")
awl = self.atoms_with_labels()
scatterers = xray_structure.scatterers()
uc = xray_structure.unit_cell()
orth = uc.orthogonalize
def set_attr(sc, a):
a.set_xyz(new_xyz=orth(sc.site))
a.set_occ(new_occ=sc.occupancy)
a.set_b(new_b=adptbx.u_as_b(sc.u_iso_or_equiv(uc)))
if(sc.flags.use_u_aniso() and sc.u_star != (-1.0, -1.0, -1.0, -1.0, -1.0, -1.0)):
# a.set_uij(new_uij = adptbx.u_star_as_u_cart(uc,sc.u_star))
a.set_uij(new_uij = sc.u_cart_plus_u_iso(uc))
else:
a.uij_erase()
a.set_fp(new_fp=sc.fp)
a.set_fdp(new_fdp=sc.fdp)
element, charge = sc.element_and_charge_symbols()
a.set_element(element)
a.set_charge(charge)
def get_id(l):
r = [pos for pos, char in enumerate(l) if char == '"']
if(len(r)<2): return None
i,j = r[-2:]
r = "".join(l[i:j+1].replace('"',"").replace('"',"").split())
return r
for sc, a in zip(scatterers, awl):
id_str = a.id_str()
resname_from_sc = id_str[10:13]
cl1 = common_residue_names_get_class(resname_from_sc)
cl2 = common_residue_names_get_class(a.resname)
if assert_identical_id_str:
l1 = get_id(sc.label)
l2 = get_id(a.id_str())
if(l1 != l2):
raise RuntimeError("Mismatch: \n %s \n %s \n"%(sc.label,a.id_str()))
set_attr(sc=sc, a=a)
def apply_rotation_translation(self, rot_matrices, trans_vectors):
"""
LIMITATION: ANISOU records in resulting hierarchy will be invalid!!!
"""
roots=[]
for r,t in zip(rot_matrices, trans_vectors):
for model in self.models():
root = iotbx.pdb.hierarchy.root()
m = iotbx.pdb.hierarchy.model()
for c in model.chains():
c = c.detached_copy()
xyz = c.atoms().extract_xyz()
new_xyz = r.elems*xyz+t
c.atoms().set_xyz(new_xyz)
m.append_chain(c)
root.append_model(m)
roots.append(root)
result = iotbx.pdb.hierarchy.join_roots(roots=roots)
result.reset_i_seq_if_necessary()
return result
def remove_residue_groups_with_atoms_on_special_positions_selective(self,
crystal_symmetry):
self.reset_i_seq_if_necessary()
special_position_settings = crystal.special_position_settings(
crystal_symmetry = crystal_symmetry)
# Using
# unconditional_general_position_flags=(self.atoms().extract_occ() != 1)
# will skip atoms on sp that have partial occupancy.
site_symmetry_table = \
special_position_settings.site_symmetry_table(
sites_cart = self.atoms().extract_xyz())
spi = site_symmetry_table.special_position_indices()
removed = []
for c in self.chains():
for rg in c.residue_groups():
keep=True
for i in rg.atoms().extract_i_seq():
if(i in spi):
keep=False
break
if(not keep):
for resname in rg.unique_resnames():
if(common_residue_names_get_class(resname) == "common_amino_acid" or
common_residue_names_get_class(resname) == "common_rna_dna"):
raise RuntimeError(
"Amino-acid residue or NA is on special position.")
for resname in rg.unique_resnames():
removed.append(",".join([c.id, rg.resid(), resname]))
c.remove_residue_group(residue_group=rg)
return removed
def shift_to_origin(self, crystal_symmetry):
uc = crystal_symmetry.unit_cell()
sites_frac = uc.fractionalize(self.atoms().extract_xyz())
l = abs(min(sites_frac.min()))
r = abs(max(sites_frac.max()))
rl = max(l, r)+2
rr= range(int(-rl), int(rl))
shift_best = None
for x in rr:
for y in rr:
for z in rr:
sf = sites_frac+[x,y,z]
sc = uc.orthogonalize(sf)
cmf = uc.fractionalize(sc.mean())
if(cmf[0]>=0 and cmf[0]<1 and
cmf[1]>=0 and cmf[1]<1 and
cmf[2]>=0 and cmf[2]<1):
shift_best = [x,y,z]
assert shift_best is not None # should never happen
self.atoms().set_xyz(uc.orthogonalize(sites_frac+shift_best))
def expand_to_p1(self, crystal_symmetry, exclude_self=False):
# ANISOU will be invalid
import string
import scitbx.matrix
r = root()
m = model()
idl = [i for i in string.ascii_lowercase]
idu = [i for i in string.ascii_uppercase]
taken = [c.id for c in self.chains()]
n_atoms = []
for m_ in self.models():
for smx in crystal_symmetry.space_group().all_ops():
m3 = smx.r().as_double()
m3 = scitbx.matrix.sqr(m3)
if(exclude_self and m3.is_r3_identity_matrix()): continue
t = smx.t().as_double()
t = scitbx.matrix.col((t[0],t[1],t[2]))
for c_ in m_.chains():
n_at = len(c_.atoms())
if(not n_at in n_atoms): n_atoms.append(n_at)
c_ = c_.detached_copy()
xyz = c_.atoms().extract_xyz()
xyz = crystal_symmetry.unit_cell().fractionalize(xyz)
new_xyz = crystal_symmetry.unit_cell().orthogonalize(m3.elems*xyz+t)
c_.atoms().set_xyz(new_xyz)
#
if(not (smx.r().is_unit_mx() and smx.t().is_zero())):
found = False
for idu_ in idu:
for idl_ in idl:
id_ = idu_+idl_
if(not id_ in taken):
taken.append(id_)
found = id_
break
if(found): break
c_.id = found
#
m.append_chain(c_)
r.append_model(m)
return r
def write_pdb_file(self,
file_name,
open_append=False,
crystal_symmetry=None,
cryst1_z=None,
write_scale_records=True,
append_end=False,
interleaved_conf=0,
atoms_reset_serial_first_value=None,
atom_hetatm=True,
sigatm=True,
anisou=True,
siguij=True,
link_records=None,
):
if link_records:
if (open_append): mode = "a"
else: mode = "w"
with open(file_name, mode) as f:
print(link_records, file=f)
open_append = True
if (crystal_symmetry is not None or cryst1_z is not None):
if (open_append): mode = "a"
else: mode = "w"
from iotbx.pdb import format_cryst1_and_scale_records
with open(file_name, mode) as f:
print(format_cryst1_and_scale_records(
crystal_symmetry=crystal_symmetry,
cryst1_z=cryst1_z,
write_scale_records=write_scale_records), file=f)
open_append = True
self._write_pdb_file(
file_name=file_name,
open_append=open_append,
append_end=append_end,
interleaved_conf=interleaved_conf,
atoms_reset_serial_first_value=atoms_reset_serial_first_value,
atom_hetatm=atom_hetatm,
sigatm=sigatm,
anisou=anisou,
siguij=siguij,
)
def get_label_alt_id_iseq(self, iseq):
assert self.atoms_size() > iseq
return self.get_label_alt_id_atom(self.atoms()[iseq])
def get_label_alt_id_atom(self, atom):
alt_id = atom.parent().altloc
if alt_id == '': alt_id = '.'
return alt_id
def get_auth_asym_id_iseq(self, iseq):
assert self.atoms_size() > iseq, "%d, %d" % (self.atoms_size(), iseq)
return self.get_auth_asym_id(self.atoms()[iseq].parent().parent().parent())
def get_auth_asym_id(self, chain):
auth_asym_id = chain.id
if len(chain.atoms()[0].segid.strip()) > len(auth_asym_id):
auth_asym_id = chain.atoms()[0].segid.strip()
if auth_asym_id.strip() == '':
# chain id is empty, segid is empty, just duplicate label_asym_id
# since we cannot read mmCIF with empty auth_asym_id. Outputting a file
# that we cannot read - bad.
auth_asym_id = self.get_label_asym_id(chain.residue_groups()[0])
return auth_asym_id
def get_label_asym_id_iseq(self, iseq):
assert self.atoms_size() > iseq
return self.get_label_asym_id(self.atoms()[iseq].parent().parent())
def get_label_asym_id(self, residue_group):
if not hasattr(self, '_lai_lookup'):
self._lai_lookup = {}
# fill self._lai_lookup for the whole hierarchy
number_label_asym_id = 0
label_asym_ids = all_label_asym_ids()
for model in self.models():
for chain in model.chains():
previous = None
for rg in chain.residue_groups():
resname = rg.atom_groups()[0].resname.strip()
residue_class = common_residue_names_get_class(resname)
rg_mid = rg.memory_id()
if residue_class in ['common_amino_acid', 'modified_amino_acid',
'common_rna_dna', 'modified_rna_dna']:
if previous != 'poly' and previous is not None:
number_label_asym_id += 1
self._lai_lookup[rg_mid] = label_asym_ids[number_label_asym_id]
previous = 'poly'
elif residue_class in ['common_water']:
if previous != 'water' and previous is not None:
number_label_asym_id += 1
previous = 'water'
self._lai_lookup[rg_mid] = label_asym_ids[number_label_asym_id]
else: # ligand
if previous is not None:
number_label_asym_id += 1
previous = 'ligand'
self._lai_lookup[rg_mid] = label_asym_ids[number_label_asym_id]
number_label_asym_id += 1 # up for each chain
previous = None
number_label_asym_id += 1 # up for each model
rg_mid = residue_group.memory_id()
result = self._lai_lookup.get(rg_mid, None)
if result is None:
print (residue_group.id_str())
return result
# return self.number_label_asym_id, self.label_asym_ids[self.number_label_asym_id]
def get_auth_seq_id_iseq(self, iseq):
assert self.atoms_size() > iseq
return self.get_auth_seq_id(self.atoms()[iseq].parent().parent())
def get_auth_seq_id(self, rg):
return rg.resseq.strip()
def get_label_seq_id_iseq(self, iseq):
assert self.atoms_size() > iseq, "%d, %d" % (self.atoms_size(), iseq)
return self.get_label_seq_id(self.atoms()[iseq].parent())
def get_label_seq_id(self, atom_group):
if not hasattr(self, '_label_seq_id_dict'):
# make it
prev_ac_key = ''
self._label_seq_id_dict = {}
for model in self.models():
for chain in model.chains():
label_seq_id = 0
for rg in chain.residue_groups():
for ag in rg.atom_groups():
cur_ac_key = chain.id + rg.resseq + rg.icode
if cur_ac_key != prev_ac_key:
label_seq_id += 1
prev_ac_key = cur_ac_key
label_seq_id_str='.'
comp_id = ag.resname.strip()
residue_class = common_residue_names_get_class(comp_id)
if residue_class in ['common_amino_acid', 'modified_amino_acid']:
label_seq_id_str = str(label_seq_id)
self._label_seq_id_dict[ag.memory_id()] = label_seq_id_str
return self._label_seq_id_dict[atom_group.memory_id()]
def as_cif_block(self,
crystal_symmetry=None,
coordinate_precision=5,
occupancy_precision=3,
b_iso_precision=5,
u_aniso_precision=5):
if crystal_symmetry is None:
crystal_symmetry = crystal.symmetry()
cs_cif_block = crystal_symmetry.as_cif_block(format="mmcif")
h_cif_block = iotbx.cif.model.block()
coord_fmt_str = "%%.%if" %coordinate_precision
occ_fmt_str = "%%.%if" %occupancy_precision
b_iso_fmt_str = "%%.%if" %b_iso_precision
u_aniso_fmt_str = "%%.%if" %u_aniso_precision
atom_site_loop = iotbx.cif.model.loop(header=(
'_atom_site.group_PDB',
'_atom_site.id',
'_atom_site.label_atom_id',
'_atom_site.label_alt_id',
'_atom_site.label_comp_id',
'_atom_site.auth_asym_id',
'_atom_site.auth_seq_id',
'_atom_site.pdbx_PDB_ins_code',
'_atom_site.Cartn_x',
'_atom_site.Cartn_y',
'_atom_site.Cartn_z',
'_atom_site.occupancy',
'_atom_site.B_iso_or_equiv',
'_atom_site.type_symbol',
'_atom_site.pdbx_formal_charge',
'_atom_site.phenix_scat_dispersion_real',
'_atom_site.phenix_scat_dispersion_imag',
'_atom_site.label_asym_id',
'_atom_site.label_entity_id',
'_atom_site.label_seq_id',
#'_atom_site.auth_comp_id',
#'_atom_site.auth_atom_id',
'_atom_site.pdbx_PDB_model_num',
))
aniso_loop = iotbx.cif.model.loop(header=(
'_atom_site_anisotrop.id',
'_atom_site_anisotrop.pdbx_auth_atom_id',
'_atom_site_anisotrop.pdbx_label_alt_id',
'_atom_site_anisotrop.pdbx_auth_comp_id',
'_atom_site_anisotrop.pdbx_auth_asym_id',
'_atom_site_anisotrop.pdbx_auth_seq_id',
'_atom_site_anisotrop.pdbx_PDB_ins_code',
'_atom_site_anisotrop.U[1][1]',
'_atom_site_anisotrop.U[2][2]',
'_atom_site_anisotrop.U[3][3]',
'_atom_site_anisotrop.U[1][2]',
'_atom_site_anisotrop.U[1][3]',
'_atom_site_anisotrop.U[2][3]'
))
# cache dictionary lookups to save time in inner loop
atom_site_group_PDB = atom_site_loop['_atom_site.group_PDB']
atom_site_id = atom_site_loop['_atom_site.id']
atom_site_label_atom_id = atom_site_loop['_atom_site.label_atom_id']
atom_site_label_alt_id = atom_site_loop['_atom_site.label_alt_id']
atom_site_label_comp_id = atom_site_loop['_atom_site.label_comp_id']
atom_site_auth_asym_id = atom_site_loop['_atom_site.auth_asym_id']
atom_site_auth_seq_id = atom_site_loop['_atom_site.auth_seq_id']
atom_site_pdbx_PDB_ins_code = atom_site_loop['_atom_site.pdbx_PDB_ins_code']
atom_site_Cartn_x = atom_site_loop['_atom_site.Cartn_x']
atom_site_Cartn_y = atom_site_loop['_atom_site.Cartn_y']
atom_site_Cartn_z = atom_site_loop['_atom_site.Cartn_z']
atom_site_occupancy = atom_site_loop['_atom_site.occupancy']
atom_site_B_iso_or_equiv = atom_site_loop['_atom_site.B_iso_or_equiv']
atom_site_type_symbol = atom_site_loop['_atom_site.type_symbol']
atom_site_pdbx_formal_charge = atom_site_loop['_atom_site.pdbx_formal_charge']
atom_site_phenix_scat_dispersion_real = \
atom_site_loop['_atom_site.phenix_scat_dispersion_real']
atom_site_phenix_scat_dispersion_imag = \
atom_site_loop['_atom_site.phenix_scat_dispersion_imag']
atom_site_label_asym_id = atom_site_loop['_atom_site.label_asym_id']
atom_site_label_entity_id = atom_site_loop['_atom_site.label_entity_id']
atom_site_label_seq_id = atom_site_loop['_atom_site.label_seq_id']
#atom_site_loop['_atom_site.auth_comp_id'].append(comp_id)
#atom_site_loop['_atom_site.auth_atom_id'].append(atom.name.strip())
atom_site_pdbx_PDB_model_num = atom_site_loop['_atom_site.pdbx_PDB_model_num']
atom_site_anisotrop_id = aniso_loop['_atom_site_anisotrop.id']
atom_site_anisotrop_pdbx_auth_atom_id = \
aniso_loop['_atom_site_anisotrop.pdbx_auth_atom_id']
atom_site_anisotrop_pdbx_label_alt_id = \
aniso_loop['_atom_site_anisotrop.pdbx_label_alt_id']
atom_site_anisotrop_pdbx_auth_comp_id = \
aniso_loop['_atom_site_anisotrop.pdbx_auth_comp_id']
atom_site_anisotrop_pdbx_auth_asym_id = \
aniso_loop['_atom_site_anisotrop.pdbx_auth_asym_id']
atom_site_anisotrop_pdbx_auth_seq_id = \
aniso_loop['_atom_site_anisotrop.pdbx_auth_seq_id']
atom_site_anisotrop_pdbx_PDB_ins_code = \
aniso_loop['_atom_site_anisotrop.pdbx_PDB_ins_code']
atom_site_anisotrop_U11 = aniso_loop['_atom_site_anisotrop.U[1][1]']
atom_site_anisotrop_U22 = aniso_loop['_atom_site_anisotrop.U[2][2]']
atom_site_anisotrop_U33 = aniso_loop['_atom_site_anisotrop.U[3][3]']
atom_site_anisotrop_U12 = aniso_loop['_atom_site_anisotrop.U[1][2]']
atom_site_anisotrop_U13 = aniso_loop['_atom_site_anisotrop.U[1][3]']
atom_site_anisotrop_U23 = aniso_loop['_atom_site_anisotrop.U[2][3]']
unique_chain_ids = set()
auth_asym_ids = flex.std_string()
label_asym_ids = flex.std_string()
#
chem_comp_loop = iotbx.cif.model.loop(header=(
'_chem_comp.id',
))
struct_asym_loop = iotbx.cif.model.loop(header=(
'_struct_asym.id',
))
chem_comp_ids = []
chem_comp_atom_ids = []
struct_asym_ids = []
#
chain_ids = all_chain_ids()
for model in self.models():
model_id = model.id
if model_id == '': model_id = '1'
for chain in model.chains():
auth_asym_id = self.get_auth_asym_id(chain)
for residue_group in chain.residue_groups():
label_asym_id = self.get_label_asym_id(residue_group)
seq_id = self.get_auth_seq_id(residue_group)
icode = residue_group.icode
if icode == ' ' or icode == '': icode = '?'
for atom_group in residue_group.atom_groups():
comp_id = atom_group.resname.strip()
entity_id = '?' # XXX how do we determine this?
for atom in atom_group.atoms():
group_pdb = "ATOM"
if atom.hetero: group_pdb = "HETATM"
x, y, z = [coord_fmt_str %i for i in atom.xyz]
atom_charge = atom.charge_tidy()
if atom_charge is None:
atom_charge = "?"
else:
atom_charge = atom_charge.strip()
if atom_charge == "": atom_charge = "?"
fp, fdp = atom.fp, atom.fdp
if fp == 0 and fdp == 0:
fp = '.'
fdp = '.'
else:
fp = "%.4f" %fp
fdp = "%.4f" %fdp
atom_site_group_PDB.append(group_pdb)
atom_site_id.append(str(hy36decode(width=5, s=atom.serial)))
atom_site_label_atom_id.append(atom.name.strip())
if atom.name.strip() not in chem_comp_atom_ids:
chem_comp_atom_ids.append(atom.name.strip())
atom_site_label_alt_id.append(self.get_label_alt_id_atom(atom))
atom_site_label_comp_id.append(comp_id)
if comp_id not in chem_comp_ids: chem_comp_ids.append(comp_id)
atom_site_auth_asym_id.append(auth_asym_id)
atom_site_auth_seq_id.append(seq_id)
atom_site_pdbx_PDB_ins_code.append(icode)
atom_site_Cartn_x.append(x)
atom_site_Cartn_y.append(y)
atom_site_Cartn_z.append(z)
atom_site_occupancy.append(occ_fmt_str % atom.occ)
atom_site_B_iso_or_equiv.append(b_iso_fmt_str % atom.b)
atom_site_type_symbol.append(atom.element.strip())
atom_site_pdbx_formal_charge.append(atom_charge)
atom_site_phenix_scat_dispersion_real.append(fp)
atom_site_phenix_scat_dispersion_imag.append(fdp)
atom_site_label_asym_id.append(label_asym_id.strip())
if label_asym_id.strip() not in struct_asym_ids:
struct_asym_ids.append(label_asym_id.strip())
atom_site_label_entity_id.append(entity_id)
atom_site_label_seq_id.append(self.get_label_seq_id(atom_group))
#atom_site_loop['_atom_site.auth_comp_id'].append(comp_id)
#atom_site_loop['_atom_site.auth_atom_id'].append(atom.name.strip())
atom_site_pdbx_PDB_model_num.append(model_id.strip())
if atom.uij_is_defined():
u11, u22, u33, u12, u13, u23 = [
u_aniso_fmt_str %i for i in atom.uij]
atom_site_anisotrop_id.append(
str(hy36decode(width=5, s=atom.serial)))
atom_site_anisotrop_pdbx_auth_atom_id.append(atom.name.strip())
atom_site_anisotrop_pdbx_label_alt_id.append(self.get_label_alt_id_atom(atom))
atom_site_anisotrop_pdbx_auth_comp_id.append(comp_id)
atom_site_anisotrop_pdbx_auth_asym_id.append(auth_asym_id)
atom_site_anisotrop_pdbx_auth_seq_id.append(seq_id)
atom_site_anisotrop_pdbx_PDB_ins_code.append(icode)
atom_site_anisotrop_U11.append(u11)
atom_site_anisotrop_U22.append(u22)
atom_site_anisotrop_U33.append(u33)
atom_site_anisotrop_U12.append(u12)
atom_site_anisotrop_U13.append(u13)
atom_site_anisotrop_U23.append(u23)
for key in ('_atom_site.phenix_scat_dispersion_real',
'_atom_site.phenix_scat_dispersion_imag'):
if atom_site_loop[key].all_eq('.'):
del atom_site_loop[key]
h_cif_block.add_loop(atom_site_loop)
if aniso_loop.size() > 0:
h_cif_block.add_loop(aniso_loop)
h_cif_block.update(cs_cif_block)
#
chem_comp_ids.sort()
for row in chem_comp_ids: chem_comp_loop.add_row([row])
h_cif_block.add_loop(chem_comp_loop)
chem_comp_atom_ids.sort()
for row in struct_asym_ids: struct_asym_loop.add_row([row])
h_cif_block.add_loop(struct_asym_loop)
#
return h_cif_block
def write_mmcif_file(self,
file_name,
crystal_symmetry=None,
data_block_name=None):
cif_object = iotbx.cif.model.cif()
if data_block_name is None:
data_block_name = "phenix"
cif_object[data_block_name] = self.as_cif_block(
crystal_symmetry=crystal_symmetry)
with open(file_name, "w") as f:
print(cif_object, file=f)
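  # Usage sketch (illustrative, not part of the original class); ``hierarchy``
  # and the file name are placeholders:
  #   hierarchy.write_mmcif_file(file_name="model.cif", data_block_name="model")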
def atoms_with_labels(self):
"""
Generator for atom_with_labels objects, presented in the same order as
the array returned by the atoms() method.
"""
for model in self.models():
for chain in model.chains():
is_first_in_chain = True
for rg in chain.residue_groups():
is_first_after_break = not (is_first_in_chain or rg.link_to_previous)
for ag in rg.atom_groups():
for atom in ag.atoms():
yield atom_with_labels(
atom=atom,
model_id=model.id,
chain_id=chain.id,
resseq=rg.resseq,
icode=rg.icode,
altloc=ag.altloc,
resname=ag.resname,
is_first_in_chain=is_first_in_chain,
is_first_after_break=is_first_after_break)
is_first_in_chain = False
is_first_after_break = False
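  # Usage sketch (illustrative, not part of the original class); ``hierarchy``
  # is a placeholder for an existing root object:
  #   for atom in hierarchy.atoms_with_labels():
  #     print(atom.chain_id, atom.resname, atom.resseq, atom.name.strip())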
def get_conformer_indices(self):
n_seq = self.atoms_size()
conformer_indices = flex.size_t(n_seq, 0)
altloc_indices = self.altloc_indices()
if ("" in altloc_indices): p = 0
else: p = 1
altlocs = sorted(altloc_indices.keys())
for i,altloc in enumerate(altlocs):
if (altloc == ""): continue
conformer_indices.set_selected(altloc_indices[altloc], i+p)
return conformer_indices
def remove_incomplete_main_chain_protein(self,
required_atom_names=['CA','N','C','O']):
# Remove each residue_group that does not contain CA N C O of protein
hierarchy = self
for model in hierarchy.models():
for chain in model.chains():
for residue_group in chain.residue_groups():
all_atom_names_found=[]
atom_groups = residue_group.atom_groups()
for atom_group in atom_groups:
for atom in atom_group.atoms():
atom_name=atom.name.strip()
if not atom_name in all_atom_names_found:
all_atom_names_found.append(atom_name)
for r in required_atom_names:
if not r in all_atom_names_found:
chain.remove_residue_group(residue_group=residue_group)
break
if (len(chain.residue_groups()) == 0):
model.remove_chain(chain=chain)
def remove_alt_confs(self, always_keep_one_conformer):
hierarchy = self
for model in hierarchy.models():
for chain in model.chains():
for residue_group in chain.residue_groups():
atom_groups = residue_group.atom_groups()
assert (len(atom_groups) > 0)
cleanup_needed = True
if always_keep_one_conformer :
if (len(atom_groups) == 1) and (atom_groups[0].altloc == ''):
continue
atom_groups_and_occupancies = []
for atom_group in atom_groups :
if (atom_group.altloc == ''):
continue
mean_occ = flex.mean(atom_group.atoms().extract_occ())
atom_groups_and_occupancies.append((atom_group, mean_occ))
atom_groups_and_occupancies.sort(key=operator.itemgetter(1), reverse=True)
for atom_group, occ in atom_groups_and_occupancies[1:] :
residue_group.remove_atom_group(atom_group=atom_group)
single_conf, occ = atom_groups_and_occupancies[0]
single_conf.altloc = ''
else :
for atom_group in atom_groups :
if (not atom_group.altloc in ["", "A"]):
residue_group.remove_atom_group(atom_group=atom_group)
else :
atom_group.altloc = ""
if (len(residue_group.atom_groups()) == 0):
chain.remove_residue_group(residue_group=residue_group)
cleanup_needed = False
if cleanup_needed and residue_group.atom_groups_size() > 1:
ags = residue_group.atom_groups()
for i in range(len(ags)-1, 0, -1):
residue_group.merge_atom_groups(ags[0], ags[i])
residue_group.remove_atom_group(ags[i])
if (len(chain.residue_groups()) == 0):
model.remove_chain(chain=chain)
atoms = hierarchy.atoms()
new_occ = flex.double(atoms.size(), 1.0)
atoms.set_occ(new_occ)
def rename_chain_id(self, old_id, new_id):
for model in self.models():
for chain in model.chains():
if(chain.id == old_id):
chain.id = new_id
def remove_atoms(self, fraction):
assert fraction>0 and fraction<1.
sel_keep = flex.random_bool(self.atoms_size(), 1-fraction)
return self.select(sel_keep)
def set_atomic_charge(self, iselection, charge):
assert isinstance(charge, int)
if(iselection is None):
raise Sorry("Specify an atom selection to apply a charge to.")
if(abs(charge) >= 10):
raise Sorry("The charge must be in the range from -9 to 9.")
if(iselection.size() == 0):
raise Sorry("Empty selection for charge modification")
if(charge == 0):
charge = " "
elif (charge < 0):
charge = "%1d-" % abs(charge)
else:
charge = "%1d+" % charge
atoms = self.atoms()
for i_seq in iselection:
atom = atoms[i_seq]
atom.set_charge(charge)
def truncate_to_poly(self, atom_names_set=set()):
pdb_atoms = self.atoms()
pdb_atoms.reset_i_seq()
aa_resnames = one_letter_given_three_letter
for model in self.models():
for chain in model.chains():
for rg in chain.residue_groups():
def have_amino_acid():
for ag in rg.atom_groups():
if (ag.resname in aa_resnames):
return True
return False
if (have_amino_acid()):
for ag in rg.atom_groups():
for atom in ag.atoms():
if (atom.name not in atom_names_set):
ag.remove_atom(atom=atom)
def truncate_to_poly_gly(self):
self.truncate_to_poly(
atom_names_set=set([" N ", " CA ", " C ", " O "]))
def truncate_to_poly_ala(self):
self.truncate_to_poly(
atom_names_set=set([" N ", " CA ", " C ", " O ", " CB "]))
def convert_semet_to_met(self):
for i_seq, atom in enumerate(self.atoms()):
if (atom.name.strip()=="SE") and (atom.element.strip().upper()=="SE"):
atom_group = atom.parent()
if(atom_group.resname == "MSE"):
atom_group.resname = "MET"
atom.name = " SD "
atom.element = " S"
for ag_atom in atom_group.atoms():
ag_atom.hetero = False
def convert_met_to_semet(self):
for i_seq, atom in enumerate(self.atoms()):
if((atom.name.strip()=="SD") and (atom.element.strip().upper()=="S")):
atom_group = atom.parent()
if(atom_group.resname == "MET"):
atom_group.resname = "MSE"
atom.name = " SE "
atom.element = "SE"
for ag_atom in atom_group.atoms():
ag_atom.hetero = True
def transfer_chains_from_other(self, other):
i_model = 0
other_models = other.models()
for md,other_md in zip(self.models(), other_models):
i_model += 1
md.id = hy36encode(width=4, value=i_model)
md.transfer_chains_from_other(other=other_md)
msz, omsz = self.models_size(), other.models_size()
if (omsz > msz):
for other_md in other_models[msz:]:
i_model += 1
md = model(id = hy36encode(width=4, value=i_model))
md.transfer_chains_from_other(other=other_md)
self.append_model(model=md)
def atom_selection_cache(self, special_position_settings=None):
from iotbx.pdb.atom_selection import cache
return cache(root=self,
special_position_settings=special_position_settings)
def occupancy_groups_simple(self, common_residue_name_class_only=None,
always_group_adjacent=True,
ignore_hydrogens=True):
if(ignore_hydrogens):
sentinel = self.atoms().reset_tmp_for_occupancy_groups_simple()
else:
sentinel = self.atoms().reset_tmp(first_value=0, increment=1)
result = []
for chain in self.chains():
if(common_residue_name_class_only is None):
if(chain.is_protein()):
common_residue_name_class_only = "common_amino_acid"
if(chain.is_na()):
common_residue_name_class_only = "common_rna_dna"
result.extend(chain.occupancy_groups_simple(
common_residue_name_class_only=common_residue_name_class_only,
always_group_adjacent=always_group_adjacent))
del sentinel
return result
def chunk_selections(self, residues_per_chunk):
result = []
if(residues_per_chunk<1): return result
for model in self.models():
for chain in model.chains():
residue_range_sel = flex.size_t()
cntr = 0
for rg in chain.residue_groups():
i_seqs = rg.atoms().extract_i_seq()
last_added=True
if(cntr!=residues_per_chunk):
residue_range_sel.extend(i_seqs)
last_added=False
else:
result.append(residue_range_sel)
residue_range_sel = flex.size_t()
residue_range_sel.extend(i_seqs)
cntr = 0
last_added=False
cntr += 1
if(len(result)==0 or not last_added):
assert residue_range_sel.size()>0
result.append(residue_range_sel)
return result
def flip_symmetric_amino_acids(self):
import time
from scitbx.math import dihedral_angle
def chirality_delta(sites, volume_ideal, both_signs):
d_01 = sites[1] - sites[0]
d_02 = sites[2] - sites[0]
d_03 = sites[3] - sites[0]
d_02_cross_d_03 = d_02.cross(d_03)
volume_model = d_01.dot(d_02_cross_d_03)
      delta_sign = -1
if both_signs and volume_model < 0:
delta_sign = 1
delta = volume_ideal + delta_sign * volume_model
return delta[0]
data = {
"ARG" : {"dihedral" : ["CD", "NE", "CZ", "NH1"],
"value" : [0, 1],
"pairs" : [["NH1", "NH2"],
["HH11","HH21"], # should this also be periodicty
["HH12","HH22"], # of 1
],
},
"ASP" : {"dihedral" : ["CA", "CB", "CG", "OD1"],
"value" : [0, 1],
"pairs" : [["OD1", "OD2"]],
},
"GLU" : {"dihedral" : ["CB", "CG", "CD", "OE1"],
"value" : [0, 1],
"pairs" : [["OE1", "OE2"]],
},
"PHE" : {"dihedral" : ["CA", "CB", "CG", "CD1"],
"value" : [0, 1],
"pairs" : [["CD1", "CD2"],
["CE1", "CE2"],
["HD1", "HD2"],
["HE1", "HE2"],
],
},
# even less symmetric flips - based on chirals
'VAL' : {'chiral' : ['CB', 'CA', 'CG1', 'CG2'],
'value' : [-2.5, False, 1],
'pairs' : [['CG1', 'CG2'],
['HG11','HG21'],
['HG12','HG22'],
['HG13','HG23'],
],
},
'LEU' : {'chiral' : ['CG', 'CB', 'CD1', 'CD2'],
'value' : [-2.5, False, 1],
'pairs' : [['CD1', 'CD2'],
['HD11','HD21'],
['HD12','HD22'],
['HD13','HD23'],
],
},
}
data["TYR"]=data["PHE"]
sites_cart = self.atoms().extract_xyz()
t0=time.time()
info = ""
for rg in self.residue_groups():
for ag in rg.atom_groups():
flip_data = data.get(ag.resname, None)
if flip_data is None: continue
assert not ('dihedral' in flip_data and 'chiral' in flip_data)
flip_it=False
if 'dihedral' in flip_data:
sites = []
for d in flip_data["dihedral"]:
atom = ag.get_atom(d)
if atom is None: break
sites.append(atom.xyz)
if len(sites)!=4: continue
dihedral = dihedral_angle(sites=sites, deg=True)
if abs(dihedral)>360./flip_data["value"][1]/4:
flip_it=True
elif 'chiral' in flip_data:
sites = []
for d in flip_data["chiral"]:
atom = ag.get_atom(d)
if atom is None: break
sites.append(atom.xyz)
if len(sites)!=4: continue
delta = chirality_delta(sites=[flex.vec3_double([xyz]) for xyz in sites],
volume_ideal=flip_data["value"][0],
both_signs=flip_data['value'][1],
)
if abs(delta)>2.:
flip_it=True
if flip_it:
info += ' Residue "%s %s %s":' % (
rg.parent().id,
ag.resname,
rg.resseq,
)
flips_stored = []
atoms = ag.atoms()
for pair in flip_data["pairs"]:
atom1 = ag.get_atom(pair[0])
atom2 = ag.get_atom(pair[1])
if atom1 is None and atom2 is None: continue
if len(list(filter(None, [atom1, atom2]))) == 1:
flips_stored=[]
info += ' not complete - not flipped'
break
flips_stored.append([atom1,atom2])
for atom1, atom2 in flips_stored:
for attr in ['xyz', 'b']:
tmp = getattr(atom1, attr)
setattr(atom1, attr, getattr(atom2, attr))
setattr(atom2, attr, tmp)
info += ' "%s" <-> "%s"' % (atom1.name.strip(),
atom2.name.strip())
info += '\n'
if not info: info = ' None\n'
info += ' Time to flip residues: %0.2fs\n' % (time.time()-t0)
return info
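  # Usage sketch (illustrative, not part of the original class); ``hierarchy``
  # is a placeholder for an existing root object:
  #   info = hierarchy.flip_symmetric_amino_acids()
  #   print(info)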
def distance_based_simple_two_way_bond_sets(self,
fallback_expected_bond_length=1.4,
fallback_search_max_distance=2.5):
from cctbx.crystal import distance_based_connectivity
atoms = self.atoms().deep_copy() # XXX potential bottleneck
atoms.set_chemical_element_simple_if_necessary()
sites_cart = atoms.extract_xyz()
elements = atoms.extract_element()
conformer_indices = self.get_conformer_indices()
return distance_based_connectivity.build_simple_two_way_bond_sets(
sites_cart=sites_cart,
elements=elements,
conformer_indices=conformer_indices,
fallback_expected_bond_length=fallback_expected_bond_length,
fallback_search_max_distance=fallback_search_max_distance)
def reset_i_seq_if_necessary(self):
atoms = self.atoms()
i_seqs = atoms.extract_i_seq()
if (i_seqs.all_eq(0)):
atoms.reset_i_seq()
def get_peptide_c_alpha_selection(self):
"""
Extract atom selection (flex.size_t) for protein C-alpha atoms.
"""
result = flex.size_t()
i_seqs = self.atoms().extract_i_seq()
if(i_seqs.size()>1): assert i_seqs[1:].all_ne(0)
for model in self.models():
for chain in model.chains():
for rg in chain.residue_groups():
for ag in rg.atom_groups():
if(common_residue_names_get_class(ag.resname) == "common_amino_acid"):
for atom in ag.atoms():
if(atom.name.strip() == "CA"):
result.append(atom.i_seq)
return result
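  # Usage sketch (illustrative, not part of the original class); ``hierarchy``
  # is a placeholder for an existing root object:
  #   ca_isel = hierarchy.get_peptide_c_alpha_selection()
  #   print("%d C-alpha atoms" % ca_isel.size())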
def contains_protein(self, min_content=0):
"""
Inspect residue names and counts to determine if enough of them are protein.
"""
oc = self.overall_counts()
n_prot_residues = oc.get_n_residues_of_classes(
classes=['common_amino_acid', 'modified_amino_acid'])
n_water_residues = oc.get_n_residues_of_classes(
classes=['common_water'])
if oc.n_residues-n_water_residues > 0:
return n_prot_residues / (oc.n_residues-n_water_residues) > min_content
return n_prot_residues > min_content
def contains_nucleic_acid(self, min_content=0):
"""
Inspect residue names and counts to determine if enough of
them are RNA or DNA.
"""
oc = self.overall_counts()
n_na_residues = oc.get_n_residues_of_classes(
classes=['common_rna_dna', 'modified_rna_dna'])
n_water_residues = oc.get_n_residues_of_classes(
classes=['common_water'])
if oc.n_residues-n_water_residues > 0:
return n_na_residues / (oc.n_residues-n_water_residues) > min_content
return n_na_residues > min_content
def contains_rna(self):
"""
Inspect residue names and counts to determine if any of
them are RNA.
"""
oc = self.overall_counts()
for resname, count in oc.resnames.items():
if ( common_residue_names_get_class(resname) == "common_rna_dna"
and "D" not in resname.upper() ):
return True
return False
def remove_hd(self, reset_i_seq=False):
"""
Remove all hydrogen/deuterium atoms in-place. Returns the number of atoms
deleted.
"""
n_removed = 0
for pdb_model in self.models():
for pdb_chain in pdb_model.chains():
for pdb_residue_group in pdb_chain.residue_groups():
for pdb_atom_group in pdb_residue_group.atom_groups():
for pdb_atom in pdb_atom_group.atoms():
if (pdb_atom.element.strip().upper() in ["H","D"]):
pdb_atom_group.remove_atom(pdb_atom)
n_removed += 1
if (pdb_atom_group.atoms_size() == 0):
pdb_residue_group.remove_atom_group(pdb_atom_group)
if (pdb_residue_group.atom_groups_size() == 0):
pdb_chain.remove_residue_group(pdb_residue_group)
if (pdb_chain.residue_groups_size() == 0):
pdb_model.remove_chain(pdb_chain)
if (pdb_model.chains_size() == 0):
self.remove_model(pdb_model)
if (reset_i_seq):
self.atoms().reset_i_seq()
return n_removed
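  # Usage sketch (illustrative, not part of the original class); ``hierarchy``
  # is a placeholder for an existing root object:
  #   n_removed = hierarchy.remove_hd(reset_i_seq=True)
  #   print("Removed %d H/D atoms" % n_removed)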
def is_ca_only(self):
"""
    Determine whether the hierarchy consists of CA atoms only.
Upgrade options:
- implement threshold for cases where several residues are present in
full;
- figure out how to deal with HETATM records of the same chain.
- Ignore possible incorrect alignment of atom names.
"""
result = True
for model in self.models():
result = result and model.is_ca_only()
return result
bp.inject(ext.model, __hash_eq_mixin)
@bp.inject_into(ext.model)
class _():
"""
Class representing MODEL blocks in a PDB file (or equivalent mmCIF). There
will always be at least one of these in a hierarchy root extracted from a
PDB file even if no MODEL records are present.
Example
-------
>>> hierarchy = iotbx.pdb.hierarchy.root()
>>> model = iotbx.pdb.hierarchy.model(id="1")
>>> hierarchy.append_model(model)
>>> model = hierarchy.only_model()
"""
def residue_groups(self):
for chain in self.chains():
for rg in chain.residue_groups():
yield rg
def atom_groups(self):
for chain in self.chains():
for rg in chain.residue_groups():
for ag in rg.atom_groups():
yield ag
def only_chain(self):
assert self.chains_size() == 1
return self.chains()[0]
def only_residue_group(self):
return self.only_chain().only_residue_group()
def only_conformer(self):
return self.only_chain().only_conformer()
def only_atom_group(self):
return self.only_residue_group().only_atom_group()
def only_residue(self):
return self.only_conformer().only_residue()
def only_atom(self):
return self.only_atom_group().only_atom()
def is_ca_only(self):
"""
    Determine whether the model consists of CA atoms only.
Upgrade options:
- implement threshold for cases where several residues are present in
full;
- figure out how to deal with HETATM records of the same chain.
- Ignore possible incorrect alignment of atom names.
"""
result = True
for chain in self.chains():
result = result and chain.is_ca_only()
return result
bp.inject(ext.chain, __hash_eq_mixin)
@bp.inject_into(ext.chain)
class _():
"""
Class representing a continuous chain of atoms, as defined by the combination
of chain ID field and TER records (or the chain index in mmCIF format). Note
that this does not necessarily correspond to a covalently linked entity, as
it may be used to group various heteroatoms (including water), but
chemically distinct protein or nucleic acid chains will typically be
grouped into exactly one chain object apiece.
"""
def atom_groups(self):
for rg in self.residue_groups():
for ag in rg.atom_groups():
yield ag
def only_residue_group(self):
assert self.residue_groups_size() == 1
return self.residue_groups()[0]
def only_conformer(self):
conformers = self.conformers()
assert len(conformers) == 1
return conformers[0]
def only_atom_group(self):
return self.only_residue_group().only_atom_group()
def only_residue(self):
return self.only_conformer().only_residue()
def only_atom(self):
return self.only_atom_group().only_atom()
def residues(self):
return self.only_conformer().residues()
def occupancy_groups_simple(self, common_residue_name_class_only=None,
always_group_adjacent=True):
result = []
residue_groups = self.residue_groups()
n_rg = len(residue_groups)
done = [False] * n_rg
def process_range(i_begin, i_end):
isolated_var_occ = []
groups = {}
for i_rg in range(i_begin, i_end):
done[i_rg] = True
rg = residue_groups[i_rg]
for ag in residue_groups[i_rg].atom_groups():
altloc = ag.altloc
if (altloc == ""):
for atom in ag.atoms():
if (atom.tmp < 0): continue
if (atom.occ > 0 and atom.occ < 1):
isolated_var_occ.append(atom.tmp)
else:
group = []
for atom in ag.atoms():
if (atom.tmp < 0): continue
group.append(atom.tmp)
if (len(group) != 0):
groups.setdefault(altloc, []).extend(group)
groups = list(groups.values())
if (len(groups) != 0):
for group in groups: group.sort()
groups.sort(key=operator.itemgetter(0))
result.append(groups)
for i in isolated_var_occ:
result.append([[i]])
for i_begin,i_end in self.find_pure_altloc_ranges(
common_residue_name_class_only=common_residue_name_class_only):
# use always_group_adjacent
do_this_step = True
nc = None
for i_rg in range(i_begin, i_end):
rg = residue_groups[i_rg]
n_conf = len(residue_groups[i_rg].conformers())
if(nc is None): nc = n_conf
else:
if(nc != n_conf):
do_this_step = False
#
if(always_group_adjacent):
process_range(i_begin, i_end)
else:
if(do_this_step):
process_range(i_begin, i_end)
for i_rg in range(n_rg):
if (done[i_rg]): continue
process_range(i_rg, i_rg+1)
result.sort(key=lambda element: element[0][0])
return result
def get_residue_names_and_classes(self):
"""
Extract the residue names and counts of each residue type (protein,
nucleic acid, etc) within the chain.
:returns: a tuple containing a list of residue names, and a dictionary of
residue type frequencies.
"""
from iotbx.pdb import residue_name_plus_atom_names_interpreter
rn_seq = []
residue_classes = dict_with_default_0()
for residue_group in self.residue_groups():
# XXX should we iterate over all atom_groups or just take the first one?
#for atom_group in residue_group.atom_groups():
atom_group = residue_group.atom_groups()[0]
rnpani = residue_name_plus_atom_names_interpreter(
residue_name=atom_group.resname,
atom_names=[atom.name for atom in atom_group.atoms()])
rn = rnpani.work_residue_name
rn_seq.append(rn)
if (rn is None):
c = None
else:
c = common_residue_names_get_class(name=rn)
residue_classes[c] += 1
return (rn_seq, residue_classes)
def as_sequence(self, substitute_unknown='X'):
"""
Naively extract single-character protein or nucleic acid sequence, without
accounting for residue numbering.
:param substitute_unknown: character to use for unrecognized 3-letter codes
"""
assert ((isinstance(substitute_unknown, str)) and
(len(substitute_unknown) == 1))
common_rna_dna_codes = {
"A": "A",
"C": "C",
"G": "G",
"U": "U",
"DA": "A",
"DC": "C",
"DG": "G",
"DT": "T"}
rn_seq, residue_classes = self.get_residue_names_and_classes()
n_aa = residue_classes["common_amino_acid"] + residue_classes["modified_amino_acid"]
n_na = residue_classes["common_rna_dna"] + residue_classes["modified_rna_dna"]
seq = []
if (n_aa > n_na):
aa_3_as_1 = one_letter_given_three_letter
for rn in rn_seq:
if (rn in aa_3_as_1_mod):
seq.append(aa_3_as_1_mod.get(rn, substitute_unknown))
else :
seq.append(aa_3_as_1.get(rn, substitute_unknown))
elif (n_na != 0):
for rn in rn_seq:
if rn not in common_rna_dna_codes and rn in na_3_as_1_mod:
rn = na_3_as_1_mod.get(rn, "N")
seq.append(common_rna_dna_codes.get(rn, "N"))
return seq
def _residue_is_aa_or_na(self, residue_name, include_modified=True):
"""
Helper function for checking if a residue is an amino acid or
nucleic acid
Parameters
----------
residue_name: str
The residue name
include_modified: bool
If set, include modified amino and nucleic acids
Returns
-------
bool
True if the residue is an amino or nucleic acid, false otherwise
"""
residue_class = common_residue_names_get_class(residue_name)
acceptable_classes = ['common_amino_acid', 'common_rna_dna']
if include_modified:
acceptable_classes += ['d_amino_acid', 'modified_amino_acid', 'modified_rna_dna']
return residue_class in acceptable_classes
def as_padded_sequence(self, missing_char='X', skip_insertions=False,
pad=True, substitute_unknown='X', pad_at_start=True,
ignore_hetatm=False):
"""
Extract protein or nucleic acid sequence, taking residue numbering into
account so that apparent gaps will be filled with substitute characters.
"""
seq = self.as_sequence()
padded_seq = []
last_resseq = 0
last_icode = " "
i = 0
for i, residue_group in enumerate(self.residue_groups()):
if (skip_insertions) and (residue_group.icode != " "):
continue
if ignore_hetatm and not self._residue_is_aa_or_na(residue_group.unique_resnames()[0]):
continue
resseq = residue_group.resseq_as_int()
if (pad) and (resseq > (last_resseq + 1)):
for x in range(resseq - last_resseq - 1):
if last_resseq == 0 and not pad_at_start: break
padded_seq.append(missing_char)
last_resseq = resseq
padded_seq.append(seq[i])
return "".join(padded_seq)
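  # Usage sketch (illustrative, not part of the original class); ``chain`` is a
  # placeholder for an existing chain object:
  #   padded = chain.as_padded_sequence(missing_char='X', skip_insertions=True)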
def get_residue_ids(self, skip_insertions=False, pad=True, pad_at_start=True,
ignore_hetatm=False):
resids = []
last_resseq = 0
last_icode = " "
for i, residue_group in enumerate(self.residue_groups()):
if (skip_insertions) and (residue_group.icode != " "):
continue
if ignore_hetatm and not self._residue_is_aa_or_na(residue_group.unique_resnames()[0]):
continue
resseq = residue_group.resseq_as_int()
if (pad) and (resseq > (last_resseq + 1)):
for x in range(resseq - last_resseq - 1):
if last_resseq == 0 and not pad_at_start: break
resids.append(None)
last_resseq = resseq
resids.append(residue_group.resid())
return resids
def get_residue_names_padded(
self, skip_insertions=False, pad=True, pad_at_start=True,
ignore_hetatm=False):
resnames = []
last_resseq = 0
last_icode = " "
for i, residue_group in enumerate(self.residue_groups()):
if (skip_insertions) and (residue_group.icode != " "):
continue
if ignore_hetatm and not self._residue_is_aa_or_na(residue_group.unique_resnames()[0]):
continue
resseq = residue_group.resseq_as_int()
if (pad) and (resseq > (last_resseq + 1)):
for x in range(resseq - last_resseq - 1):
if last_resseq == 0 and not pad_at_start: break
resnames.append(None)
last_resseq = resseq
resnames.append(residue_group.unique_resnames()[0])
return resnames
def is_protein(self, min_content=0.8, ignore_water=True):
"""
Determine whether the chain represents an amino acid polymer, based on the
frequency of residue names.
Very slow due to usage of residue_name_plus_atom_names_interpreter in
get_residue_names_and_classes (majority of the processing is unnecessary)
"""
rn_seq, residue_classes = self.get_residue_names_and_classes()
n_aa = residue_classes["common_amino_acid"] + residue_classes['modified_amino_acid']
n_na = residue_classes["common_rna_dna"] + residue_classes['modified_rna_dna']
if (ignore_water):
while rn_seq.count("HOH") > 0 :
rn_seq.remove("HOH")
if (len(rn_seq) == 0):
return False
elif ((n_aa > n_na) and ((n_aa / len(rn_seq)) >= min_content)):
return True
elif (rn_seq == (["UNK"] * len(rn_seq))):
return True
return False
def is_na(self, min_content=0.8, ignore_water=True):
"""
Determine whether the chain represents a nucleic acid polymer, based on the
frequency of base names.
Very slow due to usage of residue_name_plus_atom_names_interpreter in
get_residue_names_and_classes (majority of the processing is unnecessary)
"""
rn_seq, residue_classes = self.get_residue_names_and_classes()
n_aa = residue_classes["common_amino_acid"] + residue_classes['modified_amino_acid']
n_na = residue_classes["common_rna_dna"] + residue_classes['modified_rna_dna']
if (ignore_water):
while rn_seq.count("HOH") > 0 :
rn_seq.remove("HOH")
if (len(rn_seq) == 0):
return False
elif ((n_na > n_aa) and ((n_na / len(rn_seq)) >= min_content)):
return True
return False
def is_ca_only(self):
"""
    Determine whether the chain consists of CA atoms only.
Upgrade options:
- implement threshold for cases where several residues are present in
full;
- figure out how to deal with HETATM records of the same chain.
- Ignore possible incorrect alignment of atom names.
"""
atom_names = self.atoms().extract_name()
return atom_names.all_eq(" CA ")
bp.inject(ext.residue_group, __hash_eq_mixin)
@bp.inject_into(ext.residue_group)
class _():
def only_atom_group(self):
assert self.atom_groups_size() == 1
return self.atom_groups()[0]
def only_atom(self):
return self.only_atom_group().only_atom()
def id_str(self):
chain_id = ""
chain = self.parent()
if (chain is not None):
chain_id = chain.id
return "%2s%4s%1s" % (chain_id, self.resseq, self.icode)
bp.inject(ext.atom_group, __hash_eq_mixin)
@bp.inject_into(ext.atom_group)
class _():
def only_atom(self):
assert self.atoms_size() == 1
return self.atoms()[0]
# FIXME suppress_segid has no effect here
def id_str(self, suppress_segid=None):
chain_id = ""
resid = ""
rg = self.parent()
if (rg is not None):
resid = rg.resid()
chain = rg.parent()
if (chain is not None):
chain_id = chain.id
return "%1s%3s%2s%5s" % (self.altloc, self.resname, chain_id, resid)
def occupancy(self, raise_error_if_non_uniform=False):
"""
Calculate the mean occupancy for atoms in this group, with option of
raising ValueError if they differ.
"""
atom_occupancies = self.atoms().extract_occ()
assert (len(atom_occupancies) > 0)
min_max_mean = atom_occupancies.min_max_mean()
if (min_max_mean.min != min_max_mean.max):
if (raise_error_if_non_uniform):
raise ValueError(("Non-uniform occupancies for atom group %s "+
"(range: %.2f - %.2f).") % (self.id_str(), min_max_mean.min,
min_max_mean.max))
return min_max_mean.mean
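  # Usage sketch (illustrative, not part of the original class); ``atom_group``
  # is a placeholder for an existing atom_group object:
  #   try:
  #     occ = atom_group.occupancy(raise_error_if_non_uniform=True)
  #   except ValueError as e:
  #     print(e)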
bp.inject(ext.atom, __hash_eq_mixin)
@bp.inject_into(ext.atom)
class _():
__doc__ = """
The basic unit of the PDB hierarchy (or the PDB input object in general),
representing a single point scatterer corresponding to an ATOM or HETATM
record in PDB format (plus associated ANISOU or related records if present).
Note that this does not directly store attributes of higher-level entities
whose identity is also recorded in ATOM records, such as the chain ID or
residue name. These may be retrieved either by walking up the hierarchy
starting with atom.parent(), or by calling atom.fetch_labels().
"""
def chain(self):
"""
Convenience method for fetching the chain object associated with this
    atom (or None if not defined).
"""
ag = self.parent()
if (ag is not None):
rg = ag.parent()
if (rg is not None):
return rg.parent()
return None
def is_in_same_conformer_as(self, other):
"""
Indicate whether two atoms are part of the same conformer and thus are
capable of interacting directly, as defined by the parent atom_group and
model object(s).
"""
ag_i = self.parent(optional=False)
ag_j = other.parent(optional=False)
altloc_i = ag_i.altloc
altloc_j = ag_j.altloc
if ( len(altloc_i) != 0
and len(altloc_j) != 0
and altloc_i != altloc_j):
return False
def p3(ag):
return ag.parent(optional=False) \
.parent(optional=False) \
.parent(optional=False)
model_i = p3(ag_i)
model_j = p3(ag_j)
return model_i.memory_id() == model_j.memory_id()
def set_element_and_charge_from_scattering_type_if_necessary(self,
scattering_type):
from cctbx.eltbx.xray_scattering \
import get_element_and_charge_symbols \
as gec
sct_e, sct_c = gec(scattering_type=scattering_type, exact=False)
pdb_ec = self.element.strip() + self.charge.strip()
if (len(pdb_ec) != 0):
if (sct_e == "" and sct_c == ""):
return False
pdb_e, pdb_c = gec(scattering_type=pdb_ec, exact=False)
if ( pdb_e == sct_e
and pdb_c == sct_c):
return False
self.element = "%2s" % sct_e.upper()
self.charge = "%-2s" % sct_c
return True
def charge_as_int(self):
"""
Extract the atomic charge from the (string) charge field.
:returns: Python int, defaulting to zero
"""
charge = self.charge_tidy()
if charge is None:
return 0
if charge.endswith("-"):
sign = -1
else:
sign = 1
charge = charge.strip(" -+")
if charge != "":
return sign * int(charge)
else:
return 0
@bp.inject_into(ext.conformer)
class _():
__doc__ = """
Alternate view into a chain object, grouping sequential residues with
  equivalent altlocs. As a general rule it is preferable to iterate over
chain.residue_groups() instead.
"""
def only_residue(self):
residues = self.residues()
assert len(residues) == 1
return residues[0]
def only_atom(self):
return self.only_residue().only_atom()
def get_residue_names_and_classes(self):
# XXX This function should probably be deprecated, since it has been
# duplicated in chain.get_residue_names_and_classes which should probably
# be preferred to this function
rn_seq = []
residue_classes = dict_with_default_0()
for residue in self.residues():
rnpani = residue.residue_name_plus_atom_names_interpreter()
rn = rnpani.work_residue_name
rn_seq.append(rn)
if (rn is None):
c = None
else:
c = common_residue_names_get_class(name=rn)
residue_classes[c] += 1
return (rn_seq, residue_classes)
def is_protein(self, min_content=0.8):
# XXX DEPRECATED
# Used only in mmtbx/validation and wxtbx. Easy to eliminate.
rn_seq, residue_classes = self.get_residue_names_and_classes()
n_aa = residue_classes["common_amino_acid"] + residue_classes['modified_amino_acid']
n_na = residue_classes["common_rna_dna"] + residue_classes['modified_rna_dna']
non_water = len(rn_seq)-residue_classes.get('common_water', 0)
if ((n_aa > n_na) and ((n_aa / non_water) >= min_content)):
return True
return False
def is_na(self, min_content=0.8):
# XXX DEPRECATED
# Used only in mmtbx/validation and wxtbx. Easy to eliminate.
rn_seq, residue_classes = self.get_residue_names_and_classes()
n_aa = residue_classes["common_amino_acid"] + residue_classes['modified_amino_acid']
n_na = residue_classes["common_rna_dna"] + residue_classes['modified_rna_dna']
non_water = len(rn_seq)-residue_classes.get('common_water', 0)
if ((n_na > n_aa) and ((n_na / non_water) >= min_content)):
return True
return False
def as_sequence(self, substitute_unknown='X'):
# XXX This function should probably be deprecated, since it has been
# duplicated in chain.as_sequence which should probably be preferred to
# this function
assert ((isinstance(substitute_unknown, str)) and
(len(substitute_unknown) == 1))
common_rna_dna_codes = {
"A": "A",
"C": "C",
"G": "G",
"U": "U",
"DA": "A",
"DC": "C",
"DG": "G",
"DT": "T"}
rn_seq, residue_classes = self.get_residue_names_and_classes()
n_aa = residue_classes["common_amino_acid"] + residue_classes["modified_amino_acid"]
n_na = residue_classes["common_rna_dna"] + residue_classes["modified_rna_dna"]
seq = []
if (n_aa > n_na):
aa_3_as_1 = one_letter_given_three_letter
for rn in rn_seq:
if (rn in aa_3_as_1_mod):
seq.append(aa_3_as_1_mod.get(rn, substitute_unknown))
else :
seq.append(aa_3_as_1.get(rn, substitute_unknown))
elif (n_na != 0):
for rn in rn_seq:
if rn not in common_rna_dna_codes and rn in na_3_as_1_mod:
rn = na_3_as_1_mod.get(rn, "N")
seq.append(common_rna_dna_codes.get(rn, "N"))
return seq
def format_fasta(self, max_line_length=79):
seq = self.as_sequence()
n = len(seq)
if (n == 0): return None
comment = [">"]
p = self.parent()
if (p is not None):
comment.append('chain "%2s"' % p.id)
comment.append('conformer "%s"' % self.altloc)
result = [" ".join(comment)]
i = 0
while True:
j = min(n, i+max_line_length)
if (j == i): break
result.append("".join(seq[i:j]))
i = j
return result
def as_padded_sequence(self, missing_char='X', skip_insertions=False,
pad=True, substitute_unknown='X', pad_at_start=True):
# XXX This function should probably be deprecated, since it has been
# duplicated in chain.as_padded_sequence which should probably be preferred
# to this function
seq = self.as_sequence()
padded_seq = []
last_resseq = 0
last_icode = " "
i = 0
for i, residue in enumerate(self.residues()):
if (skip_insertions) and (residue.icode != " "):
continue
resseq = residue.resseq_as_int()
if (pad) and (resseq > (last_resseq + 1)):
for x in range(resseq - last_resseq - 1):
if last_resseq == 0 and not pad_at_start: break
padded_seq.append(missing_char)
last_resseq = resseq
padded_seq.append(seq[i])
return "".join(padded_seq)
def as_sec_str_sequence(self, helix_sele, sheet_sele, missing_char='X',
pad=True, pad_at_start=True):
ss_seq = []
last_resseq = 0
for i, residue in enumerate(self.residues()):
resseq = residue.resseq_as_int()
if pad and resseq > (last_resseq + 1):
for x in range(resseq - last_resseq - 1):
if last_resseq == 0 and not pad_at_start: break
ss_seq.append(missing_char)
found = False
for atom in residue.atoms():
if helix_sele[atom.i_seq] :
ss_seq.append('H')
found = True
break
elif sheet_sele[atom.i_seq] :
ss_seq.append('S')
found = True
break
if not found :
ss_seq.append('L')
last_resseq = resseq
return "".join(ss_seq)
def get_residue_ids(self, skip_insertions=False, pad=True, pad_at_start=True):
# XXX This function should probably be deprecated, since it has been
# duplicated in chain.get_residue_ids which should probably be preferred
# to this function
resids = []
last_resseq = 0
last_icode = " "
for i, residue in enumerate(self.residues()):
if (skip_insertions) and (residue.icode != " "):
continue
resseq = residue.resseq_as_int()
if (pad) and (resseq > (last_resseq + 1)):
for x in range(resseq - last_resseq - 1):
if last_resseq == 0 and not pad_at_start: break
resids.append(None)
last_resseq = resseq
resids.append(residue.resid())
return resids
def get_residue_names_padded(
self, skip_insertions=False, pad=True, pad_at_start=True):
# XXX This function should probably be deprecated, since it has been
# duplicated in chain.get_residue_names_padded which should probably be
# preferred to this function
resnames = []
last_resseq = 0
last_icode = " "
for i, residue in enumerate(self.residues()):
if (skip_insertions) and (residue.icode != " "):
continue
resseq = residue.resseq_as_int()
if (pad) and (resseq > (last_resseq + 1)):
for x in range(resseq - last_resseq - 1):
if last_resseq == 0 and not pad_at_start: break
resnames.append(None)
last_resseq = resseq
resnames.append(residue.resname)
return resnames
@bp.inject_into(ext.residue)
class _():
def __getinitargs__(self):
result_root = self.root()
if (result_root is None):
orig_conformer = self.parent()
assert orig_conformer is not None
orig_chain = orig_conformer.parent()
assert orig_chain is not None
orig_model = orig_chain.parent()
assert orig_model is not None
result_atom_group = atom_group(
altloc=orig_conformer.altloc, resname=self.resname)
result_residue_group = residue_group(
resseq=self.resseq, icode=self.icode)
result_chain = chain(id=orig_chain.id)
result_model = model(id=orig_model.id)
result_root = root()
result_root.append_model(result_model)
result_model.append_chain(result_chain)
result_chain.append_residue_group(result_residue_group)
result_residue_group.append_atom_group(result_atom_group)
for atom in self.atoms():
result_atom_group.append_atom(atom.detached_copy())
return (result_root,)
def standalone_copy(self):
return residue(root=self.__getinitargs__()[0])
def only_atom(self):
assert self.atoms_size() == 1
return self.atoms()[0]
def residue_name_plus_atom_names_interpreter(self,
translate_cns_dna_rna_residue_names=None,
return_mon_lib_dna_name=False):
from iotbx.pdb import residue_name_plus_atom_names_interpreter
return residue_name_plus_atom_names_interpreter(
residue_name=self.resname,
atom_names=[atom.name for atom in self.atoms()],
translate_cns_dna_rna_residue_names=translate_cns_dna_rna_residue_names,
return_mon_lib_dna_name=return_mon_lib_dna_name)
@bp.inject_into(ext.atom_with_labels)
class _():
__doc__ = """
Stand-in for atom object, which explicitly records the attributes normally
reserved for parent classes such as residue name, chain ID, etc.
"""
def __getstate__(self):
labels_dict = {}
for attr in [ "xyz", "sigxyz", "occ", "sigocc", "b", "sigb", "uij",
"siguij", "hetero", "serial", "name", "segid", "element",
"charge", "model_id", "chain_id", "resseq", "icode",
"altloc", "resname", ] :
labels_dict[attr] = getattr(self, attr, None)
return labels_dict
def __setstate__(self, state):
from iotbx.pdb import make_atom_with_labels
state = dict(state)
make_atom_with_labels(self, **state)
def fetch_labels(self):
return self
# MARKED_FOR_DELETION_OLEG
# Reason: so far found only in iotbx/file_reader.py for no clear reason.
class input_hierarchy_pair(object):
def __init__(self,
input,
hierarchy=None,
sort_atoms=False,
):
self.input = input
if (hierarchy is None):
hierarchy = self.input.construct_hierarchy(
set_atom_i_seq=True, sort_atoms=sort_atoms)
self.hierarchy = hierarchy
def __getinitargs__(self):
from pickle import PicklingError
raise PicklingError
def hierarchy_to_input_atom_permutation(self):
"""
Return the permutation selection
(:py:class:`scitbx.array_family.flex.size_t`) mapping the atoms as ordered
by the hierarchy to their original positions in the PDB/mmCIF file.
"""
h_atoms = self.hierarchy.atoms()
sentinel = h_atoms.reset_tmp(first_value=0, increment=1)
return self.input.atoms().extract_tmp_as_size_t()
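  # Usage sketch (illustrative, not part of the original class); ``pdb_in`` is
  # a placeholder for an existing input_hierarchy_pair (or input) object:
  #   perm = pdb_in.hierarchy_to_input_atom_permutation()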
def input_to_hierarchy_atom_permutation(self):
"""
Return the permutation selection
(:py:class:`scitbx.array_family.flex.size_t`) mapping the atoms as ordered
in the original PDB/mmCIF file to their positions in the hierarchy.
"""
i_atoms = self.input.atoms()
sentinel = i_atoms.reset_tmp(first_value=0, increment=1)
return self.hierarchy.atoms().extract_tmp_as_size_t()
def xray_structure_simple(self, *args, **kwds):
"""
Wrapper for the equivalent method of the input object - extracts the
:py:class:`cctbx.xray.structure` with scatterers in the same order as in
the hierarchy.
"""
perm = self.input_to_hierarchy_atom_permutation()
xrs = self.input.xray_structure_simple(*args, **kwds)
return xrs.select(perm)
def construct_hierarchy(self, *args, **kwds) : # TODO remove eventually
"""
Returns a reference to the existing hierarchy. For backwards compatibility
only, and issues a :py:class:`warnings.DeprecationWarning`.
"""
warnings.warn("Please access input.hierarchy directly.",
DeprecationWarning)
return self.hierarchy
def crystal_symmetry(self, *args, **kwds):
return self.input.crystal_symmetry(*args, **kwds)
class input(input_hierarchy_pair):
"""
Class used for reading a PDB hierarchy from a file or string.
Attributes
----------
input : iotbx.pdb.pdb_input_from_any
hierarchy : iotbx.pdb.hierarchy.root
Examples
--------
>>> import iotbx.pdb.hierarchy
>>> pdb_in = iotbx.pdb.hierarchy.input(pdb_string='''
... ATOM 1 N ASP A 37 10.710 14.456 9.568 1.00 15.78 N
... ATOM 2 CA ASP A 37 9.318 14.587 9.999 1.00 18.38 C
... ''')
  >>> print(pdb_in.hierarchy.atoms_size())
  2
  """
def __init__(self, file_name=None,
pdb_string=None, source_info=Auto, sort_atoms=True):
"""
Initializes an input from a file or string.
Parameters
----------
file_name : str, optional
pdb_string : str, optional
source_info : str, optional
Indicates where this PDB came from (i.e. "string")
"""
assert [file_name, pdb_string].count(None) == 1
import iotbx.pdb
if (file_name is not None):
assert source_info is Auto
pdb_inp = iotbx.pdb.input(file_name=file_name)
else:
if (source_info is Auto): source_info = "string"
pdb_inp = iotbx.pdb.input(
source_info=source_info, lines=flex.split_lines(pdb_string))
super(input, self).__init__(input=pdb_inp, sort_atoms=sort_atoms)
# END_MARKED_FOR_DELETION_OLEG
class show_summary(input):
def __init__(self,
file_name=None,
pdb_string=None,
out=None,
prefix="",
flag_errors=True,
flag_warnings=True,
residue_groups_max_show=10,
duplicate_atom_labels_max_show=10,
level_id=None,
level_id_exception=ValueError):
input.__init__(self, file_name=file_name, pdb_string=pdb_string)
print(prefix+self.input.source_info(), file=out)
self.overall_counts = self.hierarchy.overall_counts()
self.overall_counts.show(
out=out,
prefix=prefix+" ",
residue_groups_max_show=residue_groups_max_show,
duplicate_atom_labels_max_show=duplicate_atom_labels_max_show)
if (level_id is not None):
self.hierarchy.show(
out=out,
prefix=prefix+" ",
level_id=level_id,
level_id_exception=level_id_exception)
# MARKED_FOR_DELETION_OLEG
# Reason: functionality is moved to mmtbx.model and uses better all_chain_ids
# function from iotbx.pdb.utils
# Not until used in iotbx/pdb/__init__py: join_fragment_files:
# GUI app: Combine PDB files
# CL app: iotbx.pdb.join_fragment_files
def suffixes_for_chain_ids(suffixes=Auto):
if (suffixes is Auto):
suffixes="123456789" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
"abcdefghijklmnopqrstuvwxyz"
return suffixes
def append_chain_id_suffixes(roots, suffixes=Auto):
suffixes = suffixes_for_chain_ids(suffixes=suffixes)
assert len(roots) <= len(suffixes)
for root,suffix in zip(roots, suffixes):
for model in root.models():
for chain in model.chains():
assert len(chain.id) == 1, len(chain.id)
chain.id += suffix
def join_roots(roots, chain_id_suffixes=Auto):
"""
Combine two root objects.
"""
if (chain_id_suffixes is not None):
append_chain_id_suffixes(roots=roots, suffixes=chain_id_suffixes)
result = root()
for rt in roots:
result.transfer_chains_from_other(other=rt)
return result
# END_MARKED_FOR_DELETION_OLEG
# XXX: Nat's utility functions
# also used in ncs_search.py
def new_hierarchy_from_chain(chain):
"""
  Given a chain object, create an entirely new hierarchy object containing only
this chain (using a new copy).
"""
import iotbx.pdb.hierarchy
hierarchy = iotbx.pdb.hierarchy.root()
model = iotbx.pdb.hierarchy.model()
model.append_chain(chain.detached_copy())
hierarchy.append_model(model)
return hierarchy
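# Illustrative sketch (not part of the original module): extract the first
# chain of an existing hierarchy into its own single-chain hierarchy. The
# argument ``ph`` is a placeholder for a hierarchy root supplied by the caller.
def _example_new_hierarchy_from_first_chain(ph):
  first_chain = ph.models()[0].chains()[0]
  return new_hierarchy_from_chain(chain=first_chain)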
def find_and_replace_chains(original_hierarchy, partial_hierarchy,
log=sys.stdout):
"""
Delete and replace the first chain in the original hierarchy corresponding
to each model/ID combination in the partial hierarchy. Note that this means
that if waters and heteroatoms are given the same ID as a protein chain
(separated by other chains or TER record(s)), but the partial hierarchy only
contains a substitute protein chain, the heteroatom chain will be kept.
"""
for original_model in original_hierarchy.models():
for partial_model in partial_hierarchy.models():
if original_model.id == partial_model.id :
#print >> log, " found model '%s'" % partial_model.id
i = 0
while i < len(original_model.chains()):
original_chain = original_model.chains()[i]
j = 0
while j < len(partial_model.chains()):
partial_chain = partial_model.chains()[j]
if original_chain.id == partial_chain.id :
#print >> log, " found chain '%s' at index %d" % (
# partial_chain.id, i)
original_model.remove_chain(i)
original_model.insert_chain(i, partial_chain.detached_copy())
partial_model.remove_chain(j)
break
j += 1
i += 1
def get_contiguous_ranges(hierarchy):
assert (len(hierarchy.models()) == 1)
chain_clauses = []
for chain in hierarchy.models()[0].chains():
resid_ranges = []
start_resid = None
last_resid = None
last_resseq = - sys.maxsize
for residue_group in chain.residue_groups():
resseq = residue_group.resseq_as_int()
resid = residue_group.resid()
if (resseq != last_resseq) and (resseq != (last_resseq + 1)):
if (start_resid is not None):
resid_ranges.append((start_resid, last_resid))
start_resid = resid
last_resid = resid
else :
if (start_resid is None):
start_resid = resid
last_resid = resid
last_resseq = resseq
if (start_resid is not None):
resid_ranges.append((start_resid, last_resid))
resid_clauses = []
for r1, r2 in resid_ranges :
if (r1 == r2):
resid_clauses.append("resid %s" % r1)
else :
resid_clauses.append("resid %s through %s" % (r1,r2))
sele = ("chain '%s' and ((" + ") or (".join(resid_clauses) + "))") % \
chain.id
chain_clauses.append(sele)
return chain_clauses
# used for reporting build results in phenix
def get_residue_and_fragment_count(pdb_file=None, pdb_hierarchy=None):
from libtbx import smart_open
if (pdb_file is not None):
raw_records = flex.std_string()
with smart_open.for_reading(file_name=pdb_file) as f:
lines = f.read()
raw_records.extend(flex.split_lines(lines))
pdb_in = iotbx.pdb.input(source_info=pdb_file, lines=raw_records)
pdb_hierarchy = pdb_in.construct_hierarchy()
assert (pdb_hierarchy is not None)
models = pdb_hierarchy.models()
if len(models) == 0 :
return (0, 0, 0)
chains = models[0].chains()
if len(chains) == 0 :
return (0, 0, 0)
n_res = 0
n_frag = 0
n_h2o = 0
for chain in chains :
i = -999
for res in chain.conformers()[0].residues():
residue_type = common_residue_names_get_class(
res.resname, consider_ccp4_mon_lib_rna_dna=True)
if ( ('amino_acid' in residue_type) or ('rna_dna' in residue_type) ):
n_res += 1
resseq = res.resseq_as_int()
if resseq > (i + 1):
n_frag += 1
i = resseq
elif ('water' in residue_type):
n_h2o += 1
return (n_res, n_frag, n_h2o)
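# Illustrative sketch (not part of the original module): summarize a model
# file; the file name is a placeholder.
def _example_count_residues_and_fragments():
  n_res, n_frag, n_h2o = get_residue_and_fragment_count(pdb_file="model.pdb")
  return (n_res, n_frag, n_h2o)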
def sites_diff(hierarchy_1,
hierarchy_2,
exclude_waters=True,
return_hierarchy=True,
log=None):
"""
Given two PDB hierarchies, calculate the shift of each atom (accounting for
possible insertions/deletions) and (optionally) apply it to the B-factor for
display in PyMOL, plotting in PHENIX GUI, etc.
"""
if (log is None) : log = null_out()
atom_lookup = {}
deltas = flex.double(hierarchy_2.atoms_size(), -1.)
for atom in hierarchy_1.atoms_with_labels():
if (atom.resname in ["HOH", "WAT"]) and (exclude_waters):
continue
atom_id = atom.id_str()
if (atom_id in atom_lookup):
raise RuntimeError("Duplicate atom ID - can't extract coordinates.")
atom_lookup[atom_id] = atom.xyz
for i_seq, atom in enumerate(hierarchy_2.atoms_with_labels()):
if (atom.resname in ["HOH", "WAT"]) and (exclude_waters):
continue
atom_id = atom.id_str()
if (atom_id in atom_lookup):
x1,y1,z1 = atom_lookup[atom_id]
x2,y2,z2 = atom.xyz
delta = math.sqrt((x2-x1)**2 + (y2-y1)**2 + (z2-z1)**2)
deltas[i_seq] = delta
if (return_hierarchy):
hierarchy_new = hierarchy_2.deep_copy()
hierarchy_new.atoms().set_b(deltas)
return hierarchy_new
else :
return deltas
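# Illustrative sketch (not part of the original module): compare two models and
# write the per-atom shifts (stored in the B-factor field by sites_diff) to an
# mmCIF file. File names are placeholders.
def _example_sites_diff_to_file():
  import iotbx.pdb
  h1 = iotbx.pdb.input(file_name="model_1.pdb").construct_hierarchy()
  h2 = iotbx.pdb.input(file_name="model_2.pdb").construct_hierarchy()
  diff_hierarchy = sites_diff(h1, h2)
  diff_hierarchy.write_mmcif_file(file_name="sites_diff.cif")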
def substitute_atom_group(
current_group,
new_group):
"""
Substitute sidechain atoms from one residue for another, using
least-squares superposition to align the backbone atoms.
Limited functionality:
1) Amino-acids only, 2) side chain atoms only.
"""
from scitbx.math import superpose
new_atoms = new_group.detached_copy().atoms()
selection_fixed = flex.size_t()
selection_moving = flex.size_t()
res_class = common_residue_names_get_class(current_group.resname)
if(res_class != "common_amino_acid"):
raise Sorry("Only common amino-acid residues supported.")
aa_backbone_atoms_1 = [" CA ", " C ", " N ", " O "]
aa_backbone_atoms_2 = [" CA ", " C ", " N ", " CB "]
aa_backbone_atoms_1.sort()
aa_backbone_atoms_2.sort()
#
def get_bb_atoms(current_group, aa_backbone_atoms):
result = []
for atom in current_group.atoms():
      if(atom.name in aa_backbone_atoms):
result.append(atom.name)
result.sort()
return result
aa_backbone_atoms_current = get_bb_atoms(current_group, aa_backbone_atoms_1)
aa_backbone_atoms_new = get_bb_atoms(new_group, aa_backbone_atoms_1)
if(aa_backbone_atoms_current != aa_backbone_atoms_1 or
aa_backbone_atoms_new != aa_backbone_atoms_1):
outl = ''
for atom in current_group.atoms():
outl += '\n%s' % atom.quote()
raise Sorry("Main chain must be complete. %s" % outl)
#
for i_seq, atom in enumerate(current_group.atoms()):
if(not atom.name in aa_backbone_atoms_2): continue
for j_seq, other_atom in enumerate(new_group.atoms()):
if(atom.name == other_atom.name):
selection_fixed.append(i_seq)
selection_moving.append(j_seq)
sites_fixed = current_group.atoms().extract_xyz().select(selection_fixed)
sites_moving = new_atoms.extract_xyz().select(selection_moving)
assert sites_fixed.size() == sites_moving.size()
lsq_fit = superpose.least_squares_fit(
reference_sites = sites_fixed,
other_sites = sites_moving)
sites_new = new_atoms.extract_xyz()
sites_new = lsq_fit.r.elems * sites_new + lsq_fit.t.elems
new_atoms.set_xyz(sites_new)
atom_b_iso = {}
atom_occ = {}
mean_b = flex.mean(current_group.atoms().extract_b())
for atom in current_group.atoms():
if(not atom.name in aa_backbone_atoms_1):
current_group.remove_atom(atom)
atom_b_iso[atom.name] = atom.b
atom_occ[atom.name] = atom.occ
for atom in new_atoms:
if(not atom.name in aa_backbone_atoms_1):
if(atom.name in atom_b_iso): atom.b = atom_b_iso[atom.name]
else: atom.b = mean_b
if(atom.name in atom_occ): atom.occ = atom_occ[atom.name]
else: atom.occ = 1.
current_group.append_atom(atom)
current_group.resname = new_group.resname
return current_group
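# Illustrative sketch (not part of the original module): replace the side chain
# of the first residue of one hierarchy with the matching residue from another.
# Both hierarchies are placeholders supplied by the caller.
def _example_substitute_first_side_chain(hierarchy_current, hierarchy_new):
  ag_current = hierarchy_current.only_model().chains()[0] \
    .residue_groups()[0].atom_groups()[0]
  ag_new = hierarchy_new.only_model().chains()[0] \
    .residue_groups()[0].atom_groups()[0]
  return substitute_atom_group(current_group=ag_current, new_group=ag_new)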
|
the-stack_0_6808 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numpy.linalg import LinAlgError
from ... import opcodes as OperandDef
from ...serialize import KeyField, StringField
from ...core import ExecutableTuple
from ..array_utils import device, as_same_device
from ..datasource import tensor as astensor
from ..operands import TensorHasInput, TensorOperandMixin
from ..core import TensorOrder
from .core import SFQR, TSQR
class TensorQR(TensorHasInput, TensorOperandMixin):
_op_type_ = OperandDef.QR
_input = KeyField('input')
_method = StringField('method')
def __init__(self, method=None, dtype=None, **kw):
super(TensorQR, self).__init__(_method=method, _dtype=dtype, **kw)
@property
def method(self):
return self._method
@property
def output_limit(self):
return 2
def _set_inputs(self, inputs):
super(TensorQR, self)._set_inputs(inputs)
self._input = self._inputs[0]
def __call__(self, a):
a = astensor(a)
if a.ndim != 2:
raise LinAlgError('{0}-dimensional tensor given. '
'Tensor must be two-dimensional'.format(a.ndim))
tiny_q, tiny_r = np.linalg.qr(np.ones((1, 1), dtype=a.dtype))
x, y = a.shape
q_shape, r_shape = (a.shape, (y, y)) if x > y else ((x, x), a.shape)
q, r = self.new_tensors([a],
kws=[{'side': 'q', 'dtype': tiny_q.dtype,
'shape': q_shape, 'order': TensorOrder.C_ORDER},
{'side': 'r', 'dtype': tiny_r.dtype,
'shape': r_shape, 'order': TensorOrder.C_ORDER}])
return ExecutableTuple([q, r])
@classmethod
def tile(cls, op):
q, r = op.outputs
q_dtype, r_dtype = q.dtype, r.dtype
q_shape, r_shape = q.shape, r.shape
in_tensor = op.input
if in_tensor.chunk_shape == (1, 1):
in_chunk = in_tensor.chunks[0]
chunk_op = op.copy().reset_key()
qr_chunks = chunk_op.new_chunks([in_chunk], shape=(q_shape, r_shape), index=in_chunk.index,
kws=[{'side': 'q'}, {'side': 'r'}])
q_chunk, r_chunk = qr_chunks
new_op = op.copy()
kws = [
{'chunks': [q_chunk], 'nsplits': ((q_shape[0],), (q_shape[1],)),
'dtype': q_dtype, 'shape': q_shape, 'order': q.order},
{'chunks': [r_chunk], 'nsplits': ((r_shape[0],), (r_shape[1],)),
'dtype': r_dtype, 'shape': r_shape, 'order': r.order}
]
return new_op.new_tensors(op.inputs, kws=kws)
elif op.method == 'tsqr':
return TSQR.tile(op)
elif op.method == 'sfqr':
return SFQR.tile(op)
else:
raise NotImplementedError('Only tsqr method supported for now')
@classmethod
def execute(cls, ctx, op):
(a,), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
with device(device_id):
q, r = xp.linalg.qr(a)
qc, rc = op.outputs
ctx[qc.key] = q
ctx[rc.key] = r
def qr(a, method='tsqr'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
method: {'tsqr', 'sfqr'}, optional
method to calculate qr factorization, tsqr as default
TSQR is presented in:
A. Benson, D. Gleich, and J. Demmel.
Direct QR factorizations for tall-and-skinny matrices in
MapReduce architectures.
IEEE International Conference on Big Data, 2013.
http://arxiv.org/abs/1301.1071
    SFQR is a QR decomposition for a fat-and-short matrix:
A = [A1, A2, A3, ...], A1 may be decomposed as A1 = Q1 * R1,
for A = Q * R, Q = Q1, R = [R1, R2, R3, ...] where A2 = Q1 * R2, A3 = Q1 * R3, ...
Returns
-------
q : Tensor of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : Tensor of float or complex, optional
The upper-triangular matrix.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Examples
--------
>>> import mars.tensor as mt
>>> a = mt.random.randn(9, 6)
>>> q, r = mt.linalg.qr(a)
>>> mt.allclose(a, mt.dot(q, r)).execute() # a does equal qr
True
"""
op = TensorQR(method=method)
return op(a)
|
the-stack_0_6809 | import json
import plotly
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
app = Flask(__name__)
def tokenize(text):
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
# load data
# NOTE: the database path and table name below must match what the ETL pipeline wrote;
# 'DisasterResponse' is used here as the assumed table name.
engine = create_engine('sqlite:///../data/DisasterResponse.db')
df = pd.read_sql_table('DisasterResponse', engine)
# load model
model = joblib.load("/home/workspace/models/classifier.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
# extract data needed for visuals
# TODO: Below is an example - modify to extract data for your own visuals
genre_counts = df.groupby('genre').count()['message']
genre_names = list(genre_counts.index)
# create visuals
# TODO: Below is an example - modify to create your own visuals
graphs = [
{
'data': [
Bar(
x=genre_names,
y=genre_counts
)
],
'layout': {
'title': 'Distribution of Message Genres',
'yaxis': {
'title': "Count"
},
'xaxis': {
'title': "Genre"
}
}
}
]
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
# This will render the go.html Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
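# With the defaults in main() below, the development server listens on all
# interfaces on port 3001, so the dashboard is reachable at http://localhost:3001/.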
def main():
app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main() |
the-stack_0_6810 | # -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2018 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
import hashlib
import functools
from typing import Union, Tuple, Optional
from ctypes import (
byref, c_byte, c_int, c_uint, c_char_p, c_size_t, c_void_p, create_string_buffer,
CFUNCTYPE, POINTER, cast
)
from .util import bfh, bh2u, assert_bytes, to_bytes, InvalidPassword, profiler, randrange
from .crypto import (sha256d, aes_encrypt_with_iv, aes_decrypt_with_iv, hmac_oneshot)
from . import constants
from .logging import get_logger
from .ecc_fast import _libsecp256k1, SECP256K1_EC_UNCOMPRESSED
_logger = get_logger(__name__)
def string_to_number(b: bytes) -> int:
return int.from_bytes(b, byteorder='big', signed=False)
def sig_string_from_der_sig(der_sig: bytes) -> bytes:
r, s = get_r_and_s_from_der_sig(der_sig)
return sig_string_from_r_and_s(r, s)
def der_sig_from_sig_string(sig_string: bytes) -> bytes:
r, s = get_r_and_s_from_sig_string(sig_string)
return der_sig_from_r_and_s(r, s)
def der_sig_from_r_and_s(r: int, s: int) -> bytes:
sig_string = (int.to_bytes(r, length=32, byteorder="big") +
int.to_bytes(s, length=32, byteorder="big"))
sig = create_string_buffer(64)
ret = _libsecp256k1.secp256k1_ecdsa_signature_parse_compact(_libsecp256k1.ctx, sig, sig_string)
if not ret:
raise Exception("Bad signature")
ret = _libsecp256k1.secp256k1_ecdsa_signature_normalize(_libsecp256k1.ctx, sig, sig)
der_sig = create_string_buffer(80) # this much space should be enough
der_sig_size = c_size_t(len(der_sig))
ret = _libsecp256k1.secp256k1_ecdsa_signature_serialize_der(_libsecp256k1.ctx, der_sig, byref(der_sig_size), sig)
if not ret:
raise Exception("failed to serialize DER sig")
der_sig_size = der_sig_size.value
return bytes(der_sig)[:der_sig_size]
def get_r_and_s_from_der_sig(der_sig: bytes) -> Tuple[int, int]:
assert isinstance(der_sig, bytes)
sig = create_string_buffer(64)
ret = _libsecp256k1.secp256k1_ecdsa_signature_parse_der(_libsecp256k1.ctx, sig, der_sig, len(der_sig))
if not ret:
raise Exception("Bad signature")
ret = _libsecp256k1.secp256k1_ecdsa_signature_normalize(_libsecp256k1.ctx, sig, sig)
compact_signature = create_string_buffer(64)
_libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(_libsecp256k1.ctx, compact_signature, sig)
r = int.from_bytes(compact_signature[:32], byteorder="big")
s = int.from_bytes(compact_signature[32:], byteorder="big")
return r, s
def get_r_and_s_from_sig_string(sig_string: bytes) -> Tuple[int, int]:
if not (isinstance(sig_string, bytes) and len(sig_string) == 64):
raise Exception("sig_string must be bytes, and 64 bytes exactly")
sig = create_string_buffer(64)
ret = _libsecp256k1.secp256k1_ecdsa_signature_parse_compact(_libsecp256k1.ctx, sig, sig_string)
if not ret:
raise Exception("Bad signature")
ret = _libsecp256k1.secp256k1_ecdsa_signature_normalize(_libsecp256k1.ctx, sig, sig)
compact_signature = create_string_buffer(64)
_libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(_libsecp256k1.ctx, compact_signature, sig)
r = int.from_bytes(compact_signature[:32], byteorder="big")
s = int.from_bytes(compact_signature[32:], byteorder="big")
return r, s
def sig_string_from_r_and_s(r: int, s: int) -> bytes:
sig_string = (int.to_bytes(r, length=32, byteorder="big") +
int.to_bytes(s, length=32, byteorder="big"))
sig = create_string_buffer(64)
ret = _libsecp256k1.secp256k1_ecdsa_signature_parse_compact(_libsecp256k1.ctx, sig, sig_string)
if not ret:
raise Exception("Bad signature")
ret = _libsecp256k1.secp256k1_ecdsa_signature_normalize(_libsecp256k1.ctx, sig, sig)
compact_signature = create_string_buffer(64)
_libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(_libsecp256k1.ctx, compact_signature, sig)
return bytes(compact_signature)
def _x_and_y_from_pubkey_bytes(pubkey: bytes) -> Tuple[int, int]:
assert isinstance(pubkey, bytes), f'pubkey must be bytes, not {type(pubkey)}'
pubkey_ptr = create_string_buffer(64)
ret = _libsecp256k1.secp256k1_ec_pubkey_parse(
_libsecp256k1.ctx, pubkey_ptr, pubkey, len(pubkey))
if not ret:
raise InvalidECPointException('public key could not be parsed or is invalid')
pubkey_serialized = create_string_buffer(65)
pubkey_size = c_size_t(65)
_libsecp256k1.secp256k1_ec_pubkey_serialize(
_libsecp256k1.ctx, pubkey_serialized, byref(pubkey_size), pubkey_ptr, SECP256K1_EC_UNCOMPRESSED)
pubkey_serialized = bytes(pubkey_serialized)
assert pubkey_serialized[0] == 0x04, pubkey_serialized
x = int.from_bytes(pubkey_serialized[1:33], byteorder='big', signed=False)
y = int.from_bytes(pubkey_serialized[33:65], byteorder='big', signed=False)
return x, y
class InvalidECPointException(Exception):
"""e.g. not on curve, or infinity"""
@functools.total_ordering
class ECPubkey(object):
def __init__(self, b: Optional[bytes]):
if b is not None:
assert isinstance(b, (bytes, bytearray)), f'pubkey must be bytes-like, not {type(b)}'
if isinstance(b, bytearray):
b = bytes(b)
self._x, self._y = _x_and_y_from_pubkey_bytes(b)
else:
self._x, self._y = None, None
@classmethod
def from_sig_string(cls, sig_string: bytes, recid: int, msg_hash: bytes) -> 'ECPubkey':
assert_bytes(sig_string)
if len(sig_string) != 64:
raise Exception(f'wrong encoding used for signature? len={len(sig_string)} (should be 64)')
if recid < 0 or recid > 3:
raise ValueError('recid is {}, but should be 0 <= recid <= 3'.format(recid))
sig65 = create_string_buffer(65)
ret = _libsecp256k1.secp256k1_ecdsa_recoverable_signature_parse_compact(
_libsecp256k1.ctx, sig65, sig_string, recid)
if not ret:
raise Exception('failed to parse signature')
pubkey = create_string_buffer(64)
ret = _libsecp256k1.secp256k1_ecdsa_recover(_libsecp256k1.ctx, pubkey, sig65, msg_hash)
if not ret:
raise InvalidECPointException('failed to recover public key')
return ECPubkey._from_libsecp256k1_pubkey_ptr(pubkey)
@classmethod
def from_signature65(cls, sig: bytes, msg_hash: bytes) -> Tuple['ECPubkey', bool]:
if len(sig) != 65:
raise Exception(f'wrong encoding used for signature? len={len(sig)} (should be 65)')
nV = sig[0]
if nV < 27 or nV >= 35:
raise Exception("Bad encoding")
if nV >= 31:
compressed = True
nV -= 4
else:
compressed = False
recid = nV - 27
return cls.from_sig_string(sig[1:], recid, msg_hash), compressed
@classmethod
def from_x_and_y(cls, x: int, y: int) -> 'ECPubkey':
_bytes = (b'\x04'
+ int.to_bytes(x, length=32, byteorder='big', signed=False)
+ int.to_bytes(y, length=32, byteorder='big', signed=False))
return ECPubkey(_bytes)
def get_public_key_bytes(self, compressed=True):
if self.is_at_infinity(): raise Exception('point is at infinity')
x = int.to_bytes(self.x(), length=32, byteorder='big', signed=False)
y = int.to_bytes(self.y(), length=32, byteorder='big', signed=False)
if compressed:
header = b'\x03' if self.y() & 1 else b'\x02'
return header + x
else:
header = b'\x04'
return header + x + y
def get_public_key_hex(self, compressed=True):
return bh2u(self.get_public_key_bytes(compressed))
def point(self) -> Tuple[int, int]:
return self.x(), self.y()
def x(self) -> int:
return self._x
def y(self) -> int:
return self._y
def _to_libsecp256k1_pubkey_ptr(self):
pubkey = create_string_buffer(64)
public_pair_bytes = self.get_public_key_bytes(compressed=False)
ret = _libsecp256k1.secp256k1_ec_pubkey_parse(
_libsecp256k1.ctx, pubkey, public_pair_bytes, len(public_pair_bytes))
if not ret:
raise Exception('public key could not be parsed or is invalid')
return pubkey
@classmethod
def _from_libsecp256k1_pubkey_ptr(cls, pubkey) -> 'ECPubkey':
pubkey_serialized = create_string_buffer(65)
pubkey_size = c_size_t(65)
_libsecp256k1.secp256k1_ec_pubkey_serialize(
_libsecp256k1.ctx, pubkey_serialized, byref(pubkey_size), pubkey, SECP256K1_EC_UNCOMPRESSED)
return ECPubkey(bytes(pubkey_serialized))
def __repr__(self):
if self.is_at_infinity():
return f"<ECPubkey infinity>"
return f"<ECPubkey {self.get_public_key_hex()}>"
def __mul__(self, other: int):
if not isinstance(other, int):
raise TypeError('multiplication not defined for ECPubkey and {}'.format(type(other)))
other %= CURVE_ORDER
if self.is_at_infinity() or other == 0:
return POINT_AT_INFINITY
pubkey = self._to_libsecp256k1_pubkey_ptr()
ret = _libsecp256k1.secp256k1_ec_pubkey_tweak_mul(_libsecp256k1.ctx, pubkey, other.to_bytes(32, byteorder="big"))
if not ret:
return POINT_AT_INFINITY
return ECPubkey._from_libsecp256k1_pubkey_ptr(pubkey)
def __rmul__(self, other: int):
return self * other
def __add__(self, other):
if not isinstance(other, ECPubkey):
raise TypeError('addition not defined for ECPubkey and {}'.format(type(other)))
if self.is_at_infinity(): return other
if other.is_at_infinity(): return self
pubkey1 = self._to_libsecp256k1_pubkey_ptr()
pubkey2 = other._to_libsecp256k1_pubkey_ptr()
pubkey_sum = create_string_buffer(64)
pubkey1 = cast(pubkey1, c_char_p)
pubkey2 = cast(pubkey2, c_char_p)
array_of_pubkey_ptrs = (c_char_p * 2)(pubkey1, pubkey2)
ret = _libsecp256k1.secp256k1_ec_pubkey_combine(_libsecp256k1.ctx, pubkey_sum, array_of_pubkey_ptrs, 2)
if not ret:
return POINT_AT_INFINITY
return ECPubkey._from_libsecp256k1_pubkey_ptr(pubkey_sum)
def __eq__(self, other) -> bool:
if not isinstance(other, ECPubkey):
return False
return self.point() == other.point()
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.point())
def __lt__(self, other):
if not isinstance(other, ECPubkey):
raise TypeError('comparison not defined for ECPubkey and {}'.format(type(other)))
return (self.x() or 0) < (other.x() or 0)
def verify_message_for_address(self, sig65: bytes, message: bytes, algo=lambda x: sha256d(msg_magic(x))) -> None:
assert_bytes(message)
h = algo(message)
public_key, compressed = self.from_signature65(sig65, h)
# check public key
if public_key != self:
raise Exception("Bad signature")
# check message
self.verify_message_hash(sig65[1:], h)
# TODO return bool instead of raising
def verify_message_hash(self, sig_string: bytes, msg_hash: bytes) -> None:
assert_bytes(sig_string)
if len(sig_string) != 64:
raise Exception(f'wrong encoding used for signature? len={len(sig_string)} (should be 64)')
if not (isinstance(msg_hash, bytes) and len(msg_hash) == 32):
raise Exception("msg_hash must be bytes, and 32 bytes exactly")
sig = create_string_buffer(64)
ret = _libsecp256k1.secp256k1_ecdsa_signature_parse_compact(_libsecp256k1.ctx, sig, sig_string)
if not ret:
raise Exception("Bad signature")
ret = _libsecp256k1.secp256k1_ecdsa_signature_normalize(_libsecp256k1.ctx, sig, sig)
pubkey = self._to_libsecp256k1_pubkey_ptr()
if 1 != _libsecp256k1.secp256k1_ecdsa_verify(_libsecp256k1.ctx, sig, msg_hash, pubkey):
raise Exception("Bad signature")
def encrypt_message(self, message: bytes, magic: bytes = b'BIE1') -> bytes:
"""
ECIES encryption/decryption methods; AES-128-CBC with PKCS7 is used as the cipher; hmac-sha256 is used as the mac
"""
assert_bytes(message)
ephemeral = ECPrivkey.generate_random_key()
ecdh_key = (self * ephemeral.secret_scalar).get_public_key_bytes(compressed=True)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
ciphertext = aes_encrypt_with_iv(key_e, iv, message)
ephemeral_pubkey = ephemeral.get_public_key_bytes(compressed=True)
encrypted = magic + ephemeral_pubkey + ciphertext
mac = hmac_oneshot(key_m, encrypted, hashlib.sha256)
return base64.b64encode(encrypted + mac)
@classmethod
def order(cls):
return CURVE_ORDER
def is_at_infinity(self):
return self == POINT_AT_INFINITY
@classmethod
def is_pubkey_bytes(cls, b: bytes):
try:
ECPubkey(b)
return True
except:
return False
GENERATOR = ECPubkey(bytes.fromhex('0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798'
'483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8'))
CURVE_ORDER = 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFE_BAAEDCE6_AF48A03B_BFD25E8C_D0364141
POINT_AT_INFINITY = ECPubkey(None)
def msg_magic(message: bytes) -> bytes:
from .bitcoin import var_int
length = bfh(var_int(len(message)))
return b"\x15Qtum Signed Message:\n" + length + message
def verify_signature(pubkey: bytes, sig: bytes, h: bytes) -> bool:
try:
ECPubkey(pubkey).verify_message_hash(sig, h)
except:
return False
return True
def verify_message_with_address(address: str, sig65: bytes, message: bytes, *, net=None):
from .bitcoin import pubkey_to_address
assert_bytes(sig65, message)
if net is None: net = constants.net
try:
h = sha256d(msg_magic(message))
public_key, compressed = ECPubkey.from_signature65(sig65, h)
# check public key using the address
pubkey_hex = public_key.get_public_key_hex(compressed)
for txin_type in ['p2pkh','p2wpkh','p2wpkh-p2sh']:
addr = pubkey_to_address(txin_type, pubkey_hex, net=net)
if address == addr:
break
else:
raise Exception("Bad signature")
# check message
public_key.verify_message_hash(sig65[1:], h)
return True
except Exception as e:
_logger.info(f"Verification error: {repr(e)}")
return False
def is_secret_within_curve_range(secret: Union[int, bytes]) -> bool:
if isinstance(secret, bytes):
secret = string_to_number(secret)
return 0 < secret < CURVE_ORDER
class ECPrivkey(ECPubkey):
def __init__(self, privkey_bytes: bytes):
assert_bytes(privkey_bytes)
if len(privkey_bytes) != 32:
raise Exception('unexpected size for secret. should be 32 bytes, not {}'.format(len(privkey_bytes)))
secret = string_to_number(privkey_bytes)
if not is_secret_within_curve_range(secret):
raise InvalidECPointException('Invalid secret scalar (not within curve order)')
self.secret_scalar = secret
pubkey = GENERATOR * secret
super().__init__(pubkey.get_public_key_bytes(compressed=False))
@classmethod
def from_secret_scalar(cls, secret_scalar: int):
secret_bytes = int.to_bytes(secret_scalar, length=32, byteorder='big', signed=False)
return ECPrivkey(secret_bytes)
@classmethod
def from_arbitrary_size_secret(cls, privkey_bytes: bytes):
"""This method is only for legacy reasons. Do not introduce new code that uses it.
Unlike the default constructor, this method does not require len(privkey_bytes) == 32,
and the secret does not need to be within the curve order either.
"""
return ECPrivkey(cls.normalize_secret_bytes(privkey_bytes))
@classmethod
def normalize_secret_bytes(cls, privkey_bytes: bytes) -> bytes:
scalar = string_to_number(privkey_bytes) % CURVE_ORDER
if scalar == 0:
raise Exception('invalid EC private key scalar: zero')
privkey_32bytes = int.to_bytes(scalar, length=32, byteorder='big', signed=False)
return privkey_32bytes
def __repr__(self):
return f"<ECPrivkey {self.get_public_key_hex()}>"
@classmethod
def generate_random_key(cls):
randint = randrange(CURVE_ORDER)
ephemeral_exponent = int.to_bytes(randint, length=32, byteorder='big', signed=False)
return ECPrivkey(ephemeral_exponent)
def get_secret_bytes(self) -> bytes:
return int.to_bytes(self.secret_scalar, length=32, byteorder='big', signed=False)
def sign(self, msg_hash: bytes, sigencode=None) -> bytes:
if not (isinstance(msg_hash, bytes) and len(msg_hash) == 32):
raise Exception("msg_hash to be signed must be bytes, and 32 bytes exactly")
if sigencode is None:
sigencode = sig_string_from_r_and_s
privkey_bytes = self.secret_scalar.to_bytes(32, byteorder="big")
nonce_function = None
sig = create_string_buffer(64)
def sign_with_extra_entropy(extra_entropy):
ret = _libsecp256k1.secp256k1_ecdsa_sign(
_libsecp256k1.ctx, sig, msg_hash, privkey_bytes,
nonce_function, extra_entropy)
if not ret:
raise Exception('the nonce generation function failed, or the private key was invalid')
compact_signature = create_string_buffer(64)
_libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(_libsecp256k1.ctx, compact_signature, sig)
r = int.from_bytes(compact_signature[:32], byteorder="big")
s = int.from_bytes(compact_signature[32:], byteorder="big")
return r, s
r, s = sign_with_extra_entropy(extra_entropy=None)
counter = 0
while r >= 2**255: # grind for low R value https://github.com/bitcoin/bitcoin/pull/13666
counter += 1
extra_entropy = counter.to_bytes(32, byteorder="little")
r, s = sign_with_extra_entropy(extra_entropy=extra_entropy)
sig_string = sig_string_from_r_and_s(r, s)
self.verify_message_hash(sig_string, msg_hash)
sig = sigencode(r, s)
return sig
def sign_transaction(self, hashed_preimage: bytes) -> bytes:
return self.sign(hashed_preimage, sigencode=der_sig_from_r_and_s)
def sign_message(self, message: bytes, is_compressed: bool, algo=lambda x: sha256d(msg_magic(x))) -> bytes:
def bruteforce_recid(sig_string):
for recid in range(4):
sig65 = construct_sig65(sig_string, recid, is_compressed)
try:
self.verify_message_for_address(sig65, message, algo)
return sig65, recid
except Exception as e:
continue
else:
raise Exception("error: cannot sign message. no recid fits..")
message = to_bytes(message, 'utf8')
msg_hash = algo(message)
sig_string = self.sign(msg_hash, sigencode=sig_string_from_r_and_s)
sig65, recid = bruteforce_recid(sig_string)
return sig65
def decrypt_message(self, encrypted: Union[str, bytes], magic: bytes=b'BIE1') -> bytes:
encrypted = base64.b64decode(encrypted) # type: bytes
if len(encrypted) < 85:
raise Exception('invalid ciphertext: length')
magic_found = encrypted[:4]
ephemeral_pubkey_bytes = encrypted[4:37]
ciphertext = encrypted[37:-32]
mac = encrypted[-32:]
if magic_found != magic:
raise Exception('invalid ciphertext: invalid magic bytes')
try:
ephemeral_pubkey = ECPubkey(ephemeral_pubkey_bytes)
except InvalidECPointException as e:
raise Exception('invalid ciphertext: invalid ephemeral pubkey') from e
ecdh_key = (ephemeral_pubkey * self.secret_scalar).get_public_key_bytes(compressed=True)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
if mac != hmac_oneshot(key_m, encrypted[:-32], hashlib.sha256):
raise InvalidPassword()
return aes_decrypt_with_iv(key_e, iv, ciphertext)
def construct_sig65(sig_string: bytes, recid: int, is_compressed: bool) -> bytes:
comp = 4 if is_compressed else 0
return bytes([27 + recid + comp]) + sig_string
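# Illustrative usage sketch (not part of the original module; it assumes this file
# is imported as part of its package, since the relative imports above prevent
# running it standalone):
#
#   privkey = ECPrivkey.generate_random_key()
#   sig65 = privkey.sign_message(b'hello', is_compressed=True)
#   privkey.verify_message_for_address(sig65, b'hello')   # raises on a bad signature
#
#   blob = privkey.encrypt_message(b'secret')              # ECIES, base64-encoded
#   assert privkey.decrypt_message(blob) == b'secret'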
|
the-stack_0_6811 |
# finding the count of even numbers in a list (0 through 10).
#x=10%2
#print("x",x)
#y=7%2
#print("y",y)
# important note: spaces and tabs (indentation) matter in python!!!!
nums = [0,1,2,3,4,5,6,7,8,9,10]
count=0
for item in nums:
    print("is it even or not:", item)
    if item%2==0: # check whether the number is even
        # yes, it is an even number!
        print("yes, it is even:", item)
        count=count+1 # add one -- this is how we keep the running tally
        print ("count:",count)
    else:
        print ("not even, do not increase count! count=",count)
|
the-stack_0_6812 | from parameterized import parameterized
from test_plus.test import TestCase
from ...generic.tests.test_views import (
AuthorshipViewSetMixin,
GenericViewSetMixin,
OrderingViewSetMixin,
)
from ..factories import InstitutionFactory
from ..serializers import InstitutionSerializer
class InstitutionViewSetTestCase(
AuthorshipViewSetMixin, GenericViewSetMixin, OrderingViewSetMixin, TestCase
):
basename = "institution"
serializer_class = InstitutionSerializer
factory_class = InstitutionFactory
queries_less_than_limit = 11
ordering_fields = [
"comment",
"-comment",
"created_on",
"created_by__username",
"-created_by__username,comment",
]
def validate_item(self, item):
self.assertEqual(item["name"], self.obj.name)
self.assertEqual(item["comment"], self.obj.comment)
for i, tag in enumerate(item["tags"]):
self.assertEqual(tag, self.obj.tags.all()[i].name)
@parameterized.expand(
[
("CAT", ["CAT", "CATASTROPHE"]),
("cat", ["CAT", "CATASTROPHE"]),
("KITTY", ["KITTY"]),
("KIT", ["KITTY"]),
("INVALID", []),
]
)
def test_should_filter_by_name(self, query, expected_names):
InstitutionFactory(name="KITTY")
InstitutionFactory(name="CAT")
InstitutionFactory(name="CATASTROPHE")
self.login_required()
response = self.client.get(
self.get_url(name="list"),
content_type="application/json",
data={"query": query},
)
self.assertEqual(response.status_code, 200, response.json())
names = [item["name"] for item in response.json()["results"]]
self.assertCountEqual(expected_names, names)
|
the-stack_0_6813 | #------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# Name: TestModelLowestPoint.py
# Description: Automatic Test of Lowest Point Model
# Requirements: ArcGIS Desktop Standard with Spatial Analyst Extension
#------------------------------------------------------------------------------
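# Usage note (illustrative): run this script with the ArcGIS Desktop Python
# interpreter so that arcpy and the project's TestUtilities module are importable,
# e.g.  python TestModelLowestPoint.py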
import arcpy
import os
import sys
import traceback
import TestUtilities
def RunTest():
try:
arcpy.AddMessage("Starting Test: LowestPoint")
if arcpy.CheckExtension("Spatial") == "Available":
arcpy.CheckOutExtension("Spatial")
else:
# Raise a custom exception
raise Exception("LicenseError")
# WORKAROUND
print("Creating New Scratch Workspace (Workaround)")
TestUtilities.createScratch()
# Verify the expected configuration exists
inputPolygonFC = os.path.join(TestUtilities.inputGDB, "samplePolygonArea")
inputSurface = os.path.join(TestUtilities.inputGDB, "Jbad_SRTM_USGS_EROS")
outputPointsFC = os.path.join(TestUtilities.outputGDB, "LowestPoint")
toolbox = TestUtilities.toolbox
# Check For Valid Input
objects2Check = []
objects2Check.extend([inputPolygonFC, inputSurface, toolbox])
for object2Check in objects2Check :
desc = arcpy.Describe(object2Check)
if desc == None :
raise Exception("Bad Input")
else :
print("Valid Object: " + desc.Name)
# Set environment settings
print("Running from: " + str(TestUtilities.currentPath))
print("Geodatabase path: " + str(TestUtilities.geodatabasePath))
arcpy.env.overwriteOutput = True
arcpy.env.scratchWorkspace = TestUtilities.scratchGDB
arcpy.ImportToolbox(toolbox, "VandR")
inputFeatureCount = int(arcpy.GetCount_management(inputPolygonFC).getOutput(0))
print("Input FeatureClass: " + str(inputPolygonFC))
print("Input Feature Count: " + str(inputFeatureCount))
if (inputFeatureCount < 1) :
print("Invalid Input Feature Count: " + str(inputFeatureCount))
########################################################3
# Execute the Model under test:
arcpy.LowestPoint_VandR(inputPolygonFC, inputSurface, outputPointsFC)
########################################################3
# Verify the results
outputFeatureCount = int(arcpy.GetCount_management(outputPointsFC).getOutput(0))
print("Output FeatureClass: " + str(outputPointsFC))
print("Output Feature Count: " + str(outputFeatureCount))
        if (outputFeatureCount < 1) :
print("Invalid Output Feature Count: " + str(outputFeatureCount))
raise Exception("Test Failed")
# WORKAROUND: delete scratch db
print("Deleting Scratch Workspace (Workaround)")
TestUtilities.deleteScratch()
print("Test Successful")
except arcpy.ExecuteError:
# Get the tool error messages
msgs = arcpy.GetMessages()
arcpy.AddError(msgs)
# return a system error code
sys.exit(-1)
except Exception as e:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Return python error messages for use in script tool or Python Window
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
# return a system error code
sys.exit(-1)
finally:
        # Check in the Spatial Analyst extension
arcpy.CheckInExtension("Spatial")
RunTest() |
the-stack_0_6817 | import math
import time
import datetime
class LoadBar:
"""
"""
def __init__(self, max=100, size=20, head='.', body='.', border_left='[', border_right=']', show_step=True,
show_percentage=True, show_eta=True, title=None, show_total_time=True, show_time=False):
"""
:param max: int: Max value of the load
"""
self.loading = False
self.max = max
self.size = size
self.head = head
self.body = body
self.border_left = border_left
self.border_right = border_right
self.show_step = show_step
self.show_percentage = show_percentage
# ----- ETA -----
self.show_eta = show_eta
self.eta = None
self.eta_last_i_t = None
self.start_time = None
self.stop_time = None
self.show_time = show_time
self.show_total_time = show_total_time or show_eta or show_time
# ----- End ETA -----
self.title = title
self._i = 0 # State of the progress
@property
def i(self):
return self._i
@i.setter
def i(self, i):
if self.use_time:
# Do some work to see how long it is gonna last
if self.eta_last_i_t is not None:
if self.eta_last_i_t[0] > i:
# Don't want to go backward
self.eta = None
self.eta_last_i_t = None
elif self.eta_last_i_t[0] < i:
# Do nothing if this is the same
t = time.time()
eta = (t - self.eta_last_i_t[1]) * self.max / (i - self.eta_last_i_t[0])
self.eta = eta if self.eta is None else 0.5 * eta + 0.5 * self.eta
self.eta_last_i_t = (i, t)
else:
# First iteration, I have to set up for the next one
self.eta_last_i_t = (i, time.time())
self._i = i
@property
def use_time(self):
return self.show_eta or self.show_total_time
def start(self, end=''):
"""
:return:
"""
self.loading = True
if self.use_time:
self.start_time = time.time()
self.update(step=0, end=end)
def update(self, step=None, to_add=None, end='', start='\r'):
"""
:param start:
:param end:
:param step:
:param to_add:
:return:
"""
if step is None:
to_add = 1 if to_add is None else to_add
self.i = self.i + to_add
else:
self.i = step
l = list()
if self.title is not None: l.append(self.title)
if self.show_step: l.append(self._get_step())
if self.show_percentage: l.append(self._get_percentage())
l.append(self._get_bar())
if self.show_time or (self.show_total_time and not self.loading): l.append(self._get_time())
if self.show_eta and self.loading: l.append(self._get_eta())
s = ' '.join(l)
self._print(s, end=end, start=start)
def end(self):
self.loading = False
if self.use_time:
self.stop_time = time.time()
self.update(step=self.max, end='\n')
def _print(self, to_print, end='', flush=True, start='\r'):
"""
Rewrite print function with default args
:param to_print:
:param end:
:param flush:
:param start
:return:
"""
# \r used to put the cursor at the beginning of the line
print(f'{start}{to_print}', end=end, flush=flush)
def _get_bar(self):
done = int(min(self.i, self.max) * self.size // self.max)
todo = self.size - done
todo_head = min(todo, 1) # 1 or 0
todo_blank = todo - todo_head
return f'{self.border_left}{self.body * done}{self.head * todo_head}{" " * todo_blank}{self.border_right}'
def _get_step(self):
if not self.show_step:
return ''
digit_nb = int(1 + math.floor(math.log10(self.max)))
return '{0:{1}}'.format(self.i, digit_nb) + f'/{self.max}'
def _get_percentage(self):
if not self.show_percentage:
return ''
percentage = self.i * 100 / self.max
percentage_string = f'{percentage:3.0f}%'
if self.show_step:
percentage_string = f'({percentage_string})'
return percentage_string
def _get_time(self):
if self.loading:
if not self.show_time:
return ''
else:
current_time = time.time() - self.start_time
current_time = datetime.timedelta(seconds=int(current_time))
return f'Time {current_time}'
else:
if not self.show_total_time:
return ''
if self.start_time is not None and self.stop_time is not None:
total_time = int(self.stop_time - self.start_time)
total_time = datetime.timedelta(seconds=total_time)
return f'Time {total_time}'
def _get_eta(self):
eta = '-:--:--' # if self.eta is None
if self.loading:
if not self.show_eta:
return ''
if self.eta is not None:
eta = self.eta * (self.max - self.i) / self.max
eta = datetime.timedelta(seconds=int(eta))
return f'ETA {eta}'
else:
return ''
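# Minimal usage sketch (illustrative, not part of the original module): drives the
# bar through 50 steps of simulated work and prints the progress line to stdout.
if __name__ == '__main__':
    bar = LoadBar(max=50, title='Demo')
    bar.start()
    for _ in range(50):
        time.sleep(0.02)  # simulate a unit of work
        bar.update()      # advance the bar by one step
    bar.end()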
|
the-stack_0_6818 | import base64
import json
import os
import tempfile
import zipfile
none = "d3043820717d74d9a17694c176d39733"
# region Application
class Application:
def __init__(self, name):
self.name = name
# endregion
# region Environment
class Environment:
def __init__(self, name, application_id, providers=none, locations=none):
self.name = name
self.application_id = application_id
self.preferences = {'providers': providers, 'locations': locations}
# endregion
# region Environment
class Function:
def __init__(
self,
name,
environment_id,
directory,
handler,
runtime,
memory,
timeout):
self.name = name
self.environment_id = environment_id
self.directory = directory
self.handler = handler
self.runtime = runtime
self.memory = memory
self.timeout = timeout
# endregion
class ApplicationCreationRequest:
def __init__(self, application):
self.application = application
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
class EnvironmentCreationRequest:
def __init__(self, environment):
self.environment = environment
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
class FunctionCreationRequest:
def __init__(self, function, print_output=True):
self.should_print_output = print_output
self.function = self.rebuildFunctionInlineCode(function)
def print_output(self, output):
if self.should_print_output is True:
print(output)
def rebuildFunctionInlineCode(self, function):
directory = function.directory
handler = function.handler
with tempfile.NamedTemporaryFile() as temp:
self.zip(directory, temp.name)
temp.seek(0)
            base64content = base64.b64encode(temp.read()).decode('utf-8')  # decode so the payload is JSON-serializable on Python 3
function.code = {'source': base64content, 'handler': handler}
del function.directory
del function.handler
return function
def zip(self, src, dst):
zf = zipfile.ZipFile(dst, "w", zipfile.ZIP_DEFLATED)
abs_src = os.path.abspath(src)
for dirname, subdirs, files in os.walk(src):
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = absname[len(abs_src) + 1:]
self.print_output(
"collecting file {}".format(
os.path.join(
dirname, filename)))
zf.write(absname, arcname)
zf.close()
def toJSON(self):
del self.should_print_output
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
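# Illustrative usage sketch (not part of the original SDK). The ids, handler and
# directory below are placeholders: './hello_world' must be an existing folder
# containing the function's code for the zip/base64 step to succeed.
if __name__ == '__main__':
    fn = Function(
        name='hello-world',
        environment_id='env-12345',   # hypothetical environment id
        directory='./hello_world',    # local folder that gets zipped and inlined
        handler='handler.main',
        runtime='python2.7',
        memory=128,
        timeout=30,
    )
    print(FunctionCreationRequest(fn, print_output=False).toJSON())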
|
the-stack_0_6819 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import os
import sys
from setuptools import setup, find_packages
# pylint: disable=redefined-builtin
here = os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name
with open(os.path.join(here, "README.rst"), encoding="utf-8") as fid:
long_description = fid.read() # pylint: disable=invalid-name
with open(os.path.join(here, "requirements.txt"), encoding="utf-8") as fid:
install_requires = [line for line in fid.read().splitlines() if line.strip()]
setup(
name="aas-core-meta",
version="2021.11.20a2",
description="Provide meta-models for Asset Administration Shell information model.",
long_description=long_description,
url="https://github.com/aas-core-works/aas-core-meta",
author="Nico Braunisch, Marko Ristin, Robert Lehmann, Marcin Sadurski, Manuel Sauer",
author_email="[email protected]",
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.8",
],
license="License :: OSI Approved :: MIT License",
keywords="asset administration shell,design-by-contract,meta-model",
packages=find_packages(exclude=["tests"]),
install_requires=install_requires,
# fmt: off
extras_require={
"dev": [
"black==21.11b0",
"mypy==0.910",
],
},
# fmt: on
py_modules=["aas_core_meta"],
package_data={"aas_core_meta": ["py.typed"]},
data_files=[(".", ["LICENSE", "README.rst", "requirements.txt"])],
)
|
the-stack_0_6820 | import argparse
from io import BytesIO
from urllib.parse import unquote_plus
from urllib.request import urlopen
from flask import Flask, request, send_file
from waitress import serve
from ..bg import remove
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def index():
file_content = ""
if request.method == "POST":
if "file" not in request.files:
return {"error": "missing post form param 'file'"}, 400
file_content = request.files["file"].read()
if request.method == "GET":
url = request.args.get("url", type=str)
if url is None:
return {"error": "missing query param 'url'"}, 400
file_content = urlopen(unquote_plus(url)).read()
if file_content == "":
return {"error": "File content is empty"}, 400
alpha_matting = "a" in request.values
af = request.values.get("af", type=int, default=240)
ab = request.values.get("ab", type=int, default=10)
ae = request.values.get("ae", type=int, default=10)
model = request.args.get("model", type=str, default="u2net")
if model not in ("u2net", "u2netp"):
return {"error": "invalid query param 'model'"}, 400
try:
return send_file(
BytesIO(
remove(
file_content,
model_name=model,
alpha_matting=alpha_matting,
alpha_matting_foreground_threshold=af,
alpha_matting_background_threshold=ab,
alpha_matting_erode_structure_size=ae
)
),
mimetype="image/png",
)
except Exception as e:
app.logger.exception(e, exc_info=True)
return {"error": "oops, something went wrong!"}, 500
def main():
ap = argparse.ArgumentParser()
ap.add_argument(
"-a",
"--addr",
default="0.0.0.0",
type=str,
help="The IP address to bind to.",
)
ap.add_argument(
"-p",
"--port",
default=5000,
type=int,
help="The port to bind to.",
)
args = ap.parse_args()
serve(app, host=args.addr, port=args.port)
if __name__ == "__main__":
main()
|
the-stack_0_6821 | import argparse
import json
import os
import random
import time
import numpy as np
import torch.distributed as dist
import torch.utils.data.distributed
from apex import amp
from apex.parallel import DistributedDataParallel
from warpctc_pytorch import CTCLoss
from data.data_loader import AudioDataLoader, SpectrogramDataset, BucketingSampler, DistributedBucketingSampler
from decoder import GreedyDecoder
from logger import VisdomLogger, TensorBoardLogger
from model import DeepSpeech, supported_rnns
from test import evaluate
from utils import reduce_tensor, check_loss
parser = argparse.ArgumentParser(description='DeepSpeech training')
parser.add_argument('--train-manifest', metavar='DIR',
help='path to train manifest csv', default='data/cv-valid-train_manifest_en.csv')
parser.add_argument('--val-manifest', metavar='DIR',
help='path to validation manifest csv', default='data/cv-valid-test_manifest_en.csv')
parser.add_argument('--sample-rate', default=16000, type=int, help='Sample rate')
parser.add_argument('--batch-size', default=20, type=int, help='Batch size for training')
parser.add_argument('--num-workers', default=4, type=int, help='Number of workers used in data-loading')
parser.add_argument('--labels-path', default='labels.json', help='Contains all characters for transcription')
parser.add_argument('--window-size', default=.02, type=float, help='Window size for spectrogram in seconds')
parser.add_argument('--window-stride', default=.01, type=float, help='Window stride for spectrogram in seconds')
parser.add_argument('--window', default='hamming', help='Window type for spectrogram generation')
parser.add_argument('--hidden-size', default=800, type=int, help='Hidden size of RNNs')
parser.add_argument('--hidden-layers', default=5, type=int, help='Number of RNN layers')
parser.add_argument('--rnn-type', default='gru', help='Type of the RNN. rnn|gru|lstm are supported')
parser.add_argument('--epochs', default=70, type=int, help='Number of training epochs')
parser.add_argument('--cuda', dest='cuda', action='store_true', help='Use cuda to train model')
parser.add_argument('--lr', '--learning-rate', default=3e-4, type=float, help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--max-norm', default=400, type=int, help='Norm cutoff to prevent explosion of gradients')
parser.add_argument('--learning-anneal', default=1.1, type=float, help='Annealing applied to learning rate every epoch')
parser.add_argument('--silent', dest='silent', action='store_true', help='Turn off progress tracking per iteration')
parser.add_argument('--checkpoint', dest='checkpoint', action='store_true', help='Enables checkpoint saving of model')
parser.add_argument('--checkpoint-per-batch', default=0, type=int, help='Save checkpoint per batch. 0 means never save')
parser.add_argument('--visdom', dest='visdom', action='store_true', help='Turn on visdom graphing')
parser.add_argument('--tensorboard', dest='tensorboard', action='store_true', help='Turn on tensorboard graphing')
parser.add_argument('--log-dir', default='visualize/deepspeech_final', help='Location of tensorboard log')
parser.add_argument('--log-params', dest='log_params', action='store_true', help='Log parameter values and gradients')
parser.add_argument('--id', default='Deepspeech training', help='Identifier for visdom/tensorboard run')
parser.add_argument('--save-folder', default='models/', help='Location to save epoch models')
parser.add_argument('--model-path', default='models/deepspeech_final_cv1_252hr.pth',
help='Location to save best validation model')
parser.add_argument('--continue-from', default='', help='Continue from checkpoint model')
parser.add_argument('--finetune', dest='finetune', action='store_true',
help='Finetune the model from checkpoint "continue_from"')
parser.add_argument('--augment', dest='augment', action='store_true', help='Use random tempo and gain perturbations.')
parser.add_argument('--noise-dir', default=None,
help='Directory to inject noise into audio. If default, noise Inject not added')
parser.add_argument('--noise-prob', default=0.4, help='Probability of noise being added per sample')
parser.add_argument('--noise-min', default=0.0,
help='Minimum noise level to sample from. (1.0 means all noise, not original signal)', type=float)
parser.add_argument('--noise-max', default=0.5,
help='Maximum noise levels to sample from. Maximum 1.0', type=float)
parser.add_argument('--no-shuffle', dest='no_shuffle', action='store_true',
help='Turn off shuffling and sample from dataset based on sequence length (smallest to largest)')
parser.add_argument('--no-sortaGrad', dest='no_sorta_grad', action='store_true',
help='Turn off ordering of dataset on sequence length for the first epoch.')
parser.add_argument('--no-bidirectional', dest='bidirectional', action='store_false', default=True,
help='Turn off bi-directional RNNs, introduces lookahead convolution')
parser.add_argument('--dist-url', default='tcp://127.0.0.1:1550', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--rank', default=0, type=int,
help='The rank of this process')
parser.add_argument('--gpu-rank', default=None,
help='If using distributed parallel for multi-gpu, sets the GPU for the process')
parser.add_argument('--seed', default=123456, type=int, help='Seed to generators')
parser.add_argument('--opt-level', type=str)
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
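# Example invocation (illustrative; assumes this script is saved as train.py and
# that the manifest CSVs referenced above exist):
#   python train.py --train-manifest data/cv-valid-train_manifest_en.csv \
#       --val-manifest data/cv-valid-test_manifest_en.csv \
#       --cuda --opt-level O1 --loss-scale 1 --checkpoint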
torch.manual_seed(123456)
torch.cuda.manual_seed_all(123456)
def to_np(x):
return x.cpu().numpy()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if __name__ == '__main__':
args = parser.parse_args()
# Set seeds for determinism
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
args.distributed = args.world_size > 1
main_proc = True
device = torch.device("cuda" if args.cuda else "cpu")
if args.distributed:
if args.gpu_rank:
torch.cuda.set_device(int(args.gpu_rank))
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
main_proc = args.rank == 0 # Only the first proc should save models
save_folder = args.save_folder
os.makedirs(save_folder, exist_ok=True) # Ensure save folder exists
loss_results, cer_results, wer_results = torch.Tensor(args.epochs), torch.Tensor(args.epochs), torch.Tensor(
args.epochs)
best_wer = None
if main_proc and args.visdom:
visdom_logger = VisdomLogger(args.id, args.epochs)
if main_proc and args.tensorboard:
tensorboard_logger = TensorBoardLogger(args.id, args.log_dir, args.log_params)
avg_loss, start_epoch, start_iter, optim_state = 0, 0, 0, None
if args.continue_from: # Starting from previous model
print("Loading checkpoint model %s" % args.continue_from)
package = torch.load(args.continue_from, map_location=lambda storage, loc: storage)
model = DeepSpeech.load_model_package(package)
labels = model.labels
audio_conf = model.audio_conf
if not args.finetune: # Don't want to restart training
optim_state = package['optim_dict']
start_epoch = int(package.get('epoch', 1)) - 1 # Index start at 0 for training
start_iter = package.get('iteration', None)
if start_iter is None:
start_epoch += 1 # We saved model after epoch finished, start at the next epoch.
start_iter = 0
else:
start_iter += 1
avg_loss = int(package.get('avg_loss', 0))
loss_results, cer_results, wer_results = package['loss_results'], package['cer_results'], \
package['wer_results']
best_wer = wer_results[start_epoch]
if main_proc and args.visdom: # Add previous scores to visdom graph
visdom_logger.load_previous_values(start_epoch, package)
if main_proc and args.tensorboard: # Previous scores to tensorboard logs
tensorboard_logger.load_previous_values(start_epoch, package)
else:
with open(args.labels_path) as label_file:
labels = str(''.join(json.load(label_file)))
audio_conf = dict(sample_rate=args.sample_rate,
window_size=args.window_size,
window_stride=args.window_stride,
window=args.window,
noise_dir=args.noise_dir,
noise_prob=args.noise_prob,
noise_levels=(args.noise_min, args.noise_max))
rnn_type = args.rnn_type.lower()
assert rnn_type in supported_rnns, "rnn_type should be either lstm, rnn or gru"
model = DeepSpeech(rnn_hidden_size=args.hidden_size,
nb_layers=args.hidden_layers,
labels=labels,
rnn_type=supported_rnns[rnn_type],
audio_conf=audio_conf,
bidirectional=args.bidirectional)
decoder = GreedyDecoder(labels)
train_dataset = SpectrogramDataset(audio_conf=audio_conf, manifest_filepath=args.train_manifest, labels=labels,
normalize=True, augment=args.augment)
test_dataset = SpectrogramDataset(audio_conf=audio_conf, manifest_filepath=args.val_manifest, labels=labels,
normalize=True, augment=False)
if not args.distributed:
train_sampler = BucketingSampler(train_dataset, batch_size=args.batch_size)
else:
train_sampler = DistributedBucketingSampler(train_dataset, batch_size=args.batch_size,
num_replicas=args.world_size, rank=args.rank)
train_loader = AudioDataLoader(train_dataset,
num_workers=args.num_workers, batch_sampler=train_sampler)
test_loader = AudioDataLoader(test_dataset, batch_size=args.batch_size,
num_workers=args.num_workers)
if (not args.no_shuffle and start_epoch != 0) or args.no_sorta_grad:
print("Shuffling batches for the following epochs")
train_sampler.shuffle(start_epoch)
model = model.to(device)
parameters = model.parameters()
optimizer = torch.optim.SGD(parameters, lr=args.lr,
momentum=args.momentum, nesterov=True, weight_decay=1e-5)
if optim_state is not None:
optimizer.load_state_dict(optim_state)
model, optimizer = amp.initialize(model, optimizer,
opt_level=args.opt_level,
keep_batchnorm_fp32=args.keep_batchnorm_fp32,
loss_scale=args.loss_scale)
if args.distributed:
model = DistributedDataParallel(model)
print(model)
print("Number of parameters: %d" % DeepSpeech.get_param_size(model))
criterion = CTCLoss()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
for epoch in range(start_epoch, args.epochs):
model.train()
end = time.time()
start_epoch_time = time.time()
for i, (data) in enumerate(train_loader, start=start_iter):
if i == len(train_sampler):
break
inputs, targets, input_percentages, target_sizes = data
input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
# measure data loading time
data_time.update(time.time() - end)
inputs = inputs.to(device)
out, output_sizes = model(inputs, input_sizes)
out = out.transpose(0, 1) # TxNxH
float_out = out.float() # ensure float32 for loss
loss = criterion(float_out, targets, output_sizes, target_sizes).to(device)
loss = loss / inputs.size(0) # average the loss by minibatch
if args.distributed:
loss = loss.to(device)
loss_value = reduce_tensor(loss, args.world_size).item()
else:
loss_value = loss.item()
# Check to ensure valid loss was calculated
valid_loss, error = check_loss(loss, loss_value)
if valid_loss:
optimizer.zero_grad()
# compute gradient
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_norm)
optimizer.step()
else:
print(error)
print('Skipping grad update')
loss_value = 0
avg_loss += loss_value
losses.update(loss_value, inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if not args.silent:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
(epoch + 1), (i + 1), len(train_sampler), batch_time=batch_time, data_time=data_time, loss=losses))
if args.checkpoint_per_batch > 0 and i > 0 and (i + 1) % args.checkpoint_per_batch == 0 and main_proc:
file_path = '%s/deepspeech_checkpoint_epoch_%d_iter_%d.pth' % (save_folder, epoch + 1, i + 1)
print("Saving checkpoint model to %s" % file_path)
torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, iteration=i,
loss_results=loss_results,
wer_results=wer_results, cer_results=cer_results, avg_loss=avg_loss),
file_path)
del loss, out, float_out
avg_loss /= len(train_sampler)
epoch_time = time.time() - start_epoch_time
print('Training Summary Epoch: [{0}]\t'
'Time taken (s): {epoch_time:.0f}\t'
'Average Loss {loss:.3f}\t'.format(epoch + 1, epoch_time=epoch_time, loss=avg_loss))
start_iter = 0 # Reset start iteration for next epoch
with torch.no_grad():
wer, cer, output_data = evaluate(test_loader=test_loader,
device=device,
model=model,
decoder=decoder,
target_decoder=decoder)
loss_results[epoch] = avg_loss
wer_results[epoch] = wer
cer_results[epoch] = cer
print('Validation Summary Epoch: [{0}]\t'
'Average WER {wer:.3f}\t'
'Average CER {cer:.3f}\t'.format(
epoch + 1, wer=wer, cer=cer))
values = {
'loss_results': loss_results,
'cer_results': cer_results,
'wer_results': wer_results
}
if args.visdom and main_proc:
visdom_logger.update(epoch, values)
if args.tensorboard and main_proc:
tensorboard_logger.update(epoch, values, model.named_parameters())
values = {
'Avg Train Loss': avg_loss,
'Avg WER': wer,
'Avg CER': cer
}
if main_proc and args.checkpoint:
file_path = '%s/deepspeech_%d.pth.tar' % (save_folder, epoch + 1)
torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, loss_results=loss_results,
wer_results=wer_results, cer_results=cer_results),
file_path)
# anneal lr
for g in optimizer.param_groups:
g['lr'] = g['lr'] / args.learning_anneal
print('Learning rate annealed to: {lr:.6f}'.format(lr=g['lr']))
if main_proc and (best_wer is None or best_wer > wer):
print("Found better validated model, saving to %s" % args.model_path)
torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, loss_results=loss_results,
wer_results=wer_results, cer_results=cer_results)
, args.model_path)
best_wer = wer
avg_loss = 0
if not args.no_shuffle:
print("Shuffling batches...")
train_sampler.shuffle(epoch)
|
the-stack_0_6822 | """
stdint
======
Although Python has native support for arbitrary-precision integers,
Javascript by default uses 64-bit floats as the only numeric type,
signifying they cannot store more than 53 integral bits.
Therefore, in Javascript, 64-bit integers are stored as an array
of 2 numbers. Likewise, 128-bit integers are stored as an array of 4
numbers. These functions handle the conversion of native Python integers
to and from a Javascript-like notation, to simplify integration with
data transfer objects.
This also provides routines to convert to and from fixed-width
integers in both catbuffer and data-transfer objects, as well
as extract high- and low-bit patterns from the types.
The module is named after <stdint.h>, which describes fixed-width
(standard) integers in C, even though it has no relationship
in terms of functionality.
"""
from __future__ import annotations
import typing
__all__ = [
# DTO Types
'I8DTOType',
'U8DTOType',
'U16DTOType',
'U24DTOType',
'U32DTOType',
'U64DTOType',
'U128DTOType',
# Byte sizes
'I8_BYTES',
'U8_BYTES',
'U16_BYTES',
'U24_BYTES',
'U32_BYTES',
'U64_BYTES',
'U128_BYTES',
# I8
# 'i8_high',
# 'i8_low',
'i8_iter_from_catbuffer',
'i8_iter_from_dto',
'i8_iter_to_catbuffer',
'i8_iter_to_dto',
'i8_from_catbuffer',
'i8_from_dto',
'i8_to_catbuffer',
'i8_to_dto',
# U8
'u8_high',
'u8_low',
'u8_iter_from_catbuffer',
'u8_iter_from_dto',
'u8_iter_to_catbuffer',
'u8_iter_to_dto',
'u8_from_catbuffer',
'u8_from_dto',
'u8_to_catbuffer',
'u8_to_dto',
# U16
'u16_high',
'u16_low',
'u16_iter_from_catbuffer',
'u16_iter_from_dto',
'u16_iter_to_catbuffer',
'u16_iter_to_dto',
'u16_from_catbuffer',
'u16_from_dto',
'u16_to_catbuffer',
'u16_to_dto',
# U24
'u24_high',
'u24_low',
'u24_iter_from_catbuffer',
'u24_iter_from_dto',
'u24_iter_to_catbuffer',
'u24_iter_to_dto',
'u24_from_catbuffer',
'u24_from_dto',
'u24_to_catbuffer',
'u24_to_dto',
# U32
'u32_high',
'u32_low',
'u32_iter_from_catbuffer',
'u32_iter_from_dto',
'u32_iter_to_catbuffer',
'u32_iter_to_dto',
'u32_from_catbuffer',
'u32_from_dto',
'u32_to_catbuffer',
'u32_to_dto',
# U64
'u64_high',
'u64_low',
'u64_iter_from_catbuffer',
'u64_iter_from_dto',
'u64_iter_to_catbuffer',
'u64_iter_to_dto',
'u64_from_catbuffer',
'u64_from_dto',
'u64_to_catbuffer',
'u64_to_dto',
# U128
'u128_high',
'u128_low',
'u128_iter_from_catbuffer',
'u128_iter_from_dto',
'u128_iter_to_catbuffer',
'u128_iter_to_dto',
'u128_from_catbuffer',
'u128_from_dto',
'u128_to_catbuffer',
'u128_to_dto',
]
U4_BITS = 4
U8_BITS = 8
U16_BITS = 16
U24_BITS = 24
U32_BITS = 32
U64_BITS = 64
U128_BITS = 128
U8_BYTES = U8_BITS // 8
U16_BYTES = U16_BITS // 8
U24_BYTES = U24_BITS // 8
U32_BYTES = U32_BITS // 8
U64_BYTES = U64_BITS // 8
U128_BYTES = U128_BITS // 8
U4_MAX = 0xF
U8_MAX = 0xFF
U16_MAX = 0xFFFF
U24_MAX = 0xFFFFFF
U32_MAX = 0xFFFFFFFF
U64_MAX = 0xFFFFFFFFFFFFFFFF
U128_MAX = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
I8_BITS = 8
I8_BYTES = I8_BITS // 8
I8_MAX = 0x7F
I8_MIN = -0x80
U8DTOType = int
U16DTOType = int
U24DTOType = int
U32DTOType = int
U64DTOType = typing.Sequence[U32DTOType]
U128DTOType = typing.Sequence[U64DTOType]
YieldIntType = typing.Generator[int, None, None]
YieldBytesType = typing.Generator[bytes, None, None]
I8DTOType = int
# HELPERS
def check_overflow(within_range: bool):
"""Raise exception if overflow."""
if not within_range:
raise OverflowError
def high(max: int, bits: int, mask: int) -> typing.Callable[[int], int]:
def wrapper(value: int) -> int:
check_overflow(0 <= value <= max)
return (value >> bits) & mask
wrapper.__name__ = f'u{2*bits}_high'
wrapper.__qualname__ = wrapper.__name__
    wrapper.__doc__ = f'Get the high {bits} bits of a {2*bits}-bit integer.'
wrapper.__module__ = __name__
return wrapper
def low(max: int, bits: int, mask: int) -> typing.Callable[[int], int]:
def wrapper(value: int) -> int:
check_overflow(0 <= value <= max)
return value & mask
wrapper.__name__ = f'u{2*bits}_low'
wrapper.__qualname__ = wrapper.__name__
    wrapper.__doc__ = f'Get the low {bits} bits of a {2*bits}-bit integer.'
wrapper.__module__ = __name__
return wrapper
def to_catbuffer_impl(size: int, signed: bool = False) -> typing.Callable[[int], bytes]:
    def wrapper(value: int) -> bytes:
        # Use the factory-level `signed` flag; re-declaring it here would shadow it.
        return value.to_bytes(size, 'little', signed=signed)
    return wrapper
def to_catbuffer(bits: int, signed: bool = False) -> typing.Callable[[int], bytes]:
cb = to_catbuffer_impl(bits // 8, signed=signed)
def wrapper(value: int, signed: bool = False) -> bytes:
return cb(value)
wrapper.__name__ = f'u{bits}_to_catbuffer'
wrapper.__qualname__ = wrapper.__name__
wrapper.__doc__ = f'Convert {bits}-bit integer to catbuffer.'
wrapper.__module__ = __name__
return wrapper
def iter_to_catbuffer(bits: int, signed: bool = False):
cb = to_catbuffer_impl(bits // 8, signed=signed)
def wrapper(iterable, signed: bool = False):
for value in iterable:
yield cb(value)
wrapper.__name__ = f'u{bits}_iter_to_catbuffer'
wrapper.__qualname__ = wrapper.__name__
wrapper.__doc__ = f'Iteratively convert {bits}-bit integers to catbuffer.'
wrapper.__module__ = __name__
return wrapper
def from_catbuffer_impl(size: int, signed: bool = False) -> typing.Callable[[bytes], int]:
    def wrapper(catbuffer: bytes) -> int:
        # Use the factory-level `signed` flag; re-declaring it here would shadow it.
        return int.from_bytes(catbuffer, 'little', signed=signed)
    return wrapper
def from_catbuffer(bits: int, signed: bool = False) -> typing.Callable[[bytes], int]:
size = bits // 8
cb = from_catbuffer_impl(size, signed=signed)
def wrapper(catbuffer: bytes, signed: bool = False) -> int:
if len(catbuffer) > size:
raise OverflowError('bytes too big to convert')
return cb(catbuffer)
wrapper.__name__ = f'u{bits}_from_catbuffer'
wrapper.__qualname__ = wrapper.__name__
wrapper.__doc__ = f'Convert catbuffer to {bits}-bit integer.'
wrapper.__module__ = __name__
return wrapper
def iter_from_catbuffer(bits: int, signed: bool = False) -> typing.Callable[[bytes], YieldIntType]:
size = bits // 8
cb = from_catbuffer_impl(size, signed=signed)
def wrapper(catbuffer: bytes, signed: bool = False) -> YieldIntType:
length = len(catbuffer)
if length % size != 0:
raise ValueError(f'iter from_catbuffer requires multiple of {size}.')
for i in range(0, length, size):
start = i
stop = start + size
yield cb(catbuffer[start:stop])
wrapper.__name__ = f'u{bits}_iter_from_catbuffer'
wrapper.__qualname__ = wrapper.__name__
wrapper.__doc__ = f'Iteratively convert catbuffer to {bits}-bit integers.'
wrapper.__module__ = __name__
return wrapper
def iter_to_dto(bits: int, cb):
def wrapper(iterable):
for value in iterable:
yield cb(value)
wrapper.__name__ = f'u{bits}_iter_to_dto'
wrapper.__qualname__ = wrapper.__name__
wrapper.__doc__ = f'Iteratively convert {bits}-bit integers to DTO.'
wrapper.__module__ = __name__
return wrapper
def iter_from_dto(bits: int, cb):
def wrapper(iterable):
for value in iterable:
yield cb(value)
wrapper.__name__ = f'u{bits}_iter_from_dto'
wrapper.__qualname__ = wrapper.__name__
wrapper.__doc__ = f'Iteratively convert DTOs to {bits}-bit integers.'
wrapper.__module__ = __name__
return wrapper
# UINT8
def u8_to_dto(value: int) -> U8DTOType:
"""Convert 8-bit int to DTO."""
check_overflow(0 <= value <= U8_MAX)
return value
def u8_from_dto(dto: U8DTOType) -> int:
"""Convert DTO to 8-bit int."""
check_overflow(0 <= dto <= U8_MAX)
return dto
u8_high = high(U8_MAX, U4_BITS, U4_MAX)
u8_low = low(U8_MAX, U4_BITS, U4_MAX)
u8_to_catbuffer = to_catbuffer(U8_BITS)
u8_from_catbuffer = from_catbuffer(U8_BITS)
u8_iter_to_catbuffer = iter_to_catbuffer(U8_BITS)
u8_iter_from_catbuffer = iter_from_catbuffer(U8_BITS)
u8_iter_to_dto = iter_to_dto(U8_BITS, u8_to_dto)
u8_iter_from_dto = iter_from_dto(U8_BITS, u8_from_dto)
# INT8
def i8_to_dto(value: int) -> I8DTOType:
"""Convert 8-bit int to DTO."""
check_overflow(I8_MIN <= value <= I8_MAX)
return value
def i8_from_dto(dto: I8DTOType) -> int:
"""Convert DTO to 8-bit int."""
check_overflow(I8_MIN <= dto <= I8_MAX)
return dto
# i8_high = high(I8_MAX, I4_BITS, I4_MAX)
# i8_low = low(I8_MAX, I4_BITS, I4_MAX)
i8_to_catbuffer = to_catbuffer(I8_BITS, signed=True)
i8_from_catbuffer = from_catbuffer(I8_BITS, signed=True)
i8_iter_to_catbuffer = iter_to_catbuffer(I8_BITS, signed=True)
i8_iter_from_catbuffer = iter_from_catbuffer(I8_BITS, signed=True)
i8_iter_to_dto = iter_to_dto(I8_BITS, i8_to_dto)
i8_iter_from_dto = iter_from_dto(I8_BITS, i8_from_dto)
# UINT16
def u16_to_dto(value: int) -> U16DTOType:
"""Convert 16-bit int to DTO."""
check_overflow(0 <= value <= U16_MAX)
return value
def u16_from_dto(dto: U16DTOType) -> int:
"""Convert DTO to 16-bit int."""
check_overflow(0 <= dto <= U16_MAX)
return dto
u16_high = high(U16_MAX, U8_BITS, U8_MAX)
u16_low = low(U16_MAX, U8_BITS, U8_MAX)
u16_to_catbuffer = to_catbuffer(U16_BITS)
u16_from_catbuffer = from_catbuffer(U16_BITS)
u16_iter_to_catbuffer = iter_to_catbuffer(U16_BITS)
u16_iter_from_catbuffer = iter_from_catbuffer(U16_BITS)
u16_iter_to_dto = iter_to_dto(U16_BITS, u16_to_dto)
u16_iter_from_dto = iter_from_dto(U16_BITS, u16_from_dto)
# UINT24
def u24_to_dto(value: int) -> U24DTOType:
"""Convert 24-bit int to DTO."""
check_overflow(0 <= value <= U24_MAX)
return value
def u24_from_dto(dto: U24DTOType) -> int:
"""Convert DTO to 24-bit int."""
check_overflow(0 <= dto <= U24_MAX)
return dto
u24_high = high(U24_MAX, U8_BITS, U8_MAX)
u24_low = low(U24_MAX, U8_BITS, U8_MAX)
u24_to_catbuffer = to_catbuffer(U24_BITS)
u24_from_catbuffer = from_catbuffer(U24_BITS)
u24_iter_to_catbuffer = iter_to_catbuffer(U24_BITS)
u24_iter_from_catbuffer = iter_from_catbuffer(U24_BITS)
u24_iter_to_dto = iter_to_dto(U24_BITS, u24_to_dto)
u24_iter_from_dto = iter_from_dto(U24_BITS, u24_from_dto)
# UINT32
def u32_to_dto(value: int) -> U32DTOType:
"""Convert 32-bit int to DTO."""
check_overflow(0 <= value <= U32_MAX)
return value
def u32_from_dto(dto: U32DTOType) -> int:
"""Convert DTO to 32-bit int."""
check_overflow(0 <= dto <= U32_MAX)
return dto
u32_high = high(U32_MAX, U16_BITS, U16_MAX)
u32_low = low(U32_MAX, U16_BITS, U16_MAX)
u32_to_catbuffer = to_catbuffer(U32_BITS)
u32_from_catbuffer = from_catbuffer(U32_BITS)
u32_iter_to_catbuffer = iter_to_catbuffer(U32_BITS)
u32_iter_from_catbuffer = iter_from_catbuffer(U32_BITS)
u32_iter_to_dto = iter_to_dto(U32_BITS, u32_to_dto)
u32_iter_from_dto = iter_from_dto(U32_BITS, u32_from_dto)
# UINT64
def u64_to_dto(value: int) -> U64DTOType:
"""Convert 64-bit int to DTO."""
check_overflow(0 <= value <= U64_MAX)
return [u64_low(value), u64_high(value)]
def u64_from_dto(dto: U64DTOType) -> int:
"""Convert DTO to 64-bit int."""
if not (
len(dto) == 2
and dto[0] <= U32_MAX
and dto[1] <= U32_MAX
):
raise ArithmeticError
return (dto[0]) | (dto[1] << U32_BITS)
u64_high = high(U64_MAX, U32_BITS, U32_MAX)
u64_low = low(U64_MAX, U32_BITS, U32_MAX)
u64_to_catbuffer = to_catbuffer(U64_BITS)
u64_from_catbuffer = from_catbuffer(U64_BITS)
u64_iter_to_catbuffer = iter_to_catbuffer(U64_BITS)
u64_iter_from_catbuffer = iter_from_catbuffer(U64_BITS)
u64_iter_to_dto = iter_to_dto(U64_BITS, u64_to_dto)
u64_iter_from_dto = iter_from_dto(U64_BITS, u64_from_dto)
# UINT128
def u128_to_dto(value: int) -> U128DTOType:
"""Convert 128-bit int to DTO."""
check_overflow(0 <= value <= U128_MAX)
low = u128_low(value)
high = u128_high(value)
return [u64_to_dto(low), u64_to_dto(high)]
def u128_from_dto(dto: U128DTOType) -> int:
"""Convert DTO to 128-bit int."""
if len(dto) != 2:
raise ArithmeticError
low = u64_from_dto(dto[0])
high = u64_from_dto(dto[1])
return low | (high << U64_BITS)
u128_high = high(U128_MAX, U64_BITS, U64_MAX)
u128_low = low(U128_MAX, U64_BITS, U64_MAX)
u128_to_catbuffer = to_catbuffer(U128_BITS)
u128_from_catbuffer = from_catbuffer(U128_BITS)
u128_iter_to_catbuffer = iter_to_catbuffer(U128_BITS)
u128_iter_from_catbuffer = iter_from_catbuffer(U128_BITS)
u128_iter_to_dto = iter_to_dto(U128_BITS, u128_to_dto)
u128_iter_from_dto = iter_from_dto(U128_BITS, u128_from_dto)
|
the-stack_0_6823 | import os
import pytest
import ray
from ray import serve
if os.environ.get("RAY_SERVE_INTENTIONALLY_CRASH", False):
serve.controller._CRASH_AFTER_CHECKPOINT_PROBABILITY = 0.5
@pytest.fixture(scope="session")
def _shared_serve_instance():
ray.init(num_cpus=36)
serve.init()
yield
@pytest.fixture
def serve_instance(_shared_serve_instance):
serve.init()
yield
# Re-init if necessary.
serve.init()
controller = serve.api._get_controller()
# Clear all state between tests to avoid naming collisions.
for endpoint in ray.get(controller.get_all_endpoints.remote()):
serve.delete_endpoint(endpoint)
for backend in ray.get(controller.get_all_backends.remote()):
serve.delete_backend(backend)
|
the-stack_0_6824 | import mock
import zeit.cms.browser.interfaces
import zeit.cms.browser.listing
import zeit.cms.content.interfaces
import zeit.cms.interfaces
import zeit.cms.testing
import zope.component
import zope.publisher.browser
class HitColumnTest(zeit.cms.testing.ZeitCmsTestCase):
def test_sort_key(self):
class FakeAccessCounter(object):
hits = 5
total_hits = 19
def __init__(self, context):
pass
zope.component.getSiteManager().registerAdapter(
FakeAccessCounter, (zeit.cms.interfaces.ICMSContent,),
zeit.cms.content.interfaces.IAccessCounter)
listrep = zope.component.queryMultiAdapter(
(self.repository['testcontent'],
zope.publisher.browser.TestRequest()),
zeit.cms.browser.interfaces.IListRepresentation)
column = zeit.cms.browser.listing.HitColumn()
self.assertEqual((19, 5), column.getSortKey(listrep, formatter=None))
class ListingTest(zeit.cms.testing.ZeitCmsBrowserTestCase):
def test_columns_ignore_exceptions(self):
with mock.patch(
'zeit.cms.testcontenttype.testcontenttype.'
                'ExampleContentType.authors', new_callable=mock.PropertyMock) as author:
author.side_effect = RuntimeError('provoked')
b = self.browser
b.handleErrors = False
with self.assertNothingRaised():
b.open('http://localhost/++skin++vivi/repository')
# Check that the cells are present but empty.
self.assertEllipsis(
'...<td> <span class="filename">testcontent</span> </td>'
' <td> 2008 ... </td> <td> </td> <td> </td> <td> </td>...',
b.contents)
|
the-stack_0_6825 | import sys
from typing import Any
from typing import List
from kurobako import problem
from naslib.utils import get_dataset_api
op_names = [
"skip_connect",
"none",
"nor_conv_3x3",
"nor_conv_1x1",
"avg_pool_3x3",
]
edge_num = 4 * 3 // 2
max_epoch = 199
prune_start_epoch = 10
prune_epoch_step = 10
class NASLibProblemFactory(problem.ProblemFactory):
def __init__(self, dataset: str) -> None:
"""Creates ProblemFactory for NASBench201.
Args:
dataset:
Accepts one of "cifar10", "cifar100" or "ImageNet16-120".
"""
self._dataset = dataset
if dataset == "cifar10":
self._dataset = "cifar10-valid" # Set name used in dataset API
self._dataset_api = get_dataset_api("nasbench201", dataset)
def specification(self) -> problem.ProblemSpec:
params = [
problem.Var(f"x{i}", problem.CategoricalRange(op_names)) for i in range(edge_num)
]
return problem.ProblemSpec(
name=f"NASBench201-{self._dataset}",
params=params,
values=[problem.Var("value")],
steps=list(range(prune_start_epoch, max_epoch, prune_epoch_step)) + [max_epoch],
)
def create_problem(self, seed: int) -> problem.Problem:
return NASLibProblem(self._dataset, self._dataset_api)
class NASLibProblem(problem.Problem):
def __init__(self, dataset: str, dataset_api: Any) -> None:
super().__init__()
self._dataset = dataset
self._dataset_api = dataset_api
def create_evaluator(self, params: List[float]) -> problem.Evaluator:
ops = [op_names[int(x)] for x in params]
arch_str = "|{}~0|+|{}~0|{}~1|+|{}~0|{}~1|{}~2|".format(*ops)
return NASLibEvaluator(
self._dataset_api["nb201_data"][arch_str][self._dataset]["eval_acc1es"]
)
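# Encoding sketch (illustration only, derived from the code above): with params
# [0, 1, 2, 3, 4, 0] the selected ops are
# [skip_connect, none, nor_conv_3x3, nor_conv_1x1, avg_pool_3x3, skip_connect],
# which yields the NAS-Bench-201 architecture string
# |skip_connect~0|+|none~0|nor_conv_3x3~1|+|nor_conv_1x1~0|avg_pool_3x3~1|skip_connect~2|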
class NASLibEvaluator(problem.Evaluator):
def __init__(self, learning_curve: List[float]) -> None:
self._current_step = 0
self._lc = learning_curve
def current_step(self) -> int:
return self._current_step
def evaluate(self, next_step: int) -> List[float]:
self._current_step = next_step
return [-self._lc[next_step]]
if __name__ == "__main__":
if len(sys.argv) < 1 + 2:
print("Usage: python3 nas_bench_suite/problems.py <search_space> <dataset>")
print("Example: python3 nas_bench_suite/problems.py nasbench201 cifar10")
exit(1)
search_space_name = sys.argv[1]
# We currently do not support other benchmarks.
assert search_space_name == "nasbench201"
dataset = sys.argv[2]
runner = problem.ProblemRunner(NASLibProblemFactory(dataset))
runner.run()
|
the-stack_0_6827 | def to_openmm_Topology(item, selection='all', frame_indices='all', syntaxis='MolSysMT'):
from molsysmt.tools.openmm_Modeller import is_openmm_Modeller
from molsysmt.basic import convert
if not is_openmm_Modeller(item):
raise ValueError
tmp_item = convert(item, to_form='openmm.Topology', selection=selection,
frame_indices=frame_indices, syntaxis=syntaxis)
return tmp_item
|
the-stack_0_6828 | import re
from django import forms
from django.core.validators import RegexValidator
regex_validator_open = RegexValidator(
regex=re.compile("open", flags=re.ASCII),
message="You can't use open function",
inverse_match=True,
)
regex_validator_eval = RegexValidator(
regex=re.compile("eval", flags=re.ASCII),
message="You can't use eval function",
inverse_match=True,
)
regex_validator_exec = RegexValidator(
regex=re.compile("exec", flags=re.ASCII),
message="You can't use exec function",
inverse_match=True,
)
regex_validator_os = RegexValidator(
regex=re.compile(r"[%0-9\b]?os[\b%0-9]?", flags=re.ASCII),
message="You can't use os module",
inverse_match=True,
)
regex_validator_subprocess = RegexValidator(
regex=re.compile("subprocess", flags=re.ASCII),
message="You can't use subprocess module",
inverse_match=True,
)
regex_validator_pathlib = RegexValidator(
regex=re.compile("pathlib", flags=re.ASCII),
message="You can't use pathlib module",
inverse_match=True,
)
regex_validator_fileinput = RegexValidator(
regex=re.compile("fileinput", flags=re.ASCII),
message="You can't use fileinput module",
inverse_match=True,
)
regex_validator_shutil = RegexValidator(
regex=re.compile("shutil", flags=re.ASCII),
message="You can't use shutil module",
inverse_match=True,
)
regex_validator_parent_path = RegexValidator(
regex=re.compile(r"\.\.[/\\]{1}", flags=re.ASCII),
message="You can't go to the parent path",
inverse_match=True,
)
regex_validator_ftp = RegexValidator(
regex=re.compile(r".?ftp.?", flags=re.ASCII),
message="You can't use ftp protocol",
inverse_match=True,
)
class PythonInterpreterForm(forms.Form):
"""Form for the main page.
Attributes:
user_code: field for input user's code;
std_io: field for standard input-output;
timeout: field for setup of server's response timeout.
"""
user_code = forms.CharField(
widget=forms.Textarea,
label=False,
initial="# Type your Python code here and push Launch button.\n",
validators=[
regex_validator_open,
regex_validator_eval,
regex_validator_exec,
regex_validator_os,
regex_validator_subprocess,
regex_validator_pathlib,
regex_validator_fileinput,
regex_validator_shutil,
regex_validator_parent_path,
regex_validator_ftp,
],
)
std_io = forms.CharField(widget=forms.Textarea, label=False, required=False)
timeout = forms.IntegerField(max_value=20, label="Timeout, sec", initial=5)
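# Illustrative (hypothetical) handling in a view; only the field names come from
# the form above, the surrounding view logic is an assumption:
#   form = PythonInterpreterForm(request.POST)
#   if form.is_valid():
#       user_code = form.cleaned_data['user_code']
#       timeout = form.cleaned_data['timeout']
#   else:
#       errors = form.errors  # e.g. "You can't use os module" from the validators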
|
the-stack_0_6830 | import random
import time
import warnings
import sys
import argparse
import shutil
import torch
import torch.backends.cudnn as cudnn
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR, MultiStepLR
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, ToPILImage
sys.path.append('../../..')
from dalib.adaptation.keypoint_detection.regda import PoseResNet as RegDAPoseResNet, \
PseudoLabelGenerator, RegressionDisparity
import common.vision.models as models
from common.vision.models.keypoint_detection.pose_resnet import Upsampling, PoseResNet
from common.vision.models.keypoint_detection.loss import JointsKLLoss
import common.vision.datasets.keypoint_detection as datasets
import common.vision.transforms.keypoint_detection as T
from common.vision.transforms import Denormalize
from common.utils.data import ForeverDataIterator
from common.utils.meter import AverageMeter, ProgressMeter, AverageMeterDict
from common.utils.metric.keypoint_detection import accuracy
from common.utils.logger import CompleteLogger
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args: argparse.Namespace):
logger = CompleteLogger(args.log, args.phase)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
cudnn.benchmark = True
# Data loading code
normalize = T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
train_transform = T.Compose([
T.RandomRotation(args.rotation),
T.RandomResizedCrop(size=args.image_size, scale=args.resize_scale),
T.ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25),
T.GaussianBlur(),
T.ToTensor(),
normalize
])
val_transform = T.Compose([
T.Resize(args.image_size),
T.ToTensor(),
normalize
])
image_size = (args.image_size, args.image_size)
heatmap_size = (args.heatmap_size, args.heatmap_size)
source_dataset = datasets.__dict__[args.source]
train_source_dataset = source_dataset(root=args.source_root, transforms=train_transform,
image_size=image_size, heatmap_size=heatmap_size)
train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,
shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True)
val_source_dataset = source_dataset(root=args.source_root, split='test', transforms=val_transform,
image_size=image_size, heatmap_size=heatmap_size)
val_source_loader = DataLoader(val_source_dataset, batch_size=args.batch_size, shuffle=False, pin_memory=True)
target_dataset = datasets.__dict__[args.target]
train_target_dataset = target_dataset(root=args.target_root, transforms=train_transform,
image_size=image_size, heatmap_size=heatmap_size)
train_target_loader = DataLoader(train_target_dataset, batch_size=args.batch_size,
shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True)
val_target_dataset = target_dataset(root=args.target_root, split='test', transforms=val_transform,
image_size=image_size, heatmap_size=heatmap_size)
val_target_loader = DataLoader(val_target_dataset, batch_size=args.batch_size, shuffle=False, pin_memory=True)
print("Source train:", len(train_source_loader))
print("Target train:", len(train_target_loader))
print("Source test:", len(val_source_loader))
print("Target test:", len(val_target_loader))
train_source_iter = ForeverDataIterator(train_source_loader)
train_target_iter = ForeverDataIterator(train_target_loader)
# create model
backbone = models.__dict__[args.arch](pretrained=True)
upsampling = Upsampling(backbone.out_features)
num_keypoints = train_source_dataset.num_keypoints
model = RegDAPoseResNet(backbone, upsampling, 256, num_keypoints, num_head_layers=args.num_head_layers, finetune=True).to(device)
# define loss function
criterion = JointsKLLoss()
pseudo_label_generator = PseudoLabelGenerator(num_keypoints, args.heatmap_size, args.heatmap_size)
regression_disparity = RegressionDisparity(pseudo_label_generator, JointsKLLoss(epsilon=1e-7))
# define optimizer and lr scheduler
optimizer_f = SGD([
{'params': backbone.parameters(), 'lr': 0.1},
{'params': upsampling.parameters(), 'lr': 0.1},
], lr=0.1, momentum=args.momentum, weight_decay=args.wd, nesterov=True)
optimizer_h = SGD(model.head.parameters(), lr=1., momentum=args.momentum, weight_decay=args.wd, nesterov=True)
optimizer_h_adv = SGD(model.head_adv.parameters(), lr=1., momentum=args.momentum, weight_decay=args.wd, nesterov=True)
lr_decay_function = lambda x: args.lr * (1. + args.lr_gamma * float(x)) ** (-args.lr_decay)
lr_scheduler_f = LambdaLR(optimizer_f, lr_decay_function)
lr_scheduler_h = LambdaLR(optimizer_h, lr_decay_function)
lr_scheduler_h_adv = LambdaLR(optimizer_h_adv, lr_decay_function)
start_epoch = 0
if args.resume is None:
if args.pretrain is None:
# first pretrain the backbone and upsampling
print("Pretraining the model on source domain.")
args.pretrain = logger.get_checkpoint_path('pretrain')
pretrained_model = PoseResNet(backbone, upsampling, 256, num_keypoints, True).to(device)
optimizer = SGD(pretrained_model.get_parameters(lr=args.lr), momentum=args.momentum, weight_decay=args.wd, nesterov=True)
lr_scheduler = MultiStepLR(optimizer, args.lr_step, args.lr_factor)
best_acc = 0
for epoch in range(args.pretrain_epochs):
lr_scheduler.step()
print(lr_scheduler.get_lr())
pretrain(train_source_iter, pretrained_model, criterion, optimizer, epoch, args)
source_val_acc = validate(val_source_loader, pretrained_model, criterion, None, args)
# remember best acc and save checkpoint
if source_val_acc['all'] > best_acc:
best_acc = source_val_acc['all']
torch.save(
{
'model': pretrained_model.state_dict()
}, args.pretrain
)
print("Source: {} best: {}".format(source_val_acc['all'], best_acc))
# load from the pretrained checkpoint
pretrained_dict = torch.load(args.pretrain, map_location='cpu')['model']
model_dict = model.state_dict()
# remove keys from pretrained dict that doesn't appear in model dict
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model.load_state_dict(pretrained_dict, strict=False)
else:
# optionally resume from a checkpoint
checkpoint = torch.load(args.resume, map_location='cpu')
model.load_state_dict(checkpoint['model'])
optimizer_f.load_state_dict(checkpoint['optimizer_f'])
optimizer_h.load_state_dict(checkpoint['optimizer_h'])
optimizer_h_adv.load_state_dict(checkpoint['optimizer_h_adv'])
lr_scheduler_f.load_state_dict(checkpoint['lr_scheduler_f'])
lr_scheduler_h.load_state_dict(checkpoint['lr_scheduler_h'])
lr_scheduler_h_adv.load_state_dict(checkpoint['lr_scheduler_h_adv'])
start_epoch = checkpoint['epoch'] + 1
# define visualization function
tensor_to_image = Compose([
Denormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
ToPILImage()
])
def visualize(image, keypoint2d, name, heatmaps=None):
"""
Args:
image (tensor): image in shape 3 x H x W
keypoint2d (tensor): keypoints in shape K x 2
name: name of the saving image
"""
train_source_dataset.visualize(tensor_to_image(image),
keypoint2d, logger.get_image_path("{}.jpg".format(name)))
if args.phase == 'test':
# evaluate on validation set
source_val_acc = validate(val_source_loader, model, criterion, None, args)
target_val_acc = validate(val_target_loader, model, criterion, visualize, args)
print("Source: {:4.3f} Target: {:4.3f}".format(source_val_acc['all'], target_val_acc['all']))
for name, acc in target_val_acc.items():
print("{}: {:4.3f}".format(name, acc))
return
# start training
best_acc = 0
print("Start regression domain adaptation.")
for epoch in range(start_epoch, args.epochs):
logger.set_epoch(epoch)
print(lr_scheduler_f.get_lr(), lr_scheduler_h.get_lr(), lr_scheduler_h_adv.get_lr())
# train for one epoch
train(train_source_iter, train_target_iter, model, criterion, regression_disparity,
optimizer_f, optimizer_h, optimizer_h_adv, lr_scheduler_f, lr_scheduler_h, lr_scheduler_h_adv,
epoch, visualize if args.debug else None, args)
# evaluate on validation set
source_val_acc = validate(val_source_loader, model, criterion, None, args)
target_val_acc = validate(val_target_loader, model, criterion, visualize if args.debug else None, args)
# remember best acc and save checkpoint
torch.save(
{
'model': model.state_dict(),
'optimizer_f': optimizer_f.state_dict(),
'optimizer_h': optimizer_h.state_dict(),
'optimizer_h_adv': optimizer_h_adv.state_dict(),
'lr_scheduler_f': lr_scheduler_f.state_dict(),
'lr_scheduler_h': lr_scheduler_h.state_dict(),
'lr_scheduler_h_adv': lr_scheduler_h_adv.state_dict(),
'epoch': epoch,
'args': args
}, logger.get_checkpoint_path(epoch)
)
if target_val_acc['all'] > best_acc:
shutil.copy(logger.get_checkpoint_path(epoch), logger.get_checkpoint_path('best'))
best_acc = target_val_acc['all']
print("Source: {:4.3f} Target: {:4.3f} Target(best): {:4.3f}".format(source_val_acc['all'], target_val_acc['all'], best_acc))
for name, acc in target_val_acc.items():
print("{}: {:4.3f}".format(name, acc))
logger.close()
def pretrain(train_source_iter, model, criterion, optimizer,
epoch: int, args: argparse.Namespace):
batch_time = AverageMeter('Time', ':4.2f')
data_time = AverageMeter('Data', ':3.1f')
losses_s = AverageMeter('Loss (s)', ":.2e")
acc_s = AverageMeter("Acc (s)", ":3.2f")
progress = ProgressMeter(
args.iters_per_epoch,
[batch_time, data_time, losses_s, acc_s],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i in range(args.iters_per_epoch):
optimizer.zero_grad()
x_s, label_s, weight_s, meta_s = next(train_source_iter)
x_s = x_s.to(device)
label_s = label_s.to(device)
weight_s = weight_s.to(device)
# measure data loading time
data_time.update(time.time() - end)
# compute output
y_s = model(x_s)
loss_s = criterion(y_s, label_s, weight_s)
# compute gradient and do SGD step
loss_s.backward()
optimizer.step()
# measure accuracy and record loss
_, avg_acc_s, cnt_s, pred_s = accuracy(y_s.detach().cpu().numpy(),
label_s.detach().cpu().numpy())
acc_s.update(avg_acc_s, cnt_s)
losses_s.update(loss_s, cnt_s)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def train(train_source_iter, train_target_iter, model, criterion, regression_disparity,
optimizer_f, optimizer_h, optimizer_h_adv, lr_scheduler_f, lr_scheduler_h, lr_scheduler_h_adv,
epoch: int, visualize, args: argparse.Namespace):
batch_time = AverageMeter('Time', ':4.2f')
data_time = AverageMeter('Data', ':3.1f')
losses_s = AverageMeter('Loss (s)', ":.2e")
losses_gf = AverageMeter('Loss (t, false)', ":.2e")
losses_gt = AverageMeter('Loss (t, truth)', ":.2e")
acc_s = AverageMeter("Acc (s)", ":3.2f")
acc_t = AverageMeter("Acc (t)", ":3.2f")
acc_s_adv = AverageMeter("Acc (s, adv)", ":3.2f")
acc_t_adv = AverageMeter("Acc (t, adv)", ":3.2f")
progress = ProgressMeter(
args.iters_per_epoch,
[batch_time, data_time, losses_s, losses_gf, losses_gt, acc_s, acc_t, acc_s_adv, acc_t_adv],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i in range(args.iters_per_epoch):
x_s, label_s, weight_s, meta_s = next(train_source_iter)
x_t, label_t, weight_t, meta_t = next(train_target_iter)
x_s = x_s.to(device)
label_s = label_s.to(device)
weight_s = weight_s.to(device)
x_t = x_t.to(device)
label_t = label_t.to(device)
weight_t = weight_t.to(device)
# measure data loading time
data_time.update(time.time() - end)
# Step A train all networks to minimize loss on source domain
optimizer_f.zero_grad()
optimizer_h.zero_grad()
optimizer_h_adv.zero_grad()
y_s, y_s_adv = model(x_s)
loss_s = criterion(y_s, label_s, weight_s) + \
args.margin * args.trade_off * regression_disparity(y_s, y_s_adv, weight_s, mode='min')
loss_s.backward()
optimizer_f.step()
optimizer_h.step()
optimizer_h_adv.step()
# Step B train adv regressor to maximize regression disparity
optimizer_h_adv.zero_grad()
y_t, y_t_adv = model(x_t)
loss_ground_false = args.trade_off * regression_disparity(y_t, y_t_adv, weight_t, mode='max')
loss_ground_false.backward()
optimizer_h_adv.step()
# Step C train feature extractor to minimize regression disparity
optimizer_f.zero_grad()
y_t, y_t_adv = model(x_t)
loss_ground_truth = args.trade_off * regression_disparity(y_t, y_t_adv, weight_t, mode='min')
loss_ground_truth.backward()
optimizer_f.step()
# do update step
model.step()
lr_scheduler_f.step()
lr_scheduler_h.step()
lr_scheduler_h_adv.step()
# measure accuracy and record loss
_, avg_acc_s, cnt_s, pred_s = accuracy(y_s.detach().cpu().numpy(),
label_s.detach().cpu().numpy())
acc_s.update(avg_acc_s, cnt_s)
_, avg_acc_t, cnt_t, pred_t = accuracy(y_t.detach().cpu().numpy(),
label_t.detach().cpu().numpy())
acc_t.update(avg_acc_t, cnt_t)
_, avg_acc_s_adv, cnt_s_adv, pred_s_adv = accuracy(y_s_adv.detach().cpu().numpy(),
label_s.detach().cpu().numpy())
acc_s_adv.update(avg_acc_s_adv, cnt_s)
_, avg_acc_t_adv, cnt_t_adv, pred_t_adv = accuracy(y_t_adv.detach().cpu().numpy(),
label_t.detach().cpu().numpy())
acc_t_adv.update(avg_acc_t_adv, cnt_t)
losses_s.update(loss_s, cnt_s)
losses_gf.update(loss_ground_false, cnt_s)
losses_gt.update(loss_ground_truth, cnt_s)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
if visualize is not None:
visualize(x_s[0], pred_s[0] * args.image_size / args.heatmap_size, "source_{}_pred".format(i))
visualize(x_s[0], meta_s['keypoint2d'][0], "source_{}_label".format(i))
visualize(x_t[0], pred_t[0] * args.image_size / args.heatmap_size, "target_{}_pred".format(i))
visualize(x_t[0], meta_t['keypoint2d'][0], "target_{}_label".format(i))
visualize(x_s[0], pred_s_adv[0] * args.image_size / args.heatmap_size, "source_adv_{}_pred".format(i))
visualize(x_t[0], pred_t_adv[0] * args.image_size / args.heatmap_size, "target_adv_{}_pred".format(i))
def validate(val_loader, model, criterion, visualize, args: argparse.Namespace):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.2e')
acc = AverageMeterDict(val_loader.dataset.keypoints_group.keys(), ":3.2f")
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, acc['all']],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (x, label, weight, meta) in enumerate(val_loader):
x = x.to(device)
label = label.to(device)
weight = weight.to(device)
# compute output
y = model(x)
loss = criterion(y, label, weight)
# measure accuracy and record loss
losses.update(loss.item(), x.size(0))
acc_per_points, avg_acc, cnt, pred = accuracy(y.cpu().numpy(),
label.cpu().numpy())
group_acc = val_loader.dataset.group_accuracy(acc_per_points)
acc.update(group_acc, x.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
if visualize is not None:
visualize(x[0], pred[0] * args.image_size / args.heatmap_size, "val_{}_pred.jpg".format(i))
visualize(x[0], meta['keypoint2d'][0], "val_{}_label.jpg".format(i))
return acc.average()
if __name__ == '__main__':
architecture_names = sorted(
name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name])
)
dataset_names = sorted(
name for name in datasets.__dict__
if not name.startswith("__") and callable(datasets.__dict__[name])
)
parser = argparse.ArgumentParser(description='Source Only for Keypoint Detection Domain Adaptation')
# dataset parameters
parser.add_argument('source_root', help='root path of the source dataset')
parser.add_argument('target_root', help='root path of the target dataset')
parser.add_argument('-s', '--source', help='source domain(s)')
parser.add_argument('-t', '--target', help='target domain(s)')
parser.add_argument('--resize-scale', nargs='+', type=float, default=(0.6, 1.3),
help='scale range for the RandomResizeCrop augmentation')
parser.add_argument('--rotation', type=int, default=180,
help='rotation range of the RandomRotation augmentation')
parser.add_argument('--image-size', type=int, default=256,
help='input image size')
parser.add_argument('--heatmap-size', type=int, default=64,
help='output heatmap size')
# model parameters
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet101',
choices=architecture_names,
help='backbone architecture: ' +
' | '.join(architecture_names) +
' (default: resnet101)')
parser.add_argument("--pretrain", type=str, default=None,
help="Where restore pretrained model parameters from.")
parser.add_argument("--resume", type=str, default=None,
help="where restore model parameters from.")
parser.add_argument('--num-head-layers', type=int, default=2)
parser.add_argument('--margin', type=float, default=4., help="margin gamma")
parser.add_argument('--trade-off', default=1., type=float,
help='the trade-off hyper-parameter for transfer loss')
# training parameters
parser.add_argument('-b', '--batch-size', default=32, type=int,
metavar='N',
help='mini-batch size (default: 32)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=0.0001, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--lr-gamma', default=0.0001, type=float)
parser.add_argument('--lr-decay', default=0.75, type=float, help='parameter for lr scheduler')
parser.add_argument('--lr-step', default=[45, 60], type=tuple, help='parameter for lr scheduler')
parser.add_argument('--lr-factor', default=0.1, type=float, help='parameter for lr scheduler')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--pretrain_epochs', default=70, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-i', '--iters-per-epoch', default=500, type=int,
help='Number of iterations per epoch')
parser.add_argument('-p', '--print-freq', default=100, type=int,
metavar='N', help='print frequency (default: 100)')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument("--log", type=str, default='src_only',
help="Where to save logs, checkpoints and debugging images.")
parser.add_argument("--phase", type=str, default='train', choices=['train', 'test'],
help="When phase is 'test', only test the model.")
parser.add_argument('--debug', action="store_true",
help='In the debug mode, save images and predictions')
args = parser.parse_args()
print(args)
main(args)
|
the-stack_0_6833 | # -*- coding: utf-8 -*-
#
# wxcast: A Python API and cli to collect weather information.
#
# Copyright (c) 2021 Sean Marlow
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import click
from collections import OrderedDict
from wxcast import api
from wxcast import utils
def print_license(ctx, param, value):
"""
Eager option to print license information and exit.
"""
if not value or ctx.resilient_parsing:
return
click.echo(
'wxcast Copyright (C) 2021 Sean Marlow. (MIT License)\n\n'
'See LICENSE for more information.'
)
ctx.exit()
@click.group()
@click.version_option()
@click.option(
'--license',
expose_value=False,
is_eager=True,
is_flag=True,
callback=print_license,
help='Display license information and exit.'
)
def main():
"""
Retrieve the latest weather information in your terminal.
Data provided by NWS and AVWX.
NWS: https://forecast-v3.weather.gov/documentation \n
AVWX: https://avwx.rest/
"""
pass
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.argument('location')
def forecast(no_color, location):
"""
Retrieve current 7 day forecast for given location.
Location can be a city, address or zip/postal code.
Examples:
wxcast forecast denver
wxcast forecast "denver, co"
:param location: Location string to get forecast for.
:param no_color: If True do not style string output.
"""
try:
response = api.get_seven_day_forecast(location)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
data = OrderedDict(
(d['name'], d['detailedForecast']) for d in response
)
utils.echo_dict(data, no_color)
@click.command()
@click.option(
'-d', '--decoded',
is_flag=True,
help='Decode raw metar to string format.'
)
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.option(
'-t',
'--temp-unit',
default='C',
type=click.Choice(['C', 'F']),
help='Unit of measurement for temperature values. '
'Default: (C).'
)
@click.argument('icao')
def metar(decoded, no_color, temp_unit, icao):
"""
Retrieve the latest METAR given an airport ICAO code.
Example: wxcast metar -d KSLC
:param decoded: Flag to decode the METAR output.
:param no_color: If True do not style string output.
:param icao: The airport ICAO code to retrieve METAR for.
"""
try:
response = api.get_metar(icao, temp_unit, decoded)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
if decoded:
click.echo(
''.join([
utils.style_string(
'At ', no_color, fg='green'
),
utils.style_string(
response['time'], no_color, fg='blue'
),
utils.style_string(
' the conditions are:', no_color, fg='green'
),
'\n'
])
)
spaces = utils.get_max_key(response)
try:
# Try to convert elevation to ft and meters.
response['elevation'] = '{}ft ({}m)'.format(
int(float(response['elevation']) * 3.28084),
response['elevation']
)
except (KeyError, Exception):
pass
utils.echo_dict(response, no_color, spaces=spaces)
else:
utils.echo_style(response, no_color, fg='blue')
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.option(
'-t',
'--temp-unit',
default='C',
type=click.Choice(['C', 'F']),
help='Unit of measurement for temperature values. '
'Default: (C).'
)
@click.argument('station_id')
def conditions(no_color, temp_unit, station_id):
"""
Retrieve the latest conditions given a weather station id.
Example: wxcast conditions KDTW
:param no_color: If True do not style string output.
:param station_id: The weather station id to retrieve conditions for.
"""
try:
response = api.get_metar(station_id, temp_unit, decoded=True)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
        response.pop('station', None)
        response.pop('type', None)
response.pop('sea level pressure', None)
response.pop('remarks', None)
response.pop('elevation', None)
utils.echo_dict(response, no_color)
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
def offices(no_color):
"""
Retrieve the available weather forecast offices (WFO).
Example: wxcast offices
:param no_color: If True do not style string output.
"""
try:
response = api.get_wfo_list()
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
utils.echo_dict(response, no_color)
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.argument('wfo')
def products(no_color, wfo):
"""
Retrieve the available text products for a given wfo.
Example: wxcast products slc
:param no_color: If True do not style string output.
:param wfo: The weather forecast office abbreviation (BOU).
"""
try:
response = api.get_wfo_products(wfo)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
utils.echo_dict(response, no_color)
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.argument('wfo')
@click.argument('product')
def text(no_color, wfo, product):
"""
Retrieve the NWS text product.
Example: wxcast text slc afd
:param no_color: If True do not style string output.
:param wfo: The weather forecast office abbreviation (BOU).
:param product: The text product to retrieve.
"""
try:
response = api.get_nws_product(wfo, product)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
click.echo_via_pager(response)
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.argument('wfo')
def office(no_color, wfo):
"""
Retrieve information for a given wfo.
Example: wxcast info slc
:param no_color: If True do not style string output.
:param wfo: The weather forecast office abbreviation (BOU).
"""
try:
response = api.get_wfo_info(wfo)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
utils.echo_dict(response, no_color)
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.argument('wfo')
def stations(no_color, wfo):
"""
Retrieve a list of stations for a given wfo.
Example: wxcast info slc
:param no_color: If True do not style string output.
:param wfo: The weather forecast office abbreviation (BOU).
"""
try:
response = api.get_stations_for_wfo(wfo)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
utils.echo_style('\n'.join(response), no_color)
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.argument('station_id')
def station(no_color, station_id):
"""
Retrieve info for a weather station.
Example: wxcast station kbna
:param no_color: If True do not style string output.
:param station_id: The weather station id.
"""
try:
response = api.get_station_info(station_id)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
try:
# Try to convert elevation to ft and meters.
response['elevation'] = '{}ft ({}m)'.format(
int(float(response['elevation']) * 3.28084),
response['elevation']
)
except (KeyError, Exception):
pass
utils.echo_dict(response, no_color)
main.add_command(metar)
main.add_command(text)
main.add_command(offices)
main.add_command(products)
main.add_command(forecast)
main.add_command(office)
main.add_command(stations)
main.add_command(station)
main.add_command(conditions)
|
the-stack_0_6834 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from googleapiclient.discovery import build
from httplib2 import Http
import json
from oauth2client import service_account
from google.oauth2 import service_account as google_service_account
import googleapiclient.http
from googleapiclient._auth import authorized_http
import dateparser
import io
import os
# @@@@@@@@ GLOBALS @@@@@@@@
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/ediscovery', 'https://www.googleapis.com/auth/devstorage.full_control']
DEMISTO_MATTER = 'test_search_phishing'
ADMIN_EMAIL = demisto.params()['gsuite_credentials']['identifier'].encode('utf-8')
PRIVATE_KEY_CONTENT = demisto.params()['auth_json'].encode('utf-8')
USE_SSL = not demisto.params().get('insecure', False)
# @@@@@@@@ HELPER FUNCS @@@@@@@@
def validate_input_values(arguments_values_to_verify, available_values):
for value in arguments_values_to_verify:
if value not in available_values:
return_error(
'Argument: \'{}\' is not one of the possible values: {}'.format(value, ', '.join(available_values)))
def get_credentials(additional_scopes=None, delegated_user=ADMIN_EMAIL):
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
if delegated_user == 'me':
delegated_user = ADMIN_EMAIL
scopes = SCOPES
if additional_scopes is not None:
scopes += additional_scopes
try:
json_keyfile = json.loads(PRIVATE_KEY_CONTENT)
if not isinstance(json_keyfile, dict):
json_keyfile = json.loads(json_keyfile)
cred = service_account.ServiceAccountCredentials.from_json_keyfile_dict(json_keyfile,
scopes=scopes)
delegated_creds = cred.create_delegated(delegated_user)
except Exception as e:
LOG('An error occurred in the \'get_credentials\' function.')
err_msg = 'An error occurred while trying to construct an OAuth2 ' \
'ServiceAccountCredentials object - {}'.format(str(e))
return_error(err_msg)
return delegated_creds
def connect():
creds = get_credentials()
try:
service = build('vault', 'v1', http=creds.authorize(Http(disable_ssl_certificate_validation=(not USE_SSL))))
except Exception as e:
LOG('There was an error creating the Vault service in the \'connect\' function.')
err_msg = 'There was an error creating the Vault service - {}'.format(str(e))
return_error(err_msg)
return service
def is_matter_exist(service, matter_name): # Not needed at the moment
"""
Searches for existence of a matter by its name
Note - this is case-sensitive
:param service: Vault service object
:param matter_name: name of the matter to be searched
:return: True if exists, False otherwise.
"""
    existing_matters = get_open_matters(service).get('matters', [])
if any(matter_name == matter['name'] for matter in existing_matters):
return True
return False
def get_open_matters(service):
""" Gets first 10 matters """
open_matters = service.matters().list(state='OPEN').execute()
return open_matters
def get_matter_by_id(service, matter_id):
matter = service.matters().get(matterId=matter_id).execute()
return matter
def get_matters_by_state(service, state):
state = state.upper()
matter_state = state if state in ('OPEN', 'CLOSED', 'DELETED') else 'STATE_UNSPECIFIED'
matter_list = service.matters().list(state=matter_state).execute()
return matter_list
def delete_matter(service, matter_id):
_ = service.matters().delete(matterId=matter_id).execute()
return get_matter_by_id(service, matter_id) # Note - this is different that the other state updates
def close_matter(service, matter_id):
close_response = service.matters().close(matterId=matter_id, body={}).execute()
return close_response['matter']
def reopen_matter(service, matter_id):
reopen_response = service.matters().reopen(matterId=matter_id, body={}).execute()
return reopen_response['matter']
def undelete_matter(service, matter_id):
undeleted_matter = service.matters().undelete(matterId=matter_id, body={}).execute()
return undeleted_matter
def add_held_account(service, matter_id, hold_id, account_id):
held_account = {'accountId': account_id}
return service.matters().holds().accounts().create(matterId=matter_id, holdId=hold_id, body=held_account).execute()
def remove_held_account(service, matter_id, hold_id, account_id):
return service.matters().holds().accounts().delete(matterId=matter_id, holdId=hold_id,
accountId=account_id).execute()
def remove_hold(service, matter_id, hold_id):
return service.matters().holds().delete(matterId=matter_id, holdId=hold_id).execute()
def list_holds(service, matter_id):
"""
Return a list of existing holds
"""
done_paginating = False
response = service.matters().holds().list(matterId=matter_id).execute()
# append first page:
the_holds = response['holds']
# Keep paginating and appending:
while not done_paginating:
if 'nextPageToken' in response:
            response = service.matters().holds().list(matterId=matter_id, pageSize=10, pageToken=response['nextPageToken']).execute()
the_holds.extend(response['holds'])
else:
done_paginating = True
return the_holds
def timeframe_to_utc_zulu_range(timeframe_str):
"""
Converts a time-frame to UTC Zulu format that can be used for startTime and endTime in various Google Vault requests.
"""
try:
parsed_str = dateparser.parse(timeframe_str)
end_time = datetime.utcnow().isoformat() + 'Z' # Current time
start_time = parsed_str.isoformat() + 'Z'
return (start_time, end_time)
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to parse date correctly: {}'.format(err_msg))
else:
raise ex
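# Illustrative behaviour (example values are assumptions, not captured output):
# timeframe_to_utc_zulu_range('7 days') returns a (start_time, end_time) pair of
# ISO-8601 Zulu strings, e.g. ('2021-05-25T12:00:00Z', '2021-06-01T12:00:00Z'),
# matching the startTime/endTime fields used by the query builders below.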
def create_hold_query(hold_name, corpus, accounts, terms, time_frame="", start_time="", end_time=""):
"""
Creates the query that will be used to request the creation of a new hold. Returns the ready-to-be-sent request.
"""
# --- Sanitizing Input ---
corpus = corpus.upper()
if time_frame:
start_time, end_time = timeframe_to_utc_zulu_range(time_frame) # Making it UTC Zulu format
elif start_time:
if not end_time:
end_time = datetime.utcnow().isoformat() + 'Z' # End time will be now, if no end time was given
if isinstance(accounts, unicode):
accounts = accounts.split(',')
# --- Building Request ---
request = {}
mail_query = {} # type: Dict[Any, Any]
accounts_for_query = []
if not terms:
if start_time and end_time:
mail_query = {'startTime': start_time, 'endTime': end_time}
else:
if start_time and end_time:
mail_query = {'startTime': start_time, 'endTime': end_time, 'terms': terms}
# --- Building all small parts into big request object ---
request['name'] = hold_name
request['corpus'] = corpus
if mail_query:
request['query'] = {'mailQuery': mail_query} # Adding the ready mail query
for acc_id in accounts:
accounts_for_query.append({'accountId': acc_id})
request['accounts'] = accounts_for_query
return request
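# Illustrative result of create_hold_query (shape only; the field values here are
# assumptions for documentation purposes):
# {
#     'name': 'phishing-hold',
#     'corpus': 'MAIL',
#     'query': {'mailQuery': {'startTime': '...Z', 'endTime': '...Z', 'terms': 'subject:invoice'}},
#     'accounts': [{'accountId': 'user1@example.com'}, {'accountId': 'user2@example.com'}]
# }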
def create_hold_mail_accounts(service, matter_id, request_body):
"""
Creates a hold in Google Vault
"""
return service.matters().holds().create(matterId=matter_id, body=request_body).execute()
def create_export(service, matter, request_body):
"""
Creates an export in the given matter, with the given request_body (which is the actual JSON for the request).
"""
return service.matters().exports().create(matterId=matter, body=request_body).execute()
def create_mail_export_query(export_name, emails, time_frame, start_time, end_time, terms, org_unit="",
export_pst='True', export_mbox='False', search_method='All Accounts',
include_drafts='True', data_scope='All Data'):
"""
Creates the query that will be used in the request to create a mail export
"""
org_unit_id = org_unit
# --- Sanitizing Input ---
exclude_drafts = 'false'
if time_frame:
start_time, end_time = timeframe_to_utc_zulu_range(time_frame) # Making it UTC Zulu format
elif start_time:
if not end_time:
end_time = datetime.utcnow().isoformat() + 'Z' # End time will be now, if no end time was given
if isinstance(emails, (str, unicode)):
if ',' in emails:
emails = emails.split(',')
else:
emails = [emails]
if str(include_drafts).upper() == 'FALSE':
exclude_drafts = 'true'
if data_scope.upper() == 'HELD DATA':
data_scope = 'HELD_DATA'
if data_scope.upper() == 'ALL DATA':
data_scope = 'ALL_DATA'
if data_scope.upper() == 'UNPROCESSED DATA':
data_scope = 'UNPROCESSED_DATA'
if search_method.upper() == 'ORGANIZATIONAL UNIT(REQUIRES OU ARGUMENT)':
search_method = 'ORG_UNIT'
if search_method.upper() == 'ALL ACCOUNTS':
search_method = 'ENTIRE_ORG'
if search_method.upper() == 'SPECIFIC ACCOUNTS(REQUIRES EMAILS ARGUMENT)':
search_method = 'ACCOUNT'
# --- Building Request ---
request = {}
query = {}
emails_for_query = []
account_info = {'emails': []} # type: Dict[Any, Any]
org_unit_info = {'orgUnitId': org_unit_id}
corpus = 'MAIL'
export_format = 'PST' # Default
if export_mbox.upper() == 'TRUE':
export_format = 'MBOX'
mail_options = {
'exportFormat': export_format
}
# --- Building all small parts into big request object ---
query['dataScope'] = data_scope
query['searchMethod'] = search_method
query['corpus'] = corpus
query['mailOptions'] = {'excludeDrafts': exclude_drafts}
if start_time and end_time:
query['startTime'] = start_time
query['endTime'] = end_time
if terms:
query['terms'] = terms
if emails: # If user specified emails
for email in emails: # Go over all of them
emails_for_query.append(email) # Add them to the list
account_info['emails'] = emails_for_query # Add the list to the account_info dictionary
query['accountInfo'] = account_info # Add the account_info dictionary into the query object
if search_method == 'ORG_UNIT':
query['orgUnitInfo'] = org_unit_info
request['query'] = query # Adding query AFTER IT'S COMPLETED
request['exportOptions'] = {'mailOptions': mail_options}
request['name'] = export_name
return request
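# Illustrative result of create_mail_export_query (shape only; values are assumptions):
# {
#     'name': 'phishing-export',
#     'query': {
#         'dataScope': 'ALL_DATA', 'searchMethod': 'ACCOUNT', 'corpus': 'MAIL',
#         'mailOptions': {'excludeDrafts': 'false'},
#         'startTime': '...Z', 'endTime': '...Z', 'terms': 'subject:invoice',
#         'accountInfo': {'emails': ['user1@example.com']}
#     },
#     'exportOptions': {'mailOptions': {'exportFormat': 'PST'}}
# }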
def create_drive_export_query(export_name, emails, team_drives, time_frame, start_time, end_time, terms, org_unit="",
search_method='Specific Accounts(requires emails argument)', include_teamdrives='True',
data_scope='All Data'):
"""
Creates the query that will be used in the request to create a groups export
"""
org_unit_id = org_unit
# --- Sanitizing Input ---
    # Normalize the flag instead of discarding the caller's include_teamdrives value.
    include_teamdrives = 'false' if str(include_teamdrives).upper() == 'FALSE' else 'true'
if time_frame:
start_time, end_time = timeframe_to_utc_zulu_range(time_frame) # Making it UTC Zulu format
elif start_time:
if not end_time:
end_time = datetime.utcnow().isoformat() + 'Z' # End time will be now, if no end time was given
if isinstance(emails, (str, unicode)): # If emails were specified, making it a list:
if ',' in emails:
emails = emails.split(',')
else:
emails = [emails]
if isinstance(team_drives, (str, unicode)): # If team_drives were specified, making it a list:
if ',' in team_drives:
team_drives = team_drives.split(',')
else:
team_drives = [team_drives]
if str(include_teamdrives).upper() == 'FALSE':
include_teamdrives = 'false'
if data_scope.upper() == 'HELD DATA':
data_scope = 'HELD_DATA'
if data_scope.upper() == 'ALL DATA':
data_scope = 'ALL_DATA'
if data_scope.upper() == 'UNPROCESSED DATA':
data_scope = 'UNPROCESSED_DATA'
if search_method.upper() == 'ORGANIZATIONAL UNIT(REQUIRES OU ARGUMENT)':
search_method = 'ORG_UNIT'
if search_method.upper() == 'SPECIFIC ACCOUNTS(REQUIRES EMAILS ARGUMENT)':
search_method = 'ACCOUNT'
if search_method.upper() == 'TEAM DRIVE':
search_method = 'TEAM_DRIVE'
# --- Building Request ---
request = {}
query = {}
emails_for_query = []
teamdrives_for_query = []
account_info = {'emails': []} # type: Dict[Any, Any]
teamdrive_info = {'teamDriveIds': []} # type: Dict[Any, Any]
org_unit_info = {'orgUnitId': org_unit_id}
corpus = 'DRIVE'
# --- Building all small parts into big request object ---
query['dataScope'] = data_scope
query['searchMethod'] = search_method
query['corpus'] = corpus
query['driveOptions'] = {'includeTeamDrives': include_teamdrives}
if start_time and end_time:
query['startTime'] = start_time
query['endTime'] = end_time
if terms:
query['terms'] = terms
if emails: # If user specified emails
for email in emails: # Go over all of them
emails_for_query.append(email) # Add them to the list
account_info['emails'] = emails_for_query # Add the list to the account_info dictionary
if team_drives and include_teamdrives.upper() == 'TRUE': # If user specified team_drives and not emails
for teamdrive_id in team_drives:
teamdrives_for_query.append(teamdrive_id)
teamdrive_info['teamDriveIds'] = teamdrives_for_query
if search_method == 'ORG_UNIT':
query['orgUnitInfo'] = org_unit_info
if search_method == 'TEAM_DRIVE':
query['teamDriveInfo'] = teamdrive_info
if search_method == 'ACCOUNT':
# Add the account_info dictionary into the query object.
# This line SHOULD NOT exist if the user wants to use team_drives.
query['accountInfo'] = account_info
request['query'] = query # Adding query AFTER IT'S COMPLETED
request['name'] = export_name
return request
def create_groups_export_query(export_name, emails, time_frame, start_time, end_time, terms, search_method,
export_pst='True', export_mbox='False', data_scope='All Data'):
"""
Creates the query that will be used in the request to create a groups export
"""
# --- Sanitizing Input ---
if time_frame:
start_time, end_time = timeframe_to_utc_zulu_range(time_frame) # Making it UTC Zulu format
elif start_time:
if not end_time:
end_time = datetime.utcnow().isoformat() + 'Z' # End time will be now, if no end time was given
if isinstance(emails, (str, unicode)):
if ',' in emails:
emails = emails.split(',')
else:
emails = [emails]
if data_scope.upper() == 'HELD DATA':
data_scope = 'HELD_DATA'
if data_scope.upper() == 'ALL DATA':
data_scope = 'ALL_DATA'
if data_scope.upper() == 'UNPROCESSED DATA':
data_scope = 'UNPROCESSED_DATA'
# --- Building Request ---
request = {}
query = {}
emails_for_query = []
account_info = {'emails': []} # type: Dict[Any, Any]
corpus = 'GROUPS'
export_format = 'PST' # Default
if export_mbox.upper() == 'TRUE':
export_format = 'MBOX'
groups_options = {
'exportFormat': export_format
}
# --- Building all small parts into big request object ---
query['dataScope'] = data_scope
query['searchMethod'] = search_method
query['corpus'] = corpus
if start_time and end_time:
query['startTime'] = start_time
query['endTime'] = end_time
if terms:
query['terms'] = terms
if emails: # If user specified emails
for email in emails: # Go over all of them
emails_for_query.append(email) # Add them to the list
account_info['emails'] = emails_for_query # Add the list to the account_info dictionary
query['accountInfo'] = account_info # Add the account_info dictionary into the query object
request['query'] = query # Adding query AFTER IT'S COMPLETED
request['exportOptions'] = {'groupsOptions': groups_options}
request['name'] = export_name
return request
def get_export_by_id(service, matter_id, export_id):
return service.matters().exports().get(matterId=matter_id, exportId=export_id).execute()
def list_held_accounts(service, matter_id, hold_id):
return service.matters().holds().accounts().list(matterId=matter_id, holdId=hold_id).execute()['accounts']
def remove_held_accounts(service, matter_id, hold_id):
pass
def download_storage_object(object_ID, bucket_name):
service = connect_to_storage()
req = service.objects().get_media(bucket=bucket_name, object=object_ID) # pylint: disable=no-member
out_file = io.BytesIO()
downloader = googleapiclient.http.MediaIoBaseDownload(out_file, req)
done = False
while not done:
done = downloader.next_chunk()[1]
return out_file
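# Illustrative sketch (editor's addition, never called by the integration): how the helper
# above is typically chained once an export has COMPLETED. The bucket name and download ID
# are whatever gvault-export-status reported; nothing here is hard-coded.
def _example_download_export_to_disk(bucket_name, download_id, destination_path):
    exported_zip = download_storage_object(download_id, bucket_name)  # io.BytesIO
    with open(destination_path, 'wb') as file_handle:
        file_handle.write(exported_zip.getvalue())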
def get_storage_credentials():
try:
privateKeyJson = json.loads(PRIVATE_KEY_CONTENT)
if not isinstance(privateKeyJson, dict):
privateKeyJson = json.loads(privateKeyJson)
crads = google_service_account.Credentials.from_service_account_info(privateKeyJson, scopes=SCOPES,
subject=ADMIN_EMAIL)
except Exception as e:
LOG('An error occurred in the \'get_storage_credentials\' function.')
err_msg = 'An error occurred while trying to construct an OAuth2 ' \
'Storage Credentials object - {}'.format(str(e))
return_error(err_msg)
return crads
def connect_to_storage():
try:
creds = get_storage_credentials()
ptth = authorized_http(creds)
ptth.disable_ssl_certificate_validation = (not USE_SSL)
service = build('storage', 'v1', http=ptth)
except Exception as e:
LOG('There was an error creating the Storage service in the \'connect_to_storage\' function.')
err_msg = 'There was an error creating the Storage service - {}'.format(str(e))
return_error(err_msg)
return service
def get_object_mame_by_type(objectsArr, extension):
for file in objectsArr:
objName = str(file.get('objectName'))
if (objName.endswith(extension)):
return objName
def build_key_val_pair(tagDict):
    demisto.info('this is key: ')
    demisto.info(tagDict['@TagName'])
    demisto.info('this is value: ')
    demisto.info(tagDict['@TagValue'])
key = filter(str.isalnum, str(tagDict['@TagName']))
value = tagDict['@TagValue'].encode('utf-8')
keyValPair = {key: value}
return keyValPair
def build_document_dict(document):
file_info = document['Files']['File']['ExternalFile']
newDocumentDict = {
'DocType': os.path.splitext(file_info['@FileName'])[1][1:].strip().lower(),
'MD5': file_info['@Hash']
}
tags = document['Tags']['Tag']
for currentTagDict in tags:
newDocumentDict.update(build_key_val_pair(currentTagDict))
return newDocumentDict
def build_dict_list(documentsArr):
documentsDictList = []
for document in documentsArr:
currentDocumentDict = build_document_dict(document)
documentsDictList.append(currentDocumentDict)
return documentsDictList
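# Illustrative sketch (editor's addition): a single <Document> element of the export XML,
# once it has passed through build_document_dict(), ends up roughly like the dict below.
# The hash, tag names and values are hypothetical placeholders; real exports carry Vault's
# own metadata tags.
def _example_parsed_document():
    return {
        'DocType': 'pdf',
        'MD5': '9e107d9d372bb6826bd81d3542a419d6',
        'Title': 'Quarterly report',
        'Author': 'alice@example.com',
    }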
def get_current_matter_from_context(matter_id):
context_matter = demisto.dt(demisto.context(), 'GoogleVault.Matter(val.MatterID === "{0}")'.format(matter_id))
    context_matter = context_matter[0] if isinstance(context_matter, list) and context_matter else context_matter
if not context_matter:
context_matter = {
'MatterID': matter_id,
'Export': []
}
return context_matter
def populate_matter_with_export(current_matter, current_export):
# add new export to matter
exports = current_matter.get('Export', [])
if type(exports) is dict:
exports = [exports]
# remove duplicate export after new updated exports were entered
filtered_export = list(filter(lambda export:
export['ExportID'] != current_export['ExportID'],
exports))
filtered_export.append(current_export)
current_matter['Export'] = filtered_export
return current_matter
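# Illustrative sketch (editor's addition): populate_matter_with_export() replaces an existing
# entry with the same ExportID instead of appending a duplicate. The IDs below are placeholders.
def _example_export_deduplication():
    matter = {'MatterID': 'matter-1', 'Export': [{'ExportID': 'export-1', 'Status': 'IN_PROGRESS'}]}
    updated = populate_matter_with_export(matter, {'ExportID': 'export-1', 'Status': 'COMPLETED'})
    return updated['Export']  # a single entry, now with Status COMPLETED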
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ ACTUAL FUNCS @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
def list_matters_command():
"""
Lists all matters in the project, with their corresponding state.
"""
try:
service = connect()
state = demisto.args().get('state', 'STATE_UNSPECIFIED')
validate_input_values([state], ['All', 'Open', 'Closed', 'Deleted', 'STATE_UNSPECIFIED', ''])
matters = (get_matters_by_state(service, state))['matters']
if not matters:
demisto.results('No matters found.')
else:
output = []
context_output = []
for matter in matters:
output.append({
'Matter Name': matter.get('name'),
'Matter ID': matter.get('matterId'),
'Matter State': matter.get('state')
})
context_output.append({
'Name': matter.get('name'),
'MatterID': matter.get('matterId'),
'State': matter.get('state') # Getting new state
})
markdown = '' # Use this to add extra line
title = ""
if state == 'All' or not state:
title = 'Here are all your matters'
else:
title = 'Here are your {} matters'.format(state.lower())
markdown += tableToMarkdown(title, output, ['Matter Name', 'Matter ID', 'Matter State'])
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': matters,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter(val.MatterID === obj.MatterID)': context_output
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to list matters. Error: {}'.format(err_msg))
else:
raise ex
def create_matter_command():
try:
service = connect()
matter_name = demisto.getArg('name')
matter_description = demisto.getArg('description')
matter_content = {
'name': matter_name,
'description': matter_description,
}
matter = service.matters().create(body=matter_content).execute() # pylint: disable=no-member
markdown = ""
if matter_description:
markdown = 'Matter: {} was created successfully with description: {}.\nID: {}.'.format(matter_name,
matter_description,
matter.get(
'matterId'))
else:
markdown = 'Matter: {} was created successfully without a description.\nID: {}.'.format(matter_name,
matter.get(
'matterId'))
title = 'Matter creation successful.'
markdown_matter = []
markdown_matter.append({
'Matter Name': matter.get('name'),
'Matter ID': matter.get('matterId'),
'Matter State': matter.get('state')
})
markdown += tableToMarkdown(title, markdown_matter, ['Matter Name', 'Matter ID',
'Matter State']) # Why is the title displayed in a weird way?
output_context = []
output_context.append({
'Name': matter.get('name'),
'MatterID': matter.get('matterId'),
'State': matter.get('state')
})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': matter,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter(val.MatterID === obj.MatterID)': output_context
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to create matter. Error: {}'.format(err_msg))
else:
raise ex
def update_matter_state_command():
"""
* Note: This updates context only if a change in the current state was successful
"""
try:
service = connect()
matter_id = demisto.getArg('matterID')
wanted_state = demisto.getArg('state')
validate_input_values([wanted_state], ['CLOSE', 'DELETE', 'REOPEN', 'UNDELETE'])
matter_found = get_matter_by_id(service, matter_id)
current_state = matter_found.get('state')
if current_state: # if a matter was found with that ID:
context_output = []
result_of_update = ""
# Dealing with CLOSE:
if wanted_state == 'CLOSE':
if current_state == 'DELETED':
result_of_update = 'Matter is deleted and so it cannot be closed. It is possible to re-open it ' \
'and then close.'
elif current_state == 'CLOSED':
demisto.results('Matter is already closed.')
elif current_state == 'OPEN':
try:
close_response = close_matter(service, matter_id)
result_of_update = 'Matter was successfully closed.'
except Exception as ex:
if 'Matters have users on hold' in str(ex):
demisto.log('{}'.format(ex))
return_error('The matter has holds that prevent it from being closed.')
elif 'Quota exceeded for quota metric' in str(ex):
return_error('Quota for Google Vault API exceeded')
else:
raise ex
# Dealing with DELETE:
elif wanted_state == 'DELETE':
if current_state == 'OPEN':
try:
# Todo: check if contains holds. If it does, return error to user
close_response = close_matter(service, matter_id) # noqa: F841
_ = delete_matter(service, matter_id)
result_of_update = 'Matter was {} and is now DELETED.'.format(current_state)
except Exception as ex:
if 'Matters have users on hold' in str(ex):
demisto.log('{}'.format(ex))
return_error('The matter has holds that prevent it from being deleted.')
elif 'Quota exceeded for quota metric' in str(ex):
return_error('Quota for Google Vault API exceeded')
else:
raise ex
elif current_state == 'CLOSED':
try:
_ = delete_matter(service, matter_id)
                    result_of_update = 'Matter was {} and is now DELETED.'.format(current_state)
except Exception as ex:
if 'Matters have users on hold' in str(ex):
demisto.log('{}'.format(ex))
return_error('The matter has holds that prevent it from being deleted.')
elif 'Quota exceeded for quota metric' in str(ex):
return_error('Quota for Google Vault API exceeded')
else:
raise ex
elif current_state == 'DELETED':
demisto.results('Matter is already deleted.')
# Dealing with REOPEN:
elif wanted_state == 'REOPEN':
if current_state == 'OPEN':
demisto.results('Matter is already open.')
elif current_state == 'CLOSED':
_ = reopen_matter(service, matter_id)
result_of_update = 'Matter was {} and is now OPEN.'.format(current_state)
elif current_state == 'DELETED':
_ = undelete_matter(service, matter_id)
_ = reopen_matter(service, matter_id)
result_of_update = 'Matter was {} and is now OPEN.'.format(current_state)
# Dealing with UNDELETE:
elif wanted_state == 'UNDELETE':
if current_state == 'OPEN':
demisto.results('Matter is already open.')
elif current_state == 'CLOSED':
demisto.results('Matter is closed at the moment.')
elif current_state == 'DELETED':
_ = undelete_matter(service, matter_id)
result_of_update = 'Matter was {} and is now CLOSED.'.format(current_state)
if result_of_update: # If an update was done then update context:
context_output.append({
'Name': matter_found.get('name'),
'MatterID': matter_found.get('matterId'),
'State': get_matter_by_id(service, matter_id).get('state') # Getting new state
})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': result_of_update,
'EntryContext': {
'GoogleVault.Matter(val.MatterID === obj.MatterID)': context_output
}
})
else:
demisto.results('No matter was found with that ID.') # Todo: never gets here. Gotta catch the exception
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to update matter. Error: {}'.format(err_msg))
else:
raise ex
def add_account_to_hold_command(): # Todo: Not sure if context is good (It works, but maybe not according to conventions)
try:
service = connect()
matter_id = demisto.getArg('matterID')
hold_id = demisto.getArg('holdID')
account_id = demisto.getArg('accountID')
_ = add_held_account(service, matter_id, hold_id, account_id)
msg_to_usr = 'Account {} was successfully added to hold {} in matter {}'.format(account_id, hold_id, matter_id)
context_output = []
context_output.append({
'ID': hold_id,
'matterID': matter_id,
'HeldAccount': {
'accountID': account_id,
'IsHeld': True
}
})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': msg_to_usr,
'EntryContext': {
'GoogleVault.Hold(val.ID === obj.ID)': context_output
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to add account to hold. Error: {}'.format(err_msg))
else:
raise ex
def search_matter_command():
"""
* This can be highly optimized. What it currently does is search ALL matters and then filter by name / ID
* If a matter with an ID is found, there's no need to keep on searching. This can be optimized too.
* Note - this is case INSENSITIVE. Searching for 'MatTER1' will find 'matter1' too.
"""
try:
service = connect()
wanted_name = demisto.getArg('matterName')
wanted_id = demisto.getArg('matterID')
if wanted_name or wanted_id:
if wanted_name:
wanted_name = wanted_name.lower()
if wanted_id:
wanted_id = wanted_id.lower()
else:
demisto.results('No name or ID were specified. Please specify at least one of them.')
sys.exit(0)
matters = get_matters_by_state(service, state='STATE_UNSPECIFIED')['matters']
output = []
markdown_matters = []
found_anything = False
for matter in matters:
if matter.get('name').lower() == wanted_name or matter.get('matterId').lower() == wanted_id:
found_anything = True
markdown_matters.append({
'Matter Name': matter.get('name'),
'Matter ID': matter.get('matterId'),
'Matter State': matter.get('state')
})
output.append({
'Name': matter.get('name'),
'MatterID': matter.get('matterId'),
'State': matter.get('state')
})
if not found_anything: # If finished for loop through matters and no matter was found
demisto.results('No matters found.')
else:
markdown = '' # Use this to add extra line
if wanted_name:
title = 'Here are matters that have the name {}'.format(wanted_name)
else:
title = 'Here is the matter with ID {}'.format(wanted_id)
markdown += tableToMarkdown(title, markdown_matters, ['Matter Name', 'Matter ID', 'Matter State'])
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': markdown_matters,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter(val.MatterID === obj.MatterID)': output
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to search matter. Error: {}'.format(err_msg))
else:
raise ex
def remove_account_from_hold_command():
try:
service = connect()
matter_id = demisto.getArg('matterID')
hold_id = demisto.getArg('holdID')
account_id = demisto.getArg('accountID')
_ = remove_held_account(service, matter_id, hold_id, account_id)
msg_to_usr = 'Account {} was successfully removed from hold {} in matter {}'.format(account_id, hold_id,
matter_id)
context_output = []
context_output.append({
'matterID': matter_id,
'ID': hold_id,
'HeldAccount': { # Does this allow only 1 HeldAccount to exist in a hold?
'ID': account_id,
'IsHeld': False
},
})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': msg_to_usr,
'EntryContext': {
'GoogleVault.Hold(val.ID === obj.ID)': context_output
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to remove account from hold. Error: {}'.format(err_msg))
else:
raise ex
def delete_hold_command():
try:
service = connect()
matter_id = demisto.getArg('matterID')
hold_id = demisto.getArg('holdID')
_ = remove_hold(service, matter_id, hold_id)
msg_to_usr = 'Hold {} was successfully deleted from matter {}'.format(hold_id, matter_id)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': msg_to_usr,
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to delete hold. Error: {}'.format(err_msg))
else:
raise ex
def list_holds_command():
try:
service = connect()
matter_id = demisto.getArg('matterID')
holds = list_holds(service, matter_id)
if not holds:
demisto.results('No holds found.')
else:
output = []
context_output = []
for hold in holds:
output.append({
'Matter ID': matter_id,
'Hold Name': hold.get('name'),
'Hold ID': hold.get('holdId')
})
context_output.append({
'name': hold.get('name'),
'ID': hold.get('holdId'),
'MatterID': matter_id
})
markdown = '' # Use this to add extra line
title = 'Here are all the holds under matter {}.'.format(matter_id)
markdown += tableToMarkdown(title, output, ['Hold Name', 'Hold ID', 'Matter ID'])
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': holds,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Hold(val.ID === obj.ID)': context_output
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to list holds. Error: {}'.format(err_msg))
else:
raise ex
def create_hold_command():
service = connect()
matter_id = demisto.getArg('matterID')
hold_name = demisto.getArg('holdName')
corpus = demisto.getArg('corpus')
accounts = demisto.getArg('accountID')
time_frame = demisto.getArg('timeFrame')
start_time = demisto.getArg('startTime')
end_time = demisto.getArg('endTime')
terms = demisto.getArg('terms')
validate_input_values([corpus], ['Mail', 'Drive', 'Groups'])
query = create_hold_query(hold_name, corpus, accounts, time_frame, start_time, end_time, terms)
try:
response = create_hold_mail_accounts(service, matter_id, query)
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to create hold. Error: {}'.format(err_msg))
else:
raise ex
hold_id = response['holdId']
output = []
context_output = []
output.append({
'Hold Name': hold_name,
'Hold ID': hold_id
})
context_output.append({
'name': hold_name,
'ID': hold_id,
'matterID': matter_id
})
markdown = '' # Use this to add extra line
title = 'Here are the details of your newly created hold:'
markdown += tableToMarkdown(title, output, ['Hold Name', 'Hold ID'])
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': {'Hold Name': hold_name, 'Hold ID': hold_id},
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Hold(val.ID === obj.ID)': context_output
}
})
def create_mail_export_command():
"""
Creates a mail export in Google Vault
"""
service = connect()
matter_id = demisto.getArg('matterID')
export_name = demisto.getArg('exportName')
data_scope = demisto.getArg('dataScope')
search_method = demisto.getArg('searchMethod')
emails = demisto.getArg('emails')
include_drafts = demisto.getArg('includeDrafts')
start_time = demisto.getArg('startTime')
end_time = demisto.getArg('endTime')
time_frame = demisto.getArg('timeFrame')
terms = demisto.getArg('terms')
export_pst = demisto.getArg('exportPST')
export_mbox = demisto.getArg('exportMBOX')
org_unit = demisto.getArg('ou')
validate_input_values([include_drafts, export_pst, export_mbox], ['true', 'false', ''])
validate_input_values([data_scope], ['All Data', 'Held Data', 'Unprocessed Data'])
validate_input_values([search_method], ['All Accounts', 'Specific Accounts(requires emails argument)',
'Organizational Unit(requires ou argument)'])
query = create_mail_export_query(export_name, emails, time_frame, start_time, end_time, terms, org_unit, export_pst,
export_mbox, search_method, include_drafts, data_scope)
try:
response = create_export(service, matter_id, query)
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to create export. Error: {}'.format(err_msg))
else:
raise ex
create_time = response.get('createTime')
export_id = response.get('id')
title = 'A new export has been created successfully:\n'
output_for_markdown = { # This one is for tableToMarkdown to correctly map
'Matter ID': matter_id,
'Export ID': export_id,
'Export Name': export_name,
'Created Time': create_time
}
markdown = tableToMarkdown(title, output_for_markdown, ['Matter ID', 'Export ID', 'Export Name', 'Created Time'])
new_export = {
'MatterID': matter_id,
'ExportID': export_id,
'Name': export_name,
'CreateTime': create_time
}
context_matter = get_current_matter_from_context(matter_id)
new_matter = populate_matter_with_export(context_matter, new_export)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': response,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter(val.MatterID === "{0}")'.format(matter_id): new_matter
}
})
def create_drive_export_command():
service = connect()
matter_id = demisto.getArg('matterID')
export_name = demisto.getArg('exportName')
data_scope = demisto.getArg('dataScope')
search_method = demisto.getArg('searchMethod')
emails = demisto.getArg('emails')
org_unit = demisto.getArg('ou')
team_drives = demisto.getArg('teamDrive')
include_teamdrives = demisto.getArg('includeTeamDrives')
time_frame = demisto.getArg('timeFrame')
start_time = demisto.getArg('startTime')
end_time = demisto.getArg('endTime')
terms = demisto.getArg('terms')
validate_input_values([include_teamdrives], ['true', 'false', ''])
validate_input_values([data_scope], ['All Data', 'Held Data', 'Unprocessed Data'])
validate_input_values([search_method], ['Team Drive', 'Specific Accounts(requires emails argument)',
'Organizational Unit(requires ou argument)'])
query = create_drive_export_query(export_name, emails, team_drives, time_frame, start_time, end_time, terms,
org_unit, search_method, include_teamdrives, data_scope)
try:
response = create_export(service, matter_id, query)
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to create export. Error: {}'.format(err_msg))
else:
raise ex
create_time = response.get('createTime')
export_id = response.get('id')
new_export = {
'MatterID': matter_id,
'ExportID': export_id,
'Name': export_name,
'CreateTime': create_time
}
context_matter = get_current_matter_from_context(matter_id)
new_matter = populate_matter_with_export(context_matter, new_export)
title = 'A new export has been created successfully:\n'
output_for_markdown = { # This one is for tableToMarkdown to correctly map
'Matter ID': matter_id,
'Export ID': export_id,
'Export Name': export_name,
'Created Time': create_time
}
markdown = tableToMarkdown(title, output_for_markdown, ['Matter ID', 'Export ID', 'Export Name', 'Created Time'])
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': response,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter(val.MatterID === "{0}")'.format(matter_id): new_matter
}
})
def create_groups_export_command():
service = connect()
matter_id = demisto.getArg('matterID')
export_name = demisto.getArg('exportName')
data_scope = demisto.getArg('dataScope')
search_method = 'ACCOUNT' # Hard-coded only for groups export
emails = demisto.getArg('groups')
start_time = demisto.getArg('startTime')
end_time = demisto.getArg('endTime')
time_frame = demisto.getArg('timeFrame')
terms = demisto.getArg('terms')
export_pst = demisto.getArg('exportPST')
export_mbox = demisto.getArg('exportMBOX')
validate_input_values([export_pst, export_mbox], ['true', 'false', ''])
validate_input_values([data_scope], ['All Data', 'Held Data', 'Unprocessed Data'])
query = create_groups_export_query(export_name, emails, time_frame, start_time, end_time, terms, search_method,
export_pst, export_mbox, data_scope)
try:
response = create_export(service, matter_id, query)
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to create export. Error: {}'.format(err_msg))
else:
raise ex
create_time = response.get('createTime')
export_id = response.get('id')
new_export = {
'MatterID': matter_id,
'ExportID': export_id,
'Name': export_name,
'CreateTime': create_time
}
context_matter = get_current_matter_from_context(matter_id)
new_matter = populate_matter_with_export(context_matter, new_export)
title = 'A new export has been created successfully:\n'
output_for_markdown = { # This one is for tableToMarkdown to correctly map
'Matter ID': matter_id,
'Export ID': export_id,
'Export Name': export_name,
'Created Time': create_time
}
markdown = tableToMarkdown(title, output_for_markdown, ['Matter ID', 'Export ID', 'Export Name', 'Created Time'])
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': response,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter(val.MatterID === "{0}")'.format(matter_id): new_matter
}
})
def get_multiple_exports_command():
export_IDs = argToList(demisto.getArg('exportIDS'))
matter_id = demisto.getArg('matterId')
id_concatenation = demisto.getArg('queryIDS')
if id_concatenation:
if '#' not in id_concatenation:
return_error(
                'Should enter a concatenation of MatterID and ExportID with "#" delimiter, such as: <Matter_ID>#<ExportID>')
matter_id, export_id = id_concatenation.split('#')
export_IDs = [export_id]
if not (matter_id and export_IDs):
        return_error('Missing parameter MatterID or ExportID')
current_matter = get_current_matter_from_context(matter_id)
for export_id in export_IDs:
new_export = get_export_command(export_id, matter_id)
current_matter = populate_matter_with_export(current_matter, new_export)
demisto.results({
'ContentsFormat': formats['text'],
'Contents': '',
'Type': entryTypes['note'],
'EntryContext': {
'GoogleVault.Matter(val.MatterID === "{0}")'.format(matter_id): current_matter
}
})
def get_export_command(export_id, matter_id):
service = connect()
try:
response = get_export_by_id(service, matter_id, export_id)
export_name = response.get('name')
export_status = response.get('status')
create_time = response.get('createTime')
bucket_name = response.get('cloudStorageSink').get('files')[0].get(
'bucketName') if export_status == 'COMPLETED' else ''
zip_object_name = get_object_mame_by_type(response.get('cloudStorageSink').get('files'),
'.zip') if export_status == 'COMPLETED' else ''
xml_object_name = get_object_mame_by_type(response.get('cloudStorageSink').get('files'),
'.xml') if export_status == 'COMPLETED' else ''
        title = 'Your Export details:\n'
output_for_markdown = { # This one is for tableToMarkdown to correctly map
'Matter ID': matter_id,
'Export ID': export_id,
'Export Name': export_name,
'Status': export_status,
'Created Time': create_time,
'Bucket Name(for download)': bucket_name,
'Download ID': zip_object_name,
'View ID': xml_object_name
}
if (export_status == 'COMPLETED'):
headers = ['Matter ID', 'Export ID', 'Export Name', 'Status', 'Created Time', 'Bucket Name(for download)',
'Download ID', 'View ID']
else:
headers = ['Matter ID', 'Export ID', 'Export Name', 'Status', 'Created Time']
markdown = tableToMarkdown(title, output_for_markdown, headers)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': response,
'HumanReadable': markdown,
})
export_status = {
'MatterID': matter_id,
'ExportID': export_id,
'ExportName': export_name,
'Status': export_status,
'BucketName': bucket_name,
'DownloadID': zip_object_name,
'ViewID': xml_object_name
}
return export_status
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to get export. Error: {}'.format(err_msg))
else:
raise ex
def download_export_command():
try:
bucket_name = demisto.getArg('bucketName')
download_ID = demisto.getArg('downloadID')
out_file = download_storage_object(download_ID, bucket_name)
demisto.results(fileResult(demisto.uniqueFile() + '.zip', out_file.getvalue()))
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to download export. Error: {}'.format(err_msg))
else:
raise ex
def download_and_sanitize_export_results(object_ID, bucket_name, max_results):
out_file = download_storage_object(object_ID, bucket_name)
out_file_json = json.loads(xml2json(out_file.getvalue()))
if not out_file_json['Root']['Batch'].get('Documents'):
demisto.results('The export given contains 0 documents')
sys.exit(0)
documents = out_file_json['Root']['Batch']['Documents']['Document']
if type(documents) is dict:
documents = [documents]
dictList = build_dict_list(documents)
if len(dictList) > max_results:
return dictList[0:max_results]
return dictList
def get_drive_results_command():
try:
max_results = int(demisto.getArg('maxResult'))
view_ID = demisto.getArg('viewID')
bucket_name = demisto.getArg('bucketName')
output = download_and_sanitize_export_results(view_ID, bucket_name, max_results)
if not (output[0].get('Author') or output[0].get('Collaborators') or output[0].get('Title')):
return_error(
'Error displaying results: Corpus of the invoked command and the supplied ViewID does not match')
markedown_output = map(lambda document: {
'Title': document.get('Title'),
'Author': document.get('Author'),
'Collaborators': document.get('Collaborators'),
'Others': document.get('Others'),
'DateCreated': document.get('DateCreated'),
'DateModified': document.get('DateModified'),
'DocType': document.get('DocType'),
'MD5': document.get('MD5'),
}, output)
title = 'Your DRIVE inquiry details\n'
headers = ['Title', 'Author', 'Collaborators', 'Others', 'Labels', 'Viewers', 'DateCreated', 'DateModified',
'DocType', 'MD5']
markdown = tableToMarkdown(title, markedown_output, headers)
exportID = str(view_ID).split('/')[1]
contextOutput = {'ExportID': exportID, 'Results': markedown_output}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': contextOutput,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter.Export(val.ExportID === obj.ExportID)': contextOutput
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to display export result. Error: {}'.format(err_msg))
else:
raise ex
def get_mail_and_groups_results_command(inquiryType):
try:
max_results = int(demisto.getArg('maxResult'))
view_ID = demisto.getArg('viewID')
bucket_name = demisto.getArg('bucketName')
output = download_and_sanitize_export_results(view_ID, bucket_name, max_results)
if not (output[0].get('From') or output[0].get('To') or output[0].get('Subject')):
return_error(
'Error displaying results: Corpus of the invoked command and the supplied ViewID does not match')
markedown_output = map(lambda document: {
'From': document.get('From'),
'To': document.get('To'),
'CC': document.get('CC'),
'BCC': document.get('BCC'),
'Subject': document.get('Subject'),
'DateSent': document.get('DateSent'),
'DateReceived': document.get('DateReceived'),
}, output)
title = 'Your {} inquiry details\n'.format(inquiryType)
headers = ['Subject', 'From', 'To', 'CC', 'BCC', 'DateSent']
markdown = tableToMarkdown(title, markedown_output, headers)
exportID = str(view_ID).split('/')[1]
contextOutput = {'ExportID': exportID, 'Results': markedown_output}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': contextOutput,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter.Export(val.ExportID === obj.ExportID)': contextOutput
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to display export result. Error: {}'.format(err_msg))
else:
raise ex
def test_module():
"""
This is the call made when pressing the integration test button.
"""
try:
service = connect()
get_matters_by_state(service, 'STATE_UNSPECIFIED')
demisto.results('ok')
sys.exit(0)
except Exception as ex:
if 'Quota exceeded for quota metric' in str(ex):
return_error('Quota for Google Vault API exceeded')
else:
return_error(str(ex))
def main():
"""Main Execution Block"""
try:
handle_proxy()
# @@@@@@@@ DEMISTO COMMANDS @@@@@@@@
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
test_module()
elif demisto.command() == 'gvault-list-matters':
list_matters_command()
elif demisto.command() == 'gvault-create-matter':
create_matter_command()
elif demisto.command() == 'gvault-matter-update-state':
update_matter_state_command()
elif demisto.command() == 'gvault-add-heldAccount':
add_account_to_hold_command()
elif demisto.command() == 'gvault-get-matter':
search_matter_command()
elif demisto.command() == 'gvault-remove-heldAccount':
remove_account_from_hold_command()
elif demisto.command() == 'gvault-delete-hold':
delete_hold_command()
elif demisto.command() == 'gvault-list-holds':
list_holds_command()
elif demisto.command() == 'gvault-create-hold':
create_hold_command()
elif demisto.command() == 'gvault-create-export-mail':
create_mail_export_command()
elif demisto.command() == 'gvault-create-export-drive':
create_drive_export_command()
elif demisto.command() == 'gvault-create-export-groups':
create_groups_export_command()
elif demisto.command() == 'gvault-export-status':
get_multiple_exports_command()
elif demisto.command() == 'gvault-download-results':
download_export_command()
elif demisto.command() == 'gvault-get-drive-results':
get_drive_results_command()
elif demisto.command() == 'gvault-get-mail-results':
get_mail_and_groups_results_command('MAIL')
elif demisto.command() == 'gvault-get-groups-results':
get_mail_and_groups_results_command('GROUPS')
except Exception as e:
return_error(str(e))
# python2 uses __builtin__ python3 uses builtins
if __name__ == '__builtin__' or __name__ == 'builtins':
main()
|
the-stack_0_6836 | import torch
from torch import nn
import torch.nn.functional as F
class EmbedVector(nn.Module):
def __init__(self, config):
super(EmbedVector, self).__init__()
self.config = config
target_size = config.label
self.embed = nn.Embedding(config.words_num, config.words_dim)
if config.train_embed == False:
self.embed.weight.requires_grad = False
if config.qa_mode.upper() == 'LSTM':
self.lstm = nn.LSTM(input_size=config.words_dim,
hidden_size=config.hidden_size,
num_layers=config.num_layer,
dropout=config.rnn_dropout,
bidirectional=True)
elif config.qa_mode.upper() == 'GRU':
self.gru = nn.GRU(input_size=config.words_dim,
hidden_size=config.hidden_size,
num_layers=config.num_layer,
dropout=config.rnn_dropout,
bidirectional=True)
self.dropout = nn.Dropout(p=config.rnn_fc_dropout)
self.nonlinear = nn.Tanh()
#self.attn = nn.Sequential(
# nn.Linear(config.hidden_size * 2 + config.words_dim, config.hidden_size),
# self.nonlinear,
# nn.Linear(config.hidden_size, 1)
#)
self.hidden2tag = nn.Sequential(
#nn.Linear(config.hidden_size * 2 + config.words_dim, config.hidden_size * 2),
nn.Linear(config.hidden_size * 2, config.hidden_size * 2),
nn.BatchNorm1d(config.hidden_size * 2),
self.nonlinear,
self.dropout,
nn.Linear(config.hidden_size * 2, target_size)
)
def forward(self, x):
# x = (sequence length, batch_size, dimension of embedding)
text = x.text
x = self.embed(text)
num_word, batch_size, words_dim = x.size()
# h0 / c0 = (layer*direction, batch_size, hidden_dim)
if self.config.qa_mode.upper() == 'LSTM':
outputs, (ht, ct) = self.lstm(x)
elif self.config.qa_mode.upper() == 'GRU':
outputs, ht = self.gru(x)
else:
print("Wrong Entity Prediction Mode")
exit(1)
outputs = outputs.view(-1, outputs.size(2))
#x = x.view(-1, words_dim)
#attn_weights = F.softmax(self.attn(torch.cat((x, outputs), 1)), dim=0)
#attn_applied = torch.bmm(torch.diag(attn_weights[:, 0]).unsqueeze(0), outputs.unsqueeze(0))
#outputs = torch.cat((x, attn_applied.squeeze(0)), 1)
tags = self.hidden2tag(outputs).view(num_word, batch_size, -1)
scores = nn.functional.normalize(torch.mean(tags, dim=0), dim=1)
return scores |
the-stack_0_6837 | STEMS = [
('кон', ['конят', 'коня']),
('стол', ['столът']),
('хълм', ['хълма']),
('кола', ['колата', 'колите']),
('колело', ['колелото']),
('маса', ['маси']),
('стол', ['столове']),
('легло', ['легла']),
('чайник', ['чайници']),
('апарат', ['апарати']),
('дърво', ['дървета']),
('цвете', ['цветя']),
('самурай', ['самураи']),
('батерия', ['батерии']),
('чайник', ['чайниците']),
('метър', ['метри', 'метра', 'метрите']),
('километър', ['километри', 'километра', 'километрите']),
('квадратен', ['квадратна', 'квадратно', 'квадратни']),
('вървя', ['вървиш', 'върви', 'вървим', 'вървите', 'вървят']),
('мета', ['метат', 'метеш', 'мете', 'метем', 'метете']),
('рисувам', ['рисуват', 'рисуваш', 'рисува', 'рисуваме', 'рисувате']),
('стрелям', ['стрелят', 'стреляш', 'стреля', 'стреляме', 'стреляте']),
('чета', ['четоха', 'четох', 'чете', 'четохме', 'четохте']),
('говоря', ['говориха', 'говорих', 'говори', 'говорихме', 'говорихме']),
('рисувам', ['рисуваха', 'рисувах', 'рисува', 'рисувахме', 'рисувахте']),
('стрелям', ['стреляха', 'стрелях', 'стреля', 'стреляхме', 'стреляхте']),
('чета', ['четяха', 'четях', 'четеше', 'четяхме', 'четяхте']),
('говоря', ['говореха', 'говорех', 'говореше', 'говорехме', 'говорехте']),
(None, ['я']),
('отивам', ['отиваха', 'отиваше', 'отивах', 'отивахме', 'отивахте']),
('стрелям', ['стреляше']),
('чета', ['чели', 'чел', 'чела', 'чело']),
('чеша', ['чесали', 'чесал', 'чесала', 'чесало']),
('рисувам', ['рисували', 'рисувал', 'рисувала', 'рисувало']),
('стрелям', ['стреляли', 'стрелял', 'стреляла', 'стреляло']),
('говоря', ['говорили', 'говорил', 'говорила', 'говорило']),
('вампир', ['вампирката', 'вампир', 'вампирка']),
('красив', ['красивият', 'красива', 'красивата', 'красиви', 'красивите']),
('гладен', ['гладният', 'гладната', 'гладните', 'гладното']),
('археолог', ['археолози']),
('космически', ['космическа']),
('отивам', ['отишли', 'отишъл', 'отишла', 'отишло', 'отидохме', 'отидоха', 'отидохте']),
]
|
the-stack_0_6838 | import sys
import palmettopy.exceptions
from palmettopy.palmetto import Palmetto
words = ["cherry", "pie", "cr_eam", "apple", "orange", "banana",
"pineapple", "plum", "pig", "cra_cker", "so_und", "kit"]
palmetto = Palmetto()
try:
result = palmetto.get_df_for_words(words)
sys.exit(0)
except palmettopy.exceptions.EndpointDown:
sys.exit(1)
|
the-stack_0_6840 | import re
import vbox.base
from . import (
base,
props,
exceptions,
)
class HostDevice(base.SubEntity):
state = property(lambda s: s.getPayload()["Current State"].lower())
product = property(lambda s: s.getPayload()["Product"])
manufacturer = property(lambda s: s.getPayload()["Manufacturer"])
productId = property(lambda s: int(s.getPayload()["ProductId"].split()[0], 16))
vendorId = property(lambda s: int(s.getPayload()["VendorId"].split()[0], 16))
def __init__(self, parent, uuid):
super(HostDevice, self).__init__(parent)
self.UUID = uuid
def _getPayload():
for rec in self.source.getHostDevices():
if rec["UUID"] == self.UUID:
return dict(rec)
raise KeyError(self.UUID)
self.getPayload = base.ProxyRefreshTrail(
_getPayload, depends=(self.source.getHostDevices, )
)
def __repr__(self):
try:
payload = self.getPayload()
except KeyError:
payload = None
return "<{} payload={}>".format(self.__class__.__name__, payload)
class VmDevice(base.SubEntity):
UUID = property(lambda s: s.source.info["USBAttachedUUID" + s.idx])
vendorId = property(lambda s: int(s.source.info["USBAttachedVendorId" + s.idx], 16))
productId = property(lambda s: int(s.source.info["USBAttachedProductId" + s.idx], 16))
revisionId = property(lambda s: int(s.source.info["USBAttachedRevision" + s.idx], 16))
manufacturer = property(lambda s: s.source.info["USBAttachedManufacturer" + s.idx])
product = property(lambda s: s.source.info["USBAttachedProduct" + s.idx])
address = property(lambda s: s.source.info["USBAttachedAddress" + s.idx])
state = property(lambda s: "attached")
def __init__(self, parent, idx):
super(VmDevice, self).__init__(parent)
self.idx = idx
class VmUsb(base.SubEntity):
enabled = props.OnOff(**props.modify("usb"))
    ehci = props.OnOff(**props.modify("usbehci")) # Enables/disables USB 2.0
def attach(self, device):
if device.state == "attached":
raise Exception("This USB device is already attached.")
target = device.UUID
self.source.usbAttach(target)
# notify the device backend that it should be refreshed.
device.source.clearCache()
for el in self.devices:
if el.UUID == target:
return el
else:
raise Exception("Device {!r} that was previously attached is now lost.".format(target))
@props.SourceProperty
def devices(self):
matcher = re.compile(r"^USBAttachedUUID(\d+)$")
foundIds = []
for key in self.source.info.iterkeys():
match = matcher.match(key)
if match:
foundIds.append(match.group(1))
return [
VmDevice(self, uuid)
for uuid in foundIds
]
class Library(base.Library):
@props.SourceProperty
def hostDevices(self):
return [
HostDevice(self, rec["UUID"])
for rec in self.source.getHostDevices()
] |
the-stack_0_6841 | # print() is the function we use when we want to print something to the output
print("My name is Dhruv")
# You will notice something strange if you try to print a Windows directory path
#print("C:\Users\dhruv\Desktop\dhruv.github.io")
# Yes, a unicodeescape error
# Remember, I told you about escape characters in the previous tutorial
# Yes, they are causing problems here
# Now place "r" at the start of the string
print(r"C:\Users\dhruv\Desktop\dhruv.github.io")
# Yes, it is printed correctly
# So what does r mean? r means raw string
# It means "take the string as it is; give no special meaning to characters in this particular STRING"
# One amazing thing you can do is store strings in variables
# You can also add and multiply strings
myname = "Dhruv "
myname + "Patel"
myname * 5
# now press run
# Do check my shell file for reference
|
the-stack_0_6843 | """
The tests in this package are to ensure the proper resultant dtypes of
set operations.
"""
import numpy as np
import pytest
from pandas.core.dtypes.common import is_dtype_equal
import pandas as pd
from pandas import Float64Index, Int64Index, RangeIndex, UInt64Index
import pandas._testing as tm
from pandas.api.types import pandas_dtype
COMPATIBLE_INCONSISTENT_PAIRS = {
(Int64Index, RangeIndex): (tm.makeIntIndex, tm.makeRangeIndex),
(Float64Index, Int64Index): (tm.makeFloatIndex, tm.makeIntIndex),
(Float64Index, RangeIndex): (tm.makeFloatIndex, tm.makeIntIndex),
(Float64Index, UInt64Index): (tm.makeFloatIndex, tm.makeUIntIndex),
}
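# Illustrative sketch (editor's addition, not collected by pytest): the dtype promotion the
# tests below exercise -- int64 | float64 unions promote to float64, while int64 | uint64
# has no common dtype and falls back to object.
def _example_union_dtype_promotion():
    promoted = pd.Index([], dtype="int64").union(pd.Index([], dtype="float64"))  # float64
    fallback = pd.Index([], dtype="int64").union(pd.Index([], dtype="uint64"))  # object
    return promoted.dtype, fallback.dtype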
def test_union_same_types(index):
# Union with a non-unique, non-monotonic index raises error
# Only needed for bool index factory
idx1 = index.sort_values()
idx2 = index.sort_values()
assert idx1.union(idx2).dtype == idx1.dtype
def test_union_different_types(index, index_fixture2):
# This test only considers combinations of indices
# GH 23525
idx1, idx2 = index, index_fixture2
type_pair = tuple(sorted([type(idx1), type(idx2)], key=lambda x: str(x)))
if type_pair in COMPATIBLE_INCONSISTENT_PAIRS:
pytest.xfail("This test only considers non compatible indexes.")
if any(isinstance(idx, pd.MultiIndex) for idx in (idx1, idx2)):
pytest.xfail("This test doesn't consider multiindixes.")
if is_dtype_equal(idx1.dtype, idx2.dtype):
pytest.xfail("This test only considers non matching dtypes.")
# A union with a CategoricalIndex (even as dtype('O')) and a
# non-CategoricalIndex can only be made if both indices are monotonic.
# This is true before this PR as well.
# Union with a non-unique, non-monotonic index raises error
# This applies to the boolean index
idx1 = idx1.sort_values()
idx2 = idx2.sort_values()
assert idx1.union(idx2).dtype == np.dtype("O")
assert idx2.union(idx1).dtype == np.dtype("O")
@pytest.mark.parametrize("idx_fact1,idx_fact2", COMPATIBLE_INCONSISTENT_PAIRS.values())
def test_compatible_inconsistent_pairs(idx_fact1, idx_fact2):
# GH 23525
idx1 = idx_fact1(10)
idx2 = idx_fact2(20)
res1 = idx1.union(idx2)
res2 = idx2.union(idx1)
assert res1.dtype in (idx1.dtype, idx2.dtype)
assert res2.dtype in (idx1.dtype, idx2.dtype)
@pytest.mark.parametrize(
"left, right, expected",
[
("int64", "int64", "int64"),
("int64", "uint64", "object"),
("int64", "float64", "float64"),
("uint64", "float64", "float64"),
("uint64", "uint64", "uint64"),
("float64", "float64", "float64"),
("datetime64[ns]", "int64", "object"),
("datetime64[ns]", "uint64", "object"),
("datetime64[ns]", "float64", "object"),
("datetime64[ns, CET]", "int64", "object"),
("datetime64[ns, CET]", "uint64", "object"),
("datetime64[ns, CET]", "float64", "object"),
("Period[D]", "int64", "object"),
("Period[D]", "uint64", "object"),
("Period[D]", "float64", "object"),
],
)
def test_union_dtypes(left, right, expected):
left = pandas_dtype(left)
right = pandas_dtype(right)
a = pd.Index([], dtype=left)
b = pd.Index([], dtype=right)
result = (a | b).dtype
assert result == expected
|
the-stack_0_6845 | # coding:utf-8
from schemaobject.collections import OrderedDict
def column_schema_builder(table):
"""
    Returns a dictionary loaded with all of the columns available in the table.
``table`` must be an instance of TableSchema.
.. note::
This function is automatically called for you and set to
``schema.databases[name].tables[name].columns``
when you create an instance of SchemaObject
"""
conn = table.parent.parent.connection
cols = OrderedDict()
sql = """
SELECT TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT,
IS_NULLABLE, COLUMN_TYPE, COLUMN_KEY, CHARACTER_MAXIMUM_LENGTH,
CHARACTER_SET_NAME, COLLATION_NAME, EXTRA, COLUMN_COMMENT
FROM information_schema.COLUMNS
WHERE TABLE_SCHEMA='%s'
AND TABLE_NAME='%s'
ORDER BY ORDINAL_POSITION
"""
columns = conn.execute(sql % (table.parent.name, table.name))
if not columns:
return cols
for col in columns:
field = col['COLUMN_NAME']
column = ColumnSchema(name=field, parent=table)
column.ordinal_position = col['ORDINAL_POSITION']
column.field = col['COLUMN_NAME']
column.type = col['COLUMN_TYPE']
column.charset = col['CHARACTER_SET_NAME']
column.collation = col['COLLATION_NAME']
column.key = col['COLUMN_KEY']
column.default = col['COLUMN_DEFAULT']
column.extra = col['EXTRA']
column.comment = col['COLUMN_COMMENT']
if col['IS_NULLABLE'] == "YES":
column.null = True
else:
column.null = False
cols[field] = column
return cols
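# Illustrative sketch (editor's addition): with a SchemaObject instance in hand, the columns
# built above can emit ALTER TABLE fragments directly; this mirrors the doctests on
# ColumnSchema below. ``schema`` is assumed to be an already-constructed SchemaObject.
def _example_alter_fragments(schema):
    rental_columns = schema.databases['sakila'].tables['rental'].columns
    return [
        rental_columns['rental_id'].define(),                    # '`rental_id` INT(11) NOT NULL auto_increment FIRST'
        rental_columns['last_update'].modify(after='staff_id'),  # 'MODIFY COLUMN `last_update` ...'
    ]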
class ColumnSchema(object):
"""
Object representation of a single column.
Supports equality and inequality comparison of ColumnSchema.
``name`` is the column name.
``parent`` is an instance of TableSchema
.. note::
ColumnSchema objects are automatically created for you by column_schema_builder
and loaded under ``schema.databases[name].tables[name].columns``
.. note::
Attributes ``key``, ``comment`` are ignored in ``__eq__``, ``__neq__`` comparisons.
Example
>>> schema.databases['sakila'].tables['rental'].columns.keys()
['rental_id', 'rental_date', 'inventory_id', 'customer_id', 'return_date', 'staff_id', 'last_update']
Column Attributes
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].name
'rental_id'
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].field
'rental_id'
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].ordinal_position
1L
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].type
'INT(11)'
>>> schema.databases['sakila'].tables['staff'].columns['password'].charset
'utf8'
>>> schema.databases['sakila'].tables['staff'].columns['password'].collation
'utf8_bin'
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].null
False
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].key
'PRI'
>>> schema.databases['sakila'].tables['rental'].columns['last_update'].default
'CURRENT_TIMESTAMP'
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].extra
'auto_increment'
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].comment
''
"""
def __init__(self, name, parent):
self.parent = parent
self.name = name
self.field = name # alias for name, following mysql spec
self.ordinal_position = 0
self.type = None
self.charset = None
self.collation = None
self.null = None
self.key = None
self.default = None
self.extra = None
self.comment = None
def define(self, after=None, with_comment=False):
"""
Generate the SQL for this column definition.
``after`` is the name(string) of the column this should appear after.
If ``after`` is None, ``FIRST`` is used.
``with_comment`` boolean, add column comment to sql statement
>>> schema.databases['sakila'].tables['rental'].columns['last_update'].define(after="staff_id")
'`last_update` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP AFTER `staff_id`'
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].define()
'`rental_id` INT(11) NOT NULL auto_increment FIRST'
"""
sql = ["`%s` %s" % (self.field, self.type)]
if (self.collation and
self.charset and
(
self.parent.options['charset'].value != self.charset or
self.parent.options['collation'].value != self.collation
)):
sql.append("CHARACTER SET %s COLLATE %s" % (self.charset, self.collation))
if not self.null:
sql.append("NOT NULL")
else:
sql.append("NULL")
try:
basestring
except NameError:
basestring = str
if self.default is not None and isinstance(self.default, (str, basestring)) \
and self.default != 'CURRENT_TIMESTAMP':
sql.append("DEFAULT '%s'" % self.default)
elif self.default is not None:
sql.append("DEFAULT %s" % self.default)
if self.extra:
sql.append(self.extra)
if with_comment and self.comment:
sql.append("COMMENT '%s'" % self.comment)
if after:
sql.append("AFTER `%s`" % after)
else:
sql.append("FIRST")
return ' '.join(sql)
def create(self, *args, **kwargs):
"""
Generate the SQL to create (ADD) this column.
``after`` is the name(string) of the column this should appear after.
If ``after`` is None, ``FIRST`` is used.
``with_comment`` boolean, add column comment to sql statement
>>> schema.databases['sakila'].tables['rental'].columns['last_update'].create(after="staff_id")
'ADD COLUMN `last_update` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP AFTER `staff_id`'
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].create()
'ADD COLUMN `rental_id` INT(11) NOT NULL auto_increment FIRST'
"""
return "ADD COLUMN %s" % self.define(*args, **kwargs)
def modify(self, *args, **kwargs):
"""
Generate the SQL to modify this column.
``after`` is the name(string) of the column this should appear after.
        If ``after`` is None, ``FIRST`` is used.
``with_comment`` boolean, add column comment to sql statement
>>> schema.databases['sakila'].tables['rental'].columns['customer_id'].define(after="inventory_id")
'`customer_id` SMALLINT(5) UNSIGNED NOT NULL AFTER `inventory_id`'
>>> schema.databases['sakila'].tables['rental'].columns['customer_id'].default = 123
>>> schema.databases['sakila'].tables['rental'].columns['customer_id'].modify(after="inventory_id")
'MODIFY COLUMN `customer_id` SMALLINT(5) UNSIGNED NOT NULL DEFAULT 123 AFTER `inventory_id`'
"""
return "MODIFY COLUMN %s" % self.define(*args, **kwargs)
def drop(self):
"""
Generate the SQL to drop this column::
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].drop()
'DROP COLUMN `rental_id`'
"""
return "DROP COLUMN `%s`" % self.field
def __eq__(self, other):
if not isinstance(other, ColumnSchema):
return False
return ((self.field == other.field)
and (self.type == other.type)
and (self.null == other.null)
and (self.default == other.default)
and (self.extra == other.extra)
and (self.collation == other.collation))
def __ne__(self, other):
return not self.__eq__(other)
|
the-stack_0_6846 | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
from tests.pruning.helpers import BigPruningTestModel, get_basic_pruning_config, \
PruningTestModelConcat, PruningTestModelEltwise
from tests.test_helpers import load_exported_onnx_version
def find_value_by_name_in_list(obj_list, name):
for obj in obj_list:
if obj.name == name:
return obj
return None
def check_bias_and_weight_shape(node_name, onnx_model_proto, weight_shape, bias_shape):
node_weight = find_value_by_name_in_list(onnx_model_proto.graph.initializer, node_name + '.weight')
node_bias = find_value_by_name_in_list(onnx_model_proto.graph.initializer, node_name + '.bias')
assert node_weight.dims == weight_shape
assert node_bias.dims == bias_shape
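# Illustrative sketch (editor's addition, not collected by pytest): the helper above is used by
# the tests below like this -- e.g. a conv pruned to 16 output filters must keep a
# [16, in_channels, k, k] weight and a [16] bias in the exported ONNX graph:
#   check_bias_and_weight_shape('nncf_module.conv2', onnx_model_proto, [16, 16, 3, 3], [16])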
def test_pruning_export_simple_model(tmp_path):
model = BigPruningTestModel()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config['compression']['pruning_init'] = 0.5
nncf_config['compression']['algorithm'] = 'filter_pruning'
onnx_model_proto = load_exported_onnx_version(nncf_config, model,
path_to_storage_dir=tmp_path)
# Check that conv2 + BN were pruned by output filters
    # WARNING: starting from at least torch 1.7.0, torch.onnx.export will fuse BN into previous
# convs if torch.onnx.export is done with `training=False`, so this test might fail.
check_bias_and_weight_shape('nncf_module.conv2', onnx_model_proto, [16, 16, 3, 3], [16])
check_bias_and_weight_shape('nncf_module.bn', onnx_model_proto, [16], [16])
# Check that up was pruned by input filters
check_bias_and_weight_shape('nncf_module.up', onnx_model_proto, [16, 32, 3, 3], [32])
# Check that conv3 was pruned by input filters
check_bias_and_weight_shape('nncf_module.conv3', onnx_model_proto, [1, 32, 5, 5], [1])
@pytest.mark.parametrize(('prune_first', 'prune_last', 'ref_shapes'),
[(False, True, [[[16, 1, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 16, 2, 2], [16]],
[[8, 32, 3, 3], [8]]]),
(True, True, [[[8, 1, 2, 2], [8]], [[16, 8, 2, 2], [16]], [[16, 8, 2, 2], [16]],
[[8, 32, 3, 3], [8]]]),
(False, False, [[[16, 1, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 16, 2, 2], [16]],
[[16, 32, 3, 3], [16]]]),
(True, False, [[[8, 1, 2, 2], [8]], [[16, 8, 2, 2], [16]], [[16, 8, 2, 2], [16]],
[[16, 32, 3, 3], [16]]]),
]
)
def test_pruning_export_concat_model(tmp_path, prune_first, prune_last, ref_shapes):
model = PruningTestModelConcat()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config['compression']['algorithm'] = 'filter_pruning'
nncf_config['compression']['params']['prune_first_conv'] = prune_first
nncf_config['compression']['params']['prune_last_conv'] = prune_last
nncf_config['compression']['pruning_init'] = 0.5
onnx_model_proto = load_exported_onnx_version(nncf_config, model,
path_to_storage_dir=tmp_path)
for i in range(1, 5):
conv_name = "nncf_module.conv{}".format(i)
check_bias_and_weight_shape(conv_name, onnx_model_proto, *ref_shapes[i - 1])
@pytest.mark.parametrize(('prune_first', 'prune_last', 'ref_shapes'),
[(False, True, [[[16, 1, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 16, 2, 2], [16]],
[[8, 16, 3, 3], [8]]]),
(True, True, [[[8, 1, 2, 2], [8]], [[16, 8, 2, 2], [16]], [[16, 8, 2, 2], [16]],
[[8, 16, 3, 3], [8]]]),
(False, False, [[[16, 1, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 16, 2, 2], [16]],
[[16, 16, 3, 3], [16]]]),
(True, False, [[[8, 1, 2, 2], [8]], [[16, 8, 2, 2], [16]], [[16, 8, 2, 2], [16]],
[[16, 16, 3, 3], [16]]]),
]
)
def test_pruning_export_eltwise_model(tmp_path, prune_first, prune_last, ref_shapes):
model = PruningTestModelEltwise()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config['compression']['algorithm'] = 'filter_pruning'
nncf_config['compression']['params']['prune_first_conv'] = prune_first
nncf_config['compression']['params']['prune_last_conv'] = prune_last
nncf_config['compression']['pruning_init'] = 0.5
onnx_model_proto = load_exported_onnx_version(nncf_config, model,
path_to_storage_dir=tmp_path)
for i in range(1, 5):
conv_name = "nncf_module.conv{}".format(i)
check_bias_and_weight_shape(conv_name, onnx_model_proto, *ref_shapes[i - 1])
|
the-stack_0_6848 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
from typing import Dict
from logging import getLogger
from onnx import TensorProto, helper
from .onnx_model import OnnxModel
from .fusion_base import Fusion
from .fusion_utils import FusionUtils
logger = getLogger(__name__)
class FusionEmbedLayerNoMask(Fusion):
"""
Embed Layer Normalization will fuse embeddings and mask processing into one node.
The embeddings before conversion:
(input_ids) --------> Gather ----------+ (segment_ids)
| | |
| v v
+--> Shape --> Expand -> Gather---->Add Gather
| ^ | |
| | v v
+---(optional graph) SkipLayerNormalization
    Optional graph is used to generate the position list (0, 1, ...) per batch. It can be a constant in some models.
(input_ids) --> Gather -----+ Slice
| |
v v
(segment_ids)--> Gather --->Add Reshape
| |
v v
SkipLayerNormalization
"""
def __init__(self, model: OnnxModel, description='no mask'):
super().__init__(model, "EmbedLayerNormalization", "SkipLayerNormalization", description)
self.utils = FusionUtils(model)
self.attention = None
def match_segment_path(self, normalize_node, input_name_to_nodes, output_name_to_node, input_ids_cast_node):
segment_ids = None
segment_embedding_gather = None
segment_embedding_path = self.model.match_parent_path(normalize_node, ['Gather'], [1])
if segment_embedding_path is None:
segment_embedding_path = self.model.match_parent_path(normalize_node, ['Add', 'Gather'], [0, 1])
if segment_embedding_path is None:
logger.info("Segment embedding is not found. Embed layer cannot be fused.")
return
_, segment_embedding_gather = segment_embedding_path
else:
segment_embedding_gather = segment_embedding_path[0]
segment_ids = segment_embedding_gather.input[1]
self.nodes_to_remove.extend(segment_embedding_path)
if self.model.find_graph_input(segment_ids):
casted, segment_ids = self.utils.cast_graph_input_to_int32(segment_ids)
else:
segment_ids, segment_ids_cast_node = self.utils.cast_input_to_int32(segment_ids)
# Cast might be removed by OnnxRuntime.
_, segment_id_path, _ = self.model.match_parent_paths(
segment_ids_cast_node,
[(['ConstantOfShape', 'Concat', 'Unsqueeze', 'Gather', 'Shape', 'Cast'], [0, 0, 1, 0, 0, 0]),
(['ConstantOfShape', 'Concat', 'Unsqueeze', 'Gather', 'Shape'], [0, 0, 1, 0, 0])], output_name_to_node)
if segment_id_path and input_ids_cast_node and input_ids_cast_node.input[0] == segment_id_path[-1].input[0]:
logger.debug("Simplify semgent id path...")
self.model.add_node(
helper.make_node('Shape', inputs=[input_ids_cast_node.input[0]], outputs=["input_shape"]))
self.model.add_node(
helper.make_node('ConstantOfShape',
inputs=["input_shape"],
outputs=["zeros_for_input_shape"],
value=helper.make_tensor("value", onnx.TensorProto.INT32, [1], [1])))
segment_ids = "zeros_for_input_shape"
return segment_ids, segment_embedding_gather
def fuse(self, node, input_name_to_nodes, output_name_to_node):
is_distill = False
if self.model.match_parent_path(node, ['Add', 'Gather'], [0, 0]) is None and self.model.match_parent_path(
node, ['Gather'], [0]) is None:
logger.debug(
"Failed to match path SkipLayerNormalization[0] <-- Add <-- Gather or SkipLayerNormalization[0] <-- Gather"
)
return
self.attention = self.model.find_first_child_by_type(node, 'Attention', input_name_to_nodes, recursive=False)
if self.attention is None:
# In case user disables attention fusion, check whether subgraph looks like Attention.
if node.output[0] not in input_name_to_nodes:
return
children = input_name_to_nodes[node.output[0]]
children_types = sorted([child.op_type for child in children])
if children_types != ['MatMul', 'MatMul', 'MatMul', 'SkipLayerNormalization'] and children_types != [
'MatMul', 'MatMul', 'MatMul', 'Shape', 'Shape', 'SkipLayerNormalization'
]:
logger.debug("No Attention like subgraph in children of SkipLayerNormalization")
return
        # Assume the order of embeddings is word_embedding + position_embedding + segment_embedding
normalize_node = node
add_node = None
word_embedding_path = self.model.match_parent_path(normalize_node, ['Add', 'Gather'], [0, 0])
if word_embedding_path is not None:
add_node, word_embedding_gather = word_embedding_path
else:
word_embedding_path = self.model.match_parent_path(normalize_node, ['Gather'], [0])
if word_embedding_path is not None:
word_embedding_gather = word_embedding_path[0]
is_distill = True
from packaging.version import Version
import onnxruntime
if Version(onnxruntime.__version__) <= Version("1.4.0"):
logger.warning(
'Please install onnxruntime with version > 1.4.0 for embedlayer fusion support for distilbert')
return
else:
logger.info("Word embedding path is not found. Embed layer cannot be fused.")
return
input_ids = word_embedding_gather.input[1]
position_embedding_expand = None
position_embedding_shape = None
position_embedding_path = self.model.match_parent_path(normalize_node, ['Gather', 'Expand'],
[1, 1]) # for distill-bert
if position_embedding_path is not None:
position_embedding_weight_node, position_embedding_expand = position_embedding_path
else:
position_embedding_path = self.model.match_parent_path(normalize_node, ['Reshape', 'Slice'], [1, 0])
if position_embedding_path is not None:
_, position_embedding_weight_node = position_embedding_path
else:
position_embedding_path = self.model.match_parent_path(add_node, ['Gather', 'Expand', 'Shape'],
[1, 1, 1])
if position_embedding_path is not None:
position_embedding_weight_node, position_embedding_expand, position_embedding_shape = position_embedding_path
else:
position_embedding_path = self.model.match_parent_path(
add_node, ['Gather', 'Expand', 'Concat', 'Unsqueeze', 'Gather', 'Shape'], [1, 1, 1, 1, 0, 0])
if position_embedding_path is not None:
position_embedding_weight_node, position_embedding_expand, _, _, _, position_embedding_shape = position_embedding_path
else:
# Here we will not try to get exact match. Instead, we only try identify position embedding weights.
position_embedding_path = self.model.match_parent_path(add_node, ['Gather', 'Expand'], [1, 1])
if position_embedding_path is not None:
position_embedding_weight_node, position_embedding_expand = position_embedding_path
else:
logger.info("Position embedding path is not found. Embed layer cannot be fused.")
return
if position_embedding_shape is not None and position_embedding_shape.input[0] != input_ids:
logger.info("position and word embedding is expected to be applied on same input")
return
if position_embedding_expand and position_embedding_shape:
input_parent = self.model.get_parent(position_embedding_shape, 0, output_name_to_node)
subgraph_nodes = self.model.get_parent_subgraph_nodes(position_embedding_expand,
[input_parent] if input_parent else [],
output_name_to_node)
self.nodes_to_remove.extend(subgraph_nodes)
self.nodes_to_remove.extend(word_embedding_path)
self.nodes_to_remove.extend(position_embedding_path)
self.nodes_to_remove.extend([normalize_node])
# Cast input_ids and segment_ids to int32.
input_ids_cast_node = None
if self.model.find_graph_input(input_ids):
casted, input_ids = self.utils.cast_graph_input_to_int32(input_ids)
else:
input_ids, input_ids_cast_node = self.utils.cast_input_to_int32(input_ids)
node_name = self.model.create_node_name('EmbedLayerNormalization')
output_name = node_name + "_output"
embed_node_inputs = None
if is_distill == False:
segment_path = self.match_segment_path(normalize_node, input_name_to_nodes, output_name_to_node,
input_ids_cast_node)
if segment_path is None:
return
else:
segment_ids, segment_embedding_gather = segment_path
embed_node_inputs = [
input_ids,
segment_ids,
word_embedding_gather.input[0],
position_embedding_weight_node.input[0],
segment_embedding_gather.input[0],
normalize_node.input[2],
normalize_node.input[3] # gamma and beta
]
else:
embed_node_inputs = [
input_ids,
'',
word_embedding_gather.input[0],
position_embedding_weight_node.input[0],
'',
normalize_node.input[2],
normalize_node.input[3] # gamma and beta
]
embed_node = helper.make_node('EmbedLayerNormalization',
embed_node_inputs,
outputs=[node_name + "_output", node_name + "_dummy_mask_index"],
name=node_name)
embed_node.domain = "com.microsoft"
# Pass attribute "epsilon" from normalize node to EmbedLayerNormalization.
for att in normalize_node.attribute:
if att.name == 'epsilon':
embed_node.attribute.extend([att])
# Set default value to 1e-12 if no attribute is found.
# OnnxRuntime 1.2.0 or older has no epsilon attribute. The optimized model can only work for 1.3.0 or later.
if len(embed_node.attribute) == 0:
embed_node.attribute.extend([helper.make_attribute("epsilon", 1.0E-12)])
self.model.replace_input_of_all_nodes(normalize_node.output[0], output_name)
self.nodes_to_add.append(embed_node)
class FusionEmbedLayerNormalization(FusionEmbedLayerNoMask):
def __init__(self, model: OnnxModel):
super().__init__(model, "with mask")
def fuse(self, node, input_name_to_nodes, output_name_to_node):
old_count = len(self.nodes_to_add)
super().fuse(node, input_name_to_nodes, output_name_to_node)
if len(self.nodes_to_add) == old_count:
return
if self.attention is not None:
mask_index = self.attention.input[3]
if mask_index in output_name_to_node:
node = output_name_to_node[mask_index]
if node.op_type == "ReduceSum":
embed_node = self.nodes_to_add.pop()
mask_input_name = node.input[0]
self.nodes_to_remove.extend([node])
embed_node.input.append(mask_input_name)
embed_node.output[1] = mask_index
self.nodes_to_add.append(embed_node)
self.prune_graph = True
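# Usage note (sketch): these fusions are driven by the surrounding optimizer framework; an
# optimizer typically constructs FusionEmbedLayerNormalization(model) and lets the Fusion base
# class walk the graph, calling fuse() for every SkipLayerNormalization node it finds (the
# search-by-type wiring comes from the base class constructor arguments above).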
|
the-stack_0_6849 | #!/usr/bin/python
import pickle
import numpy
import _pickle as cPickle
from sklearn.model_selection import cross_validate
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.model_selection import train_test_split
def preprocess(words_file = "../tools/word_data.pkl", authors_file="../tools/email_authors.pkl"):
"""
this function takes a pre-made list of email texts (by default word_data.pkl)
and the corresponding authors (by default email_authors.pkl) and performs
a number of preprocessing steps:
-- splits into training/testing sets (10% testing)
-- vectorizes into tfidf matrix
-- selects/keeps most helpful features
        after this, the features and labels are put into numpy arrays, which play nice with sklearn functions
4 objects are returned:
-- training/testing features
-- training/testing labels
"""
### the words (features) and authors (labels), already largely preprocessed
### this preprocessing will be repeated in the text learning mini-project
authors_file_handler = open(authors_file, "rb")
authors = pickle.load(authors_file_handler)
authors_file_handler.close()
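    ### word_data.pkl appears to have been saved with Windows (CRLF) line endings, which can
    ### break cPickle on some platforms; the block below rewrites it with Unix newlines
    ### before loading.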
original = words_file
destination = "word_data_unix.pkl"
content = ''
outsize = 0
with open(original, 'rb') as infile:
content = infile.read()
with open(destination, 'wb') as output:
for line in content.splitlines():
outsize = outsize + len(line) + 1
output.write(line + str.encode('\n'))
words_file_handler = open(destination, "rb")
word_data = cPickle.load(words_file_handler)
words_file_handler.close()
### test_size is the percentage of events assigned to the test set
### (remainder go into training)
features_train, features_test, labels_train, labels_test = train_test_split(word_data, authors, test_size=0.1, random_state=42)
### text vectorization--go from strings to lists of numbers
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
features_train_transformed = vectorizer.fit_transform(features_train)
features_test_transformed = vectorizer.transform(features_test)
### feature selection, because text is super high dimensional and
### can be really computationally chewy as a result
# percentile can be changed from 10 to 1, for example.
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(features_train_transformed, labels_train)
features_train_transformed = selector.transform(features_train_transformed).toarray()
features_test_transformed = selector.transform(features_test_transformed).toarray()
### info on the data
print ("no. of Chris training emails:", sum(labels_train))
print ("no. of Sara training emails:", len(labels_train)-sum(labels_train))
return features_train_transformed, features_test_transformed, labels_train, labels_test
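### example usage (sketch) -- the default arguments assume word_data.pkl and
### email_authors.pkl live in ../tools/ relative to the working directory:
### features_train, features_test, labels_train, labels_test = preprocess()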
|
the-stack_0_6850 | import django
from gui.lnd_deps import router_pb2 as lnr
from gui.lnd_deps import router_pb2_grpc as lnrouter
from gui.lnd_deps.lnd_connect import lnd_connect
from lndg import settings
from os import environ
from time import sleep
environ['DJANGO_SETTINGS_MODULE'] = 'lndg.settings'
django.setup()
from gui.models import Channels, FailedHTLCs
def main():
try:
connection = lnd_connect(settings.LND_DIR_PATH, settings.LND_NETWORK, settings.LND_RPC_SERVER)
routerstub = lnrouter.RouterStub(connection)
for response in routerstub.SubscribeHtlcEvents(lnr.SubscribeHtlcEventsRequest()):
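            # event_type 3 corresponds to FORWARD events in lnd's routerrpc HtlcEvent enum
            # (assumption based on the proto); a populated link_fail_event means the HTLC
            # failed at this node rather than further downstream.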
if response.event_type == 3 and str(response.link_fail_event) != '':
in_chan_id = response.incoming_channel_id
out_chan_id = response.outgoing_channel_id
in_chan = Channels.objects.filter(chan_id=in_chan_id)[0] if Channels.objects.filter(chan_id=in_chan_id).exists() else None
out_chan = Channels.objects.filter(chan_id=out_chan_id)[0] if Channels.objects.filter(chan_id=out_chan_id).exists() else None
in_chan_alias = in_chan.alias if in_chan is not None else None
out_chan_alias = out_chan.alias if out_chan is not None else None
out_chan_liq = out_chan.local_balance if out_chan is not None else None
out_chan_pending = out_chan.pending_outbound if out_chan is not None else None
amount = int(response.link_fail_event.info.outgoing_amt_msat/1000)
wire_failure = response.link_fail_event.wire_failure
failure_detail = response.link_fail_event.failure_detail
                missed_fee = 0 if out_chan is None else round(((amount/1000000) * out_chan.local_fee_rate) + (out_chan.local_base_fee/1000), 3)
FailedHTLCs(amount=amount, chan_id_in=in_chan_id, chan_id_out=out_chan_id, chan_in_alias=in_chan_alias, chan_out_alias=out_chan_alias, chan_out_liq=out_chan_liq, chan_out_pending=out_chan_pending, wire_failure=wire_failure, failure_detail=failure_detail, missed_fee=missed_fee).save()
except Exception as e:
print('Error while running failed HTLC stream: ' + str(e))
sleep(20)
if __name__ == '__main__':
main() |
the-stack_0_6851 | """
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import print_function
import struct
import traceback
import os
from binaryninja.architecture import Architecture
from binaryninja.lowlevelil import LowLevelILLabel, LLIL_TEMP
from binaryninja.function import RegisterInfo, InstructionInfo, InstructionTextToken
from binaryninja.binaryview import BinaryView
from binaryninja.plugin import PluginCommand
from binaryninja.interaction import AddressField, ChoiceField, get_form_input
from binaryninja.types import Symbol
from binaryninja.log import log_error
from binaryninja.enums import (Endianness, BranchType, InstructionTextTokenType,
LowLevelILOperation, LowLevelILFlagCondition, FlagRole, SegmentFlag,
ImplicitRegisterExtend, SymbolType)
# Shift styles
SHIFT_SYLE_ARITHMETIC = 0
SHIFT_SYLE_LOGICAL = 1
SHIFT_SYLE_ROTATE_WITH_EXTEND = 2
SHIFT_SYLE_ROTATE = 3
ShiftStyle = [
'as', # SHIFT_SYLE_ARITHMETIC
'ls', # SHIFT_SYLE_LOGICAL
'rox', # SHIFT_SYLE_ROTATE_WITH_EXTEND
'ro' # SHIFT_SYLE_ROTATE
]
# Condition codes
CONDITION_TRUE = 0
CONDITION_FALSE = 1
CONDITION_HIGH = 2
CONDITION_LESS_OR_SAME = 3
CONDITION_CARRY_CLEAR = 4
CONDITION_CARRY_SET = 5
CONDITION_NOT_EQUAL = 6
CONDITION_EQUAL = 7
CONDITION_OVERFLOW_CLEAR = 8
CONDITION_OVERFLOW_SET = 9
CONDITION_PLUS = 10
CONDITION_MINUS = 11
CONDITION_GREATER_OR_EQUAL = 12
CONDITION_LESS_THAN = 13
CONDITION_GREATER_THAN = 14
CONDITION_LESS_OR_EQUAL = 15
Condition = [
't', # CONDITION_TRUE
'f', # CONDITION_FALSE
'hi', # CONDITION_HIGH
'ls', # CONDITION_LESS_OR_SAME
'cc', # CONDITION_CARRY_CLEAR
'cs', # CONDITION_CARRY_SET
'ne', # CONDITION_NOT_EQUAL
'eq', # CONDITION_EQUAL
'vc', # CONDITION_OVERFLOW_CLEAR
'vs', # CONDITION_OVERFLOW_SET
'pl', # CONDITION_PLUS
'mi', # CONDITION_MINUS
'ge', # CONDITION_GREATER_OR_EQUAL
'lt', # CONDITION_LESS_THAN
'gt', # CONDITION_GREATER_THAN
'le' # CONDITION_LESS_OR_EQUAL
]
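# The 4-bit condition field of Bcc/Scc/DBcc/TRAPcc instructions indexes directly into this
# list, e.g. Condition[(instruction >> 8) & 0xf] in decode_instruction below.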
# Registers
REGISTER_D0 = 0
REGISTER_D1 = 1
REGISTER_D2 = 2
REGISTER_D3 = 3
REGISTER_D4 = 4
REGISTER_D5 = 5
REGISTER_D6 = 6
REGISTER_D7 = 7
REGISTER_A0 = 8
REGISTER_A1 = 9
REGISTER_A2 = 10
REGISTER_A3 = 11
REGISTER_A4 = 12
REGISTER_A5 = 13
REGISTER_A6 = 14
REGISTER_A7 = 15
Registers = [
'd0', # REGISTER_D0
'd1', # REGISTER_D1
'd2', # REGISTER_D2
'd3', # REGISTER_D3
'd4', # REGISTER_D4
'd5', # REGISTER_D5
'd6', # REGISTER_D6
'd7', # REGISTER_D7
'a0', # REGISTER_A0
'a1', # REGISTER_A1
'a2', # REGISTER_A2
'a3', # REGISTER_A3
'a4', # REGISTER_A4
'a5', # REGISTER_A5
'a6', # REGISTER_A6
'sp' # REGISTER_A7
]
# Sizes
SIZE_BYTE = 0
SIZE_WORD = 1
SIZE_LONG = 2
SizeSuffix = [
'.b', # SIZE_BYTE
'.w', # SIZE_WORD
'.l', # SIZE_LONG
]
# Operands
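# Each operand class below implements the same small protocol used by the lifter:
#   format()                       -> disassembly tokens for the operand
#   get_pre_il()/get_post_il()     -> side effects before/after the access
#                                     (e.g. pre-decrement / post-increment of the address register)
#   get_address_il()               -> LLIL expression for the effective address (None for non-memory operands)
#   get_source_il()/get_dest_il()  -> LLIL for reading the operand / writing a value back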
class OpRegisterDirect:
def __init__(self, size, reg):
self.size = size
self.reg = reg
def __repr__(self):
return "OpRegisterDirect(%d, %s)" % (self.size, self.reg)
def format(self, addr):
# a0, d0
return [
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg)
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return None
def get_source_il(self, il):
if self.reg == 'ccr':
c = il.flag_bit(1, 'c', 0)
v = il.flag_bit(1, 'v', 1)
z = il.flag_bit(1, 'z', 2)
n = il.flag_bit(1, 'n', 3)
x = il.flag_bit(1, 'x', 4)
return il.or_expr(1, il.or_expr(1, il.or_expr(1, il.or_expr(1, c, v), z), n), x)
else:
return il.reg(1 << self.size, self.reg)
def get_dest_il(self, il, value, flags=0):
if self.reg == 'ccr':
return None
else:
# return il.set_reg(1 << self.size, self.reg, value)
# if self.size == SIZE_BYTE:
# if self.reg[0] == 'a' or self.reg == 'sp':
# return None
# else:
# return il.set_reg(1, self.reg+'.b', value, flags)
# elif self.size == SIZE_WORD:
# return il.set_reg(2, self.reg+'.w', value, flags)
# else:
# return il.set_reg(4, self.reg, value, flags)
if self.size == SIZE_BYTE:
if self.reg[0] == 'a' or self.reg == 'sp':
return None
else:
return il.set_reg(4, self.reg, il.or_expr(4, il.and_expr(4, il.const(4, 0xffffff00), il.reg(4, self.reg)), il.and_expr(4, il.const(4, 0xff), value)), flags)
elif self.size == SIZE_WORD:
if self.reg[0] == 'a' or self.reg == 'sp':
return il.set_reg(4, self.reg, il.sign_extend(4, value), flags)
else:
return il.set_reg(4, self.reg, il.or_expr(4, il.and_expr(4, il.const(4, 0xffff0000), il.reg(4, self.reg)), il.and_expr(4, il.const(4, 0xffff), value)), flags)
else:
return il.set_reg(4, self.reg, value, flags)
class OpRegisterDirectPair:
def __init__(self, size, reg1, reg2):
self.size = size
self.reg1 = reg1
self.reg2 = reg2
def __repr__(self):
return "OpRegisterDirectPair(%d, %s, %s)" % (self.size, self.reg1, self.reg2)
def format(self, addr):
# d0:d1
return [
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg1),
InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ":"),
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg2)
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return None
def get_source_il(self, il):
return (il.reg(1 << self.size, self.reg1), il.reg(1 << self.size, self.reg2))
def get_dest_il(self, il, values, flags=0):
return (il.set_reg(1 << self.size, self.reg1, values[0], flags), il.set_reg(1 << self.size, self.reg2, values[1], flags))
class OpRegisterMovemList:
def __init__(self, size, regs):
self.size = size
self.regs = regs
def __repr__(self):
return "OpRegisterMovemList(%d, %s)" % (self.size, repr(self.regs))
def format(self, addr):
# d0-d7/a0/a2/a4-a7
if len(self.regs) == 0:
return []
tokens = [InstructionTextToken(InstructionTextTokenType.RegisterToken, self.regs[0])]
last = self.regs[0]
first = None
for reg in self.regs[1:]:
if Registers[Registers.index(last)+1] == reg and reg != 'a0':
if first is None:
first = last
last = reg
else:
if first is not None:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, "-"))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, last))
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, "/"))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, reg))
first = None
last = reg
if first is not None:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, "-"))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, last))
return tokens
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return None
def get_source_il(self, il):
return [il.reg(1 << self.size, reg) for reg in self.regs]
def get_dest_il(self, il, values, flags=0):
return [il.set_reg(1 << self.size, reg, val, flags) for reg, val in zip(self.regs, values)]
class OpRegisterIndirect:
def __init__(self, size, reg):
self.size = size
self.reg = reg
def __repr__(self):
return "OpRegisterIndirect(%d, %s)" % (self.size, self.reg)
def format(self, addr):
# (a0)
return [
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")")
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return il.reg(4, self.reg)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpRegisterIndirectPair:
def __init__(self, size, reg1, reg2):
self.size = size
self.reg1 = reg1
self.reg2 = reg2
def __repr__(self):
return "OpRegisterIndirectPair(%d, %s, %s)" % (self.size, self.reg1, self.reg2)
def format(self, addr):
# d0:d1
return [
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg1),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")"),
InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ":"),
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg2),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")")
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return (il.reg(4, self.reg1), il.reg(4, self.reg2))
def get_source_il(self, il):
return (il.load(1 << self.size, il.reg(4, self.reg1)), il.load(1 << self.size, il.reg(4, self.reg2)))
def get_dest_il(self, il, values, flags=0):
#return (il.store(1 << self.size, il.reg(4, self.reg1), values[0], flags), il.store(1 << self.size, il.reg(4, self.reg2), values[1], flags))
return (il.store(1 << self.size, il.reg(4, self.reg1), values[0]), il.store(1 << self.size, il.reg(4, self.reg2), values[1]))
class OpRegisterIndirectPostincrement:
def __init__(self, size, reg):
self.size = size
self.reg = reg
def __repr__(self):
return "OpRegisterIndirectPostincrement(%d, %s)" % (self.size, self.reg)
def format(self, addr):
# (a0)+
return [
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")"),
InstructionTextToken(InstructionTextTokenType.TextToken, "+")
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return il.set_reg(4,
self.reg,
il.add(4,
il.reg(4, self.reg),
il.const(4, 1 << self.size)
)
)
def get_address_il(self, il):
return il.reg(4, self.reg)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpRegisterIndirectPredecrement:
def __init__(self, size, reg):
self.size = size
self.reg = reg
def __repr__(self):
return "OpRegisterIndirectPredecrement(%d, %s)" % (self.size, self.reg)
def format(self, addr):
# -(a0)
return [
InstructionTextToken(InstructionTextTokenType.TextToken, "-"),
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")")
]
def get_pre_il(self, il):
return il.set_reg(4,
self.reg,
il.sub(4,
il.reg(4, self.reg),
il.const(4, 1 << self.size)
)
)
def get_post_il(self, il):
return None
def get_address_il(self, il):
return il.reg(4, self.reg)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpRegisterIndirectDisplacement:
def __init__(self, size, reg, offset):
self.size = size
self.reg = reg
self.offset = offset
def __repr__(self):
return "OpRegisterIndirectDisplacement(%d, %s, 0x%x)" % (self.size, self.reg, self.offset)
def format(self, addr):
if self.reg == 'pc':
return [
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.PossibleAddressToken, "${:08x}".format(addr+2+self.offset), addr+2+self.offset, 4),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")")
]
else:
# $1234(a0)
return [
InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:04x}".format(self.offset), self.offset, 2),
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")")
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
if self.reg == 'pc':
return il.const(4, il.current_address+2+self.offset)
else:
return il.add(4,
il.reg(4, self.reg),
il.const(2, self.offset)
)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
if self.reg == 'pc':
return None
else:
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpRegisterIndirectIndex:
def __init__(self, size, reg, offset, ireg, ireg_long, scale):
self.size = size
self.reg = reg
self.offset = offset
self.ireg = ireg
self.ireg_long = ireg_long
self.scale = scale
def __repr__(self):
return "OpRegisterIndirectIndex(%d, %s, 0x%x, %s, %d, %d)" % (self.size, self.reg, self.offset, self.ireg, self.ireg_long, self.scale)
def format(self, addr):
# $1234(a0,a1.l*4)
tokens = []
if self.offset != 0:
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:x}".format(self.offset), self.offset))
tokens.append(InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg))
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, self.ireg))
tokens.append(InstructionTextToken(InstructionTextTokenType.TextToken, "."))
tokens.append(InstructionTextToken(InstructionTextTokenType.TextToken, "l" if self.ireg_long else 'w'))
if self.scale != 1:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, "*"))
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "{}".format(self.scale), self.scale))
tokens.append(InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")"))
return tokens
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return il.add(4,
il.add(4,
il.const(4, il.current_address+2) if self.reg == 'pc' else il.reg(4, self.reg),
il.const(4, self.offset)
),
il.mult(4,
il.reg(4 if self.ireg_long else 2, self.ireg),
il.const(1, self.scale)
)
)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
if self.reg == 'pc':
return None
else:
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpMemoryIndirect:
def __init__(self, size, reg, offset, outer_displacement):
self.size = size
self.reg = reg
self.offset = offset
self.outer_displacement = outer_displacement
def __repr__(self):
return "OpRegisterIndirectIndex(%d, %s, %d, %d)" % (self.size, self.reg, self.offset, self.outer_displacement)
def format(self, addr):
# ([$1234,a0],$1234)
tokens = []
tokens.append(InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("))
tokens.append(InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "["))
if self.offset != 0:
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:x}".format(self.offset), self.offset))
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg))
tokens.append(InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, "]"))
if self.outer_displacement != 0:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:x}".format(self.outer_displacement), self.outer_displacement))
tokens.append(InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")"))
return tokens
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return il.add(4,
il.load(4,
il.add(4,
il.const(4, il.current_address+2) if self.reg == 'pc' else il.reg(4, self.reg),
il.const(4, self.offset)
),
),
il.const(4, self.outer_displacement)
)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
if self.reg == 'pc':
return None
else:
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpMemoryIndirectPostindex:
def __init__(self, size, reg, offset, ireg, ireg_long, scale, outer_displacement):
self.size = size
self.reg = reg
self.offset = offset
self.ireg = ireg
self.ireg_long = ireg_long
self.scale = scale
self.outer_displacement = outer_displacement
def __repr__(self):
return "OpRegisterIndirectIndex(%d, %s, 0x%x, %s, %d, %d, 0x%x)" % (self.size, self.reg, self.offset, self.ireg, self.ireg_long, self.scale, self.outer_displacement)
def format(self, addr):
# ([$1234,a0],a1.l*4,$1234)
tokens = []
tokens.append(InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("))
tokens.append(InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "["))
if self.offset != 0:
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:x}".format(self.offset), self.offset))
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg))
tokens.append(InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, "]"))
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, self.ireg))
tokens.append(InstructionTextToken(InstructionTextTokenType.TextToken, "."))
tokens.append(InstructionTextToken(InstructionTextTokenType.TextToken, "l" if self.ireg_long else 'w'))
if self.scale != 1:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, "*"))
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "{}".format(self.scale), self.scale))
if self.outer_displacement != 0:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:x}".format(self.outer_displacement), self.outer_displacement))
tokens.append(InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")"))
return tokens
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return il.add(4,
il.load(4,
il.add(4,
il.const(4, il.current_address+2) if self.reg == 'pc' else il.reg(4, self.reg),
il.const(4, self.offset)
)
),
il.add(4,
il.mult(4,
il.reg(4 if self.ireg_long else 2, self.ireg),
il.const(1, self.scale)
),
il.const(4, self.outer_displacement)
)
)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
if self.reg == 'pc':
return None
else:
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpMemoryIndirectPreindex:
def __init__(self, size, reg, offset, ireg, ireg_long, scale, outer_displacement):
self.size = size
self.reg = reg
self.offset = offset
self.ireg = ireg
self.ireg_long = ireg_long
self.scale = scale
self.outer_displacement = outer_displacement
def __repr__(self):
return "OpRegisterIndirectIndex(%d, %s, 0x%x, %s, %d, %d, 0x%x)" % (self.size, self.reg, self.offset, self.ireg, self.ireg_long, self.scale, self.outer_displacement)
def format(self, addr):
# ([$1234,a0,a1.l*4],$1234)
tokens = []
tokens.append(InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("))
tokens.append(InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "["))
if self.offset != 0:
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:x}".format(self.offset), self.offset))
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg))
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, self.ireg))
tokens.append(InstructionTextToken(InstructionTextTokenType.TextToken, "."))
tokens.append(InstructionTextToken(InstructionTextTokenType.TextToken, "l" if self.ireg_long else 'w'))
if self.scale != 1:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, "*"))
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "{}".format(self.scale), self.scale))
tokens.append(InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, "]"))
if self.outer_displacement != 0:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:x}".format(self.outer_displacement), self.outer_displacement))
tokens.append(InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")"))
return tokens
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return il.add(4,
il.load(4,
il.add(4,
il.add(4,
il.const(4, il.current_address+2) if self.reg == 'pc' else il.reg(4, self.reg),
il.const(4, self.offset)
),
il.mult(4,
il.reg(4 if self.ireg_long else 2, self.ireg),
il.const(1, self.scale)
)
)
),
il.const(4, self.outer_displacement)
)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
if self.reg == 'pc':
return None
else:
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpAbsolute:
def __init__(self, size, address, address_size):
self.size = size
self.address = address
self.address_size = address_size
def __repr__(self):
return "OpAbsolute(%d, 0x%x, %d)" % (self.size, self.address, self.address_size)
def format(self, addr):
# ($1234).w
return [
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.PossibleAddressToken, "${:0{}x}".format(self.address, 1 << self.address_size), self.address, 1 << self.address_size),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")"+SizeSuffix[self.address_size])
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return il.sign_extend(4,
il.const(1 << self.address_size, self.address)
)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpImmediate:
def __init__(self, size, value):
self.size = size
self.value = value
def __repr__(self):
return "OpImmediate(%d, 0x%x)" % (self.size, self.value)
def format(self, addr):
# #$1234
return [
InstructionTextToken(InstructionTextTokenType.TextToken, "#"),
#InstructionTextToken(InstructionTextTokenType.PossibleAddressToken, "${:0{}x}".format(self.value, 1 << self.size), self.value, 1 << self.size)
InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:0{}x}".format(self.value, 1 << self.size), self.value, 1 << self.size)
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return None
def get_source_il(self, il):
return il.const(1 << self.size, self.value)
def get_dest_il(self, il, value, flags=0):
return None
# condition mapping to LLIL flag conditions
ConditionMapping = {
# 'hi': LowLevelILFlagCondition.
# 'ls': LowLevelILFlagCondition.
# 'cc': LowLevelILFlagCondition.
# 'cs': LowLevelILFlagCondition.
'ne': LowLevelILFlagCondition.LLFC_NE,
'eq': LowLevelILFlagCondition.LLFC_E,
'vc': LowLevelILFlagCondition.LLFC_NO,
'vs': LowLevelILFlagCondition.LLFC_O,
'pl': LowLevelILFlagCondition.LLFC_POS,
'mi': LowLevelILFlagCondition.LLFC_NEG,
'ge': LowLevelILFlagCondition.LLFC_UGE,
'lt': LowLevelILFlagCondition.LLFC_ULT,
'gt': LowLevelILFlagCondition.LLFC_UGT,
'le': LowLevelILFlagCondition.LLFC_ULE,
}
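# 'hi', 'ls', 'cc' and 'cs' have no direct LowLevelILFlagCondition equivalent, so they are
# left out here and presumably have to be lifted from the c/z flags explicitly elsewhere.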
class M68000(Architecture):
name = "M68000"
address_size = 4
default_int_size = 4
max_instr_length = 22
endianness = Endianness.BigEndian
regs = {
'd0': RegisterInfo('d0', 4),
'd1': RegisterInfo('d1', 4),
'd2': RegisterInfo('d2', 4),
'd3': RegisterInfo('d3', 4),
'd4': RegisterInfo('d4', 4),
'd5': RegisterInfo('d5', 4),
'd6': RegisterInfo('d6', 4),
'd7': RegisterInfo('d7', 4),
'a0': RegisterInfo('a0', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'a1': RegisterInfo('a1', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'a2': RegisterInfo('a2', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'a3': RegisterInfo('a3', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'a4': RegisterInfo('a4', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'a5': RegisterInfo('a5', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'a6': RegisterInfo('a6', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'sp': RegisterInfo('sp', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'sr': RegisterInfo('sr', 2),
'ccr': RegisterInfo('sr', 1),
# control registers
# MC68010/MC68020/MC68030/MC68040/CPU32
'sfc': RegisterInfo('sfc', 4),
'dfc': RegisterInfo('dfc', 4),
'usp': RegisterInfo('usp', 4),
'vbr': RegisterInfo('vbr', 4),
# MC68020/MC68030/MC68040
'cacr': RegisterInfo('cacr', 4),
'caar': RegisterInfo('caar', 4),
'msp': RegisterInfo('msp', 4),
'isp': RegisterInfo('isp', 4),
# MC68040/MC68LC040
'tc': RegisterInfo('tc', 4),
'itt0': RegisterInfo('itt0', 4),
'itt1': RegisterInfo('itt1', 4),
'dtt0': RegisterInfo('dtt0', 4),
'dtt1': RegisterInfo('dtt1', 4),
'mmusr': RegisterInfo('mmusr', 4),
'urp': RegisterInfo('urp', 4),
'srp': RegisterInfo('srp', 4),
# MC68EC040
'iacr0': RegisterInfo('iacr0', 4),
'iacr1': RegisterInfo('iacr1', 4),
'dacr0': RegisterInfo('dacr0', 4),
'dacr1': RegisterInfo('dacr1', 4),
}
stack_pointer = 'sp'
flags = ['x', 'n', 'z', 'v', 'c']
flag_write_types = ['', '*', 'nzvc']
flags_written_by_flag_write_types = {
'*': ['x', 'n', 'z', 'v', 'c'],
'nzvc': ['n', 'z', 'v', 'c'],
}
flag_roles = {
'x': FlagRole.SpecialFlagRole,
'n': FlagRole.NegativeSignFlagRole,
'z': FlagRole.ZeroFlagRole,
'v': FlagRole.OverflowFlagRole,
'c': FlagRole.CarryFlagRole,
}
flags_required_for_flag_condition = {
# LowLevelILFlagCondition. ['c', 'z'], # hi
# LowLevelILFlagCondition. ['c', 'z'], # ls
# LowLevelILFlagCondition. ['c'], # cc
# LowLevelILFlagCondition. ['c'], # cs
LowLevelILFlagCondition.LLFC_NE: ['z'], # ne
LowLevelILFlagCondition.LLFC_E: ['z'], # eq
LowLevelILFlagCondition.LLFC_NO: ['v'], # vc
LowLevelILFlagCondition.LLFC_O: ['v'], # vs
LowLevelILFlagCondition.LLFC_POS: ['n'], # pl
LowLevelILFlagCondition.LLFC_NEG: ['n'], # mi
LowLevelILFlagCondition.LLFC_UGE: ['n', 'v'], # ge
LowLevelILFlagCondition.LLFC_ULT: ['n', 'v'], # lt
LowLevelILFlagCondition.LLFC_UGT: ['n', 'v', 'z'], # gt
LowLevelILFlagCondition.LLFC_ULE: ['n', 'v', 'z'], # le
}
control_registers = {
}
memory_indirect = False
movem_store_decremented = False
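    # CPU feature flags, presumably overridden by subclasses modelling later 68k variants:
    # the memory indirect addressing modes and the MOVEM predecrement store behaviour
    # differ across family members (e.g. 68020+).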
def decode_effective_address(self, mode, register, data, size=None):
mode &= 0x07
register &= 0x07
reg = None
if mode == 0:
# data register direct
return (OpRegisterDirect(size, Registers[register]), 0)
elif mode == 1:
# address register direct
return (OpRegisterDirect(size, Registers[register+8]), 0)
elif mode == 2:
# address register indirect
return (OpRegisterIndirect(size, Registers[register+8]), 0)
elif mode == 3:
# address register indirect with postincrement
return (OpRegisterIndirectPostincrement(size, Registers[register+8]), 0)
elif mode == 4:
# address register indirect with predecrement
return (OpRegisterIndirectPredecrement(size, Registers[register+8]), 0)
elif mode == 5:
# address register indirect with displacement
return (OpRegisterIndirectDisplacement(size, Registers[register+8], struct.unpack_from('>h', data, 0)[0]), 2)
elif mode == 6:
# extended addressing mode
reg = Registers[register+8]
elif mode == 7:
if register == 0:
# absolute short
val = struct.unpack_from('>H', data, 0)[0]
if val & 0x8000:
val |= 0xffff0000
return (OpAbsolute(size, val, 1), 2)
if register == 1:
# absolute long
return (OpAbsolute(size, struct.unpack_from('>L', data, 0)[0], 2), 4)
elif register == 2:
# program counter indirect with displacement
return (OpRegisterIndirectDisplacement(size, 'pc', struct.unpack_from('>h', data, 0)[0]), 2)
elif register == 3:
# extended addressing mode
reg = 'pc'
elif register == 4:
# immediate
                if size is None:
# unspecified length
return (OpImmediate(size, None), None)
elif size == SIZE_BYTE:
# byte
return (OpImmediate(size, struct.unpack_from('>b', data, 1)[0]), 2)
                elif size == SIZE_WORD:
# word
return (OpImmediate(size, struct.unpack_from('>h', data, 0)[0]), 2)
                elif size == SIZE_LONG:
# long
return (OpImmediate(size, struct.unpack_from('>l', data, 0)[0]), 4)
if reg is not None:
extra = struct.unpack_from('>H', data, 0)[0]
# index register
xn = Registers[extra >> 12]
# index register size
index_size = (extra >> 11) & 1
# index register scale
scale = 1 << ((extra >> 9) & 3)
length = 2
if extra & 0x0100:
# full extension word
bd = 0
od = 0
# base displacement
if not (extra >> 7) & 1:
if (extra >> 4) & 3 == 2:
# word base displacement
bd = struct.unpack_from('>h', data, length)[0]
length += 2
elif (extra >> 4) & 3 == 3:
# long base displacement
bd = struct.unpack_from('>L', data, length)[0]
length += 4
# outer displacement
if extra & 3 == 2:
# word outer displacement
od = struct.unpack_from('>h', data, length)[0]
length += 2
elif extra & 3 == 3:
# long outer displacement
od = struct.unpack_from('>L', data, length)[0]
length += 4
# suppress index register
if extra & 7 == 0:
return (OpRegisterIndirectIndex(size, reg, bd, xn, index_size, scale), length)
elif (extra >> 6) & 1:
return (OpMemoryIndirect(size, reg, bd, od), length)
elif (extra >> 2) & 1:
return (OpMemoryIndirectPostindex(size, reg, bd, xn, index_size, scale, od), length)
else:
return (OpMemoryIndirectPreindex(size, reg, bd, xn, index_size, scale, od), length)
else:
# brief extension word
# 8 bit displacement
d8 = extra & 0xff
if d8 & 0x80:
d8 -= 256
return (OpRegisterIndirectIndex(size, reg, d8, xn, index_size, scale), length)
return (None, None)
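    # decode_effective_address returns (operand, extra_bytes_consumed); for example a simple
    # register indirect encoding (mode=2, register=0) yields (OpRegisterIndirect(size, 'a0'), 0),
    # while displacement/extension-word modes also report how many extension bytes they
    # consumed so the caller can advance past them.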
def decode_instruction(self, data, addr):
error_value = (None, None, None, None, None, None)
if len(data) < 2:
return error_value
instruction = struct.unpack_from('>H', data)[0]
msb = instruction >> 8
operation_code = msb >> 4
#print((hex(addr), hex(instruction)))
instr = None
length = None
size = None
source = None
dest = None
third = None
if operation_code == 0x0:
# Bit manipulation/MOVEP/Immed late
if instruction & 0xf9c0 == 0x00c0:
# rtm, callm, chk2, cmp2
if instruction & 0xfff0 == 0x06c0:
instr = 'rtm'
dest = OpRegisterDirect(SIZE_LONG, Registers[instruction & 15])
length = 2
elif instruction & 0xffc0 == 0x06c0:
instr = 'callm'
source = OpImmediate(SIZE_BYTE, struct.unpack_from('>B', data, 3)[0])
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[4:], SIZE_BYTE) # check
length = 4+extra_dest
else:
size = (instruction >> 9) & 3
extra = struct.unpack_from('>H', data, 2)[0]
if extra & 0x0800:
instr = 'chk2'
else:
instr = 'cmp2'
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[4:], SIZE_BYTE) # check
dest = OpRegisterDirect(size, Registers[(instruction >> 12) & 15])
length = 4+extra_source
elif instruction & 0xffc0 in (0x0ac0, 0x0cc0, 0x0ec0):
if instruction & 0xf9ff == 0x08fc:
instr = 'cas2'
size = ((instruction >> 9) & 3) - 1
extra1 = struct.unpack_from('>H', data, 2)[0]
extra2 = struct.unpack_from('>H', data, 4)[0]
source = OpRegisterDirectPair(size, Registers[extra1 & 7], Registers[extra2 & 7])
dest = OpRegisterDirectPair(size, Registers[(extra1 >> 6) & 7], Registers[(extra2 >> 6) & 7])
third = OpRegisterIndirectPair(size, Registers[(extra1 >> 12) & 15], Registers[(extra2 >> 12) & 15])
length = 6
else:
instr = 'cas'
size = ((instruction >> 9) & 3) - 1
extra = struct.unpack_from('>H', data, 2)[0]
source = OpRegisterDirect(size, Registers[extra & 7])
dest = OpRegisterDirect(size, Registers[(extra >> 6) & 7])
third, extra_third = self.decode_effective_address(instruction >> 3, instruction, data[4:], size)
length = 4+extra_third
elif msb in (0x00, 0x02, 0x04, 0x06, 0x0a, 0x0c):
# ORI, ANDI, SUBI, ADDI, EORI, CMPI
if msb == 0x00:
instr = 'ori'
elif msb == 0x02:
instr = 'andi'
elif msb == 0x04:
instr = 'subi'
elif msb == 0x06:
instr = 'addi'
elif msb == 0x0a:
instr = 'eori'
elif msb == 0x0c:
instr = 'cmpi'
size = (instruction >> 6) & 0x03
source, extra_source = self.decode_effective_address(7, 4, data[2:], size)
if instruction & 0x00ff == 0x003c:
dest = OpRegisterDirect(size, 'ccr')
extra_dest = 0
elif instruction & 0x00ff == 0x007c:
dest = OpRegisterDirect(size, 'sr')
extra_dest = 0
else:
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_source:], size)
if dest is None:
instr = None
else:
length = 2+extra_source+extra_dest
elif msb == 0x08:
# btst, bchg, bclr, bset with constant
if instruction & 0xffc0 == 0x0800:
instr = 'btst'
elif instruction & 0xffc0 == 0x0840:
instr = 'bchg'
elif instruction & 0xffc0 == 0x0880:
instr = 'bclr'
elif instruction & 0xffc0 == 0x08C0:
instr = 'bset'
source = OpImmediate(SIZE_BYTE, struct.unpack_from('>B', data, 3)[0])
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[4:], SIZE_BYTE)
if isinstance(dest, OpRegisterDirect):
dest.size = SIZE_LONG
if dest is None:
instr = None
else:
length = 4+extra_dest
elif msb & 0xf1 == 0x01:
# movep, btst, bchg, bclr, bset with register
if instruction & 0xf138 == 0x0108:
instr = 'movep'
size = ((instruction >> 6) & 1) + 1
source, extra_source = self.decode_effective_address(5, instruction, data[2:], SIZE_BYTE) # check
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
length = 2+extra_source
if instruction & 0x0080:
source, dest = dest, source
else:
if instruction & 0xf1c0 == 0x0100:
instr = 'btst'
elif instruction & 0xf1c0 == 0x0140:
instr = 'bchg'
elif instruction & 0xf1c0 == 0x0180:
instr = 'bclr'
elif instruction & 0xf1c0 == 0x01c0:
instr = 'bset'
source = OpRegisterDirect(SIZE_BYTE, Registers[(instruction >> 9) & 7]) # check
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2:], SIZE_BYTE)
if isinstance(dest, OpRegisterDirect):
dest.size = SIZE_LONG
if dest is None:
instr = None
else:
length = 2+extra_dest
elif instruction & 0xff00 == 0x0e00:
instr = 'moves'
extra = struct.unpack_from('>H', data, 2)[0]
size = (instruction >> 6) & 3
dest = OpRegisterDirect(size, Registers[extra >> 12])
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[4:], size)
if extra & 0x0800:
source, dest = dest, source
length = 4+extra_source
elif operation_code in (0x1, 0x2, 0x3):
# move
instr = 'move'
if operation_code == 0x1:
# Move byte
size = SIZE_BYTE
elif operation_code == 0x2:
# Move long
size = SIZE_LONG
elif operation_code == 0x3:
# Move word
size = SIZE_WORD
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
if source is None:
instr = None
else:
dest, extra_dest = self.decode_effective_address(instruction >> 6, instruction >> 9, data[2+extra_source:], size)
if dest is None or isinstance(dest, OpImmediate):
instr = None
else:
if isinstance(dest, OpRegisterDirect) and (dest.reg[0] == 'a' or dest.reg == 'sp'):
instr = 'movea'
length = 2+extra_source+extra_dest
elif operation_code == 0x4:
# Miscellaneous
extra_source = 0
extra_dest = 0
size = None
skip_ea = False
if instruction & 0xf100 == 0x4100:
# lea, extb, chk
if instruction & 0xf1c0 == 0x41c0:
if instruction & 0x0038:
instr = 'lea'
dest = OpRegisterDirect(SIZE_LONG, Registers[((instruction >> 9) & 7) + 8])
else:
instr = 'extb'
size = SIZE_LONG
else:
instr = 'chk'
if instruction & 0x0080:
size = SIZE_WORD
else:
size = SIZE_LONG
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
elif msb == 0x40:
# move from sr, negx
if instruction & 0xffc0 == 0x40c0:
# move from sr
instr = 'move'
size = SIZE_WORD
source = OpRegisterDirect(size, 'sr')
else:
instr = 'negx'
size = instruction >> 6
elif msb == 0x42:
# move to ccr, clr
if instruction & 0xffc0 == 0x42c0:
# move to ccr
instr = 'move'
size = SIZE_WORD
source = OpRegisterDirect(size, 'ccr')
else:
instr = 'clr'
size = instruction >> 6
elif msb == 0x44:
# move from ccr, neg
if instruction & 0xffc0 == 0x44c0:
# move from ccr
instr = 'move'
size = SIZE_WORD
dest = OpRegisterDirect(size, 'ccr')
else:
instr = 'neg'
size = instruction >> 6
elif msb == 0x46:
# move from sr, not
if instruction & 0xffc0 == 0x46c0:
# move from sr
instr = 'move'
size = SIZE_WORD
dest = OpRegisterDirect(size, 'sr')
else:
instr = 'not'
size = instruction >> 6
elif msb in (0x48, 0x4c):
# link, nbcd, movem, ext, swap, bkpt, pea, divs, divu, divsl, divul, muls, mulu
if instruction & 0xfff8 == 0x4808:
instr = 'link'
size = SIZE_LONG
dest, extra_dest = self.decode_effective_address(7, 4, data[2:], size)
elif instruction & 0xffc0 == 0x4800:
instr = 'nbcd'
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_source:], SIZE_BYTE)
skip_ea = True
elif instruction & 0xfb80 == 0x4880:
if instruction & 0x0040:
size = SIZE_LONG
else:
size = SIZE_WORD
if instruction & 0x0038:
instr = 'movem'
extra_source = 2
extra = struct.unpack_from('>H', data, 2)[0]
reg_list = []
if instruction & 0x0038 == 0x0020:
for k in range(16):
if extra << k & 0x8000:
reg_list.append(Registers[k])
else:
for k in range(16):
if extra >> k & 0x0001:
reg_list.append(Registers[k])
source = OpRegisterMovemList(size, reg_list)
else:
instr = 'ext'
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_source:], size)
skip_ea = True
if instruction & 0x0400:
source, dest = dest, source
elif instruction & 0xfff8 == 0x4840:
instr = 'swap'
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_source:], SIZE_LONG)
skip_ea = True
elif instruction & 0xfff8 == 0x4848:
instr = 'bkpt'
source = OpImmediate(SIZE_BYTE, instruction & 7)
skip_ea = True
elif instruction & 0xffc0 == 0x4840:
instr = 'pea'
size = SIZE_LONG
elif msb == 0x4c:
size = SIZE_LONG
extra_dest = 2
extra = struct.unpack_from('>H', data, 2)[0]
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_dest:], size)
dh = Registers[extra & 7]
dl = Registers[(extra >> 12) & 7]
dest = OpRegisterDirect(size, dl)
if instruction & 0x0040:
if extra & 0x0800:
instr = 'divs'
else:
instr = 'divu'
if extra & 0x0400:
dest = OpRegisterDirectPair(size, dh, dl)
elif dh != dl:
dest = OpRegisterDirectPair(size, dh, dl)
instr += 'l'
else:
if extra & 0x0800:
instr = 'muls'
else:
instr = 'mulu'
if extra & 0x0400:
dest = OpRegisterDirectPair(size, dh, dl)
skip_ea = True
elif msb == 0x4a:
# bgnd, illegal, tas, tst
if instruction == 0x4afa:
instr = 'bgnd'
skip_ea = True
elif instruction == 0x4afc:
instr = 'illegal'
skip_ea = True
elif instruction & 0xffc0 == 0x4ac0:
instr = 'tas'
skip_ea = True
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2:], SIZE_BYTE)
else:
instr = 'tst'
size = instruction >> 6
elif msb == 0x4e:
# trap, link, unlk, move, reset, nop, stop, rte, rtd, rts, trapv, rtr, movec, jsr, jmp
if instruction & 0xfff0 == 0x4e40:
instr = 'trap'
length = 2
source = OpImmediate(SIZE_BYTE, instruction & 15)
skip_ea = True
elif instruction & 0xfff0 == 0x4e50:
if instruction & 0xfff8 == 0x4e50:
instr = 'link'
dest, extra_dest = self.decode_effective_address(7, 4, data[2:], 1)
else:
instr = 'unlk'
source = OpRegisterDirect(SIZE_LONG, Registers[(instruction & 7) + 8])
skip_ea = True
elif instruction & 0xfff0 == 0x4e60:
instr = 'move'
size = SIZE_LONG
source = OpRegisterDirect(SIZE_LONG, Registers[(instruction & 7) + 8])
dest = OpRegisterDirect(size, 'usp')
if instruction & 0x08:
source, dest = dest, source
skip_ea = True
elif instruction == 0x4e70:
instr = 'reset'
skip_ea = True
elif instruction == 0x4e71:
instr = 'nop'
skip_ea = True
elif instruction == 0x4e72:
instr = 'stop'
source = OpImmediate(SIZE_WORD, struct.unpack_from(">H", data, 2)[0])
extra_source = 2
skip_ea = True
elif instruction == 0x4e73:
instr = 'rte'
skip_ea = True
elif instruction == 0x4e74:
instr = 'rtd'
dest, extra_dest = self.decode_effective_address(7, 4, data[2:], SIZE_WORD)
skip_ea = True
elif instruction == 0x4e75:
instr = 'rts'
skip_ea = True
elif instruction == 0x4e76:
instr = 'trapv'
skip_ea = True
elif instruction == 0x4e77:
instr = 'rtr'
skip_ea = True
elif instruction & 0xfffe == 0x4e7A:
instr = 'movec'
size = SIZE_LONG
extended = struct.unpack_from('>H', data, 2)[0]
control_reg = self.control_registers.get(extended & 0x0fff, None)
reg = (extended >> 12) & 15
if control_reg is None:
instr = None
else:
source = OpRegisterDirect(size, control_reg)
dest = OpRegisterDirect(size, Registers[reg])
if instruction & 1:
source, dest = dest, source
extra_source = 2
skip_ea = True
elif instruction & 0xff80 == 0x4e80:
if instruction & 0xffc0 == 0x4e80:
instr = 'jsr'
else:
instr = 'jmp'
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_source:], SIZE_LONG)
skip_ea = True
if instr is not None:
if size is not None:
size &= 3
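                # common path: decode the effective-address operand here unless the specific
                # opcode handler above already produced both operands (skip_ea)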
if skip_ea:
pass
elif dest is None:
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_source:], size)
else:
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_dest:], size)
if extra_source is None or extra_dest is None:
instr = None
else:
length = 2+extra_source+extra_dest
elif operation_code == 0x5:
# ADDQ/SUBQ/Scc/DBcc/TRAPcc
if instruction & 0xf0c0 == 0x50c0:
if instruction & 0xf0f8 == 0x50c8:
instr = 'db'+Condition[(instruction >> 8) & 0xf]
source = OpRegisterDirect(SIZE_WORD, Registers[instruction & 7])
dest = OpRegisterIndirectDisplacement(SIZE_LONG, 'pc', struct.unpack_from('>h', data, 2)[0])
length = 4
elif instruction & 0xf0ff in (0x50fa, 0x50fb, 0x50fc):
instr = 'trap'+Condition[(instruction >> 8) & 0xf]
if instruction & 7 == 2:
length = 4
source = OpImmediate(SIZE_WORD, struct.unpack_from('>H', data, 2)[0])
elif instruction & 7 == 3:
length = 6
source = OpImmediate(SIZE_LONG, struct.unpack_from('>L', data, 2)[0])
elif instruction & 7 == 4:
length = 2
else:
instr = 's'+Condition[(instruction >> 8) & 0xf]
size = SIZE_BYTE
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
length = 2+extra_dest
else:
if instruction & 0x0100:
instr = 'subq'
else:
instr = 'addq'
val = (instruction >> 9) & 7
if val == 0:
val = 8
size = (instruction >> 6) & 3
source = OpImmediate(SIZE_BYTE, val)
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
length = 2+extra_dest
elif operation_code == 0x6:
# Bcc/BSR/BRA
if msb == 0x60:
instr = 'bra'
elif msb == 0x61:
instr = 'bsr'
else:
instr = 'b'+Condition[(instruction >> 8) & 0xf]
val = instruction & 0xff
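            # 8-bit displacement lives in the opcode; 0x00 means a 16-bit displacement word follows,
            # 0xff means a 32-bit displacement follows (68020+)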
if val == 0:
val = struct.unpack_from('>h', data, 2)[0]
length = 4
elif val == 0xff:
                val = struct.unpack_from('>l', data, 2)[0]
length = 6
else:
if val & 0x80:
val -= 256
length = 2
dest = OpRegisterIndirectDisplacement(SIZE_LONG, 'pc', val)
elif operation_code == 0x7:
# MOVEQ
instr = 'moveq'
size = SIZE_LONG
val = instruction & 0xff
if val & 0x80:
val |= 0xffffff00
source = OpImmediate(size, val)
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
length = 2
elif operation_code == 0x8:
# OR/DIV/SBCD
if instruction & 0xf0c0 == 0x80c0:
if instruction & 0x0100:
instr = 'divs'
else:
instr = 'divu'
size = SIZE_WORD
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
length = 2+extra_source
elif instruction & 0xf1f0 == 0x8100:
instr = 'sbcd'
length = 2
dest = OpRegisterDirect(SIZE_BYTE, Registers[(instruction >> 9) & 7])
source = OpRegisterDirect(SIZE_BYTE, Registers[instruction & 7])
if instruction & 8:
dest = OpRegisterIndirectPredecrement(SIZE_BYTE, Registers[((instruction >> 9) & 7) + 8])
source = OpRegisterIndirectPredecrement(SIZE_BYTE, Registers[(instruction & 7) + 8])
elif instruction & 0xf130 == 0x8100:
if instruction & 0x0040:
instr = 'pack'
if instruction & 8:
dest = OpRegisterIndirectPredecrement(SIZE_BYTE, Registers[((instruction >> 9) & 7) + 8])
source = OpRegisterIndirectPredecrement(SIZE_WORD, Registers[(instruction & 7) + 8])
else:
dest = OpRegisterDirect(SIZE_BYTE, Registers[(instruction >> 9) & 7])
source = OpRegisterDirect(SIZE_WORD, Registers[instruction & 7])
else:
instr = 'unpk'
if instruction & 8:
dest = OpRegisterIndirectPredecrement(SIZE_WORD, Registers[((instruction >> 9) & 7) + 8])
source = OpRegisterIndirectPredecrement(SIZE_BYTE, Registers[(instruction & 7) + 8])
else:
dest = OpRegisterDirect(SIZE_WORD, Registers[(instruction >> 9) & 7])
source = OpRegisterDirect(SIZE_BYTE, Registers[instruction & 7])
length = 4
third = OpImmediate(SIZE_WORD, struct.unpack_from(">H", data, 2)[0])
else:
instr = 'or'
opmode = (instruction >> 6) & 0x7
size = (instruction >> 6) & 3
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
if opmode & 4:
source, dest = dest, source
length = 2+extra_source
elif operation_code == 0x9:
# SUB/SUBA/SUBX
instr = 'sub'
opmode = (instruction >> 6) & 0x7
if opmode in (0x03, 0x07):
instr = 'suba'
if opmode == 0x03:
size = SIZE_WORD
else:
size = SIZE_LONG
dest = OpRegisterDirect(SIZE_LONG, Registers[((instruction >> 9) & 7) + 8])
else:
size = (instruction >> 6) & 3
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
if instr == 'sub' and opmode & 4:
if isinstance(source, OpRegisterDirect):
instr = 'subx'
if source.reg[0] == 'a' or source.reg == 'sp':
source = OpRegisterIndirectPredecrement(size, source.reg)
dest = OpRegisterIndirectPredecrement(size, dest.reg)
else:
source, dest = dest, source
length = 2+extra_source
elif operation_code == 0xa:
# (unassigned, reserved)
pass
elif operation_code == 0xb:
# CMP/EOR
instr = 'cmp'
opmode = (instruction >> 6) & 0x7
if opmode in (0x03, 0x07):
instr = 'cmpa'
if opmode == 0x03:
size = SIZE_WORD
else:
size = SIZE_LONG
dest = OpRegisterDirect(size, Registers[((instruction >> 9) & 7) + 8])
else:
size = (instruction >> 6) & 3
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
if instr == 'cmp' and opmode & 4:
if instruction & 0x0038 == 0x0008:
instr = 'cmpm'
source = OpRegisterIndirectPostincrement(size, Registers[instruction & 15])
dest = OpRegisterIndirectPostincrement(size, Registers[((instruction >> 9) & 7) + 8])
else:
source, dest = dest, source
instr = 'eor'
length = 2+extra_source
elif operation_code == 0xc:
# AND/MUL/ABCD/EXG
if instruction & 0xf0c0 == 0xc0c0:
if instruction & 0x0100:
instr = 'muls'
else:
instr = 'mulu'
size = SIZE_WORD
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
length = 2+extra_source
elif instruction & 0xf130 == 0xc100:
if instruction & 0xf1f0 == 0xc100:
instr = 'abcd'
if instruction & 0x0008:
source = OpRegisterIndirectPredecrement(SIZE_BYTE, Registers[(instruction & 7) + 8])
dest = OpRegisterIndirectPredecrement(SIZE_BYTE, Registers[((instruction >> 9) & 7) + 8])
else:
source = OpRegisterDirect(SIZE_BYTE, Registers[instruction & 7])
dest = OpRegisterDirect(SIZE_BYTE, Registers[(instruction >> 9) & 7])
else:
instr = 'exg'
size = SIZE_LONG
source = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
dest = OpRegisterDirect(size, Registers[instruction & 7])
if instruction & 0xf1f8 == 0xc148:
source = OpRegisterIndirectPredecrement(size, Registers[((instruction >> 9) & 7) + 8])
dest = OpRegisterIndirectPredecrement(size, Registers[(instruction & 7) + 8])
if instruction & 0xf1f8 == 0xc188:
dest = OpRegisterIndirectPredecrement(size, Registers[(instruction & 7) + 8])
length = 2
else:
instr = 'and'
opmode = (instruction >> 6) & 0x7
size = (instruction >> 6) & 3
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
if opmode & 4:
source, dest = dest, source
length = 2+extra_source
elif operation_code == 0xd:
# ADD/ADDA/ADDX
instr = 'add'
opmode = (instruction >> 6) & 0x7
if opmode in (0x03, 0x07):
instr = 'adda'
if opmode == 0x03:
size = SIZE_WORD
else:
size = SIZE_LONG
dest = OpRegisterDirect(SIZE_LONG, Registers[((instruction >> 9) & 7) + 8])
else:
size = (instruction >> 6) & 3
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
if instr == 'add' and opmode & 4:
if isinstance(source, OpRegisterDirect):
instr = 'addx'
if source.reg[0] == 'a' or source.reg == 'sp':
source = OpRegisterIndirectPredecrement(size, source.reg)
dest = OpRegisterIndirectPredecrement(size, dest.reg)
else:
source, dest = dest, source
length = 2+extra_source
elif operation_code == 0xe:
# shift/rotate/bit field
if instruction & 0xF8C0 == 0xE0C0:
# shift/rotate
size = SIZE_WORD
direction = (instruction >> 8) & 1
style = (instruction >> 9) & 3
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
instr = ShiftStyle[style]
if direction:
instr += 'l'
else:
instr += 'r'
length = 2+extra_dest
elif instruction & 0xF8C0 == 0xE8C0:
# bit field instructions
# TODO
pass
else:
# shift/rotate
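                # register form: bit 5 selects a count held in a data register, otherwise a
                # 3-bit immediate count where 0 encodes 8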
size = (instruction >> 6) & 3
direction = (instruction >> 8) & 1
style = (instruction >> 3) & 3
if (instruction >> 5) & 1:
source = OpRegisterDirect(SIZE_LONG, Registers[(instruction >> 9) & 7])
else:
val = (instruction >> 9) & 7
if val == 0:
val = 8
source = OpImmediate(SIZE_BYTE, val)
dest = OpRegisterDirect(size, Registers[instruction & 7])
instr = ShiftStyle[style]
if direction:
instr += 'l'
else:
instr += 'r'
length = 2
elif operation_code == 0xf:
# coprocessor instructions
# TODO
pass
if instr is None:
log_error('Bad opcode 0x{:x} at 0x{:x}'.format(instruction, addr))
return error_value
#print((instr, length, size, source, dest, third))
return instr, length, size, source, dest, third
def generate_instruction_il(self, il, instr, length, size, source, dest, third):
size_bytes = None
if size is not None:
size_bytes = 1 << size
if instr in ('move', 'moveq'):
if instr == 'move' and isinstance(dest, OpRegisterDirect) and dest.reg in ('ccr', 'sr'):
il.append(il.set_reg(1, LLIL_TEMP(0), source.get_source_il(il)))
il.append(il.set_flag('c', il.test_bit(1, il.reg(1, LLIL_TEMP(0)), il.const(1, 0x01))))
il.append(il.set_flag('v', il.test_bit(1, il.reg(1, LLIL_TEMP(0)), il.const(1, 0x02))))
il.append(il.set_flag('z', il.test_bit(1, il.reg(1, LLIL_TEMP(0)), il.const(1, 0x04))))
il.append(il.set_flag('n', il.test_bit(1, il.reg(1, LLIL_TEMP(0)), il.const(1, 0x08))))
il.append(il.set_flag('x', il.test_bit(1, il.reg(1, LLIL_TEMP(0)), il.const(1, 0x10))))
else:
flags = 'nzvc'
if ((isinstance(source, OpRegisterDirect) and source.reg in ('usp', 'ccr', 'sr')) or
(isinstance(dest, OpRegisterDirect) and dest.reg in ('usp', 'ccr', 'sr'))):
# move to/from control registers do not set flags
flags = 0
il.append(
dest.get_dest_il(il,
source.get_source_il(il),
flags
)
)
elif instr in ('movea', 'movec'):
# dest.size = SIZE_LONG
# il.append(
# dest.get_dest_il(il,
# il.sign_extend(4,
# source.get_source_il(il)
# )
# )
# )
il.append(
dest.get_dest_il(il,
source.get_source_il(il)
)
)
elif instr == 'clr':
il.append(
dest.get_dest_il(il,
il.const(4, 0),
'nzvc'
)
)
elif instr in ('add', 'addi', 'addq'):
il.append(
dest.get_dest_il(il,
il.add(size_bytes,
dest.get_source_il(il),
source.get_source_il(il),
flags='*'
)
)
)
elif instr == 'adda':
dest.size = SIZE_LONG
il.append(
dest.get_dest_il(il,
il.add(4,
dest.get_source_il(il),
il.sign_extend(4,
source.get_source_il(il)
)
)
)
)
elif instr == 'addx':
il.append(
dest.get_dest_il(il,
il.add(size_bytes,
il.add(size_bytes,
dest.get_source_il(il),
source.get_source_il(il),
flags='*'
),
il.flag('x'),
flags='*'
)
)
)
elif instr in ('sub', 'subi', 'subq'):
il.append(
dest.get_dest_il(il,
il.sub(size_bytes,
source.get_source_il(il),
dest.get_source_il(il),
flags='*'
)
)
)
elif instr == 'suba':
dest.size = SIZE_LONG
il.append(
dest.get_dest_il(il,
il.sub(4,
dest.get_source_il(il),
il.sign_extend(4,
source.get_source_il(il)
)
)
)
)
elif instr == 'subx':
il.append(
dest.get_dest_il(il,
il.sub(size_bytes,
il.sub(size_bytes,
dest.get_source_il(il),
source.get_source_il(il),
flags='*'
),
il.flag('x'),
flags='*'
)
)
)
elif instr == 'neg':
il.append(
dest.get_dest_il(il,
il.neg_expr(size_bytes,
dest.get_source_il(il),
flags='*'
)
)
)
elif instr == 'negx':
il.append(
dest.get_dest_il(il,
il.sub(size_bytes,
il.neg_expr(size_bytes,
dest.get_source_il(il),
flags='*'
),
il.flag('x'),
flags='*'
)
)
)
elif instr == 'abcd':
# TODO
il.append(il.unimplemented())
elif instr == 'sbcd':
# TODO
il.append(il.unimplemented())
elif instr == 'nbcd':
# TODO
il.append(il.unimplemented())
elif instr == 'pack':
il.append(
il.set_reg(2,
LLIL_TEMP(0),
il.add(2,
source.get_source_il(il),
third.get_source_il(il)
)
)
)
il.append(
dest.get_dest_il(il,
il.or_expr(1,
il.and_expr(2,
il.reg(2, LLIL_TEMP(0)),
il.const(2, 0x000F)
),
il.logical_shift_right(2,
il.and_expr(2,
il.reg(2, LLIL_TEMP(0)),
il.const(2, 0x0F00)
),
il.const(1, 4)
)
)
)
)
elif instr == 'unpk':
il.append(
il.set_reg(1,
LLIL_TEMP(0),
source.get_source_il(il)
)
)
il.append(
dest.get_dest_il(il,
il.add(2,
il.or_expr(2,
il.and_expr(2,
il.reg(1, LLIL_TEMP(0)),
il.const(1, 0x0F)
),
il.shift_left(2,
il.and_expr(2,
il.reg(1, LLIL_TEMP(0)),
il.const(1, 0xF0)
),
il.const(1, 4)
)
),
third.get_source_il(il)
)
)
)
elif instr in ('muls', 'mulu'):
if isinstance(dest, OpRegisterDirectPair):
il.append(
il.set_reg_split(4,
dest.reg1,
dest.reg2,
il.mult(4,
source.get_source_il(il),
dest.get_source_il(il)[0],
flags='nzvc'
)
)
)
else:
il.append(
il.set_reg(4,
dest.reg,
il.mult(4,
source.get_source_il(il),
dest.get_source_il(il),
flags='nzvc'
)
)
)
elif instr == 'divs':
if size == 1:
dividend_il = dest.get_source_il(il)
divisor_il = source.get_source_il(il)
dest.size = SIZE_LONG
il.append(
dest.get_dest_il(il,
il.or_expr(4,
il.shift_left(4, il.mod_signed(2, dividend_il, divisor_il), il.const(1, 16)),
il.div_signed(2, dividend_il, divisor_il, flags='nzvc')
)
)
)
elif isinstance(dest, OpRegisterDirect):
dividend_il = dest.get_source_il(il)
divisor_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.div_signed(4, dividend_il, divisor_il, flags='nzvc')
)
)
else:
dividend_il = il.or_expr(8, il.shift_left(8, il.reg(4, dest.reg1), il.const(1, 32)), il.reg(4, dest.reg2))
divisor_il = source.get_source_il(il)
il.append(
il.set_reg(4,
LLIL_TEMP(0),
il.mod_signed(4, dividend_il, divisor_il)
)
)
il.append(
il.set_reg(4,
dest.reg2,
il.div_signed(4, dividend_il, divisor_il, flags='nzvc')
)
)
il.append(
il.set_reg(4,
dest.reg1,
il.reg(4, LLIL_TEMP(0))
)
)
elif instr == 'divsl':
dividend_il = il.reg(4, dest.reg2)
divisor_il = source.get_source_il(il)
il.append(
il.set_reg(4,
dest.reg1,
il.mod_signed(4, dividend_il, divisor_il)
)
)
il.append(
il.set_reg(4,
dest.reg2,
il.div_signed(4, dividend_il, divisor_il, flags='nzvc')
)
)
elif instr == 'divu':
if size == 1:
dividend_il = dest.get_source_il(il)
divisor_il = source.get_source_il(il)
dest.size = SIZE_LONG
il.append(
dest.get_dest_il(il,
il.or_expr(4,
il.shift_left(4, il.mod_unsigned(2, dividend_il, divisor_il), il.const(1, 16)),
il.div_unsigned(2, dividend_il, divisor_il, flags='nzvc')
)
)
)
elif isinstance(dest, OpRegisterDirect):
dividend_il = dest.get_source_il(il)
divisor_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.div_unsigned(4, dividend_il, divisor_il, flags='nzvc')
)
)
else:
dividend_il = il.or_expr(8, il.shift_left(8, il.reg(4, dest.reg1), il.const(1, 32)), il.reg(4, dest.reg2))
divisor_il = source.get_source_il(il)
il.append(
il.set_reg(4,
LLIL_TEMP(0),
il.mod_unsigned(4, dividend_il, divisor_il)
)
)
il.append(
il.set_reg(4,
dest.reg2,
il.div_unsigned(4, dividend_il, divisor_il, flags='nzvc')
)
)
il.append(
il.set_reg(4,
dest.reg1,
il.reg(4, LLIL_TEMP(0))
)
)
elif instr == 'divul':
dividend_il = il.reg(4, dest.reg2)
divisor_il = source.get_source_il(il)
il.append(
il.set_reg(4,
dest.reg1,
il.mod_unsigned(4, dividend_il, divisor_il)
)
)
il.append(
il.set_reg(4,
dest.reg2,
il.div_unsigned(4, dividend_il, divisor_il, flags='nzvc')
)
)
elif instr == 'cas':
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
il.append(
il.sub(size_bytes,
third.get_source_il(il),
source.get_source_il(il),
flags='nzvc'
)
)
equal = LowLevelILLabel()
not_equal = LowLevelILLabel()
il.append(
il.if_expr(il.flag_condition(LowLevelILFlagCondition.LLFC_E), equal, not_equal)
)
il.mark_label(equal)
il.append(
third.get_dest_il(il,
dest.get_source_il(il)
)
)
il.append(
il.goto(skip)
)
il.mark_label(not_equal)
il.append(
source.get_dest_il(il,
third.get_source_il(il)
)
)
if not skip_label_found:
il.mark_label(skip)
elif instr == 'cas2':
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
il.append(
il.sub(size_bytes,
third.get_source_il(il)[0],
source.get_source_il(il)[0],
flags='nzvc'
)
)
equal = LowLevelILLabel()
not_equal = LowLevelILLabel()
check2 = LowLevelILLabel()
il.append(
il.if_expr(il.flag_condition(LowLevelILFlagCondition.LLFC_E), check2, not_equal)
)
il.mark_label(check2)
il.append(
il.sub(size_bytes,
third.get_source_il(il)[1],
source.get_source_il(il)[1],
flags='nzvc'
)
)
il.append(
il.if_expr(il.flag_condition(LowLevelILFlagCondition.LLFC_E), equal, not_equal)
)
il.mark_label(equal)
for it in third.get_dest_il(il,
dest.get_source_il(il)
):
il.append(it)
il.append(
il.goto(skip)
)
il.mark_label(not_equal)
for it in source.get_dest_il(il,
third.get_source_il(il)
):
il.append(it)
il.append(
il.goto(skip)
)
if not skip_label_found:
il.mark_label(skip)
elif instr == 'chk':
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
trap = LowLevelILLabel()
check = LowLevelILLabel()
il.append(
il.if_expr(
il.compare_unsigned_less_than(size_bytes,
dest.get_source_il(il),
il.const(size_bytes, 0)
),
trap,
check
)
)
il.mark_label(check)
il.append(
il.if_expr(
il.compare_unsigned_greater_than(size_bytes,
dest.get_source_il(il),
source.get_source_il(il)
),
trap,
skip
)
)
il.mark_label(trap)
il.append(
il.system_call()
)
il.append(
il.goto(skip)
)
if not skip_label_found:
il.mark_label(skip)
elif instr == 'chk2':
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
trap = LowLevelILLabel()
check = LowLevelILLabel()
il.append(
il.set_reg(4,
LLIL_TEMP(0),
source.get_address_il(il)
)
)
il.append(
il.if_expr(
il.compare_unsigned_less_than(size_bytes,
dest.get_source_il(il),
il.load(size_bytes,
il.reg(4, LLIL_TEMP(0))
)
),
trap,
check
)
)
il.mark_label(check)
il.append(
il.if_expr(
il.compare_unsigned_greater_than(size_bytes,
dest.get_source_il(il),
il.load(size_bytes,
il.add(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, size_bytes)
)
)
),
trap,
skip
)
)
il.mark_label(trap)
il.append(
il.system_call()
)
il.append(
il.goto(skip)
)
if not skip_label_found:
il.mark_label(skip)
elif instr == 'bchg':
bit_number_il = il.mod_unsigned(1,
source.get_source_il(il),
il.const(1, 8 << dest.size)
)
il.append(
il.set_flag('z',
il.compare_not_equal(4,
il.test_bit(4,
dest.get_source_il(il),
il.shift_left(4,
il.const(4, 1),
bit_number_il
)
),
il.const(4, 0)
)
)
)
il.append(
dest.get_dest_il(il,
il.xor_expr(4,
dest.get_source_il(il),
il.shift_left(4,
il.const(4, 1),
bit_number_il
)
)
)
)
elif instr == 'bclr':
bit_number_il = il.mod_unsigned(1,
source.get_source_il(il),
il.const(1, 8 << dest.size)
)
il.append(
il.set_flag('z',
il.compare_not_equal(4,
il.test_bit(4,
dest.get_source_il(il),
il.shift_left(4,
il.const(4, 1),
bit_number_il
)
),
il.const(4, 0)
)
)
)
il.append(
dest.get_dest_il(il,
il.and_expr(4,
dest.get_source_il(il),
il.not_expr(4,
il.shift_left(4,
il.const(4, 1),
bit_number_il
)
)
)
)
)
elif instr == 'bset':
bit_number_il = il.mod_unsigned(1,
source.get_source_il(il),
il.const(1, 8 << dest.size)
)
il.append(
il.set_flag('z',
il.compare_not_equal(4,
il.test_bit(4,
dest.get_source_il(il),
il.shift_left(4,
il.const(4, 1),
bit_number_il
)
),
il.const(4, 0)
)
)
)
il.append(
dest.get_dest_il(il,
il.or_expr(4,
dest.get_source_il(il),
il.shift_left(4,
il.const(4, 1),
bit_number_il
)
)
)
)
elif instr == 'btst':
bit_number_il = il.mod_unsigned(1,
source.get_source_il(il),
il.const(1, 8 << dest.size)
)
il.append(
il.set_flag('z',
il.compare_not_equal(4,
il.test_bit(4,
dest.get_source_il(il),
il.shift_left(4,
il.const(4, 1),
bit_number_il
)
),
il.const(4, 0)
)
)
)
elif instr in ('asl', 'lsl'):
source_il = il.const(1, 1)
if source is not None:
source_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.shift_left(size_bytes,
dest.get_source_il(il),
source_il,
flags='*'
)
)
)
elif instr == 'asr':
source_il = il.const(1, 1)
if source is not None:
source_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.arith_shift_right(size_bytes,
dest.get_source_il(il),
source_il,
flags='*'
)
)
)
elif instr == 'lsr':
source_il = il.const(1, 1)
if source is not None:
source_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.logical_shift_right(size_bytes,
dest.get_source_il(il),
source_il,
flags='*'
)
)
)
elif instr == 'rol':
source_il = il.const(1, 1)
if source is not None:
source_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.rotate_left(size_bytes,
dest.get_source_il(il),
source_il,
flags='*'
)
)
)
elif instr == 'ror':
source_il = il.const(1, 1)
if source is not None:
source_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.rotate_right(size_bytes,
dest.get_source_il(il),
source_il,
flags='*'
)
)
)
elif instr == 'roxl':
source_il = il.const(1, 1)
if source is not None:
source_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.rotate_left_carry(size_bytes,
dest.get_source_il(il),
source_il,
flags='*'
)
)
)
elif instr == 'roxr':
source_il = il.const(1, 1)
if source is not None:
source_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.rotate_right_carry(size_bytes,
dest.get_source_il(il),
source_il,
flags='*'
)
)
)
elif instr in ('cmp', 'cmpi', 'cmpm'):
il.append(
il.sub(size_bytes,
dest.get_source_il(il),
source.get_source_il(il),
flags='nzvc'
)
)
elif instr == 'cmpa':
dest.size = SIZE_LONG
il.append(
il.sub(4,
dest.get_source_il(il),
il.sign_extend(4,
source.get_source_il(il)
),
flags='nzvc'
)
)
elif instr == 'cmp2':
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
check = LowLevelILLabel()
il.append(
il.set_reg(4,
LLIL_TEMP(0),
source.get_address_il(il)
)
)
il.append(
il.sub(size_bytes,
dest.get_source_il(il),
il.load(size_bytes,
il.reg(4, LLIL_TEMP(0))
),
flags='nzvc'
)
)
il.append(
il.if_expr(
il.flag_condition(LowLevelILFlagCondition.LLFC_ULT),
skip,
check
)
)
il.mark_label(check)
il.append(
il.sub(size_bytes,
dest.get_source_il(il),
il.load(size_bytes,
il.add(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, size_bytes)
)
),
flags='nzvc'
)
)
il.append(
il.goto(skip)
)
if not skip_label_found:
il.mark_label(skip)
elif instr == 'tas':
il.append(
il.set_reg(1, LLIL_TEMP(0), dest.get_source_il(il), flags='nzvc')
)
il.append(
dest.get_dest_il(il,
il.or_expr(1,
il.reg(1, LLIL_TEMP(0)),
il.const(1, 0x80)
)
)
)
elif instr == 'tst':
il.append(
il.sub(size_bytes,
dest.get_source_il(il),
il.const(4, 0),
flags='nzvc'
)
)
elif instr in ('and', 'andi'):
if instr == 'andi' and isinstance(dest, OpRegisterDirect) and dest.reg in ('ccr', 'sr'):
if not source.value & 0x01: il.append(il.set_flag('c', il.const(1, 0)))
if not source.value & 0x02: il.append(il.set_flag('v', il.const(1, 0)))
if not source.value & 0x04: il.append(il.set_flag('z', il.const(1, 0)))
if not source.value & 0x08: il.append(il.set_flag('n', il.const(1, 0)))
                if not source.value & 0x10: il.append(il.set_flag('x', il.const(1, 0)))
else:
il.append(
dest.get_dest_il(il,
il.and_expr(size_bytes,
dest.get_source_il(il),
source.get_source_il(il),
flags='nzvc'
)
)
)
elif instr in ('or', 'ori'):
if instr == 'ori' and isinstance(dest, OpRegisterDirect) and dest.reg in ('ccr', 'sr'):
if source.value & 0x01: il.append(il.set_flag('c', il.const(1, 1)))
if source.value & 0x02: il.append(il.set_flag('v', il.const(1, 1)))
if source.value & 0x04: il.append(il.set_flag('z', il.const(1, 1)))
if source.value & 0x08: il.append(il.set_flag('n', il.const(1, 1)))
                if source.value & 0x10: il.append(il.set_flag('x', il.const(1, 1)))
else:
il.append(
dest.get_dest_il(il,
il.or_expr(size_bytes,
dest.get_source_il(il),
source.get_source_il(il),
flags='nzvc'
)
)
)
elif instr in ('eor', 'eori'):
if instr == 'eori' and isinstance(dest, OpRegisterDirect) and dest.reg in ('ccr', 'sr'):
if source.value & 0x01: il.append(il.set_flag('c', il.xor_expr(1, il.flag('c'), il.const(1, 1))))
if source.value & 0x02: il.append(il.set_flag('v', il.xor_expr(1, il.flag('v'), il.const(1, 1))))
if source.value & 0x04: il.append(il.set_flag('z', il.xor_expr(1, il.flag('z'), il.const(1, 1))))
if source.value & 0x08: il.append(il.set_flag('n', il.xor_expr(1, il.flag('n'), il.const(1, 1))))
                if source.value & 0x10: il.append(il.set_flag('x', il.xor_expr(1, il.flag('x'), il.const(1, 1))))
else:
il.append(
dest.get_dest_il(il,
il.xor_expr(size_bytes,
dest.get_source_il(il),
source.get_source_il(il),
flags='nzvc'
)
)
)
elif instr == 'not':
il.append(
dest.get_dest_il(il,
il.not_expr(size_bytes,
dest.get_source_il(il),
flags='nzvc'
)
)
)
elif instr == 'swap':
il.append(
dest.get_dest_il(il,
il.rotate_right(4,
dest.get_source_il(il),
il.const(1, 16)
)
)
)
elif instr == 'exg':
il.append(
il.set_reg(4, LLIL_TEMP(0), source.get_source_il(il))
)
il.append(
source.get_dest_il(il, dest.get_source_il(il))
)
il.append(
dest.get_dest_il(il, il.reg(4, LLIL_TEMP(0)))
)
elif instr == 'ext':
reg = dest.reg
if dest.size == 1:
il.append(
il.set_reg(2,
reg,
il.sign_extend(4,
il.reg(1, reg),
flags='nzvc'
)
)
)
else:
il.append(
il.set_reg(4,
reg,
il.sign_extend(4,
il.reg(2, reg),
flags='nzvc'
)
)
)
elif instr == 'extb':
reg = dest.reg
il.append(
il.set_reg(4,
reg,
il.sign_extend(4,
il.reg(1, reg),
flags='nzvc'
)
)
)
elif instr == 'movem':
if isinstance(source, OpRegisterMovemList):
if isinstance(dest, OpRegisterIndirectPredecrement):
il.append(
il.set_reg(4, LLIL_TEMP(0), dest.get_address_il(il))
)
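                    # movem_store_decremented: when True (68020+ behaviour) the base register is updated
                    # before the stores, so storing the base register writes the decremented value;
                    # otherwise it is updated after the stores (68000/68010 behaviour)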
if self.movem_store_decremented:
il.append(
il.set_reg(4,
dest.reg,
il.sub(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, len(source.regs)*size_bytes)
)
)
)
for k in range(len(source.regs)):
il.append(
il.store(size_bytes,
il.sub(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, (k+1)*size_bytes)
),
il.reg(size_bytes, source.regs[len(source.regs)-1-k])
)
)
if not self.movem_store_decremented:
il.append(
il.set_reg(4,
dest.reg,
il.sub(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, len(source.regs)*size_bytes)
)
)
)
else:
il.append(
il.set_reg(4, LLIL_TEMP(0), dest.get_address_il(il))
)
for k in range(len(source.regs)):
il.append(
il.store(size_bytes,
il.add(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, k*size_bytes)
),
il.reg(size_bytes, source.regs[k])
)
)
else:
il.append(
il.set_reg(4, LLIL_TEMP(0), source.get_address_il(il))
)
for k in range(len(dest.regs)):
il.append(
il.set_reg(size_bytes,
dest.regs[k],
il.load(size_bytes,
il.add(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, k*size_bytes)
)
)
)
)
if isinstance(source, OpRegisterIndirectPostincrement):
il.append(
il.set_reg(4,
source.reg,
il.add(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, len(dest.regs)*size_bytes)
)
)
)
elif instr == 'lea':
il.append(
dest.get_dest_il(il, source.get_address_il(il))
)
elif instr == 'pea':
il.append(
il.push(4, dest.get_address_il(il))
)
elif instr == 'link':
source.size = SIZE_LONG
il.append(
il.push(4, source.get_source_il(il))
)
il.append(
source.get_dest_il(il, il.reg(4, "sp"))
)
il.append(
il.set_reg(4,
"sp",
il.add(4,
il.reg(4, "sp"),
il.sign_extend(4, dest.get_source_il(il))
)
)
)
elif instr == 'unlk':
il.append(
il.set_reg(4, "sp", source.get_source_il(il))
)
il.append(
source.get_dest_il(il, il.pop(4))
)
elif instr in ('jmp', 'bra'):
# TODO labels
il.append(
il.jump(dest.get_address_il(il))
)
elif instr in ('jsr', 'bsr'):
# TODO labels
il.append(
il.call(dest.get_address_il(il))
)
elif instr == 'callm':
# TODO
il.append(il.unimplemented())
elif instr in ('bhi', 'bls', 'bcc', 'bcs', 'bne', 'beq', 'bvc', 'bvs',
'bpl', 'bmi', 'bge', 'blt', 'bgt', 'ble'):
flag_cond = ConditionMapping.get(instr[1:], None)
dest_il = dest.get_address_il(il)
cond_il = None
if flag_cond is not None:
cond_il = il.flag_condition(flag_cond)
if cond_il is None:
il.append(il.unimplemented())
else:
t = il.get_label_for_address(Architecture['M68000'], il[dest_il].value)
indirect = False
if t is None:
t = LowLevelILLabel()
indirect = True
f_label_found = True
f = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if f is None:
f = LowLevelILLabel()
f_label_found = False
il.append(
il.if_expr(cond_il, t, f)
)
if indirect:
il.mark_label(t)
il.append(il.jump(dest_il))
if not f_label_found:
il.mark_label(f)
elif instr in ('dbt', 'dbf', 'dbhi', 'dbls', 'dbcc', 'dbcs', 'dbne',
'dbeq', 'dbvc', 'dbvs', 'dbpl', 'dbmi', 'dbge', 'dblt',
'dbgt', 'dble'):
flag_cond = ConditionMapping.get(instr[2:], None)
dest_il = dest.get_address_il(il)
cond_il = None
if flag_cond is not None:
cond_il = il.flag_condition(flag_cond)
elif instr == 'dbt':
cond_il = il.const(1, 1)
elif instr == 'dbf':
cond_il = il.const(1, 0)
if cond_il is None:
il.append(il.unimplemented())
else:
branch = il.get_label_for_address(Architecture['M68000'], il[dest_il].value)
indirect = False
if branch is None:
branch = LowLevelILLabel()
indirect = True
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
decrement = LowLevelILLabel()
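                # DBcc: if the condition holds, fall through; otherwise decrement Dn and
                # branch back unless the counter reaches -1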
il.append(
il.if_expr(cond_il, skip, decrement)
)
il.mark_label(decrement)
il.append(
il.set_reg(2,
LLIL_TEMP(0),
il.sub(2,
source.get_source_il(il),
il.const(2, 1)
)
)
)
il.append(
source.get_dest_il(il, il.reg(2, LLIL_TEMP(0)))
)
il.append(
il.if_expr(
il.compare_equal(2,
il.reg(2, LLIL_TEMP(0)),
il.const(2, -1)
),
skip,
branch
)
)
if indirect:
il.mark_label(branch)
il.append(il.jump(dest_il))
if not skip_label_found:
il.mark_label(skip)
elif instr in ('st', 'sf', 'shi', 'sls', 'scc', 'scs', 'sne', 'seq',
'svc', 'svs', 'spl', 'smi', 'sge', 'slt', 'sgt', 'sle'):
flag_cond = ConditionMapping.get(instr[1:], None)
cond_il = None
if flag_cond is not None:
cond_il = il.flag_condition(flag_cond)
elif instr == 'st':
cond_il = il.const(1, 1)
elif instr == 'sf':
cond_il = il.const(1, 0)
if cond_il is None:
il.append(il.unimplemented())
else:
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
set_dest = LowLevelILLabel()
clear_dest = LowLevelILLabel()
il.append(
il.if_expr(cond_il, set_dest, clear_dest)
)
il.mark_label(set_dest)
il.append(
dest.get_dest_il(il, il.const(1, 1))
)
il.append(
il.goto(skip)
)
il.mark_label(clear_dest)
il.append(
dest.get_dest_il(il, il.const(1, 0))
)
il.append(
il.goto(skip)
)
if not skip_label_found:
il.mark_label(skip)
elif instr == 'rtd':
il.append(
il.set_reg(4,
LLIL_TEMP(0),
il.pop(4)
)
)
il.append(
il.set_reg(4,
"sp",
il.add(4,
il.reg(4, "sp"),
source.get_source_il(il)
)
)
)
il.append(
il.ret(
il.reg(4, LLIL_TEMP(0))
)
)
elif instr == 'rte':
il.append(
il.set_reg(2,
"sr",
il.pop(2)
)
)
il.append(
il.ret(
il.pop(4)
)
)
elif instr == 'rtm':
# TODO
il.append(il.unimplemented())
elif instr == 'rtr':
il.append(
il.set_reg(2,
"ccr",
il.pop(2)
)
)
il.append(
il.ret(
il.pop(4)
)
)
elif instr == 'rts':
il.append(
il.ret(
il.pop(4)
)
)
elif instr in ('trapv', 'trapt', 'trapf', 'traphi', 'trapls', 'trapcc',
'trapcs', 'trapne', 'trapeq', 'trapvc', 'trapvs', 'trappl',
'trapmi', 'trapge', 'traplt', 'trapgt', 'traple'):
flag_cond = ConditionMapping.get(instr[4:], None)
cond_il = None
if flag_cond is not None:
cond_il = il.flag_condition(flag_cond)
elif instr == 'trapt':
cond_il = il.const(1, 1)
elif instr == 'trapf':
cond_il = il.const(1, 0)
elif instr == 'trapv':
cond_il = il.flag_condition(LowLevelILFlagCondition.LLFC_O)
if cond_il is None:
il.append(il.unimplemented())
else:
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
trap = LowLevelILLabel()
il.append(
il.if_expr(cond_il, trap, skip)
)
il.mark_label(trap)
il.append(
il.system_call()
)
il.append(
il.goto(skip)
)
if not skip_label_found:
il.mark_label(skip)
elif instr in ('trap', 'illegal', 'bkpt'):
il.append(il.system_call())
elif instr in ('bgnd', 'nop', 'reset', 'stop'):
il.append(il.nop())
else:
il.append(il.unimplemented())
def perform_get_instruction_info(self, data, addr):
instr, length, size, source, dest, third = self.decode_instruction(data, addr)
if instr is None:
return None
result = InstructionInfo()
result.length = length
if instr in ('rtd', 'rte', 'rtr', 'rts'):
result.add_branch(BranchType.FunctionReturn)
elif instr in ('jmp', 'jsr',
'bra', 'bsr', 'bhi', 'bls', 'bcc', 'bcs', 'bne', 'beq',
'bvc', 'bvs', 'bpl', 'bmi', 'bge', 'blt', 'bgt', 'ble',
'dbt', 'dbf', 'dbhi', 'dbls', 'dbcc', 'dbcs', 'dbne',
'dbeq', 'dbvc', 'dbvs', 'dbpl', 'dbmi', 'dbge', 'dblt',
'dbgt', 'dble'):
conditional = False
call = False
branch_dest = None
bt = BranchType.UnresolvedBranch
if instr in ('jmp', 'bra'):
bt = BranchType.UnconditionalBranch
elif instr in ('jsr', 'bsr'):
call = True
bt = BranchType.CallDestination
else:
conditional = True
if isinstance(dest, OpAbsolute):
branch_dest = dest.address
elif isinstance(dest, OpRegisterIndirect):
if dest.reg == 'pc':
branch_dest = addr+2
else:
bt = BranchType.IndirectBranch
elif isinstance(dest, OpRegisterIndirectDisplacement):
if dest.reg == 'pc':
branch_dest = addr+2+dest.offset
else:
bt = BranchType.IndirectBranch
if conditional:
if instr[0:2] == 'db':
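                    # DBcc falls through when its condition is true and branches back when it is
                    # false, so the true/false senses are swapped relative to Bcc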
result.add_branch(BranchType.TrueBranch, addr+length)
result.add_branch(BranchType.FalseBranch, branch_dest)
else:
result.add_branch(BranchType.TrueBranch, branch_dest)
result.add_branch(BranchType.FalseBranch, addr+length)
else:
if call and bt == BranchType.IndirectBranch:
# don't branch at all for indirect calls
pass
elif bt == BranchType.IndirectBranch or bt == BranchType.UnresolvedBranch or branch_dest is None:
result.add_branch(bt)
else:
result.add_branch(bt, branch_dest)
return result
def perform_get_instruction_text(self, data, addr):
instr, length, size, source, dest, third = self.decode_instruction(data, addr)
if instr is None:
return None
if size is not None:
instr += SizeSuffix[size]
tokens = [InstructionTextToken(InstructionTextTokenType.InstructionToken, "%-10s" % instr)]
if source is not None:
tokens += source.format(addr)
if dest is not None:
if source is not None:
tokens += [InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ',')]
tokens += dest.format(addr)
if third is not None:
if source is not None or dest is not None:
tokens += [InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ',')]
tokens += third.format(addr)
return tokens, length
def perform_get_instruction_low_level_il(self, data, addr, il):
instr, length, size, source, dest, third = self.decode_instruction(data, addr)
if instr is None:
return None
if instr == 'movem':
# movem overrides default predecrement/postincrement IL generation
self.generate_instruction_il(il, instr, length, size, source, dest, third)
else:
# predecrement
if source is not None:
pre_il = source.get_pre_il(il)
if pre_il is not None:
il.append(pre_il)
if dest is not None:
pre_il = dest.get_pre_il(il)
if pre_il is not None:
il.append(pre_il)
if third is not None:
pre_il = third.get_pre_il(il)
if pre_il is not None:
il.append(pre_il)
self.generate_instruction_il(il, instr, length, size, source, dest, third)
# postincrement
if source is not None:
post_il = source.get_post_il(il)
if post_il is not None:
il.append(post_il)
if dest is not None:
post_il = dest.get_post_il(il)
if post_il is not None:
il.append(post_il)
if third is not None:
post_il = third.get_post_il(il)
if post_il is not None:
il.append(post_il)
return length
def perform_is_never_branch_patch_available(self, data, addr):
data = bytearray(data)
if data[0] & 0xf0 == 0x60:
# BRA, BSR, Bcc
return True
if data[0] == 0x4e and data[1] & 0x80 == 0x80:
# JMP, JSR
return True
return False
def perform_is_invert_branch_patch_available(self, data, addr):
data = bytearray(data)
if data[0] & 0xf0 == 0x60 and data[0] & 0xfe != 0x60:
# Bcc
return True
return False
def perform_is_always_branch_patch_available(self, data, addr):
data = bytearray(data)
if data[0] & 0xf0 == 0x60 and data[0] & 0xfe != 0x60:
# Bcc
return True
return False
def perform_is_skip_and_return_zero_patch_available(self, data, addr):
return self.perform_skip_and_return_value(data, addr)
def perform_is_skip_and_return_value_patch_available(self, data, addr):
data = bytearray(data)
if data[0] == 0x61:
# BSR
return True
if data[0] == 0x4e and data[1] & 0xc0 == 0x80:
# JSR
return True
return False
def perform_convert_to_nop(self, data, addr):
count = int(len(data)/2)
if count*2 != len(data):
return None
return b'\x4e\x71' * count
def perform_never_branch(self, data, addr):
data = bytearray(data)
if data[0] & 0xf0 == 0x60:
# BRA, BSR, Bcc
return self.perform_convert_to_nop(data, addr)
if data[0] == 0x4e and data[1] & 0x80 == 0x80:
# JMP, JSR
return self.perform_convert_to_nop(data, addr)
return None
def perform_invert_branch(self, data, addr):
data = bytearray(data)
if data[0] & 0xf0 == 0x60 and data[0] & 0xfe != 0x60:
# Bcc
return bytearray([data[0]^1])+data[1:]
return None
def perform_always_branch(self, data, addr):
data = bytearray(data)
if data[0] & 0xf0 == 0x60 and data[0] & 0xfe != 0x60:
# Bcc
return b'\x60'+data[1:]
return None
def perform_skip_and_return_value(self, data, addr, value=0):
count = int(len(data)/2)
if count*2 != len(data):
return None
data = bytearray(data)
ok = False
if data[0] == 0x61:
# BSR
ok = True
if data[0] == 0x4e and data[1] & 0xc0 == 0x80:
# JSR
ok = True
if not ok:
return None
if value > 0x80000000:
value = value - 0x100000000
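        # patch the call site with moveq #value,d0 (short form) or move.l #value,d0,
        # padding the remaining bytes with nops (0x4e71)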
if value >= -128 and value <= 127 and len(data) >= 2:
value = value & 0xff
            return b'\x70'+struct.pack('>B', value)+b'\x4e\x71'*(count-1)
if len(data) >= 6:
return b'\x20\x3C'+struct.pack('>l', value)+b'\x4e\x71'*(count-3)
return None
class M68008(M68000):
name = "M68008"
class M68010(M68000):
name = "M68010"
control_registers = {
0x000: 'sfc',
0x001: 'dfc',
0x800: 'usp',
0x801: 'vbr',
}
# add BKPT, MOVE from CCR, MOVEC, MOVES, RTD
class M68020(M68010):
name = "M68020"
control_registers = {
0x000: 'sfc',
0x001: 'dfc',
0x800: 'usp',
0x801: 'vbr',
0x002: 'cacr',
0x802: 'caar',
0x803: 'msp',
0x804: 'isp',
}
memory_indirect = True
movem_store_decremented = True
# add BFCHG, BFCLR, BFEXTS, BFEXTU, BFFO, BFINS, BFSET, BFTST, CALLM, CAS, CAS2, CHK2, CMP2, cpBcc, cpDBcc, cpGEN, cpRESTORE, cpSAVE, cpScc, cpTRAPcc
# DIVSL, DIVUL, EXTB, PACK, RTM, TRAPcc, UNPK
# add memory indirect addressing
class M68030(M68020):
name = "M68030"
# remove CALLM, RTM
# add PFLUSH, PFLUSHA, PLOAD, PMOVE, PTEST
class M68040(M68030):
name = "M68040"
control_registers = {
0x000: 'sfc',
0x001: 'dfc',
0x800: 'usp',
0x801: 'vbr',
0x002: 'cacr',
0x803: 'msp',
0x804: 'isp',
0x003: 'tc',
0x004: 'itt0',
0x005: 'itt1',
0x006: 'dtt0',
0x007: 'dtt1',
0x805: 'mmusr',
0x806: 'urp',
0x807: 'srp',
}
# remove cpBcc, cpDBcc, cpGEN, cpRESTORE, cpSAVE, cpScc, cpTRAPcc, PFLUSHA, PLOAD, PMOVE
# add CINV, CPUSH, floating point, MOVE16
class M68LC040(M68040):
name = "M68LC040"
class M68EC040(M68040):
name = "M68EC040"
control_registers = {
0x000: 'sfc',
0x001: 'dfc',
0x800: 'usp',
0x801: 'vbr',
0x002: 'cacr',
0x803: 'msp',
0x804: 'isp',
0x004: 'iacr0',
0x005: 'iacr1',
0x006: 'dacr0',
0x007: 'dacr1'
}
class M68330(M68010):
name = "M68330"
movem_store_decremented = True
# AKA CPU32
# add BGND, CHK2, CMP2, DIVSL, DIVUL, EXTB, LPSTOP, TBLS, TBLSN, TBLU, TBLUN, TRAPcc
class M68340(M68330):
name = "M68340"
def create_vector_table(view, addr, size=256):
vectors = {
0: 'reset_initial_interrupt_stack_pointer',
1: 'reset_initial_program_counter',
2: 'access_fault',
3: 'address_error',
4: 'illegal_instruction',
5: 'integer_divide_by_zero',
6: 'chk_chk2_instruction',
7: 'ftrapcc_trapcc_trapv_instruction',
8: 'privilege_violation',
9: 'trace',
10: 'line_1010_emulator',
11: 'line_1111_emulator',
# 12 unassigned_reserved
13: 'coprocessor_protocol_violation',
14: 'format_error',
15: 'uninitialized_interrupt',
# 16-23 unassigned_reserved
24: 'spurious_interrupt',
25: 'level_1_interrupt_autovector',
26: 'level_2_interrupt_autovector',
27: 'level_3_interrupt_autovector',
28: 'level_4_interrupt_autovector',
29: 'level_5_interrupt_autovector',
30: 'level_6_interrupt_autovector',
31: 'level_7_interrupt_autovector',
32: 'trap_0_instruction',
33: 'trap_1_instruction',
34: 'trap_2_instruction',
35: 'trap_3_instruction',
36: 'trap_4_instruction',
37: 'trap_5_instruction',
38: 'trap_6_instruction',
39: 'trap_7_instruction',
40: 'trap_8_instruction',
41: 'trap_9_instruction',
42: 'trap_10_instruction',
43: 'trap_11_instruction',
44: 'trap_12_instruction',
45: 'trap_13_instruction',
46: 'trap_14_instruction',
47: 'trap_15_instruction',
48: 'fp_branch_or_set_on_unordered_condition',
49: 'fp_inexact_result',
50: 'fp_divide_by_zero',
51: 'fp_underflow',
52: 'fp_operand_error',
53: 'fp_overflow',
54: 'fp_signaling_nan',
55: 'fp_unimplemented_data_type',
56: 'mmu_configuration_error',
57: 'mmu_illegal_operation_error',
58: 'mmu_access_level_violation_error',
# 59-63 unassigned_reserved
}
for k in range(0, 192):
vectors[k+64] = 'user_%d' % k
t = view.parse_type_string("void *")[0]
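    # every slot gets a data symbol and a pointer type; all vectors except the initial stack
    # pointer also get a function symbol and an entry point at their target address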
for k in range(size):
name = vectors.get(k, 'unassigned_reserved')
view.define_user_symbol(Symbol(SymbolType.DataSymbol, addr+4*k, "_vector_%d_%s" % (k, name)))
view.define_user_data_var(addr+4*k, t)
value = struct.unpack(">L", view.read(addr+4*k, 4))[0]
if k > 0:
view.define_user_symbol(Symbol(SymbolType.FunctionSymbol, value, "vector_%d_%s" % (k, name)))
view.add_entry_point(value)
def prompt_create_vector_table(view, addr=None):
architectures = ['M68000', 'M68008', 'M68010', 'M68020', 'M68030', 'M68040', 'M68LC040', 'M68EC040', 'M68330', 'M68340']
size_choices = ['Full (256)', 'MMU (59)', 'FP (56)', 'Traps (48)', 'Interrupts (32)']
size_raw = [256, 59, 56, 48, 32]
if addr is None:
addr = 0
need_arch = True
if view.platform is not None and view.platform.arch.name in architectures:
# 68k arch already selected
need_arch = False
address_field = AddressField('Address', view, addr)
arch_field = ChoiceField('Architecture', architectures)
size_field = ChoiceField('Table size', size_choices)
res = False
if need_arch:
res = get_form_input([address_field, arch_field, size_field], 'Create M68k vector table')
else:
res = get_form_input([address_field, size_field], 'Create M68k vector table')
if res:
address = address_field.result
size = size_raw[size_field.result]
if need_arch:
arch = architectures[arch_field.result]
view.platform = Architecture[arch].standalone_platform
create_vector_table(view, address, size)
#PluginCommand.register("Create M68k vector table", "Create M68k vector table", prompt_create_vector_table)
PluginCommand.register_for_address("Create M68k vector table", "Create M68k vector table", prompt_create_vector_table)
M68000.register()
M68008.register()
M68010.register()
M68020.register()
M68030.register()
M68040.register()
M68LC040.register()
M68EC040.register()
M68330.register()
M68340.register()
|
the-stack_0_6853 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from bills import views
from django.conf.urls import url
router = DefaultRouter()
router.register('headbill', views.HeadBillViewSet)
router.register('relationshipTaxProduct', views.RelationshipTaxProductViewSet)
router.register('billdetail', views.BillDetailViewSet)
app_name = 'bills'
urlpatterns = [
# url(r'^customer/$', views.customer_list),
# url(r'^customer/(?P<pk>[0-9]+)$', views.customer_detail)
# path('customers/',views.customer_list),
# path('customers/<int:pk>',views.customer_detail),
    path('customers/', views.CustomerListView.as_view()),
    path('customers/<int:pk>', views.CustomerDetailView.as_view()),
    path('subscriptions/', views.SubscriptionListView.as_view()),
    path('subscriptions/<int:pk>', views.SubscriptionDetailView.as_view()),
path("products/", views.ProductList.as_view()),
path("products/<int:pk>", views.ProductDetail.as_view()),
path("taxestype/", views.TaxTypeList.as_view()),
path("taxestype/<int:pk>", views.TaxTypeDetail.as_view()),
path("bill/<int:pk>", views.BillDetail_list),
path('api2/', include(router.urls)),
]
|
the-stack_0_6854 | import os
import re
import subprocess
import sys
from setuptools import Extension, setup, find_packages
from setuptools.command.build_ext import build_ext
# Convert distutils Windows platform specifiers to CMake -A arguments
PLAT_TO_CMAKE = {
'win32': 'Win32',
'win-amd64': 'x64',
'win-arm32': 'ARM',
'win-arm64': 'ARM64',
}
base_path = os.path.abspath(os.path.dirname(__file__))
# A CMakeExtension needs a sourcedir instead of a file list.
# The name must be the _single_ output extension from the CMake build.
# If you need multiple extensions, see scikit-build.
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection & inclusion of auxiliary 'native' libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
debug = int(os.environ.get('DEBUG', 0)) if self.debug is None else self.debug
cfg = 'Debug' if debug else 'Release'
# CMake lets you override the generator - we need to check this.
# Can be set with Conda-Build, for example.
cmake_generator = os.environ.get('CMAKE_GENERATOR', '')
# Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON
# EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code
# from Python.
cmake_args = [
f'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}',
f'-DPYTHON_EXECUTABLE={sys.executable}',
f'-DCMAKE_BUILD_TYPE={cfg}', # not used on MSVC, but no harm
]
manylinux_inside = os.environ.get('MANYLINUX_INSIDE')
if manylinux_inside:
# using gcc 7.5 instead of default (Debian 9) 6.3
cmake_args.extend(
[
'-DCMAKE_C_COMPILER=/usr/local/bin/gcc',
'-DCMAKE_CXX_COMPILER=/usr/local/bin/g++',
]
)
build_args = []
# Adding CMake arguments set as environment variable
# (needed e.g. to build for ARM OSx on conda-forge)
if 'CMAKE_ARGS' in os.environ:
cmake_args += [item for item in os.environ['CMAKE_ARGS'].split(' ') if item]
# In this example, we pass in the version to C++. You might not need to.
# cmake_args += [f'-DEXAMPLE_VERSION_INFO={self.distribution.get_version()}']
if self.compiler.compiler_type != 'msvc':
# Using Ninja-build since it a) is available as a wheel and b)
# multithreads automatically. MSVC would require all variables be
# exported for Ninja to pick it up, which is a little tricky to do.
# Users can override the generator with CMAKE_GENERATOR in CMake
# 3.15+.
if not cmake_generator:
try:
import ninja # noqa: F401
cmake_args += ['-GNinja']
except ImportError:
pass
else:
# Single config generators are handled 'normally'
single_config = any(x in cmake_generator for x in {'NMake', 'Ninja'})
# CMake allows an arch-in-generator style for backward compatibility
contains_arch = any(x in cmake_generator for x in {'ARM', 'Win64'})
# Specify the arch if using MSVC generator, but only if it doesn't
# contain a backward-compatibility arch spec already in the
# generator name.
if not single_config and not contains_arch:
cmake_args += ['-A', PLAT_TO_CMAKE[self.plat_name]]
# Multi-config generators have a different way to specify configs
if not single_config:
cmake_args += [f'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}']
build_args += ['--config', cfg]
if sys.platform.startswith('darwin'):
# Cross-compile support for macOS - respect ARCHFLAGS if set
archs = re.findall(r'-arch (\S+)', os.environ.get('ARCHFLAGS', ''))
if archs:
cmake_args += ['-DCMAKE_OSX_ARCHITECTURES={}'.format(';'.join(archs))]
# Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
# across all generators.
if 'CMAKE_BUILD_PARALLEL_LEVEL' not in os.environ:
# self.parallel is a Python 3 only way to set parallel jobs by hand
# using -j in the build_ext call, not supported by pip or PyPA-build.
if hasattr(self, 'parallel') and self.parallel:
# CMake 3.12+ only.
build_args += [f'-j{self.parallel}']
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
with open(os.path.join(base_path, 'CMakeLists.txt'), 'r', encoding='utf-8') as f:
regex = re.compile(r'VERSION "([A-Za-z0-9.]+)"$', re.MULTILINE)
version = re.findall(regex, f.read())[0]
if version.count('.') == 3:
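    # a four-component VERSION such as "0.1.0.2" is published as a pre-release: 0.1.0.dev2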
major, minor, path_, tweak = version.split('.')
version = f'{major}.{minor}.{path_}.dev{tweak}'
with open(os.path.join(base_path, 'README.md'), 'r', encoding='utf-8') as f:
readme = f.read()
setup(
    name='wrtc',  # the name 'webrtc' is not available on PyPI for some reason, but 'wrtc' appears to be free
version=version,
author='Il`ya Semyonov',
author_email='[email protected]',
license='BSD 3-Clause',
url='https://github.com/MarshalX/python-webrtc',
description='a Python extension that provides bindings to WebRTC M92',
long_description=readme,
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 1 - Planning',
'Natural Language :: English',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Operating System :: MacOS',
'Operating System :: Unix',
'Topic :: Internet',
'Topic :: Multimedia',
'Topic :: Multimedia :: Video',
'Topic :: Multimedia :: Video :: Capture',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Capture/Recording',
'Topic :: Communications',
'Topic :: Communications :: Internet Phone',
'Topic :: Communications :: Telephony',
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
'Programming Language :: C++',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
"Programming Language :: Python :: Implementation",
"Programming Language :: Python :: Implementation :: CPython",
],
python_requires='~=3.7',
package_dir={'': 'python-webrtc/python'},
packages=find_packages(where='python-webrtc/python'),
ext_modules=[CMakeExtension('wrtc')],
# TODO add stub
cmdclass={'build_ext': CMakeBuild},
zip_safe=False,
project_urls={
'Author': 'https://github.com/MarshalX',
'Tracker': 'https://github.com/MarshalX/python-webrtc/issues',
'Source': 'https://github.com/MarshalX/python-webrtc',
},
)
|
the-stack_0_6855 | #
# Cormorant training script for the residue deletion dataset
#
import logging
import torch
from cormorant.data.collate import collate_activity
from cormorant.data.utils import initialize_datasets
from cormorant.engine import Engine
from cormorant.engine import init_argparse, init_file_paths, init_logger, init_cuda
from cormorant.engine import init_optimizer, init_scheduler
from cormorant.models import CormorantLEP
from cormorant.models.autotest import cormorant_tests
from torch.utils.data import DataLoader
# This makes printing tensors more readable.
torch.set_printoptions(linewidth=1000, threshold=100000)
logger = logging.getLogger('')
def main():
# Initialize arguments -- Just
args = init_argparse('lep')
# Initialize file paths
args = init_file_paths(args)
# Initialize logger
init_logger(args)
# Initialize dataloader
args, datasets, num_species, charge_scale = initialize_datasets(args, args.datadir, 'lep',
force_download=args.force_download,
ignore_check=args.ignore_check
)
# Construct PyTorch dataloaders from datasets
dataloaders = {split: DataLoader(dataset,
batch_size=args.batch_size,
shuffle=args.shuffle if (split == 'train') else False,
num_workers=args.num_workers,
collate_fn=collate_activity)
for split, dataset in datasets.items()}
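    # one DataLoader per split; only the training split is shuffled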
# Initialize device and data type
device, dtype = init_cuda(args)
# Initialize model
model = CormorantLEP(args.maxl, args.max_sh, args.num_cg_levels, args.num_channels, num_species,
args.cutoff_type, args.hard_cut_rad, args.soft_cut_rad, args.soft_cut_width,
args.weight_init, args.level_gain, args.charge_power, args.basis_set,
charge_scale, args.gaussian_mask,
num_classes = args.num_classes,
cgprod_bounded = args.cgprod_bounded,
cg_agg_normalization = args.cg_agg_normalization,
cg_pow_normalization = args.cg_pow_normalization,
device = device, dtype = dtype)
# Initialize the scheduler and optimizer
optimizer = init_optimizer(args, model)
scheduler, restart_epochs = init_scheduler(args, optimizer)
# Define cross-entropy as the loss function.
loss_fn = torch.nn.functional.cross_entropy
# Apply the covariance and permutation invariance tests
print('Files:',dataloaders['train'])
cormorant_tests(model, dataloaders['train'], args, charge_scale=charge_scale, siamese=True)
# Instantiate the training class
trainer = Engine(args, dataloaders, model, loss_fn, optimizer, scheduler, restart_epochs, device, dtype, task='classification', clip_value=None)
print('Initialized a',trainer.task,'trainer.')
# Load from checkpoint file. If no checkpoint file exists, automatically does nothing.
trainer.load_checkpoint()
# Train model.
trainer.train()
# Test predictions on best model and also last checkpointed model.
trainer.evaluate()
if __name__ == '__main__':
main()
|
the-stack_0_6857 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="PacaPy-raul-guajardo",
version="0.0.1",
author="Raul Guajardo",
author_email="[email protected]",
description="A package designed as a wrapper over Alpaca API for my general use.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/raulguajardo/PacaPy",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
) |
the-stack_0_6858 | # coding=utf-8
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image
from PIL import ImageDraw
import os.path as osp
import numpy as np
import json
class CPDataset(data.Dataset):
"""Dataset for CP-VTON+.
"""
def __init__(self, opt):
super(CPDataset, self).__init__()
# base setting
self.opt = opt
self.root = opt.dataroot
self.datamode = opt.datamode # train or test or self-defined
self.stage = opt.stage # GMM or TOM
self.data_list = opt.data_list
self.fine_height = opt.fine_height
self.fine_width = opt.fine_width
self.radius = opt.radius
self.data_path = osp.join(opt.dataroot, opt.datamode)
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
self.transformmask = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# load data list
im_names = []
c_names = []
with open(osp.join(opt.dataroot, opt.data_list), 'r') as f:
for line in f.readlines():
im_name, c_name = line.strip().split()
im_names.append(im_name)
c_names.append(c_name)
self.im_names = im_names
self.c_names = c_names
def name(self):
return "CPDataset"
def __getitem__(self, index):
c_name = self.c_names[index]
im_name = self.im_names[index]
if self.stage == 'GMM':
c = Image.open(osp.join(self.data_path, 'cloth', c_name))
cm = Image.open(osp.join(self.data_path, 'cloth-mask', c_name)).convert('L')
else:
c = Image.open(osp.join(self.data_path, 'warp-cloth', im_name)) # c_name, if that is used when saved
cm = Image.open(osp.join(self.data_path, 'warp-mask', im_name)).convert('L') # c_name, if that is used when saved
c = self.transform(c) # [-1,1]
cm_array = np.array(cm)
cm_array = (cm_array >= 128).astype(np.float32)
cm = torch.from_numpy(cm_array) # [0,1]
cm.unsqueeze_(0)
# person image
im = Image.open(osp.join(self.data_path, 'image', im_name))
im = self.transform(im) # [-1,1]
"""
LIP labels
[(0, 0, 0), # 0=Background
(128, 0, 0), # 1=Hat
(255, 0, 0), # 2=Hair
(0, 85, 0), # 3=Glove
(170, 0, 51), # 4=SunGlasses
(255, 85, 0), # 5=UpperClothes
(0, 0, 85), # 6=Dress
(0, 119, 221), # 7=Coat
(85, 85, 0), # 8=Socks
(0, 85, 85), # 9=Pants
(85, 51, 0), # 10=Jumpsuits
(52, 86, 128), # 11=Scarf
(0, 128, 0), # 12=Skirt
(0, 0, 255), # 13=Face
(51, 170, 221), # 14=LeftArm
(0, 255, 255), # 15=RightArm
(85, 255, 170), # 16=LeftLeg
(170, 255, 85), # 17=RightLeg
(255, 255, 0), # 18=LeftShoe
(255, 170, 0) # 19=RightShoe
(170, 170, 50) # 20=Skin/Neck/Chest (Newly added after running dataset_neck_skin_correction.py)
]
"""
# load parsing image
parse_name = im_name.replace('.jpg', '.png')
im_parse = Image.open(
# osp.join(self.data_path, 'image-parse', parse_name)).convert('L')
osp.join(self.data_path, 'image-parse-new', parse_name)).convert('L') # updated new segmentation
parse_array = np.array(im_parse)
im_mask = Image.open(
osp.join(self.data_path, 'image-mask', parse_name)).convert('L')
mask_array = np.array(im_mask)
# parse_shape = (parse_array > 0).astype(np.float32) # CP-VTON body shape
# Get shape from body mask (CP-VTON+)
## parse_shape = (mask_array > 0).astype(np.float32) can only detect white background
parse_shape = (parse_array > 0).astype(np.float32)
if self.stage == 'GMM':
parse_head = (parse_array == 1).astype(np.float32) + \
(parse_array == 4).astype(np.float32) + \
(parse_array == 13).astype(
np.float32) # CP-VTON+ GMM input (reserved regions)
else:
parse_head = (parse_array == 1).astype(np.float32) + \
(parse_array == 2).astype(np.float32) + \
(parse_array == 4).astype(np.float32) + \
(parse_array == 9).astype(np.float32) + \
(parse_array == 12).astype(np.float32) + \
(parse_array == 13).astype(np.float32) + \
(parse_array == 16).astype(np.float32) + \
(parse_array == 17).astype(
np.float32) # CP-VTON+ TOM input (reserved regions)
parse_cloth = (parse_array == 5).astype(np.float32) + \
(parse_array == 6).astype(np.float32) + \
(parse_array == 7).astype(np.float32) # upper-clothes labels
# shape downsample
parse_shape_ori = Image.fromarray((parse_shape*255).astype(np.uint8))
parse_shape = parse_shape_ori.resize(
(self.fine_width//16, self.fine_height//16), Image.BILINEAR)
parse_shape = parse_shape.resize(
(self.fine_width, self.fine_height), Image.BILINEAR)
parse_shape_ori = parse_shape_ori.resize(
(self.fine_width, self.fine_height), Image.BILINEAR)
shape_ori = self.transformmask(parse_shape_ori) # [-1,1]
shape = self.transformmask(parse_shape)# [-1,1]
phead = torch.from_numpy(parse_head) # [0,1]
# phand = torch.from_numpy(parse_hand) # [0,1]
pcm = torch.from_numpy(parse_cloth) # [0,1]
# upper cloth
im_c = im * pcm + (1 - pcm) # [-1,1], fill 1 for other parts
im_h = im * phead - (1 - phead) # [-1,1], fill 0 for other parts
# load pose points
pose_name = im_name.replace('.jpg', '_keypoints.json')
with open(osp.join(self.data_path, 'pose', pose_name), 'r') as f:
pose_label = json.load(f)
pose_data = pose_label[0]["keypoints"]
pose_data = np.array(pose_data)
pose_data = pose_data.reshape((-1, 3))
point_num = pose_data.shape[0]
pose_map = torch.zeros(point_num, self.fine_height, self.fine_width)
r = self.radius
im_pose = Image.new('L', (self.fine_width, self.fine_height))
pose_draw = ImageDraw.Draw(im_pose)
for i in range(point_num):
one_map = Image.new('L', (self.fine_width, self.fine_height))
draw = ImageDraw.Draw(one_map)
pointx = pose_data[i, 0]
pointy = pose_data[i, 1]
if pointx > 1 and pointy > 1:
draw.rectangle((pointx-r, pointy-r, pointx +
r, pointy+r), 'white', 'white')
pose_draw.rectangle(
(pointx-r, pointy-r, pointx+r, pointy+r), 'white', 'white')
one_map = self.transformmask(one_map)
pose_map[i] = one_map[0]
# just for visualization
im_pose = self.transformmask(im_pose)
# cloth-agnostic representation
agnostic = torch.cat([shape, im_h, pose_map], 0)
if self.stage == 'GMM':
im_g = Image.open('grid.png')
im_g = self.transform(im_g)
else:
im_g = ''
pcm.unsqueeze_(0) # CP-VTON+
result = {
'c_name': c_name, # for visualization
'im_name': im_name, # for visualization or ground truth
'cloth': c, # for input
'cloth_mask': cm, # for input
'image': im, # for visualization
'agnostic': agnostic, # for input
'parse_cloth': im_c, # for ground truth
'shape': shape, # for visualization
'head': im_h, # for visualization
'pose_image': im_pose, # for visualization
'grid_image': im_g, # for visualization
'parse_cloth_mask': pcm, # for CP-VTON+, TOM input
'shape_ori': shape_ori, # original body shape without resize
}
return result
def __len__(self):
return len(self.im_names)
class CPDataLoader(object):
def __init__(self, opt, dataset):
super(CPDataLoader, self).__init__()
if opt.shuffle:
train_sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
train_sampler = None
self.data_loader = torch.utils.data.DataLoader(
dataset, batch_size=opt.batch_size, shuffle=(
train_sampler is None),
num_workers=opt.workers, pin_memory=True, sampler=train_sampler)
self.dataset = dataset
self.data_iter = self.data_loader.__iter__()
def next_batch(self):
try:
batch = self.data_iter.__next__()
except StopIteration:
self.data_iter = self.data_loader.__iter__()
batch = self.data_iter.__next__()
return batch
if __name__ == "__main__":
print("Check the dataset for geometric matching module!")
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--dataroot", default="data")
parser.add_argument("--datamode", default="train")
parser.add_argument("--stage", default="GMM")
parser.add_argument("--data_list", default="train_pairs.txt")
parser.add_argument("--fine_width", type=int, default=192)
parser.add_argument("--fine_height", type=int, default=256)
parser.add_argument("--radius", type=int, default=3)
parser.add_argument("--shuffle", action='store_true',
help='shuffle input data')
parser.add_argument('-b', '--batch-size', type=int, default=4)
parser.add_argument('-j', '--workers', type=int, default=1)
opt = parser.parse_args()
dataset = CPDataset(opt)
data_loader = CPDataLoader(opt, dataset)
print('Size of the dataset: %05d, dataloader: %04d'
% (len(dataset), len(data_loader.data_loader)))
first_item = dataset.__getitem__(0)
first_batch = data_loader.next_batch()
from IPython import embed
embed() |
the-stack_0_6859 | import sys
from time import time
import pandas as pd
from pandas import DataFrame
import numpy as np
import matplotlib.pyplot as plt
import itertools
import matplotlib as mpl
from scipy import linalg
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn import mixture
np.random.seed(42)
###Get command line arguments
clusterType = sys.argv[1] #Clustering algorithm
fileID = sys.argv[2]; #fileID
set = sys.argv[3]; #Set
numSpeakers = sys.argv[4]; #Number of Speakers
blockLength = sys.argv[5]; #Block length
hopLength = sys.argv[6]; #Hop length
thresholdOrder = sys.argv[7] #Adaptive Threshold order
extraid = int(sys.argv[8]); #extraid
gmm_co_var_type = sys.argv[9]; #'full' or 'tied'
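###Example invocation (script name and argument values are placeholders):
###  python cluster_speakers.py kmeans 7 1 4 30 10 5 0 full
###  i.e. clusterType fileID set numSpeakers blockLength hopLength thresholdOrder extraid gmm_co_var_type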
estimated_labels = [];
###Prepare output file path
outputRoot = "/Users/avrosh/Documents/Coursework/7100_Spring_16/Dataset/dataset/"+set+"/"+"set"+set+"_S"+numSpeakers+"_"+hopLength+"_"+blockLength+"_"+fileID+"_"+thresholdOrder
if extraid != 0:
outputRoot = outputRoot + "_" + str(extraid)
outputRoot = outputRoot + "_" + clusterType + ".csv"
# print outputRoot
txtResultFile = open(outputRoot, "w")
###Prepare input file path
path = "/Users/avrosh/Documents/Coursework/7100_Spring_16/Dataset/dataset/"+set+"/features/set"+set+"_"+hopLength+"_"+blockLength+"_S"+numSpeakers+"_"+fileID+"_"+thresholdOrder
if extraid != 0:
path = path + "_" + str(extraid)
path = path + ".csv"
#print path
f = open(path)
f.readline()
###Read data
data = np.loadtxt(fname = f, delimiter=',')
all_labels = data[:,0]
labels = all_labels[all_labels != 0]
#labels = data[:,0]
#print labels
#normalize data
features = data[data[:,0] != 0]
features = scale(features[:,1:])
unscaled_features = features[:,1:]  # note: these columns were already scaled above; kept as-is for the gmm-pca branch
#features = data[:,1:]
#print features
n_samples, n_features = features.shape
n_speakers = int(numSpeakers)  # cast once so the sklearn estimators receive an integer cluster count
#speaker_ids = np.unique(labels)
#print speaker_ids
print ("n_speakers %d \nn_samples %d \nn_features %d" % (int(n_speakers),int(n_samples),int(n_features)))
sample_size = 300
print(79 * '_')
###Method
def visualize_gmm(data,gmm):
##Visualize data
reduced_data = PCA(n_components=2).fit_transform(data)
gmm.fit(reduced_data)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm','k'])
global estimated_labels
# print estimated_labels
estimated_speaker_ids = np.unique(estimated_labels)
for speaker in estimated_speaker_ids:
speaker_labels = np.argwhere(labels==speaker)
plt.scatter(reduced_data[speaker_labels,0],
reduced_data[speaker_labels,1],
color=next(color_iter))
for i, (clf, title) in enumerate([(gmm, 'Clustered using GMM (showing PCA reduced plot)')]):
splot = plt.subplot(1, 1, 1 + i)
Y_ = clf.predict(reduced_data)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
# if not np.any(Y_ == i):
# continue
# plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# print X[Y_ == i, 0]
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-6, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.legend(["Ellipses are clusters, dots are short blocks of audio"])
plt.show()
###Method
def visualize_kmeans(data):
########################################################################
#Visualize data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++',n_clusters=n_speakers,n_init=10)
kmeans.fit(reduced_data)
#step size of mesh
h = .02
#Plot the decision boundary
x_min, x_max = reduced_data[:,0].min() - 1, reduced_data[:,0].max() + 1
y_min, y_max = reduced_data[:,1].min() - 1, reduced_data[:,1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
#Obtain labels for each point in mesh
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
#Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
#Colour Cycler
colorcycler = itertools.cycle(['r', 'g', 'b', 'y','c','k','w','m'])
estimated_speaker_ids = np.unique(Z)
for speaker in estimated_speaker_ids:
speaker_labels = np.argwhere(labels==speaker)
# for every_speaker in speaker_labels:
# j = j + 1
# txtResultFile.write("{0},{1}".format(np.int_(speaker),np.int_(every_speaker)))
# if i==len(speaker_ids):
# if j<len(speaker_labels):
# txtResultFile.write(",")
# else:
# txtResultFile.write(",")
plt.scatter(reduced_data[speaker_labels,0],
reduced_data[speaker_labels,1],
color=next(colorcycler))
#plt.plot(reduced_data[:,0], reduced_data[:,1], 'k.',markersize=2)
#plt.plot(reduced_data[:,0],reduced_data[:,1],'g^', reduced_data[:,0])
#plot the centroids as white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:,0],centroids[:,1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the speakers (PCA-reduced data)')
plt.xlim(x_min,x_max)
plt.ylim(y_min,y_max)
plt.xticks(())
plt.yticks(())
plt.show()
###Method
def cluster(estimator, name, data):
t0 = time()
estimator.fit(data)
global estimated_labels
estimated_labels = estimator.predict(data)
# print estimated_labels
# homogeneity_score = metrics.homogeneity_score(labels,estimated_labels)
# completeness_score = metrics.completeness_score(labels, estimated_labels)
# v_measure_score = metrics.v_measure_score(labels, estimated_labels)
# adjusted_rand_score = metrics.adjusted_rand_score(labels, estimated_labels)
# adjusted_mutual_info_score = metrics.adjusted_mutual_info_score(labels, estimated_labels)
## silhouette_score = metrics.silhouette_score(features, estimated_labels,
## metric='euclidean',
## sample_size=sample_size)
i=0
j=0
for label in all_labels:
i = i + 1;
# txtResultFile.write("{0}".format(label))
# txtResultFile.write(",")
if label == 0:
txtResultFile.write("{0}".format(-1))
else:
txtResultFile.write("{0}".format(estimated_labels[j]))
j = j + 1
if i<len(all_labels):
txtResultFile.write("\n")
# print('Name: % 9s \n'
# 'Time: %.2fs \n'
# 'Homogeneity Score: %.3f \n'
# 'Completeness Score: %.3f \n'
# 'V Measure score: %.3f \n'
# 'Adjusted rand score: %.3f \n'
# 'Adjusted Mutual Info score: %.3f \n'
# % (name, (time()-t0),
# homogeneity_score,
# completeness_score,
# v_measure_score,
# adjusted_rand_score,
# adjusted_mutual_info_score))
print(79 * '_')
#KMeans
if (clusterType == "kmeans"):
cluster(KMeans(init='k-means++', n_clusters=n_speakers, n_init=10),
name='k-means++',
data=features)
visualize_kmeans(features)
##KMeans with random initialization
if (clusterType == "kmeans-rand"):
cluster(KMeans(init='random', n_clusters=n_speakers, n_init=10),
name='Random',
data=features)
visualize_kmeans(features)
#
##KMeans PCA
#in this case the seeding of the centers in deterministic, hence we run the algorithm only once
if (clusterType == "kmeans-pca"):
pca = PCA(n_components=n_speakers).fit(features)
cluster(KMeans(init=pca.components_, n_clusters=n_speakers, n_init=1),
name='PCA-based',
data=features)
visualize_kmeans(features)
##GMM
# Fit a mixture of Gaussians with EM using five components
if (clusterType == "gmm"):
gmm = mixture.GMM(n_components=int(n_speakers), covariance_type=gmm_co_var_type)
cluster(gmm,
name='gmm',
data=features)
visualize_gmm(features,gmm)
##GMM-PCA
# Fit a mixture of Gaussians with EM using five components
if (clusterType == "gmm-pca"):
reduced_data = PCA(n_components=10).fit_transform(unscaled_features)
reduced_data = scale(reduced_data)
gmm = mixture.GMM(n_components=n_speakers, covariance_type=gmm_co_var_type)
cluster(gmm,
name='gmm-pca',
data=reduced_data)
visualize_gmm(reduced_data,gmm)
###Close output file
txtResultFile.close()
sys.exit()
|
the-stack_0_6866 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
class MPNNLSTM(nn.Module):
r"""An implementation of the Message Passing Neural Network with Long Short Term Memory.
For details see this paper: `"Transfer Graph Neural Networks for Pandemic Forecasting." <https://arxiv.org/abs/2009.08388>`_
Args:
in_channels (int): Number of input features.
hidden_size (int): Dimension of hidden representations.
out_channels (int): Number of output features.
num_nodes (int): Number of nodes in the network.
window (int): Number of past samples included in the input.
dropout (float): Dropout rate.
"""
def __init__(self, in_channels: int, hidden_size: int ,
out_channels: int, num_nodes: int, window: int, dropout: float):
super(MPNNLSTM, self).__init__()
self.window = window
self.num_nodes = num_nodes
self.hidden_size = hidden_size
self.dropout = dropout
self.in_channels = in_channels
self.out_channels = out_channels
self._create_parameters_and_layers()
def _create_parameters_and_layers(self):
self._convolution_1 = GCNConv(self.in_channels, self.hidden_size)
self._convolution_2 = GCNConv(self.hidden_size, self.hidden_size)
self._batch_norm_1 = nn.BatchNorm1d(self.hidden_size)
self._batch_norm_2 = nn.BatchNorm1d(self.hidden_size)
self._recurrent_1 = nn.LSTM(2*self.hidden_size, self.hidden_size, 1)
self._recurrent_2 = nn.LSTM(self.hidden_size, self.hidden_size, 1)
def _graph_convolution_1(self, X, edge_index, edge_weight):
X = F.relu(self._convolution_1(X, edge_index, edge_weight))
X = self._batch_norm_1(X)
X = F.dropout(X, p=self.dropout, training=self.training)
return X
def _graph_convolution_2(self, X, edge_index, edge_weight):
X = F.relu(self._convolution_2(X, edge_index, edge_weight))
X = self._batch_norm_2(X)
X = F.dropout(X, p=self.dropout, training=self.training)
return X
def forward(self, X: torch.FloatTensor, edge_index: torch.LongTensor,
edge_weight: torch.FloatTensor) -> torch.FloatTensor:
"""
Making a forward pass through the whole architecture.
Arg types:
* **X** *(PyTorch FloatTensor)* - Node features.
* **edge_index** *(PyTorch LongTensor)* - Graph edge indices.
* **edge_weight** *(PyTorch LongTensor, optional)* - Edge weight vector.
Return types:
* **H** *(PyTorch FloatTensor)* - The hidden representation of size 2*nhid+in_channels+window-1 for each node.
"""
R = list()
S = X.view(-1, self.window, self.num_nodes, self.in_channels)
S = torch.transpose(S, 1, 2)
S = S.reshape(-1, self.window, self.in_channels)
O = [S[:,0,:]]
for l in range(1, self.window):
O.append(S[:, l, self.in_channels-1].unsqueeze(1))
S = torch.cat(O, dim=1)
X = self._graph_convolution_1(X, edge_index, edge_weight)
R.append(X)
X = self._graph_convolution_2(X, edge_index, edge_weight)
R.append(X)
X = torch.cat(R, dim=1)
X = X.view(-1, self.window, self.num_nodes, X.size(1))
X = torch.transpose(X, 0, 1)
X = X.contiguous().view(self.window, -1, X.size(3))
X, (H_1, C_1) = self._recurrent_1(X)
X, (H_2, C_2) = self._recurrent_2(X)
H = torch.cat([H_1[0, :, :], H_2[0, :, :], S], dim=1)
return H
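# Minimal shape-check sketch (not part of the original file): it assumes
# torch_geometric is installed and simply exercises the forward() above with
# window*num_nodes rows of in_channels features on a toy 4-node cycle graph.
if __name__ == "__main__":
    num_nodes, window, in_channels = 4, 3, 2
    model = MPNNLSTM(in_channels=in_channels, hidden_size=8, out_channels=1,
                     num_nodes=num_nodes, window=window, dropout=0.1)
    X = torch.rand(window * num_nodes, in_channels)
    edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]], dtype=torch.long)
    edge_weight = torch.ones(edge_index.size(1))
    H = model(X, edge_index, edge_weight)
    print(H.shape)  # (num_nodes, 2*hidden_size + in_channels + window - 1) == (4, 20)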
|
the-stack_0_6867 | import os
import unittest
from digiroad.connection.PostgisServiceProvider import PostgisServiceProvider
from digiroad.entities import Point
from digiroad.logic.Operations import Operations
from digiroad.util import CostAttributes, FileActions
class PostgisServiceProviderTest(unittest.TestCase):
def setUp(self):
self.postgisServiceProvider = PostgisServiceProvider()
self.fileActions = FileActions()
self.operations = Operations(self.fileActions)
self.dir = os.getcwd()
def test_createATemporaryTable(self):
tableName = "temporalTable"
columns = {
"uuid": "uuid",
"ykr_from_id": "INTEGER",
"ykr_to_id": "INTEGER",
"travel_time": "DOUBLE PRECISION",
"travel_time_difference": "DOUBLE PRECISION",
"geometry": "GEOMETRY",
}
try:
connection = self.postgisServiceProvider.getConnection()
self.postgisServiceProvider.createTemporaryTable(
con=connection,
tableName=tableName,
columns=columns
)
finally:
connection.close()
def test_getUUIDCode(self):
uuid = self.postgisServiceProvider.getUUID(con=self.postgisServiceProvider.getConnection())
print(uuid)
self.assertIsNotNone(uuid)
def test_bucle(self):
arrayList = [0, 1, 2, 3, 4, 5, 6, 7, 8]
expected = [[0, 3], [4, 7], [8, 8]]
jump = 4
self.assertEqual(expected, self.getModules(arrayList, jump))
arrayList = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
expected = [[0, 3], [4, 7], [8, 9]]
self.assertEqual(expected, self.getModules(arrayList, jump))
arrayList = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
expected = [[0, 3], [4, 7], [8, 10]]
self.assertEqual(expected, self.getModules(arrayList, jump))
arrayList = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
expected = [[0, 3], [4, 7], [8, 11]]
self.assertEqual(expected, self.getModules(arrayList, jump))
arrayList = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
expected = [[0, 3], [4, 7], [8, 11], [12, 12]]
self.assertEqual(expected, self.getModules(arrayList, jump))
arrayList = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
expected = [[0, 2], [3, 5], [6, 8], [9, 11], [12, 12]]
jump = 3
self.assertEqual(expected, self.getModules(arrayList, jump))
def getModules(self, arrayList, jump):
counter = 0
intervals = []
while counter < len(arrayList):
if counter + jump > len(arrayList):
jump = len(arrayList) % jump
intervals.append([counter, counter + jump - 1])
counter = counter + jump
print(intervals)
return intervals
|
the-stack_0_6869 | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from base64 import b64decode
from glob import glob
import imghdr
import os
import requests
import warnings
def acquire_note(directory, div):
if div.find('div').text is not None:
with open(os.path.join(directory, f'curation_notes.txt'), 'w', encoding='utf-8') as out:
out.write(div.find('div').text.strip())
else:
warnings.warn(f'Curation note could not be parsed for {model_id}', UserWarning)
def acquire_image(directory, div):
if div.find('img')['src'] is not None:
for img_file in div.find_all('img'):
img_format = img_file['src'].partition('/')[2].partition(';')[0]
img_data = b64decode(img_file['src'].partition(',')[2])
img_filename = os.path.join(directory, f'curation_image.{img_format}')
with open(img_filename, 'wb') as out:
out.write(img_data)
correct_img_format = imghdr.what(img_filename)
if correct_img_format != img_format:
os.rename(img_filename, os.path.join(directory, f'curation_image.{correct_img_format}'))
else:
warnings.warn(f'Curation image could not be found for {model_id}', UserWarning)
for model_path in glob(os.path.join('manual-fixes', '*')):
# acquire the raw HTML
model_id = os.path.basename(model_path)
url = f'https://www.ebi.ac.uk/biomodels/{model_id}#Curation'
response = requests.get(url, headers={'accept': 'application/html'})
soup = BeautifulSoup(response.text, 'lxml')
# acquire and export the curation notes
curation_divs = soup.find_all(class_='small-12 medium-6 large-6 columns')
for div in curation_divs:
if div.find('strong'):
acquire_note(model_path, div)
if div.find('img'):
acquire_image(model_path, div)
|
the-stack_0_6871 | import os
import sys
import time
import shlex
import signal
import logging
import traceback
import subprocess as sp
import multiprocessing as mp
import path
import daemon
import packaging
logger = logging.getLogger(__name__)
def status(module_name):
"""
Return the status of the module *module_name*
    A module is considered running if its pid file exists.
    Return True if the module is running, False otherwise.
"""
pid_file = path.pid_file(module_name)
return os.path.exists(pid_file)
def status_all():
"""
Return the status of all the installed modules. See the above function
*status* for more details.
    Return a dictionary mapping each module name to its status (as a boolean).
"""
modules = packaging.get_installed_modules()
return {name: status(name) for name in modules}
def execm(module_name, daemonize=True):
"""
    Start a new module identified by its name *module_name*. When the module is
    not run as a daemon, the current process exits once the module finishes; if
    it is a daemon, the current process exits immediately. Use *invoke*
instead if you want to create a new killable process.
"""
child_proc = None
def signal_handler(signum, frame):
"""
Signal handler. If no child was created, it does nothing.
Else, it broadcasts the signal to the child.
"""
logger.info('Received signal %s, broadcasting it to child' % signum)
if child_proc is not None:
child_proc.send_signal(signum)
child_proc.wait()
# Check that only one instance is running at the same time
pid_file = path.pid_file(module_name)
if os.path.exists(pid_file):
        raise RuntimeError('A pid file already exists for this module')
# Get the start command from the configuration file
module_config = packaging.get_config(module_name)
    if 'start' not in module_config:
        raise RuntimeError(
            'Missing "start" entry in the module\'s configuration file')
start_cmd = module_config['start']
# Add our bin directory to the PATH variable
os.environ['PATH'] = path.bin_directory() + ':' + os.environ['PATH']
# Daemon or not Daemon ?
if daemonize:
# Create a daemon
daemon.possess_me()
# Redirect stdout and stderr into a log file
sys.stdout = open(path.log_file(module_name), 'a')
sys.stderr = sys.stdout
# Change the directory to the module directory
os.chdir(path.module_directory(module_name))
# Prepare to receive signal SIGINT and SIGTERM
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
return_code = 0
try:
# Write the new daemon pid in a new file
with open(pid_file, 'w') as f:
f.write(str(os.getpid()))
f.flush()
except (OSError, IOError) as e:
return_code = 1
else:
# Execute the start command
logger.info('Starting the module `%s`', module_name)
try:
child_proc = sp.Popen(shlex.split(start_cmd))
except OSError as e:
logger.exception(e)
return_code = 1
else:
return_code = child_proc.wait()
finally:
# Remove the pid file and return the corresponding code
logger.info('Shutting down the module `%s`', module_name)
os.remove(pid_file)
sys.exit(return_code)
sys.exit(0)
def invoke(module_name, daemonize=True):
"""
    Like *execm*, execute a module, but fork first so that the current
    process stays alive. To check whether the module is really running, use the
*status* function.
"""
if status(module_name):
raise RuntimeError('Module `%s` is already running' % module_name)
proc = mp.Process(target=execm, args=(module_name, daemonize))
proc.start()
proc.join()
def invoke_all():
"""
    Invoke all installed modules as daemons. Does not check whether the
    modules launched correctly.
"""
modules = packaging.get_installed_modules()
for name in modules:
try:
invoke(name, True)
time.sleep(0.1)
except RuntimeError as e:
logger.exception(e)
def stop(module_name):
"""
Stop the *module_name* module.
"""
if not status(module_name):
raise RuntimeError('Module `%s` is not running' % module_name)
remove_file = False
pid = 0
pid_file = path.pid_file(module_name)
with open(pid_file, 'r') as f:
try:
pid = int(f.readline())
except ValueError:
remove_file = True
if pid != 0:
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno == 3: # No such process
remove_file = True
else:
raise e
if remove_file:
os.remove(pid_file)
def stop_all():
"""
Stop all the running modules
"""
modules = packaging.get_installed_modules()
for name in modules:
try:
stop(name)
except RuntimeError:
pass # Ignore if we try to stop a stopped module
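# Usage sketch (for illustration; "my_module" is a placeholder for an installed module):
#   invoke("my_module", daemonize=True)   # start the module as a daemon
#   status("my_module")                   # True while its pid file exists
#   stop("my_module")                     # send SIGTERM and remove the pid file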
|
the-stack_0_6872 | #!/usr/bin/env python3
import argparse
import json
from pathlib import Path
from typing import NamedTuple
import subprocess as sp
from dataset_utils import rm_imgs_without_labels
LABEL_MAP = {
"car": 0,
"bus": 1,
"person": 2,
"bike": 3,
"truck": 4,
"motor": 5,
"train": 6,
"rider": 7,
"traffic sign": 8,
"traffic light": 9,
}
IMG_WIDTH = 1280
IMG_HEIGHT = 720
class Arguments(NamedTuple):
data_path: Path
train_labels: Path
val_labels: Path
output_dir: Path
def parse_args() -> argparse.Namespace:
ap = argparse.ArgumentParser(
description="""
Create a dataset of images and labels, along with a corresponding
    bdd100k.data file, a train.txt, and a validation.txt that can be fed
into darknet to train a YOLO model on the BDD100k dataset.
WARNING: This will copy the images in the dataset to a different directory.
I am OK with this as storage is cheap on my PC, but modify this if you don't
like it.
"""
)
ap.add_argument(
"--data-path",
help="Path to BDD dataset root (e.g. bdd100k/images/100k). Should contain the directories `train`, `test`, and `val` with .jpg images",
)
ap.add_argument(
"--train-labels",
help="Path to BDD100k training labels JSON file (e.g. bdd100k_labels_images_train.json)",
)
ap.add_argument(
"--val-labels",
help="Path to BDD100k validation labels JSON file (e.g. bdd100k_labels_images_val.json)",
)
ap.add_argument(
"--output-dir",
help="Path to output the YOLO compatible dataset and other darknet helper files",
)
return ap.parse_args()
def validate_args(args: argparse.Namespace) -> Arguments:
data_path = Path(args.data_path).absolute().resolve()
assert data_path.is_dir(), "Given data path is not a directory"
assert (
data_path / "train"
).is_dir(), "Given data path doesn't contain a subdirectory `train`"
assert (
data_path / "val"
).is_dir(), "Given data path doesn't contain a subdirectory `val`"
assert (
data_path / "test"
).is_dir(), "Given data path doesn't contain a subdirectory `test`"
train_labels = Path(args.train_labels).absolute().resolve()
assert (
train_labels.is_file()
), "Given training labels path is either not a file or doesn't exist"
val_labels = Path(args.val_labels).absolute().resolve()
assert (
val_labels.is_file()
), "Given validation labels path is either not a file or doesn't exist"
output_dir = Path(args.output_dir).absolute().resolve()
if output_dir.is_dir():
import sys
print(
"[WARNING] Output directory already exists, contents may be overwritten",
file=sys.stderr,
)
output_dir.mkdir(parents=True, exist_ok=True)
return Arguments(
data_path=data_path,
train_labels=train_labels,
val_labels=val_labels,
output_dir=output_dir,
)
def box2d_to_yolo(box2d):
x1 = box2d["x1"] / IMG_WIDTH
x2 = box2d["x2"] / IMG_WIDTH
y1 = box2d["y1"] / IMG_HEIGHT
y2 = box2d["y2"] / IMG_HEIGHT
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
width = abs(x2 - x1)
height = abs(y2 - y1)
return cx, cy, width, height
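# Worked example (for illustration only): a 640x360 box centered in the
# 1280x720 frame, box2d = {"x1": 320, "x2": 960, "y1": 180, "y2": 540},
# maps to (cx, cy, width, height) = (0.5, 0.5, 0.5, 0.5) in YOLO coordinates.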
def label2txt(labels_json: Path, output_dir: Path):
"""
This function converts the labels into a .txt file with the same name as the image.
It extracts the bounding box, class info from the .json file and converts it into
the darknet format.
The darknet format is
<object id> <x> <y> <width> <height>
"""
assert labels_json.is_file(), "Labels JSON file doesn't exist"
assert output_dir.is_dir(), "Output directory doesn't exist"
frames = json.load(open(labels_json, "r"))
for frame in frames:
img_name = Path(frame["name"])
assert img_name.suffix == ".jpg"
frame_file = output_dir / (img_name.with_suffix(".txt"))
# Creates, opens, and adds to a txt file with the name of each image.jpg
with open(frame_file, "w+") as f:
# For each sub label of each image, get the box2d variable
# Get the relative center point compared to the image size 1280/720
for label in frame["labels"]:
if "box2d" not in label:
continue
box2d = label["box2d"]
if box2d["x1"] >= box2d["x2"] or box2d["y1"] >= box2d["y2"]:
continue
cx, cy, width, height = box2d_to_yolo(box2d)
lbl = LABEL_MAP[label["category"]]
f.write("{} {} {} {} {}\n".format(lbl, cx, cy, width, height))
if __name__ == "__main__":
args = validate_args(parse_args())
# First, copy each data directory over to the output directory.
for dir in ["train", "val", "test"]:
src = args.data_path / dir
dst = args.output_dir / dir
dst.mkdir(parents=True, exist_ok=True)
cp_cmd = [
"rsync",
"-a",
str(src) + "/", # Trailing slash needed for rsync
str(dst),
]
print("-- Copying the data over to {}".format(dst))
print("> {}".format(" ".join(cp_cmd)))
proc = sp.Popen(cp_cmd, stdout=sp.DEVNULL)
if dir == "train" or dir == "val":
print("-- Generating labels at that dir in parallel")
if dir == "train":
label2txt(args.train_labels, dst)
if dir == "val":
label2txt(args.val_labels, dst)
proc.wait()
print("-- Done copying")
if dir == "train" or dir == "val":
print("-- Removing images without corresponding labels")
rm_imgs_without_labels(dst)
# Create names file
names = [''] * len(LABEL_MAP)
for label, num in LABEL_MAP.items():
names[num] = label
names_file = args.output_dir / "bdd100k.names"
with open(names_file, "w+") as f:
f.write("\n".join(names))
|
the-stack_0_6873 | #!/usr/bin/env python3
"""Python S3 Manager"""
import sys
import os
import pandas as pd
import boto3
from botocore.exceptions import ClientError
from shapely.geometry import box
class s3UploadDownload:
"""
A class to upload/pull files to/from S3.
"""
def __init__(self, bucket_name=None):
"""
constructor of the class.
"""
session = boto3.Session(
aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY')
)
self.client = session.client('s3')
self.resource = session.resource('s3')
self.bucket_name = bucket_name
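    def check_bucket_exists(self):
        """
        Minimal sketch of a bucket-existence check: pull_file/put_file call this
        method, and head_bucket raises a ClientError if the bucket is missing
        or not accessible with the current credentials.
        """
        self.client.head_bucket(Bucket=self.bucket_name)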
def pull_file(self, file_name):
"""
        Download a file from the S3 bucket to the local machine.
Parameters:
file_name: name of the file on S3 bucket to pull.
"""
self.check_bucket_exists()
try:
self.client.download_file(self.bucket_name, file_name, file_name)
except ClientError:
return False
return True
def put_file(self, file_name, object_name=None):
"""
        Upload a file to the S3 bucket.
        Parameters:
            file_name: name and path of the local file to upload
            object_name: S3 object key to write to (defaults to file_name)
"""
if object_name is None:
object_name = file_name
self.check_bucket_exists()
try:
self.client.upload_file(file_name, self.bucket_name, object_name)
except ClientError:
return False
return True
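# Usage sketch (bucket and file names are placeholders; assumes the AWS
# credentials read in __init__ are set in the environment):
#   s3 = s3UploadDownload(bucket_name="my-bucket")
#   s3.put_file("local_data.csv", object_name="uploads/data.csv")
#   s3.pull_file("uploads/data.csv")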
|
the-stack_0_6874 | from urllib import request
import xml.etree.ElementTree as ET
import os
wnid = "n02783161"
dirpath = os.path.join("raw_images",wnid)
if os.path.isdir(dirpath) == False:
os.mkdir(dirpath)
IMG_LIST_URL="http://www.image-net.org/api/text/imagenet.synset.geturls.getmapping?wnid={}"
url = IMG_LIST_URL.format(wnid)
with request.urlopen(url) as response:
html = response.read()
data = html.decode()
data = data.split()
fnames = data[::2]
urls = data[1::2]
files = os.listdir(os.path.join("bbox","Annotation",wnid))
annotated_index = [fnames.index(f.split('.')[0]) for f in files]
print(annotated_index)
# iterate over annotated_index (only images that have bounding-box annotations) instead of all fnames
#for i in range(len(fnames)):
for i in annotated_index:
try:
print("Found:",urls[i],fnames[i])
with request.urlopen(urls[i]) as response:
img = response.read()
with open(os.path.join(dirpath,fnames[i]),'wb') as f:
f.write(img)
except:
print("Not Found:" + urls[i]) |
the-stack_0_6875 | #!/usr/bin/env python
#
# Use the raw transactions API to spend BONTEs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bontecoind or bontecoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bontecoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bontecoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bontecoin")
return os.path.expanduser("~/.bontecoin")
def read_bitcoin_config(dbdir):
"""Read the bontecoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bontecoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bontecoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 51475 if testnet else 51473
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bontecoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bontecoind):
info = bontecoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bontecoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bontecoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bontecoind):
address_summary = dict()
address_to_account = dict()
for info in bontecoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bontecoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bontecoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bontecoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bontecoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bontecoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bontecoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bontecoind.createrawtransaction(inputs, outputs)
signed_rawtx = bontecoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bontecoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bontecoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bontecoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bontecoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bontecoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        fee = total_in - total_out  # actual fee paid by this transaction
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get BONTEs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send BONTEs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bontecoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bontecoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bontecoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bontecoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bontecoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bontecoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bontecoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
the-stack_0_6878 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from scipy.ndimage import gaussian_filter
import numpy as np
class AffinityRefinementOperation(metaclass=abc.ABCMeta):
def check_input(self, X):
"""Check the input to the refine() method.
Args:
X: the input to the refine() method
Raises:
TypeError: if X has wrong type
ValueError: if X has wrong shape, etc.
"""
if not isinstance(X, np.ndarray):
raise TypeError("X must be a numpy array")
shape = X.shape
if len(shape) != 2:
raise ValueError("X must be 2-dimensional")
if shape[0] != shape[1]:
raise ValueError("X must be a square matrix")
@abc.abstractmethod
def refine(self, X):
"""Perform the refinement operation.
Args:
X: the affinity matrix, of size (n_samples, n_samples)
Returns:
a matrix of the same size as X
"""
pass
class CropDiagonal(AffinityRefinementOperation):
"""Crop the diagonal.
Replace diagonal element by the max non-diagonal value of row.
After this operation, the matrix has similar properties to a standard
Laplacian matrix.
This also helps to avoid the bias during Gaussian blur and normalization.
"""
def refine(self, X):
self.check_input(X)
Y = np.copy(X)
np.fill_diagonal(Y, 0.0)
di = np.diag_indices(Y.shape[0])
Y[di] = Y.max(axis=1)
return Y
class GaussianBlur(AffinityRefinementOperation):
"""Apply Gaussian blur."""
def __init__(self, sigma=1):
self.sigma = sigma
def refine(self, X):
self.check_input(X)
return gaussian_filter(X, sigma=self.sigma)
class RowWiseThreshold(AffinityRefinementOperation):
"""Apply row wise thresholding."""
def __init__(self,
p_percentile=0.95,
thresholding_soft_multiplier=0.01,
thresholding_with_row_max=False):
self.p_percentile = p_percentile
self.multiplier = thresholding_soft_multiplier
self.thresholding_with_row_max = thresholding_with_row_max
def refine(self, X):
self.check_input(X)
Y = np.copy(X)
if self.thresholding_with_row_max:
# row_max based thresholding
row_max = Y.max(axis=1)
row_max = np.expand_dims(row_max, axis=1)
is_smaller = Y < (row_max * self.p_percentile)
else:
# percentile based thresholding
row_percentile = np.percentile(Y, self.p_percentile * 100, axis=1)
row_percentile = np.expand_dims(row_percentile, axis=1)
is_smaller = Y < row_percentile
Y = (Y * np.invert(is_smaller)) + (Y * self.multiplier * is_smaller)
return Y
class Symmetrize(AffinityRefinementOperation):
"""The Symmetrization operation."""
def refine(self, X):
self.check_input(X)
return np.maximum(X, np.transpose(X))
class Diffuse(AffinityRefinementOperation):
"""The diffusion operation."""
def refine(self, X):
self.check_input(X)
return np.matmul(X, np.transpose(X))
class RowWiseNormalize(AffinityRefinementOperation):
"""The row wise max normalization operation."""
def refine(self, X):
self.check_input(X)
Y = np.copy(X)
row_max = Y.max(axis=1)
Y /= np.expand_dims(row_max, axis=1)
return Y
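# Usage sketch (not part of the original file): apply the refinements in the
# order typically used for diarization affinity matrices; the 6x6 random
# matrix below only stands in for a real affinity matrix.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    affinity = rng.rand(6, 6)
    affinity = (affinity + affinity.T) / 2.0  # symmetrize the toy input
    for op in [CropDiagonal(), GaussianBlur(sigma=1),
               RowWiseThreshold(p_percentile=0.95), Symmetrize(),
               Diffuse(), RowWiseNormalize()]:
        affinity = op.refine(affinity)
    print(affinity.shape)  # still (6, 6)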
|
the-stack_0_6880 | import sys
sys.path.append('./')
import unittest
from Added import Added
class AddedTest(unittest.TestCase):
def test_add(self):
added = Added()
expected = added.add(1,2)
self.assertEqual(3, expected)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_6882 | import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name="size",
parent_name="densitymapbox.colorbar.tickfont",
**kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
|
the-stack_0_6885 | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import pdb
import torch
import numpy as np
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], 'pretrained_model/encoder'))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
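# Usage sketch (not part of the original file): build an untrained ResNet-50
# with a 10-class head and check the output shape on a dummy 224x224 batch.
if __name__ == "__main__":
    net = resnet50(pretrained=False, num_classes=10)
    dummy = torch.randn(2, 3, 224, 224)
    print(net(dummy).shape)  # torch.Size([2, 10])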
|
the-stack_0_6887 | import math
import torch
from torch.distributions import constraints
from pyro.nn import PyroModule, pyro_method, PyroParam
root_three = math.sqrt(3.0)
root_five = math.sqrt(5.0)
five_thirds = 5.0 / 3.0
class MaternKernel(PyroModule):
"""
Provides the building blocks for representing univariate Gaussian Processes (GPs)
with Matern kernels as state space models.
:param float nu: The order of the Matern kernel (one of 0.5, 1.5 or 2.5)
:param int num_gps: the number of GPs
:param torch.Tensor length_scale_init: optional `num_gps`-dimensional vector of initializers
for the length scale
:param torch.Tensor kernel_scale_init: optional `num_gps`-dimensional vector of initializers
for the kernel scale
**References**
[1] `Kalman Filtering and Smoothing Solutions to Temporal Gaussian Process Regression Models`,
Jouni Hartikainen and Simo Sarkka.
[2] `Stochastic Differential Equation Methods for Spatio-Temporal Gaussian Process Regression`,
Arno Solin.
"""
def __init__(self, nu=1.5, num_gps=1, length_scale_init=None, kernel_scale_init=None):
if nu not in [0.5, 1.5, 2.5]:
raise NotImplementedError("The only supported values of nu are 0.5, 1.5 and 2.5")
self.nu = nu
self.state_dim = {0.5: 1, 1.5: 2, 2.5: 3}[nu]
self.num_gps = num_gps
if length_scale_init is None:
length_scale_init = torch.ones(num_gps)
assert length_scale_init.shape == (num_gps,)
if kernel_scale_init is None:
kernel_scale_init = torch.ones(num_gps)
assert kernel_scale_init.shape == (num_gps,)
super().__init__()
self.length_scale = PyroParam(length_scale_init, constraint=constraints.positive)
self.kernel_scale = PyroParam(kernel_scale_init, constraint=constraints.positive)
if self.state_dim > 1:
for x in range(self.state_dim):
for y in range(self.state_dim):
mask = torch.zeros(self.state_dim, self.state_dim)
mask[x, y] = 1.0
self.register_buffer("mask{}{}".format(x, y), mask)
@pyro_method
def transition_matrix(self, dt):
"""
Compute the (exponentiated) transition matrix of the GP latent space.
The resulting matrix has layout (num_gps, old_state, new_state), i.e. this
matrix multiplies states from the right.
See section 5 in reference [1] for details.
:param float dt: the time interval over which the GP latent space evolves.
:returns torch.Tensor: a 3-dimensional tensor of transition matrices of shape
(num_gps, state_dim, state_dim).
"""
if self.nu == 0.5:
rho = self.length_scale.unsqueeze(-1).unsqueeze(-1)
return torch.exp(-dt / rho)
elif self.nu == 1.5:
rho = self.length_scale.unsqueeze(-1).unsqueeze(-1)
dt_rho = dt / rho
trans = (1.0 + root_three * dt_rho) * self.mask00 + \
(-3.0 * dt_rho / rho) * self.mask01 + \
dt * self.mask10 + \
(1.0 - root_three * dt_rho) * self.mask11
return torch.exp(-root_three * dt_rho) * trans
elif self.nu == 2.5:
rho = self.length_scale.unsqueeze(-1).unsqueeze(-1)
dt_rho = root_five * dt / rho
dt_rho_sq = dt_rho.pow(2.0)
dt_rho_cu = dt_rho.pow(3.0)
dt_rho_qu = dt_rho.pow(4.0)
dt_sq = dt ** 2.0
trans = (1.0 + dt_rho + 0.5 * dt_rho_sq) * self.mask00 + \
(-0.5 * dt_rho_cu / dt) * self.mask01 + \
((0.5 * dt_rho_qu - dt_rho_cu) / dt_sq) * self.mask02 + \
((dt_rho + 1.0) * dt) * self.mask10 + \
(1.0 + dt_rho - dt_rho_sq) * self.mask11 + \
((dt_rho_cu - 3.0 * dt_rho_sq) / dt) * self.mask12 + \
(0.5 * dt_sq) * self.mask20 + \
((1.0 - 0.5 * dt_rho) * dt) * self.mask21 + \
(1.0 - 2.0 * dt_rho + 0.5 * dt_rho_sq) * self.mask22
return torch.exp(-dt_rho) * trans
@pyro_method
def stationary_covariance(self):
"""
Compute the stationary state covariance. See Eqn. 3.26 in reference [2].
:returns torch.Tensor: a 3-dimensional tensor of covariance matrices of shape
(num_gps, state_dim, state_dim).
"""
if self.nu == 0.5:
sigmasq = self.kernel_scale.pow(2).unsqueeze(-1).unsqueeze(-1)
return sigmasq
elif self.nu == 1.5:
sigmasq = self.kernel_scale.pow(2).unsqueeze(-1).unsqueeze(-1)
rhosq = self.length_scale.pow(2).unsqueeze(-1).unsqueeze(-1)
p_infinity = self.mask00 + (3.0 / rhosq) * self.mask11
return sigmasq * p_infinity
elif self.nu == 2.5:
sigmasq = self.kernel_scale.pow(2).unsqueeze(-1).unsqueeze(-1)
rhosq = self.length_scale.pow(2).unsqueeze(-1).unsqueeze(-1)
p_infinity = self.mask00 + \
(five_thirds / rhosq) * (self.mask11 - self.mask02 - self.mask20) + \
(25.0 / rhosq.pow(2.0)) * self.mask22
return sigmasq * p_infinity
@pyro_method
def process_covariance(self, A):
"""
Given a transition matrix `A` computed with `transition_matrix` compute the
the process covariance as described in Eqn. 3.11 in reference [2].
:returns torch.Tensor: a batched covariance matrix of shape (num_gps, state_dim, state_dim)
"""
assert A.shape[-3:] == (self.num_gps, self.state_dim, self.state_dim)
p = self.stationary_covariance()
q = p - torch.matmul(A.transpose(-1, -2), torch.matmul(p, A))
return q
@pyro_method
def transition_matrix_and_covariance(self, dt):
"""
Get the transition matrix and process covariance corresponding to a time interval `dt`.
:param float dt: the time interval over which the GP latent space evolves.
:returns tuple: (`transition_matrix`, `process_covariance`) both 3-dimensional tensors of
shape (num_gps, state_dim, state_dim)
"""
trans_matrix = self.transition_matrix(dt)
process_covar = self.process_covariance(trans_matrix)
return trans_matrix, process_covar
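# Usage sketch (not part of the original file; assumes pyro-ppl is installed):
# with nu=1.5 the latent state is 2-dimensional, so for two GPs both returned
# tensors have shape (num_gps, state_dim, state_dim) == (2, 2, 2).
if __name__ == "__main__":
    kernel = MaternKernel(nu=1.5, num_gps=2)
    trans, cov = kernel.transition_matrix_and_covariance(dt=0.5)
    print(trans.shape, cov.shape)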
|
the-stack_0_6888 | import torch
import torch.nn as nn
import numpy as np
from mushroom_rl.policy.torch_policy import TorchPolicy, GaussianTorchPolicy
def abstract_method_tester(f, *args):
try:
f(*args)
except NotImplementedError:
pass
else:
assert False
class Network(nn.Module):
def __init__(self, input_shape, output_shape, n_features, **kwargs):
super(Network, self).__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self._h1 = nn.Linear(n_input, n_features)
self._h2 = nn.Linear(n_features, n_features)
self._h3 = nn.Linear(n_features, n_output)
nn.init.xavier_uniform_(self._h1.weight,
gain=nn.init.calculate_gain('tanh'))
nn.init.xavier_uniform_(self._h2.weight,
gain=nn.init.calculate_gain('tanh'))
nn.init.xavier_uniform_(self._h3.weight,
gain=nn.init.calculate_gain('linear'))
def forward(self, state, **kwargs):
features1 = torch.tanh(self._h1(torch.squeeze(state, -1).float()))
features2 = torch.tanh(self._h2(features1))
a = self._h3(features2)
return a
def test_torch_policy():
tmp = TorchPolicy(False)
abstract_method_tester(tmp.draw_action_t, None)
abstract_method_tester(tmp.log_prob_t, None, None)
abstract_method_tester(tmp.entropy_t, None)
abstract_method_tester(tmp.distribution_t, None)
abstract_method_tester(tmp.set_weights, None)
abstract_method_tester(tmp.get_weights)
abstract_method_tester(tmp.parameters)
tmp.reset()
tmp.use_cuda
def test_gaussian_torch_policy():
np.random.seed(88)
torch.manual_seed(88)
pi = GaussianTorchPolicy(Network, (3,), (2,), n_features=50)
state = np.random.rand(3)
action = pi.draw_action(state)
action_test = np.array([-0.21276927, 0.27437747])
assert np.allclose(action, action_test)
p_sa = pi(state, action)
p_sa_test = 0.07710557966732147
assert np.allclose(p_sa, p_sa_test)
entropy = pi.entropy()
entropy_test = 2.837877
assert np.allclose(entropy, entropy_test)
|
the-stack_0_6889 | import gym
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import numpy as np
import random
from matplotlib import pyplot as plt
from custom_gym.cartpole import CustomCartPoleEnv
import timeit
# AGENT/NETWORK HYPERPARAMETERS
EPSILON_INITIAL = 1.0 # exploration rate
EPSILON_DECAY = 0.997
EPSILON_MIN = 0.01
ALPHA = 0.001 # learning rate
GAMMA = 0.95 # discount factor
TAU = 0.3 # target network soft update hyperparameter
EXPERIENCE_REPLAY_BATCH_SIZE = 20
AGENT_MEMORY_LIMIT = 10000
STEPS_BEFORE_REPLAY = 10
OBSERVATION_SPACE_DIMS = 4
ACTION_SPACE = [0,1]
def create_dqn(action_space, observation_space):
nn = Sequential()
nn.add(Dense(128, input_dim=OBSERVATION_SPACE_DIMS, activation="relu"))
nn.add(Dense(128, activation='relu'))
nn.add(Dense(len(ACTION_SPACE), activation='linear'))
nn.compile(loss='mse', optimizer=Adam(lr=ALPHA))
return nn
class DoubleDQNAgent(object):
def __init__(self, action_space, observation_space):
self.memory = []
self.action_space = action_space
self.observation_space = observation_space
self.online_network = create_dqn(action_space, observation_space)
self.target_network = create_dqn(action_space, observation_space)
self.epsilon = EPSILON_INITIAL
self.has_talked = False
def act(self, state):
if self.epsilon > np.random.rand():
# explore
return np.random.choice(self.action_space)
else:
# exploit
state = self._reshape_state_for_net(state)
q_values = self.online_network.predict(state)[0]
return np.argmax(q_values)
def experience_replay(self):
minibatch = random.sample(self.memory, EXPERIENCE_REPLAY_BATCH_SIZE)
minibatch_new_q_values = []
for state, action, reward, next_state, done in minibatch:
state = self._reshape_state_for_net(state)
experience_new_q_values = self.online_network.predict(state)[0]
if done:
q_update = reward
else:
next_state = self._reshape_state_for_net(next_state)
# using online network to SELECT action
online_net_selected_action = np.argmax(self.online_network.predict(next_state))
# using target network to EVALUATE action
target_net_evaluated_q_value = self.target_network.predict(next_state)[0][online_net_selected_action]
q_update = reward + GAMMA * target_net_evaluated_q_value
experience_new_q_values[action] = q_update
minibatch_new_q_values.append(experience_new_q_values)
minibatch_states = np.array([state for state,_,_,_,_ in minibatch])
minibatch_new_q_values = np.array(minibatch_new_q_values)
self.online_network.fit(minibatch_states, minibatch_new_q_values, verbose=False, epochs=1)
def update_target_network(self):
q_network_theta = self.online_network.get_weights()
target_network_theta = self.target_network.get_weights()
counter = 0
for q_weight, target_weight in zip(q_network_theta,target_network_theta):
target_weight = target_weight * (1-TAU) + q_weight * TAU
target_network_theta[counter] = target_weight
counter += 1
self.target_network.set_weights(target_network_theta)
def remember(self, state, action, reward, next_state, done):
if len(self.memory) > AGENT_MEMORY_LIMIT:
self.memory.pop(0)
experience = (state, action, reward, next_state, done)
self.memory.append(experience)
def update_epsilon(self):
self.epsilon = max(self.epsilon * EPSILON_DECAY, EPSILON_MIN)
def _reshape_state_for_net(self, state):
return np.reshape(state,(1, OBSERVATION_SPACE_DIMS))
def save_model(self):
self.online_network.save_weights('./normal_model/weights_online')
self.target_network.save_weights('./normal_model/weights_target')
def load_model(self):
try:
self.target_network.load_weights('./normal_model/weights_target')
self.online_network.load_weights('./normal_model/weights_online')
except:
pass
def test_agent():
env = CustomCartPoleEnv(mode=0, render_mode='no')#'human')
trials = []
NUMBER_OF_TRIALS=5
MAX_TRAINING_EPISODES = 500
MAX_STEPS_PER_EPISODE = 400
observation_space = env.observation_space.shape[0]
action_space = env.action_space.n
log_list = list()
for trial_index in range(NUMBER_OF_TRIALS):
agent = DoubleDQNAgent(action_space, observation_space)
#agent.load_model()
trial_episode_scores = []
s = 0
for episode_index in range(1, MAX_TRAINING_EPISODES+1):
state = env.reset()
episode_score = 0
steps =0
for _ in range(MAX_STEPS_PER_EPISODE):
action = agent.act(state)
next_state, reward, done, _ = env.step(action)
episode_score += reward
s+=1
steps+=1
agent.remember(state, action, reward, next_state, done)
#print(state)
state = next_state
if s > STEPS_BEFORE_REPLAY and len(agent.memory) > 2*EXPERIENCE_REPLAY_BATCH_SIZE:
agent.experience_replay()
agent.update_target_network()
agent.save_model()
s=0
if done:
break
trial_episode_scores.append(episode_score)
agent.update_epsilon()
last_100_avg = np.mean(trial_episode_scores[-100:])
tmp = "Run: " + str(episode_index) + ", steps_done: " + str(steps) + ", avg_points_per_step: " + str(episode_score/steps) + ", exploration: " + str(agent.epsilon) + ", score: " + str(episode_score) +", avg_last_100_score: " + str(last_100_avg)+"\n"
log_list.append(tmp)
if len(log_list)>10:
with open("log3.log", "a") as myfile:
for log in log_list:
myfile.write(log)
log_list = list()
trials.append(np.array(trial_episode_scores))
return np.array(trials)
def plot_individual_trial(trial):
plt.plot(trial)
plt.ylabel('points in Episode')
plt.xlabel('Episode')
plt.title('Double DQN points in Select Trial')
plt.show()
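# Usage sketch (illustrative; not part of the original script): run a trained
# agent greedily for a few episodes by temporarily zeroing its exploration
# rate.  The gym-style env API (reset/step) matches the training loop above.
def evaluate_agent(agent, env, episodes=5, max_steps=400):
    saved_epsilon, agent.epsilon = agent.epsilon, 0.0
    scores = []
    for _ in range(episodes):
        state = env.reset()
        score = 0
        for _ in range(max_steps):
            state, reward, done, _ = env.step(agent.act(state))
            score += reward
            if done:
                break
        scores.append(score)
    agent.epsilon = saved_epsilon
    return np.mean(scores)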
if __name__ == '__main__':
trials = test_agent()
# print 'Saving', file_name
np.save('double_dqn_cartpole_trials.npy', trials)
trials = np.load('double_dqn_cartpole_trials.npy')
plot_individual_trial(trials[1]) |
the-stack_0_6890 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 6 23:45:59 2017
@author: yxl
"""
import os, sys, os.path as osp
from glob import glob
from sciapp.action import Macros, Widget, Report
from .. import root_dir
from .manager import DocumentManager, DictManager
from codecs import open
def get_path(root, path):
for i in range(10,0,-1):
if not '../'*i in path: continue
s = root
for j in range(i):s=os.path.dirname(s)
path = path.replace('../'*i, s+'/')
return path.replace('\\\\','\\').replace('\\','/')
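# Worked example (illustrative): with root = 'C:/imagepy/menus/plugins' and
# path = '../../data/readme.md', the '../../' prefix strips two directory
# levels from root, so the function returns 'C:/imagepy/data/readme.md'
# (backslashes, if any, are normalized to forward slashes).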
def extend_plugins(path, lst, err):
rst = []
for i in lst:
if isinstance(i, tuple) or i=='-': rst.append(i)
elif i[-3:] == 'rpt':
pt = os.path.join(root_dir,path)
rst.append(Report(i[:-4], pt+'/'+i))
elif i[-3:] in {'.md', '.mc', '.wf'}:
p = os.path.join(os.path.join(root_dir, path), i).replace('\\','/')
rst.append(Macros(i[:-3], ['Open>{"path":"%s"}'%p]))
elif i[-6:] in ['wgt.py', 'gts.py']:
try:
rpath = path.replace('/', '.').replace('\\','.')
plg = __import__('imagepy.'+ rpath+'.'+i[:-3],'','',[''])
if hasattr(plg, 'wgts'):
rst.extend([j if j=='-' else Widget(j) for j in plg.wgts])
else:
rst.append(Widget(plg.Plugin))
except Exception as e:
err.append((path, i, sys.exc_info()[1]))
else:
try:
rpath = path.replace('/', '.').replace('\\','.')
plg = __import__('imagepy.'+ rpath+'.'+i[:-3],'','',[''])
if hasattr(plg, 'plgs'):
rst.extend([j for j in plg.plgs])
for p in plg.plgs:
if not isinstance(p, str): pass
else:
rst.append(plg.Plugin)
except Exception as e:
err.append((path, i, sys.exc_info()[1]))
return rst
def sort_plugins(catlog, lst):
rst = []
for i in catlog:
if i=='-':rst.append('-')
for j in lst:
if j[:-3]==i or j[:-4]==i or j[0].title==i:
lst.remove(j)
rst.append(j)
rst.extend(lst)
return rst
def build_plugins(path, err='root'):
root = err=='root'
if root: err=[]
subtree = []
cont = os.listdir(path)
for i in cont:
subp = os.path.join(path,i)
if os.path.isdir(subp):
sub = build_plugins(subp, err)
if len(sub)!=0:subtree.append(sub[:2])
elif i[-6:] in ('plg.py', 'lgs.py', 'wgt.py', 'gts.py'):
subtree.append(i)
elif i[-3:] in ('.mc', '.md', '.wf', 'rpt'):
subtree.append(i)
if len(subtree)==0:return []
path = path[path.index(root_dir)+len(root_dir)+1:]
rpath = path.replace('/', '.').replace('\\','.')
pg = __import__('imagepy.'+rpath,'','',[''])
pg.title = os.path.basename(path)
if hasattr(pg, 'catlog'):
subtree = sort_plugins(pg.catlog, subtree)
subtree = extend_plugins(path, subtree, err)
return pg, subtree, err
def extend_tools(path, lst, err):
rst = []
for i in lst:
if i[-3:] in ('.mc', '.md', '.wf', 'rpt'):
p = os.path.join(os.path.join(root_dir,path), i).replace('\\','/')
rst.append((Macros(i[:-3], ['Open>{"path":"%s"}'%p]),
os.path.join(root_dir, path)+'/'+i[:-3]+'.gif'))
else:
try:
rpath = path.replace('/', '.').replace('\\','.')
plg = __import__('imagepy.'+rpath+'.'+i,'','',[''])
if hasattr(plg, 'plgs'):
for i,j in plg.plgs: rst.append((i, path+'/'+j))
else: rst.append((plg.Plugin,
os.path.join(root_dir, path)+'/'+i.split('_')[0]+'.gif'))
except Exception as e:
err.append((path, i, sys.exc_info()[1]))
return rst
def sort_tools(catlog, lst):
rst = []
for i in catlog:
if i=='-':rst.append('-')
for j in lst:
if j==i or j[0].title==i or j[:-3]==i:
lst.remove(j)
rst.append(j)
rst.extend(lst)
return rst
def build_tools(path, err='root'):
root = err=='root'
if root: err=[]
subtree = []
cont = os.listdir(os.path.join(root_dir, path))
for i in cont:
subp = os.path.join(path,i)
if root and os.path.isdir(os.path.join(root_dir, subp)):
sub = build_tools(subp, err)
if len(sub)!=0:subtree.append(sub[:2])
elif not root:
if i[len(i)-7:] in ('_tol.py', 'tols.py'):
subtree.append(i[:-3])
elif i[-3:] in ('.mc', '.md', '.wf', 'rpt'):
subtree.append(i)
if len(subtree)==0:return []
rpath = path.replace('/', '.').replace('\\','.')
#rpath = rpath[rpath.index('imagepy.'):]
pg = __import__('imagepy.' + rpath,'','',[''])
pg.title = os.path.basename(path)
if hasattr(pg, 'catlog'):
subtree = sort_tools(pg.catlog, subtree)
if not root:subtree = extend_tools(path, subtree, err)
return pg, subtree, err
def extend_widgets(path, lst, err):
rst = []
for i in lst:
try:
rpath = path.replace('/', '.').replace('\\','.')
plg = __import__('imagepy.'+rpath+'.'+i,'','',[''])
rst.append(plg.Plugin)
except Exception as e:
err.append((path, i, sys.exc_info()[1]))
return rst
def sort_widgets(catlog, lst):
rst = []
for i in catlog:
if i=='-':rst.append('-')
for j in lst:
if j==i or j[:-3]==i or j[0].title==i:
lst.remove(j)
rst.append(j)
rst.extend(lst)
return rst
def build_widgets(path, err='root'):
root = err=='root'
if root: err=[]
subtree = []
cont = os.listdir(os.path.join(root_dir, path))
for i in cont:
subp = os.path.join(path,i)
if root and os.path.isdir(os.path.join(root_dir, subp)):
sub = build_widgets(subp, err)
if len(sub)!=0:subtree.append(sub[:2])
elif not root:
if i[len(i)-7:] in ('_wgt.py', 'wgts.py'):
subtree.append(i[:-3])
#print('====', subtree)
if len(subtree)==0:return []
rpath = path.replace('/', '.').replace('\\','.')
#rpath = rpath[rpath.index('imagepy.'):]
pg = __import__('imagepy.' + rpath,'','',[''])
pg.title = os.path.basename(path)
if hasattr(pg, 'catlog'):
subtree = sort_widgets(pg.catlog, subtree)
if not root: subtree = extend_widgets(path, subtree, err)
return pg, subtree, err
def build_document(path):
docs = []
for lang in [osp.split(i)[1] for i in glob(path+'/*') if osp.isdir(i)]:
for dirpath, dirnames, filenames in os.walk(path+'/'+lang):
for filename in filenames:
if filename[-3:] != '.md': continue
docs.append(os.path.join(dirpath, filename))
with open(docs[-1], encoding='utf-8') as f:
DocumentManager.add(filename[:-3], f.read(), lang)
return docs
def build_dictionary(path):
for lang in [osp.split(i)[1] for i in glob(path+'/*') if osp.isdir(i)]:
for dirpath, dirnames, filenames in os.walk(path+'/'+lang):
for filename in filenames:
if filename[-3:] != 'dic': continue
with open(os.path.join(dirpath, filename), encoding='utf-8') as f:
lines = f.read().replace('\r','').split('\n')
dic = []
for line in lines:
if line == '':
dic[-1] = (dic[-1][0][0], dict(dic[-1]))
elif line[0] == '\t':
dic[-1].append(line[1:].split('::'))
else:
dic.append([line.split('::')])
if isinstance(dic[-1], list):
dic[-1] = (dic[-1][0][0], dict(dic[-1]))
dic = dict(dic)
for i in dic:
obj = DictManager.get(i, tag=lang)
if not obj is None: obj.update(dic[i])
else: DictManager.add(i, dic[i], lang)
common = DictManager.get('common', tag=lang)
if common is None: return
objs = DictManager.gets(tag=lang)
for i in objs: i[1].update(common)
if __name__ == "__main__":
print (os.getcwd())
os.chdir('../../')
data = build_tools('tools') |
the-stack_0_6892 | """distutils.ccompiler
Contains CCompiler, an abstract base class that defines the interface
for the Distutils compiler abstraction model."""
import sys, os, re
from distutils.errors import *
from distutils.spawn import spawn
from distutils.file_util import move_file
from distutils.dir_util import mkpath
from distutils.dep_util import newer_pairwise, newer_group
from distutils.util import split_quoted, execute
from distutils import log
class CCompiler:
"""Abstract base class to define the interface that must be implemented
by real compiler classes. Also has some utility methods used by
several compiler classes.
The basic idea behind a compiler abstraction class is that each
instance can be used for all the compile/link steps in building a
single project. Thus, attributes common to all of those compile and
link steps -- include directories, macros to define, libraries to link
against, etc. -- are attributes of the compiler instance. To allow for
variability in how individual files are treated, most of those
attributes may be varied on a per-compilation or per-link basis.
"""
# 'compiler_type' is a class attribute that identifies this class. It
# keeps code that wants to know what kind of compiler it's dealing with
# from having to import all possible compiler classes just to do an
# 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
# should really, really be one of the keys of the 'compiler_class'
# dictionary (see below -- used by the 'new_compiler()' factory
# function) -- authors of new compiler interface classes are
# responsible for updating 'compiler_class'!
compiler_type = None
# XXX things not handled by this compiler abstraction model:
# * client can't provide additional options for a compiler,
# e.g. warning, optimization, debugging flags. Perhaps this
# should be the domain of concrete compiler abstraction classes
# (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
# class should have methods for the common ones.
    #   * can't completely override the include or library search
# path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
# I'm not sure how widely supported this is even by Unix
# compilers, much less on other platforms. And I'm even less
# sure how useful it is; maybe for cross-compiling, but
# support for that is a ways off. (And anyways, cross
# compilers probably have a dedicated binary with the
# right paths compiled in. I hope.)
# * can't do really freaky things with the library list/library
# dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
# different versions of libfoo.a in different locations. I
# think this is useless without the ability to null out the
# library search path anyways.
# Subclasses that rely on the standard filename generation methods
# implemented below should override these; see the comment near
# those methods ('object_filenames()' et. al.) for details:
src_extensions = None # list of strings
obj_extension = None # string
static_lib_extension = None
shared_lib_extension = None # string
static_lib_format = None # format string
shared_lib_format = None # prob. same as static_lib_format
exe_extension = None # string
# Default language settings. language_map is used to detect a source
# file or Extension target language, checking source filenames.
# language_order is used to detect the language precedence, when deciding
# what language to use when mixing source types. For example, if some
# extension has two files with ".c" extension, and one with ".cpp", it
# is still linked as c++.
language_map = {".c" : "c",
".cc" : "c++",
".cpp" : "c++",
".cxx" : "c++",
".m" : "objc",
}
language_order = ["c++", "objc", "c"]
def __init__(self, verbose=0, dry_run=0, force=0):
self.dry_run = dry_run
self.force = force
self.verbose = verbose
# 'output_dir': a common output directory for object, library,
# shared object, and shared library files
self.output_dir = None
# 'macros': a list of macro definitions (or undefinitions). A
# macro definition is a 2-tuple (name, value), where the value is
# either a string or None (no explicit value). A macro
# undefinition is a 1-tuple (name,).
self.macros = []
# 'include_dirs': a list of directories to search for include files
self.include_dirs = []
# 'libraries': a list of libraries to include in any link
# (library names, not filenames: eg. "foo" not "libfoo.a")
self.libraries = []
# 'library_dirs': a list of directories to search for libraries
self.library_dirs = []
# 'runtime_library_dirs': a list of directories to search for
# shared libraries/objects at runtime
self.runtime_library_dirs = []
# 'objects': a list of object files (or similar, such as explicitly
# named library files) to include on any link
self.objects = []
for key in self.executables.keys():
self.set_executable(key, self.executables[key])
def set_executables(self, **kwargs):
"""Define the executables (and options for them) that will be run
to perform the various stages of compilation. The exact set of
executables that may be specified here depends on the compiler
class (via the 'executables' class attribute), but most will have:
compiler the C/C++ compiler
linker_so linker used to create shared objects and libraries
linker_exe linker used to create binary executables
archiver static library creator
On platforms with a command-line (Unix, DOS/Windows), each of these
is a string that will be split into executable name and (optional)
list of arguments. (Splitting the string is done similarly to how
Unix shells operate: words are delimited by spaces, but quotes and
backslashes can override this. See
'distutils.util.split_quoted()'.)
"""
# Note that some CCompiler implementation classes will define class
# attributes 'cpp', 'cc', etc. with hard-coded executable names;
# this is appropriate when a compiler class is for exactly one
# compiler/OS combination (eg. MSVCCompiler). Other compiler
# classes (UnixCCompiler, in particular) are driven by information
# discovered at run-time, since there are many different ways to do
# basically the same things with Unix C compilers.
for key in kwargs:
if key not in self.executables:
raise ValueError("unknown executable '%s' for class %s" %
(key, self.__class__.__name__))
self.set_executable(key, kwargs[key])
def set_executable(self, key, value):
if isinstance(value, str):
setattr(self, key, split_quoted(value))
else:
setattr(self, key, value)
def _find_macro(self, name):
i = 0
for defn in self.macros:
if defn[0] == name:
return i
i += 1
return None
def _check_macro_definitions(self, definitions):
"""Ensures that every element of 'definitions' is a valid macro
definition, ie. either (name,value) 2-tuple or a (name,) tuple. Do
nothing if all definitions are OK, raise TypeError otherwise.
"""
for defn in definitions:
if not (isinstance(defn, tuple) and
(len(defn) in (1, 2) and
(isinstance (defn[1], str) or defn[1] is None)) and
isinstance (defn[0], str)):
raise TypeError(("invalid macro definition '%s': " % defn) + \
"must be tuple (string,), (string, string), or " + \
"(string, None)")
# -- Bookkeeping methods -------------------------------------------
def define_macro(self, name, value=None):
"""Define a preprocessor macro for all compilations driven by this
compiler object. The optional parameter 'value' should be a
string; if it is not supplied, then the macro will be defined
without an explicit value and the exact outcome depends on the
compiler used (XXX true? does ANSI say anything about this?)
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
self.macros.append((name, value))
def undefine_macro(self, name):
"""Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
'define_macro()' and undefined by 'undefine_macro()' the last call
takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to 'compile()'), then that
takes precedence.
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
undefn = (name,)
self.macros.append(undefn)
def add_include_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
header files. The compiler is instructed to search directories in
the order in which they are supplied by successive calls to
'add_include_dir()'.
"""
self.include_dirs.append(dir)
def set_include_dirs(self, dirs):
"""Set the list of directories that will be searched to 'dirs' (a
list of strings). Overrides any preceding calls to
        'add_include_dir()'; subsequent calls to 'add_include_dir()' add
to the list passed to 'set_include_dirs()'. This does not affect
any list of standard include directories that the compiler may
search by default.
"""
self.include_dirs = dirs[:]
def add_library(self, libname):
"""Add 'libname' to the list of libraries that will be included in
all links driven by this compiler object. Note that 'libname'
should *not* be the name of a file containing a library, but the
name of the library itself: the actual filename will be inferred by
the linker, the compiler, or the compiler class (depending on the
platform).
The linker will be instructed to link against libraries in the
order they were supplied to 'add_library()' and/or
'set_libraries()'. It is perfectly valid to duplicate library
names; the linker will be instructed to link against libraries as
many times as they are mentioned.
"""
self.libraries.append(libname)
def set_libraries(self, libnames):
"""Set the list of libraries to be included in all links driven by
this compiler object to 'libnames' (a list of strings). This does
not affect any standard system libraries that the linker may
include by default.
"""
self.libraries = libnames[:]
def add_library_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
libraries specified to 'add_library()' and 'set_libraries()'. The
linker will be instructed to search for libraries in the order they
are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
"""
self.library_dirs.append(dir)
def set_library_dirs(self, dirs):
"""Set the list of library search directories to 'dirs' (a list of
strings). This does not affect any standard library search path
that the linker may search by default.
"""
self.library_dirs = dirs[:]
def add_runtime_library_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
shared libraries at runtime.
"""
self.runtime_library_dirs.append(dir)
def set_runtime_library_dirs(self, dirs):
"""Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
"""
self.runtime_library_dirs = dirs[:]
def add_link_object(self, object):
"""Add 'object' to the list of object files (or analogues, such as
explicitly named library files or the output of "resource
compilers") to be included in every link driven by this compiler
object.
"""
self.objects.append(object)
def set_link_objects(self, objects):
"""Set the list of object files (or analogues) to be included in
every link to 'objects'. This does not affect any standard object
files that the linker may include by default (such as system
libraries).
"""
self.objects = objects[:]
# -- Private utility methods --------------------------------------
# (here for the convenience of subclasses)
# Helper method to prep compiler in subclass compile() methods
def _setup_compile(self, outdir, macros, incdirs, sources, depends,
extra):
"""Process arguments and decide which source files to compile."""
if outdir is None:
outdir = self.output_dir
elif not isinstance(outdir, str):
raise TypeError("'output_dir' must be a string or None")
if macros is None:
macros = self.macros
elif isinstance(macros, list):
macros = macros + (self.macros or [])
else:
raise TypeError("'macros' (if supplied) must be a list of tuples")
if incdirs is None:
incdirs = self.include_dirs
elif isinstance(incdirs, (list, tuple)):
incdirs = list(incdirs) + (self.include_dirs or [])
else:
raise TypeError(
"'include_dirs' (if supplied) must be a list of strings")
if extra is None:
extra = []
# Get the list of expected output (object) files
objects = self.object_filenames(sources, strip_dir=0,
output_dir=outdir)
assert len(objects) == len(sources)
pp_opts = gen_preprocess_options(macros, incdirs)
build = {}
for i in range(len(sources)):
src = sources[i]
obj = objects[i]
ext = os.path.splitext(src)[1]
self.mkpath(os.path.dirname(obj))
build[obj] = (src, ext)
return macros, objects, extra, pp_opts, build
def _get_cc_args(self, pp_opts, debug, before):
# works for unixccompiler, cygwinccompiler
cc_args = pp_opts + ['-c']
if debug:
cc_args[:0] = ['-g']
if before:
cc_args[:0] = before
return cc_args
def _fix_compile_args(self, output_dir, macros, include_dirs):
"""Typecheck and fix-up some of the arguments to the 'compile()'
method, and return fixed-up values. Specifically: if 'output_dir'
is None, replaces it with 'self.output_dir'; ensures that 'macros'
is a list, and augments it with 'self.macros'; ensures that
'include_dirs' is a list, and augments it with 'self.include_dirs'.
Guarantees that the returned values are of the correct type,
i.e. for 'output_dir' either string or None, and for 'macros' and
'include_dirs' either list or None.
"""
if output_dir is None:
output_dir = self.output_dir
elif not isinstance(output_dir, str):
raise TypeError("'output_dir' must be a string or None")
if macros is None:
macros = self.macros
elif isinstance(macros, list):
macros = macros + (self.macros or [])
else:
raise TypeError("'macros' (if supplied) must be a list of tuples")
if include_dirs is None:
include_dirs = self.include_dirs
elif isinstance(include_dirs, (list, tuple)):
include_dirs = list(include_dirs) + (self.include_dirs or [])
else:
raise TypeError(
"'include_dirs' (if supplied) must be a list of strings")
return output_dir, macros, include_dirs
def _prep_compile(self, sources, output_dir, depends=None):
"""Decide which souce files must be recompiled.
Determine the list of object files corresponding to 'sources',
and figure out which ones really need to be recompiled.
Return a list of all object files and a dictionary telling
which source files can be skipped.
"""
# Get the list of expected output (object) files
objects = self.object_filenames(sources, output_dir=output_dir)
assert len(objects) == len(sources)
# Return an empty dict for the "which source files can be skipped"
# return value to preserve API compatibility.
return objects, {}
def _fix_object_args(self, objects, output_dir):
"""Typecheck and fix up some arguments supplied to various methods.
Specifically: ensure that 'objects' is a list; if output_dir is
None, replace with self.output_dir. Return fixed versions of
'objects' and 'output_dir'.
"""
if not isinstance(objects, (list, tuple)):
raise TypeError("'objects' must be a list or tuple of strings")
objects = list(objects)
if output_dir is None:
output_dir = self.output_dir
elif not isinstance(output_dir, str):
raise TypeError("'output_dir' must be a string or None")
return (objects, output_dir)
def _fix_lib_args(self, libraries, library_dirs, runtime_library_dirs):
"""Typecheck and fix up some of the arguments supplied to the
'link_*' methods. Specifically: ensure that all arguments are
lists, and augment them with their permanent versions
(eg. 'self.libraries' augments 'libraries'). Return a tuple with
fixed versions of all arguments.
"""
if libraries is None:
libraries = self.libraries
elif isinstance(libraries, (list, tuple)):
libraries = list (libraries) + (self.libraries or [])
else:
raise TypeError(
"'libraries' (if supplied) must be a list of strings")
if library_dirs is None:
library_dirs = self.library_dirs
elif isinstance(library_dirs, (list, tuple)):
library_dirs = list (library_dirs) + (self.library_dirs or [])
else:
raise TypeError(
"'library_dirs' (if supplied) must be a list of strings")
if runtime_library_dirs is None:
runtime_library_dirs = self.runtime_library_dirs
elif isinstance(runtime_library_dirs, (list, tuple)):
runtime_library_dirs = (list(runtime_library_dirs) +
(self.runtime_library_dirs or []))
else:
raise TypeError("'runtime_library_dirs' (if supplied) "
"must be a list of strings")
return (libraries, library_dirs, runtime_library_dirs)
def _need_link(self, objects, output_file):
"""Return true if we need to relink the files listed in 'objects'
to recreate 'output_file'.
"""
if self.force:
return True
else:
if self.dry_run:
newer = newer_group (objects, output_file, missing='newer')
else:
newer = newer_group (objects, output_file)
return newer
def detect_language(self, sources):
"""Detect the language of a given file, or list of files. Uses
language_map, and language_order to do the job.
"""
if not isinstance(sources, list):
sources = [sources]
lang = None
index = len(self.language_order)
for source in sources:
base, ext = os.path.splitext(source)
extlang = self.language_map.get(ext)
try:
extindex = self.language_order.index(extlang)
if extindex < index:
lang = extlang
index = extindex
except ValueError:
pass
return lang
# -- Worker methods ------------------------------------------------
# (must be implemented by subclasses)
def preprocess(self, source, output_file=None, macros=None,
include_dirs=None, extra_preargs=None, extra_postargs=None):
"""Preprocess a single C/C++ source file, named in 'source'.
Output will be written to file named 'output_file', or stdout if
'output_file' not supplied. 'macros' is a list of macro
definitions as for 'compile()', which will augment the macros set
with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
list of directory names that will be added to the default list.
Raises PreprocessError on failure.
"""
pass
def compile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
"""Compile one or more source files.
'sources' must be a list of filenames, most likely C/C++
files, but in reality anything that can be handled by a
particular compiler and compiler class (eg. MSVCCompiler can
handle resource files in 'sources'). Return a list of object
filenames, one per source filename in 'sources'. Depending on
the implementation, not all source files will necessarily be
compiled, but all corresponding object filenames will be
returned.
If 'output_dir' is given, object files will be put under it, while
retaining their original path component. That is, "foo/bar.c"
normally compiles to "foo/bar.o" (for a Unix implementation); if
'output_dir' is "build", then it would compile to
"build/foo/bar.o".
'macros', if given, must be a list of macro definitions. A macro
definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
The former defines a macro; if the value is None, the macro is
defined without an explicit value. The 1-tuple case undefines a
macro. Later definitions/redefinitions/ undefinitions take
precedence.
'include_dirs', if given, must be a list of strings, the
directories to add to the default include file search path for this
compilation only.
'debug' is a boolean; if true, the compiler will be instructed to
output debug symbols in (or alongside) the object file(s).
'extra_preargs' and 'extra_postargs' are implementation- dependent.
On platforms that have the notion of a command-line (e.g. Unix,
DOS/Windows), they are most likely lists of strings: extra
command-line arguments to prepend/append to the compiler command
line. On other platforms, consult the implementation class
documentation. In any event, they are intended as an escape hatch
for those occasions when the abstract compiler framework doesn't
cut the mustard.
'depends', if given, is a list of filenames that all targets
depend on. If a source file is older than any file in
depends, then the source file will be recompiled. This
supports dependency tracking, but only at a coarse
granularity.
Raises CompileError on failure.
"""
# A concrete compiler class can either override this method
# entirely or implement _compile().
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# Return *all* object filenames, not just the ones we just built.
return objects
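    # Example (illustrative): a typical call made by build-machinery code,
    # assuming `compiler` is a concrete subclass instance and the sources exist:
    #
    #     objects = compiler.compile(['foo.c', 'bar.c'],
    #                                output_dir='build',
    #                                macros=[('NDEBUG', None)],
    #                                include_dirs=['include'],
    #                                debug=0)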
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile 'src' to product 'obj'."""
# A concrete compiler class that does not override compile()
# should implement _compile().
pass
def create_static_lib(self, objects, output_libname, output_dir=None,
debug=0, target_lang=None):
"""Link a bunch of stuff together to create a static library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects', the extra object files supplied to
'add_link_object()' and/or 'set_link_objects()', the libraries
supplied to 'add_library()' and/or 'set_libraries()', and the
libraries supplied as 'libraries' (if any).
'output_libname' should be a library name, not a filename; the
filename will be inferred from the library name. 'output_dir' is
the directory where the library file will be put.
'debug' is a boolean; if true, debugging information will be
included in the library (note that on most platforms, it is the
compile step where this matters: the 'debug' flag is included here
just for consistency).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LibError on failure.
"""
pass
# values for target_desc parameter in link()
SHARED_OBJECT = "shared_object"
SHARED_LIBRARY = "shared_library"
EXECUTABLE = "executable"
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
"""Link a bunch of stuff together to create an executable or
shared library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects'. 'output_filename' should be a filename. If
'output_dir' is supplied, 'output_filename' is relative to it
(i.e. 'output_filename' can provide directory components if
needed).
'libraries' is a list of libraries to link against. These are
library names, not filenames, since they're translated into
filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
on Unix and "foo.lib" on DOS/Windows). However, they can include a
directory component, which means the linker will look in that
specific directory rather than searching all the normal locations.
'library_dirs', if supplied, should be a list of directories to
search for libraries that were specified as bare library names
(ie. no directory component). These are on top of the system
default and those supplied to 'add_library_dir()' and/or
'set_library_dirs()'. 'runtime_library_dirs' is a list of
directories that will be embedded into the shared library and used
to search for other shared libraries that *it* depends on at
run-time. (This may only be relevant on Unix.)
'export_symbols' is a list of symbols that the shared library will
export. (This appears to be relevant only on Windows.)
'debug' is as for 'compile()' and 'create_static_lib()', with the
slight distinction that it actually matters on most platforms (as
opposed to 'create_static_lib()', which includes a 'debug' flag
mostly for form's sake).
'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
of course that they supply command-line arguments for the
particular linker being used).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LinkError on failure.
"""
raise NotImplementedError
# Old 'link_*()' methods, rewritten to use the new 'link()' method.
def link_shared_lib(self,
objects,
output_libname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_LIBRARY, objects,
self.library_filename(output_libname, lib_type='shared'),
output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_shared_object(self,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_OBJECT, objects,
output_filename, output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_executable(self,
objects,
output_progname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
target_lang=None):
self.link(CCompiler.EXECUTABLE, objects,
self.executable_filename(output_progname), output_dir,
libraries, library_dirs, runtime_library_dirs, None,
debug, extra_preargs, extra_postargs, None, target_lang)
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options() function; there is
# no appropriate default implementation so subclasses should
# implement all of these.
def library_dir_option(self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for libraries.
"""
raise NotImplementedError
def runtime_library_dir_option(self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for runtime libraries.
"""
raise NotImplementedError
def library_option(self, lib):
"""Return the compiler option to add 'lib' to the list of libraries
linked into the shared library or executable.
"""
raise NotImplementedError
def has_function(self, funcname, includes=None, include_dirs=None,
libraries=None, library_dirs=None):
"""Return a boolean indicating whether funcname is supported on
the current platform. The optional arguments can be used to
augment the compilation environment.
"""
# this can't be included at module scope because it tries to
# import math which might not be available at that point - maybe
# the necessary logic should just be inlined?
import tempfile
if includes is None:
includes = []
if include_dirs is None:
include_dirs = []
if libraries is None:
libraries = []
if library_dirs is None:
library_dirs = []
fd, fname = tempfile.mkstemp(".c", funcname, text=True)
f = os.fdopen(fd, "w")
try:
for incl in includes:
f.write("""#include "%s"\n""" % incl)
f.write("""\
main (int argc, char **argv) {
%s();
}
""" % funcname)
finally:
f.close()
try:
objects = self.compile([fname], include_dirs=include_dirs)
except CompileError:
return False
try:
self.link_executable(objects, "a.out",
libraries=libraries,
library_dirs=library_dirs)
except (LinkError, TypeError):
return False
return True
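    # Example (illustrative): probe for an optional libm symbol before
    # enabling a feature, e.g.
    #
    #     if compiler.has_function('sincos', libraries=['m']):
    #         macros.append(('HAVE_SINCOS', '1'))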
def find_library_file (self, dirs, lib, debug=0):
"""Search the specified list of directories for a static or shared
library file 'lib' and return the full path to that file. If
'debug' true, look for a debugging version (if that makes sense on
the current platform). Return None if 'lib' wasn't found in any of
the specified directories.
"""
raise NotImplementedError
# -- Filename generation methods -----------------------------------
# The default implementation of the filename generating methods are
# prejudiced towards the Unix/DOS/Windows view of the world:
# * object files are named by replacing the source file extension
# (eg. .c/.cpp -> .o/.obj)
# * library files (shared or static) are named by plugging the
# library name and extension into a format string, eg.
# "lib%s.%s" % (lib_name, ".a") for Unix static libraries
# * executables are named by appending an extension (possibly
# empty) to the program name: eg. progname + ".exe" for
# Windows
#
# To reduce redundant code, these methods expect to find
# several attributes in the current object (presumably defined
# as class attributes):
# * src_extensions -
# list of C/C++ source file extensions, eg. ['.c', '.cpp']
# * obj_extension -
# object file extension, eg. '.o' or '.obj'
# * static_lib_extension -
# extension for static library files, eg. '.a' or '.lib'
# * shared_lib_extension -
# extension for shared library/object files, eg. '.so', '.dll'
# * static_lib_format -
# format string for generating static library filenames,
# eg. 'lib%s.%s' or '%s.%s'
# * shared_lib_format
# format string for generating shared library filenames
# (probably same as static_lib_format, since the extension
# is one of the intended parameters to the format string)
# * exe_extension -
# extension for executable files, eg. '' or '.exe'
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
raise UnknownFileError(
"unknown file type '%s' (from '%s')" % (ext, src_name))
if strip_dir:
base = os.path.basename(base)
obj_names.append(os.path.join(output_dir,
base + self.obj_extension))
return obj_names
def shared_object_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename(basename)
return os.path.join(output_dir, basename + self.shared_lib_extension)
def executable_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename(basename)
return os.path.join(output_dir, basename + (self.exe_extension or ''))
def library_filename(self, libname, lib_type='static', # or 'shared'
strip_dir=0, output_dir=''):
assert output_dir is not None
if lib_type not in ("static", "shared", "dylib", "xcode_stub"):
raise ValueError(
"'lib_type' must be \"static\", \"shared\", \"dylib\", or \"xcode_stub\"")
fmt = getattr(self, lib_type + "_lib_format")
ext = getattr(self, lib_type + "_lib_extension")
dir, base = os.path.split(libname)
filename = fmt % (base, ext)
if strip_dir:
dir = ''
return os.path.join(output_dir, dir, filename)
# -- Utility methods -----------------------------------------------
def announce(self, msg, level=1):
log.debug(msg)
def debug_print(self, msg):
from distutils.debug import DEBUG
if DEBUG:
print(msg)
def warn(self, msg):
sys.stderr.write("warning: %s\n" % msg)
def execute(self, func, args, msg=None, level=1):
execute(func, args, msg, self.dry_run)
def spawn(self, cmd):
spawn(cmd, dry_run=self.dry_run)
def move_file(self, src, dst):
return move_file(src, dst, dry_run=self.dry_run)
def mkpath (self, name, mode=0o777):
mkpath(name, mode, dry_run=self.dry_run)
# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
# type for that platform. Keys are interpreted as re match
# patterns. Order is important; platform mappings are preferred over
# OS names.
_default_compilers = (
# Platform string mappings
# on a cygwin built python we can use gcc like an ordinary UNIXish
# compiler
('cygwin.*', 'unix'),
# OS name mappings
('posix', 'unix'),
('nt', 'msvc'),
)
def get_default_compiler(osname=None, platform=None):
"""Determine the default compiler to use for the given platform.
osname should be one of the standard Python OS names (i.e. the
ones returned by os.name) and platform the common value
returned by sys.platform for the platform in question.
The default values are os.name and sys.platform in case the
parameters are not given.
"""
if osname is None:
osname = os.name
if platform is None:
platform = sys.platform
for pattern, compiler in _default_compilers:
if re.match(pattern, platform) is not None or \
re.match(pattern, osname) is not None:
return compiler
# Default to Unix compiler
return 'unix'
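# For example (per _default_compilers above), a typical call to
# get_default_compiler('posix') returns 'unix', while
# get_default_compiler('nt') returns 'msvc'.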
# Map compiler types to (module_name, class_name) pairs -- ie. where to
# find the code that implements an interface to this compiler. (The module
# is assumed to be in the 'distutils' package.)
compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler',
"standard UNIX-style compiler"),
'msvc': ('_msvccompiler', 'MSVCCompiler',
"Microsoft Visual C++"),
'cygwin': ('cygwinccompiler', 'CygwinCCompiler',
"Cygwin port of GNU C Compiler for Win32"),
'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32"),
'bcpp': ('bcppcompiler', 'BCPPCompiler',
"Borland C++ Compiler"),
}
def show_compilers():
"""Print list of available compilers (used by the "--help-compiler"
options to "build", "build_ext", "build_clib").
"""
# XXX this "knows" that the compiler option it's describing is
# "--compiler", which just happens to be the case for the three
# commands that use it.
from distutils.fancy_getopt import FancyGetopt
compilers = []
for compiler in compiler_class.keys():
compilers.append(("compiler="+compiler, None,
compiler_class[compiler][2]))
compilers.sort()
pretty_printer = FancyGetopt(compilers)
pretty_printer.print_help("List of available compilers:")
def new_compiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0):
"""Generate an instance of some CCompiler subclass for the supplied
platform/compiler combination. 'plat' defaults to 'os.name'
(eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
for that platform. Currently only 'posix' and 'nt' are supported, and
the default compilers are "traditional Unix interface" (UnixCCompiler
class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
possible to ask for a Unix compiler object under Windows, and a
Microsoft compiler object under Unix -- if you supply a value for
'compiler', 'plat' is ignored.
"""
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError(msg)
try:
module_name = "distutils." + module_name
__import__ (module_name)
module = sys.modules[module_name]
klass = vars(module)[class_name]
except ImportError:
raise DistutilsModuleError(
"can't compile C/C++ code: unable to load module '%s'" % \
module_name)
except KeyError:
raise DistutilsModuleError(
"can't compile C/C++ code: unable to find class '%s' "
"in module '%s'" % (class_name, module_name))
# XXX The None is necessary to preserve backwards compatibility
# with classes that expect verbose to be the first positional
# argument.
return klass(None, dry_run, force)
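# Usage sketch (illustrative only): the typical build sequence driven through
# the factory above.  File and directory names are hypothetical.
def _example_build_shared_object():
    compiler = new_compiler()
    compiler.add_include_dir('include')
    compiler.define_macro('NDEBUG')
    objects = compiler.compile(['src/foo.c', 'src/bar.c'], output_dir='build')
    compiler.link_shared_object(objects, 'foo.so', output_dir='build')
    return objects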
def gen_preprocess_options(macros, include_dirs):
"""Generate C pre-processor options (-D, -U, -I) as used by at least
two types of compilers: the typical Unix compiler and Visual C++.
'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
means undefine (-U) macro 'name', and (name,value) means define (-D)
macro 'name' to 'value'. 'include_dirs' is just a list of directory
names to be added to the header file search path (-I). Returns a list
of command-line options suitable for either Unix compilers or Visual
C++.
"""
# XXX it would be nice (mainly aesthetic, and so we don't generate
# stupid-looking command lines) to go over 'macros' and eliminate
# redundant definitions/undefinitions (ie. ensure that only the
# latest mention of a particular macro winds up on the command
# line). I don't think it's essential, though, since most (all?)
# Unix C compilers only pay attention to the latest -D or -U
# mention of a macro on their command line. Similar situation for
# 'include_dirs'. I'm punting on both for now. Anyways, weeding out
# redundancies like this should probably be the province of
# CCompiler, since the data structures used are inherited from it
# and therefore common to all CCompiler classes.
pp_opts = []
for macro in macros:
if not (isinstance(macro, tuple) and 1 <= len(macro) <= 2):
raise TypeError(
"bad macro definition '%s': "
"each element of 'macros' list must be a 1- or 2-tuple"
% macro)
if len(macro) == 1: # undefine this macro
pp_opts.append("-U%s" % macro[0])
elif len(macro) == 2:
if macro[1] is None: # define with no explicit value
pp_opts.append("-D%s" % macro[0])
else:
# XXX *don't* need to be clever about quoting the
# macro value here, because we're going to avoid the
# shell at all costs when we spawn the command!
pp_opts.append("-D%s=%s" % macro)
for dir in include_dirs:
pp_opts.append("-I%s" % dir)
return pp_opts
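# Example (illustrative):
#     gen_preprocess_options([('DEBUG', '1'), ('PYDEBUG', None), ('NDEBUG',)],
#                            ['include'])
# returns ['-DDEBUG=1', '-DPYDEBUG', '-UNDEBUG', '-Iinclude'].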
def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries):
"""Generate linker options for searching library directories and
linking with specific libraries. 'libraries' and 'library_dirs' are,
respectively, lists of library names (not filenames!) and search
directories. Returns a list of command-line options suitable for use
with some compiler (depending on the two format strings passed in).
"""
lib_opts = []
for dir in library_dirs:
lib_opts.append(compiler.library_dir_option(dir))
for dir in runtime_library_dirs:
opt = compiler.runtime_library_dir_option(dir)
if isinstance(opt, list):
lib_opts = lib_opts + opt
else:
lib_opts.append(opt)
# XXX it's important that we *not* remove redundant library mentions!
# sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
# resolve all symbols. I just hope we never have to say "-lfoo obj.o
# -lbar" to get things to work -- that's certainly a possibility, but a
# pretty nasty way to arrange your C code.
for lib in libraries:
(lib_dir, lib_name) = os.path.split(lib)
if lib_dir:
lib_file = compiler.find_library_file([lib_dir], lib_name)
if lib_file:
lib_opts.append(lib_file)
else:
compiler.warn("no library file corresponding to "
"'%s' found (skipping)" % lib)
else:
lib_opts.append(compiler.library_option (lib))
return lib_opts
|
the-stack_0_6893 | import pytest
from thefrick.rules.tsuru_login import match, get_new_command
from thefrick.types import Command
error_msg = (
"Error: you're not authenticated or your session has expired.",
("You're not authenticated or your session has expired. "
"Please use \"login\" command for authentication."),
)
@pytest.mark.parametrize('command', [
Command('tsuru app-shell', error_msg[0]),
Command('tsuru app-log -f', error_msg[1]),
])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command', [
Command('tsuru', ''),
Command('tsuru app-restart', 'Error: unauthorized'),
Command('tsuru app-log -f', 'Error: unparseable data'),
])
def test_not_match(command):
assert not match(command)
@pytest.mark.parametrize('command, new_command', [
(Command('tsuru app-shell', error_msg[0]),
'tsuru login && tsuru app-shell'),
(Command('tsuru app-log -f', error_msg[1]),
'tsuru login && tsuru app-log -f'),
])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
|
the-stack_0_6896 | from vpp_tunnel_interface import VppTunnelInterface
class VppIpsecTunInterface(VppTunnelInterface):
"""
VPP IPsec Tunnel interface
"""
def __init__(self, test, parent_if, local_spi,
remote_spi, crypto_alg, local_crypto_key, remote_crypto_key,
integ_alg, local_integ_key, remote_integ_key, is_ip6=False):
super(VppIpsecTunInterface, self).__init__(test, parent_if)
self.local_spi = local_spi
self.remote_spi = remote_spi
self.crypto_alg = crypto_alg
self.local_crypto_key = local_crypto_key
self.remote_crypto_key = remote_crypto_key
self.integ_alg = integ_alg
self.local_integ_key = local_integ_key
self.remote_integ_key = remote_integ_key
if is_ip6:
self.local_ip = self.parent_if.local_ip6
self.remote_ip = self.parent_if.remote_ip6
else:
self.local_ip = self.parent_if.local_ip4
self.remote_ip = self.parent_if.remote_ip4
def add_vpp_config(self):
r = self.test.vapi.ipsec_tunnel_if_add_del(
self.local_ip, self.remote_ip,
self.remote_spi, self.local_spi,
self.crypto_alg, self.local_crypto_key, self.remote_crypto_key,
self.integ_alg, self.local_integ_key, self.remote_integ_key)
self.set_sw_if_index(r.sw_if_index)
self.generate_remote_hosts()
self.test.registry.register(self, self.test.logger)
def remove_vpp_config(self):
self.test.vapi.ipsec_tunnel_if_add_del(
self.local_ip, self.remote_ip,
self.remote_spi, self.local_spi,
self.crypto_alg, self.local_crypto_key, self.remote_crypto_key,
self.integ_alg, self.local_integ_key, self.remote_integ_key,
is_add=0)
def object_id(self):
return "ipsec-tun-if-%d" % self._sw_if_index
class VppIpsecGRETunInterface(VppTunnelInterface):
"""
VPP IPsec GRE Tunnel interface
this creates headers
IP / ESP / IP / GRE / payload
i.e. it's GRE over IPSEC, rather than IPSEC over GRE.
"""
def __init__(self, test, parent_if, sa_out, sa_in):
super(VppIpsecGRETunInterface, self).__init__(test, parent_if)
self.sa_in = sa_in
self.sa_out = sa_out
def add_vpp_config(self):
r = self.test.vapi.ipsec_gre_tunnel_add_del(
self.parent_if.local_ip4n,
self.parent_if.remote_ip4n,
self.sa_out,
self.sa_in)
self.set_sw_if_index(r.sw_if_index)
self.generate_remote_hosts()
self.test.registry.register(self, self.test.logger)
def remove_vpp_config(self):
self.test.vapi.ipsec_gre_tunnel_add_del(
self.parent_if.local_ip4n,
self.parent_if.remote_ip4n,
self.sa_out,
self.sa_in,
is_add=0)
def query_vpp_config(self):
ts = self.test.vapi.ipsec_gre_tunnel_dump(sw_if_index=0xffffffff)
for t in ts:
if t.tunnel.sw_if_index == self._sw_if_index:
return True
return False
def __str__(self):
return self.object_id()
def object_id(self):
return "ipsec-gre-tun-if-%d" % self._sw_if_index
|
the-stack_0_6897 | import cv2
import numpy as np
f='image287.jpg'
img=cv2.imread(f)
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray=np.float32(gray)
dst=cv2.cornerHarris(gray,2,3,0.04)
dst=cv2.dilate(dst,None)
ret,dst=cv2.threshold(dst,0.01*dst.max(),255,0)
dst=np.uint8(dst)
ret,labels,stats,centroids=cv2.connectedComponentsWithStats(dst)
criteria=(cv2.TermCriteria_EPS+cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners=cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria)
res=np.hstack((centroids,corners))
res=np.int0(res)
img[res[:,1],res[:,0]]=[0,0,255]
img[res[:,3],res[:,2]]=[0,255,0]
cv2.imshow('dst',img)
if cv2.waitKey(0) &0xff == 27:
cv2.destroyAllWindows()
|
the-stack_0_6898 | from PyQt5.QtCore import QTimer, Qt
from PyQt5.QtWidgets import (QApplication, QVBoxLayout, QMainWindow, QTabWidget,
QPushButton, QWidget, QFileDialog)
from controller.config_controller import ConfigController
from widgets import (NewOrLoad, ExperimentConfigTab, CardSelectionsConfigTab,
InstructionsConfigTab)
ALIGN_RIGHT = 0x0002
class Application(object):
new_or_load = None
main_window = None
app = None
args = None
mode_select_widget = None
user_info_widget = None
experiment_window = None
controller = None
_startup_timer = None
def __init__(self, args):
self.args = args
self.controller = ConfigController()
def run(self):
self.app = QApplication(self.args)
self._startup_timer = QTimer().singleShot(0, self.on_started)
return self.app.exec_()
def on_started(self):
self.ask_new_or_load()
def ask_new_or_load(self):
self.new_or_load = NewOrLoad(self.controller)
self.new_or_load.accepted.connect(self.open_configurator)
def open_configurator(self):
cs_conf = CardSelectionsConfigTab(self.controller)
instr_conf = InstructionsConfigTab(self.controller)
exp_conf = ExperimentConfigTab(self.controller)
tabs = QTabWidget()
tabs.addTab(exp_conf, "Basic")
tabs.addTab(instr_conf, "Instructions")
tabs.addTab(cs_conf, "Cards")
save = QPushButton("Save and exit")
save.clicked.connect(self.on_save)
vbox = QVBoxLayout()
vbox.addWidget(tabs)
vbox.addWidget(save, alignment=Qt.AlignRight)
wrap = QWidget()
wrap.setLayout(vbox)
self.main_window = QMainWindow()
self.main_window.setCentralWidget(wrap)
self.main_window.setGeometry(300, 200, 500, 700)
self.main_window.show()
def on_save(self):
errors = self.controller.get_errors_list()
if errors:
pass
else:
fname = QFileDialog.getSaveFileName(
self.main_window,
'Save as',
filter='Experiment Configuration (*.conf)')
fname = fname[0]
if not fname:
return
if not fname.endswith('.conf'):
fname += '.conf'
if self.controller.save(fname):
self.app.quit()
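# Hedged usage sketch (not part of the original module): assuming this file is
# the configurator's entry point, the Application class defined above would be
# started roughly like this.
if __name__ == "__main__":
    import sys
    app = Application(sys.argv)
    sys.exit(app.run())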
|
the-stack_0_6901 | # Import models
from mmic_md.models.input import MDInput
from mmic_md_gmx.models import ComputeGmxInput
from cmselemental.util.files import random_file
# Import components
from mmic_cmd.components import CmdComponent
from mmic.components.blueprints import GenericComponent
from typing import Any, Dict, List, Tuple, Optional
import os
import shutil
__all__ = ["PrepGmxComponent"]
_supported_solvents = ("spc", "tip3p", "tip4p")  # This line may be deleted later
class PrepGmxComponent(GenericComponent):
"""
Prepares input for running molecular dynamics simulations using GMX engine.
    The Molecule object from the MMIC schema is converted to a .gro file here,
    and the .mdp and .top files are also constructed according to the info in
    the MMIC schema.
"""
@classmethod
def input(cls):
return MDInput
@classmethod
def output(cls):
return ComputeGmxInput
def execute(
self,
inputs: MDInput,
extra_outfiles: Optional[List[str]] = None,
extra_commands: Optional[List[str]] = None,
scratch_name: Optional[str] = None,
timeout: Optional[int] = None,
) -> Tuple[bool, ComputeGmxInput]:
if isinstance(inputs, dict):
inputs = self.input()(**inputs)
# Start to build mdp file dict
mdp_inputs = {
"integrator": inputs.method,
"dt": inputs.step_size,
"nsteps": inputs.max_steps,
"coulombtype": inputs.long_forces.method,
"vdw-type": inputs.short_forces.method,
"pbc": inputs.boundary,
}
# Extract output setup from freq_write dict
for key, val in inputs.freq_write.items():
mdp_inputs[key] = val
# Extract T couple and P couple setup
for key, val in inputs.Tcoupl_arg.items():
mdp_inputs[key] = val
for key, val in inputs.Pcoupl_arg.items():
mdp_inputs[key] = val
        # Translate boundary str tuple (periodic, periodic, periodic) to a string, e.g. "xyz"
pbc_dict = dict(zip(["x", "y", "z"], list(mdp_inputs["pbc"])))
pbc = ""
for dim in list(pbc_dict.keys()):
if pbc_dict[dim] != "periodic":
continue
else:
                pbc = pbc + dim  # pbc is a str, may need to be initialized elsewhere
mdp_inputs["pbc"] = pbc
# Write .mdp file
mdp_file = random_file(suffix=".mdp")
with open(mdp_file, "w") as inp:
for key, val in mdp_inputs.items():
inp.write(f"{key} = {val}\n")
fs = inputs.forcefield
mols = inputs.molecule
ff_name, ff = list(
fs.items()
).pop() # Here ff_name gets actually the related mol name, but it will not be used
mol_name, mol = list(mols.items()).pop()
gro_file = random_file(suffix=".gro") # output gro
top_file = random_file(suffix=".top")
boxed_gro_file = random_file(suffix=".gro")
mol.to_file(gro_file, translator="mmic_parmed")
ff.to_file(top_file, translator="mmic_parmed")
input_model = {
"gro_file": gro_file,
"proc_input": inputs,
"boxed_gro_file": boxed_gro_file,
}
clean_files, cmd_input = self.build_input(input_model)
rvalue = CmdComponent.compute(cmd_input)
boxed_gro_file = str(rvalue.outfiles[boxed_gro_file])
scratch_dir = str(rvalue.scratch_directory)
self.cleanup(clean_files) # Del the gro in the working dir
gmx_compute = ComputeGmxInput(
proc_input=inputs,
mdp_file=mdp_file,
forcefield=top_file,
molecule=boxed_gro_file,
scratch_dir=scratch_dir,
schema_name=inputs.schema_name,
schema_version=inputs.schema_version,
)
return True, gmx_compute
@staticmethod
def cleanup(remove: List[str]):
for item in remove:
if os.path.isdir(item):
shutil.rmtree(item)
elif os.path.isfile(item):
os.remove(item)
def build_input(
self,
inputs: Dict[str, Any],
config: Optional["TaskConfig"] = None,
template: Optional[str] = None,
) -> Dict[str, Any]:
assert inputs["proc_input"].engine == "gmx", "Engine must be gmx (Gromacs)!"
clean_files = []
boxed_gro_file = inputs["boxed_gro_file"]
clean_files.append(inputs["gro_file"])
env = os.environ.copy()
if config:
env["MKL_NUM_THREADS"] = str(config.ncores)
env["OMP_NUM_THREADS"] = str(config.ncores)
scratch_directory = config.scratch_directory if config else None
cmd = [
inputs["proc_input"].engine,
"editconf",
"-f",
inputs["gro_file"],
"-d",
"2",
"-o",
boxed_gro_file,
]
outfiles = [boxed_gro_file]
return clean_files, {
"command": cmd,
"infiles": [inputs["gro_file"]],
"outfiles": outfiles,
"outfiles_track": outfiles,
"scratch_directory": scratch_directory,
"environment": env,
"scratch_messy": True,
}
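# Hedged usage sketch (not part of the original component): mmic components are
# normally driven through the `compute` classmethod inherited from the
# GenericComponent blueprint (an assumption here, since that base class is not
# shown). `md_input` stands for a fully populated, hypothetical MDInput instance.
def _example_prep(md_input: MDInput) -> ComputeGmxInput:
    success, gmx_compute_input = PrepGmxComponent.compute(md_input)
    assert success, "preparation step failed"
    return gmx_compute_input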
|
the-stack_0_6903 | # _base_ = ['../../_base_/models/csn_ig65m_pretrained.py']
# ir-CSN (interaction-reduced channel-separated network) architecture
ann_type = 'tanz_base' # * change accordingly
num_classes = 9 if ann_type == 'tanz_base' else 42
# model settings
model = dict(
type='Recognizer3D',
backbone=dict(
type='ResNet3dCSN',
pretrained2d=False, # doesn't have imagenet pre-training
# but has 3D Reset pretraining
pretrained= # noqa: E251
'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth', # noqa: E501
depth=152,
with_pool2=False,
bottleneck_mode='ir',
norm_eval=True,
bn_frozen=True,
zero_init_residual=False),
cls_head=dict(
type='I3DHead',
num_classes=num_classes,
in_channels=2048,
spatial_type='avg',
dropout_ratio=0.5,
init_std=0.01),
# model training and testing settings
train_cfg=None,
test_cfg=dict(average_clips='prob'))
# dataset settings
dataset_type = 'RawframeDataset'
data_root = ''
data_root_val = data_root
data_root_test = data_root
ann_file_train = '/datasets/write/rawframes/rawframes_train.txt'
ann_file_val = '/datasets/write/rawframes/rawframes_val.txt'
ann_file_test = '/datasets/write/rawframes/rawframes_test.txt'
img_norm_cfg = dict(
mean=[110.2008, 100.63983, 95.99475],
std=[58.14765, 56.46975, 55.332195],
to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=10,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='ThreeCrop', crop_size=256),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=3,
workers_per_gpu=3,
test_dataloader=dict(videos_per_gpu=1, workers_per_gpu=1),
val_dataloader=dict(videos_per_gpu=1, workers_per_gpu=1),
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.05625, # for 4 gpus
momentum=0.9,
weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
step=[32, 48],
warmup='linear',
warmup_ratio=0.1,
warmup_by_epoch=True,
warmup_iters=16)
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(
interval=5,
metric_options=dict(top_k_accuracy=dict(topk=(1, 2, 3, 4, 5))),
)
eval_config = dict(
metric_options=dict(top_k_accuracy=dict(topk=(1, 2, 3, 4, 5))))
log_config = dict(
interval=20,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = ('https://download.openmmlab.com/mmaction/recognition/csn/'
'vmz/vmz_ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-e63ee1bd.pth')
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = True
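# Hedged usage note (not part of the original config): with a standard mmaction2
# checkout, a config like this is usually launched through the stock entry
# points, e.g. `python tools/train.py <path/to/this_config>.py --validate` on a
# single GPU or `bash tools/dist_train.sh <path/to/this_config>.py 4` for four
# GPUs. The exact script names and paths are assumptions about the surrounding
# repository, not something defined in this file.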
|
the-stack_0_6904 | import graphene
from django.core.exceptions import ValidationError
from ...account import models as account_models
from ...core.error_codes import ShopErrorCode
from ...core.permissions import SitePermissions
from ...core.utils.url import validate_storefront_url
from ...site import models as site_models
from ..account.i18n import I18nMixin
from ..account.types import AddressInput
from ..core.enums import WeightUnitsEnum
from ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation
from ..core.types.common import ShopError
from ..product.types import Collection
from .types import AuthorizationKey, AuthorizationKeyType, Shop
class ShopSettingsInput(graphene.InputObjectType):
header_text = graphene.String(description="Header text.")
description = graphene.String(description="SEO description.")
include_taxes_in_prices = graphene.Boolean(description="Include taxes in prices.")
display_gross_prices = graphene.Boolean(
description="Display prices with tax in store."
)
charge_taxes_on_shipping = graphene.Boolean(description="Charge taxes on shipping.")
track_inventory_by_default = graphene.Boolean(
description="Enable inventory tracking."
)
default_weight_unit = WeightUnitsEnum(description="Default weight unit.")
automatic_fulfillment_digital_products = graphene.Boolean(
description="Enable automatic fulfillment for all digital products."
)
default_digital_max_downloads = graphene.Int(
description="Default number of max downloads per digital content URL."
)
default_digital_url_valid_days = graphene.Int(
description="Default number of days which digital content URL will be valid."
)
default_mail_sender_name = graphene.String(
description="Default email sender's name."
)
default_mail_sender_address = graphene.String(
description="Default email sender's address."
)
customer_set_password_url = graphene.String(
description="URL of a view where customers can set their password."
)
class SiteDomainInput(graphene.InputObjectType):
domain = graphene.String(description="Domain name for shop.")
name = graphene.String(description="Shop site name.")
class ShopSettingsUpdate(BaseMutation):
shop = graphene.Field(Shop, description="Updated shop.")
class Arguments:
input = ShopSettingsInput(
description="Fields required to update shop settings.", required=True
)
class Meta:
description = "Updates shop settings."
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
@classmethod
def clean_input(cls, _info, _instance, data):
if data.get("customer_set_password_url"):
try:
validate_storefront_url(data["customer_set_password_url"])
except ValidationError as error:
raise ValidationError(
{"customer_set_password_url": error}, code=ShopErrorCode.INVALID
)
return data
@classmethod
def construct_instance(cls, instance, cleaned_data):
for field_name, desired_value in cleaned_data.items():
current_value = getattr(instance, field_name)
if current_value != desired_value:
setattr(instance, field_name, desired_value)
return instance
@classmethod
def perform_mutation(cls, _root, info, **data):
instance = info.context.site.settings
data = data.get("input")
cleaned_input = cls.clean_input(info, instance, data)
instance = cls.construct_instance(instance, cleaned_input)
cls.clean_instance(info, instance)
instance.save()
return ShopSettingsUpdate(shop=Shop())
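# Hedged usage note (not part of the original module): assuming this mutation is
# exposed in the public schema under its conventional camelCase name, a client
# could toggle gross-price display with a GraphQL document roughly like:
#
#   mutation {
#     shopSettingsUpdate(input: {displayGrossPrices: true}) {
#       shop { displayGrossPrices }
#       shopErrors { field code }
#     }
#   }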
class ShopAddressUpdate(BaseMutation, I18nMixin):
shop = graphene.Field(Shop, description="Updated shop.")
class Arguments:
input = AddressInput(description="Fields required to update shop address.")
class Meta:
description = (
"Update the shop's address. If the `null` value is passed, the currently "
"selected address will be deleted."
)
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
@classmethod
def perform_mutation(cls, _root, info, **data):
site_settings = info.context.site.settings
data = data.get("input")
if data:
if not site_settings.company_address:
company_address = account_models.Address()
else:
company_address = site_settings.company_address
company_address = cls.validate_address(data, company_address, info=info)
company_address.save()
site_settings.company_address = company_address
site_settings.save(update_fields=["company_address"])
else:
if site_settings.company_address:
site_settings.company_address.delete()
return ShopAddressUpdate(shop=Shop())
class ShopDomainUpdate(BaseMutation):
shop = graphene.Field(Shop, description="Updated shop.")
class Arguments:
input = SiteDomainInput(description="Fields required to update site.")
class Meta:
description = "Updates site domain of the shop."
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
@classmethod
def perform_mutation(cls, _root, info, **data):
site = info.context.site
data = data.get("input")
domain = data.get("domain")
name = data.get("name")
if domain is not None:
site.domain = domain
if name is not None:
site.name = name
cls.clean_instance(info, site)
site.save()
return ShopDomainUpdate(shop=Shop())
class ShopFetchTaxRates(BaseMutation):
shop = graphene.Field(Shop, description="Updated shop.")
class Meta:
description = "Fetch tax rates."
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
@classmethod
def perform_mutation(cls, _root, info):
if not info.context.plugins.fetch_taxes_data():
raise ValidationError(
"Could not fetch tax rates. Make sure you have supplied a "
"valid credential for your tax plugin.",
code=ShopErrorCode.CANNOT_FETCH_TAX_RATES.value,
)
return ShopFetchTaxRates(shop=Shop())
class HomepageCollectionUpdate(BaseMutation):
shop = graphene.Field(Shop, description="Updated shop.")
class Arguments:
collection = graphene.ID(description="Collection displayed on homepage.")
class Meta:
description = "Updates homepage collection of the shop."
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
@classmethod
def perform_mutation(cls, _root, info, collection=None):
new_collection = cls.get_node_or_error(
info, collection, field="collection", only_type=Collection
)
site_settings = info.context.site.settings
site_settings.homepage_collection = new_collection
cls.clean_instance(info, site_settings)
site_settings.save(update_fields=["homepage_collection"])
return HomepageCollectionUpdate(shop=Shop())
class AuthorizationKeyInput(graphene.InputObjectType):
key = graphene.String(
required=True, description="Client authorization key (client ID)."
)
password = graphene.String(required=True, description="Client secret.")
class AuthorizationKeyAdd(BaseMutation):
authorization_key = graphene.Field(
AuthorizationKey, description="Newly added authorization key."
)
shop = graphene.Field(Shop, description="Updated shop.")
class Meta:
description = "Adds an authorization key."
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
class Arguments:
key_type = AuthorizationKeyType(
required=True, description="Type of an authorization key to add."
)
input = AuthorizationKeyInput(
required=True, description="Fields required to create an authorization key."
)
@classmethod
def perform_mutation(cls, _root, info, key_type, **data):
if site_models.AuthorizationKey.objects.filter(name=key_type).exists():
raise ValidationError(
{
"key_type": ValidationError(
"Authorization key already exists.",
code=ShopErrorCode.ALREADY_EXISTS,
)
}
)
site_settings = info.context.site.settings
instance = site_models.AuthorizationKey(
name=key_type, site_settings=site_settings, **data.get("input")
)
cls.clean_instance(info, instance)
instance.save()
return AuthorizationKeyAdd(authorization_key=instance, shop=Shop())
class AuthorizationKeyDelete(BaseMutation):
authorization_key = graphene.Field(
AuthorizationKey, description="Authorization key that was deleted."
)
shop = graphene.Field(Shop, description="Updated shop.")
class Arguments:
key_type = AuthorizationKeyType(
required=True, description="Type of a key to delete."
)
class Meta:
description = "Deletes an authorization key."
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
@classmethod
def perform_mutation(cls, _root, info, key_type):
try:
site_settings = info.context.site.settings
instance = site_models.AuthorizationKey.objects.get(
name=key_type, site_settings=site_settings
)
except site_models.AuthorizationKey.DoesNotExist:
raise ValidationError(
{
"key_type": ValidationError(
"Couldn't resolve authorization key",
code=ShopErrorCode.NOT_FOUND,
)
}
)
instance.delete()
return AuthorizationKeyDelete(authorization_key=instance, shop=Shop())
class StaffNotificationRecipientInput(graphene.InputObjectType):
user = graphene.ID(
required=False,
description="The ID of the user subscribed to email notifications..",
)
email = graphene.String(
required=False,
description="Email address of a user subscribed to email notifications.",
)
active = graphene.Boolean(
required=False, description="Determines if a notification active."
)
class StaffNotificationRecipientCreate(ModelMutation):
class Arguments:
input = StaffNotificationRecipientInput(
required=True,
description="Fields required to create a staff notification recipient.",
)
class Meta:
description = "Creates a new staff notification recipient."
model = account_models.StaffNotificationRecipient
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
@classmethod
def clean_input(cls, info, instance, data):
cleaned_input = super().clean_input(info, instance, data)
cls.validate_input(instance, cleaned_input)
email = cleaned_input.pop("email", None)
if email:
staff_user = account_models.User.objects.filter(email=email).first()
if staff_user:
cleaned_input["user"] = staff_user
else:
cleaned_input["staff_email"] = email
return cleaned_input
@staticmethod
def validate_input(instance, cleaned_input):
email = cleaned_input.get("email")
user = cleaned_input.get("user")
if not email and not user:
if instance.id and "user" in cleaned_input or "email" in cleaned_input:
raise ValidationError(
{
"staff_notification": ValidationError(
"User and email cannot be set empty",
code=ShopErrorCode.INVALID,
)
}
)
if not instance.id:
raise ValidationError(
{
"staff_notification": ValidationError(
"User or email is required", code=ShopErrorCode.REQUIRED
)
}
)
if user and not user.is_staff:
raise ValidationError(
{
"user": ValidationError(
"User has to be staff user", code=ShopErrorCode.INVALID
)
}
)
class StaffNotificationRecipientUpdate(StaffNotificationRecipientCreate):
class Arguments:
id = graphene.ID(
required=True, description="ID of a staff notification recipient to update."
)
input = StaffNotificationRecipientInput(
required=True,
description="Fields required to update a staff notification recipient.",
)
class Meta:
description = "Updates a staff notification recipient."
model = account_models.StaffNotificationRecipient
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
class StaffNotificationRecipientDelete(ModelDeleteMutation):
class Arguments:
id = graphene.ID(
required=True, description="ID of a staff notification recipient to delete."
)
class Meta:
description = "Delete staff notification recipient."
model = account_models.StaffNotificationRecipient
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
|
the-stack_0_6905 | # Lint as: python3
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Anchor definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import utils
from object_detection import argmax_matcher
from object_detection import box_list
from object_detection import faster_rcnn_box_coder
from object_detection import region_similarity_calculator
from object_detection import target_assigner
# The minimum score to consider a logit for identifying detections.
MIN_CLASS_SCORE = -5.0
# The score for a dummy detection
_DUMMY_DETECTION_SCORE = -1e5
# The maximum number of (anchor,class) pairs to keep for non-max suppression.
MAX_DETECTION_POINTS = 5000
# The maximum number of detections per image.
MAX_DETECTIONS_PER_IMAGE = 100
# The minimal score threshold.
MIN_SCORE_THRESH = 0.4
def sigmoid(x):
"""Sigmoid function for use with Numpy for CPU evaluation."""
return 1 / (1 + np.exp(-x))
def decode_box_outputs(rel_codes, anchors):
"""Transforms relative regression coordinates to absolute positions.
Network predictions are normalized and relative to a given anchor; this
reverses the transformation and outputs absolute coordinates for the input
image.
Args:
rel_codes: box regression targets.
anchors: anchors on all feature levels.
Returns:
outputs: bounding boxes.
"""
ycenter_a = (anchors[0] + anchors[2]) / 2
xcenter_a = (anchors[1] + anchors[3]) / 2
ha = anchors[2] - anchors[0]
wa = anchors[3] - anchors[1]
ty, tx, th, tw = rel_codes
w = np.exp(tw) * wa
h = np.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return np.column_stack([ymin, xmin, ymax, xmax])
def decode_box_outputs_tf(rel_codes, anchors):
"""Transforms relative regression coordinates to absolute positions.
Network predictions are normalized and relative to a given anchor; this
reverses the transformation and outputs absolute coordinates for the input
image.
Args:
rel_codes: box regression targets.
anchors: anchors on all feature levels.
Returns:
outputs: bounding boxes.
"""
ycenter_a = (anchors[..., 0] + anchors[..., 2]) / 2
xcenter_a = (anchors[..., 1] + anchors[..., 3]) / 2
ha = anchors[..., 2] - anchors[..., 0]
wa = anchors[..., 3] - anchors[..., 1]
ty, tx, th, tw = tf.unstack(rel_codes, num=4, axis=-1)
w = tf.math.exp(tw) * wa
h = tf.math.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def diou_nms(dets, iou_thresh=None):
"""DIOU non-maximum suppression.
diou = iou - square of euclidian distance of box centers
/ square of diagonal of smallest enclosing bounding box
Reference: https://arxiv.org/pdf/1911.08287.pdf
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
    iou_thresh: IOU threshold.
Returns:
numpy.array: Retained boxes.
"""
iou_thresh = iou_thresh or 0.5
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
center_x = (x1 + x2) / 2
center_y = (y1 + y2) / 2
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
iou = intersection / (areas[i] + areas[order[1:]] - intersection)
smallest_enclosing_box_x1 = np.minimum(x1[i], x1[order[1:]])
smallest_enclosing_box_x2 = np.maximum(x2[i], x2[order[1:]])
smallest_enclosing_box_y1 = np.minimum(y1[i], y1[order[1:]])
smallest_enclosing_box_y2 = np.maximum(y2[i], y2[order[1:]])
square_of_the_diagonal = (
(smallest_enclosing_box_x2 - smallest_enclosing_box_x1)**2 +
(smallest_enclosing_box_y2 - smallest_enclosing_box_y1)**2)
square_of_center_distance = ((center_x[i] - center_x[order[1:]])**2 +
(center_y[i] - center_y[order[1:]])**2)
# Add 1e-10 for numerical stability.
diou = iou - square_of_center_distance / (square_of_the_diagonal + 1e-10)
inds = np.where(diou <= iou_thresh)[0]
order = order[inds + 1]
return dets[keep]
def hard_nms(dets, iou_thresh=None):
"""The basic hard non-maximum suppression.
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
    iou_thresh: IOU threshold.
Returns:
numpy.array: Retained boxes.
"""
iou_thresh = iou_thresh or 0.5
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
overlap = intersection / (areas[i] + areas[order[1:]] - intersection)
inds = np.where(overlap <= iou_thresh)[0]
order = order[inds + 1]
return dets[keep]
def soft_nms(dets, nms_configs):
"""Soft non-maximum suppression.
[1] Soft-NMS -- Improving Object Detection With One Line of Code.
https://arxiv.org/abs/1704.04503
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
nms_configs: a dict config that may contain the following members
* method: one of {`linear`, `gaussian`, 'hard'}. Use `gaussian` if None.
* iou_thresh (float): IOU threshold, only for `linear`, `hard`.
* sigma: Gaussian parameter, only for method 'gaussian'.
* score_thresh (float): Box score threshold for final boxes.
Returns:
numpy.array: Retained boxes.
"""
method = nms_configs.get('method', 'gaussian')
# Default sigma and iou_thresh are from the original soft-nms paper.
sigma = nms_configs.get('sigma', 0.5)
iou_thresh = nms_configs.get('iou_thresh', 0.3)
score_thresh = nms_configs.get('score_thresh', 0.001)
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# expand dets with areas, and the second dimension is
# x1, y1, x2, y2, score, area
dets = np.concatenate((dets, areas[:, None]), axis=1)
retained_box = []
while dets.size > 0:
max_idx = np.argmax(dets[:, 4], axis=0)
dets[[0, max_idx], :] = dets[[max_idx, 0], :]
retained_box.append(dets[0, :-1])
xx1 = np.maximum(dets[0, 0], dets[1:, 0])
yy1 = np.maximum(dets[0, 1], dets[1:, 1])
xx2 = np.minimum(dets[0, 2], dets[1:, 2])
yy2 = np.minimum(dets[0, 3], dets[1:, 3])
w = np.maximum(xx2 - xx1 + 1, 0.0)
h = np.maximum(yy2 - yy1 + 1, 0.0)
inter = w * h
iou = inter / (dets[0, 5] + dets[1:, 5] - inter)
if method == 'linear':
weight = np.ones_like(iou)
weight[iou > iou_thresh] -= iou[iou > iou_thresh]
elif method == 'gaussian':
weight = np.exp(-(iou * iou) / sigma)
else: # traditional nms
weight = np.ones_like(iou)
weight[iou > iou_thresh] = 0
dets[1:, 4] *= weight
retained_idx = np.where(dets[1:, 4] >= score_thresh)[0]
dets = dets[retained_idx + 1, :]
return np.vstack(retained_box)
def nms(dets, nms_configs):
"""Non-maximum suppression.
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
nms_configs: a dict config that may contain parameters.
Returns:
numpy.array: Retained boxes.
"""
nms_configs = nms_configs or {}
method = nms_configs.get('method', None)
if method == 'hard' or not method:
return hard_nms(dets, nms_configs.get('iou_thresh', None))
if method == 'diou':
return diou_nms(dets, nms_configs.get('iou_thresh', None))
if method in ('linear', 'gaussian'):
return soft_nms(dets, nms_configs)
raise ValueError('Unknown NMS method: {}'.format(method))
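# Hedged illustration (not part of the original module): a tiny self-contained
# check of the NMS dispatcher above on two overlapping boxes plus one distant
# box, using the [x1, y1, x2, y2, score] layout these functions expect.
def _nms_smoke_test():
  dets = np.array([[10., 10., 50., 50., 0.9],
                   [12., 12., 52., 52., 0.8],  # heavily overlaps the first box
                   [100., 100., 140., 140., 0.7]])
  # With hard NMS at IoU 0.5 the second box is suppressed; rows 0 and 2 survive.
  return nms(dets, {'method': 'hard', 'iou_thresh': 0.5})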
def _generate_anchor_configs(feat_sizes, min_level, max_level, num_scales,
aspect_ratios):
"""Generates mapping from output level to a list of anchor configurations.
A configuration is a tuple of (num_anchors, scale, aspect_ratio).
Args:
feat_sizes: list of dict of integer numbers of feature map sizes.
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instances, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
on each level. For instances, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
Returns:
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
"""
anchor_configs = {}
for level in range(min_level, max_level + 1):
anchor_configs[level] = []
for scale_octave in range(num_scales):
for aspect in aspect_ratios:
anchor_configs[level].append(
((feat_sizes[0]['height'] / float(feat_sizes[level]['height']),
feat_sizes[0]['width'] / float(feat_sizes[level]['width'])),
scale_octave / float(num_scales), aspect))
return anchor_configs
def _generate_anchor_boxes(image_size, anchor_scale, anchor_configs):
"""Generates multiscale anchor boxes.
Args:
image_size: tuple of integer numbers of input image size.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
Returns:
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels.
Raises:
ValueError: input size must be the multiple of largest feature stride.
"""
boxes_all = []
for _, configs in anchor_configs.items():
boxes_level = []
for config in configs:
stride, octave_scale, aspect = config
base_anchor_size_x = anchor_scale * stride[1] * 2**octave_scale
base_anchor_size_y = anchor_scale * stride[0] * 2**octave_scale
anchor_size_x_2 = base_anchor_size_x * aspect[0] / 2.0
anchor_size_y_2 = base_anchor_size_y * aspect[1] / 2.0
x = np.arange(stride[1] / 2, image_size[1], stride[1])
y = np.arange(stride[0] / 2, image_size[0], stride[0])
xv, yv = np.meshgrid(x, y)
xv = xv.reshape(-1)
yv = yv.reshape(-1)
boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,
yv + anchor_size_y_2, xv + anchor_size_x_2))
boxes = np.swapaxes(boxes, 0, 1)
boxes_level.append(np.expand_dims(boxes, axis=1))
# concat anchors on the same level to the reshape NxAx4
boxes_level = np.concatenate(boxes_level, axis=1)
boxes_all.append(boxes_level.reshape([-1, 4]))
anchor_boxes = np.vstack(boxes_all)
return anchor_boxes
def _generate_detections_tf(cls_outputs,
box_outputs,
anchor_boxes,
indices,
classes,
image_id,
image_scale,
image_size,
min_score_thresh=MIN_SCORE_THRESH,
max_boxes_to_draw=MAX_DETECTIONS_PER_IMAGE,
soft_nms_sigma=0.25,
iou_threshold=0.5):
"""Generates detections with model outputs and anchors.
Args:
cls_outputs: a numpy array with shape [N, 1], which has the highest class
scores on all feature levels. The N is the number of selected
top-K total anchors on all levels. (k being MAX_DETECTION_POINTS)
box_outputs: a numpy array with shape [N, 4], which stacks box regression
outputs on all feature levels. The N is the number of selected top-k
total anchors on all levels. (k being MAX_DETECTION_POINTS)
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels. The N is the number of selected top-k total anchors on
all levels.
indices: a numpy array with shape [N], which is the indices from top-k
selection.
classes: a numpy array with shape [N], which represents the class
prediction on all selected anchors from top-k selection.
image_id: an integer number to specify the image id.
image_scale: a float tensor representing the scale between original image
and input image for the detector. It is used to rescale detections for
evaluating with the original groundtruth annotations.
image_size: a tuple (height, width) or an integer for image size.
min_score_thresh: A float representing the threshold for deciding when to
remove boxes based on score.
max_boxes_to_draw: Max number of boxes to draw.
soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter;
See Bodla et al, https://arxiv.org/abs/1704.04503). When
`soft_nms_sigma=0.0` (which is default), we fall back to standard (hard)
NMS.
iou_threshold: A float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
Returns:
detections: detection results in a tensor with each row representing
[image_id, ymin, xmin, ymax, xmax, score, class]
"""
if not image_size:
raise ValueError('tf version generate_detection needs non-empty image_size')
logging.info('Using tf version of post-processing.')
anchor_boxes = tf.gather(anchor_boxes, indices)
scores = tf.math.sigmoid(cls_outputs)
# apply bounding box regression to anchors
boxes = decode_box_outputs_tf(box_outputs, anchor_boxes)
# TF API is slightly different from paper, here we follow the paper value:
# https://github.com/tensorflow/tensorflow/issues/40253.
top_detection_idx, scores = tf.image.non_max_suppression_with_scores(
boxes,
scores,
max_boxes_to_draw,
iou_threshold=iou_threshold,
score_threshold=min_score_thresh,
soft_nms_sigma=soft_nms_sigma)
boxes = tf.gather(boxes, top_detection_idx)
image_size = utils.parse_image_size(image_size)
detections = tf.stack([
tf.cast(tf.tile(image_id, tf.shape(top_detection_idx)), tf.float32),
tf.clip_by_value(boxes[:, 0], 0, image_size[0]) * image_scale,
tf.clip_by_value(boxes[:, 1], 0, image_size[1]) * image_scale,
tf.clip_by_value(boxes[:, 2], 0, image_size[0]) * image_scale,
tf.clip_by_value(boxes[:, 3], 0, image_size[1]) * image_scale,
scores,
tf.cast(tf.gather(classes, top_detection_idx) + 1, tf.float32)
], axis=1)
return detections
def _generate_detections(cls_outputs, box_outputs, anchor_boxes, indices,
classes, image_id, image_scale, num_classes,
max_boxes_to_draw, nms_configs):
"""Generates detections with model outputs and anchors.
Args:
cls_outputs: a numpy array with shape [N, 1], which has the highest class
scores on all feature levels. The N is the number of selected
top-K total anchors on all levels. (k being MAX_DETECTION_POINTS)
box_outputs: a numpy array with shape [N, 4], which stacks box regression
outputs on all feature levels. The N is the number of selected top-k
total anchors on all levels. (k being MAX_DETECTION_POINTS)
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels. The N is the number of selected top-k total anchors on
all levels.
indices: a numpy array with shape [N], which is the indices from top-k
selection.
classes: a numpy array with shape [N], which represents the class
prediction on all selected anchors from top-k selection.
image_id: an integer number to specify the image id.
image_scale: a float tensor representing the scale between original image
and input image for the detector. It is used to rescale detections for
evaluating with the original groundtruth annotations.
num_classes: a integer that indicates the number of classes.
max_boxes_to_draw: max number of boxes to draw per image.
nms_configs: A dict of NMS configs.
Returns:
detections: detection results in a tensor with each row representing
[image_id, x, y, width, height, score, class]
"""
anchor_boxes = anchor_boxes[indices, :]
scores = sigmoid(cls_outputs)
# apply bounding box regression to anchors
boxes = decode_box_outputs(
box_outputs.swapaxes(0, 1), anchor_boxes.swapaxes(0, 1))
boxes = boxes[:, [1, 0, 3, 2]]
# run class-wise nms
detections = []
for c in range(num_classes):
indices = np.where(classes == c)[0]
if indices.shape[0] == 0:
continue
boxes_cls = boxes[indices, :]
scores_cls = scores[indices]
# Select top-scoring boxes in each class and apply non-maximum suppression
# (nms) for boxes in the same class. The selected boxes from each class are
# then concatenated for the final detection outputs.
all_detections_cls = np.column_stack((boxes_cls, scores_cls))
top_detections_cls = nms(all_detections_cls, nms_configs)
top_detections_cls[:, 2] -= top_detections_cls[:, 0]
top_detections_cls[:, 3] -= top_detections_cls[:, 1]
top_detections_cls = np.column_stack(
(np.repeat(image_id, len(top_detections_cls)),
top_detections_cls,
np.repeat(c + 1, len(top_detections_cls)))
)
detections.append(top_detections_cls)
def _generate_dummy_detections(number):
detections_dummy = np.zeros((number, 7), dtype=np.float32)
detections_dummy[:, 0] = image_id[0]
detections_dummy[:, 5] = _DUMMY_DETECTION_SCORE
return detections_dummy
if detections:
detections = np.vstack(detections)
# take final 100 detections
indices = np.argsort(-detections[:, -2])
detections = np.array(
detections[indices[0:max_boxes_to_draw]], dtype=np.float32)
# Add dummy detections to fill up to 100 detections
n = max(max_boxes_to_draw - len(detections), 0)
detections_dummy = _generate_dummy_detections(n)
detections = np.vstack([detections, detections_dummy])
else:
detections = _generate_dummy_detections(max_boxes_to_draw)
detections[:, 1:5] *= image_scale
return detections
class Anchors(object):
"""Multi-scale anchors class."""
def __init__(self, min_level, max_level, num_scales, aspect_ratios,
anchor_scale, image_size):
"""Constructs multiscale anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instances, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
on each level. For instances, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: integer number or tuple of integer number of input image size.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_scale = anchor_scale
self.image_size = utils.parse_image_size(image_size)
self.feat_sizes = utils.get_feat_sizes(image_size, max_level)
self.config = self._generate_configs()
self.boxes = self._generate_boxes()
def _generate_configs(self):
"""Generate configurations of anchor boxes."""
return _generate_anchor_configs(self.feat_sizes, self.min_level,
self.max_level, self.num_scales,
self.aspect_ratios)
def _generate_boxes(self):
"""Generates multiscale anchor boxes."""
boxes = _generate_anchor_boxes(self.image_size, self.anchor_scale,
self.config)
boxes = tf.convert_to_tensor(boxes, dtype=tf.float32)
return boxes
def get_anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
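# Hedged illustration (not part of the original module): building the usual
# EfficientDet-style multi-scale anchor set for a 512x512 input. The scales,
# ratios and anchor_scale below are common defaults, not values read from any
# particular config.
def _example_anchors():
  return Anchors(min_level=3, max_level=7, num_scales=3,
                 aspect_ratios=[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)],
                 anchor_scale=4.0, image_size=512)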
class AnchorLabeler(object):
"""Labeler for multiscale anchor boxes."""
def __init__(self, anchors, num_classes, match_threshold=0.5):
"""Constructs anchor labeler to assign labels to anchors.
Args:
anchors: an instance of class Anchors.
num_classes: integer number representing number of classes in the dataset.
match_threshold: float number between 0 and 1 representing the threshold
to assign positive labels for anchors.
"""
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(
match_threshold,
unmatched_threshold=match_threshold,
negatives_lower_than_unmatched=True,
force_match_for_each_row=True)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
self._target_assigner = target_assigner.TargetAssigner(
similarity_calc, matcher, box_coder)
self._anchors = anchors
self._match_threshold = match_threshold
self._num_classes = num_classes
def _unpack_labels(self, labels):
"""Unpacks an array of labels into multiscales labels."""
labels_unpacked = collections.OrderedDict()
anchors = self._anchors
count = 0
for level in range(anchors.min_level, anchors.max_level + 1):
feat_size = anchors.feat_sizes[level]
steps = feat_size['height'] * feat_size[
'width'] * anchors.get_anchors_per_location()
indices = tf.range(count, count + steps)
count += steps
labels_unpacked[level] = tf.reshape(
tf.gather(labels, indices),
[feat_size['height'], feat_size['width'], -1])
return labels_unpacked
def label_anchors(self, gt_boxes, gt_labels):
"""Labels anchors with ground truth inputs.
Args:
gt_boxes: A float tensor with shape [N, 4] representing groundtruth boxes.
For each row, it stores [y0, x0, y1, x1] for four corners of a box.
gt_labels: A integer tensor with shape [N, 1] representing groundtruth
classes.
Returns:
cls_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: scalar tensor storing number of positives in an image.
"""
gt_box_list = box_list.BoxList(gt_boxes)
anchor_box_list = box_list.BoxList(self._anchors.boxes)
# cls_weights, box_weights are not used
cls_targets, _, box_targets, _, matches = self._target_assigner.assign(
anchor_box_list, gt_box_list, gt_labels)
# class labels start from 1 and the background class = -1
cls_targets -= 1
cls_targets = tf.cast(cls_targets, tf.int32)
# Unpack labels.
cls_targets_dict = self._unpack_labels(cls_targets)
box_targets_dict = self._unpack_labels(box_targets)
num_positives = tf.reduce_sum(
tf.cast(tf.not_equal(matches.match_results, -1), tf.float32))
return cls_targets_dict, box_targets_dict, num_positives
def generate_detections(self,
cls_outputs,
box_outputs,
indices,
classes,
image_id,
image_scale,
image_size=None,
min_score_thresh=MIN_SCORE_THRESH,
max_boxes_to_draw=MAX_DETECTIONS_PER_IMAGE,
disable_pyfun=None,
nms_configs=None):
"""Generate detections based on class and box predictions."""
if disable_pyfun:
return _generate_detections_tf(
cls_outputs,
box_outputs,
self._anchors.boxes,
indices,
classes,
image_id,
image_scale,
image_size,
min_score_thresh=min_score_thresh,
max_boxes_to_draw=max_boxes_to_draw)
else:
logging.info('nms_configs=%s', nms_configs)
return tf.py_func(
functools.partial(_generate_detections, nms_configs=nms_configs), [
cls_outputs,
box_outputs,
self._anchors.boxes,
indices,
classes,
image_id,
image_scale,
self._num_classes,
max_boxes_to_draw,
], tf.float32)
|
the-stack_0_6908 | ###############################################################################
# Convert coco annotations to a SQLite database #
# #
# #
# (c) 2020 Simon Wenkel #
# Released under the Apache 2.0 license #
# #
###############################################################################
#
# import libraries
#
import argparse
import gc
import json
import sqlite3
import time
from tqdm import tqdm
from joblib import Parallel, delayed
import numpy as np
import numba as nb
from .metrics import bb_iou
#
# functions
#
def is_SQLiteDB(db_file:str)->bool:
"""
Function to check if a file is a valid SQLite database
Inputs:
- file (str) : full path of the file/DB in question
Ouputs:
- is_SQLiteDB (bool) : file is a SQLite DB or not
"""
with open(db_file, "rb") as file:
header = file.read(100)
if header[0:16] == b'SQLite format 3\000':
is_SQLiteDB = True
else:
is_SQLiteDB = False
return is_SQLiteDB
def create_DB(db_conn:sqlite3.Connection,
db_curs:sqlite3.Cursor):
"""
Function to generate all tables required in an empty SQLite database
Inputs:
- db_conn (sqlite3.connector) : database connection
- db_curs (sqlite3.cursor) : database cursor to execute commands
Outputs:
- None
"""
db_curs.execute('''CREATE TABLE images
(`orig_id` INTEGER,
`file_name` TEXT,
`coco_url` TEXT,
`height` INTEGER,
`WIDTH` INTEGER,
`date_captured` TEXT,
`flickr_url` TEXT,
`license` INTEGER,
`subset` TEXT)''')
db_curs.execute('''CREATE TABLE annotations
(`segmentation` TEXT,
`area` REAL,
`iscrowd` INTEGER,
`image_id` INTEGER,
`bbox` TEXT,
`category_id` INTEGER,
`orig_id` INTEGER,
`subset` TEXT,
`isGT` INTEGER)''')
db_curs.execute('''CREATE TABLE supercategories
(`supercategory` TEXT)''')
db_curs.execute('''CREATE TABLE categories
(`category_id` INTEGER,
`name` TEXT,
`supercategory_id` INTEGER)''')
db_curs.execute('''CREATE TABLE licenses
(`url` TEXT,
`license_id` INTEGER,
`name` TEXT,
`subset` TEXT)''')
db_curs.execute('''CREATE TABLE predictions
(`image_id` INTEGER,
`category_id` INTEGER,
`bbox` TEXT,
`score` REAL,
`IoU` REAL,
`is_valid_class_in_img` TEXT,
`best_match_gt_annotation_id` INTEGER,
`model` TEXT,
`comments` TEXT )''')
db_curs.execute('''CREATE TABLE status\
(`model` TEXT,
`subset` TEXT,
`processed` TEXT)''')
db_conn.commit()
print("DB generated.")
def check_if_images_in_db(subset:str,
total_count:int,
db_curs)->bool:
"""
"""
if subset in db_curs.execute("SELECT DISTINCT subset\
FROM images").fetchall()[0]:
imgs_in_db = True
# check if subset is complete, throw exception otherwise
if db_curs.execute("SELECT COUNT(*)\
FROM images\
WHERE subset=?", \
[subset]).fetchall()[0][0] != total_count:
raise Exception("Subset of images is in DB but inclomplete!")
else:
imgs_in_db = False
return imgs_in_db
def check_if_annotations_in_db(subset:str,
total_count:int,
db_curs)->bool:
"""
"""
if subset in np.array(db_curs.execute("SELECT DISTINCT subset\
FROM annotations").fetchall()):
annot_in_db = True
# check if subset is complete, throw exception otherwise
if db_curs.execute("SELECT COUNT(*)\
FROM annotations\
WHERE subset=?",\
[subset]).fetchall()[0][0] != total_count:
raise Exception("Subset of annotations is in DB but inclomplete!")
else:
annot_in_db = False
return annot_in_db
def check_if_predictions_in_db(model:str,
total_count:int,
db_curs)->bool:
"""
"""
models = db_curs.execute("SELECT DISTINCT model\
FROM predictions").fetchall()
if len(models) != 0:
models = np.array(models)
if model in models:
annot_in_db = True
# check if subset is complete, throw exception otherwise
if db_curs.execute("SELECT COUNT(*)\
FROM predictions\
WHERE model=?",\
[model]).fetchall()[0][0] != total_count:
                raise Exception(model, " predictions are in DB but incomplete!")
else:
annot_in_db = False
else:
annot_in_db = False
return annot_in_db
def image_data_to_list(item:dict,
subset:str)->list:
"""
Assuming the structure of each image dict:
`orig_id` INTEGER,
`file_name` TEXT,
`coco_url` TEXT,
`height` INTEGER,
`WIDTH` INTEGER,
`date_captured` TEXT,
`flickr_url` TEXT,
`license` INTEGER,
`subset` TEXT
Inputs:
- item (dict) : dict containing all data about an image
- subset (str) : is the name of the particular subset the image\
in question is part of
Outputs:
- list_to_move (list) : list containing items as required for \
insert into SQLite table
"""
list_to_move = [item["id"], \
item["file_name"], \
item["coco_url"], \
item["height"], \
item["width"], \
item["date_captured"], \
item["flickr_url"], \
item["license"], \
subset]
return list_to_move
def annotations_to_list(item:dict,
subset:str,
isGT:int)->list:
"""
Assumed table structure for groundtruth annotations:
`segmentation' TEXT,
`area' REAL,
`iscrowd` INTEGER,
`image_id` INTEGER,
`bbox` TEXT,
`category_id` INTEGER,
`orig_id` INTEGER,
`subset` TEXT
`isGT` INTEGER
"""
list_to_move = [json.dumps(item["segmentation"]), \
item["area"], \
item["iscrowd"], \
item["image_id"], \
json.dumps(item["bbox"]), \
item["category_id"], \
item["id"], \
subset,\
isGT]
return list_to_move
def add_gt_annotations(gt:dict,
subset:str,
db_conn:sqlite3.Connection,
db_curs:sqlite3.Cursor,
empty_db:bool = False):
"""
Adding GroundTruth data to the database
Assuming a fully coco compliant json structure
"""
keys = gt.keys()
# min. required keys are "annotations" and "images"
if ("images" not in keys) or ("annotations" not in keys):
raise Exception("Groundtruth data lacks images or annotations.\
Please provide a valid groundtruth annotation file")
# check if images are already in DB
if empty_db or not check_if_images_in_db(subset,\
len(gt["images"]),\
db_curs):
items_to_insert = Parallel(n_jobs=-1, prefer="threads")(
delayed(image_data_to_list)(item, subset)
for item in tqdm(gt["images"])
)
db_curs.executemany("INSERT INTO images\
VALUES (?,?,?,?,?,?,?,?,?)",
items_to_insert)
db_conn.commit()
else:
print("GT images in DB already.")
# check if annotations are in DB first
if empty_db:
items_to_insert = Parallel(n_jobs=-1, prefer="threads")(
delayed(annotations_to_list)(item, subset, 1)
for item in tqdm(gt["annotations"])
)
db_curs.executemany("INSERT INTO annotations\
VALUES (?,?,?,?,?,?,?,?,?)",
items_to_insert)
db_conn.commit()
elif not check_if_annotations_in_db(subset,\
len(gt["annotations"]),\
db_curs):
items_to_insert = Parallel(n_jobs=-1, prefer="threads")(
delayed(annotations_to_list)(item, subset, 1)
for item in tqdm(gt["annotations"])
)
db_curs.executemany("INSERT INTO annotations\
VALUES (?,?,?,?,?,?,?,?,?)",
items_to_insert)
db_conn.commit()
else:
print("GT annotations in DB already.")
# licenses
if "licenses" in keys:
list_to_move = []
for lic in gt["licenses"]:
list_to_move.append([lic["url"], \
lic["id"], \
lic["name"], \
subset])
db_curs.executemany("INSERT INTO licenses \
VALUES (?,?,?,?)", list_to_move)
db_conn.commit()
# if "catgegories" in keys:
# for cat in gt["categories"]:
# a = 1
def add_predictions_to_db(predictions:list,
model:str,
db_curs:sqlite3.Cursor,
db_conn:sqlite3.Connection):
"""
Assuming the following structure for the predictions table:
`image_id` INTEGER,
`category_id` INTEGER,
`bbox` TEXT,
`score` REAL,
`is_valid_class_in_img` TEXT,
`best_match_gt_annotation_id` INTEGER,
`model` TEXT,
`comments` TEXT
"""
def generate_prediction_list_(item:dict,\
model:str)->list:
"""
"""
prediction = [item["image_id"],
item["category_id"],
json.dumps(item["bbox"]),
item["score"],
"-0.1",
"unknown",
-999,
model,
"none"]
return prediction
if not check_if_predictions_in_db(model,\
len(predictions),\
db_curs):
print("Adding", model)
items_to_insert = Parallel(n_jobs=-1, prefer="threads")(
delayed(generate_prediction_list_)(item, model)
for item in tqdm(predictions)
)
db_curs.executemany("INSERT INTO predictions\
VALUES (?,?,?,?,?,?,?,?,?)",
items_to_insert)
db_conn.commit()
else:
print(model," results already in DB!")
def check_if_model_processed(model,
db_conn,
db_curs):
"""
"""
    models_processed = db_curs.execute("SELECT DISTINCT model\
                                        FROM status").fetchall()
    if len(models_processed) != 0:
        models_processed = np.array(models_processed)
        if model in models_processed:
is_processed = True
else:
is_processed = False
else:
is_processed = False
return is_processed
def get_image_ids_of_pred(model,
db_conn,
db_curs):
"""
"""
image_ids = np.array(db_curs.execute("SELECT DISTINCT image_id\
FROM predictions\
WHERE model=?",\
[model]).fetchall())
return image_ids
def process_predictions_per_image(image_id,\
subset, \
model, \
db_conn, \
db_curs):
"""
"""
# get all valid categories first
valid_categories = db_curs.execute("SELECT DISTINCT category_id\
FROM annotations\
WHERE subset=? AND image_id=?",\
[subset, image_id]).fetchall()
# returns an Array of tuples, so conversion to np.ndarray
# makes it much easier to find something in it
valid_categories = np.array(valid_categories)
# get groundtruth annotations
gt_annotations = db_curs.execute("SELECT area,bbox,category_id, orig_id\
FROM annotations\
WHERE subset=? AND image_id=?",\
[subset, image_id]).fetchall()
# get predictions
pred_annotations = db_curs.execute("SELECT rowid,bbox,category_id\
FROM predictions\
WHERE model=? AND image_id=?",\
[model, image_id]).fetchall()
correct_class_pred = []
incorrect_class_pred = []
for i in range(len(pred_annotations)):
if pred_annotations[i][2] not in valid_categories:
# append rowid of incorrect class only
incorrect_class_pred.append(pred_annotations[i][0])
else:
# append full prediction
correct_class_pred.append(pred_annotations[i])
# Set all the wrong predictions (classes) to False
for rID in incorrect_class_pred:
db_curs.execute("UPDATE predictions\
SET is_valid_class_in_img=?\
WHERE rowid=?",\
["False", rID])
# cacluate IoUs
for prediction in correct_class_pred:
# best prediction
# format [orig_id, IoU]
best_prediction = [-1, 0.0]
for annotation in gt_annotations:
# check if class is correct
if prediction[2] == annotation[2]:
iou_tmp = bb_iou(json.loads(annotation[1]),\
json.loads(prediction[1]))
if iou_tmp >= best_prediction[1]:
best_prediction = [annotation[3], iou_tmp]
db_curs.execute("UPDATE predictions\
SET (is_valid_class_in_img,\
best_match_gt_annotation_id,\
IoU)=(?,?,?)\
WHERE rowid=?",\
["True",\
best_prediction[0],\
best_prediction[1],\
prediction[0]])
db_conn.commit()
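# Hedged end-to-end sketch (not part of the original script): how the helpers
# above would typically be chained for one model. Paths, subset and model names
# are illustrative placeholders, and `_open_or_create_db` refers to the sketch
# added after create_DB above.
def _example_pipeline(gt_json="instances_val2017.json",
                      pred_json="detections.json",
                      subset="val2017",
                      model="my_detector"):
    db_conn, db_curs = _open_or_create_db()
    with open(gt_json) as f:
        gt = json.load(f)
    # empty_db=True assumes a freshly created database
    add_gt_annotations(gt, subset, db_conn, db_curs, empty_db=True)
    with open(pred_json) as f:
        predictions = json.load(f)
    add_predictions_to_db(predictions, model, db_curs, db_conn)
    for image_id in get_image_ids_of_pred(model, db_conn, db_curs):
        process_predictions_per_image(int(image_id), subset, model, db_conn, db_curs)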
|
the-stack_0_6913 | import mailbox
import quopri
import email.utils
import lxml.html.clean
import re
def read_mail(path):
mdir = mailbox.Maildir(path)
return mdir
def extract_email_headers(msg):
"""Extract headers from email"""
msg_obj = {}
msg_obj["from"] = {}
from_field = msg.getheaders('From')[0]
msg_obj["from"]["name"], msg_obj["from"]["address"] = email.utils.parseaddr(from_field)
msg_obj["to"] = email.utils.getaddresses(msg.getheaders('To'))
msg_obj["subject"] = msg.getheaders('Subject')[0]
msg_obj["date"] = msg.getheaders('Date')[0]
return msg_obj
def format_plaintext_email(message):
"""Replace \n by <br> to display as HTML"""
return message.replace('\n', '<br>')
def extract_email(msg):
"""Extract all the interesting fields from an email"""
msg_obj = extract_email_headers(msg)
fpPos = msg.fp.tell()
msg.fp.seek(0)
mail = email.message_from_string(msg.fp.read())
contents = []
for part in mail.walk():
if part.get_content_type() == 'text/plain':
charset = part.get_content_charset()
if charset != None:
payload = quopri.decodestring(part.get_payload()).decode(charset)
else: # assume ascii
payload = quopri.decodestring(part.get_payload()).decode('ascii')
payload = format_plaintext_email(payload)
contents.append(payload)
content = "".join(contents)
msg_obj["contents"] = lxml.html.clean.clean_html(content).encode('utf-8')
return msg_obj
def get_emails(mdir):
l = []
for id, msg in mdir.iteritems():
msg_obj = extract_email(msg)
msg_obj["id"] = id
l.append(msg_obj)
return l
def get_email(mdir, id):
msg = mdir.get(id)
return extract_email(msg)
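# Hedged usage sketch (not part of the original module, kept in the Python 2
# style of the code above): listing the date and subject of every message in a
# local Maildir. The path is an illustrative placeholder.
if __name__ == "__main__":
    mdir = read_mail("/path/to/Maildir")
    for msg_obj in get_emails(mdir):
        print("%s - %s" % (msg_obj["date"], msg_obj["subject"]))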
|
the-stack_0_6914 | # program to delete a specific item from a given doubly linked list.
class Node(object):
    # Doubly linked node
def __init__(self, value=None, next=None, prev=None):
self.value = value
self.next = next
self.prev = prev
class doubly_linked_list(object):
def __init__(self):
self.head = None
self.tail = None
self.count = 0
def append_item(self, value):
# Append an item
new_item = Node(value, None, None)
if self.head is None:
self.head = new_item
self.tail = self.head
else:
new_item.prev = self.tail
self.tail.next = new_item
self.tail = new_item
self.count += 1
def iter(self):
# Iterate the list
current = self.head
while current:
item_val = current.value
current = current.next
yield item_val
def print_foward(self):
for node in self.iter():
print(node)
def search_item(self, val):
for node in self.iter():
if val == node:
return True
return False
def delete(self, value):
# Delete a specific item
current = self.head
node_deleted = False
if current is None:
node_deleted = False
        elif current.value == value:
            # deleting the head node; guard against the list becoming empty
            self.head = current.next
            if self.head is not None:
                self.head.prev = None
            else:
                self.tail = None
            node_deleted = True
elif self.tail.value == value:
self.tail = self.tail.prev
self.tail.next = None
node_deleted = True
else:
while current:
if current.value == value:
current.prev.next = current.next
current.next.prev = current.prev
node_deleted = True
current = current.next
if node_deleted:
self.count -= 1
items = doubly_linked_list()
items.append_item('PHP')
items.append_item('Python')
items.append_item('C#')
items.append_item('C++')
items.append_item('Java')
items.append_item('SQL')
print("Original list:")
items.print_foward()
items.delete("Java")
items.delete("Python")
print("\nList after deleting two items:")
items.print_foward()
|
the-stack_0_6917 | from __future__ import absolute_import, division, print_function
import tensorflow as tf
import numpy as np
import os
import zipfile
def _parse_flat(filename, label):
image_string = tf.read_file(filename)
image_decoded = tf.image.decode_jpeg(image_string, channels=1) # the image gets decoded in the shape of height,width,channels
image_reshaped = tf.reshape(image_decoded, (-1,)) # flatten the tensor
image_casted = tf.cast(image_reshaped, tf.float32) # Convert the array to float32 as opposed to uint8
image_casted /= 255 # Convert the pixel values from integers between 0 and 255 to floats between 0 and 1
return image_casted, label
def _parse(filename, label):
image_string = tf.read_file(filename)
image_decoded = tf.image.decode_jpeg(image_string, channels=1) # the image gets decoded in the shape of height,width,channels
image_casted = tf.cast(image_decoded, tf.float32) # Convert the array to float32 as opposed to uint8
image_casted /= 255 # Convert the pixel values from integers between 0 and 255 to floats between 0 and 1
return image_casted, label
def load_data(image_dir, number_of_outputs=None, flatten=None, batch_size=None, shuffle_size=None, percent_of_test_examples=None):
subdirs = [x[1] for x in os.walk(image_dir)][0]
label_enums = []
trainFileList = []
trainLabelList = []
testFileList = []
testLabelList = []
if(percent_of_test_examples is None):
percent_of_test_examples = 0.1
for subdir in subdirs:
files = os.listdir(image_dir+"/"+subdir)
files = [image_dir+"/"+subdir+'/'+f for f in files]
if(subdir not in label_enums):
label_enums.append(subdir)
number_of_test_examples = int(percent_of_test_examples * len(files))
trainFiles = files[number_of_test_examples:]
trainFileList.extend(trainFiles)
trainLabelList.extend([label_enums.index(subdir)]*len(trainFiles))
testFiles = files[:number_of_test_examples]
testFileList.extend(testFiles)
testLabelList.extend([label_enums.index(subdir)]*len(testFiles))
trainFileList = tf.constant(trainFileList)
trainLabelList = tf.keras.utils.to_categorical(trainLabelList, number_of_outputs) # The format of the labels
trainLabelList = trainLabelList.astype(np.float32) # Cast the labels to floats
train_dataset = tf.data.Dataset.from_tensor_slices((trainFileList, trainLabelList))
testFileList = tf.constant(testFileList)
testLabelList = tf.keras.utils.to_categorical(testLabelList, number_of_outputs) # The format of the labels
testLabelList = testLabelList.astype(np.float32) # Cast the labels to floats
test_dataset = tf.data.Dataset.from_tensor_slices((testFileList, testLabelList))
if(flatten is None):
train_dataset = train_dataset.map(_parse)
test_dataset = test_dataset.map(_parse)
elif(flatten):
train_dataset = train_dataset.map(_parse_flat)
test_dataset = test_dataset.map(_parse_flat)
else:
train_dataset = train_dataset.map(_parse)
test_dataset = test_dataset.map(_parse)
# shuffle
if(shuffle_size is not None):
train_dataset = train_dataset.shuffle(shuffle_size)
# create batch
if(batch_size is not None):
train_dataset = train_dataset.batch(batch_size)
    else:
        # tf.data's batch() needs an explicit size; with no batch_size given,
        # fall back to a single batch containing the whole training set
        train_dataset = train_dataset.batch(len(trainLabelList))
test_dataset = test_dataset.batch(len(testLabelList))
return train_dataset, test_dataset
def load_one_data(image_dir, number_of_outputs=None, flatten=None):
image_set_dir = image_dir[:image_dir.rindex('/')]
image_set_dir = image_set_dir[:image_set_dir.rindex('/')]
subdirs = [x[1] for x in os.walk(image_set_dir)][0]
label_enums = []
testFileList = []
testLabelList = []
for subdir in subdirs:
if(subdir not in label_enums):
label_enums.append(subdir)
label = os.path.split(os.path.dirname(image_dir))[-1]
testFileList = tf.constant([image_dir])
testLabelList = tf.keras.utils.to_categorical([label_enums.index(label)], number_of_outputs) # The format of the labels
testLabelList = testLabelList.astype(np.float32) # Cast the labels to floats
test_dataset = tf.data.Dataset.from_tensor_slices((testFileList, testLabelList))
test_dataset = test_dataset.map(_parse)
test_dataset = test_dataset.batch(1)
return test_dataset
def prepare_data(image_dir):
# look for .zip files and unzip them
# returns number labels (folders) in the image_dir
subdirs = [x[1] for x in os.walk(image_dir)][0]
files = [x[2] for x in os.walk(image_dir)][0]
zip_files = list(filter(lambda file: file.endswith('.zip'), files))
dirs = set(subdirs)
for zip_file in zip_files:
if not zip_file[:-4] in dirs:
_unzip(zip_file,image_dir)
else:
print('found ' + zip_file + ' already unzipped')
labels = [x[1] for x in os.walk(image_dir)][0]
print('labels:', labels)
return labels
def _unzip(source,image_dir):
print('unzipping ' + source)
with zipfile.ZipFile(image_dir+"/"+source,"r") as zip_ref:
zip_ref.extractall(image_dir+"/"+source[:-4])
return True
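# Hypothetical usage sketch (not part of the original file): "images" is an
# assumed directory holding one sub-folder (or .zip archive) per class label.
if __name__ == "__main__":
    labels = prepare_data("images")
    train_ds, test_ds = load_data("images",
                                  number_of_outputs=len(labels),
                                  batch_size=32,
                                  shuffle_size=1000,
                                  percent_of_test_examples=0.2)
    print(train_ds)
    print(test_ds)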
|
the-stack_0_6918 | '''test pysftp.Connection.stat and .lstat - uses py.test'''
# pylint: disable = W0142
# pylint: disable=E1101
from common import *
def test_stat(psftp):
'''test stat'''
dirname = 'pub'
psftp.chdir('/home/test')
rslt = psftp.stat(dirname)
assert rslt.st_size >= 0
def test_lstat(psftp):
'''test lstat minimal'''
dirname = 'pub'
psftp.chdir('/home/test')
rslt = psftp.lstat(dirname)
assert rslt.st_size >= 0
|
the-stack_0_6919 | import os
from pathlib import Path
from shutil import which
from invoke import task
PKG_NAME = "conda_hooks"
PKG_PATH = Path(f"{PKG_NAME}")
ACTIVE_VENV = os.environ.get("VIRTUAL_ENV", None)
VENV_HOME = Path(os.environ.get("WORKON_HOME", "~/.local/share/virtualenvs"))
VENV_PATH = Path(ACTIVE_VENV) if ACTIVE_VENV else (VENV_HOME / PKG_NAME)
VENV = str(VENV_PATH.expanduser())
TOOLS = ["poetry", "pre-commit"]
POETRY = which("poetry") if which("poetry") else (VENV / Path("bin") / "poetry")
PRECOMMIT = (
which("pre-commit") if which("pre-commit") else (VENV / Path("bin") / "pre-commit")
)
@task
def tests(c):
"""Run the test suite"""
c.run(f"{VENV}/bin/pytest", pty=True)
@task
def black(c, check=False, diff=False):
"""Run Black auto-formatter, optionally with --check or --diff"""
check_flag, diff_flag = "", ""
if check:
check_flag = "--check"
if diff:
diff_flag = "--diff"
c.run(f"{VENV}/bin/black {check_flag} {diff_flag} {PKG_PATH} tasks.py")
@task
def isort(c, check=False, diff=False):
check_flag, diff_flag = "", ""
if check:
check_flag = "-c"
if diff:
diff_flag = "--diff"
c.run(f"{VENV}/bin/isort {check_flag} {diff_flag} .")
@task
def flake8(c):
c.run(f"{VENV}/bin/flake8 {PKG_PATH} tasks.py")
@task
def lint(c):
isort(c, check=True)
black(c, check=True)
flake8(c)
@task
def tools(c):
"""Install tools in the virtual environment if not already on PATH"""
for tool in TOOLS:
if not which(tool):
c.run(f"{VENV}/bin/pip install {tool}")
@task
def precommit(c):
"""Install pre-commit hooks to .git/hooks/pre-commit"""
c.run(f"{PRECOMMIT} install")
@task
def setup(c):
c.run(f"{VENV}/bin/pip install -U pip")
tools(c)
c.run(f"{POETRY} install")
precommit(c)
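# Example invocations (a sketch; assumes the virtualenv described above exists):
#   invoke setup    # upgrade pip, install poetry/pre-commit, poetry install, install hooks
#   invoke lint     # isort --check, black --check and flake8
#   invoke tests    # run pytest inside the virtualenv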
|
the-stack_0_6920 | from Maix import I2S, GPIO
from fpioa_manager import fm
from modules import SpeechRecognizer
import utime, time
# register i2s(i2s0) pin
fm.register(20, fm.fpioa.I2S0_OUT_D0, force=True)
fm.register(18, fm.fpioa.I2S0_SCLK, force=True)
fm.register(19, fm.fpioa.I2S0_WS, force=True)
# close WiFi, if use M1W Core module
if True:
fm.register(8, fm.fpioa.GPIO0, force=True)
wifi_en=GPIO(GPIO.GPIO0,GPIO.OUT)
wifi_en.value(0)
sample_rate = 8000
# init i2s(i2s0)
i2s_dev = I2S(I2S.DEVICE_0)
# config i2s according to speechrecognizer
i2s_dev.channel_config(i2s_dev.CHANNEL_0,
I2S.RECEIVER,
resolution = I2S.RESOLUTION_16_BIT,
cycles = I2S.SCLK_CYCLES_32,
align_mode = I2S.RIGHT_JUSTIFYING_MODE)
i2s_dev.set_sample_rate(sample_rate)
s = SpeechRecognizer(i2s_dev)
print(type(s))
print(s)
key_word_record = False
tim2 = time.ticks_ms()
def pins_irq(pin_num):
global key_word_record
global tim2
if (time.ticks_ms() - tim2 )> 800:
key_word_record = not key_word_record
tim2 = time.ticks_ms()
fm.register(16, fm.fpioa.GPIOHS0)
key_boot = GPIO(GPIO.GPIOHS0, GPIO.IN)
key_boot.irq(pins_irq, GPIO.IRQ_FALLING, GPIO.WAKEUP_NOT_SUPPORT, 7)
#Currently supports a maximum of 10 keywords, each recording a maximum of 4 templates
for i in range(3):
# Record three keywords, three times each
for j in range(3):
print("Press the button to record the {} keyword, the {}".format(i+1, j+1))
while True:
if key_word_record == True:
break
else:
print('.', end="")
utime.sleep_ms(500)
print("---")
s.record(i, j)
key_word_record = False
print("record successful!")
while True:
# recognize
ret = s.recognize()
if ret > 0:
if ret == 1:
print("ret:{}-{}".format(ret, "red"))
elif ret == 2:
print("ret:{}-{}".format(ret, "green"))
elif ret == 3:
print("ret:{}-{}".format(ret, "blue"))
else:
print("")
|
the-stack_0_6923 | import logging
from copy import deepcopy
from datetime import timezone
from typing import Any, Dict, List, Optional
import pytz
import requests
from dateutil import parser
from obsei.sink.base_sink import Convertor
from obsei.sink.http_sink import HttpSink, HttpSinkConfig
from obsei.payload import TextPayload
from obsei.misc.utils import flatten_dict
logger = logging.getLogger(__name__)
TWITTER_URL_PREFIX = "https://twitter.com/"
IST_TZ = pytz.timezone("Asia/Kolkata")
class PayloadConvertor(Convertor):
def convert(
self,
analyzer_response: TextPayload,
base_payload: Optional[Dict[str, Any]] = None,
**kwargs,
) -> Dict[str, Any]:
request_payload = base_payload or {}
if analyzer_response.source_name != "Twitter":
return {**request_payload, **analyzer_response.to_dict()}
source_information = kwargs["source_information"]
user_url = ""
positive = 0.0
negative = 0.0
text = ""
tweet_id = None
created_at_str = None
classification_list: List[str] = []
flat_dict = flatten_dict(analyzer_response.to_dict())
for k, v in flat_dict.items():
if "username" in k:
user_url = TWITTER_URL_PREFIX + v
elif "text" in k:
text = str(v).replace("\n", " ")
elif "positive" in k:
positive = float(v)
elif "negative" in k:
negative = float(v)
elif "meta_id" in k:
tweet_id = v
elif "created_at" in k:
created_at_str = v
elif "segmented_data" in k and len(classification_list) < 2:
classification_list.append(k.rsplit("_", 1)[1])
if created_at_str:
created_at = parser.isoparse(created_at_str)
created_at_str = (
created_at.replace(tzinfo=timezone.utc)
.astimezone(tz=IST_TZ)
.strftime("%Y-%m-%d %H:%M:%S")
)
tweet_url = f"{user_url}/status/{tweet_id}"
# Sentiment rules
if negative > 8.0:
sentiment = "Strong Negative"
elif 0.3 < negative <= 8.0:
sentiment = "Negative"
elif positive >= 0.8:
sentiment = "Strong Positive"
elif 0.4 < positive < 0.8:
sentiment = "Positive"
else:
sentiment = "Neutral"
enquiry = {
"Source": source_information,
"FeedbackBy": user_url,
"Sentiment": sentiment,
"TweetUrl": tweet_url,
"FormattedText": text,
"PredictedCategories": ",".join(classification_list),
}
if created_at_str:
enquiry["ReportedAt"] = created_at_str
kv_str_list = [k + ": " + str(v) for k, v in enquiry.items()]
request_payload["enquiryMessage"] = "\n".join(kv_str_list)
return request_payload
class DailyGetSinkConfig(HttpSinkConfig):
TYPE: str = "DailyGet"
partner_id: str
consumer_phone_number: str
source_information: str
headers: Dict[str, Any] = {"Content-type": "application/json"}
class DailyGetSink(HttpSink):
def __init__(self, convertor: Convertor = PayloadConvertor(), **data: Any):
super().__init__(convertor=convertor, **data)
def send_data( # type: ignore[override]
self,
analyzer_responses: List[TextPayload],
config: DailyGetSinkConfig,
**kwargs,
):
headers = config.headers
payloads = []
responses = []
for analyzer_response in analyzer_responses:
payloads.append(
self.convertor.convert(
analyzer_response=analyzer_response,
base_payload=dict()
if config.base_payload is None
else deepcopy(config.base_payload),
source_information=config.source_information,
)
)
for payload in payloads:
response = requests.post(
url=config.url,
json=payload,
headers=headers,
)
logger.info(f"payload='{payload}'")
logger.info(f"response='{response.__dict__}'")
responses.append(response)
return responses
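# Hedged wiring sketch (not from the original project; the endpoint, partner id and
# phone number below are placeholders, and payload construction is simplified):
#
#   sink_config = DailyGetSinkConfig(
#       url="https://example.com/api/enquiry",
#       partner_id="PARTNER-123",
#       consumer_phone_number="+10000000000",
#       source_information="Twitter feedback",
#   )
#   sink = DailyGetSink()
#   sink.send_data(analyzer_responses, sink_config)  # POSTs one enquiry per payload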
|
the-stack_0_6925 | # -*- coding: utf-8 -*-
# Example for using WebDriver object: driver = self.get_current_driver() e.g driver.current_url
from QAutoLibrary.extension import TESTDATA
from selenium.webdriver.common.by import By
from QAutoLibrary.QAutoSelenium import *
from time import sleep
class Cs_backup_restore_dlg_up_back_conf_exist(CommonUtils):
"""
"""
# Pagemodel timestamp: 20171019021218
# Pagemodel url: https://xroad-lxd-cs.lxd:4000/backup
# Pagemodel area: (593, 355, 735, 146)
# Pagemodel screen resolution: (1920, 975)
# Use project settings: True
# Used filters: id, css_selector, class_name, link_text, xpath
# Xpath type: xpath-position
# Create automated methods: False
# Depth of css path: 3
# Minimize css selector: True
# Use css pattern: False
# Allow non unique css pattern: False
# Pagemodel template: False
# Use testability: True
# testability attribute: data-name
# Use contains text in xpath: False
# Exclude dynamic table filter: True
# Row count: 5
# Element count: 20
# Big element filter width: 55
# Big element filter height: 40
# Not filtered elements: button, strong, select
# Canvas modeling: False
# Pagemodel type: normal
# Links found: 0
# Page model constants:
DATA_NAME_FILE_UPLOAD_UI_RESIZABLE_W = (By.CSS_SELECTOR, u'div[data-name="file_upload_dialog"]>.ui-resizable-w') # x: 605 y: 312 width: 7 height: 230, tag: div, type: , name: None, form_id: , checkbox: , table_id: , href:
DATA_NAME_FILE_UPLOAD_UI_RESIZABLE_E = (By.CSS_SELECTOR, u'div[data-name="file_upload_dialog"]>.ui-resizable-e') # x: 1308 y: 312 width: 7 height: 230, tag: div, type: , name: None, form_id: , checkbox: , table_id: , href:
SUBMIT_0 = (By.XPATH, u'//div[8]/div[1]/div[1]/button[1]') # x: 1228 y: 352 width: 51 height: 49, tag: button, type: submit, name: None, form_id: upload_new, checkbox: , table_id: 2, href:
SUBMIT = (By.XPATH, u'//div[8]/div[1]/div[1]/button[2]') # x: 1279 y: 352 width: 51 height: 49, tag: button, type: submit, name: None, form_id: upload_new, checkbox: , table_id: 2, href:
ID_UI_ID_4 = (By.ID, u'ui-id-4') # x: 601 y: 366 width: 167 height: 21, tag: span, type: , name: None, form_id: , checkbox: , table_id: , href: None
UNKNOWN = (By.XPATH, u'//div[8]/div[1]/div[1]/button[1]/i[1]') # x: 1243 y: 366 width: 21 height: 21, tag: i, type: , name: None, form_id: upload_new, checkbox: , table_id: 2, href:
UNKNOWN_0 = (By.XPATH, u'//div[8]/div[1]/div[1]/button[2]/i[1]') # x: 1299 y: 369 width: 12 height: 15, tag: i, type: , name: None, form_id: upload_new, checkbox: , table_id: 2, href:
SELECTED_FILE_C_FAKEPATH_CONF_BACKUP_20171018_230834_TAR_TEXT = (By.CSS_SELECTOR, u'.selected_file') # x: 630 y: 382 width: 569 height: 32, tag: input, type: text, name: None, form_id: , checkbox: , table_id: 2, href:
UI_WIDGET_CONTENT_CORNER_ALL_FRONT_BUTTONS_DRAGGABLE_RESIZABLE_CONFIRM = (By.CSS_SELECTOR, u'div.ui-dialog.ui-widget.ui-widget-content.ui-corner-all.ui-front.ui-dialog-buttons.ui-draggable.ui-resizable>#confirm') # x: 591 y: 403 width: 740 height: 53, tag: div, type: , name: None, form_id: , checkbox: , table_id: , href:
CANCEL_0 = (By.XPATH, u'//div[11]/div[1]/button[2]') # x: 1155 y: 461 width: 75 height: 36, tag: button, type: button, name: None, form_id: upload_new, checkbox: , table_id: 2, href:
UI_BUTTONSET_CONFIRM = (By.CSS_SELECTOR, u'div.ui-dialog-buttonset>#confirm') # x: 1240 y: 461 width: 85 height: 36, tag: button, type: button, name: None, form_id: , checkbox: , table_id: , href:
CANCEL = (By.XPATH, u'//div[11]/div[1]/button[2]/span[1]') # x: 1168 y: 470 width: 49 height: 18, tag: span, type: , name: None, form_id: upload_new, checkbox: , table_id: 2, href: None
CONFIRM_UI_TEXT = (By.CSS_SELECTOR, u'#confirm>.ui-button-text') # x: 1253 y: 470 width: 59 height: 18, tag: span, type: , name: None, form_id: , checkbox: , table_id: , href: None
def click_button_confirm(self, parameters=None):
"""
Click button confirm
:param parameters: Test data section dictionary
"""
self.click_element(self.UI_BUTTONSET_CONFIRM)
|
the-stack_0_6926 | import random
import subprocess
from time import sleep
# install pynput before importing it; otherwise the import below would already have failed
subprocess.call("pip install pynput", shell=True)
from pynput.mouse import *
mouse = Controller()
def randomMousePosition():
random_x = random.randint(1,10000)
random_y = random.randint(1,10000)
moveMouse(random_x,random_y)
def moveMouse(x,y):
mouse.move(x,y)
print('[+] Mouse moved')
while True:
randomMousePosition()
sleep(1)
mouse.click(Button.left,1)
mouse.click(Button.right,1) |
the-stack_0_6927 |
from constants import *
from mobject.mobject import Mobject
from utils.bezier import interpolate
from utils.color import color_gradient
from utils.color import color_to_rgba
from utils.color import rgba_to_color
from utils.config_ops import digest_config
from utils.iterables import stretch_array_to_length
from utils.space_ops import get_norm
class PMobject(Mobject):
CONFIG = {
"stroke_width": DEFAULT_STROKE_WIDTH,
}
def reset_points(self):
self.rgbas = np.zeros((0, 4))
self.points = np.zeros((0, 3))
return self
def get_array_attrs(self):
return Mobject.get_array_attrs(self) + ["rgbas"]
def add_points(self, points, rgbas=None, color=None, alpha=1):
"""
points must be a Nx3 numpy array, as must rgbas if it is not None
"""
if not isinstance(points, np.ndarray):
points = np.array(points)
num_new_points = len(points)
self.points = np.append(self.points, points, axis=0)
if rgbas is None:
color = Color(color) if color else self.color
rgbas = np.repeat(
[color_to_rgba(color, alpha)],
num_new_points,
axis=0
)
elif len(rgbas) != len(points):
raise Exception("points and rgbas must have same shape")
self.rgbas = np.append(self.rgbas, rgbas, axis=0)
return self
def set_color(self, color=YELLOW_C, family=True):
rgba = color_to_rgba(color)
mobs = self.family_members_with_points() if family else [self]
for mob in mobs:
mob.rgbas[:, :] = rgba
self.color = color
return self
# def set_color_by_gradient(self, start_color, end_color):
def set_color_by_gradient(self, *colors):
self.rgbas = np.array(list(map(
color_to_rgba,
color_gradient(colors, len(self.points))
)))
return self
def set_colors_by_radial_gradient(self, center=None, radius=1, inner_color=WHITE, outer_color=BLACK):
        start_rgba, end_rgba = list(map(color_to_rgba, [inner_color, outer_color]))
if center is None:
center = self.get_center()
for mob in self.family_members_with_points():
num_points = mob.get_num_points()
t = min(1, np.abs(mob.get_center() - center) / radius)
mob.rgbas = np.array(
[interpolate(start_rgba, end_rgba, t)] * num_points
)
return self
def match_colors(self, mobject):
Mobject.align_data(self, mobject)
self.rgbas = np.array(mobject.rgbas)
return self
def filter_out(self, condition):
for mob in self.family_members_with_points():
to_eliminate = ~np.apply_along_axis(condition, 1, mob.points)
mob.points = mob.points[to_eliminate]
mob.rgbas = mob.rgbas[to_eliminate]
return self
def thin_out(self, factor=5):
"""
Removes all but every nth point for n = factor
"""
for mob in self.family_members_with_points():
            num_points = mob.get_num_points()
mob.apply_over_attr_arrays(
lambda arr: arr[
np.arange(0, num_points, factor)
]
)
return self
def sort_points(self, function=lambda p: p[0]):
"""
function is any map from R^3 to R
"""
for mob in self.family_members_with_points():
indices = np.argsort(
np.apply_along_axis(function, 1, mob.points)
)
mob.apply_over_attr_arrays(lambda arr: arr[indices])
return self
def fade_to(self, color, alpha):
self.rgbas = interpolate(self.rgbas, color_to_rgba(color), alpha)
for mob in self.submobjects:
mob.fade_to(color, alpha)
return self
def get_all_rgbas(self):
return self.get_merged_array("rgbas")
def ingest_submobjects(self):
attrs = self.get_array_attrs()
arrays = list(map(self.get_merged_array, attrs))
for attr, array in zip(attrs, arrays):
setattr(self, attr, array)
self.submobjects = []
return self
def get_color(self):
return rgba_to_color(self.rgbas[0, :])
def point_from_proportion(self, alpha):
        index = int(alpha * (self.get_num_points() - 1))
return self.points[index]
# Alignment
def align_points_with_larger(self, larger_mobject):
assert(isinstance(larger_mobject, PMobject))
self.apply_over_attr_arrays(
lambda a: stretch_array_to_length(
a, larger_mobject.get_num_points()
)
)
def get_point_mobject(self, center=None):
if center is None:
center = self.get_center()
return Point(center)
def interpolate_color(self, mobject1, mobject2, alpha):
self.rgbas = interpolate(
mobject1.rgbas, mobject2.rgbas, alpha
)
def pointwise_become_partial(self, mobject, a, b):
lower_index, upper_index = [
int(x * mobject.get_num_points())
for x in (a, b)
]
for attr in self.get_array_attrs():
full_array = getattr(mobject, attr)
partial_array = full_array[lower_index:upper_index]
setattr(self, attr, partial_array)
# TODO, Make the two implementations bellow non-redundant
class Mobject1D(PMobject):
CONFIG = {
"density": DEFAULT_POINT_DENSITY_1D,
}
def __init__(self, **kwargs):
digest_config(self, kwargs)
self.epsilon = 1.0 / self.density
Mobject.__init__(self, **kwargs)
def add_line(self, start, end, color=None):
start, end = list(map(np.array, [start, end]))
length = get_norm(end - start)
if length == 0:
points = [start]
else:
epsilon = self.epsilon / length
points = [
interpolate(start, end, t)
for t in np.arange(0, 1, epsilon)
]
self.add_points(points, color=color)
class Mobject2D(PMobject):
CONFIG = {
"density": DEFAULT_POINT_DENSITY_2D,
}
def __init__(self, **kwargs):
digest_config(self, kwargs)
self.epsilon = 1.0 / self.density
Mobject.__init__(self, **kwargs)
class PointCloudDot(Mobject1D):
CONFIG = {
"radius": 0.075,
"stroke_width": 2,
"density": DEFAULT_POINT_DENSITY_1D,
"color": YELLOW,
}
def __init__(self, center=ORIGIN, **kwargs):
Mobject1D.__init__(self, **kwargs)
self.shift(center)
def generate_points(self):
self.add_points([
r * (np.cos(theta) * RIGHT + np.sin(theta) * UP)
for r in np.arange(0, self.radius, self.epsilon)
for theta in np.arange(0, 2 * np.pi, self.epsilon / r)
])
class Point(PMobject):
CONFIG = {
"color": BLACK,
}
def __init__(self, location=ORIGIN, **kwargs):
PMobject.__init__(self, **kwargs)
self.add_points([location])
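# Hypothetical usage sketch (not part of the original module); it assumes the old
# manim layout so that constants/mobject/utils are importable from the repo root.
if __name__ == "__main__":
    cloud = PointCloudDot(center=ORIGIN, radius=0.5)
    cloud.set_color_by_gradient(YELLOW, RED)
    print("PointCloudDot with", cloud.get_num_points(), "points")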
|
the-stack_0_6929 | """Image loaders."""
from .common import SDLError
from .compat import UnsupportedError, byteify
from .. import endian, surface, pixels
_HASPIL = True
try:
from PIL import Image
except ImportError:
_HASPIL = False
_HASSDLIMAGE = True
try:
from .. import sdlimage
except ImportError:
_HASSDLIMAGE = False
__all__ = ["get_image_formats", "load_image"]
def get_image_formats():
"""Gets the formats supported in the default installation."""
if not _HASPIL and not _HASSDLIMAGE:
return ("bmp", )
return ("bmp", "cur", "gif", "ico", "jpg", "lbm", "pbm", "pcx", "pgm",
"png", "pnm", "ppm", "svg", "tga", "tif", "webp", "xcf", "xpm")
def load_image(fname, enforce=None):
"""Creates a SDL_Surface from an image file.
This function makes use of the Python Imaging Library, if it is available
on the target execution environment. The function will try to load the
file via sdl2 first. If the file could not be loaded, it will try
to load it via sdl2.sdlimage and PIL.
You can force the function to use only one of them, by passing the enforce
as either "PIL" or "SDL".
Note: This will call sdl2.sdlimage.init() implicitly with the default
arguments, if the module is available and if sdl2.SDL_LoadBMP() failed to
load the image.
"""
if enforce is not None and enforce not in ("PIL", "SDL"):
raise ValueError("enforce must be either 'PIL' or 'SDL', if set")
if fname is None:
raise ValueError("fname must be a string")
name = fname
if hasattr(fname, 'encode'):
name = byteify(fname, "utf-8")
if not _HASPIL and not _HASSDLIMAGE:
imgsurface = surface.SDL_LoadBMP(name)
if not imgsurface:
raise UnsupportedError(load_image,
"cannot use PIL or SDL for image loading")
return imgsurface.contents
if enforce == "PIL" and not _HASPIL:
raise UnsupportedError(load_image, "cannot use PIL (not found)")
if enforce == "SDL" and not _HASSDLIMAGE:
imgsurface = surface.SDL_LoadBMP(name)
if not imgsurface:
raise UnsupportedError(load_image,
"cannot use SDL_image (not found)")
return imgsurface.contents
imgsurface = None
if enforce != "PIL" and _HASSDLIMAGE:
sdlimage.IMG_Init(sdlimage.IMG_INIT_JPG | sdlimage.IMG_INIT_PNG |
sdlimage.IMG_INIT_TIF | sdlimage.IMG_INIT_WEBP)
imgsurface = sdlimage.IMG_Load(name)
if not imgsurface:
# An error occured - if we do not try PIL, break out now
if not _HASPIL or enforce == "SDL":
raise SDLError(sdlimage.IMG_GetError())
else:
imgsurface = imgsurface.contents
if enforce != "SDL" and _HASPIL and not imgsurface:
image = Image.open(fname)
mode = image.mode
width, height = image.size
rmask = gmask = bmask = amask = 0
if mode in ("1", "L", "P"):
# 1 = B/W, 1 bit per byte
# "L" = greyscale, 8-bit
# "P" = palette-based, 8-bit
pitch = width
depth = 8
elif mode == "RGB":
# 3x8-bit, 24bpp
if endian.SDL_BYTEORDER == endian.SDL_LIL_ENDIAN:
rmask = 0x0000FF
gmask = 0x00FF00
bmask = 0xFF0000
else:
rmask = 0xFF0000
gmask = 0x00FF00
bmask = 0x0000FF
depth = 24
pitch = width * 3
elif mode in ("RGBA", "RGBX"):
# RGBX: 4x8-bit, no alpha
# RGBA: 4x8-bit, alpha
if endian.SDL_BYTEORDER == endian.SDL_LIL_ENDIAN:
rmask = 0x000000FF
gmask = 0x0000FF00
bmask = 0x00FF0000
if mode == "RGBA":
amask = 0xFF000000
else:
rmask = 0xFF000000
gmask = 0x00FF0000
bmask = 0x0000FF00
if mode == "RGBA":
amask = 0x000000FF
depth = 32
pitch = width * 4
else:
# We do not support CMYK or YCbCr for now
raise TypeError("unsupported image format")
pxbuf = image.tobytes()
imgsurface = surface.SDL_CreateRGBSurfaceFrom(pxbuf, width, height,
depth, pitch, rmask,
gmask, bmask, amask)
if not imgsurface:
raise SDLError()
imgsurface = imgsurface.contents
# the pixel buffer must not be freed for the lifetime of the surface
imgsurface._pxbuf = pxbuf
if mode == "P":
# Create a SDL_Palette for the SDL_Surface
def _chunk(seq, size):
for x in range(0, len(seq), size):
yield seq[x:x + size]
rgbcolors = image.getpalette()
sdlpalette = pixels.SDL_AllocPalette(len(rgbcolors) // 3)
if not sdlpalette:
raise SDLError()
SDL_Color = pixels.SDL_Color
for idx, (r, g, b) in enumerate(_chunk(rgbcolors, 3)):
sdlpalette.contents.colors[idx] = SDL_Color(r, g, b)
ret = surface.SDL_SetSurfacePalette(imgsurface, sdlpalette)
# This will decrease the refcount on the palette, so it gets
# freed properly on releasing the SDL_Surface.
pixels.SDL_FreePalette(sdlpalette)
if ret != 0:
raise SDLError()
# If the image has a single transparent palette index, set
# that index as the color key to make blitting correct.
if 'transparency' in image.info and isinstance(image.info['transparency'], int):
            surface.SDL_SetColorKey(imgsurface, True, image.info['transparency'])
return imgsurface
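# Hedged usage sketch (not part of the original module; "example.png" is a
# placeholder path and this module normally ships as part of sdl2.ext):
#
#   from sdl2.ext import get_image_formats, load_image
#   print(get_image_formats())
#   img = load_image("example.png")   # uses SDL_image and/or PIL depending on availability
#   print(img.w, img.h)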
|
the-stack_0_6930 | """
Contains website related routes and views.
"""
import json
from operator import itemgetter
import os
from urllib import parse as urlparse
import boto3
from boto3.exceptions import Boto3Error
from botocore.exceptions import BotoCoreError
from pyramid.decorator import reify
from pyramid.events import NewResponse
from pyramid.events import subscriber
from pyramid.renderers import get_renderer
from pyramid.response import FileResponse
from pyramid.response import Response
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPNotFound
from ichnaea.conf import settings
from ichnaea.content.stats import global_stats, histogram, regions
from ichnaea.models.content import StatKey
from ichnaea import util
HERE = os.path.dirname(__file__)
IMAGE_PATH = os.path.join(HERE, "static", "images")
FAVICON_PATH = os.path.join(IMAGE_PATH, "favicon.ico")
TOUCHICON_PATH = os.path.join(IMAGE_PATH, "apple-touch-icon.png")
CSP_BASE = "'self'"
# See https://docs.mapbox.com/mapbox-gl-js/api/#csp-directives
CSP_POLICY = """\
default-src 'self';
connect-src {base} {tiles} *.tiles.mapbox.com api.mapbox.com events.mapbox.com;
font-src {base};
img-src {base} {tiles} api.mapbox.com data: blob:;
script-src {base} data: 'unsafe-eval';
style-src {base};
child-src blob:;
worker-src blob:;
"""
CSP_POLICY = CSP_POLICY.replace("\n", " ").strip()
TILES_PATTERN = "{z}/{x}/{y}.png"
HOMEPAGE_MAP_IMAGE = (
"https://api.mapbox.com/styles/v1/mapbox/dark-v10/tiles"
"/256/0/0/0@2x?access_token={token}"
)
def get_map_tiles_url(asset_url):
"""Compute tiles url for maps based on the asset_url.
:arg str asset_url: the url to static assets or ''
:returns: tiles_url
"""
asset_url = asset_url if asset_url else "/static/datamap/"
if not asset_url.endswith("/"):
asset_url = asset_url + "/"
return urlparse.urljoin(asset_url, "tiles/" + TILES_PATTERN)
def get_csp_policy(asset_url):
"""Return value for Content-Security-Policy HTTP header.
:arg str asset_url: the url to static assets or ''
:returns: CSP policy string
"""
result = urlparse.urlsplit(asset_url)
map_tiles_src = urlparse.urlunparse((result.scheme, result.netloc, "", "", "", ""))
return CSP_POLICY.format(base=CSP_BASE, tiles=map_tiles_src)
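# Illustrative example (not part of the original module; cdn.example.com is a placeholder):
#   get_map_tiles_url("https://cdn.example.com/assets")
#     -> "https://cdn.example.com/assets/tiles/{z}/{x}/{y}.png"
#   get_csp_policy("https://cdn.example.com/assets")
#     -> CSP string whose img-src/connect-src allow https://cdn.example.com next to the Mapbox hosts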
def configure_content(config):
config.add_view(
favicon_view, name="favicon.ico", http_cache=(86400, {"public": True})
)
config.registry.skip_logging.add("/favicon.ico")
config.add_view(
robotstxt_view, name="robots.txt", http_cache=(86400, {"public": True})
)
config.registry.skip_logging.add("/robots.txt")
config.add_view(
touchicon_view,
name="apple-touch-icon-precomposed.png",
http_cache=(86400, {"public": True}),
)
config.registry.skip_logging.add("/apple-touch-icon-precomposed.png")
config.add_static_view(
name="static", path="ichnaea.content:static", cache_max_age=86400
)
config.add_route("stats_regions", "/stats/regions")
config.add_route("stats", "/stats")
config.scan("ichnaea.content.views")
@subscriber(NewResponse)
def security_headers(event):
response = event.response
# Headers for all responses.
response.headers.add(
"Strict-Transport-Security", "max-age=31536000; includeSubDomains"
)
response.headers.add("X-Content-Type-Options", "nosniff")
# Headers for HTML responses.
if response.content_type == "text/html":
response.headers.add(
"Content-Security-Policy", get_csp_policy(settings("asset_url"))
)
response.headers.add("X-Frame-Options", "DENY")
response.headers.add("X-XSS-Protection", "1; mode=block")
def s3_list_downloads(raven_client):
files = {"full": [], "diff1": [], "diff2": []}
if not settings("asset_bucket"):
return files
asset_url = settings("asset_url")
if not asset_url.endswith("/"):
asset_url = asset_url + "/"
diff = []
full = []
try:
s3 = boto3.resource("s3")
bucket = s3.Bucket(settings("asset_bucket"))
for obj in bucket.objects.filter(Prefix="export/"):
name = obj.key.split("/")[-1]
path = urlparse.urljoin(asset_url, obj.key)
# round to kilobyte
size = int(round(obj.size / 1024.0, 0))
file = dict(name=name, path=path, size=size)
if "diff-" in name:
diff.append(file)
elif "full-" in name:
full.append(file)
except (Boto3Error, BotoCoreError):
raven_client.captureException()
return files
half = len(diff) // 2 + len(diff) % 2
diff = list(sorted(diff, key=itemgetter("name"), reverse=True))
files["diff1"] = diff[:half]
files["diff2"] = diff[half:]
files["full"] = list(sorted(full, key=itemgetter("name"), reverse=True))
return files
class ContentViews(object):
def __init__(self, request):
self.request = request
self.session = request.db_session
self.redis_client = request.registry.redis_client
@reify
def base_template(self):
renderer = get_renderer("templates/base.pt")
return renderer.implementation().macros["layout"]
@property
def this_year(self):
return "%s" % util.utcnow().year
def _get_cache(self, cache_key):
cache_key = self.redis_client.cache_keys[cache_key]
cached = self.redis_client.get(cache_key)
if cached:
return json.loads(cached)
return None
def _set_cache(self, cache_key, data, ex=3600):
cache_key = self.redis_client.cache_keys[cache_key]
self.redis_client.set(cache_key, json.dumps(data), ex=ex)
def is_map_enabled(self):
"""Return whether maps are enabled.
Enable maps if and only if there's a mapbox token and a url for the
tiles location. Otherwise it's disabled.
"""
return bool(settings("mapbox_token"))
@view_config(renderer="templates/homepage.pt", http_cache=3600)
def homepage_view(self):
map_tiles_url = get_map_tiles_url(settings("asset_url"))
image_base_url = HOMEPAGE_MAP_IMAGE.format(token=settings("mapbox_token"))
image_url = map_tiles_url.format(z=0, x=0, y="0@2x")
return {
"page_title": "Overview",
"map_enabled": self.is_map_enabled(),
"map_image_base_url": image_base_url,
"map_image_url": image_url,
}
@view_config(renderer="templates/api.pt", name="api", http_cache=3600)
def api_view(self):
return {"page_title": "API"}
@view_config(renderer="templates/contact.pt", name="contact", http_cache=3600)
def contact_view(self):
return {"page_title": "Contact Us"}
@view_config(renderer="templates/downloads.pt", name="downloads", http_cache=3600)
def downloads_view(self):
data = self._get_cache("downloads")
if data is None:
data = s3_list_downloads(self.request.registry.raven_client)
self._set_cache("downloads", data, ex=1800)
return {"page_title": "Downloads", "files": data}
@view_config(renderer="templates/optout.pt", name="optout", http_cache=3600)
def optout_view(self):
return {"page_title": "Opt-Out"}
@view_config(renderer="templates/privacy.pt", name="privacy", http_cache=3600)
def privacy_view(self):
return {"page_title": "Privacy Notice"}
@view_config(renderer="templates/map.pt", name="map", http_cache=3600)
def map_view(self):
map_tiles_url = get_map_tiles_url(settings("asset_url"))
return {
"page_title": "Map",
"map_enabled": self.is_map_enabled(),
"map_tiles_url": map_tiles_url,
"map_token": settings("mapbox_token"),
}
@view_config(renderer="json", name="map.json", http_cache=3600)
def map_json(self):
map_tiles_url = get_map_tiles_url(settings("asset_url"))
offset = map_tiles_url.find(TILES_PATTERN)
base_url = map_tiles_url[:offset]
return {"tiles_url": base_url}
@view_config(renderer="json", name="stats_blue.json", http_cache=3600)
def stats_blue_json(self):
data = self._get_cache("stats_blue_json")
if data is None:
data = histogram(self.session, StatKey.unique_blue)
self._set_cache("stats_blue_json", data)
return {"series": [{"title": "MLS Bluetooth", "data": data[0]}]}
@view_config(renderer="json", name="stats_cell.json", http_cache=3600)
def stats_cell_json(self):
data = self._get_cache("stats_cell_json")
if data is None:
data = histogram(self.session, StatKey.unique_cell)
self._set_cache("stats_cell_json", data)
return {"series": [{"title": "MLS Cells", "data": data[0]}]}
@view_config(renderer="json", name="stats_wifi.json", http_cache=3600)
def stats_wifi_json(self):
data = self._get_cache("stats_wifi_json")
if data is None:
data = histogram(self.session, StatKey.unique_wifi)
self._set_cache("stats_wifi_json", data)
return {"series": [{"title": "MLS WiFi", "data": data[0]}]}
@view_config(renderer="templates/stats.pt", route_name="stats", http_cache=3600)
def stats_view(self):
data = self._get_cache("stats")
if data is None:
data = {"metrics1": [], "metrics2": []}
metric_names = [
("1", StatKey.unique_blue.name, "Bluetooth Networks"),
("1", StatKey.blue.name, "Bluetooth Observations"),
("1", StatKey.unique_wifi.name, "Wifi Networks"),
("1", StatKey.wifi.name, "Wifi Observations"),
("2", StatKey.unique_cell.name, "MLS Cells"),
("2", StatKey.cell.name, "MLS Cell Observations"),
]
metrics = global_stats(self.session)
for i, mid, name in metric_names:
data["metrics" + i].append({"name": name, "value": metrics[mid]})
self._set_cache("stats", data)
result = {"page_title": "Statistics"}
result.update(data)
return result
@view_config(
renderer="templates/stats_regions.pt",
route_name="stats_regions",
http_cache=3600,
)
def stats_regions_view(self):
data = self._get_cache("stats_regions")
if data is None:
data = regions(self.session)
self._set_cache("stats_regions", data)
return {"page_title": "Regions", "metrics": data}
@view_config(renderer="templates/terms.pt", name="terms", http_cache=3600)
def terms_of_service(self):
return {
"page_title": (
"Developer Terms of Service:" " Mozilla Location Service Query API"
)
}
@view_config(context=HTTPNotFound)
def default_not_found(exc):
response = Response("<h1>404 Not Found</h1>")
response.status_int = 404
return response
def favicon_view(request):
return FileResponse(FAVICON_PATH, request=request)
def touchicon_view(request):
return FileResponse(TOUCHICON_PATH, request=request)
_ROBOTS_RESPONSE = """\
User-agent: *
Disallow: /downloads
Disallow: /static/
Disallow: /v1/
Disallow: /v2/
Disallow: /__heartbeat__
Disallow: /__lbheartbeat__
Disallow: /__version__
"""
def robotstxt_view(context, request):
return Response(content_type="text/plain", body=_ROBOTS_RESPONSE)
|
the-stack_0_6931 | """Testing v0x05 error message class."""
from pyof.v0x05.asynchronous.error_msg import ErrorMsg
from tests.test_struct import TestStruct
class TestErrorMsg(TestStruct):
"""ErroMsg message tests (also those in :class:`.TestDump`)."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x05', 'ofpt_error_msg')
super().set_raw_dump_object(ErrorMsg, xid=1, error_type=1, code=1)
super().set_minimum_size(12)
|
the-stack_0_6935 | # MIT License
#
# Copyright (c) 2020 Jonathan Zernik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import threading
from typing import Dict
from typing import List
from typing import Optional
from squeaknode.core.peer_address import PeerAddress
from squeaknode.network.peer import Peer
from squeaknode.node.listener_subscription_client import EventListener
MIN_PEERS = 5
MAX_PEERS = 10
UPDATE_THREAD_SLEEP_TIME = 10
logger = logging.getLogger(__name__)
class ConnectionManager(object):
"""Maintains connections to other peers in the network.
"""
def __init__(self):
self._peers: Dict[PeerAddress, Peer] = {}
self.peers_lock = threading.Lock()
self.peer_changed_listener = EventListener()
self.single_peer_changed_listener = EventListener()
self.accept_connections = True
@property
def peers(self) -> List[Peer]:
return list(self._peers.values())
def has_connection(self, address):
"""Return True if the address is already connected."""
return address in self._peers
def on_peers_changed(self):
peers = self.peers
logger.info('Current number of peers {}'.format(len(peers)))
logger.info('Current peers:--------')
for peer in peers:
logger.info(peer)
logger.info('--------------')
self.peer_changed_listener.handle_new_item(peers)
def _is_duplicate_nonce(self, peer):
for other_peer in self.peers:
if other_peer.local_version:
if peer.remote_version == other_peer.local_version.nNonce:
return True
return False
def add_peer(self, peer: Peer):
"""Add a peer.
"""
with self.peers_lock:
if not self.accept_connections:
raise NotAcceptingConnectionsError()
if self._is_duplicate_nonce(peer):
logger.debug('Failed to add peer {}'.format(peer))
raise DuplicateNonceError()
if self.has_connection(peer.remote_address):
logger.debug('Failed to add peer {}'.format(peer))
raise DuplicatePeerError()
self._peers[peer.remote_address] = peer
logger.debug('Added peer {}'.format(peer))
self.on_peers_changed()
    def remove_peer(self, peer: Peer):
        """Remove a peer.
"""
with self.peers_lock:
if not self.has_connection(peer.remote_address):
logger.debug('Failed to remove peer {}'.format(peer))
raise MissingPeerError()
del self._peers[peer.remote_address]
logger.debug('Removed peer {}'.format(peer))
self.on_peers_changed()
def get_peer(self, address) -> Optional[Peer]:
"""Get a peer info by address.
"""
return self._peers.get(address)
def stop_connection(self, address):
"""Stop peer connections for address.
"""
with self.peers_lock:
peer = self.get_peer(address)
if peer is not None:
peer.stop()
def stop_all_connections(self):
"""Stop all peer connections.
"""
self.accept_connections = False
with self.peers_lock:
for peer in self.peers:
peer.stop()
def yield_peers_changed(self, stopped: threading.Event):
yield from self.peer_changed_listener.yield_items(stopped)
def yield_single_peer_changed(self, peer_address: PeerAddress, stopped: threading.Event):
for peer in self.single_peer_changed_listener.yield_items(stopped):
logger.debug('yield_single_peer_changed: {}'.format(peer))
if peer.remote_address == peer_address:
if peer.connect_time is None:
yield None
else:
yield peer
class DuplicatePeerError(Exception):
pass
class DuplicateNonceError(Exception):
pass
class MissingPeerError(Exception):
pass
class NotAcceptingConnectionsError(Exception):
pass
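# Hedged usage sketch (not from the original project; Peer objects are created by
# networking code outside this module):
#
#   manager = ConnectionManager()
#   manager.add_peer(peer)            # may raise DuplicatePeerError / DuplicateNonceError
#   manager.get_peer(peer.remote_address)
#   manager.stop_all_connections()    # stops peers and rejects further add_peer calls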
|
the-stack_0_6938 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Ternaris, Munich, Germany
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division
import hashlib
import os
from collections import namedtuple
from datetime import datetime
from fnmatch import fnmatch
from logging import getLogger
from .model import File, Fileset
from ._utils import multiplex
from .widgeting import make_register, WidgetBase
class Scanner(WidgetBase):
def __init__(self, pattern, **kw):
super(Scanner, self).__init__(**kw)
self.pattern = pattern
def __call__(self, fileinfos):
filtered = (x for x in fileinfos
if fnmatch(os.path.join(x.dirpath, x.name), self.pattern))
return self.callback(filtered)
SCANNER = dict()
scanner = make_register('scanner', namespace='', registry=SCANNER, cls=Scanner)
FileInfo = namedtuple('FileInfo', ('dirpath', 'name'))
FilesetInfo = namedtuple('FilesetInfo', ('type', 'dirpath', 'name', 'indexed_files'))
class BrokenFileset(Exception):
def __init__(self, *args, **kw):
super(BrokenFileset, self).__init__(self.__class__.__name__, *args, **kw)
class MissingFile(BrokenFileset):
pass
class MissingMD5(BrokenFileset):
pass
class EmptyFile(BrokenFileset):
pass
def detect_filesets(basedir, scanners):
"""Walk basedir using scanners to detect filesets, return filesetinfos"""
logger = getLogger(__name__)
assert os.path.isdir(basedir)
assert len(scanners) > 0
for dirpath, subdirs, filenames in os.walk(basedir):
logger.debug('Scanning %s', dirpath)
fileinfos = (FileInfo(dirpath, filename)
for filename in filenames
if filename[0] != '.' and # skip hidden files
filename[-4:] != '.md5') # skip md5 files
for filesetinfo in list(multiplex(fileinfos, scanners, dont_catch=True)):
yield filesetinfo
def make_file(fileinfo):
"""Make File model from FileInfo"""
path = os.path.join(fileinfo.dirpath, fileinfo.name)
md5file = '{}.md5'.format(path)
try:
with open(md5file, 'rb') as f:
md5 = f.read(32)
except IOError:
raise MissingMD5(fileinfo.dirpath, fileinfo.name)
stat = os.stat(path)
size = stat.st_size
if size == 0:
raise EmptyFile(fileinfo.dirpath, fileinfo.name)
return File(name=fileinfo.name, md5=md5, size=size)
def make_fileset(filesetinfo):
"""Make Fileset model from FilesetInfo"""
files = []
md5 = hashlib.md5()
for idx, fileinfo in filesetinfo.indexed_files:
file = make_file(fileinfo)
files.append(file)
md5.update(file.md5)
missing_files = 1 + idx - len(files)
if missing_files:
dirpath = filesetinfo.indexed_files[0][1].dirpath
raise MissingFile(dirpath, filesetinfo.name, missing_files)
now = datetime.utcnow()
return Fileset(name=filesetinfo.name, md5=md5.hexdigest(),
dirpath=filesetinfo.dirpath, type=filesetinfo.type,
files=files, time_added=now, time_updated=now)
def scan(basedir, scanner=SCANNER):
"""Scan basedir, return Fileset models, log warning for broken sets"""
logger = getLogger(__name__)
scanners = scanner.values()
for filesetinfo in detect_filesets(basedir, scanners):
try:
fileset = make_fileset(filesetinfo)
except BrokenFileset as e:
logger.warn(e)
continue
yield fileset
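# Hedged sketch of registering a fileset type (the exact signature of the `scanner`
# register comes from .widgeting and is assumed here, not shown in this file):
#
#   @scanner('*.bag')
#   def bag_scanner(fileinfos):
#       for info in fileinfos:
#           yield FilesetInfo('bag', info.dirpath, info.name, [(0, info)])
#
# scan('/data') would then yield a Fileset model (with md5 sums) per detected set.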
|
the-stack_0_6939 | from snovault import (
CONNECTION,
upgrade_step,
)
@upgrade_step('fastqc_quality_metric', '2', '3')
def fastqc_quality_metric_2_3(value, system):
# http://redmine.encodedcc.org/issues/3897
# get from the file the lab and award for the attribution!!!
conn = system['registry'][CONNECTION]
f = conn.get_by_uuid(value['quality_metric_of'][0])
award_uuid = str(f.properties['award'])
lab_uuid = str(f.properties['lab'])
award = conn.get_by_uuid(award_uuid)
lab = conn.get_by_uuid(lab_uuid)
value['award'] = '/awards/'+str(award.properties['name'])+'/'
value['lab'] = '/labs/'+str(lab.properties['name'])+'/'
@upgrade_step('fastqc_quality_metric', '3', '4')
def fastqc_quality_metric_3_4(value, system):
return
@upgrade_step('fastqc_quality_metric', '4', '5')
def fastqc_quality_metric_4_5(value, system):
# http://redmine.encodedcc.org/issues/2491
if 'assay_term_id' in value:
del value['assay_term_id']
if 'notes' in value:
if value['notes']:
value['notes'] = value['notes'].strip()
else:
del value['notes']
|
the-stack_0_6940 | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
import os
from six.moves.configparser import ConfigParser
from traits.api import Str, Property, cached_property, Int, \
Any, String, Event, Bool, Dict, List, Button, CInt
# ============= local library imports ==========================
from pychron.core.helpers.filetools import glob_list_directory
from pychron.dvc.dvc_irradiationable import DVCAble
from pychron.entry.entry_views.user_entry import UserEntry
from pychron.globals import globalv
from pychron.paths import paths
from pychron.persistence_loggable import PersistenceLoggable
from pychron.pychron_constants import NULL_STR, LINE_STR
class ExperimentQueueFactory(DVCAble, PersistenceLoggable):
application = Any
username = String
email = Property(depends_on='username, use_email, _email')
_email = Str
_emails = Dict
use_group_email = Bool
use_email = Bool
edit_emails = Button
usernames = Property(depends_on='users_dirty, db_refresh_needed')
edit_user = Event
add_user = Event
users_dirty = Event
db_refresh_needed = Event
mass_spectrometer = String('Spectrometer')
mass_spectrometers = Property(depends_on='db_refresh_needed')
extract_device = String('Extract Device')
extract_devices = Property(depends_on='db_refresh_needed')
queue_conditionals_name = Str
available_conditionals = List
delay_between_analyses = Int(30)
delay_before_analyses = Int(5)
delay_after_blank = Int(15)
delay_after_air = Int(15)
tray = Str
trays = Property
note = Str
default_lighting = CInt(0)
load_name = Str
select_existing_load_name_button = Button
ok_make = Property(depends_on='mass_spectrometer, username')
persistence_name = 'queue_factory'
pattributes = ('mass_spectrometer',
'extract_device',
'use_group_email',
'delay_between_analyses',
'delay_before_analyses',
'delay_after_blank',
'delay_after_air',
'default_lighting',
'queue_conditionals_name')
def activate(self, load_persistence):
"""
called by ExperimentFactory
"""
self._load_queue_conditionals()
if load_persistence:
self.load()
self.username = globalv.username
def deactivate(self):
"""
called by ExperimentFactory.destroy
"""
self.dump()
# persistence
def _load_queue_conditionals(self):
root = paths.queue_conditionals_dir
cs = glob_list_directory(root, remove_extension=True)
self.available_conditionals = [NULL_STR] + cs
def _select_existing_load_name_button_fired(self):
db = self.get_database()
if db is None or not db.connect():
self.warning_dialog('Not connected to a database')
else:
with db.session_ctx(use_parent_session=False):
loads = db.get_loads()
from pychron.database.views.load_view import LoadView
lv = LoadView(records = loads)
info = lv.edit_traits()
if info.result:
self.load_name = lv.selected.name
self.tray = lv.selected.holderName
# ===============================================================================
# property get/set
# ===============================================================================
def _get_email(self):
email = ''
if self.use_email:
if self._email:
email = self._email
else:
if self.username in self._emails:
email = self._emails[self.username]
return email
def _set_email(self, v):
self._email = v
@cached_property
def _get_ok_make(self):
ms = self.mass_spectrometer.strip()
un = self.username.strip()
return bool(ms and ms not in ('Spectrometer', LINE_STR) and un)
@cached_property
def _get_trays(self):
db = self.get_database()
if db is None or not db.connect():
return []
trays = [NULL_STR]
dbtrays = db.get_load_holders()
if dbtrays:
trays.extend(dbtrays)
return trays
@cached_property
def _get_usernames(self):
db = self.get_database()
if db is None or not db.connect():
return []
us = []
with db.session_ctx(use_parent_session=False):
dbus = db.get_users()
if dbus:
us = [ui.name for ui in dbus]
self._emails = {ui.name: ui.email or '' for ui in dbus}
return [''] + us
@cached_property
def _get_extract_devices(self):
"""
look in db first
then look for a config file
then use hardcorded defaults
"""
db = self.get_database()
cp = os.path.join(paths.setup_dir, 'names')
if db:
if not db.connect():
return []
with db.session_ctx(use_parent_session=False):
names = db.get_extraction_device_names()
elif os.path.isfile(cp):
names = self._get_names_from_config(cp, 'Extraction Devices')
else:
names = ['Fusions Diode', 'Fusions UV', 'Fusions CO2']
return ['Extract Device', LINE_STR] + names
@cached_property
def _get_mass_spectrometers(self):
"""
look in db first
then look for a config file
then use hardcorded defaults
"""
db = self.get_database()
cp = os.path.join(paths.setup_dir, 'names')
if db:
if not db.connect():
self.warning('not connected to database')
return []
with db.session_ctx(use_parent_session=False):
ms = db.get_mass_spectrometer_names()
names = [mi.capitalize() for mi in ms]
elif os.path.isfile(cp):
names = self._get_names_from_config(cp, 'Mass Spectrometers')
else:
names = ['Jan', 'Obama']
return ['Spectrometer', LINE_STR] + names
def _get_names_from_config(self, cp, section):
config = ConfigParser()
config.read(cp)
if config.has_section(section):
return [config.get(section, option) for option in config.options(section)]
# handlers
def _edit_user_fired(self):
a = UserEntry(dvc=self.dvc)
nuser = a.edit(self.username)
if nuser:
self.users_dirty = True
self.username = nuser
def _mass_spectrometer_changed(self, new):
self.debug('mass spectrometer ="{}"'.format(new))
def _edit_emails_fired(self):
task = self.application.open_task('pychron.users')
task.auto_save = True
if __name__ == '__main__':
g = ExperimentQueueFactory()
g.configure_traits()
# ============= EOF =============================================
|
the-stack_0_6941 | import sqlite3
def make_db():
con = sqlite3.connect("Paths.db")
c = con.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS Paths
( Fromm text not null, Tooo text not null)''')
con.commit()
def insert(x,y):
con = sqlite3.connect("Paths.db")
SQLinsertfb = '''INSERT INTO Paths (Fromm,Tooo) VALUES(?,?)'''
c = con.cursor()
c.execute(SQLinsertfb, (x,y))
con.commit()
def get_last_element():
Sqlmaxid='''SELECT Fromm,Tooo FROM Paths WHERE rowid=(SELECT MAX(rowid) FROM Paths)'''
con = sqlite3.connect("Paths.db")
c=con.cursor()
returned=c.execute(Sqlmaxid)
returned=[list(elem) for elem in returned]
con.commit()
return returned
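# Hypothetical usage sketch (not part of the original module): creates Paths.db,
# stores one from/to pair and reads back the most recent row.
if __name__ == "__main__":
    make_db()
    insert("A", "B")
    print(get_last_element())  # -> [['A', 'B']]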
|
the-stack_0_6942 | from nominal_unification.Syntax import *
class Closure():
""" A closure represents an expression within a context with bindings.
Variables within said expression may or may not be captured by the
scope.
"""
def __init__(self, expr, scope):
self.expr = expr
self.scope = scope
def __str__(self):
return "〈" + str(self.expr) + "; " + str(self.scope) + "〉"
__repr__ = __str__
def alphaEq(clo1, clo2):
""" Test if two closures are equivalent. Determines the alpha-equivalence
of two expressions with respect to their scopes.
If both terms are free in their respective scopes and have the same
string, then the closures are equivalent. If both are bound by their
respective scopes at the same index then they are also the same
closure, even if the terms have different strings. They are not the
same closure, otherwise.
See [Same-Free] and [Same-Bound] in Figure 1.
"""
l1 = lookupName(clo1.expr, clo1.scope)
l2 = lookupName(clo2.expr, clo2.scope)
# [Same-Free] Figure 3
# a1 = a2
# Φ1 ⊦ Fr a1
# Φ2 ⊦ Fr a2
# -------------------
# 〈a1; Φ1〉 ≈ 〈a2; Φ2〉
if isinstance(l1, Free) and isinstance(l2, Free):
return clo1.expr == clo2.expr
# [Same-Bound] Figure 3
# i1 = i2
# Φ1 ⊦ Bd a1 i1
# Φ2 ⊦ Bd a2 i2
# -------------------
# 〈a1; Φ1〉 ≈ 〈a2; Φ2〉
elif isinstance(l1, Bound) and isinstance(l2, Bound):
return l1.index == l2.index
else:
return False
class NuEquation():
""" Represents constraint equations between expressions which are either
names or variables. The first term in the equation should always be a
name, while the second is either a name (an NN problem) or a variable
(an NV problem).
These equations are used by nu machines to derive maps from variables
to names.
A "nu problem" is a list of nu equations.
See Figure 4.
"""
def __init__(self, clo1, clo2):
if not isName(clo1.expr):
raise UnificationError(
"First argument, " +
str(clo1) +
", of Nu Equation must be a name.")
if not isName(clo2.expr) and not isinstance(clo2.expr, Var):
raise UnificationError(
"Second argument, " +
str(clo2) +
", of Nu Equation must be a name or a variable.")
self.clo1 = clo1 # Clo Name
self.clo2 = clo2
self.var = isinstance(clo2.expr, Var)
# If self.var is true, then self.clo2 will be a closure over
# a Var, otherwise it's a closure over an Name.
def __str__(self):
if self.var:
return "(" + str(self.clo1) + " ≈NV " + str(self.clo2) + ")"
else:
return "(" + str(self.clo1) + " ≈NN " + str(self.clo2) + ")"
__repr__ = __str__
class DeltaEquation():
""" Represents constraint equations between expressions which are variables
(a VV problem).
These equations are used by delta machines to derive unifiers between
sets of variables.
A "delta problem" is a list of delta equations.
See Figure 4.
"""
def __init__(self, clo1, clo2):
if not isinstance(clo1.expr, Var):
raise UnificationError(
"First argument, " +
str(clo1) +
", of Delta Equation must be a variable.")
if not isinstance(clo2.expr, Var):
raise UnificationError(
"Second argument, " +
str(clo2) +
", of Delta Equation must be a variable.")
self.clo1 = clo1 # Clo Var
self.clo2 = clo2 # Clo Var
def __str__(self):
return "(" + str(self.clo1) + " ≈VV " + str(self.clo2) + ")"
__repr__ = __str__
class MultiEquation():
""" Represents a constraint equation between two expressions.
Used by rho machines to compute the nu problems and delta problems to
be fed into the nu and delta machines.
A "rho problem" is a list of multiequations.
See Figure 7.
"""
def __init__(self, clo1, clo2):
self.clo1 = clo1 # Clo Expr
self.clo2 = clo2 # Clo Expr
def __str__(self):
return "(" + str(self.clo1) + " ≈ " + str(self.clo2) + ")"
__repr__ = __str__
def extendSubst(var, expr, sub):
""" Given a variable and an expression it should be substituted for, extend
the substitution with that mapping.
This exists as a non-stateful way to extend substitutions. That is,
this creates a new substitution, rather than modifying an existing one.
"""
subp = sub.copy()
subp[var.string] = expr
return subp
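
# A minimal sketch (not part of the original module): extendSubst only relies on
# the variable exposing a `.string` attribute, so a tiny stand-in object is used
# here purely for illustration; in real use a Var from
# nominal_unification.Syntax is what gets passed in.
if __name__ == "__main__":
    class _FakeVar:
        def __init__(self, string):
            self.string = string

    sub = {}
    sub = extendSubst(_FakeVar("x"), "someExpr", sub)
    print(sub)  # {'x': 'someExpr'}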
|
the-stack_0_6943 | """
Utility functions for cmiles generator
"""
import numpy as np
import copy
import collections
import warnings
try:
from rdkit import Chem
has_rdkit = True
except ImportError:
has_rdkit = False
try:
from openeye import oechem
    if not oechem.OEChemIsLicensed():
        has_openeye = False
    else:
        has_openeye = True
except ImportError:
has_openeye = False
if not has_openeye and not has_rdkit:
raise ImportError("Must have openeye or rdkit installed")
_symbols = {'H': 1, 'He': 2,
'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O': 8, 'F': 9, 'Ne': 10,
            'Na': 11, 'Mg': 12, 'Al': 13, 'Si': 14, 'P': 15, 'S': 16, 'Cl': 17, 'Ar': 18,
'K': 19, 'Ca': 20, 'Sc': 21, 'Ti': 22, 'V': 23, 'Cr': 24, 'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29,
'Zn': 30, 'Ga': 31, 'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36, 'Rb': 37, 'Sr': 38, 'Y': 39, 'Zr': 40,
'Nb': 41, 'Mo': 42, 'Tc': 43, 'Ru': 44, 'Rh': 45, 'Pd': 46, 'Ag': 47, 'Cd': 48, 'In': 49, 'Sn': 50, 'Sb': 51,
'Te': 52, 'I': 53, 'Xe': 54, 'Cs': 55, 'Ba': 56, 'La': 57, 'Ce': 58, 'Pr': 59, 'Nd': 60, 'Pm': 61, 'Sm': 62,
            'Eu': 63, 'Gd': 64, 'Tb': 65, 'Dy': 66, 'Ho': 67, 'Er': 68, 'Tm': 69, 'Yb': 70, 'Lu': 71, 'Hf': 72, 'Ta': 73,
            'W': 74, 'Re': 75, 'Os': 76, 'Ir': 77, 'Pt': 78, 'Au': 79, 'Hg': 80, 'Tl': 81, 'Pb': 82, 'Bi': 83, 'Po': 84,
            'At': 85, 'Rn': 86, 'Fr': 87, 'Ra': 88, 'Ac': 89, 'Th': 90, 'Pa': 91, 'U': 92, 'Np': 93, 'Pu': 94, 'Am': 95,
'Cm': 96, 'Bk': 97, 'Cf': 98, 'Es': 99, 'Fm': 100, 'Md': 101, 'No': 102, 'Lr': 103, 'Rf': 104, 'Db': 105,
'Sg': 106, 'Bh': 107, 'Hs': 108, 'Mt': 109}
BOHR_2_ANGSTROM = 0.529177210
ANGSROM_2_BOHR = 1. / BOHR_2_ANGSTROM
def load_molecule(inp_molecule, toolkit='openeye', **kwargs):
"""
Load molecule.
Input is restrictive. Allowed inputs are:
1. Isomeric SMILES
2. JSON serialized molecule
Parameters
----------
inp_molecule: str or dict
isomeric SMILES or QCSChema
toolkit: str, optional, default openeye.
cheminformatics toolkit to use
Returns
-------
molecule:
`oechem.OEMOl` or `rdkit.Chem.Mol`
"""
# Check input
if isinstance(inp_molecule, dict):
# This is a JSON molecule.
molecule = mol_from_json(inp_molecule, toolkit=toolkit, **kwargs)
elif isinstance(inp_molecule, str):
if toolkit == 'openeye' and has_openeye:
molecule = oechem.OEMol()
if not oechem.OESmilesToMol(molecule, inp_molecule):
raise ValueError("The supplied SMILES {} could not be parsed".format(inp_molecule))
elif toolkit == 'rdkit' and has_rdkit:
a = Chem.rdmolfiles.SmilesParserParams()
a.removeHs = False
molecule = Chem.MolFromSmiles(inp_molecule, a)
if not molecule:
raise ValueError("The supplied SMILES {} could not be parsed".format(inp_molecule))
else:
raise ValueError("Only openeye and rdkit toolkits are supported")
else:
raise ValueError("Only QCSchema serialized molecule or an isomric SMILES are valid inputs")
return molecule
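# Hedged usage sketch (illustrative inputs, not shipped test data):
#     mol = load_molecule('CCO', toolkit='rdkit')             # from an isomeric SMILES
#     mol = load_molecule(qcschema_dict, toolkit='openeye')   # from a QCSchema dict
# Either call raises ValueError if the SMILES cannot be parsed or the requested
# toolkit is unavailable.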
def mol_from_json(inp_molecule, toolkit='openeye', **kwargs):
"""
Load a molecule from QCSchema
see `QCSchema <https://molssi-qc-schema.readthedocs.io/en/latest/index.html#>`_
Required fields for the QCSchema molecule:
1. symbols
2. geometry
3. connectivity
Parameters
----------
inp_molecule: dict
QCSchema molecule with `symbols`, `geometry` and `connectivity`
toolkit: str, optional. Default openeye
cheminformatics toolkit to use. Currently supports `openeye` and `rdkit`
**permute_xyz: bool, optional, default False
If False, will add flag to molecule such that the mapped SMILES retains the order of serialized geometry. If True,
mapped SMILES will be in canonical order and serialized geometry will have to be reordered.
Returns
-------
molecule
`oechem.OEMol` or `rdkit.Chem.Mol`
"""
# Check fields
required_fields = ['symbols', 'geometry', 'connectivity']
for key in required_fields:
if key not in inp_molecule:
raise KeyError("input molecule must have {}".format(key))
symbols = inp_molecule['symbols']
connectivity = inp_molecule['connectivity']
# convert to Angstrom.
geometry = np.asarray(inp_molecule['geometry'], dtype=float)*BOHR_2_ANGSTROM
if len(symbols) != geometry.shape[0]/3:
raise ValueError("Number of atoms in molecule does not match length of position array")
if toolkit == 'openeye' and has_openeye:
import cmiles._cmiles_oe as mol_toolkit
elif toolkit == 'rdkit' and has_rdkit:
import cmiles._cmiles_rd as mol_toolkit
else:
raise ValueError("Only openeye and rdkit backends are supported")
molecule = mol_toolkit.mol_from_json(symbols, connectivity, geometry, **kwargs)
return molecule
def mol_to_smiles(molecule, **kwargs):
"""
Generate canonical smiles from molecule
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
**isomeric: bool, optional, default True
If False, SMILES will not include stereo information
**explicit_hydrogen: bool, optional, default True
If True, SMILES will have explicit hydrogen.
**mapped: bool, optional, default True
If True, SMILES will have map indices
Example: O=O will be ``[O:1]=[O:2]``
Returns
-------
str
SMILES
"""
molecule = copy.deepcopy(molecule)
toolkit = _set_toolkit(molecule)
if has_atom_map(molecule):
remove_atom_map(molecule)
return toolkit.mol_to_smiles(molecule, **kwargs)
def to_canonical_label(mapped_smiles, labeled_atoms, toolkit='openeye'):
"""
Generate human readable index with labeled torsions, angles, or bonds
Parameters
----------
mapped_smiles : str
SMILES with map indices
labeled_atoms : tuple of int
ints should correspond to map indices -1 in mapped SMILES
Returns
-------
labeled SMILES
"""
mol = load_molecule(mapped_smiles, toolkit=toolkit)
toolkit = _set_toolkit(mol)
if not has_atom_map(mol):
raise RuntimeError("SMILES must have map indices")
return toolkit.generate_index(mol, labeled_atoms)
def mol_to_hill_molecular_formula(molecule):
"""
Generate Hill sorted empirical formula.
Hill sorted first lists C and H and then all other symbols in alphabetical
order
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
Returns
-------
str
hill sorted empirical formula
"""
# check molecule
toolkit = _set_toolkit(molecule)
if not has_explicit_hydrogen(molecule):
molecule = toolkit.add_explicit_hydrogen(molecule)
symbols = toolkit.get_symbols(molecule)
count = collections.Counter(x.title() for x in symbols)
hill_sorted = []
for k in ['C', 'H']:
# remove C and H from count
if k in count:
c = count.pop(k)
hill_sorted.append(k)
if c > 1:
hill_sorted.append(str(c))
for k in sorted(count.keys()):
c = count[k]
hill_sorted.append(k)
if c > 1:
hill_sorted.append(str(c))
return "".join(hill_sorted)
def mol_to_map_ordered_qcschema(molecule, mapped_smiles, multiplicity=1, **kwargs):
"""
    Generate JSON serialized molecule following `QCSchema specs <https://molssi-qc-schema.readthedocs.io/en/latest/index.html#>`_
Geometry, symbols and connectivity table ordered according to map indices in mapped SMILES
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
        **molecule must have a conformer**.
    mapped_smiles: str
        explicit hydrogen mapped SMILES used to order the geometry and connectivity.
    multiplicity: int, optional, default 1
multiplicity of molecule
Returns
-------
dict
JSON serialized molecule following QCSchema specs
"""
toolkit = _set_toolkit(molecule)
atom_map = toolkit.get_atom_map(molecule, mapped_smiles, **kwargs)
connectivity = get_connectivity_table(molecule, atom_map)
symbols, geometry = toolkit.get_map_ordered_geometry(molecule, atom_map)
charge = get_charge(molecule)
qcschema_mol = {'symbols': symbols, 'geometry': geometry, 'connectivity': connectivity,
'molecular_charge': charge, 'molecular_multiplicity': multiplicity}
return qcschema_mol
def get_atom_map(molecule, mapped_smiles, **kwargs):
"""
Get mapping of map index -> atom index
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
mapped_smiles: str
explicit hydrogen mapped SMILES
Returns
-------
atom_map: dict
dictionary mapping `{map_index: atom_index}`
"""
toolkit = _set_toolkit(molecule)
atom_map = toolkit.get_atom_map(molecule, mapped_smiles)
return atom_map
def get_connectivity_table(molecule, atom_map):
"""
Generate connectivity table
Parameters
----------
molecule:
oechem.Mol or rdkit.Chem.Mol
atom_map: dict
``{map_idx : atom_idx}``
Returns
-------
list: list of lists
lists of atoms bonded and the bond order
[[map_idx_1, map_idx_2, bond_order] ...]
"""
toolkit = _set_toolkit(molecule)
inverse_map = dict(zip(atom_map.values(), atom_map.keys()))
return toolkit.get_connectivity_table(molecule, inverse_map)
def permute_qcschema(json_mol, molecule_ids, **kwargs):
"""
permute geometry and symbols to correspond to map indices on mapped SMILES
Parameters
----------
json_mol: dict
JSON serialized molecule.
Required fields: `symbols`, `geometry`, `connectivity` and `multiplicity`
molecule_ids: dict
cmiles generated molecular ids
Returns
-------
dict
JSON serialized molecule. `symbols`, `geometry`, and `connectivity` ordered according to map indices on mapped
SMILES.
Also includes `identifiers` field with cmiles generated identifiers.
"""
molecule = mol_from_json(json_mol, **kwargs)
ordered_qcschema = mol_to_map_ordered_qcschema(molecule, molecule_ids, json_mol['molecular_multiplicity'])
return ordered_qcschema
def has_atom_map(molecule):
"""
Check if molecule has atom map indices. Will return True even if only one atom has map index
Parameters
----------
molecule:
`oechem.Mol` or `rdkit.Chem.Mol`
Returns
-------
bool
True if has one map index. False if molecule has no map indices
"""
toolkit = _set_toolkit(molecule)
return toolkit.has_atom_map(molecule)
def is_missing_atom_map(molecule):
"""
Check if any atom in molecule is missing atom map index
Parameters
----------
molecule:
oechem.Mol or rdkit.Chem.Mol
Returns
-------
bool
True if even if only one atom map is missing. False if all atoms have atom maps.
"""
toolkit = _set_toolkit(molecule)
return toolkit.is_missing_atom_map(molecule)
def is_map_canonical(molecule):
"""
    Check if map indices on molecule are in canonical order
Parameters
----------
molecule:
`oechem.Mol` or `rdkit.Chem.Mol`
Returns
-------
bool
"""
toolkit = _set_toolkit(molecule)
return toolkit.is_map_canonical(molecule)
def remove_atom_map(molecule, **kwargs):
"""
Remove atom map from molecule
Parameters
----------
molecule
`oechem.OEMol` or `rdkit.Chem.Mol`
keep_map_data: bool, optional, default True
If True, will save map indices in atom data
"""
toolkit = _set_toolkit(molecule)
toolkit.remove_atom_map(molecule, **kwargs)
def restore_atom_map(molecule):
"""
Restore atom map from atom data in place
Parameters
----------
molecule
`oechem.OEMol` or `rdkit.Chem.Mol`
"""
toolkit = _set_toolkit(molecule)
toolkit.restore_atom_map(molecule)
if not has_atom_map(molecule):
warnings.warn("There were no atom maps in atom data to restore")
def add_atom_map(molecule, **kwargs):
"""
Add canonical ordered atom map to molecule
Parameters
----------
molecule :
`oechem.OEMOl` or `rdkit.Chem.Mol`
Returns
-------
molecule with map indices
"""
toolkit = _set_toolkit(molecule)
return toolkit.add_atom_map(molecule, **kwargs)
def has_stereo_defined(molecule):
"""
Checks if molecule has all stereo defined.
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
Returns
-------
bool
True if all stereo defined, False otherwise
Notes
-----
This does not check if all chirality or bond stereo are consistent. The best way to check is to try to generate a
3D conformer. If stereo information is inconsistent, this will fail.
"""
toolkit = _set_toolkit(molecule)
return toolkit.has_stereo_defined(molecule)
def has_explicit_hydrogen(molecule):
#ToDo: Use option in RDKit to generate explicit hydrogen molecules from explicit hydrogen SMILES
"""
Check if molecule has explicit hydrogen.
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
Returns
-------
bool
True if has all explicit H. False otherwise.
"""
toolkit = _set_toolkit(molecule)
return toolkit.has_explicit_hydrogen(molecule)
def add_explicit_hydrogen(molecule):
"""
Add explicit hydrogen to molecule
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
Returns
-------
molecule
`oechem.OEMol` or `rdkit.Chem.Mol` with explict hydrogen
"""
toolkit = _set_toolkit(molecule)
return toolkit.add_explicit_hydrogen(molecule)
def get_charge(molecule):
"""
Get charge state of molecule
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
Returns
-------
int
total charge of molecule
"""
charge = 0
for atom in molecule.GetAtoms():
charge += atom.GetFormalCharge()
return charge
def _set_toolkit(molecule):
"""
Set toolkit to use by checking molecule instance and if the toolkit is installed
Parameters
----------
molecule:
oechem.OEMol or rdkit.Chem.Mol
Returns
-------
toolkit: module
either cmiles._cmiles_oe or cmiles._cmiles_rd
"""
if has_openeye and isinstance(molecule, oechem.OEMolBase):
import cmiles._cmiles_oe as toolkit
elif has_rdkit and isinstance(molecule, Chem.rdchem.Mol):
import cmiles._cmiles_rd as toolkit
else:
raise RuntimeError("Must have openeye or rdkit installed")
return toolkit
def invert_atom_map(atom_map):
"""
Invert atom map `{map_idx:atom_idx} --> {atom_idx:map_idx}`
Parameters
----------
atom_map: dict
`{map_idx:atom_idx}`
Returns
-------
dict
`{atom_idx:map_idx}`
"""
return dict(zip(atom_map.values(), atom_map.keys()))
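
# Minimal sketch (not part of the original module): invert_atom_map is pure
# dictionary manipulation, so it can be demonstrated without either toolkit installed.
if __name__ == "__main__":
    atom_map = {1: 0, 2: 1, 3: 2}        # hypothetical {map_idx: atom_idx}
    print(invert_atom_map(atom_map))     # {0: 1, 1: 2, 2: 3}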
|
the-stack_0_6945 | from cx_Freeze import setup, Executable
includefiles = []
includes = []
excludes = []
packages = ["PIL.Image", "PIL.WebPImagePlugin"]
setup(
name = "WEBP Converter",
version = "0.1.0",
description = "This is my program",
options = {'build_exe': {'includes': includes, 'excludes': excludes, 'packages': packages, 'include_files': includefiles}},
executables = [Executable("converter.py")]
)
|
the-stack_0_6946 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
_here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(_here, 'README.rst'), encoding='utf-8') as f:
README = f.read()
with open(os.path.join(_here, 'LICENSE'), encoding='utf-8') as f:
LICENSE = f.read()
version = {}
with open(os.path.join(_here, 'codeforces', 'version.py')) as f:
exec(f.read(), version)
setup(
name='codeforces',
version=version['__version__'],
description='Simple wrapper for the codeforces API',
long_description=README,
author='Vicfred',
author_email='[email protected]',
url='vicfred.dev',
license=LICENSE,
packages=find_packages(exclude=('tests', 'docs')),
install_requires=[
'requests>=2.20.0'
]
)
|
the-stack_0_6947 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import random
from frappe.utils import random_string
from frappe.desk import query_report
from erpnext.accounts.doctype.journal_entry.journal_entry import get_payment_entry_against_invoice
from erpnext.accounts.doctype.payment_entry.payment_entry import get_payment_entry
from frappe.utils.make_random import get_random
from erpnext.accounts.doctype.payment_request.payment_request import make_payment_request, make_payment_entry
from erpnext.demo.user.sales import make_sales_order
from erpnext.selling.doctype.sales_order.sales_order import make_sales_invoice
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import make_purchase_invoice
def work():
frappe.set_user(frappe.db.get_global('demo_accounts_user'))
if random.random() <= 0.6:
report = "Ordered Items to be Billed"
for so in list(set([r[0] for r in query_report.run(report)["result"]
if r[0]!="Total"]))[:random.randint(1, 5)]:
try:
si = frappe.get_doc(make_sales_invoice(so))
si.posting_date = frappe.flags.current_date
for d in si.get("items"):
if not d.income_account:
d.income_account = "Sales - {}".format(frappe.db.get_value('Company', si.company, 'abbr'))
si.insert()
si.submit()
frappe.db.commit()
except frappe.ValidationError:
pass
if random.random() <= 0.6:
report = "Received Items to be Billed"
for pr in list(set([r[0] for r in query_report.run(report)["result"]
if r[0]!="Total"]))[:random.randint(1, 5)]:
try:
pi = frappe.get_doc(make_purchase_invoice(pr))
pi.posting_date = frappe.flags.current_date
pi.bill_no = random_string(6)
pi.insert()
pi.submit()
frappe.db.commit()
except frappe.ValidationError:
pass
if random.random() < 0.5:
make_payment_entries("Sales Invoice", "Accounts Receivable")
if random.random() < 0.5:
make_payment_entries("Purchase Invoice", "Accounts Payable")
if random.random() < 0.1:
#make payment request against sales invoice
sales_invoice_name = get_random("Sales Invoice", filters={"docstatus": 1})
if sales_invoice_name:
si = frappe.get_doc("Sales Invoice", sales_invoice_name)
if si.outstanding_amount > 0:
payment_request = make_payment_request(dt="Sales Invoice", dn=si.name, recipient_id=si.contact_email,
submit_doc=True, mute_email=True, use_dummy_message=True)
payment_entry = frappe.get_doc(make_payment_entry(payment_request.name))
payment_entry.posting_date = frappe.flags.current_date
payment_entry.submit()
make_pos_invoice()
def make_payment_entries(ref_doctype, report):
outstanding_invoices = list(set([r[3] for r in query_report.run(report,
{"report_date": frappe.flags.current_date })["result"] if r[2]==ref_doctype]))
# make Payment Entry
for inv in outstanding_invoices[:random.randint(1, 2)]:
pe = get_payment_entry(ref_doctype, inv)
pe.posting_date = frappe.flags.current_date
pe.reference_no = random_string(6)
pe.reference_date = frappe.flags.current_date
pe.insert()
pe.submit()
frappe.db.commit()
outstanding_invoices.remove(inv)
# make payment via JV
for inv in outstanding_invoices[:1]:
jv = frappe.get_doc(get_payment_entry_against_invoice(ref_doctype, inv))
jv.posting_date = frappe.flags.current_date
jv.cheque_no = random_string(6)
jv.cheque_date = frappe.flags.current_date
jv.insert()
jv.submit()
frappe.db.commit()
def make_pos_invoice():
make_sales_order()
for data in frappe.get_all('Sales Order', fields=["name"],
filters = [["per_billed", "<", "100"]]):
si = frappe.get_doc(make_sales_invoice(data.name))
si.is_pos =1
si.posting_date = frappe.flags.current_date
for d in si.get("items"):
if not d.income_account:
d.income_account = "Sales - {}".format(frappe.db.get_value('Company', si.company, 'abbr'))
si.set_missing_values()
make_payment_entries_for_pos_invoice(si)
si.insert()
si.submit()
def make_payment_entries_for_pos_invoice(si):
for data in si.payments:
data.amount = si.outstanding_amount
return
|
the-stack_0_6948 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 16 19:59:07 2021
@author: Alexander Southan
"""
import numpy as np
import unittest
from src.pyPreprocessing import transform
class TestTransform(unittest.TestCase):
def test_transform(self):
x = np.linspace(0, 10, 1100)
        y = x**2 - 30
# test lls transformation
y_lls = transform.transform([y], 'log_log_sqrt', direction='direct')
y_lls_inv = transform.transform(
y_lls, 'log_log_sqrt', direction='inverse', min_value=y.min())
self.assertTrue(np.allclose(y, y_lls_inv[0]))
# test errors
self.assertRaises(
ValueError, transform.transform, [y], 'log_log_sq',
direction='direct')
self.assertRaises(
ValueError, transform.transform, [y], 'log_log_sq',
direction='inverse')
self.assertRaises(
ValueError, transform.transform, [y], 'log_log_sqrt',
direction='dir')
def test_normalize(self):
x = np.linspace(0, 10, 1100)
        y = x**2 - 30
y_norm = transform.normalize([y], 'total_intensity', x_data=x)
self.assertAlmostEqual(np.trapz(y_norm, x=x, axis=1)[0], 1)
y_norm_2 = transform.normalize([y], 'total_intensity', x_data=x,
factor=3.25)
self.assertAlmostEqual(np.trapz(y_norm_2, x=x, axis=1)[0], 3.25)
# test errors
self.assertRaises(ValueError, transform.normalize, [y], 'tot_int')
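
# Not in the original file: a conventional entry point so the tests can also be
# run directly with `python test_transform.py` (assumed filename) rather than
# only through a test runner.
if __name__ == '__main__':
    unittest.main()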
|
the-stack_0_6950 | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test addr relay
"""
from test_framework.messages import (
CAddress,
NODE_NETWORK,
NODE_WITNESS,
msg_addr,
msg_getaddr
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
import time
class AddrReceiver(P2PInterface):
num_ipv4_received = 0
test_addr_contents = False
def __init__(self, test_addr_contents=False):
super().__init__()
self.test_addr_contents = test_addr_contents
def on_addr(self, message):
for addr in message.addrs:
self.num_ipv4_received += 1
            if self.test_addr_contents:
# relay_tests checks the content of the addr messages match
# expectations based on the message creation in setup_addr_msg
assert_equal(addr.nServices, 9)
if not 8333 <= addr.port < 8343:
raise AssertionError("Invalid addr.port of {} (8333-8342 expected)".format(addr.port))
assert addr.ip.startswith('123.123.123.')
def addr_received(self):
return self.num_ipv4_received != 0
def getaddr_received(self):
return self.message_count['getaddr'] > 0
class AddrTest(BitcoinTestFramework):
counter = 0
mocktime = int(time.time())
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
self.oversized_addr_test()
self.relay_tests()
self.getaddr_tests()
self.blocksonly_mode_tests()
def setup_addr_msg(self, num):
addrs = []
for i in range(num):
addr = CAddress()
addr.time = self.mocktime + i
addr.nServices = NODE_NETWORK | NODE_WITNESS
addr.ip = f"123.123.123.{self.counter % 256}"
addr.port = 8333 + i
addrs.append(addr)
self.counter += 1
msg = msg_addr()
msg.addrs = addrs
return msg
def send_addr_msg(self, source, msg, receivers):
source.send_and_ping(msg)
# pop m_next_addr_send timer
self.mocktime += 10 * 60
self.nodes[0].setmocktime(self.mocktime)
for peer in receivers:
peer.sync_send_with_ping()
def oversized_addr_test(self):
self.log.info('Send an addr message that is too large')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = self.setup_addr_msg(1010)
with self.nodes[0].assert_debug_log(['addr message size = 1010']):
addr_source.send_and_ping(msg)
self.nodes[0].disconnect_p2ps()
def relay_tests(self):
self.log.info('Test address relay')
self.log.info('Check that addr message content is relayed and added to addrman')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
num_receivers = 7
receivers = []
for _ in range(num_receivers):
receivers.append(self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True)))
# Keep this with length <= 10. Addresses from larger messages are not
# relayed.
num_ipv4_addrs = 10
msg = self.setup_addr_msg(num_ipv4_addrs)
with self.nodes[0].assert_debug_log(
[
'Added {} addresses from 127.0.0.1: 0 tried'.format(num_ipv4_addrs),
'received: addr (301 bytes) peer=1',
]
):
self.send_addr_msg(addr_source, msg, receivers)
total_ipv4_received = sum(r.num_ipv4_received for r in receivers)
# Every IPv4 address must be relayed to two peers, other than the
# originating node (addr_source).
ipv4_branching_factor = 2
assert_equal(total_ipv4_received, num_ipv4_addrs * ipv4_branching_factor)
self.nodes[0].disconnect_p2ps()
self.log.info('Check relay of addresses received from outbound peers')
inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True))
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
msg = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg, [inbound_peer])
self.log.info('Check that the first addr message received from an outbound peer is not relayed')
# Currently, there is a flag that prevents the first addr message received
# from a new outbound peer to be relayed to others. Originally meant to prevent
# large GETADDR responses from being relayed, it now typically affects the self-announcement
# of the outbound peer which is often sent before the GETADDR response.
assert_equal(inbound_peer.num_ipv4_received, 0)
self.log.info('Check that subsequent addr messages sent from an outbound peer are relayed')
msg2 = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg2, [inbound_peer])
assert_equal(inbound_peer.num_ipv4_received, 2)
self.log.info('Check address relay to outbound peers')
block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
msg3 = self.setup_addr_msg(2)
self.send_addr_msg(inbound_peer, msg3, [full_outbound_peer, block_relay_peer])
self.log.info('Check that addresses are relayed to full outbound peers')
assert_equal(full_outbound_peer.num_ipv4_received, 2)
self.log.info('Check that addresses are not relayed to block-relay-only outbound peers')
assert_equal(block_relay_peer.num_ipv4_received, 0)
self.nodes[0].disconnect_p2ps()
def getaddr_tests(self):
self.log.info('Test getaddr behavior')
self.log.info('Check that we send a getaddr message upon connecting to an outbound-full-relay peer')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
full_outbound_peer.sync_with_ping()
assert full_outbound_peer.getaddr_received()
self.log.info('Check that we do not send a getaddr message upon connecting to a block-relay-only peer')
block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
block_relay_peer.sync_with_ping()
assert_equal(block_relay_peer.getaddr_received(), False)
self.log.info('Check that we answer getaddr messages only from inbound peers')
inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver())
inbound_peer.sync_with_ping()
# Add some addresses to addrman
for i in range(1000):
first_octet = i >> 8
second_octet = i % 256
a = f"{first_octet}.{second_octet}.1.1"
self.nodes[0].addpeeraddress(a, 8333)
full_outbound_peer.send_and_ping(msg_getaddr())
block_relay_peer.send_and_ping(msg_getaddr())
inbound_peer.send_and_ping(msg_getaddr())
self.mocktime += 5 * 60
self.nodes[0].setmocktime(self.mocktime)
inbound_peer.wait_until(lambda: inbound_peer.addr_received() is True)
assert_equal(full_outbound_peer.num_ipv4_received, 0)
assert_equal(block_relay_peer.num_ipv4_received, 0)
assert inbound_peer.num_ipv4_received > 100
self.nodes[0].disconnect_p2ps()
def blocksonly_mode_tests(self):
self.log.info('Test addr relay in -blocksonly mode')
self.restart_node(0, ["-blocksonly"])
self.mocktime = int(time.time())
self.log.info('Check that we send getaddr messages')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
full_outbound_peer.sync_with_ping()
assert full_outbound_peer.getaddr_received()
self.log.info('Check that we relay address messages')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = self.setup_addr_msg(2)
self.send_addr_msg(addr_source, msg, [full_outbound_peer])
assert_equal(full_outbound_peer.num_ipv4_received, 2)
self.nodes[0].disconnect_p2ps()
if __name__ == '__main__':
AddrTest().main()
|
the-stack_0_6954 | """
Fourier Transforms
The frequency components of an image can be displayed after doing a Fourier Transform (FT).
An FT looks at the components of an image (edges that are high-frequency, and areas of smooth
color as low-frequency), and plots the frequencies that occur as points in spectrum.
In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency,
and you can look at an interesting visualization of these sine wave components on this page.
We'll first look at a few simple image patterns to build up an idea of what image frequency
components look like, and then transform a more complex image to see what it looks like in the frequency domain.
"""
import numpy as np
import matplotlib.pyplot as plt
import cv2
# Read in the images
image_stripes = cv2.imread('images/stripes.jpg')
image_solid = cv2.imread('images/pink_solid.jpg')
# Change color to RGB (from BGR)
image_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_BGR2RGB)
image_solid = cv2.cvtColor(image_solid, cv2.COLOR_BGR2RGB)
# Display the images
f, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5))
ax1.imshow(image_stripes)
ax2.imshow(image_solid)
# convert to grayscale to focus on the intensity patterns in the image
gray_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_RGB2GRAY)
gray_solid = cv2.cvtColor(image_solid, cv2.COLOR_RGB2GRAY)
# normalize the image color values from a range of [0,255] to [0,1] for further processing
norm_stripes = gray_stripes / 255.0
norm_solid = gray_solid / 255.0
# perform a fast fourier transform and create a scaled, frequency transform image
def ft_image(norm_image):
'''
This function takes in a normalized, grayscale image
and returns a frequency spectrum transform of that image.
'''
f = np.fft.fft2(norm_image)
fshift = np.fft.fftshift(f)
frequency_tx = 20 * np.log(np.abs(fshift))
return frequency_tx
f_stripes = ft_image(norm_stripes)
f_solid = ft_image(norm_solid)
# display the images
# original images to the left of their frequency transform
f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10))
ax1.set_title('original image')
ax1.imshow(image_stripes)
ax2.set_title('frequency transform image')
ax2.imshow(f_stripes, cmap='gray')
ax3.set_title('original image')
ax3.imshow(image_solid)
ax4.set_title('frequency transform image')
ax4.imshow(f_solid, cmap='gray')
"""
Low frequencies are at the center of the frequency transform image.
The transform images for these example show that the solid image has most
low-frequency components (as seen by the center bright spot).
The stripes transform image contains low-frequencies for the areas of white
and black color and high frequencies for the edges in between those colors.
The stripes transform image also tells us that there is one dominating direction
for these frequencies; vertical stripes are represented by a horizontal line passing
through the center of the frequency transform image.
"""
# Read in an image
image = cv2.imread('images/birds.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
norm_image = gray/255.0
f_image = ft_image(norm_image)
f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.imshow(image)
ax2.imshow(f_image, cmap='gray')
"""
This image has components of all frequencies. You can see a bright spot in the center
of the transform image, which tells us that a large portion of the image is low-frequency;
this makes sense since the body of the birds and background are solid colors. The transform
image also tells us that there are two dominating directions for these frequencies;
vertical edges (from the edges of birds) are represented by a horizontal line passing through
the center of the frequency transform image, and horizontal edges (from the branch and tops
of the birds' heads) are represented by a vertical line passing through the center.
""" |
the-stack_0_6955 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Setup and checking of known good output for CLI tests"""
import functools
import hashlib
import importlib
import os
import pathlib
import shlex
import shutil
import pytest
from improver import cli
from improver.constants import DEFAULT_TOLERANCE
from improver.utilities.compare import compare_netcdfs
RECREATE_DIR_ENVVAR = "RECREATE_KGO"
ACC_TEST_DIR_ENVVAR = "IMPROVER_ACC_TEST_DIR"
IGNORE_CHECKSUMS = "IMPROVER_IGNORE_CHECKSUMS"
ACC_TEST_DIR_MISSING = pathlib.Path("/dev/null")
DEFAULT_CHECKSUM_FILE = pathlib.Path(__file__).parent / "SHA256SUMS"
IGNORED_ATTRIBUTES = ["history", "Conventions"]
def run_cli(cli_name, verbose=True):
"""
Prepare a function for running clize CLIs.
Use of the returned function avoids writing "improver" and the CLI name in
each test function.
Checksums of input files are verified before the clize CLI is run.
Args:
cli_name (str): name of the CLI
verbose (bool): pass verbose option to CLI
Returns:
Callable([Iterable[str], None]): function to run the specified CLI
"""
def run_function(args):
if not checksum_ignore():
verify_checksums(args)
cli.main("improver", cli_name, *args, verbose=verbose)
return run_function
def cli_name_with_dashes(dunder_file):
"""
Convert an acceptance test module name to the corresponding CLI
Args:
dunder_file (str): test module name retrieved from __file__
Returns:
str: CLI name
"""
module_name = str(pathlib.Path(dunder_file).stem)
if module_name.startswith("test_"):
module_name = module_name[5:]
module_dashes = module_name.replace("_", "-")
return module_dashes
@functools.lru_cache()
def acceptance_checksums(checksum_path=DEFAULT_CHECKSUM_FILE):
"""
Retrieve a list of checksums from file in text list format, as produced by
the sha256sum command line tool.
Args:
checksum_path (pathlib.Path): Path to checksum file. File
should be plain text in the format produced by the sha256sum
command line tool. Paths listed in the file should be relative to
the KGO root directory found by kgo_root().
Returns:
Dict[pathlib.Path, str]: Dict with keys being relative paths and
values being hexadecimal checksums
"""
if checksum_path is None:
checksum_path = DEFAULT_CHECKSUM_FILE
with open(checksum_path, mode="r") as checksum_file:
checksum_lines = checksum_file.readlines()
checksums = {}
for line in checksum_lines:
parts = line.strip().split(" ", maxsplit=1)
csum = parts[0]
path = pathlib.Path(parts[1])
checksums[path] = csum
return checksums
def verify_checksum(kgo_path, checksums=None, checksum_path=DEFAULT_CHECKSUM_FILE):
"""
Verify an individual KGO file's checksum.
Args:
kgo_path (pathlib.Path): Path to file in KGO directory
checksums (Optional[Dict[pathlib.Path, str]]): Lookup dictionary
mapping from paths to hexadecimal checksums. If provided, used in
preference to checksum_path.
checksum_path (pathlib.Path): Path to checksum file, used if checksums is
None. File should be plain text in the format produced by the
sha256sum command line tool.
Raises:
KeyError: File being verified is not found in checksum dict/file
ValueError: Checksum does not match value in checksum dict/file
"""
if checksums is None:
checksums_dict = acceptance_checksums(checksum_path=checksum_path)
checksums_source = checksum_path
else:
checksums_dict = checksums
checksums_source = "lookup dict"
kgo_csum = calculate_checksum(kgo_path)
kgo_norm_path = pathlib.Path(os.path.normpath(kgo_path))
kgo_rel_path = kgo_norm_path.relative_to(kgo_root())
try:
expected_csum = checksums_dict[kgo_rel_path]
except KeyError:
msg = f"Checksum for {kgo_rel_path} missing from {checksums_source}"
raise KeyError(msg)
if kgo_csum != expected_csum:
msg = (
f"Checksum for {kgo_rel_path} is {kgo_csum}, "
f"expected {expected_csum} in {checksums_source}"
)
raise ValueError(msg)
def calculate_checksum(path):
"""
Calculate SHA256 hash/checksum of a file
Args:
path (pathlib.Path): Path to file
Returns:
str: checksum as hexadecimal string
"""
hasher = hashlib.sha256()
with open(path, mode="rb") as kgo_file:
while True:
# read 1 megabyte binary chunks from file and feed them to hasher
kgo_chunk = kgo_file.read(2 ** 20)
if not kgo_chunk:
break
hasher.update(kgo_chunk)
checksum = hasher.hexdigest()
return checksum
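
# Hedged usage sketch (hypothetical path): the helper streams any local file in
# 1 MiB chunks, so e.g.
#     calculate_checksum(pathlib.Path("/tmp/example.nc"))
# returns the same hex digest that `sha256sum /tmp/example.nc` would print.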
def verify_checksums(cli_arglist):
"""
Verify input file checksums based on input arguments to a CLI.
Intended for use inside acceptance tests, so raises exceptions to report
various issues that should result in a test failure.
Args:
cli_arglist (List[Union[str,pathlib.Path]]): list of arguments being
passed to a CLI such as via improver.cli.main function.
"""
# copy the arglist as it will be edited to remove output args
arglist = cli_arglist.copy()
# if there is an --output argument, remove the path in the following argument
try:
output_idx = cli_arglist.index("--output")
arglist.pop(output_idx + 1)
except ValueError:
pass
# drop arguments of the form --output=file
arglist = [
arg
for arg in arglist
if not isinstance(arg, str) or not arg.startswith("--output=")
]
# check for non-path-type arguments that refer to KGOs
kgo_dir = str(kgo_root())
path_strs = [arg for arg in arglist if isinstance(arg, str) and kgo_dir in arg]
if path_strs:
msg = (
f"arg list contains KGO paths as strings {path_strs}, "
"expected paths to be pathlib.Path objects"
)
raise ValueError(msg)
# verify checksums of remaining path-type arguments
path_args = [arg for arg in arglist if isinstance(arg, pathlib.Path)]
for arg in path_args:
# expand any globs in the argument and verify each of them
arg_globs = list(arg.parent.glob(arg.name))
for arg_glob in arg_globs:
verify_checksum(arg_glob)
def checksum_ignore():
"""True if CHECKSUMs should be checked"""
return os.getenv(IGNORE_CHECKSUMS, "false").lower() == "true"
def kgo_recreate():
"""True if KGO should be re-created"""
return RECREATE_DIR_ENVVAR in os.environ
def kgo_root():
"""Path to the root of the KGO directories"""
try:
test_dir = os.environ[ACC_TEST_DIR_ENVVAR]
except KeyError:
return ACC_TEST_DIR_MISSING
return pathlib.Path(test_dir)
def kgo_exists():
"""True if KGO files exist"""
return not kgo_root().samefile(ACC_TEST_DIR_MISSING)
def recreate_if_needed(output_path, kgo_path, recreate_dir_path=None):
"""
Re-create a file in the KGO, depending on configuration.
Args:
output_path (pathlib.Path): Path to output produced by test
kgo_path (pathlib.Path): Path to expected/original KGO file
recreate_dir_path (Optional[pathlib.Path]): Path to directory where
recreated KGOs will be placed. Default is environment variable
specified in RECREATE_DIR_ENVVAR constant.
Returns:
bool: True if KGO was recreated
"""
if not kgo_recreate():
return False
if not kgo_path.is_absolute():
raise IOError("KGO path is not absolute")
if not output_path.is_file():
raise IOError("Expected output file not created by running test")
if recreate_dir_path is None:
recreate_dir_path = pathlib.Path(os.environ[RECREATE_DIR_ENVVAR])
kgo_root_dir = kgo_root()
if kgo_root_dir not in kgo_path.parents:
raise IOError("KGO path for test is not within KGO root directory")
if not recreate_dir_path.is_absolute():
raise IOError("Recreate KGO path is not absolute")
print("Comparison found differences - recreating KGO for this test")
if kgo_path.exists():
print(f"Original KGO file is at {kgo_path}")
else:
print("Original KGO file does not exist")
kgo_relative = kgo_path.relative_to(kgo_root_dir)
recreate_file_path = recreate_dir_path / kgo_relative
if recreate_file_path == kgo_path:
err = (
f"Recreate KGO path {recreate_file_path} must be different from"
f" original KGO path {kgo_path} to avoid overwriting"
)
raise IOError(err)
recreate_file_path.parent.mkdir(exist_ok=True, parents=True)
if recreate_file_path.exists():
recreate_file_path.unlink()
shutil.copyfile(str(output_path), str(recreate_file_path))
print(f"Updated KGO file is at {recreate_file_path}")
print(
f"Put the updated KGO file in {ACC_TEST_DIR_ENVVAR} to make this"
f" test pass. For example:"
)
quoted_kgo = shlex.quote(str(kgo_path))
quoted_recreate = shlex.quote(str(recreate_file_path))
print(f"cp {quoted_recreate} {quoted_kgo}")
return True
def statsmodels_available():
"""True if statsmodels library is importable"""
if importlib.util.find_spec("statsmodels"):
return True
return False
def iris_nimrod_patch_available():
"""True if iris_nimrod_patch library is importable"""
if importlib.util.find_spec("iris_nimrod_patch"):
return True
return False
def compare(
output_path,
kgo_path,
recreate=True,
atol=DEFAULT_TOLERANCE,
rtol=DEFAULT_TOLERANCE,
exclude_vars=None,
):
"""
Compare output against expected using KGO file with absolute and
relative tolerances. Also recreates KGO if that setting is enabled.
Args:
output_path (pathlib.Path): Path to output produced by test
kgo_path (pathlib.Path): Path to KGO file
recreate (bool): False to disable KGO recreation, compare only
atol (float): Absolute tolerance
rtol (float): Relative tolerance
exclude_vars (Iterable[str]): Variables to exclude from comparison
Returns:
None
"""
# don't show this function in pytest tracebacks
# pylint: disable=unused-variable
__tracebackhide__ = True
assert output_path.is_absolute()
assert kgo_path.is_absolute()
if not isinstance(atol, (int, float)):
raise ValueError("atol")
if not isinstance(rtol, (int, float)):
raise ValueError("rtol")
difference_found = False
message = ""
def message_recorder(exception_message):
nonlocal difference_found
nonlocal message
difference_found = True
message = exception_message
compare_netcdfs(
output_path,
kgo_path,
atol=atol,
rtol=rtol,
exclude_vars=exclude_vars,
reporter=message_recorder,
ignored_attributes=IGNORED_ATTRIBUTES,
)
if difference_found:
if recreate:
recreate_if_needed(output_path, kgo_path)
raise AssertionError(message)
if not checksum_ignore():
verify_checksum(kgo_path)
# Pytest decorator to skip tests if KGO is not available for use
# pylint: disable=invalid-name
skip_if_kgo_missing = pytest.mark.skipif(not kgo_exists(), reason="KGO files required")
# Pytest decorator to skip tests if statsmodels is available
# pylint: disable=invalid-name
skip_if_statsmodels = pytest.mark.skipif(
statsmodels_available(), reason="statsmodels library is available"
)
# Pytest decorator to skip tests if statsmodels is not available
# pylint: disable=invalid-name
skip_if_no_statsmodels = pytest.mark.skipif(
not statsmodels_available(), reason="statsmodels library is not available"
)
# Pytest decorator to skip tests if iris_nimrod_patch is not available
# pylint: disable=invalid-name
skip_if_no_iris_nimrod_patch = pytest.mark.skipif(
not iris_nimrod_patch_available(),
reason="iris_nimrod_patch library is not available",
)
|
the-stack_0_6956 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/11/26 14:20
# @Author : Adyan
# @File : setup.py
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="Adyan",
version="0.0.2",
author="Adyan",
author_email="[email protected]",
description="Special package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/liujiang9/Utils",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
)
|
the-stack_0_6957 | from typing import Callable
try: # Assume we're a sub-module in a package.
from utils import numeric as nm
except ImportError: # Apparently no higher-level package has been imported, fall back to a local import.
from ..utils import numeric as nm
def shifted_func(func) -> Callable:
def func_(x, y) -> list:
assert len(x) == len(y)
shift_max = len(x) - 1
result = list()
for shift in range(-shift_max + 2, 0):
shifted_x = x[0: shift_max + shift]
shifted_y = y[- shift: shift_max]
stat = func(shifted_x, shifted_y)
result.append(stat)
for shift in range(0, shift_max - 1):
shifted_x = x[shift: shift_max]
shifted_y = y[0: shift_max - shift]
stat = func(shifted_x, shifted_y)
result.append(stat)
return result
return func_
def pair_filter(function=nm.is_defined) -> Callable:
def func(a, b) -> tuple:
a_filtered, b_filtered = list(), list()
for cur_a, cur_b in zip(a, b):
take_a = function(cur_a)
take_b = function(cur_b)
if take_a and take_b:
a_filtered.append(cur_a)
b_filtered.append(cur_b)
return a_filtered, b_filtered
return func
def pair_stat(stat_func, filter_func=None) -> Callable:
def func(a, b) -> float:
if filter_func:
data = pair_filter(filter_func)(a, b)
else:
data = (a, b)
return stat_func(*data)
return func
def corr() -> Callable:
return pair_stat(
filter_func=nm.is_nonzero,
stat_func=lambda *v: nm.corr(*v),
)
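
# Minimal sketch (not part of the original module; assumes the utils.numeric
# import at the top succeeded). pair_filter drops positions where either series
# fails the predicate, which is easiest to see with an explicit lambda:
if __name__ == "__main__":
    keep_pairs = pair_filter(lambda v: v is not None)
    print(keep_pairs([1, None, 3], [4, 5, None]))  # ([1], [4])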
|
the-stack_0_6959 | import math
# pad input string with character c and modulo operand mod_op
def padWithChars(sinp,c,mod_op):
ret_val = sinp
if len(sinp) % mod_op == 0:
return ret_val
for i in range(0,mod_op-len(sinp)%mod_op):
ret_val += c
return ret_val
# split input string into a list where each element is a group of n characters
def getInputList(sinp,n):
ret_val = []
while sinp != "":
ret_val.append(sinp[:n])
sinp = sinp[n:]
return ret_val
# eliminate leading 0b from string and pad with zeroes (in front) until there are 8 bits
def getFormattedBitString(binp,num_bits):
ret_val = binp[2:]
    if ret_val == "0" and num_bits == 8:
return "00000000"
    if ret_val == "0" and num_bits == 32:
return "00000000000000000000000000000000"
for i in range(0,num_bits-len(ret_val)):
ret_val = '0' + ret_val
return ret_val
# convert each character of string into binary string
def findBitPattern(sinp):
ret_val = ""
for i in range(0,len(sinp)):
ret_val += getFormattedBitString(bin(ord(sinp[i])),8)
return ret_val
# convert n*8 bit binary string to n ascii chars
def findAsciiPattern(sinp,n):
ret_val = ""
for i in range(0,n):
ret_val += chr(int(sinp[:8],2))
sinp = sinp[8:]
return ret_val
# convert input number into base 85 number (as a list)
def getBase85ValueList(dinp):
ret_val = []
div_dinp = dinp
while div_dinp != 0:
mod_dinp = div_dinp % 85
div_dinp = int(math.floor(div_dinp / 85))
ret_val.insert(0,str(mod_dinp))
return ret_val
# convert base 85 to base 10
def base85ToBase10(linp):
ret_val = 0
digits = len(linp)
for i in range(0,digits):
ret_val += linp[i] * (85 ** (digits - i - 1))
return ret_val
# add 33 to each number in list above and convert to ascii
def add33ConvertAscii(sinp):
ret_val = ""
for elmt in sinp:
ascii_int_partition = int(elmt) + 33
ret_val += chr(ascii_int_partition)
return ret_val
# eliminate trailing characters matching the number of trailing zeroes the input was padded with
def unpadResult(sinp,pad):
# this was fiddled with to get it working. I need to revisit why this is the correct conditional.
if pad % 4 == 0 or pad % 5 == 0:
return sinp
return sinp[:-pad]
# convert ascii to int and subtract 33 for each character; store each result in a list
def sub33NumList(sinp):
ret_val = []
for elmt in sinp:
ret_val.append(int(findBitPattern(elmt),2)-33)
return ret_val
# compute Base85 for all sets of 4 characters in input
def encodeAllSubSections(linp):
ret_val = ""
for elmt in linp:
bit_pattern = findBitPattern(elmt)
int_64bit = int(bit_pattern,2)
list_85base = getBase85ValueList(int_64bit)
ret_val += add33ConvertAscii(list_85base)
return ret_val
# decode for all sets of 5 characters in input in encoded result
def decodeAllSubSections(linp):
ret_val = ""
for elmt in linp:
sub_33_list = sub33NumList(elmt)
int_64bit = base85ToBase10(sub_33_list)
bit_pattern = getFormattedBitString(bin(int_64bit),32)
ret_val += findAsciiPattern(bit_pattern,4)
return ret_val
# encode sinp
def encodeAscii85(sinp):
padded_input = padWithChars(sinp,'\0',4)
padded_offset = 4 - (len(sinp)%4)
input_list = getInputList(padded_input,4)
final_result = unpadResult(encodeAllSubSections(input_list),padded_offset)
return final_result
# decode sinp
def decodeAscii85(sinp):
padded_input = padWithChars(sinp,'u',5)
padded_offset = 5 - (len(sinp)%5)
input_list = getInputList(padded_input,5)
final_result = unpadResult(decodeAllSubSections(input_list),padded_offset)
return final_result
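
# Quick round-trip sketch (not part of the original module). For this sample the
# decoder reproduces the encoder's input; inputs whose 4-byte groups begin with a
# zero byte are not guaranteed to survive, given how getBase85ValueList drops
# leading zero digits.
if __name__ == "__main__":
    sample = "Hello, world!"
    encoded = encodeAscii85(sample)
    print(encoded)
    print(decodeAscii85(encoded))  # expected to print the original sample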
|
the-stack_0_6960 | # coding: utf-8
"""
FreeClimb API
FreeClimb is a cloud-based application programming interface (API) that puts the power of the Vail platform in your hands. FreeClimb simplifies the process of creating applications that can use a full range of telephony features without requiring specialized or on-site telephony equipment. Using the FreeClimb REST API to write applications is easy! You have the option to use the language of your choice or hit the API directly. Your application can execute a command by issuing a RESTful request to the FreeClimb API. The base URL to send HTTP requests to the FreeClimb REST API is: /apiserver. FreeClimb authenticates and processes your request. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from freeclimb.configuration import Configuration
class CallResultAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'call_id': 'str',
'parent_call_id': 'str',
'account_id': 'str',
'_from': 'str',
'to': 'str',
'phone_number_id': 'str',
'status': 'str',
'start_time': 'str',
'connect_time': 'str',
'end_time': 'str',
'duration': 'int',
'connect_duration': 'int',
'direction': 'str',
'answered_by': 'str',
'subresource_uris': 'object'
}
attribute_map = {
'call_id': 'callId',
'parent_call_id': 'parentCallId',
'account_id': 'accountId',
'_from': 'from',
'to': 'to',
'phone_number_id': 'phoneNumberId',
'status': 'status',
'start_time': 'startTime',
'connect_time': 'connectTime',
'end_time': 'endTime',
'duration': 'duration',
'connect_duration': 'connectDuration',
'direction': 'direction',
'answered_by': 'answeredBy',
'subresource_uris': 'subresourceUris'
}
def __init__(self, call_id=None, parent_call_id=None, account_id=None, _from=None, to=None, phone_number_id=None, status=None, start_time=None, connect_time=None, end_time=None, duration=None, connect_duration=None, direction=None, answered_by=None, subresource_uris=None, local_vars_configuration=None): # noqa: E501
"""CallResultAllOf - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._call_id = None
self._parent_call_id = None
self._account_id = None
self.__from = None
self._to = None
self._phone_number_id = None
self._status = None
self._start_time = None
self._connect_time = None
self._end_time = None
self._duration = None
self._connect_duration = None
self._direction = None
self._answered_by = None
self._subresource_uris = None
self.discriminator = None
if call_id is not None:
self.call_id = call_id
if parent_call_id is not None:
self.parent_call_id = parent_call_id
if account_id is not None:
self.account_id = account_id
if _from is not None:
self._from = _from
if to is not None:
self.to = to
if phone_number_id is not None:
self.phone_number_id = phone_number_id
if status is not None:
self.status = status
if start_time is not None:
self.start_time = start_time
if connect_time is not None:
self.connect_time = connect_time
if end_time is not None:
self.end_time = end_time
if duration is not None:
self.duration = duration
if connect_duration is not None:
self.connect_duration = connect_duration
if direction is not None:
self.direction = direction
if answered_by is not None:
self.answered_by = answered_by
if subresource_uris is not None:
self.subresource_uris = subresource_uris
@property
def call_id(self):
"""Gets the call_id of this CallResultAllOf. # noqa: E501
String that uniquely identifies this Call resource. # noqa: E501
:return: The call_id of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._call_id
@call_id.setter
def call_id(self, call_id):
"""Sets the call_id of this CallResultAllOf.
String that uniquely identifies this Call resource. # noqa: E501
:param call_id: The call_id of this CallResultAllOf. # noqa: E501
:type: str
"""
self._call_id = call_id
@property
def parent_call_id(self):
"""Gets the parent_call_id of this CallResultAllOf. # noqa: E501
ID of the Call that created this leg (child Call). # noqa: E501
:return: The parent_call_id of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._parent_call_id
@parent_call_id.setter
def parent_call_id(self, parent_call_id):
"""Sets the parent_call_id of this CallResultAllOf.
ID of the Call that created this leg (child Call). # noqa: E501
:param parent_call_id: The parent_call_id of this CallResultAllOf. # noqa: E501
:type: str
"""
self._parent_call_id = parent_call_id
@property
def account_id(self):
"""Gets the account_id of this CallResultAllOf. # noqa: E501
ID of the account that owns this Call. # noqa: E501
:return: The account_id of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""Sets the account_id of this CallResultAllOf.
ID of the account that owns this Call. # noqa: E501
:param account_id: The account_id of this CallResultAllOf. # noqa: E501
:type: str
"""
self._account_id = account_id
@property
def _from(self):
"""Gets the _from of this CallResultAllOf. # noqa: E501
Phone number that initiated this Call. # noqa: E501
:return: The _from of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self.__from
@_from.setter
def _from(self, _from):
"""Sets the _from of this CallResultAllOf.
Phone number that initiated this Call. # noqa: E501
:param _from: The _from of this CallResultAllOf. # noqa: E501
:type: str
"""
self.__from = _from
@property
def to(self):
"""Gets the to of this CallResultAllOf. # noqa: E501
Phone number that received this Call. # noqa: E501
:return: The to of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._to
@to.setter
def to(self, to):
"""Sets the to of this CallResultAllOf.
Phone number that received this Call. # noqa: E501
:param to: The to of this CallResultAllOf. # noqa: E501
:type: str
"""
self._to = to
@property
def phone_number_id(self):
"""Gets the phone_number_id of this CallResultAllOf. # noqa: E501
If the Call was inbound, this is the ID of the IncomingPhoneNumber that received the Call (DNIS). If the Call was outbound, this is the ID of the phone number from which the Call was placed (ANI). # noqa: E501
:return: The phone_number_id of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._phone_number_id
@phone_number_id.setter
def phone_number_id(self, phone_number_id):
"""Sets the phone_number_id of this CallResultAllOf.
If the Call was inbound, this is the ID of the IncomingPhoneNumber that received the Call (DNIS). If the Call was outbound, this is the ID of the phone number from which the Call was placed (ANI). # noqa: E501
:param phone_number_id: The phone_number_id of this CallResultAllOf. # noqa: E501
:type: str
"""
self._phone_number_id = phone_number_id
@property
def status(self):
"""Gets the status of this CallResultAllOf. # noqa: E501
* `queued` – Call is ready and waiting in line before going out. * `ringing` – Call is currently ringing. * `inProgress` – Call was answered and is currently in progress. * `canceled` – Call was hung up while it was queued or ringing. * `completed` – Call was answered and has ended normally. * `busy` – Caller received a busy signal. * `failed` – Call could not be completed as dialed, most likely because the phone number was non-existent. * `noAnswer` – Call ended without being answered. # noqa: E501
:return: The status of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this CallResultAllOf.
* `queued` – Call is ready and waiting in line before going out. * `ringing` – Call is currently ringing. * `inProgress` – Call was answered and is currently in progress. * `canceled` – Call was hung up while it was queued or ringing. * `completed` – Call was answered and has ended normally. * `busy` – Caller received a busy signal. * `failed` – Call could not be completed as dialed, most likely because the phone number was non-existent. * `noAnswer` – Call ended without being answered. # noqa: E501
:param status: The status of this CallResultAllOf. # noqa: E501
:type: str
"""
allowed_values = ["queued", "ringing", "inProgress", "canceled", "completed", "busy", "failed", "noAnswer"] # noqa: E501
if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
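    # Illustrative note (not generated code): with client-side validation
    # enabled, assigning a value outside allowed_values above, for example
    # call.status = "dropped", raises the ValueError shown in this setter;
    # only the eight documented call states are accepted.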
@property
def start_time(self):
"""Gets the start_time of this CallResultAllOf. # noqa: E501
Start time of the Call (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). Empty if the Call has not yet been dialed. # noqa: E501
:return: The start_time of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this CallResultAllOf.
Start time of the Call (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). Empty if the Call has not yet been dialed. # noqa: E501
:param start_time: The start_time of this CallResultAllOf. # noqa: E501
:type: str
"""
self._start_time = start_time
@property
def connect_time(self):
"""Gets the connect_time of this CallResultAllOf. # noqa: E501
Time the Call was answered (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). Empty if the Call has not yet been dialed. # noqa: E501
:return: The connect_time of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._connect_time
@connect_time.setter
def connect_time(self, connect_time):
"""Sets the connect_time of this CallResultAllOf.
Time the Call was answered (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). Empty if the Call has not yet been dialed. # noqa: E501
:param connect_time: The connect_time of this CallResultAllOf. # noqa: E501
:type: str
"""
self._connect_time = connect_time
@property
def end_time(self):
"""Gets the end_time of this CallResultAllOf. # noqa: E501
End time of the Call (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). Empty if the Call did not complete successfully. # noqa: E501
:return: The end_time of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this CallResultAllOf.
End time of the Call (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). Empty if the Call did not complete successfully. # noqa: E501
:param end_time: The end_time of this CallResultAllOf. # noqa: E501
:type: str
"""
self._end_time = end_time
@property
def duration(self):
"""Gets the duration of this CallResultAllOf. # noqa: E501
Total length of the Call in seconds. Measures time between startTime and endTime. This value is empty for busy, failed, unanswered or ongoing Calls. # noqa: E501
:return: The duration of this CallResultAllOf. # noqa: E501
:rtype: int
"""
return self._duration
@duration.setter
def duration(self, duration):
"""Sets the duration of this CallResultAllOf.
Total length of the Call in seconds. Measures time between startTime and endTime. This value is empty for busy, failed, unanswered or ongoing Calls. # noqa: E501
:param duration: The duration of this CallResultAllOf. # noqa: E501
:type: int
"""
self._duration = duration
@property
def connect_duration(self):
"""Gets the connect_duration of this CallResultAllOf. # noqa: E501
Length of time that the Call was connected in seconds. Measures time between connectTime and endTime. This value is empty for busy, failed, unanswered or ongoing Calls. # noqa: E501
:return: The connect_duration of this CallResultAllOf. # noqa: E501
:rtype: int
"""
return self._connect_duration
@connect_duration.setter
def connect_duration(self, connect_duration):
"""Sets the connect_duration of this CallResultAllOf.
Length of time that the Call was connected in seconds. Measures time between connectTime and endTime. This value is empty for busy, failed, unanswered or ongoing Calls. # noqa: E501
:param connect_duration: The connect_duration of this CallResultAllOf. # noqa: E501
:type: int
"""
self._connect_duration = connect_duration
@property
def direction(self):
"""Gets the direction of this CallResultAllOf. # noqa: E501
Direction of the Call. `inbound` for Calls into FreeClimb, `outboundAPI` for Calls initiated via the REST API, `outboundDial` for Calls initiated by the `OutDial` PerCL command. # noqa: E501
:return: The direction of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._direction
@direction.setter
def direction(self, direction):
"""Sets the direction of this CallResultAllOf.
Direction of the Call. `inbound` for Calls into FreeClimb, `outboundAPI` for Calls initiated via the REST API, `outboundDial` for Calls initiated by the `OutDial` PerCL command. # noqa: E501
:param direction: The direction of this CallResultAllOf. # noqa: E501
:type: str
"""
self._direction = direction
@property
def answered_by(self):
"""Gets the answered_by of this CallResultAllOf. # noqa: E501
If this Call was initiated with answering machine detection, either `human` or `machine`. Empty otherwise. # noqa: E501
:return: The answered_by of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._answered_by
@answered_by.setter
def answered_by(self, answered_by):
"""Sets the answered_by of this CallResultAllOf.
If this Call was initiated with answering machine detection, either `human` or `machine`. Empty otherwise. # noqa: E501
:param answered_by: The answered_by of this CallResultAllOf. # noqa: E501
:type: str
"""
self._answered_by = answered_by
@property
def subresource_uris(self):
"""Gets the subresource_uris of this CallResultAllOf. # noqa: E501
The list of subresources for this Call. These include things like logs and recordings associated with the Call. # noqa: E501
:return: The subresource_uris of this CallResultAllOf. # noqa: E501
:rtype: object
"""
return self._subresource_uris
@subresource_uris.setter
def subresource_uris(self, subresource_uris):
"""Sets the subresource_uris of this CallResultAllOf.
The list of subresources for this Call. These include things like logs and recordings associated with the Call. # noqa: E501
:param subresource_uris: The subresource_uris of this CallResultAllOf. # noqa: E501
:type: object
"""
self._subresource_uris = subresource_uris
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.to_camel_case(attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif value is None:
continue
else:
result[attr] = value
return result
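    # Illustrative note (not generated code): in the loop above, attribute
    # names are converted to camelCase via to_camel_case() and None-valued
    # attributes are skipped, so the returned dict only carries fields that
    # hold a concrete value.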
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CallResultAllOf):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CallResultAllOf):
return True
return self.to_dict() != other.to_dict()
def to_camel_case(self, snake_str):
components = snake_str.split('_')
return components[0] + ''.join(x.title() for x in components[1:])
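        # e.g. to_camel_case("phone_number_id") == "phoneNumberId"


# --- Illustrative sketch only (not part of the generated SDK) ---
# A minimal example of how this model is typically used: build an instance,
# set a few of the validated properties defined above, and serialize it with
# to_dict(). It assumes the generated constructor accepts no-argument
# construction with every field defaulting to None, which is the usual
# openapi-generator convention but is not shown in this excerpt.
if __name__ == "__main__":
    call = CallResultAllOf()            # assumption: all fields default to None
    call.call_id = "CA123"              # plain string field
    call.status = "completed"           # checked against allowed_values above
    call.duration = 42
    print(call.to_dict())               # camelCased keys, None-valued fields omitted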