ext | sha | content
---|---|---|
py | 1a40dd3431bdb109359620591f4ad1bbe5824b68 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class TeamSettingsDaysOffPatch(Model):
"""TeamSettingsDaysOffPatch.
:param days_off:
:type days_off: list of :class:`DateRange <work.v4_0.models.DateRange>`
"""
_attribute_map = {
'days_off': {'key': 'daysOff', 'type': '[DateRange]'}
}
def __init__(self, days_off=None):
super(TeamSettingsDaysOffPatch, self).__init__()
self.days_off = days_off
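# Minimal usage sketch (illustrative; not part of the generated file): the patch
# simply wraps a list of DateRange values that msrest serializes under the JSON
# key 'daysOff'. DateRange is assumed to expose start/end fields, and the import
# path follows the docstring reference above; sprint_start/sprint_end are
# hypothetical datetime values.
#
# from work.v4_0.models import DateRange
# patch = TeamSettingsDaysOffPatch(days_off=[DateRange(start=sprint_start, end=sprint_end)])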
|
py | 1a40ded5572e1dd23710c2bb5e42f6f4ef8aa56b | # Copyright 2013 Metacloud, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import mock
from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import \
security_group_default_rules as security_group_default_rules_v2
from nova.api.openstack.compute.plugins.v3 import \
security_group_default_rules as security_group_default_rules_v21
from nova.api.openstack import wsgi
from nova import context
import nova.db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
class AttrDict(dict):
def __getattr__(self, k):
return self[k]
def security_group_default_rule_template(**kwargs):
rule = kwargs.copy()
rule.setdefault('ip_protocol', 'TCP')
rule.setdefault('from_port', 22)
rule.setdefault('to_port', 22)
rule.setdefault('cidr', '10.10.10.0/24')
return rule
def security_group_default_rule_db(security_group_default_rule, id=None):
attrs = security_group_default_rule.copy()
if id is not None:
attrs['id'] = id
return AttrDict(attrs)
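# Illustrative sketch (added for exposition; not part of the original tests):
def _example_helpers_usage():
    """The template helper builds a default TCP/22 rule dict and the db helper
    wraps it so fields are reachable as attributes, mimicking a DB row."""
    rule = security_group_default_rule_db(
        security_group_default_rule_template(cidr='192.168.0.0/16'), id=1)
    assert rule.ip_protocol == 'TCP'
    assert rule.from_port == 22 and rule.to_port == 22
    assert rule.cidr == '192.168.0.0/16'
    assert rule.id == 1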
class TestSecurityGroupDefaultRulesNeutronV21(test.TestCase):
controller_cls = (security_group_default_rules_v21.
SecurityGroupDefaultRulesController)
def setUp(self):
self.flags(security_group_api='neutron')
super(TestSecurityGroupDefaultRulesNeutronV21, self).setUp()
self.controller = self.controller_cls()
def test_create_security_group_default_rule_not_implemented_neutron(self):
sgr = security_group_default_rule_template()
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_security_group_default_rules_list_not_implemented_neutron(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.index,
req)
def test_security_group_default_rules_show_not_implemented_neutron(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.show,
req, '602ed77c-a076-4f9b-a617-f93b847b62c5')
def test_security_group_default_rules_delete_not_implemented_neutron(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.delete,
req, '602ed77c-a076-4f9b-a617-f93b847b62c5')
class TestSecurityGroupDefaultRulesNeutronV2(test.TestCase):
controller_cls = (security_group_default_rules_v2.
SecurityGroupDefaultRulesController)
class TestSecurityGroupDefaultRulesV21(test.TestCase):
controller_cls = (security_group_default_rules_v21.
SecurityGroupDefaultRulesController)
def setUp(self):
super(TestSecurityGroupDefaultRulesV21, self).setUp()
self.controller = self.controller_cls()
def test_create_security_group_default_rule(self):
sgr = security_group_default_rule_template()
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
sgr_dict = dict(security_group_default_rule=sgr)
res_dict = self.controller.create(req, sgr_dict)
security_group_default_rule = res_dict['security_group_default_rule']
self.assertEqual(security_group_default_rule['ip_protocol'],
sgr['ip_protocol'])
self.assertEqual(security_group_default_rule['from_port'],
sgr['from_port'])
self.assertEqual(security_group_default_rule['to_port'],
sgr['to_port'])
self.assertEqual(security_group_default_rule['ip_range']['cidr'],
sgr['cidr'])
def test_create_security_group_default_rule_with_no_to_port(self):
sgr = security_group_default_rule_template()
del sgr['to_port']
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_from_port(self):
sgr = security_group_default_rule_template()
del sgr['from_port']
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_ip_protocol(self):
sgr = security_group_default_rule_template()
del sgr['ip_protocol']
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_cidr(self):
sgr = security_group_default_rule_template()
del sgr['cidr']
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
res_dict = self.controller.create(req,
{'security_group_default_rule': sgr})
security_group_default_rule = res_dict['security_group_default_rule']
self.assertNotEqual(security_group_default_rule['id'], 0)
self.assertEqual(security_group_default_rule['ip_range']['cidr'],
'0.0.0.0/0')
def test_create_security_group_default_rule_with_blank_to_port(self):
sgr = security_group_default_rule_template(to_port='')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_blank_from_port(self):
sgr = security_group_default_rule_template(from_port='')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_blank_ip_protocol(self):
sgr = security_group_default_rule_template(ip_protocol='')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_blank_cidr(self):
sgr = security_group_default_rule_template(cidr='')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
res_dict = self.controller.create(req,
{'security_group_default_rule': sgr})
security_group_default_rule = res_dict['security_group_default_rule']
self.assertNotEqual(security_group_default_rule['id'], 0)
self.assertEqual(security_group_default_rule['ip_range']['cidr'],
'0.0.0.0/0')
def test_create_security_group_default_rule_non_numerical_to_port(self):
sgr = security_group_default_rule_template(to_port='invalid')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_non_numerical_from_port(self):
sgr = security_group_default_rule_template(from_port='invalid')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_ip_protocol(self):
sgr = security_group_default_rule_template(ip_protocol='invalid')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_cidr(self):
sgr = security_group_default_rule_template(cidr='10.10.2222.0/24')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_to_port(self):
sgr = security_group_default_rule_template(to_port='666666')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_from_port(self):
sgr = security_group_default_rule_template(from_port='666666')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_body(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, None)
def test_create_duplicate_security_group_default_rule(self):
sgr = security_group_default_rule_template()
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.controller.create(req, {'security_group_default_rule': sgr})
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_security_group_default_rules_list(self):
self.test_create_security_group_default_rule()
rules = [dict(id=1,
ip_protocol='TCP',
from_port=22,
to_port=22,
ip_range=dict(cidr='10.10.10.0/24'))]
expected = {'security_group_default_rules': rules}
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(res_dict, expected)
@mock.patch('nova.db.security_group_default_rule_list',
side_effect=(exception.
SecurityGroupDefaultRuleNotFound("Rule Not Found")))
def test_non_existing_security_group_default_rules_list(self,
mock_sec_grp_rule):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req)
def test_default_security_group_default_rule_show(self):
sgr = security_group_default_rule_template(id=1)
self.test_create_security_group_default_rule()
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
res_dict = self.controller.show(req, '1')
security_group_default_rule = res_dict['security_group_default_rule']
self.assertEqual(security_group_default_rule['ip_protocol'],
sgr['ip_protocol'])
self.assertEqual(security_group_default_rule['to_port'],
sgr['to_port'])
self.assertEqual(security_group_default_rule['from_port'],
sgr['from_port'])
self.assertEqual(security_group_default_rule['ip_range']['cidr'],
sgr['cidr'])
@mock.patch('nova.db.security_group_default_rule_get',
side_effect=(exception.
SecurityGroupDefaultRuleNotFound("Rule Not Found")))
def test_non_existing_security_group_default_rule_show(self,
mock_sec_grp_rule):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '1')
def test_delete_security_group_default_rule(self):
sgr = security_group_default_rule_template(id=1)
self.test_create_security_group_default_rule()
self.called = False
def security_group_default_rule_destroy(context, id):
self.called = True
def return_security_group_default_rule(context, id):
self.assertEqual(sgr['id'], id)
return security_group_default_rule_db(sgr)
self.stubs.Set(nova.db, 'security_group_default_rule_destroy',
security_group_default_rule_destroy)
self.stubs.Set(nova.db, 'security_group_default_rule_get',
return_security_group_default_rule)
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.controller.delete(req, '1')
self.assertTrue(self.called)
@mock.patch('nova.db.security_group_default_rule_destroy',
side_effect=(exception.
SecurityGroupDefaultRuleNotFound("Rule Not Found")))
def test_non_existing_security_group_default_rule_delete(
self, mock_sec_grp_rule):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '1')
def test_security_group_ensure_default(self):
sgr = security_group_default_rule_template(id=1)
self.test_create_security_group_default_rule()
ctxt = context.get_admin_context()
setattr(ctxt, 'project_id', 'new_project_id')
sg = nova.db.security_group_ensure_default(ctxt)
rules = nova.db.security_group_rule_get_by_security_group(ctxt, sg.id)
security_group_rule = rules[0]
self.assertEqual(sgr['id'], security_group_rule.id)
self.assertEqual(sgr['ip_protocol'], security_group_rule.protocol)
self.assertEqual(sgr['from_port'], security_group_rule.from_port)
self.assertEqual(sgr['to_port'], security_group_rule.to_port)
self.assertEqual(sgr['cidr'], security_group_rule.cidr)
class TestSecurityGroupDefaultRulesV2(test.TestCase):
controller_cls = (security_group_default_rules_v2.
SecurityGroupDefaultRulesController)
class TestSecurityGroupDefaultRulesXMLDeserializer(test.TestCase):
def setUp(self):
super(TestSecurityGroupDefaultRulesXMLDeserializer, self).setUp()
deserializer = security_group_default_rules_v2.\
SecurityGroupDefaultRulesXMLDeserializer()
self.deserializer = deserializer
def test_create_request(self):
serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<to_port>22</to_port>
<ip_protocol>TCP</ip_protocol>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"from_port": "22",
"to_port": "22",
"ip_protocol": "TCP",
"cidr": "10.10.10.0/24"
},
}
self.assertEqual(request['body'], expected)
def test_create_no_to_port_request(self):
serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<ip_protocol>TCP</ip_protocol>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"from_port": "22",
"ip_protocol": "TCP",
"cidr": "10.10.10.0/24"
},
}
self.assertEqual(request['body'], expected)
def test_create_no_from_port_request(self):
serial_request = """
<security_group_default_rule>
<to_port>22</to_port>
<ip_protocol>TCP</ip_protocol>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"to_port": "22",
"ip_protocol": "TCP",
"cidr": "10.10.10.0/24"
},
}
self.assertEqual(request['body'], expected)
def test_create_no_ip_protocol_request(self):
serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<to_port>22</to_port>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"from_port": "22",
"to_port": "22",
"cidr": "10.10.10.0/24"
},
}
self.assertEqual(request['body'], expected)
def test_create_no_cidr_request(self):
serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<to_port>22</to_port>
<ip_protocol>TCP</ip_protocol>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"from_port": "22",
"to_port": "22",
"ip_protocol": "TCP",
},
}
self.assertEqual(request['body'], expected)
class TestSecurityGroupDefaultRuleXMLSerializer(test.TestCase):
def setUp(self):
super(TestSecurityGroupDefaultRuleXMLSerializer, self).setUp()
self.namespace = wsgi.XMLNS_V11
self.rule_serializer =\
security_group_default_rules_v2.SecurityGroupDefaultRuleTemplate()
self.index_serializer =\
security_group_default_rules_v2.SecurityGroupDefaultRulesTemplate()
def _tag(self, elem):
tagname = elem.tag
self.assertEqual(tagname[0], '{')
tmp = tagname.partition('}')
namespace = tmp[0][1:]
self.assertEqual(namespace, self.namespace)
return tmp[2]
def _verify_security_group_default_rule(self, raw_rule, tree):
self.assertEqual(raw_rule['id'], tree.get('id'))
seen = set()
expected = set(['ip_protocol', 'from_port', 'to_port', 'ip_range',
'ip_range/cidr'])
for child in tree:
child_tag = self._tag(child)
seen.add(child_tag)
if child_tag == 'ip_range':
for gr_child in child:
gr_child_tag = self._tag(gr_child)
self.assertIn(gr_child_tag, raw_rule[child_tag])
seen.add('%s/%s' % (child_tag, gr_child_tag))
self.assertEqual(gr_child.text,
raw_rule[child_tag][gr_child_tag])
else:
self.assertEqual(child.text, raw_rule[child_tag])
self.assertEqual(seen, expected)
def test_rule_serializer(self):
raw_rule = dict(id='123',
ip_protocol='TCP',
from_port='22',
to_port='22',
ip_range=dict(cidr='10.10.10.0/24'))
rule = dict(security_group_default_rule=raw_rule)
text = self.rule_serializer.serialize(rule)
tree = etree.fromstring(text)
self.assertEqual('security_group_default_rule', self._tag(tree))
self._verify_security_group_default_rule(raw_rule, tree)
def test_index_serializer(self):
rules = [dict(id='123',
ip_protocol='TCP',
from_port='22',
to_port='22',
ip_range=dict(cidr='10.10.10.0/24')),
dict(id='234',
ip_protocol='UDP',
from_port='23456',
to_port='234567',
ip_range=dict(cidr='10.12.0.0/18')),
dict(id='345',
ip_protocol='tcp',
from_port='3456',
to_port='4567',
ip_range=dict(cidr='192.168.1.0/32'))]
rules_dict = dict(security_group_default_rules=rules)
text = self.index_serializer.serialize(rules_dict)
tree = etree.fromstring(text)
self.assertEqual('security_group_default_rules', self._tag(tree))
self.assertEqual(len(rules), len(tree))
for idx, child in enumerate(tree):
self._verify_security_group_default_rule(rules[idx], child)
|
py | 1a40df717017f493e3d38a56a43a0dfc7ddd7df6 | import elasticsearch
import datetime
node = 'Elasticsearch:80'
#node = '54.186.33.136:9200'
es = elasticsearch.Elasticsearch(node)
entry_mapping = {
'entry-type': {
'properties': {
'id': {'type': 'string'},
'created': {'type': 'date'},
'title': {'type': 'string'},
'tags': {'type': 'string', 'analyzer': 'keyword'},
'content': {'type': 'string'}
}
}
}
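# Note (illustrative sketch): `entry_mapping` above is defined but never applied
# in this script. With an elasticsearch-py client that still accepts body-style
# index APIs (e.g. the 2.x/5.x series), it could be installed when creating the
# index, for example:
#
# es.indices.create(index='test-index', body={'mappings': entry_mapping}, ignore=400)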
es.index(
index='test-index',
doc_type='test_type',
id='test_id',
body={
'title': 'test_title',
'content': 'This is the content',
},
op_type='create'
)
|
py | 1a40dfeb0aa58fedd6c9b5059746eb55ada0bcd6 | import numpy as np
import torch
from .primitives import fexp, cuboid_inside_outside_function, \
inside_outside_function, points_to_cuboid_distances, \
transform_to_primitives_centric_system, deform, sq_volumes
from .regularizers import get as get_regularizer
def sampling_from_parametric_space_to_equivalent_points(
shape_params,
epsilons,
sq_sampler
):
"""
Given the sampling steps in the parametric space, we want to get the actual
3D points.
Arguments:
----------
shape_params: Tensor with size BxMx3, containing the shape along each
axis for the M primitives
epsilons: Tensor with size BxMx2, containing the shape along the
latitude and the longitude for the M primitives
Returns:
---------
P: Tensor of size BxMxSx3 that contains S sampled points from the
surface of each primitive
N: Tensor of size BxMxSx3 that contains the normals of the S sampled
points from the surface of each primitive
"""
# Allocate memory to store the sampling steps
B = shape_params.shape[0] # batch size
M = shape_params.shape[1] # number of primitives
S = sq_sampler.n_samples
etas, omegas = sq_sampler.sample_on_batch(
shape_params.detach().cpu().numpy(),
epsilons.detach().cpu().numpy()
)
# Make sure we don't get nan for gradients
etas[etas == 0] += 1e-6
omegas[omegas == 0] += 1e-6
# Move to tensors
etas = shape_params.new_tensor(etas)
omegas = shape_params.new_tensor(omegas)
# Make sure that all tensors have the right shape
a1 = shape_params[:, :, 0].unsqueeze(-1) # size BxMx1
a2 = shape_params[:, :, 1].unsqueeze(-1) # size BxMx1
a3 = shape_params[:, :, 2].unsqueeze(-1) # size BxMx1
e1 = epsilons[:, :, 0].unsqueeze(-1) # size BxMx1
e2 = epsilons[:, :, 1].unsqueeze(-1) # size BxMx1
x = a1 * fexp(torch.cos(etas), e1) * fexp(torch.cos(omegas), e2)
y = a2 * fexp(torch.cos(etas), e1) * fexp(torch.sin(omegas), e2)
z = a3 * fexp(torch.sin(etas), e1)
# Make sure we don't get INFs
# x[torch.abs(x) <= 1e-9] = 1e-9
# y[torch.abs(y) <= 1e-9] = 1e-9
# z[torch.abs(z) <= 1e-9] = 1e-9
x = ((x > 0).float() * 2 - 1) * torch.max(torch.abs(x), x.new_tensor(1e-6))
y = ((y > 0).float() * 2 - 1) * torch.max(torch.abs(y), x.new_tensor(1e-6))
z = ((z > 0).float() * 2 - 1) * torch.max(torch.abs(z), x.new_tensor(1e-6))
# Compute the normals of the SQs
nx = (torch.cos(etas)**2) * (torch.cos(omegas)**2) / x
ny = (torch.cos(etas)**2) * (torch.sin(omegas)**2) / y
nz = (torch.sin(etas)**2) / z
return torch.stack([x, y, z], -1), torch.stack([nx, ny, nz], -1)
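# Sanity sketch (added for exposition; not part of the original module). It
# assumes fexp(x, e) == sign(x) * |x|**e, as is standard for superquadrics.
def _example_sphere_sanity_check():
    """With a1 = a2 = a3 = 1 and e1 = e2 = 1 the parametrization above reduces
    to the usual spherical one, so the sampled points lie on the unit sphere."""
    etas = torch.rand(16) * np.pi - np.pi / 2      # eta in (-pi/2, pi/2)
    omegas = torch.rand(16) * 2 * np.pi - np.pi    # omega in (-pi, pi)
    x = torch.cos(etas) * torch.cos(omegas)
    y = torch.cos(etas) * torch.sin(omegas)
    z = torch.sin(etas)
    assert torch.allclose(x**2 + y**2 + z**2, torch.ones(16))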
def sample_uniformly_from_cubes_surface(shape_params, epsilons, sampler):
"""
Given the sampling steps in the parametric space, we want to get the actual
3D points on the surface of the cube.
Arguments:
----------
shape_params: Tensor with size BxMx3, containing the shape along each
axis for the M primitives
Returns:
---------
P: Tensor of size BxMxSx3 that contains S sampled points from the
surface of each primitive
"""
# TODO: Make sure that this is the proper way to do this!
# Check the device of the angles and move all the tensors to that device
device = shape_params.device
# Allocate memory to store the sampling steps
B = shape_params.shape[0] # batch size
M = shape_params.shape[1] # number of primitives
S = sampler.n_samples
N = S // 6  # integer number of points per cube face (S is expected to be a multiple of 6)
X_SQ = torch.zeros(B, M, S, 3).to(device)
for b in range(B):
for m in range(M):
x_max = shape_params[b, m, 0]
y_max = shape_params[b, m, 1]
z_max = shape_params[b, m, 2]
x_min = -x_max
y_min = -y_max
z_min = -z_max
X_SQ[b, m] = torch.stack([
torch.stack([
torch.ones((N, 1)).to(device)*x_min,
torch.rand(N, 1).to(device)*(y_max-y_min) + y_min,
torch.rand(N, 1).to(device)*(z_max-z_min) + z_min
], dim=-1).squeeze(),
torch.stack([
torch.ones((N, 1)).to(device)*x_max,
torch.rand(N, 1).to(device)*(y_max-y_min) + y_min,
torch.rand(N, 1).to(device)*(z_max-z_min) + z_min
], dim=-1).squeeze(),
torch.stack([
torch.rand(N, 1).to(device)*(x_max-x_min) + x_min,
torch.ones((N, 1)).to(device)*y_min,
torch.rand(N, 1).to(device)*(z_max-z_min) + z_min
], dim=-1).squeeze(),
torch.stack([
torch.rand(N, 1).to(device)*(x_max-x_min) + x_min,
torch.ones((N, 1)).to(device)*y_max,
torch.rand(N, 1).to(device)*(z_max-z_min) + z_min
], dim=-1).squeeze(),
torch.stack([
torch.rand(N, 1).to(device)*(x_max-x_min) + x_min,
torch.rand(N, 1).to(device)*(y_max-y_min) + y_min,
torch.ones((N, 1)).to(device)*z_min,
], dim=-1).squeeze(),
torch.stack([
torch.rand(N, 1).to(device)*(x_max-x_min) + x_min,
torch.rand(N, 1).to(device)*(y_max-y_min) + y_min,
torch.ones((N, 1)).to(device)*z_max,
], dim=-1).squeeze()
]).view(-1, 3)
normals = X_SQ.new_zeros(X_SQ.shape)
normals[:, :, 0*N:1*N, 0] = -1
normals[:, :, 1*N:2*N, 0] = 1
normals[:, :, 2*N:3*N, 1] = -1
normals[:, :, 3*N:4*N, 1] = 1
normals[:, :, 4*N:5*N, 2] = -1
normals[:, :, 5*N:6*N, 2] = 1
# make sure that X_SQ has the expected shape
assert X_SQ.shape == (B, M, S, 3)
return X_SQ, normals
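# Illustrative sketch (added for exposition; not part of the original module):
def _example_cube_sampling_shapes():
    """With a stub sampler that only exposes `n_samples` (a multiple of 6), the
    cube sampler above returns surface points and axis-aligned unit normals of
    shape (B, M, S, 3)."""
    class _StubSampler(object):
        n_samples = 12
    shapes = torch.rand(2, 3, 3) * 0.5 + 0.1   # B=2, M=3, strictly positive sizes
    epsilons = torch.rand(2, 3, 2)             # accepted but unused by the cube sampler
    points, normals = sample_uniformly_from_cubes_surface(
        shapes, epsilons, _StubSampler())
    assert points.shape == (2, 3, 12, 3)
    assert normals.shape == (2, 3, 12, 3)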
def euclidean_dual_loss_pair(
y_hat_from,
y_target_from,
y_hat_to,
y_target_to,
regularizer_terms,
sampler,
options):
"""
Arguments:
----------
y_hat: List of Tensors containing the predictions of the network
y_target: Tensor with size BxNx6 with the N points from the target
object and their corresponding normals
regularizer_terms: dictionary with the various regularizers, on the
volume of the primitives, the Bernoullis etc.
sampler: An object of either CuboidSampler or EqualDistanceSampler
depending on the type of the primitive we are using
options: A dictionary with various options
Returns:
--------
the loss
"""
loss_from, debug_stats_from = euclidean_dual_loss(y_hat_from,
y_target_from,
regularizer_terms,
sampler, options)
loss_to, debug_stats_to = euclidean_dual_loss(y_hat_to,
y_target_to,
regularizer_terms,
sampler, options)
# y_hat_from y_hat_to contain all of the parameters needed
# to do regularization
loss = loss_from + loss_to
debug_stats = {}
for key in debug_stats_from:
assert isinstance(key, str)
debug_stats[key + "_from"] = debug_stats_from[key]
for key in debug_stats_to:
assert isinstance(key, str)
debug_stats[key + "_to"] = debug_stats_to[key]
rotations_L2 = ((y_hat_to.rotations - y_hat_from.rotations)**2).sum(-1).sum(-1)
translation_L2 = ((y_hat_to.translations - y_hat_from.translations)**2).sum(-1).sum(-1)
sizes_L2 = ((y_hat_to.sizes - y_hat_from.sizes)**2).sum(-1).sum(-1)
shapes_L2 = ((y_hat_to.shapes - y_hat_from.shapes)**2).sum(-1).sum(-1)
params_L2 = 0.0005 * (rotations_L2 + translation_L2 + sizes_L2 + shapes_L2)
debug_stats['delta_params_L2'] = params_L2
# y_hat_to - y_hat_from
prob_L1 = 0.005 * torch.abs(y_hat_to.probs - y_hat_from.probs).sum(-1).sum(-1)
debug_stats['delta_prob_L1'] = prob_L1
loss += prob_L1 + params_L2
return loss, debug_stats
def euclidean_dual_loss(
y_hat,
y_target,
regularizer_terms,
sampler,
options
):
"""
Arguments:
----------
y_hat: List of Tensors containing the predictions of the network
y_target: Tensor with size BxNx6 with the N points from the target
object and their corresponding normals
regularizer_terms: dictionary with the various regularizers, on the
volume of the primitives, the Bernoullis etc.
sampler: An object of either CuboidSampler or EqualDistanceSampler
depending on the type of the primitive we are using
options: A dictionary with various options
Returns:
--------
the loss
"""
# If use_cuboids is true then use 3D cuboids as geometric primitives. If
# use_sq is true use SQs as geometric primitives. If none of the above is
# true the default geometric primitive is cuboidal superquadrics, namely
# SQs with \epsilon_1=\epsilon_2=0.25
use_cuboids = options.get("use_cuboids", False)
use_sq = options.get("use_sq", False)
use_chamfer = options.get("use_chamfer", False)
loss_weights = options.get(
"loss_weights",
{"pcl_to_prim_weight": 1.0, "prim_to_pcl_weight": 1.0}
)
gt_normals = y_target[:, :, 3:6]
gt_points = y_target[:, :, :3]
# Make sure that everything has the right shape
assert gt_points.shape[-1] == 3
# Declare some variables
B = gt_points.shape[0] # batch size
N = gt_points.shape[1] # number of points per sample
M = y_hat[0].shape[1] # number of primitives
S = sampler.n_samples # number of points sampled from the SQ
probs = y_hat[0].view(B, M)
translations = y_hat[1].view(B, M, 3)
rotations = y_hat[2].view(B, M, 4)
shapes = y_hat[3].view(B, M, 3)
epsilons = y_hat[4].view(B, M, 2)
tapering_params = y_hat[5].view(B, M, 2)
# Transform the 3D points from world-coordinates to primitive-centric
# coordinates with size BxNxMx3
X_transformed = transform_to_primitives_centric_system(
gt_points,
translations,
rotations
)
# Based on the shape of the primitive, do the sampling either on the
# surface of the SQ or on the surface of the cuboid
if use_cuboids:
sample_points_on_surface = sample_uniformly_from_cubes_surface
else:
sample_points_on_surface =\
sampling_from_parametric_space_to_equivalent_points
# Get the coordinates of the sampled points on the surfaces of the SQs,
# with size BxMxSx3
X_SQ, normals = sample_points_on_surface(
shapes,
epsilons,
sampler
)
X_SQ = deform(X_SQ, shapes, tapering_params)
# Make the normals unit vectors
normals_norm = normals.norm(dim=-1).view(B, M, S, 1)
normals = normals / normals_norm
# Make sure that everything has the right size
assert X_SQ.shape == (B, M, S, 3)
assert normals.shape == (B, M, S, 3)
assert X_transformed.shape == (B, N, M, 3)
# Make sure that the normals are unit vectors
assert torch.sqrt(torch.sum(normals ** 2, -1)).sum() == B*M*S
# Compute the pairwise Euclidean distances between points sampled on the
# surface of the SQ (X_SQ) with points sampled on the surface of the target
# object (X_transformed)
# In the code we do everything at once, but this comment helps understand
# what we are actually doing
# t = X_transformed.permute(0, 2, 1, 3) # now X_transformed has size
# BxMxNx3
# xx_sq = X_sq.unsqueeze(3) # now xx_sq has size BxMxSx1x3
# t = t.unsqueeze(2) # now t has size BxMx1xNx3
V = (X_SQ.unsqueeze(3) - (X_transformed.permute(0, 2, 1, 3)).unsqueeze(2))
assert V.shape == (B, M, S, N, 3)
# Now we can compute the distances from every point in the surface of the
# SQ to every point on the target object transformed in every
# primitive-based coordinate system
# D = torch.sum((xx_sq - t)**2, -1) # D has size BxMxSxN
# TODO: Should I add the SQRT, now we are computing the squared distances
D = torch.sum((V)**2, -1)
assert D.shape == (B, M, S, N)
pcl_to_prim, inside, debug_stats = pcl_to_prim_loss(
[probs, translations, rotations, shapes, epsilons, tapering_params],
X_transformed,
D,
use_cuboids,
use_sq,
use_chamfer
)
assert inside is None or inside.shape == (B, N, M)
prim_to_pcl = prim_to_pcl_loss(
y_hat,
V,
normals,
inside,
D,
use_chamfer
)
# Compute any regularizer terms
regularizers = get_regularizer_term(
y_hat,
debug_stats["F"],
X_SQ,
regularizer_terms
)
reg_values = get_regularizer_weights(
regularizers,
regularizer_terms
)
debug_stats["regularizer_terms"] = reg_values
debug_stats["pcl_to_prim_loss"] = pcl_to_prim
debug_stats["prim_to_pcl_loss"] = prim_to_pcl
# Sum up the regularization terms
regs = sum(reg_values.values())
w1 = loss_weights["pcl_to_prim_weight"]
w2 = loss_weights["prim_to_pcl_weight"]
return w1 * pcl_to_prim + w2 * prim_to_pcl + regs, debug_stats
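# Prediction packing assumed by the losses above (shapes as reshaped inside
# euclidean_dual_loss):
#   y_hat[0]: existence probabilities        (B, M)
#   y_hat[1]: translations                   (B, M, 3)
#   y_hat[2]: rotations (presumably quaternions)  (B, M, 4)
#   y_hat[3]: sizes / shape parameters       (B, M, 3)
#   y_hat[4]: epsilons                       (B, M, 2)
#   y_hat[5]: tapering parameters            (B, M, 2)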
def pcl_to_prim_loss(
y_hat,
X_transformed,
D,
use_cuboids=False,
use_sq=False,
use_chamfer=False
):
"""
Arguments:
----------
y_hat: List of Tensors containing the predictions of the network
X_transformed: Tensor with size BxNxMx3 with the N points from the
target object transformed in the M primitive-centric
coordinate systems
D: Tensor of size BxMxSxN that contains the pairwise distances between
points on the surface of the SQ to the points on the target object
use_cuboids: when True use cuboids as geometric primitives
use_sq: when True use superquadrics as geometric primitives
use_chamfer: when True compute the Chamfer distance
"""
# Declare some variables
B = X_transformed.shape[0] # batch size
N = X_transformed.shape[1] # number of points per sample
M = X_transformed.shape[2] # number of primitives
shapes = y_hat[3].view(B, M, 3)
epsilons = y_hat[4].view(B, M, 2)
probs = y_hat[0]
# Get the relative position of points with respect to the SQs using the
# inside-outside function
F = shapes.new_tensor(0)
inside = None
# XXX
# if not use_chamfer: # you should still calculate the F's regardless...
if True:
if use_cuboids:
F = points_to_cuboid_distances(X_transformed, shapes)
inside = F <= 0
elif use_sq:
F = inside_outside_function(
X_transformed,
shapes,
epsilons
)
inside = F <= 1
else:
# If no argument is given (use_sq and use_cuboids) the default
# geometric primitives are cuboidal superquadrics, namely
# with \epsilon_1=\epsilon_2=0.25
F = cuboid_inside_outside_function(
X_transformed,
shapes,
epsilon=0.25
)
inside = F <= 1
D = torch.min(D, 2)[0].permute(0, 2, 1) # size BxNxM
assert D.shape == (B, N, M)
if not use_chamfer:
D[inside] = 0.0
distances, idxs = torch.sort(D, dim=-1)
# Start by computing the cumulative product
# Sort based on the indices
probs = torch.cat([
probs[i].take(idxs[i]).unsqueeze(0) for i in range(len(idxs))
])
neg_cumprod = torch.cumprod(1-probs, dim=-1)
neg_cumprod = torch.cat(
[neg_cumprod.new_ones((B, N, 1)), neg_cumprod[:, :, :-1]],
dim=-1
)
# minprob[i, j, k] is the probability that for sample i and point j the
# k-th primitive has the minimum loss
minprob = probs.mul(neg_cumprod)
loss = torch.einsum("ijk,ijk->", [distances, minprob])
loss = loss / B / N
# Return some debug statistics
debug_stats = {}
debug_stats["F"] = F
debug_stats["distances"] = distances
debug_stats["minprob"] = minprob
debug_stats["neg_cumprod"] = neg_cumprod
return loss, inside, debug_stats
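# Worked sketch (added for exposition; not part of the original module):
def _example_minprob_weighting():
    """The weighting scheme used in pcl_to_prim_loss above: after sorting
    primitives by distance, primitive k is weighted by p_k * prod_{j<k}(1 - p_j),
    i.e. the probability that it is the closest primitive that actually exists."""
    probs = torch.tensor([[[0.9, 0.5, 0.2]]])   # B=1, N=1, M=3, already sorted
    neg_cumprod = torch.cumprod(1 - probs, dim=-1)
    neg_cumprod = torch.cat(
        [neg_cumprod.new_ones((1, 1, 1)), neg_cumprod[:, :, :-1]], dim=-1)
    minprob = probs.mul(neg_cumprod)
    expected = torch.tensor([[[0.9, 0.1 * 0.5, 0.1 * 0.5 * 0.2]]])
    assert torch.allclose(minprob, expected)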
def prim_to_pcl_loss(
y_hat,
V,
normals,
inside,
D,
use_chamfer=False
):
"""
Arguments:
----------
y_hat: List of Tensors containing the predictions of the network
V: Tensor with size BxMxSxNx3 with the vectors from the points on SQs to
the points on the target object's surface.
normals: Tensor with size BxMxSx3 with the normals at every sampled
points on the surfaces of the M primitives
inside: A mask containing 1 if a point is inside the corresponding
shape
D: Tensor of size BxMxSxN that contains the pairwise distances between
points on the surface of the SQ to the points on the target object
"""
B = V.shape[0] # batch size
M = V.shape[1] # number of primitives
S = V.shape[2] # number of points sampled on the SQ
N = V.shape[3] # number of points sampled on the target object
probs = y_hat[0]
assert D.shape == (B, M, S, N)
# We need to compute the distance to the closest point from the target
# object for every point S
# min_D = D.min(-1)[0] # min_D has size BxMxS
if not use_chamfer:
outside = (1.0 - inside.float()).permute(0, 2, 1).unsqueeze(2)  # float mask of points outside
assert outside.shape == (B, M, 1, N)
D = D + (outside*1e30)
# Compute the minimum distances D, with size BxMxS
D = D.min(-1)[0]
D[D >= 1e30] = 0.0
assert D.shape == (B, M, S)
# Compute an approximate area of the superellipsoid as if it were an
# ellipsoid
shapes = y_hat[3].view(B, M, 3)
area = 4 * np.pi * (
(shapes[:, :, 0] * shapes[:, :, 1])**1.6 / 3 +
(shapes[:, :, 0] * shapes[:, :, 2])**1.6 / 3 +
(shapes[:, :, 1] * shapes[:, :, 2])**1.6 / 3
)**0.625
area = M * area / area.sum(dim=-1, keepdim=True)
# loss = torch.einsum("ij,ij,ij->", [torch.max(D, -1)[0], probs, volumes])
# loss = torch.einsum("ij,ij,ij->", [torch.mean(D, -1), probs, volumes])
# loss = torch.einsum("ij,ij->", [torch.max(D, -1)[0], probs])
loss = torch.einsum("ij,ij,ij->", [torch.mean(D, -1), probs, area])
loss = loss / B / M
return loss
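# Sanity sketch (added for exposition; not part of the original module):
def _example_ellipsoid_area_approximation():
    """The area term used in prim_to_pcl_loss above is the p = 1.6 ellipsoid
    surface-area approximation commonly attributed to Knud Thomsen. For a
    sphere with a = b = c = r it reduces to the exact value 4*pi*r**2."""
    r = 2.0
    approx = 4 * np.pi * (3 * (r * r) ** 1.6 / 3) ** 0.625
    exact = 4 * np.pi * r ** 2
    assert abs(approx - exact) < 1e-9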
def get_regularizer_term(
parameters,
F,
X_SQ,
regularizer_terms,
transition_matrix=None
):
regularizers = [
"sparsity_regularizer",
"bernoulli_regularizer",
"entropy_bernoulli_regularizer",
"parsimony_regularizer",
"overlapping_regularizer"
]
if regularizer_terms["regularizer_type"] is None:
regularizer_terms["regularizer_type"] = []
return {
r: get_regularizer(
r if r in regularizer_terms["regularizer_type"] else "",
parameters,
F,
X_SQ,
regularizer_terms
)
for r in regularizers
}
def get_regularizer_weights(regularizers, regularizer_terms):
# Ensures that the expected number of primitives lies between a minimum and
# a maximum number of primitives.
bernoulli_reg = regularizers["bernoulli_regularizer"] *\
regularizer_terms["bernoulli_regularizer_weight"]
# Ensures that the bernoullis will be either 1.0 or 0.0 and not 0.5
entropy_bernoulli_reg = regularizers["entropy_bernoulli_regularizer"] *\
regularizer_terms["entropy_bernoulli_regularizer_weight"]
# Minimizes the expected number of primitives
parsimony_reg = regularizers["parsimony_regularizer"] *\
regularizer_terms["parsimony_regularizer_weight"]
# Ensures that primitives do not intersect with each other using the F
# function
overlapping_reg = regularizers["overlapping_regularizer"] *\
regularizer_terms["overlapping_regularizer_weight"]
# Similar to the bernoulli_regularizer. Again we want to ensure that the
# expected number of primitives will be between a minimum and a maximum
# number of primitives.
sparsity_reg = regularizers["sparsity_regularizer"] *\
regularizer_terms["sparsity_regularizer_weight"]
reg_values = {
"sparsity_regularizer": sparsity_reg,
"overlapping_regularizer": overlapping_reg,
"parsimony_regularizer": parsimony_reg,
"entropy_bernoulli_regularizer": entropy_bernoulli_reg,
"bernoulli_regularizer": bernoulli_reg
}
return reg_values
|
py | 1a40e08d8bd4d2f185c300555a0751663587cfea | ###########################################################################
## Description: Script to collect StackExchange Q&A data (e.g. ids) using their API ###
## Status: WIP ###
###########################################################################
# -*- coding: utf-8 -*-
# import required libraries
import requests, json, time, datetime, csv, sys
from pandas.io.json import json_normalize
# path to data folder
path = '/home/irina/data'
# data collection iteration (batch api requests) < should probably turn this into an args-driven script
iteration = 8 ## UPDATE THIS: to improve data management
# define stackexchange api request properties
filter = '!asyat-glvTDN7Q*KS8FC0X2Ds8E427nbJlZsxliDugZwP6._EWQ0H)6SoS2c'
key = '564*QrqqIA5WwT1eXCHsTA((' ## UPDATE THIS: to improve speed
site = 'math.stackexchange'
#tag = 'examples-counterexamples+probability'
max = 1367752303 #int(time.time()) ## UPDATE THIS: to improve data management
min = 0
request_link = 'http://api.stackexchange.com/2.2/questions/?key=' + key + '&order=desc&sort=creation&pagesize=100&min=' + str(int(min)) + '&max=' + str(max) + '&site=' + site + '&filter=' + filter
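# Equivalent construction sketch (illustrative): requests can assemble the same
# query string from a params dict, which avoids the manual concatenation above
# (values are percent-encoded, which the API treats the same way).
_prepared = requests.Request(
    'GET', 'http://api.stackexchange.com/2.2/questions/',
    params={'key': key, 'order': 'desc', 'sort': 'creation', 'pagesize': 100,
            'min': int(min), 'max': max, 'site': site, 'filter': filter}).prepare()
# _prepared.url carries the same parameters as request_link.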
print "Looking at ", request_link
# make api request
r = requests.get(request_link)
temp = json.loads(r.text)
while 'error_id' in temp.keys():
print "Sleeping for 60 seconds ..."
time.sleep(60)
r = requests.get(request_link)
temp = json.loads(r.text)
data = temp
# search for relevant info in the api response and construct dataset
master_count = 0
req_count = 1
dup_count = 0
counter = 60
final = []
current = max
tryagain = False
it = 1
while data:
if "items" in data.keys():
if len(data["items"]) > 0:
for question in data["items"]:
master_count += 1
final.append(question)
current = question["creation_date"]
# exporting dataset to json
with open(path + '/getAllData_V2_mse_%d_%d_%d_%d.json' % (iteration, it, int(current), int(max)), 'w') as outfile:
json.dump(final, outfile)
it += 1
# do another api request to get the next 100 posts
print "--------------NEXT---------------"
request_link = 'http://api.stackexchange.com/2.2/questions/?key=' + key + '&order=desc&sort=creation&pagesize=100&min=' + str(int(min)) + '&max=' + str(current) + '&site=' + site + '&filter=' + filter
current_max = int(question["creation_date"])
print 'Questions looked at: ', master_count, ", Requests made: ", req_count
print "Looking at ", request_link
try:
req_count += 1
r = requests.get(request_link)
tryagain = False
except requests.exceptions.RequestException as e:
tryagain = True
print e
temp = json.loads(r.text)
while 'error_id' in temp.keys() or tryagain:
print 'Questions looked at: ', master_count
print 'Sleeping for ' + str(counter) + ' seconds ...'
time.sleep(counter)
counter = 3 * counter
try:
req_count += 1
r = requests.get(request_link)
tryagain = False
except requests.exceptions.RequestException as e:
tryagain = True
print e
temp = json.loads(r.text)
data = temp
counter = 60
else:
print "Error: Couldn't find questions in this response"
print request_link
print data
break
else:
print "===========API-ERROR============="
print data
break
# print script summary
print "---------------END---------------"
print "Questions looked at: ", master_count, ", Requests made: ", req_count
#print "Duplicates found: ", dup_count
#if question:
# print "From ", datetime.datetime.fromtimestamp(int(question["creation_date"])).strftime('%Y-%m-%d %H:%M:%S'), "to ", datetime.datetime.fromtimestamp(int(max)).strftime('%Y-%m-%d %H:%M:%S')
# exporting dataset to json
with open(path + '/getAllData_V2_mse_aggregate_%d_%d_%d.json' % (iteration, int(current), int(max)), 'w') as outfile:
json.dump(final, outfile)
|
py | 1a40e15e5b1303d3f4573f2ab808234ba5361c43 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "Pyfmodex"
copyright = "2021, Bart Van Loon"
author = "Bart Van Loon"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.todo"]
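# Related optional setting (shown for reference, not enabled here): with
# sphinx.ext.todo loaded, todo notes are only rendered in the built docs when
# todo_include_todos = True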
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
|
py | 1a40e1f18336e1efaac8e597ba4b76efc348ce30 | """
RESTful platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.rest/
"""
import logging
import requests
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TARGET, ATTR_TITLE, ATTR_TITLE_DEFAULT, BaseNotificationService,
PLATFORM_SCHEMA)
from homeassistant.const import (CONF_RESOURCE, CONF_METHOD, CONF_NAME)
import homeassistant.helpers.config_validation as cv
CONF_MESSAGE_PARAMETER_NAME = 'message_param_name'
CONF_TARGET_PARAMETER_NAME = 'target_param_name'
CONF_TITLE_PARAMETER_NAME = 'title_param_name'
DEFAULT_MESSAGE_PARAM_NAME = 'message'
DEFAULT_METHOD = 'GET'
DEFAULT_TARGET_PARAM_NAME = None
DEFAULT_TITLE_PARAM_NAME = None
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_RESOURCE): cv.url,
vol.Optional(CONF_MESSAGE_PARAMETER_NAME,
default=DEFAULT_MESSAGE_PARAM_NAME): cv.string,
vol.Optional(CONF_METHOD, default=DEFAULT_METHOD):
vol.In(['POST', 'GET', 'POST_JSON']),
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TARGET_PARAMETER_NAME,
default=DEFAULT_TARGET_PARAM_NAME): cv.string,
vol.Optional(CONF_TITLE_PARAMETER_NAME,
default=DEFAULT_TITLE_PARAM_NAME): cv.string,
})
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config, discovery_info=None):
"""Get the RESTful notification service."""
resource = config.get(CONF_RESOURCE)
method = config.get(CONF_METHOD)
message_param_name = config.get(CONF_MESSAGE_PARAMETER_NAME)
title_param_name = config.get(CONF_TITLE_PARAMETER_NAME)
target_param_name = config.get(CONF_TARGET_PARAMETER_NAME)
return RestNotificationService(
resource, method, message_param_name, title_param_name,
target_param_name)
class RestNotificationService(BaseNotificationService):
"""Implementation of a notification service for REST."""
def __init__(self, resource, method, message_param_name, title_param_name,
target_param_name):
"""Initialize the service."""
self._resource = resource
self._method = method.upper()
self._message_param_name = message_param_name
self._title_param_name = title_param_name
self._target_param_name = target_param_name
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
data = {
self._message_param_name: message
}
if self._title_param_name is not None:
data[self._title_param_name] = kwargs.get(
ATTR_TITLE, ATTR_TITLE_DEFAULT)
if self._target_param_name is not None and ATTR_TARGET in kwargs:
# Target is a list as of 0.29 and we don't want to break existing
# integrations, so just return the first target in the list.
data[self._target_param_name] = kwargs[ATTR_TARGET][0]
if self._method == 'POST':
response = requests.post(self._resource, data=data, timeout=10)
elif self._method == 'POST_JSON':
response = requests.post(self._resource, json=data, timeout=10)
else: # default GET
response = requests.get(self._resource, params=data, timeout=10)
if response.status_code not in (200, 201):
_LOGGER.exception(
"Error sending message. Response %d: %s:",
response.status_code, response.reason)
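# Minimal usage sketch (outside Home Assistant, illustrative only; the URL below
# is a placeholder):
#
# service = RestNotificationService(
#     'http://localhost:8000/notify', 'POST_JSON', 'message', 'title', None)
# service.send_message('Front door opened', title='Alarm')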
|
py | 1a40e4560c5577d4f20d6191bdcedc84efe13c13 | from __future__ import absolute_import, division, print_function
import boost_adaptbx.boost.python as bp
ext = bp.import_ext("iotbx_pdb_hierarchy_ext")
from iotbx_pdb_hierarchy_ext import *
from libtbx.str_utils import show_sorted_by_counts
from libtbx.utils import Sorry, plural_s, null_out
from libtbx import Auto, dict_with_default_0, group_args
from iotbx.pdb import hy36encode, hy36decode, common_residue_names_get_class
from iotbx.pdb.amino_acid_codes import one_letter_given_three_letter
from iotbx.pdb.modified_aa_names import lookup as aa_3_as_1_mod
from iotbx.pdb.modified_rna_dna_names import lookup as na_3_as_1_mod
from iotbx.pdb.utils import all_chain_ids, all_label_asym_ids
import iotbx.cif.model
from cctbx import crystal
from cctbx.array_family import flex
import six
from six.moves import cStringIO as StringIO
from six.moves import range, zip
import collections
import operator
import warnings
import math
import sys
class pickle_import_trigger(object): pass
level_ids = ["model", "chain", "residue_group", "atom_group", "atom"]
def _show_residue_group(rg, out, prefix):
atoms = rg.atoms()
if (atoms.size() == 0):
ch = rg.parent()
if (ch is None): ch = " "
else: ch = "%s" % ch.id
print(prefix+'empty: "%s%s"' % (ch, rg.resid()), file=out)
else:
def show_atom(atom):
print(prefix+'"%s"' % atom.format_atom_record(
replace_floats_with=".*."), file=out)
if (atoms.size() <= 3):
for atom in atoms: show_atom(atom)
else:
show_atom(atoms[0])
print(prefix+'... %d atom%s not shown' % plural_s(
atoms.size()-2), file=out)
show_atom(atoms[-1])
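# Usage sketch (illustrative, not part of the library): _show_residue_group is
# the formatting helper used by overall_counts below. Given a residue_group from
# a parsed hierarchy it prints its atoms in ".*."-masked PDB format (abbreviated
# when there are more than three atoms), e.g. (requires `import iotbx.pdb`;
# "model.pdb" is a hypothetical input file):
#
#   hierarchy = iotbx.pdb.input(file_name="model.pdb").construct_hierarchy()
#   for rg in hierarchy.residue_groups():
#       _show_residue_group(rg=rg, out=sys.stdout, prefix="  ")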
class overall_counts(object):
def __init__(self):
self._errors = None
self._warnings = None
def show(self,
out=None,
prefix="",
flag_errors=True,
flag_warnings=True,
residue_groups_max_show=10,
duplicate_atom_labels_max_show=10):
if (out is None): out = sys.stdout
self._errors = []
self._warnings = []
def add_err(msg):
if (flag_errors): print(prefix+msg, file=out)
self._errors.append(msg.strip())
def add_warn(msg):
if (flag_warnings): print(prefix+msg, file=out)
self._warnings.append(msg.strip())
fmt = "%%%dd" % len(str(self.n_atoms))
print(prefix+"total number of:", file=out)
if (self.n_duplicate_model_ids != 0):
add_err(" ### ERROR: duplicate model ids ###")
if (self.n_empty_models != 0):
add_warn(" ### WARNING: empty model ###")
print(prefix+" models: ", fmt % self.n_models, end='', file=out)
infos = []
if (self.n_duplicate_model_ids != 0):
infos.append("%d with duplicate model id%s" % plural_s(
self.n_duplicate_model_ids))
if (self.n_empty_models != 0):
infos.append("%d empty" % self.n_empty_models)
if (len(infos) != 0): print(" (%s)" % "; ".join(infos), end='', file=out)
print(file=out)
if (self.n_duplicate_chain_ids != 0):
add_warn(" ### WARNING: duplicate chain ids ###")
if (self.n_empty_chains != 0):
add_warn(" ### WARNING: empty chain ###")
print(prefix+" chains: ", fmt % self.n_chains, end='', file=out)
infos = []
if (self.n_duplicate_chain_ids != 0):
infos.append("%d with duplicate chain id%s" % plural_s(
self.n_duplicate_chain_ids))
if (self.n_empty_chains != 0):
infos.append("%d empty" % self.n_empty_chains)
if (self.n_explicit_chain_breaks != 0):
infos.append("%d explicit chain break%s" % plural_s(
self.n_explicit_chain_breaks))
if (len(infos) != 0): print(" (%s)" % "; ".join(infos), end='', file=out)
print(file=out)
print(prefix+" alt. conf.:", fmt % self.n_alt_conf, file=out)
print(prefix+" residues: ", fmt % (
self.n_residues + self.n_residue_groups + self.n_empty_residue_groups), end='', file=out)
if (self.n_residue_groups != 0):
print(" (%d with mixed residue names)" % self.n_residue_groups, end='', file=out)
print(file=out)
if (self.n_duplicate_atom_labels != 0):
add_err(" ### ERROR: duplicate atom labels ###")
print(prefix+" atoms: ", fmt % self.n_atoms, end='', file=out)
if (self.n_duplicate_atom_labels != 0):
print(" (%d with duplicate labels)" %self.n_duplicate_atom_labels, end='', file=out)
print(file=out)
print(prefix+" anisou: ", fmt % self.n_anisou, file=out)
if (self.n_empty_residue_groups != 0):
add_warn(" ### WARNING: empty residue_group ###")
print(prefix+" empty residue_groups:", \
fmt % self.n_empty_residue_groups, file=out)
if (self.n_empty_atom_groups != 0):
add_warn(" ### WARNING: empty atom_group ###")
print(prefix+" empty atom_groups:", \
fmt % self.n_empty_atom_groups, file=out)
#
c = self.element_charge_types
print(prefix+"number of atom element+charge types:", len(c), file=out)
if (len(c) != 0):
print(prefix+"histogram of atom element+charge frequency:", file=out)
show_sorted_by_counts(c.items(), out=out, prefix=prefix+" ")
#
c = self.resname_classes
print(prefix+"residue name classes:", end='', file=out)
if (len(c) == 0): print(" None", end='', file=out)
print(file=out)
show_sorted_by_counts(c.items(), out=out, prefix=prefix+" ")
#
c = self.chain_ids
print(prefix+"number of chain ids: %d" % len(c), file=out)
if (len(c) != 0):
print(prefix+"histogram of chain id frequency:", file=out)
show_sorted_by_counts(c.items(), out=out, prefix=prefix+" ")
#
c = self.alt_conf_ids
print(prefix+"number of alt. conf. ids: %d" % len(c), file=out)
if (len(c) != 0):
print(prefix+"histogram of alt. conf. id frequency:", file=out)
show_sorted_by_counts(c.items(), out=out, prefix=prefix+" ")
#
fmt = "%%%dd" % len(str(max(
self.n_alt_conf_none,
self.n_alt_conf_pure,
self.n_alt_conf_proper,
self.n_alt_conf_improper)))
print(prefix+"residue alt. conf. situations:", file=out)
print(prefix+" pure main conf.: ", fmt%self.n_alt_conf_none, file=out)
print(prefix+" pure alt. conf.: ", fmt%self.n_alt_conf_pure, file=out)
print(prefix+" proper alt. conf.: ", fmt%self.n_alt_conf_proper, file=out)
if (self.n_alt_conf_improper != 0):
add_err(" ### ERROR: improper alt. conf. ###")
print(prefix+" improper alt. conf.:", \
fmt % self.n_alt_conf_improper, file=out)
self.show_chains_with_mix_of_proper_and_improper_alt_conf(
out=out, prefix=prefix)
#
c = self.resnames
print(prefix+"number of residue names: %d" % len(c), file=out)
if (len(c) != 0):
print(prefix+"histogram of residue name frequency:", file=out)
annotation_appearance = {
"common_amino_acid": None,
"modified_amino_acid": " modified amino acid",
"common_rna_dna": None,
"modified_rna_dna": " modified rna/dna",
"common_water": " common water",
"common_small_molecule": " common small molecule",
"common_element": " common element",
"other": " other",
'd_amino_acid' : ' D-amino acid',
'common_saccharide' : ' common saccharide',
}
show_sorted_by_counts(c.items(), out=out, prefix=prefix+" ",
annotations=[
annotation_appearance[common_residue_names_get_class(name=name)]
for name in c.keys()])
#
if (len(self.consecutive_residue_groups_with_same_resid) != 0):
add_warn("### WARNING: consecutive residue_groups with same resid ###")
self.show_consecutive_residue_groups_with_same_resid(
out=out, prefix=prefix, max_show=residue_groups_max_show)
#
if (len(self.residue_groups_with_multiple_resnames_using_same_altloc)!= 0):
add_err("### ERROR: residue group with multiple resnames using"
" same altloc ###")
self.show_residue_groups_with_multiple_resnames_using_same_altloc(
out=out, prefix=prefix, max_show=residue_groups_max_show)
#
self.show_duplicate_atom_labels(
out=out, prefix=prefix, max_show=duplicate_atom_labels_max_show)
def as_str(self,
prefix="",
residue_groups_max_show=10,
duplicate_atom_labels_max_show=10):
out = StringIO()
self.show(
out=out,
prefix=prefix,
residue_groups_max_show=residue_groups_max_show,
duplicate_atom_labels_max_show=duplicate_atom_labels_max_show)
return out.getvalue()
def errors(self):
if (self._errors is None): self.show(out=null_out())
return self._errors
def get_n_residues_of_classes(self, classes):
result = 0
for resname, count in self.resnames.items():
if common_residue_names_get_class(resname) in classes:
result += count
return result
def warnings(self):
if (self._warnings is None): self.show(out=null_out())
return self._warnings
def errors_and_warnings(self):
return self.errors() + self.warnings()
def show_improper_alt_conf(self, out=None, prefix=""):
if (self.n_alt_conf_improper == 0): return
if (out is None): out = sys.stdout
for residue_group,label in [(self.alt_conf_proper, "proper"),
(self.alt_conf_improper, "improper")]:
if (residue_group is None): continue
print(prefix+"residue with %s altloc" % label, file=out)
for ag in residue_group.atom_groups():
for atom in ag.atoms():
print(prefix+' "%s"' % atom.format_atom_record(
replace_floats_with=".*."), file=out)
def raise_improper_alt_conf_if_necessary(self):
sio = StringIO()
self.show_improper_alt_conf(out=sio)
msg = sio.getvalue()
if (len(msg) != 0): raise Sorry(msg.rstrip())
def show_chains_with_mix_of_proper_and_improper_alt_conf(self,
out=None,
prefix=""):
if (out is None): out = sys.stdout
n = self.n_chains_with_mix_of_proper_and_improper_alt_conf
print(prefix+"chains with mix of proper and improper alt. conf.:", n, file=out)
if (n != 0): prefix += " "
self.show_improper_alt_conf(out=out, prefix=prefix)
def raise_chains_with_mix_of_proper_and_improper_alt_conf_if_necessary(self):
if (self.n_chains_with_mix_of_proper_and_improper_alt_conf == 0):
return
sio = StringIO()
self.show_chains_with_mix_of_proper_and_improper_alt_conf(out=sio)
raise Sorry(sio.getvalue().rstrip())
def show_consecutive_residue_groups_with_same_resid(self,
out=None,
prefix="",
max_show=10):
cons = self.consecutive_residue_groups_with_same_resid
if (len(cons) == 0): return
if (out is None): out = sys.stdout
print(prefix+"number of consecutive residue groups with same resid: %d" % \
len(cons), file=out)
if (max_show is None): max_show = len(cons)
elif (max_show <= 0): return
delim = prefix+" "+"-"*42
prev_rg = None
for rgs in cons[:max_show]:
for next,rg in zip(["", "next "], rgs):
if ( prev_rg is not None
and prev_rg.memory_id() == rg.memory_id()): continue
elif (next == "" and prev_rg is not None):
print(delim, file=out)
prev_rg = rg
print(prefix+" %sresidue group:" % next, file=out)
_show_residue_group(rg=rg, out=out, prefix=prefix+" ")
if (len(cons) > max_show):
print(delim, file=out)
print(prefix + " ... %d remaining instance%s not shown" % \
plural_s(len(cons)-max_show), file=out)
def show_residue_groups_with_multiple_resnames_using_same_altloc(self,
out=None,
prefix="",
max_show=10):
rgs = self.residue_groups_with_multiple_resnames_using_same_altloc
if (len(rgs) == 0): return
print(prefix+"residue groups with multiple resnames using" \
" same altloc:", len(rgs), file=out)
if (max_show is None): max_show = len(rgs)
elif (max_show <= 0): return
for rg in rgs[:max_show]:
print(prefix+" residue group:", file=out)
_show_residue_group(rg=rg, out=out, prefix=prefix+" ")
if (len(rgs) > max_show):
print(prefix + " ... %d remaining instance%s not shown" % \
plural_s(len(rgs)-max_show), file=out)
def \
raise_residue_groups_with_multiple_resnames_using_same_altloc_if_necessary(
self, max_show=10):
sio = StringIO()
self.show_residue_groups_with_multiple_resnames_using_same_altloc(
out=sio, max_show=max_show)
msg = sio.getvalue()
if (len(msg) != 0): raise Sorry(msg.rstrip())
def show_duplicate_atom_labels(self, out=None, prefix="", max_show=10):
dup = self.duplicate_atom_labels
if (len(dup) == 0): return
if (out is None): out = sys.stdout
fmt = "%%%dd" % len(str(self.n_duplicate_atom_labels))
print(prefix+"number of groups of duplicate atom labels:", \
fmt % len(dup), file=out)
print(prefix+" total number of affected atoms: ", \
fmt % self.n_duplicate_atom_labels, file=out)
if (max_show is None): max_show = len(dup)
elif (max_show <= 0): return
for atoms in dup[:max_show]:
prfx = " group "
for atom in atoms:
atom_str = atom.format_atom_record(replace_floats_with=".*.")
# replacing atom number with .*.
a_s = atom_str[:4]+ " .*." + atom_str[11:]
print(prefix+prfx+'"%s"' % a_s, file=out)
prfx = " "
if (len(dup) > max_show):
print(prefix+" ... %d remaining group%s not shown" % \
plural_s(len(dup)-max_show), file=out)
def raise_duplicate_atom_labels_if_necessary(self, max_show=10):
sio = StringIO()
self.show_duplicate_atom_labels(out=sio, max_show=max_show)
msg = sio.getvalue()
if (len(msg) != 0): raise Sorry(msg.rstrip())
class __hash_eq_mixin(object):
def __hash__(self):
return hash(self.memory_id())
def __eq__(self, other):
if (isinstance(other, self.__class__)):
return (self.memory_id() == other.memory_id())
return False
def __ne__(self, other):
return not ( self == other )
bp.inject(ext.root, __hash_eq_mixin)
@bp.inject_into(ext.root)
class _():
__doc__ = """
Root node of the PDB hierarchy object. This is returned by the method
construct_hierarchy() of the PDB/mmCIF input objects, but it may also be
  created programmatically. Note that it does not contain any reference to
crystal symmetry or source scattering information, meaning that in practice
it must often be tracked alongside an equivalent cctbx.xray.structure object.
Pickling is supported, simply by writing out and reading back the PDB-format
representation of the hierarchy.
Examples
--------
>>> hierarchy = iotbx.pdb.hierarchy.root()
"""
def __getstate__(self):
version = 2
pdb_string = StringIO()
py3out = self._as_pdb_string_cstringio( # NOTE py3out will be None in py2
cstringio=pdb_string,
append_end=True,
interleaved_conf=0,
atoms_reset_serial_first_value=None,
atom_hetatm=True,
sigatm=True,
anisou=True,
siguij=True)
if six.PY3:
pdb_string.write(py3out)
return (version, pickle_import_trigger(), self.info, pdb_string.getvalue())
def __setstate__(self, state):
assert len(state) >= 3
version = state[0]
if (version == 1): assert len(state) == 3
elif (version == 2): assert len(state) == 4
else: raise RuntimeError("Unknown version of pickled state.")
self.info = state[-2]
import iotbx.pdb
models = iotbx.pdb.input(
source_info="pickle",
lines=flex.split_lines(state[-1])).construct_hierarchy(sort_atoms=False).models()
self.pre_allocate_models(number_of_additional_models=len(models))
for model in models:
self.append_model(model=model)
def chains(self):
"""
Iterate over all chains in all models.
"""
for model in self.models():
for chain in model.chains():
yield chain
def residue_groups(self):
"""Iterate over all residue groups (by model and then chain)"""
for model in self.models():
for chain in model.chains():
for rg in chain.residue_groups():
yield rg
def atom_groups(self):
"""
Iterate over all atom groups (by model, then chain, then residue group)
"""
for model in self.models():
for chain in model.chains():
for rg in chain.residue_groups():
for ag in rg.atom_groups():
yield ag
def only_model(self):
assert self.models_size() == 1
return self.models()[0]
def only_chain(self):
return self.only_model().only_chain()
def only_residue_group(self):
return self.only_chain().only_residue_group()
def only_conformer(self):
return self.only_chain().only_conformer()
def only_atom_group(self):
return self.only_residue_group().only_atom_group()
def only_residue(self):
return self.only_conformer().only_residue()
def only_atom(self):
return self.only_atom_group().only_atom()
def overall_counts(self):
"""
Calculate basic statistics for contents of the PDB hierarchy, including
number of residues of each type.
:returns: iotbx.pdb.hierarchy.overall_counts object
"""
result = overall_counts()
self.get_overall_counts(result)
return result
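  # Illustrative usage (editor's sketch, not part of the original docs;
  # assumes a readable file "model.pdb" and that iotbx.pdb is importable):
  #   import iotbx.pdb
  #   hierarchy = iotbx.pdb.input(file_name="model.pdb").construct_hierarchy()
  #   counts = hierarchy.overall_counts()
  #   counts.show(prefix="  ")  # summary of chains, residues, altlocs, errors
  #   counts.raise_duplicate_atom_labels_if_necessary()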
def occupancy_counts(self):
eps = 1.e-6
occ = self.atoms().extract_occ()
mean = flex.mean(occ)
negative = (occ<0).count(True)
zero_count = (flex.abs(occ)<eps).count(True)
zero_fraction = zero_count*100./occ.size()
equal_to_1_count = ((occ>(1.-eps)) & (occ<(1.+eps))).count(True)
equal_to_1_fraction = equal_to_1_count*100/occ.size()
between_0_and_1_count = ((occ>(0.+eps)) & (occ<(1.-eps))).count(True)
between_0_and_1_fraction = between_0_and_1_count*100/occ.size()
greater_than_1_count = (occ>(1.+eps)).count(True)
greater_than_1_fraction = greater_than_1_count*100./occ.size()
number_of_residues = len(list(self.residue_groups()))
number_of_alt_confs = 0
alt_loc_dist = collections.Counter()
for rg in self.residue_groups():
n_confs = len(rg.conformers())
if(n_confs > 1):
number_of_alt_confs += 1
alt_loc_dist[n_confs] += 1
return group_args(
mean = mean,
negative = negative,
zero_count = zero_count,
zero_fraction = zero_fraction,
equal_to_1_count = equal_to_1_count,
equal_to_1_fraction = equal_to_1_fraction,
between_0_and_1_count = between_0_and_1_count,
between_0_and_1_fraction = between_0_and_1_fraction,
greater_than_1_count = greater_than_1_count,
greater_than_1_fraction = greater_than_1_fraction,
alt_conf_frac = number_of_alt_confs*100/number_of_residues,
alt_loc_dist = alt_loc_dist)
def composition(self):
asc = self.atom_selection_cache()
def rc(sel_str, as_atoms=False):
sel = asc.selection(sel_str)
if(as_atoms):
return self.select(sel).atoms().size()
else:
return len(list(self.select(sel).residue_groups()))
sel_str_other = "not (water or nucleotide or protein)"
other_cnts = collections.Counter()
for rg in self.select(asc.selection(sel_str_other)).residue_groups():
for resname in rg.unique_resnames():
other_cnts[resname]+=1
return group_args(
n_atoms = self.atoms().size(),
n_chains = len(list(self.chains())),
n_protein = rc("protein"),
n_nucleotide = rc("nucleotide"),
n_water = rc("water"),
n_hd = rc(sel_str="element H or element D",as_atoms=True),
n_other = rc(sel_str_other),
other_cnts = other_cnts,
# atom counts for Table 1
n_protein_atoms = rc("protein and not (element H or element D)", as_atoms=True),
n_nucleotide_atoms = rc("nucleotide and not (element H or element D)", as_atoms=True),
n_water_atoms = rc("water", as_atoms=True),
n_other_atoms = rc(sel_str_other, as_atoms=True))
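  # Illustrative usage (editor's sketch): composition() returns a group_args
  # object whose attributes mirror the keywords above, e.g.
  #   comp = hierarchy.composition()
  #   print(comp.n_atoms, comp.n_protein, comp.n_water, comp.other_cnts)
  # where "hierarchy" is assumed to be an existing root object.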
def show(self,
out=None,
prefix="",
level_id=None,
level_id_exception=ValueError):
"""
Display a summary of hierarchy contents.
"""
if (level_id == None): level_id = "atom"
try: level_no = level_ids.index(level_id)
except ValueError:
raise level_id_exception('Unknown level_id="%s"' % level_id)
if (out is None): out = sys.stdout
if (self.models_size() == 0):
print(prefix+'### WARNING: empty hierarchy ###', file=out)
model_ids = dict_with_default_0()
for model in self.models():
model_ids[model.id] += 1
for model in self.models():
chains = model.chains()
if (model_ids[model.id] != 1):
s = " ### ERROR: duplicate model id ###"
else: s = ""
print(prefix+'model id="%s"' % model.id, \
"#chains=%d%s" % (len(chains), s), file=out)
if (level_no == 0): continue
if (model.chains_size() == 0):
print(prefix+' ### WARNING: empty model ###', file=out)
model_chain_ids = dict_with_default_0()
for chain in chains:
model_chain_ids[chain.id] += 1
for chain in chains:
rgs = chain.residue_groups()
if (model_chain_ids[chain.id] != 1):
s = " ### WARNING: duplicate chain id ###"
else: s = ""
print(prefix+' chain id="%s"' % chain.id, \
"#residue_groups=%d%s" % (len(rgs), s), file=out)
if (level_no == 1): continue
if (chain.residue_groups_size() == 0):
print(prefix+' ### WARNING: empty chain ###', file=out)
suppress_chain_break = True
prev_resid = ""
for rg in rgs:
if (not rg.link_to_previous and not suppress_chain_break):
print(prefix+" ### chain break ###", file=out)
suppress_chain_break = False
ags = rg.atom_groups()
resnames = set()
for ag in rg.atom_groups():
resnames.add(ag.resname)
infos = []
if (len(resnames) > 1): infos.append("with mixed residue names")
resid = rg.resid()
if (prev_resid == resid): infos.append("same as previous resid")
prev_resid = resid
if (len(infos) != 0): s = " ### Info: %s ###" % "; ".join(infos)
else: s = ""
print(prefix+' resid="%s"' % resid, \
"#atom_groups=%d%s" % (len(ags), s), file=out)
if (level_no == 2): continue
if (rg.atom_groups_size() == 0):
print(prefix+' ### WARNING: empty residue_group ###', file=out)
for ag in ags:
atoms = ag.atoms()
print(prefix+' altloc="%s"' % ag.altloc, \
'resname="%s"' % ag.resname, \
"#atoms=%d" % len(atoms), file=out)
if (level_no == 3): continue
if (ag.atoms_size() == 0):
print(prefix+' ### WARNING: empty atom_group ###', file=out)
for atom in atoms:
print(prefix+' "%s"' % atom.name, file=out)
def as_str(self,
prefix="",
level_id=None,
level_id_exception=ValueError):
"""
Alias for show().
"""
out = StringIO()
self.show(
out=out,
prefix=prefix,
level_id=level_id,
level_id_exception=level_id_exception)
return out.getvalue()
def as_pdb_string(self,
crystal_symmetry=None,
cryst1_z=None,
write_scale_records=True,
append_end=False,
interleaved_conf=0,
atoms_reset_serial_first_value=None,
atom_hetatm=True,
sigatm=True,
anisou=True,
siguij=True,
output_break_records=True, # TODO deprecate
cstringio=None,
return_cstringio=Auto):
"""
Generate complete PDB-format string representation. External crystal
symmetry is strongly recommended if this is being output to a file.
:param crystal_symmetry: cctbx.crystal.symmetry object or equivalent (such
as an xray.structure object or Miller array)
:param write_scale_records: write fractional scaling records (SCALE) if
crystal symmetry is provided
:param anisou: write ANISOU records for anisotropic atoms
:param sigatm: write SIGATM records if applicable
:param siguij: write SIGUIJ records if applicable
:returns: Python str
"""
if (cstringio is None):
cstringio = StringIO()
if (return_cstringio is Auto):
return_cstringio = False
elif (return_cstringio is Auto):
return_cstringio = True
if (crystal_symmetry is not None or cryst1_z is not None):
from iotbx.pdb import format_cryst1_and_scale_records
print(format_cryst1_and_scale_records(
crystal_symmetry=crystal_symmetry,
cryst1_z=cryst1_z,
write_scale_records=write_scale_records), file=cstringio)
py3out = self._as_pdb_string_cstringio(
cstringio=cstringio,
append_end=append_end,
interleaved_conf=interleaved_conf,
atoms_reset_serial_first_value=atoms_reset_serial_first_value,
atom_hetatm=atom_hetatm,
sigatm=sigatm,
anisou=anisou,
siguij=siguij,
output_break_records=output_break_records)
if six.PY3:
cstringio.write(py3out)
if (return_cstringio):
return cstringio
return cstringio.getvalue()
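  # Illustrative usage (editor's sketch; "model.pdb" is an assumed input file):
  #   import iotbx.pdb
  #   pdb_inp = iotbx.pdb.input(file_name="model.pdb")
  #   hierarchy = pdb_inp.construct_hierarchy()
  #   pdb_str = hierarchy.as_pdb_string(
  #     crystal_symmetry=pdb_inp.crystal_symmetry(),  # emits CRYST1/SCALE
  #     append_end=True)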
# MARKED_FOR_DELETION_OLEG
  # REASON: This is not an equivalent conversion. The hierarchy does not carry
  # much of the information that pdb_input and cif_input should have, so this
  # function should not be used at all, to avoid confusion and crippled input
  # objects. Moreover, the use of mmtbx.model should eliminate the need for
  # this transformation.
# Currently used exclusively in Tom's code.
def as_pdb_input(self, crystal_symmetry=None):
"""
Generate corresponding pdb.input object.
"""
import iotbx.pdb
pdb_str = self.as_pdb_string(crystal_symmetry=crystal_symmetry)
pdb_inp = iotbx.pdb.input(
source_info="pdb_hierarchy",
lines=flex.split_lines(pdb_str))
return pdb_inp
# END_MARKED_FOR_DELETION_OLEG
def extract_xray_structure(self, crystal_symmetry=None,
min_distance_sym_equiv=None):
"""
Generate the equivalent cctbx.xray.structure object. If the crystal
symmetry is not provided, this will be placed in a P1 box. In practice it
is usually best to keep the original xray structure object around, but this
method is helpful in corner cases.
"""
if min_distance_sym_equiv is not None: # use it
return self.as_pdb_input(crystal_symmetry).xray_structure_simple(
min_distance_sym_equiv=min_distance_sym_equiv)
    else: # usually just use whatever is the default in xray_structure_simple
return self.as_pdb_input(crystal_symmetry).xray_structure_simple()
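  # Illustrative usage (editor's sketch): without symmetry the structure is
  # placed in a P1 box, so passing the input symmetry is usually preferable.
  #   xrs = hierarchy.extract_xray_structure(
  #     crystal_symmetry=pdb_inp.crystal_symmetry())
  #   print(xrs.scatterers().size())
  # ("hierarchy" and "pdb_inp" as in the as_pdb_string sketch above.)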
def adopt_xray_structure(self, xray_structure, assert_identical_id_str=True):
"""
Apply the current (refined) atomic parameters from the cctbx.xray.structure
object to the atoms in the PDB hierarchy. This will fail if the labels of
the scatterers do not match the atom labels.
"""
from cctbx import adptbx
if(self.atoms_size() != xray_structure.scatterers().size()):
raise RuntimeError("Incompatible size of hierarchy and scatterers array.")
awl = self.atoms_with_labels()
scatterers = xray_structure.scatterers()
uc = xray_structure.unit_cell()
orth = uc.orthogonalize
def set_attr(sc, a):
a.set_xyz(new_xyz=orth(sc.site))
a.set_occ(new_occ=sc.occupancy)
a.set_b(new_b=adptbx.u_as_b(sc.u_iso_or_equiv(uc)))
if(sc.flags.use_u_aniso() and sc.u_star != (-1.0, -1.0, -1.0, -1.0, -1.0, -1.0)):
# a.set_uij(new_uij = adptbx.u_star_as_u_cart(uc,sc.u_star))
a.set_uij(new_uij = sc.u_cart_plus_u_iso(uc))
else:
a.uij_erase()
a.set_fp(new_fp=sc.fp)
a.set_fdp(new_fdp=sc.fdp)
element, charge = sc.element_and_charge_symbols()
a.set_element(element)
a.set_charge(charge)
def get_id(l):
r = [pos for pos, char in enumerate(l) if char == '"']
if(len(r)<2): return None
i,j = r[-2:]
r = "".join(l[i:j+1].replace('"',"").replace('"',"").split())
return r
for sc, a in zip(scatterers, awl):
id_str = a.id_str()
resname_from_sc = id_str[10:13]
cl1 = common_residue_names_get_class(resname_from_sc)
cl2 = common_residue_names_get_class(a.resname)
if assert_identical_id_str:
l1 = get_id(sc.label)
l2 = get_id(a.id_str())
if(l1 != l2):
raise RuntimeError("Mismatch: \n %s \n %s \n"%(sc.label,a.id_str()))
set_attr(sc=sc, a=a)
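  # Illustrative round-trip (editor's sketch): push updated coordinates/ADPs
  # from an xray.structure back into the hierarchy it was derived from.
  # shake_sites_in_place merely stands in for any refinement/perturbation step.
  #   xrs = hierarchy.extract_xray_structure(
  #     crystal_symmetry=pdb_inp.crystal_symmetry())
  #   xrs.shake_sites_in_place(mean_distance=0.1)
  #   hierarchy.adopt_xray_structure(xrs)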
def apply_rotation_translation(self, rot_matrices, trans_vectors):
"""
LIMITATION: ANISOU records in resulting hierarchy will be invalid!!!
"""
roots=[]
for r,t in zip(rot_matrices, trans_vectors):
for model in self.models():
root = iotbx.pdb.hierarchy.root()
m = iotbx.pdb.hierarchy.model()
for c in model.chains():
c = c.detached_copy()
xyz = c.atoms().extract_xyz()
new_xyz = r.elems*xyz+t
c.atoms().set_xyz(new_xyz)
m.append_chain(c)
root.append_model(m)
roots.append(root)
result = iotbx.pdb.hierarchy.join_roots(roots=roots)
result.reset_i_seq_if_necessary()
return result
def remove_residue_groups_with_atoms_on_special_positions_selective(self,
crystal_symmetry):
self.reset_i_seq_if_necessary()
special_position_settings = crystal.special_position_settings(
crystal_symmetry = crystal_symmetry)
    # Note: passing
    #   unconditional_general_position_flags=(self.atoms().extract_occ() != 1)
    # would skip atoms on special positions that have partial occupancy.
site_symmetry_table = \
special_position_settings.site_symmetry_table(
sites_cart = self.atoms().extract_xyz())
spi = site_symmetry_table.special_position_indices()
removed = []
for c in self.chains():
for rg in c.residue_groups():
keep=True
for i in rg.atoms().extract_i_seq():
if(i in spi):
keep=False
break
if(not keep):
for resname in rg.unique_resnames():
if(common_residue_names_get_class(resname) == "common_amino_acid" or
common_residue_names_get_class(resname) == "common_rna_dna"):
raise RuntimeError(
"Amino-acid residue or NA is on special position.")
for resname in rg.unique_resnames():
removed.append(",".join([c.id, rg.resid(), resname]))
c.remove_residue_group(residue_group=rg)
return removed
def shift_to_origin(self, crystal_symmetry):
uc = crystal_symmetry.unit_cell()
sites_frac = uc.fractionalize(self.atoms().extract_xyz())
l = abs(min(sites_frac.min()))
r = abs(max(sites_frac.max()))
rl = max(l, r)+2
rr= range(int(-rl), int(rl))
shift_best = None
for x in rr:
for y in rr:
for z in rr:
sf = sites_frac+[x,y,z]
sc = uc.orthogonalize(sf)
cmf = uc.fractionalize(sc.mean())
if(cmf[0]>=0 and cmf[0]<1 and
cmf[1]>=0 and cmf[1]<1 and
cmf[2]>=0 and cmf[2]<1):
shift_best = [x,y,z]
assert shift_best is not None # should never happen
self.atoms().set_xyz(uc.orthogonalize(sites_frac+shift_best))
def expand_to_p1(self, crystal_symmetry, exclude_self=False):
# ANISOU will be invalid
import string
import scitbx.matrix
r = root()
m = model()
idl = [i for i in string.ascii_lowercase]
idu = [i for i in string.ascii_uppercase]
taken = [c.id for c in self.chains()]
n_atoms = []
for m_ in self.models():
for smx in crystal_symmetry.space_group().all_ops():
m3 = smx.r().as_double()
m3 = scitbx.matrix.sqr(m3)
if(exclude_self and m3.is_r3_identity_matrix()): continue
t = smx.t().as_double()
t = scitbx.matrix.col((t[0],t[1],t[2]))
for c_ in m_.chains():
n_at = len(c_.atoms())
if(not n_at in n_atoms): n_atoms.append(n_at)
c_ = c_.detached_copy()
xyz = c_.atoms().extract_xyz()
xyz = crystal_symmetry.unit_cell().fractionalize(xyz)
new_xyz = crystal_symmetry.unit_cell().orthogonalize(m3.elems*xyz+t)
c_.atoms().set_xyz(new_xyz)
#
if(not (smx.r().is_unit_mx() and smx.t().is_zero())):
found = False
for idu_ in idu:
for idl_ in idl:
id_ = idu_+idl_
if(not id_ in taken):
taken.append(id_)
found = id_
break
if(found): break
c_.id = found
#
m.append_chain(c_)
r.append_model(m)
return r
def write_pdb_file(self,
file_name,
open_append=False,
crystal_symmetry=None,
cryst1_z=None,
write_scale_records=True,
append_end=False,
interleaved_conf=0,
atoms_reset_serial_first_value=None,
atom_hetatm=True,
sigatm=True,
anisou=True,
siguij=True,
link_records=None,
):
if link_records:
if (open_append): mode = "a"
else: mode = "w"
with open(file_name, mode) as f:
print(link_records, file=f)
open_append = True
if (crystal_symmetry is not None or cryst1_z is not None):
if (open_append): mode = "a"
else: mode = "w"
from iotbx.pdb import format_cryst1_and_scale_records
with open(file_name, mode) as f:
print(format_cryst1_and_scale_records(
crystal_symmetry=crystal_symmetry,
cryst1_z=cryst1_z,
write_scale_records=write_scale_records), file=f)
open_append = True
self._write_pdb_file(
file_name=file_name,
open_append=open_append,
append_end=append_end,
interleaved_conf=interleaved_conf,
atoms_reset_serial_first_value=atoms_reset_serial_first_value,
atom_hetatm=atom_hetatm,
sigatm=sigatm,
anisou=anisou,
siguij=siguij,
)
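  # Illustrative usage (editor's sketch; the output path is an assumption):
  #   hierarchy.write_pdb_file(
  #     file_name="out.pdb",
  #     crystal_symmetry=pdb_inp.crystal_symmetry(),  # CRYST1/SCALE written first
  #     append_end=True)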
def get_label_alt_id_iseq(self, iseq):
assert self.atoms_size() > iseq
return self.get_label_alt_id_atom(self.atoms()[iseq])
def get_label_alt_id_atom(self, atom):
alt_id = atom.parent().altloc
if alt_id == '': alt_id = '.'
return alt_id
def get_auth_asym_id_iseq(self, iseq):
assert self.atoms_size() > iseq, "%d, %d" % (self.atoms_size(), iseq)
return self.get_auth_asym_id(self.atoms()[iseq].parent().parent().parent())
def get_auth_asym_id(self, chain):
auth_asym_id = chain.id
if len(chain.atoms()[0].segid.strip()) > len(auth_asym_id):
auth_asym_id = chain.atoms()[0].segid.strip()
if auth_asym_id.strip() == '':
      # Chain id and segid are both empty, so just duplicate label_asym_id:
      # we cannot read mmCIF with an empty auth_asym_id, and writing a file
      # that we cannot read back would be bad.
auth_asym_id = self.get_label_asym_id(chain.residue_groups()[0])
return auth_asym_id
def get_label_asym_id_iseq(self, iseq):
assert self.atoms_size() > iseq
return self.get_label_asym_id(self.atoms()[iseq].parent().parent())
def get_label_asym_id(self, residue_group):
if not hasattr(self, '_lai_lookup'):
self._lai_lookup = {}
# fill self._lai_lookup for the whole hierarchy
number_label_asym_id = 0
label_asym_ids = all_label_asym_ids()
for model in self.models():
for chain in model.chains():
previous = None
for rg in chain.residue_groups():
resname = rg.atom_groups()[0].resname.strip()
residue_class = common_residue_names_get_class(resname)
rg_mid = rg.memory_id()
if residue_class in ['common_amino_acid', 'modified_amino_acid',
'common_rna_dna', 'modified_rna_dna']:
if previous != 'poly' and previous is not None:
number_label_asym_id += 1
self._lai_lookup[rg_mid] = label_asym_ids[number_label_asym_id]
previous = 'poly'
elif residue_class in ['common_water']:
if previous != 'water' and previous is not None:
number_label_asym_id += 1
previous = 'water'
self._lai_lookup[rg_mid] = label_asym_ids[number_label_asym_id]
else: # ligand
if previous is not None:
number_label_asym_id += 1
previous = 'ligand'
self._lai_lookup[rg_mid] = label_asym_ids[number_label_asym_id]
number_label_asym_id += 1 # up for each chain
previous = None
number_label_asym_id += 1 # up for each model
rg_mid = residue_group.memory_id()
result = self._lai_lookup.get(rg_mid, None)
if result is None:
print (residue_group.id_str())
return result
# return self.number_label_asym_id, self.label_asym_ids[self.number_label_asym_id]
def get_auth_seq_id_iseq(self, iseq):
assert self.atoms_size() > iseq
return self.get_auth_seq_id(self.atoms()[iseq].parent().parent())
def get_auth_seq_id(self, rg):
return rg.resseq.strip()
def get_label_seq_id_iseq(self, iseq):
assert self.atoms_size() > iseq, "%d, %d" % (self.atoms_size(), iseq)
return self.get_label_seq_id(self.atoms()[iseq].parent())
def get_label_seq_id(self, atom_group):
if not hasattr(self, '_label_seq_id_dict'):
# make it
prev_ac_key = ''
self._label_seq_id_dict = {}
for model in self.models():
for chain in model.chains():
label_seq_id = 0
for rg in chain.residue_groups():
for ag in rg.atom_groups():
cur_ac_key = chain.id + rg.resseq + rg.icode
if cur_ac_key != prev_ac_key:
label_seq_id += 1
prev_ac_key = cur_ac_key
label_seq_id_str='.'
comp_id = ag.resname.strip()
residue_class = common_residue_names_get_class(comp_id)
if residue_class in ['common_amino_acid', 'modified_amino_acid']:
label_seq_id_str = str(label_seq_id)
self._label_seq_id_dict[ag.memory_id()] = label_seq_id_str
return self._label_seq_id_dict[atom_group.memory_id()]
def as_cif_block(self,
crystal_symmetry=None,
coordinate_precision=5,
occupancy_precision=3,
b_iso_precision=5,
u_aniso_precision=5):
if crystal_symmetry is None:
crystal_symmetry = crystal.symmetry()
cs_cif_block = crystal_symmetry.as_cif_block(format="mmcif")
h_cif_block = iotbx.cif.model.block()
coord_fmt_str = "%%.%if" %coordinate_precision
occ_fmt_str = "%%.%if" %occupancy_precision
b_iso_fmt_str = "%%.%if" %b_iso_precision
u_aniso_fmt_str = "%%.%if" %u_aniso_precision
atom_site_loop = iotbx.cif.model.loop(header=(
'_atom_site.group_PDB',
'_atom_site.id',
'_atom_site.label_atom_id',
'_atom_site.label_alt_id',
'_atom_site.label_comp_id',
'_atom_site.auth_asym_id',
'_atom_site.auth_seq_id',
'_atom_site.pdbx_PDB_ins_code',
'_atom_site.Cartn_x',
'_atom_site.Cartn_y',
'_atom_site.Cartn_z',
'_atom_site.occupancy',
'_atom_site.B_iso_or_equiv',
'_atom_site.type_symbol',
'_atom_site.pdbx_formal_charge',
'_atom_site.phenix_scat_dispersion_real',
'_atom_site.phenix_scat_dispersion_imag',
'_atom_site.label_asym_id',
'_atom_site.label_entity_id',
'_atom_site.label_seq_id',
#'_atom_site.auth_comp_id',
#'_atom_site.auth_atom_id',
'_atom_site.pdbx_PDB_model_num',
))
aniso_loop = iotbx.cif.model.loop(header=(
'_atom_site_anisotrop.id',
'_atom_site_anisotrop.pdbx_auth_atom_id',
'_atom_site_anisotrop.pdbx_label_alt_id',
'_atom_site_anisotrop.pdbx_auth_comp_id',
'_atom_site_anisotrop.pdbx_auth_asym_id',
'_atom_site_anisotrop.pdbx_auth_seq_id',
'_atom_site_anisotrop.pdbx_PDB_ins_code',
'_atom_site_anisotrop.U[1][1]',
'_atom_site_anisotrop.U[2][2]',
'_atom_site_anisotrop.U[3][3]',
'_atom_site_anisotrop.U[1][2]',
'_atom_site_anisotrop.U[1][3]',
'_atom_site_anisotrop.U[2][3]'
))
# cache dictionary lookups to save time in inner loop
atom_site_group_PDB = atom_site_loop['_atom_site.group_PDB']
atom_site_id = atom_site_loop['_atom_site.id']
atom_site_label_atom_id = atom_site_loop['_atom_site.label_atom_id']
atom_site_label_alt_id = atom_site_loop['_atom_site.label_alt_id']
atom_site_label_comp_id = atom_site_loop['_atom_site.label_comp_id']
atom_site_auth_asym_id = atom_site_loop['_atom_site.auth_asym_id']
atom_site_auth_seq_id = atom_site_loop['_atom_site.auth_seq_id']
atom_site_pdbx_PDB_ins_code = atom_site_loop['_atom_site.pdbx_PDB_ins_code']
atom_site_Cartn_x = atom_site_loop['_atom_site.Cartn_x']
atom_site_Cartn_y = atom_site_loop['_atom_site.Cartn_y']
atom_site_Cartn_z = atom_site_loop['_atom_site.Cartn_z']
atom_site_occupancy = atom_site_loop['_atom_site.occupancy']
atom_site_B_iso_or_equiv = atom_site_loop['_atom_site.B_iso_or_equiv']
atom_site_type_symbol = atom_site_loop['_atom_site.type_symbol']
atom_site_pdbx_formal_charge = atom_site_loop['_atom_site.pdbx_formal_charge']
atom_site_phenix_scat_dispersion_real = \
atom_site_loop['_atom_site.phenix_scat_dispersion_real']
atom_site_phenix_scat_dispersion_imag = \
atom_site_loop['_atom_site.phenix_scat_dispersion_imag']
atom_site_label_asym_id = atom_site_loop['_atom_site.label_asym_id']
atom_site_label_entity_id = atom_site_loop['_atom_site.label_entity_id']
atom_site_label_seq_id = atom_site_loop['_atom_site.label_seq_id']
#atom_site_loop['_atom_site.auth_comp_id'].append(comp_id)
#atom_site_loop['_atom_site.auth_atom_id'].append(atom.name.strip())
atom_site_pdbx_PDB_model_num = atom_site_loop['_atom_site.pdbx_PDB_model_num']
atom_site_anisotrop_id = aniso_loop['_atom_site_anisotrop.id']
atom_site_anisotrop_pdbx_auth_atom_id = \
aniso_loop['_atom_site_anisotrop.pdbx_auth_atom_id']
atom_site_anisotrop_pdbx_label_alt_id = \
aniso_loop['_atom_site_anisotrop.pdbx_label_alt_id']
atom_site_anisotrop_pdbx_auth_comp_id = \
aniso_loop['_atom_site_anisotrop.pdbx_auth_comp_id']
atom_site_anisotrop_pdbx_auth_asym_id = \
aniso_loop['_atom_site_anisotrop.pdbx_auth_asym_id']
atom_site_anisotrop_pdbx_auth_seq_id = \
aniso_loop['_atom_site_anisotrop.pdbx_auth_seq_id']
atom_site_anisotrop_pdbx_PDB_ins_code = \
aniso_loop['_atom_site_anisotrop.pdbx_PDB_ins_code']
atom_site_anisotrop_U11 = aniso_loop['_atom_site_anisotrop.U[1][1]']
atom_site_anisotrop_U22 = aniso_loop['_atom_site_anisotrop.U[2][2]']
atom_site_anisotrop_U33 = aniso_loop['_atom_site_anisotrop.U[3][3]']
atom_site_anisotrop_U12 = aniso_loop['_atom_site_anisotrop.U[1][2]']
atom_site_anisotrop_U13 = aniso_loop['_atom_site_anisotrop.U[1][3]']
atom_site_anisotrop_U23 = aniso_loop['_atom_site_anisotrop.U[2][3]']
unique_chain_ids = set()
auth_asym_ids = flex.std_string()
label_asym_ids = flex.std_string()
#
chem_comp_loop = iotbx.cif.model.loop(header=(
'_chem_comp.id',
))
struct_asym_loop = iotbx.cif.model.loop(header=(
'_struct_asym.id',
))
chem_comp_ids = []
chem_comp_atom_ids = []
struct_asym_ids = []
#
chain_ids = all_chain_ids()
for model in self.models():
model_id = model.id
if model_id == '': model_id = '1'
for chain in model.chains():
auth_asym_id = self.get_auth_asym_id(chain)
for residue_group in chain.residue_groups():
label_asym_id = self.get_label_asym_id(residue_group)
seq_id = self.get_auth_seq_id(residue_group)
icode = residue_group.icode
if icode == ' ' or icode == '': icode = '?'
for atom_group in residue_group.atom_groups():
comp_id = atom_group.resname.strip()
entity_id = '?' # XXX how do we determine this?
for atom in atom_group.atoms():
group_pdb = "ATOM"
if atom.hetero: group_pdb = "HETATM"
x, y, z = [coord_fmt_str %i for i in atom.xyz]
atom_charge = atom.charge_tidy()
if atom_charge is None:
atom_charge = "?"
else:
atom_charge = atom_charge.strip()
if atom_charge == "": atom_charge = "?"
fp, fdp = atom.fp, atom.fdp
if fp == 0 and fdp == 0:
fp = '.'
fdp = '.'
else:
fp = "%.4f" %fp
fdp = "%.4f" %fdp
atom_site_group_PDB.append(group_pdb)
atom_site_id.append(str(hy36decode(width=5, s=atom.serial)))
atom_site_label_atom_id.append(atom.name.strip())
if atom.name.strip() not in chem_comp_atom_ids:
chem_comp_atom_ids.append(atom.name.strip())
atom_site_label_alt_id.append(self.get_label_alt_id_atom(atom))
atom_site_label_comp_id.append(comp_id)
if comp_id not in chem_comp_ids: chem_comp_ids.append(comp_id)
atom_site_auth_asym_id.append(auth_asym_id)
atom_site_auth_seq_id.append(seq_id)
atom_site_pdbx_PDB_ins_code.append(icode)
atom_site_Cartn_x.append(x)
atom_site_Cartn_y.append(y)
atom_site_Cartn_z.append(z)
atom_site_occupancy.append(occ_fmt_str % atom.occ)
atom_site_B_iso_or_equiv.append(b_iso_fmt_str % atom.b)
atom_site_type_symbol.append(atom.element.strip())
atom_site_pdbx_formal_charge.append(atom_charge)
atom_site_phenix_scat_dispersion_real.append(fp)
atom_site_phenix_scat_dispersion_imag.append(fdp)
atom_site_label_asym_id.append(label_asym_id.strip())
if label_asym_id.strip() not in struct_asym_ids:
struct_asym_ids.append(label_asym_id.strip())
atom_site_label_entity_id.append(entity_id)
atom_site_label_seq_id.append(self.get_label_seq_id(atom_group))
#atom_site_loop['_atom_site.auth_comp_id'].append(comp_id)
#atom_site_loop['_atom_site.auth_atom_id'].append(atom.name.strip())
atom_site_pdbx_PDB_model_num.append(model_id.strip())
if atom.uij_is_defined():
u11, u22, u33, u12, u13, u23 = [
u_aniso_fmt_str %i for i in atom.uij]
atom_site_anisotrop_id.append(
str(hy36decode(width=5, s=atom.serial)))
atom_site_anisotrop_pdbx_auth_atom_id.append(atom.name.strip())
atom_site_anisotrop_pdbx_label_alt_id.append(self.get_label_alt_id_atom(atom))
atom_site_anisotrop_pdbx_auth_comp_id.append(comp_id)
atom_site_anisotrop_pdbx_auth_asym_id.append(auth_asym_id)
atom_site_anisotrop_pdbx_auth_seq_id.append(seq_id)
atom_site_anisotrop_pdbx_PDB_ins_code.append(icode)
atom_site_anisotrop_U11.append(u11)
atom_site_anisotrop_U22.append(u22)
atom_site_anisotrop_U33.append(u33)
atom_site_anisotrop_U12.append(u12)
atom_site_anisotrop_U13.append(u13)
atom_site_anisotrop_U23.append(u23)
for key in ('_atom_site.phenix_scat_dispersion_real',
'_atom_site.phenix_scat_dispersion_imag'):
if atom_site_loop[key].all_eq('.'):
del atom_site_loop[key]
h_cif_block.add_loop(atom_site_loop)
if aniso_loop.size() > 0:
h_cif_block.add_loop(aniso_loop)
h_cif_block.update(cs_cif_block)
#
chem_comp_ids.sort()
for row in chem_comp_ids: chem_comp_loop.add_row([row])
h_cif_block.add_loop(chem_comp_loop)
chem_comp_atom_ids.sort()
for row in struct_asym_ids: struct_asym_loop.add_row([row])
h_cif_block.add_loop(struct_asym_loop)
#
return h_cif_block
def write_mmcif_file(self,
file_name,
crystal_symmetry=None,
data_block_name=None):
cif_object = iotbx.cif.model.cif()
if data_block_name is None:
data_block_name = "phenix"
cif_object[data_block_name] = self.as_cif_block(
crystal_symmetry=crystal_symmetry)
with open(file_name, "w") as f:
print(cif_object, file=f)
def atoms_with_labels(self):
"""
Generator for atom_with_labels objects, presented in the same order as
the array returned by the atoms() method.
"""
for model in self.models():
for chain in model.chains():
is_first_in_chain = True
for rg in chain.residue_groups():
is_first_after_break = not (is_first_in_chain or rg.link_to_previous)
for ag in rg.atom_groups():
for atom in ag.atoms():
yield atom_with_labels(
atom=atom,
model_id=model.id,
chain_id=chain.id,
resseq=rg.resseq,
icode=rg.icode,
altloc=ag.altloc,
resname=ag.resname,
is_first_in_chain=is_first_in_chain,
is_first_after_break=is_first_after_break)
is_first_in_chain = False
is_first_after_break = False
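  # Illustrative usage (editor's sketch): the yielded objects carry both the
  # atom and its parent labels, so no further hierarchy walking is needed.
  #   for awl in hierarchy.atoms_with_labels():
  #     if awl.is_first_in_chain:
  #       print(awl.chain_id, awl.resname, awl.resseq, awl.name)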
def get_conformer_indices(self):
n_seq = self.atoms_size()
conformer_indices = flex.size_t(n_seq, 0)
altloc_indices = self.altloc_indices()
if ("" in altloc_indices): p = 0
else: p = 1
altlocs = sorted(altloc_indices.keys())
for i,altloc in enumerate(altlocs):
if (altloc == ""): continue
conformer_indices.set_selected(altloc_indices[altloc], i+p)
return conformer_indices
def remove_incomplete_main_chain_protein(self,
required_atom_names=['CA','N','C','O']):
# Remove each residue_group that does not contain CA N C O of protein
hierarchy = self
for model in hierarchy.models():
for chain in model.chains():
for residue_group in chain.residue_groups():
all_atom_names_found=[]
atom_groups = residue_group.atom_groups()
for atom_group in atom_groups:
for atom in atom_group.atoms():
atom_name=atom.name.strip()
if not atom_name in all_atom_names_found:
all_atom_names_found.append(atom_name)
for r in required_atom_names:
if not r in all_atom_names_found:
chain.remove_residue_group(residue_group=residue_group)
break
if (len(chain.residue_groups()) == 0):
model.remove_chain(chain=chain)
def remove_alt_confs(self, always_keep_one_conformer):
hierarchy = self
for model in hierarchy.models():
for chain in model.chains():
for residue_group in chain.residue_groups():
atom_groups = residue_group.atom_groups()
assert (len(atom_groups) > 0)
cleanup_needed = True
if always_keep_one_conformer :
if (len(atom_groups) == 1) and (atom_groups[0].altloc == ''):
continue
atom_groups_and_occupancies = []
for atom_group in atom_groups :
if (atom_group.altloc == ''):
continue
mean_occ = flex.mean(atom_group.atoms().extract_occ())
atom_groups_and_occupancies.append((atom_group, mean_occ))
atom_groups_and_occupancies.sort(key=operator.itemgetter(1), reverse=True)
for atom_group, occ in atom_groups_and_occupancies[1:] :
residue_group.remove_atom_group(atom_group=atom_group)
single_conf, occ = atom_groups_and_occupancies[0]
single_conf.altloc = ''
else :
for atom_group in atom_groups :
if (not atom_group.altloc in ["", "A"]):
residue_group.remove_atom_group(atom_group=atom_group)
else :
atom_group.altloc = ""
if (len(residue_group.atom_groups()) == 0):
chain.remove_residue_group(residue_group=residue_group)
cleanup_needed = False
if cleanup_needed and residue_group.atom_groups_size() > 1:
ags = residue_group.atom_groups()
for i in range(len(ags)-1, 0, -1):
residue_group.merge_atom_groups(ags[0], ags[i])
residue_group.remove_atom_group(ags[i])
if (len(chain.residue_groups()) == 0):
model.remove_chain(chain=chain)
atoms = hierarchy.atoms()
new_occ = flex.double(atoms.size(), 1.0)
atoms.set_occ(new_occ)
def rename_chain_id(self, old_id, new_id):
for model in self.models():
for chain in model.chains():
if(chain.id == old_id):
chain.id = new_id
def remove_atoms(self, fraction):
assert fraction>0 and fraction<1.
sel_keep = flex.random_bool(self.atoms_size(), 1-fraction)
return self.select(sel_keep)
def set_atomic_charge(self, iselection, charge):
assert isinstance(charge, int)
if(iselection is None):
raise Sorry("Specify an atom selection to apply a charge to.")
if(abs(charge) >= 10):
raise Sorry("The charge must be in the range from -9 to 9.")
if(iselection.size() == 0):
raise Sorry("Empty selection for charge modification")
if(charge == 0):
charge = " "
elif (charge < 0):
charge = "%1d-" % abs(charge)
else:
charge = "%1d+" % charge
atoms = self.atoms()
for i_seq in iselection:
atom = atoms[i_seq]
atom.set_charge(charge)
def truncate_to_poly(self, atom_names_set=set()):
pdb_atoms = self.atoms()
pdb_atoms.reset_i_seq()
aa_resnames = one_letter_given_three_letter
for model in self.models():
for chain in model.chains():
for rg in chain.residue_groups():
def have_amino_acid():
for ag in rg.atom_groups():
if (ag.resname in aa_resnames):
return True
return False
if (have_amino_acid()):
for ag in rg.atom_groups():
for atom in ag.atoms():
if (atom.name not in atom_names_set):
ag.remove_atom(atom=atom)
def truncate_to_poly_gly(self):
self.truncate_to_poly(
atom_names_set=set([" N ", " CA ", " C ", " O "]))
def truncate_to_poly_ala(self):
self.truncate_to_poly(
atom_names_set=set([" N ", " CA ", " C ", " O ", " CB "]))
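  # Illustrative usage (editor's sketch): both helpers modify the hierarchy in
  # place, stripping amino-acid atoms outside the kept atom-name set, so work
  # on a copy if the original is still needed.
  #   poly_ala = hierarchy.deep_copy()
  #   poly_ala.truncate_to_poly_ala()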
def convert_semet_to_met(self):
for i_seq, atom in enumerate(self.atoms()):
if (atom.name.strip()=="SE") and (atom.element.strip().upper()=="SE"):
atom_group = atom.parent()
if(atom_group.resname == "MSE"):
atom_group.resname = "MET"
atom.name = " SD "
atom.element = " S"
for ag_atom in atom_group.atoms():
ag_atom.hetero = False
def convert_met_to_semet(self):
for i_seq, atom in enumerate(self.atoms()):
if((atom.name.strip()=="SD") and (atom.element.strip().upper()=="S")):
atom_group = atom.parent()
if(atom_group.resname == "MET"):
atom_group.resname = "MSE"
atom.name = " SE "
atom.element = "SE"
for ag_atom in atom_group.atoms():
ag_atom.hetero = True
def transfer_chains_from_other(self, other):
i_model = 0
other_models = other.models()
for md,other_md in zip(self.models(), other_models):
i_model += 1
md.id = hy36encode(width=4, value=i_model)
md.transfer_chains_from_other(other=other_md)
msz, omsz = self.models_size(), other.models_size()
if (omsz > msz):
for other_md in other_models[msz:]:
i_model += 1
md = model(id = hy36encode(width=4, value=i_model))
md.transfer_chains_from_other(other=other_md)
self.append_model(model=md)
def atom_selection_cache(self, special_position_settings=None):
from iotbx.pdb.atom_selection import cache
return cache(root=self,
special_position_settings=special_position_settings)
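  # Illustrative usage (editor's sketch) of the returned selection cache:
  #   asc = hierarchy.atom_selection_cache()
  #   sel = asc.selection("protein and name CA")  # flex.bool, one per atom
  #   ca_only = hierarchy.select(sel)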
def occupancy_groups_simple(self, common_residue_name_class_only=None,
always_group_adjacent=True,
ignore_hydrogens=True):
if(ignore_hydrogens):
sentinel = self.atoms().reset_tmp_for_occupancy_groups_simple()
else:
sentinel = self.atoms().reset_tmp(first_value=0, increment=1)
result = []
for chain in self.chains():
if(common_residue_name_class_only is None):
if(chain.is_protein()):
common_residue_name_class_only = "common_amino_acid"
if(chain.is_na()):
common_residue_name_class_only = "common_rna_dna"
result.extend(chain.occupancy_groups_simple(
common_residue_name_class_only=common_residue_name_class_only,
always_group_adjacent=always_group_adjacent))
del sentinel
return result
def chunk_selections(self, residues_per_chunk):
result = []
if(residues_per_chunk<1): return result
for model in self.models():
for chain in model.chains():
residue_range_sel = flex.size_t()
cntr = 0
for rg in chain.residue_groups():
i_seqs = rg.atoms().extract_i_seq()
last_added=True
if(cntr!=residues_per_chunk):
residue_range_sel.extend(i_seqs)
last_added=False
else:
result.append(residue_range_sel)
residue_range_sel = flex.size_t()
residue_range_sel.extend(i_seqs)
cntr = 0
last_added=False
cntr += 1
if(len(result)==0 or not last_added):
assert residue_range_sel.size()>0
result.append(residue_range_sel)
return result
def flip_symmetric_amino_acids(self):
import time
from scitbx.math import dihedral_angle
def chirality_delta(sites, volume_ideal, both_signs):
d_01 = sites[1] - sites[0]
d_02 = sites[2] - sites[0]
d_03 = sites[3] - sites[0]
d_02_cross_d_03 = d_02.cross(d_03)
volume_model = d_01.dot(d_02_cross_d_03)
delta_sign = -1;
if both_signs and volume_model < 0:
delta_sign = 1
delta = volume_ideal + delta_sign * volume_model
return delta[0]
data = {
"ARG" : {"dihedral" : ["CD", "NE", "CZ", "NH1"],
"value" : [0, 1],
"pairs" : [["NH1", "NH2"],
["HH11","HH21"], # should this also be periodicty
["HH12","HH22"], # of 1
],
},
"ASP" : {"dihedral" : ["CA", "CB", "CG", "OD1"],
"value" : [0, 1],
"pairs" : [["OD1", "OD2"]],
},
"GLU" : {"dihedral" : ["CB", "CG", "CD", "OE1"],
"value" : [0, 1],
"pairs" : [["OE1", "OE2"]],
},
"PHE" : {"dihedral" : ["CA", "CB", "CG", "CD1"],
"value" : [0, 1],
"pairs" : [["CD1", "CD2"],
["CE1", "CE2"],
["HD1", "HD2"],
["HE1", "HE2"],
],
},
# even less symmetric flips - based on chirals
'VAL' : {'chiral' : ['CB', 'CA', 'CG1', 'CG2'],
'value' : [-2.5, False, 1],
'pairs' : [['CG1', 'CG2'],
['HG11','HG21'],
['HG12','HG22'],
['HG13','HG23'],
],
},
'LEU' : {'chiral' : ['CG', 'CB', 'CD1', 'CD2'],
'value' : [-2.5, False, 1],
'pairs' : [['CD1', 'CD2'],
['HD11','HD21'],
['HD12','HD22'],
['HD13','HD23'],
],
},
}
data["TYR"]=data["PHE"]
sites_cart = self.atoms().extract_xyz()
t0=time.time()
info = ""
for rg in self.residue_groups():
for ag in rg.atom_groups():
flip_data = data.get(ag.resname, None)
if flip_data is None: continue
assert not ('dihedral' in flip_data and 'chiral' in flip_data)
flip_it=False
if 'dihedral' in flip_data:
sites = []
for d in flip_data["dihedral"]:
atom = ag.get_atom(d)
if atom is None: break
sites.append(atom.xyz)
if len(sites)!=4: continue
dihedral = dihedral_angle(sites=sites, deg=True)
if abs(dihedral)>360./flip_data["value"][1]/4:
flip_it=True
elif 'chiral' in flip_data:
sites = []
for d in flip_data["chiral"]:
atom = ag.get_atom(d)
if atom is None: break
sites.append(atom.xyz)
if len(sites)!=4: continue
delta = chirality_delta(sites=[flex.vec3_double([xyz]) for xyz in sites],
volume_ideal=flip_data["value"][0],
both_signs=flip_data['value'][1],
)
if abs(delta)>2.:
flip_it=True
if flip_it:
info += ' Residue "%s %s %s":' % (
rg.parent().id,
ag.resname,
rg.resseq,
)
flips_stored = []
atoms = ag.atoms()
for pair in flip_data["pairs"]:
atom1 = ag.get_atom(pair[0])
atom2 = ag.get_atom(pair[1])
if atom1 is None and atom2 is None: continue
if len(list(filter(None, [atom1, atom2]))) == 1:
flips_stored=[]
info += ' not complete - not flipped'
break
flips_stored.append([atom1,atom2])
for atom1, atom2 in flips_stored:
for attr in ['xyz', 'b']:
tmp = getattr(atom1, attr)
setattr(atom1, attr, getattr(atom2, attr))
setattr(atom2, attr, tmp)
info += ' "%s" <-> "%s"' % (atom1.name.strip(),
atom2.name.strip())
info += '\n'
if not info: info = ' None\n'
info += ' Time to flip residues: %0.2fs\n' % (time.time()-t0)
return info
def distance_based_simple_two_way_bond_sets(self,
fallback_expected_bond_length=1.4,
fallback_search_max_distance=2.5):
from cctbx.crystal import distance_based_connectivity
atoms = self.atoms().deep_copy() # XXX potential bottleneck
atoms.set_chemical_element_simple_if_necessary()
sites_cart = atoms.extract_xyz()
elements = atoms.extract_element()
conformer_indices = self.get_conformer_indices()
return distance_based_connectivity.build_simple_two_way_bond_sets(
sites_cart=sites_cart,
elements=elements,
conformer_indices=conformer_indices,
fallback_expected_bond_length=fallback_expected_bond_length,
fallback_search_max_distance=fallback_search_max_distance)
def reset_i_seq_if_necessary(self):
atoms = self.atoms()
i_seqs = atoms.extract_i_seq()
if (i_seqs.all_eq(0)):
atoms.reset_i_seq()
def get_peptide_c_alpha_selection(self):
"""
Extract atom selection (flex.size_t) for protein C-alpha atoms.
"""
result = flex.size_t()
i_seqs = self.atoms().extract_i_seq()
if(i_seqs.size()>1): assert i_seqs[1:].all_ne(0)
for model in self.models():
for chain in model.chains():
for rg in chain.residue_groups():
for ag in rg.atom_groups():
if(common_residue_names_get_class(ag.resname) == "common_amino_acid"):
for atom in ag.atoms():
if(atom.name.strip() == "CA"):
result.append(atom.i_seq)
return result
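  # Illustrative usage (editor's sketch; i_seq values must be up to date):
  #   hierarchy.atoms().reset_i_seq()
  #   ca_isel = hierarchy.get_peptide_c_alpha_selection()  # flex.size_t
  #   ca_xyz = hierarchy.atoms().extract_xyz().select(ca_isel)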
def contains_protein(self, min_content=0):
"""
Inspect residue names and counts to determine if enough of them are protein.
"""
oc = self.overall_counts()
n_prot_residues = oc.get_n_residues_of_classes(
classes=['common_amino_acid', 'modified_amino_acid'])
n_water_residues = oc.get_n_residues_of_classes(
classes=['common_water'])
if oc.n_residues-n_water_residues > 0:
return n_prot_residues / (oc.n_residues-n_water_residues) > min_content
return n_prot_residues > min_content
def contains_nucleic_acid(self, min_content=0):
"""
Inspect residue names and counts to determine if enough of
them are RNA or DNA.
"""
oc = self.overall_counts()
n_na_residues = oc.get_n_residues_of_classes(
classes=['common_rna_dna', 'modified_rna_dna'])
n_water_residues = oc.get_n_residues_of_classes(
classes=['common_water'])
if oc.n_residues-n_water_residues > 0:
return n_na_residues / (oc.n_residues-n_water_residues) > min_content
return n_na_residues > min_content
def contains_rna(self):
"""
Inspect residue names and counts to determine if any of
them are RNA.
"""
oc = self.overall_counts()
for resname, count in oc.resnames.items():
if ( common_residue_names_get_class(resname) == "common_rna_dna"
and "D" not in resname.upper() ):
return True
return False
def remove_hd(self, reset_i_seq=False):
"""
Remove all hydrogen/deuterium atoms in-place. Returns the number of atoms
deleted.
"""
n_removed = 0
for pdb_model in self.models():
for pdb_chain in pdb_model.chains():
for pdb_residue_group in pdb_chain.residue_groups():
for pdb_atom_group in pdb_residue_group.atom_groups():
for pdb_atom in pdb_atom_group.atoms():
if (pdb_atom.element.strip().upper() in ["H","D"]):
pdb_atom_group.remove_atom(pdb_atom)
n_removed += 1
if (pdb_atom_group.atoms_size() == 0):
pdb_residue_group.remove_atom_group(pdb_atom_group)
if (pdb_residue_group.atom_groups_size() == 0):
pdb_chain.remove_residue_group(pdb_residue_group)
if (pdb_chain.residue_groups_size() == 0):
pdb_model.remove_chain(pdb_chain)
if (pdb_model.chains_size() == 0):
self.remove_model(pdb_model)
if (reset_i_seq):
self.atoms().reset_i_seq()
return n_removed
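  # Illustrative usage (editor's sketch):
  #   n_hd = hierarchy.remove_hd(reset_i_seq=True)
  #   print("removed %d H/D atoms" % n_hd)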
def is_ca_only(self):
"""
    Determine whether the hierarchy consists only of CA atoms.
    Upgrade options:
      - implement a threshold for cases where several residues are present
        in full;
      - figure out how to deal with HETATM records of the same chain;
      - ignore possible incorrect alignment of atom names.
"""
result = True
for model in self.models():
result = result and model.is_ca_only()
return result
bp.inject(ext.model, __hash_eq_mixin)
@bp.inject_into(ext.model)
class _():
"""
Class representing MODEL blocks in a PDB file (or equivalent mmCIF). There
will always be at least one of these in a hierarchy root extracted from a
PDB file even if no MODEL records are present.
Example
-------
>>> hierarchy = iotbx.pdb.hierarchy.root()
>>> model = iotbx.pdb.hierarchy.model(id="1")
>>> hierarchy.append_model(model)
>>> model = hierarchy.only_model()
"""
def residue_groups(self):
for chain in self.chains():
for rg in chain.residue_groups():
yield rg
def atom_groups(self):
for chain in self.chains():
for rg in chain.residue_groups():
for ag in rg.atom_groups():
yield ag
def only_chain(self):
assert self.chains_size() == 1
return self.chains()[0]
def only_residue_group(self):
return self.only_chain().only_residue_group()
def only_conformer(self):
return self.only_chain().only_conformer()
def only_atom_group(self):
return self.only_residue_group().only_atom_group()
def only_residue(self):
return self.only_conformer().only_residue()
def only_atom(self):
return self.only_atom_group().only_atom()
def is_ca_only(self):
"""
    Determine whether the model consists only of CA atoms.
    Upgrade options:
      - implement a threshold for cases where several residues are present
        in full;
      - figure out how to deal with HETATM records of the same chain;
      - ignore possible incorrect alignment of atom names.
"""
result = True
for chain in self.chains():
result = result and chain.is_ca_only()
return result
bp.inject(ext.chain, __hash_eq_mixin)
@bp.inject_into(ext.chain)
class _():
"""
Class representing a continuous chain of atoms, as defined by the combination
of chain ID field and TER records (or the chain index in mmCIF format). Note
that this does not necessarily correspond to a covalently linked entity, as
it may be used to group various heteroatoms (including water), but
chemically distinct protein or nucleic acid chains will typically be
grouped into exactly one chain object apiece.
"""
def atom_groups(self):
for rg in self.residue_groups():
for ag in rg.atom_groups():
yield ag
def only_residue_group(self):
assert self.residue_groups_size() == 1
return self.residue_groups()[0]
def only_conformer(self):
conformers = self.conformers()
assert len(conformers) == 1
return conformers[0]
def only_atom_group(self):
return self.only_residue_group().only_atom_group()
def only_residue(self):
return self.only_conformer().only_residue()
def only_atom(self):
return self.only_atom_group().only_atom()
def residues(self):
return self.only_conformer().residues()
def occupancy_groups_simple(self, common_residue_name_class_only=None,
always_group_adjacent=True):
result = []
residue_groups = self.residue_groups()
n_rg = len(residue_groups)
done = [False] * n_rg
def process_range(i_begin, i_end):
isolated_var_occ = []
groups = {}
for i_rg in range(i_begin, i_end):
done[i_rg] = True
rg = residue_groups[i_rg]
for ag in residue_groups[i_rg].atom_groups():
altloc = ag.altloc
if (altloc == ""):
for atom in ag.atoms():
if (atom.tmp < 0): continue
if (atom.occ > 0 and atom.occ < 1):
isolated_var_occ.append(atom.tmp)
else:
group = []
for atom in ag.atoms():
if (atom.tmp < 0): continue
group.append(atom.tmp)
if (len(group) != 0):
groups.setdefault(altloc, []).extend(group)
groups = list(groups.values())
if (len(groups) != 0):
for group in groups: group.sort()
groups.sort(key=operator.itemgetter(0))
result.append(groups)
for i in isolated_var_occ:
result.append([[i]])
for i_begin,i_end in self.find_pure_altloc_ranges(
common_residue_name_class_only=common_residue_name_class_only):
# use always_group_adjacent
do_this_step = True
nc = None
for i_rg in range(i_begin, i_end):
rg = residue_groups[i_rg]
n_conf = len(residue_groups[i_rg].conformers())
if(nc is None): nc = n_conf
else:
if(nc != n_conf):
do_this_step = False
#
if(always_group_adjacent):
process_range(i_begin, i_end)
else:
if(do_this_step):
process_range(i_begin, i_end)
for i_rg in range(n_rg):
if (done[i_rg]): continue
process_range(i_rg, i_rg+1)
result.sort(key=lambda element: element[0][0])
return result
def get_residue_names_and_classes(self):
"""
Extract the residue names and counts of each residue type (protein,
nucleic acid, etc) within the chain.
:returns: a tuple containing a list of residue names, and a dictionary of
residue type frequencies.
"""
from iotbx.pdb import residue_name_plus_atom_names_interpreter
rn_seq = []
residue_classes = dict_with_default_0()
for residue_group in self.residue_groups():
# XXX should we iterate over all atom_groups or just take the first one?
#for atom_group in residue_group.atom_groups():
atom_group = residue_group.atom_groups()[0]
rnpani = residue_name_plus_atom_names_interpreter(
residue_name=atom_group.resname,
atom_names=[atom.name for atom in atom_group.atoms()])
rn = rnpani.work_residue_name
rn_seq.append(rn)
if (rn is None):
c = None
else:
c = common_residue_names_get_class(name=rn)
residue_classes[c] += 1
return (rn_seq, residue_classes)
def as_sequence(self, substitute_unknown='X'):
"""
Naively extract single-character protein or nucleic acid sequence, without
accounting for residue numbering.
:param substitute_unknown: character to use for unrecognized 3-letter codes
"""
assert ((isinstance(substitute_unknown, str)) and
(len(substitute_unknown) == 1))
common_rna_dna_codes = {
"A": "A",
"C": "C",
"G": "G",
"U": "U",
"DA": "A",
"DC": "C",
"DG": "G",
"DT": "T"}
rn_seq, residue_classes = self.get_residue_names_and_classes()
n_aa = residue_classes["common_amino_acid"] + residue_classes["modified_amino_acid"]
n_na = residue_classes["common_rna_dna"] + residue_classes["modified_rna_dna"]
seq = []
if (n_aa > n_na):
aa_3_as_1 = one_letter_given_three_letter
for rn in rn_seq:
if (rn in aa_3_as_1_mod):
seq.append(aa_3_as_1_mod.get(rn, substitute_unknown))
else :
seq.append(aa_3_as_1.get(rn, substitute_unknown))
elif (n_na != 0):
for rn in rn_seq:
if rn not in common_rna_dna_codes and rn in na_3_as_1_mod:
rn = na_3_as_1_mod.get(rn, "N")
seq.append(common_rna_dna_codes.get(rn, "N"))
return seq
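  # Illustrative usage (editor's sketch; assumes a single-model hierarchy):
  # one-letter codes come back as a list, so join them for a plain string.
  #   chain = hierarchy.only_model().chains()[0]
  #   print("".join(chain.as_sequence(substitute_unknown="X")))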
def _residue_is_aa_or_na(self, residue_name, include_modified=True):
"""
Helper function for checking if a residue is an amino acid or
nucleic acid
Parameters
----------
residue_name: str
The residue name
include_modified: bool
If set, include modified amino and nucleic acids
Returns
-------
bool
True if the residue is an amino or nucleic acid, false otherwise
"""
residue_class = common_residue_names_get_class(residue_name)
acceptable_classes = ['common_amino_acid', 'common_rna_dna']
if include_modified:
acceptable_classes += ['d_amino_acid', 'modified_amino_acid', 'modified_rna_dna']
return residue_class in acceptable_classes
def as_padded_sequence(self, missing_char='X', skip_insertions=False,
pad=True, substitute_unknown='X', pad_at_start=True,
ignore_hetatm=False):
"""
Extract protein or nucleic acid sequence, taking residue numbering into
account so that apparent gaps will be filled with substitute characters.
"""
seq = self.as_sequence()
padded_seq = []
last_resseq = 0
last_icode = " "
i = 0
for i, residue_group in enumerate(self.residue_groups()):
if (skip_insertions) and (residue_group.icode != " "):
continue
if ignore_hetatm and not self._residue_is_aa_or_na(residue_group.unique_resnames()[0]):
continue
resseq = residue_group.resseq_as_int()
if (pad) and (resseq > (last_resseq + 1)):
for x in range(resseq - last_resseq - 1):
if last_resseq == 0 and not pad_at_start: break
padded_seq.append(missing_char)
last_resseq = resseq
padded_seq.append(seq[i])
return "".join(padded_seq)
def get_residue_ids(self, skip_insertions=False, pad=True, pad_at_start=True,
ignore_hetatm=False):
resids = []
last_resseq = 0
last_icode = " "
for i, residue_group in enumerate(self.residue_groups()):
if (skip_insertions) and (residue_group.icode != " "):
continue
if ignore_hetatm and not self._residue_is_aa_or_na(residue_group.unique_resnames()[0]):
continue
resseq = residue_group.resseq_as_int()
if (pad) and (resseq > (last_resseq + 1)):
for x in range(resseq - last_resseq - 1):
if last_resseq == 0 and not pad_at_start: break
resids.append(None)
last_resseq = resseq
resids.append(residue_group.resid())
return resids
def get_residue_names_padded(
self, skip_insertions=False, pad=True, pad_at_start=True,
ignore_hetatm=False):
resnames = []
last_resseq = 0
last_icode = " "
for i, residue_group in enumerate(self.residue_groups()):
if (skip_insertions) and (residue_group.icode != " "):
continue
if ignore_hetatm and not self._residue_is_aa_or_na(residue_group.unique_resnames()[0]):
continue
resseq = residue_group.resseq_as_int()
if (pad) and (resseq > (last_resseq + 1)):
for x in range(resseq - last_resseq - 1):
if last_resseq == 0 and not pad_at_start: break
resnames.append(None)
last_resseq = resseq
resnames.append(residue_group.unique_resnames()[0])
return resnames
def is_protein(self, min_content=0.8, ignore_water=True):
"""
Determine whether the chain represents an amino acid polymer, based on the
frequency of residue names.
Very slow due to usage of residue_name_plus_atom_names_interpreter in
get_residue_names_and_classes (majority of the processing is unnecessary)
"""
rn_seq, residue_classes = self.get_residue_names_and_classes()
n_aa = residue_classes["common_amino_acid"] + residue_classes['modified_amino_acid']
n_na = residue_classes["common_rna_dna"] + residue_classes['modified_rna_dna']
if (ignore_water):
while rn_seq.count("HOH") > 0 :
rn_seq.remove("HOH")
if (len(rn_seq) == 0):
return False
elif ((n_aa > n_na) and ((n_aa / len(rn_seq)) >= min_content)):
return True
elif (rn_seq == (["UNK"] * len(rn_seq))):
return True
return False
def is_na(self, min_content=0.8, ignore_water=True):
"""
Determine whether the chain represents a nucleic acid polymer, based on the
frequency of base names.
Very slow due to usage of residue_name_plus_atom_names_interpreter in
get_residue_names_and_classes (majority of the processing is unnecessary)
"""
rn_seq, residue_classes = self.get_residue_names_and_classes()
n_aa = residue_classes["common_amino_acid"] + residue_classes['modified_amino_acid']
n_na = residue_classes["common_rna_dna"] + residue_classes['modified_rna_dna']
if (ignore_water):
while rn_seq.count("HOH") > 0 :
rn_seq.remove("HOH")
if (len(rn_seq) == 0):
return False
elif ((n_na > n_aa) and ((n_na / len(rn_seq)) >= min_content)):
return True
return False
def is_ca_only(self):
"""
    Determine whether the chain consists only of CA atoms.
    Upgrade options:
      - implement a threshold for cases where several residues are present
        in full;
      - figure out how to deal with HETATM records of the same chain;
      - ignore possible incorrect alignment of atom names.
"""
atom_names = self.atoms().extract_name()
return atom_names.all_eq(" CA ")
bp.inject(ext.residue_group, __hash_eq_mixin)
@bp.inject_into(ext.residue_group)
class _():
def only_atom_group(self):
assert self.atom_groups_size() == 1
return self.atom_groups()[0]
def only_atom(self):
return self.only_atom_group().only_atom()
def id_str(self):
chain_id = ""
chain = self.parent()
if (chain is not None):
chain_id = chain.id
return "%2s%4s%1s" % (chain_id, self.resseq, self.icode)
bp.inject(ext.atom_group, __hash_eq_mixin)
@bp.inject_into(ext.atom_group)
class _():
def only_atom(self):
assert self.atoms_size() == 1
return self.atoms()[0]
# FIXME suppress_segid has no effect here
def id_str(self, suppress_segid=None):
chain_id = ""
resid = ""
rg = self.parent()
if (rg is not None):
resid = rg.resid()
chain = rg.parent()
if (chain is not None):
chain_id = chain.id
return "%1s%3s%2s%5s" % (self.altloc, self.resname, chain_id, resid)
def occupancy(self, raise_error_if_non_uniform=False):
"""
    Calculate the mean occupancy for atoms in this group, with the option of
    raising ValueError if they differ.
"""
atom_occupancies = self.atoms().extract_occ()
assert (len(atom_occupancies) > 0)
min_max_mean = atom_occupancies.min_max_mean()
if (min_max_mean.min != min_max_mean.max):
if (raise_error_if_non_uniform):
raise ValueError(("Non-uniform occupancies for atom group %s "+
"(range: %.2f - %.2f).") % (self.id_str(), min_max_mean.min,
min_max_mean.max))
return min_max_mean.mean
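  # Illustrative usage (editor's sketch):
  #   for ag in hierarchy.atom_groups():
  #     mean_occ = ag.occupancy()  # pass raise_error_if_non_uniform=True to
  #                                # insist on a single value per group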
bp.inject(ext.atom, __hash_eq_mixin)
@bp.inject_into(ext.atom)
class _():
__doc__ = """
The basic unit of the PDB hierarchy (or the PDB input object in general),
representing a single point scatterer corresponding to an ATOM or HETATM
record in PDB format (plus associated ANISOU or related records if present).
Note that this does not directly store attributes of higher-level entities
whose identity is also recorded in ATOM records, such as the chain ID or
residue name. These may be retrieved either by walking up the hierarchy
starting with atom.parent(), or by calling atom.fetch_labels().
"""
def chain(self):
"""
Convenience method for fetching the chain object associated with this
    atom (or None if not defined).
"""
ag = self.parent()
if (ag is not None):
rg = ag.parent()
if (rg is not None):
return rg.parent()
return None
def is_in_same_conformer_as(self, other):
"""
Indicate whether two atoms are part of the same conformer and thus are
capable of interacting directly, as defined by the parent atom_group and
model object(s).
"""
ag_i = self.parent(optional=False)
ag_j = other.parent(optional=False)
altloc_i = ag_i.altloc
altloc_j = ag_j.altloc
if ( len(altloc_i) != 0
and len(altloc_j) != 0
and altloc_i != altloc_j):
return False
def p3(ag):
return ag.parent(optional=False) \
.parent(optional=False) \
.parent(optional=False)
model_i = p3(ag_i)
model_j = p3(ag_j)
return model_i.memory_id() == model_j.memory_id()
def set_element_and_charge_from_scattering_type_if_necessary(self,
scattering_type):
from cctbx.eltbx.xray_scattering \
import get_element_and_charge_symbols \
as gec
sct_e, sct_c = gec(scattering_type=scattering_type, exact=False)
pdb_ec = self.element.strip() + self.charge.strip()
if (len(pdb_ec) != 0):
if (sct_e == "" and sct_c == ""):
return False
pdb_e, pdb_c = gec(scattering_type=pdb_ec, exact=False)
if ( pdb_e == sct_e
and pdb_c == sct_c):
return False
self.element = "%2s" % sct_e.upper()
self.charge = "%-2s" % sct_c
return True
def charge_as_int(self):
"""
Extract the atomic charge from the (string) charge field.
:returns: Python int, defaulting to zero
"""
charge = self.charge_tidy()
if charge is None:
return 0
if charge.endswith("-"):
sign = -1
else:
sign = 1
charge = charge.strip(" -+")
if charge != "":
return sign * int(charge)
else:
return 0
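# --- Editor's note: illustrative sketch, not part of the original iotbx API ---
# charge_as_int() above maps PDB charge strings such as "2-" or "1+" to signed
# integers.  The hypothetical helper below replays the same parsing rules on a
# plain string so the convention can be checked without constructing an atom.
def _example_charge_string_to_int(charge):
  if (charge is None): return 0
  sign = -1 if charge.endswith("-") else 1
  digits = charge.strip(" -+")
  if (digits == ""): return 0
  return sign * int(digits)
assert _example_charge_string_to_int("2-") == -2
assert _example_charge_string_to_int("1+") == 1
assert _example_charge_string_to_int(None) == 0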
@bp.inject_into(ext.conformer)
class _():
__doc__ = """
Alternate view into a chain object, grouping sequential residues with
  equivalent altlocs. As a general rule it is preferable to iterate over
chain.residue_groups() instead.
"""
def only_residue(self):
residues = self.residues()
assert len(residues) == 1
return residues[0]
def only_atom(self):
return self.only_residue().only_atom()
def get_residue_names_and_classes(self):
# XXX This function should probably be deprecated, since it has been
# duplicated in chain.get_residue_names_and_classes which should probably
# be preferred to this function
rn_seq = []
residue_classes = dict_with_default_0()
for residue in self.residues():
rnpani = residue.residue_name_plus_atom_names_interpreter()
rn = rnpani.work_residue_name
rn_seq.append(rn)
if (rn is None):
c = None
else:
c = common_residue_names_get_class(name=rn)
residue_classes[c] += 1
return (rn_seq, residue_classes)
def is_protein(self, min_content=0.8):
# XXX DEPRECATED
# Used only in mmtbx/validation and wxtbx. Easy to eliminate.
rn_seq, residue_classes = self.get_residue_names_and_classes()
n_aa = residue_classes["common_amino_acid"] + residue_classes['modified_amino_acid']
n_na = residue_classes["common_rna_dna"] + residue_classes['modified_rna_dna']
non_water = len(rn_seq)-residue_classes.get('common_water', 0)
if ((n_aa > n_na) and ((n_aa / non_water) >= min_content)):
return True
return False
def is_na(self, min_content=0.8):
# XXX DEPRECATED
# Used only in mmtbx/validation and wxtbx. Easy to eliminate.
rn_seq, residue_classes = self.get_residue_names_and_classes()
n_aa = residue_classes["common_amino_acid"] + residue_classes['modified_amino_acid']
n_na = residue_classes["common_rna_dna"] + residue_classes['modified_rna_dna']
non_water = len(rn_seq)-residue_classes.get('common_water', 0)
if ((n_na > n_aa) and ((n_na / non_water) >= min_content)):
return True
return False
def as_sequence(self, substitute_unknown='X'):
# XXX This function should probably be deprecated, since it has been
# duplicated in chain.as_sequence which should probably be preferred to
# this function
assert ((isinstance(substitute_unknown, str)) and
(len(substitute_unknown) == 1))
common_rna_dna_codes = {
"A": "A",
"C": "C",
"G": "G",
"U": "U",
"DA": "A",
"DC": "C",
"DG": "G",
"DT": "T"}
rn_seq, residue_classes = self.get_residue_names_and_classes()
n_aa = residue_classes["common_amino_acid"] + residue_classes["modified_amino_acid"]
n_na = residue_classes["common_rna_dna"] + residue_classes["modified_rna_dna"]
seq = []
if (n_aa > n_na):
aa_3_as_1 = one_letter_given_three_letter
for rn in rn_seq:
if (rn in aa_3_as_1_mod):
seq.append(aa_3_as_1_mod.get(rn, substitute_unknown))
else :
seq.append(aa_3_as_1.get(rn, substitute_unknown))
elif (n_na != 0):
for rn in rn_seq:
if rn not in common_rna_dna_codes and rn in na_3_as_1_mod:
rn = na_3_as_1_mod.get(rn, "N")
seq.append(common_rna_dna_codes.get(rn, "N"))
return seq
def format_fasta(self, max_line_length=79):
seq = self.as_sequence()
n = len(seq)
if (n == 0): return None
comment = [">"]
p = self.parent()
if (p is not None):
comment.append('chain "%2s"' % p.id)
comment.append('conformer "%s"' % self.altloc)
result = [" ".join(comment)]
i = 0
while True:
j = min(n, i+max_line_length)
if (j == i): break
result.append("".join(seq[i:j]))
i = j
return result
def as_padded_sequence(self, missing_char='X', skip_insertions=False,
pad=True, substitute_unknown='X', pad_at_start=True):
# XXX This function should probably be deprecated, since it has been
# duplicated in chain.as_padded_sequence which should probably be preferred
# to this function
seq = self.as_sequence()
padded_seq = []
last_resseq = 0
last_icode = " "
i = 0
for i, residue in enumerate(self.residues()):
if (skip_insertions) and (residue.icode != " "):
continue
resseq = residue.resseq_as_int()
if (pad) and (resseq > (last_resseq + 1)):
for x in range(resseq - last_resseq - 1):
if last_resseq == 0 and not pad_at_start: break
padded_seq.append(missing_char)
last_resseq = resseq
padded_seq.append(seq[i])
return "".join(padded_seq)
def as_sec_str_sequence(self, helix_sele, sheet_sele, missing_char='X',
pad=True, pad_at_start=True):
ss_seq = []
last_resseq = 0
for i, residue in enumerate(self.residues()):
resseq = residue.resseq_as_int()
if pad and resseq > (last_resseq + 1):
for x in range(resseq - last_resseq - 1):
if last_resseq == 0 and not pad_at_start: break
ss_seq.append(missing_char)
found = False
for atom in residue.atoms():
if helix_sele[atom.i_seq] :
ss_seq.append('H')
found = True
break
elif sheet_sele[atom.i_seq] :
ss_seq.append('S')
found = True
break
if not found :
ss_seq.append('L')
last_resseq = resseq
return "".join(ss_seq)
def get_residue_ids(self, skip_insertions=False, pad=True, pad_at_start=True):
# XXX This function should probably be deprecated, since it has been
# duplicated in chain.get_residue_ids which should probably be preferred
# to this function
resids = []
last_resseq = 0
last_icode = " "
for i, residue in enumerate(self.residues()):
if (skip_insertions) and (residue.icode != " "):
continue
resseq = residue.resseq_as_int()
if (pad) and (resseq > (last_resseq + 1)):
for x in range(resseq - last_resseq - 1):
if last_resseq == 0 and not pad_at_start: break
resids.append(None)
last_resseq = resseq
resids.append(residue.resid())
return resids
def get_residue_names_padded(
self, skip_insertions=False, pad=True, pad_at_start=True):
# XXX This function should probably be deprecated, since it has been
# duplicated in chain.get_residue_names_padded which should probably be
# preferred to this function
resnames = []
last_resseq = 0
last_icode = " "
for i, residue in enumerate(self.residues()):
if (skip_insertions) and (residue.icode != " "):
continue
resseq = residue.resseq_as_int()
if (pad) and (resseq > (last_resseq + 1)):
for x in range(resseq - last_resseq - 1):
if last_resseq == 0 and not pad_at_start: break
resnames.append(None)
last_resseq = resseq
resnames.append(residue.resname)
return resnames
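# --- Editor's note: illustrative sketch, not part of the original iotbx API ---
# format_fasta() above emits a ">" comment line followed by the one-letter
# sequence wrapped at max_line_length characters per line.  The hypothetical
# helper below reproduces just the wrapping loop on a plain string.
def _example_wrap_sequence(seq, max_line_length=79):
  lines = []
  i = 0
  while (i < len(seq)):
    lines.append(seq[i:i+max_line_length])
    i += max_line_length
  return lines
assert _example_wrap_sequence("ACDEFG", max_line_length=4) == ["ACDE", "FG"]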
@bp.inject_into(ext.residue)
class _():
def __getinitargs__(self):
result_root = self.root()
if (result_root is None):
orig_conformer = self.parent()
assert orig_conformer is not None
orig_chain = orig_conformer.parent()
assert orig_chain is not None
orig_model = orig_chain.parent()
assert orig_model is not None
result_atom_group = atom_group(
altloc=orig_conformer.altloc, resname=self.resname)
result_residue_group = residue_group(
resseq=self.resseq, icode=self.icode)
result_chain = chain(id=orig_chain.id)
result_model = model(id=orig_model.id)
result_root = root()
result_root.append_model(result_model)
result_model.append_chain(result_chain)
result_chain.append_residue_group(result_residue_group)
result_residue_group.append_atom_group(result_atom_group)
for atom in self.atoms():
result_atom_group.append_atom(atom.detached_copy())
return (result_root,)
def standalone_copy(self):
return residue(root=self.__getinitargs__()[0])
def only_atom(self):
assert self.atoms_size() == 1
return self.atoms()[0]
def residue_name_plus_atom_names_interpreter(self,
translate_cns_dna_rna_residue_names=None,
return_mon_lib_dna_name=False):
from iotbx.pdb import residue_name_plus_atom_names_interpreter
return residue_name_plus_atom_names_interpreter(
residue_name=self.resname,
atom_names=[atom.name for atom in self.atoms()],
translate_cns_dna_rna_residue_names=translate_cns_dna_rna_residue_names,
return_mon_lib_dna_name=return_mon_lib_dna_name)
@bp.inject_into(ext.atom_with_labels)
class _():
__doc__ = """
Stand-in for atom object, which explicitly records the attributes normally
reserved for parent classes such as residue name, chain ID, etc.
"""
def __getstate__(self):
labels_dict = {}
for attr in [ "xyz", "sigxyz", "occ", "sigocc", "b", "sigb", "uij",
"siguij", "hetero", "serial", "name", "segid", "element",
"charge", "model_id", "chain_id", "resseq", "icode",
"altloc", "resname", ] :
labels_dict[attr] = getattr(self, attr, None)
return labels_dict
def __setstate__(self, state):
from iotbx.pdb import make_atom_with_labels
state = dict(state)
make_atom_with_labels(self, **state)
def fetch_labels(self):
return self
# MARKED_FOR_DELETION_OLEG
# Reason: so far found only in iotbx/file_reader.py for no clear reason.
class input_hierarchy_pair(object):
def __init__(self,
input,
hierarchy=None,
sort_atoms=False,
):
self.input = input
if (hierarchy is None):
hierarchy = self.input.construct_hierarchy(
set_atom_i_seq=True, sort_atoms=sort_atoms)
self.hierarchy = hierarchy
def __getinitargs__(self):
from pickle import PicklingError
raise PicklingError
def hierarchy_to_input_atom_permutation(self):
"""
Return the permutation selection
(:py:class:`scitbx.array_family.flex.size_t`) mapping the atoms as ordered
by the hierarchy to their original positions in the PDB/mmCIF file.
"""
h_atoms = self.hierarchy.atoms()
sentinel = h_atoms.reset_tmp(first_value=0, increment=1)
return self.input.atoms().extract_tmp_as_size_t()
def input_to_hierarchy_atom_permutation(self):
"""
Return the permutation selection
(:py:class:`scitbx.array_family.flex.size_t`) mapping the atoms as ordered
in the original PDB/mmCIF file to their positions in the hierarchy.
"""
i_atoms = self.input.atoms()
sentinel = i_atoms.reset_tmp(first_value=0, increment=1)
return self.hierarchy.atoms().extract_tmp_as_size_t()
def xray_structure_simple(self, *args, **kwds):
"""
Wrapper for the equivalent method of the input object - extracts the
:py:class:`cctbx.xray.structure` with scatterers in the same order as in
the hierarchy.
"""
perm = self.input_to_hierarchy_atom_permutation()
xrs = self.input.xray_structure_simple(*args, **kwds)
return xrs.select(perm)
def construct_hierarchy(self, *args, **kwds) : # TODO remove eventually
"""
Returns a reference to the existing hierarchy. For backwards compatibility
only, and issues a :py:class:`warnings.DeprecationWarning`.
"""
warnings.warn("Please access input.hierarchy directly.",
DeprecationWarning)
return self.hierarchy
def crystal_symmetry(self, *args, **kwds):
return self.input.crystal_symmetry(*args, **kwds)
class input(input_hierarchy_pair):
"""
Class used for reading a PDB hierarchy from a file or string.
Attributes
----------
input : iotbx.pdb.pdb_input_from_any
hierarchy : iotbx.pdb.hierarchy.root
Examples
--------
>>> import iotbx.pdb.hierarchy
>>> pdb_in = iotbx.pdb.hierarchy.input(pdb_string='''
... ATOM 1 N ASP A 37 10.710 14.456 9.568 1.00 15.78 N
... ATOM 2 CA ASP A 37 9.318 14.587 9.999 1.00 18.38 C
... ''')
  >>> print(pdb_in.hierarchy.atoms_size())
2
"")
"""
def __init__(self, file_name=None,
pdb_string=None, source_info=Auto, sort_atoms=True):
"""
Initializes an input from a file or string.
Parameters
----------
file_name : str, optional
pdb_string : str, optional
source_info : str, optional
Indicates where this PDB came from (i.e. "string")
"""
assert [file_name, pdb_string].count(None) == 1
import iotbx.pdb
if (file_name is not None):
assert source_info is Auto
pdb_inp = iotbx.pdb.input(file_name=file_name)
else:
if (source_info is Auto): source_info = "string"
pdb_inp = iotbx.pdb.input(
source_info=source_info, lines=flex.split_lines(pdb_string))
super(input, self).__init__(input=pdb_inp, sort_atoms=sort_atoms)
# END_MARKED_FOR_DELETION_OLEG
class show_summary(input):
def __init__(self,
file_name=None,
pdb_string=None,
out=None,
prefix="",
flag_errors=True,
flag_warnings=True,
residue_groups_max_show=10,
duplicate_atom_labels_max_show=10,
level_id=None,
level_id_exception=ValueError):
input.__init__(self, file_name=file_name, pdb_string=pdb_string)
print(prefix+self.input.source_info(), file=out)
self.overall_counts = self.hierarchy.overall_counts()
self.overall_counts.show(
out=out,
prefix=prefix+" ",
residue_groups_max_show=residue_groups_max_show,
duplicate_atom_labels_max_show=duplicate_atom_labels_max_show)
if (level_id is not None):
self.hierarchy.show(
out=out,
prefix=prefix+" ",
level_id=level_id,
level_id_exception=level_id_exception)
# MARKED_FOR_DELETION_OLEG
# Reason: functionality is moved to mmtbx.model and uses better all_chain_ids
# function from iotbx.pdb.utils
# Not until used in iotbx/pdb/__init__py: join_fragment_files:
# GUI app: Combine PDB files
# CL app: iotbx.pdb.join_fragment_files
def suffixes_for_chain_ids(suffixes=Auto):
if (suffixes is Auto):
suffixes="123456789" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
"abcdefghijklmnopqrstuvwxyz"
return suffixes
def append_chain_id_suffixes(roots, suffixes=Auto):
suffixes = suffixes_for_chain_ids(suffixes=suffixes)
assert len(roots) <= len(suffixes)
for root,suffix in zip(roots, suffixes):
for model in root.models():
for chain in model.chains():
assert len(chain.id) == 1, len(chain.id)
chain.id += suffix
def join_roots(roots, chain_id_suffixes=Auto):
"""
Combine two root objects.
"""
if (chain_id_suffixes is not None):
append_chain_id_suffixes(roots=roots, suffixes=chain_id_suffixes)
result = root()
for rt in roots:
result.transfer_chains_from_other(other=rt)
return result
# END_MARKED_FOR_DELETION_OLEG
# XXX: Nat's utility functions
# also used in ncs_search.py
def new_hierarchy_from_chain(chain):
"""
  Given a chain object, create an entirely new hierarchy object containing only
this chain (using a new copy).
"""
import iotbx.pdb.hierarchy
hierarchy = iotbx.pdb.hierarchy.root()
model = iotbx.pdb.hierarchy.model()
model.append_chain(chain.detached_copy())
hierarchy.append_model(model)
return hierarchy
def find_and_replace_chains(original_hierarchy, partial_hierarchy,
log=sys.stdout):
"""
Delete and replace the first chain in the original hierarchy corresponding
to each model/ID combination in the partial hierarchy. Note that this means
that if waters and heteroatoms are given the same ID as a protein chain
(separated by other chains or TER record(s)), but the partial hierarchy only
contains a substitute protein chain, the heteroatom chain will be kept.
"""
for original_model in original_hierarchy.models():
for partial_model in partial_hierarchy.models():
if original_model.id == partial_model.id :
#print >> log, " found model '%s'" % partial_model.id
i = 0
while i < len(original_model.chains()):
original_chain = original_model.chains()[i]
j = 0
while j < len(partial_model.chains()):
partial_chain = partial_model.chains()[j]
if original_chain.id == partial_chain.id :
#print >> log, " found chain '%s' at index %d" % (
# partial_chain.id, i)
original_model.remove_chain(i)
original_model.insert_chain(i, partial_chain.detached_copy())
partial_model.remove_chain(j)
break
j += 1
i += 1
def get_contiguous_ranges(hierarchy):
assert (len(hierarchy.models()) == 1)
chain_clauses = []
for chain in hierarchy.models()[0].chains():
resid_ranges = []
start_resid = None
last_resid = None
last_resseq = - sys.maxsize
for residue_group in chain.residue_groups():
resseq = residue_group.resseq_as_int()
resid = residue_group.resid()
if (resseq != last_resseq) and (resseq != (last_resseq + 1)):
if (start_resid is not None):
resid_ranges.append((start_resid, last_resid))
start_resid = resid
last_resid = resid
else :
if (start_resid is None):
start_resid = resid
last_resid = resid
last_resseq = resseq
if (start_resid is not None):
resid_ranges.append((start_resid, last_resid))
resid_clauses = []
for r1, r2 in resid_ranges :
if (r1 == r2):
resid_clauses.append("resid %s" % r1)
else :
resid_clauses.append("resid %s through %s" % (r1,r2))
sele = ("chain '%s' and ((" + ") or (".join(resid_clauses) + "))") % \
chain.id
chain_clauses.append(sele)
return chain_clauses
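# --- Editor's note: illustrative sketch, not part of the original API ---
# get_contiguous_ranges() above merges runs of consecutive residue numbers per
# chain into "resid A through B" selection clauses.  The hypothetical helper
# below applies the same merging rule to a plain list of ints to make the
# grouping behaviour concrete.
def _example_contiguous_ranges(resseqs):
  ranges = []
  start = last = None
  for resseq in resseqs:
    if (last is not None) and (resseq in (last, last + 1)):
      last = resseq
    else:
      if (start is not None):
        ranges.append((start, last))
      start = last = resseq
  if (start is not None):
    ranges.append((start, last))
  return ranges
assert _example_contiguous_ranges([3, 4, 5, 9, 10, 15]) == [(3, 5), (9, 10), (15, 15)]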
# used for reporting build results in phenix
def get_residue_and_fragment_count(pdb_file=None, pdb_hierarchy=None):
from libtbx import smart_open
if (pdb_file is not None):
raw_records = flex.std_string()
with smart_open.for_reading(file_name=pdb_file) as f:
lines = f.read()
raw_records.extend(flex.split_lines(lines))
pdb_in = iotbx.pdb.input(source_info=pdb_file, lines=raw_records)
pdb_hierarchy = pdb_in.construct_hierarchy()
assert (pdb_hierarchy is not None)
models = pdb_hierarchy.models()
if len(models) == 0 :
return (0, 0, 0)
chains = models[0].chains()
if len(chains) == 0 :
return (0, 0, 0)
n_res = 0
n_frag = 0
n_h2o = 0
for chain in chains :
i = -999
for res in chain.conformers()[0].residues():
residue_type = common_residue_names_get_class(
res.resname, consider_ccp4_mon_lib_rna_dna=True)
if ( ('amino_acid' in residue_type) or ('rna_dna' in residue_type) ):
n_res += 1
resseq = res.resseq_as_int()
if resseq > (i + 1):
n_frag += 1
i = resseq
elif ('water' in residue_type):
n_h2o += 1
return (n_res, n_frag, n_h2o)
def sites_diff(hierarchy_1,
hierarchy_2,
exclude_waters=True,
return_hierarchy=True,
log=None):
"""
Given two PDB hierarchies, calculate the shift of each atom (accounting for
possible insertions/deletions) and (optionally) apply it to the B-factor for
display in PyMOL, plotting in PHENIX GUI, etc.
"""
if (log is None) : log = null_out()
atom_lookup = {}
deltas = flex.double(hierarchy_2.atoms_size(), -1.)
for atom in hierarchy_1.atoms_with_labels():
if (atom.resname in ["HOH", "WAT"]) and (exclude_waters):
continue
atom_id = atom.id_str()
if (atom_id in atom_lookup):
raise RuntimeError("Duplicate atom ID - can't extract coordinates.")
atom_lookup[atom_id] = atom.xyz
for i_seq, atom in enumerate(hierarchy_2.atoms_with_labels()):
if (atom.resname in ["HOH", "WAT"]) and (exclude_waters):
continue
atom_id = atom.id_str()
if (atom_id in atom_lookup):
x1,y1,z1 = atom_lookup[atom_id]
x2,y2,z2 = atom.xyz
delta = math.sqrt((x2-x1)**2 + (y2-y1)**2 + (z2-z1)**2)
deltas[i_seq] = delta
if (return_hierarchy):
hierarchy_new = hierarchy_2.deep_copy()
hierarchy_new.atoms().set_b(deltas)
return hierarchy_new
else :
return deltas
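# --- Editor's note: illustrative sketch, not part of the original API ---
# sites_diff() above records, for each atom id present in both hierarchies, the
# Euclidean shift between the two models (optionally written into the B-factor
# column for display).  The per-atom distance is simply:
def _example_atom_shift(xyz_1, xyz_2):
  return math.sqrt(sum([(b - a)**2 for a, b in zip(xyz_1, xyz_2)]))
assert abs(_example_atom_shift((0., 0., 0.), (3., 4., 0.)) - 5.0) < 1.e-9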
def substitute_atom_group(
current_group,
new_group):
"""
Substitute sidechain atoms from one residue for another, using
least-squares superposition to align the backbone atoms.
Limited functionality:
1) Amino-acids only, 2) side chain atoms only.
"""
from scitbx.math import superpose
new_atoms = new_group.detached_copy().atoms()
selection_fixed = flex.size_t()
selection_moving = flex.size_t()
res_class = common_residue_names_get_class(current_group.resname)
if(res_class != "common_amino_acid"):
raise Sorry("Only common amino-acid residues supported.")
aa_backbone_atoms_1 = [" CA ", " C ", " N ", " O "]
aa_backbone_atoms_2 = [" CA ", " C ", " N ", " CB "]
aa_backbone_atoms_1.sort()
aa_backbone_atoms_2.sort()
#
def get_bb_atoms(current_group, aa_backbone_atoms):
result = []
for atom in current_group.atoms():
      if(atom.name in aa_backbone_atoms):
result.append(atom.name)
result.sort()
return result
aa_backbone_atoms_current = get_bb_atoms(current_group, aa_backbone_atoms_1)
aa_backbone_atoms_new = get_bb_atoms(new_group, aa_backbone_atoms_1)
if(aa_backbone_atoms_current != aa_backbone_atoms_1 or
aa_backbone_atoms_new != aa_backbone_atoms_1):
outl = ''
for atom in current_group.atoms():
outl += '\n%s' % atom.quote()
raise Sorry("Main chain must be complete. %s" % outl)
#
for i_seq, atom in enumerate(current_group.atoms()):
if(not atom.name in aa_backbone_atoms_2): continue
for j_seq, other_atom in enumerate(new_group.atoms()):
if(atom.name == other_atom.name):
selection_fixed.append(i_seq)
selection_moving.append(j_seq)
sites_fixed = current_group.atoms().extract_xyz().select(selection_fixed)
sites_moving = new_atoms.extract_xyz().select(selection_moving)
assert sites_fixed.size() == sites_moving.size()
lsq_fit = superpose.least_squares_fit(
reference_sites = sites_fixed,
other_sites = sites_moving)
sites_new = new_atoms.extract_xyz()
sites_new = lsq_fit.r.elems * sites_new + lsq_fit.t.elems
new_atoms.set_xyz(sites_new)
atom_b_iso = {}
atom_occ = {}
mean_b = flex.mean(current_group.atoms().extract_b())
for atom in current_group.atoms():
if(not atom.name in aa_backbone_atoms_1):
current_group.remove_atom(atom)
atom_b_iso[atom.name] = atom.b
atom_occ[atom.name] = atom.occ
for atom in new_atoms:
if(not atom.name in aa_backbone_atoms_1):
if(atom.name in atom_b_iso): atom.b = atom_b_iso[atom.name]
else: atom.b = mean_b
if(atom.name in atom_occ): atom.occ = atom_occ[atom.name]
else: atom.occ = 1.
current_group.append_atom(atom)
current_group.resname = new_group.resname
return current_group
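# --- Editor's note: illustrative sketch, not part of the original API ---
# substitute_atom_group() above relies on scitbx's superpose.least_squares_fit
# to align the shared backbone atoms before grafting the new side chain.  The
# hypothetical numpy routine below shows the same least-squares rigid-body fit
# (Kabsch algorithm) on plain coordinate arrays; it is not the scitbx code path.
def _example_lsq_superpose(reference, moving):
  import numpy as np
  reference = np.asarray(reference, dtype=float)
  moving = np.asarray(moving, dtype=float)
  ref_c = reference.mean(axis=0)
  mov_c = moving.mean(axis=0)
  h = (moving - mov_c).T.dot(reference - ref_c)
  u, s, vt = np.linalg.svd(h)
  d = 1.0 if np.linalg.det(vt.T.dot(u.T)) >= 0 else -1.0
  r = vt.T.dot(np.diag([1., 1., d])).dot(u.T)
  t = ref_c - r.dot(mov_c)
  # apply as: aligned = moving.dot(r.T) + t
  return r, t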
|
py | 1a40e482b8af51e55b30d4ed244dd7dfb8cebb38 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numpy.linalg import LinAlgError
from ... import opcodes as OperandDef
from ...serialize import KeyField, StringField
from ...core import ExecutableTuple
from ..array_utils import device, as_same_device
from ..datasource import tensor as astensor
from ..operands import TensorHasInput, TensorOperandMixin
from ..core import TensorOrder
from .core import SFQR, TSQR
class TensorQR(TensorHasInput, TensorOperandMixin):
_op_type_ = OperandDef.QR
_input = KeyField('input')
_method = StringField('method')
def __init__(self, method=None, dtype=None, **kw):
super(TensorQR, self).__init__(_method=method, _dtype=dtype, **kw)
@property
def method(self):
return self._method
@property
def output_limit(self):
return 2
def _set_inputs(self, inputs):
super(TensorQR, self)._set_inputs(inputs)
self._input = self._inputs[0]
def __call__(self, a):
a = astensor(a)
if a.ndim != 2:
raise LinAlgError('{0}-dimensional tensor given. '
'Tensor must be two-dimensional'.format(a.ndim))
tiny_q, tiny_r = np.linalg.qr(np.ones((1, 1), dtype=a.dtype))
x, y = a.shape
q_shape, r_shape = (a.shape, (y, y)) if x > y else ((x, x), a.shape)
q, r = self.new_tensors([a],
kws=[{'side': 'q', 'dtype': tiny_q.dtype,
'shape': q_shape, 'order': TensorOrder.C_ORDER},
{'side': 'r', 'dtype': tiny_r.dtype,
'shape': r_shape, 'order': TensorOrder.C_ORDER}])
return ExecutableTuple([q, r])
@classmethod
def tile(cls, op):
q, r = op.outputs
q_dtype, r_dtype = q.dtype, r.dtype
q_shape, r_shape = q.shape, r.shape
in_tensor = op.input
if in_tensor.chunk_shape == (1, 1):
in_chunk = in_tensor.chunks[0]
chunk_op = op.copy().reset_key()
qr_chunks = chunk_op.new_chunks([in_chunk], shape=(q_shape, r_shape), index=in_chunk.index,
kws=[{'side': 'q'}, {'side': 'r'}])
q_chunk, r_chunk = qr_chunks
new_op = op.copy()
kws = [
{'chunks': [q_chunk], 'nsplits': ((q_shape[0],), (q_shape[1],)),
'dtype': q_dtype, 'shape': q_shape, 'order': q.order},
{'chunks': [r_chunk], 'nsplits': ((r_shape[0],), (r_shape[1],)),
'dtype': r_dtype, 'shape': r_shape, 'order': r.order}
]
return new_op.new_tensors(op.inputs, kws=kws)
elif op.method == 'tsqr':
return TSQR.tile(op)
elif op.method == 'sfqr':
return SFQR.tile(op)
else:
            raise NotImplementedError('Only tsqr and sfqr methods are supported for now')
@classmethod
def execute(cls, ctx, op):
(a,), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
with device(device_id):
q, r = xp.linalg.qr(a)
qc, rc = op.outputs
ctx[qc.key] = q
ctx[rc.key] = r
def qr(a, method='tsqr'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
method: {'tsqr', 'sfqr'}, optional
method to calculate qr factorization, tsqr as default
TSQR is presented in:
A. Benson, D. Gleich, and J. Demmel.
Direct QR factorizations for tall-and-skinny matrices in
MapReduce architectures.
IEEE International Conference on Big Data, 2013.
http://arxiv.org/abs/1301.1071
    SFQR is a QR decomposition for a fat and short matrix:
A = [A1, A2, A3, ...], A1 may be decomposed as A1 = Q1 * R1,
for A = Q * R, Q = Q1, R = [R1, R2, R3, ...] where A2 = Q1 * R2, A3 = Q1 * R3, ...
Returns
-------
q : Tensor of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : Tensor of float or complex, optional
The upper-triangular matrix.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Examples
--------
>>> import mars.tensor as mt
>>> a = mt.random.randn(9, 6)
>>> q, r = mt.linalg.qr(a)
>>> mt.allclose(a, mt.dot(q, r)).execute() # a does equal qr
True
"""
op = TensorQR(method=method)
return op(a)
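# --- Editor's note: illustrative sketch, not part of the Mars API ---
# A plain-numpy rendition of the TSQR idea referenced in the docstring above:
# QR-factorize row blocks of a tall-and-skinny matrix independently, stack the
# per-block R factors, QR the stacked R, and recombine to recover the global Q
# and R.  Names below are hypothetical and the routine is single-process only.
def _tsqr_numpy_sketch(a, n_blocks=4):
    blocks = np.array_split(a, n_blocks, axis=0)
    qs, rs = zip(*[np.linalg.qr(blk) for blk in blocks])
    q_stack, r = np.linalg.qr(np.vstack(rs))
    offsets = np.cumsum([r_blk.shape[0] for r_blk in rs])[:-1]
    q_parts = np.split(q_stack, offsets, axis=0)
    q = np.vstack([q_blk.dot(part) for q_blk, part in zip(qs, q_parts)])
    return q, r
# e.g.: a = np.random.randn(100, 5); q, r = _tsqr_numpy_sketch(a)
# then np.allclose(q.dot(r), a) and np.allclose(q.T.dot(q), np.eye(5)) both hold.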
|
py | 1a40e55c5b0ba65c9542d2a541566c69bb56e29d | import json
import plotly
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
app = Flask(__name__)
def tokenize(text):
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
# load data
engine = create_engine('sqlite:///../data/DisasterResponse.db')
# NOTE: read_sql_table expects a table name, not a file path; the table name
# below is an assumption and must match the table written by the ETL pipeline.
df = pd.read_sql_table('DisasterResponse', engine)
# load model
model = joblib.load("/home/workspace/models/classifier.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
# extract data needed for visuals
# TODO: Below is an example - modify to extract data for your own visuals
genre_counts = df.groupby('genre').count()['message']
genre_names = list(genre_counts.index)
# create visuals
# TODO: Below is an example - modify to create your own visuals
graphs = [
{
'data': [
Bar(
x=genre_names,
y=genre_counts
)
],
'layout': {
'title': 'Distribution of Message Genres',
'yaxis': {
'title': "Count"
},
'xaxis': {
'title': "Genre"
}
}
}
]
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
# This will render the go.html Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
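# --- Editor's note: illustrative sketch, not part of the original app ---
# The /go route above reads the free-text "query" parameter and renders the
# per-category predictions.  A client could exercise it roughly as below once
# the app is running on the port configured in main(); the `requests` package
# is an assumed extra dependency and the helper name is hypothetical.
def example_client_query(message="we need water and food"):
    import requests
    response = requests.get(
        "http://localhost:3001/go",
        params={"query": message}
    )
    return response.status_code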
def main():
app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main() |
py | 1a40e63326557ffd4a077bd391f363c9bb3ad942 | # -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2018 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
import hashlib
import functools
from typing import Union, Tuple, Optional
from ctypes import (
byref, c_byte, c_int, c_uint, c_char_p, c_size_t, c_void_p, create_string_buffer,
CFUNCTYPE, POINTER, cast
)
from .util import bfh, bh2u, assert_bytes, to_bytes, InvalidPassword, profiler, randrange
from .crypto import (sha256d, aes_encrypt_with_iv, aes_decrypt_with_iv, hmac_oneshot)
from . import constants
from .logging import get_logger
from .ecc_fast import _libsecp256k1, SECP256K1_EC_UNCOMPRESSED
_logger = get_logger(__name__)
def string_to_number(b: bytes) -> int:
return int.from_bytes(b, byteorder='big', signed=False)
def sig_string_from_der_sig(der_sig: bytes) -> bytes:
r, s = get_r_and_s_from_der_sig(der_sig)
return sig_string_from_r_and_s(r, s)
def der_sig_from_sig_string(sig_string: bytes) -> bytes:
r, s = get_r_and_s_from_sig_string(sig_string)
return der_sig_from_r_and_s(r, s)
def der_sig_from_r_and_s(r: int, s: int) -> bytes:
sig_string = (int.to_bytes(r, length=32, byteorder="big") +
int.to_bytes(s, length=32, byteorder="big"))
sig = create_string_buffer(64)
ret = _libsecp256k1.secp256k1_ecdsa_signature_parse_compact(_libsecp256k1.ctx, sig, sig_string)
if not ret:
raise Exception("Bad signature")
ret = _libsecp256k1.secp256k1_ecdsa_signature_normalize(_libsecp256k1.ctx, sig, sig)
der_sig = create_string_buffer(80) # this much space should be enough
der_sig_size = c_size_t(len(der_sig))
ret = _libsecp256k1.secp256k1_ecdsa_signature_serialize_der(_libsecp256k1.ctx, der_sig, byref(der_sig_size), sig)
if not ret:
raise Exception("failed to serialize DER sig")
der_sig_size = der_sig_size.value
return bytes(der_sig)[:der_sig_size]
def get_r_and_s_from_der_sig(der_sig: bytes) -> Tuple[int, int]:
assert isinstance(der_sig, bytes)
sig = create_string_buffer(64)
ret = _libsecp256k1.secp256k1_ecdsa_signature_parse_der(_libsecp256k1.ctx, sig, der_sig, len(der_sig))
if not ret:
raise Exception("Bad signature")
ret = _libsecp256k1.secp256k1_ecdsa_signature_normalize(_libsecp256k1.ctx, sig, sig)
compact_signature = create_string_buffer(64)
_libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(_libsecp256k1.ctx, compact_signature, sig)
r = int.from_bytes(compact_signature[:32], byteorder="big")
s = int.from_bytes(compact_signature[32:], byteorder="big")
return r, s
def get_r_and_s_from_sig_string(sig_string: bytes) -> Tuple[int, int]:
if not (isinstance(sig_string, bytes) and len(sig_string) == 64):
raise Exception("sig_string must be bytes, and 64 bytes exactly")
sig = create_string_buffer(64)
ret = _libsecp256k1.secp256k1_ecdsa_signature_parse_compact(_libsecp256k1.ctx, sig, sig_string)
if not ret:
raise Exception("Bad signature")
ret = _libsecp256k1.secp256k1_ecdsa_signature_normalize(_libsecp256k1.ctx, sig, sig)
compact_signature = create_string_buffer(64)
_libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(_libsecp256k1.ctx, compact_signature, sig)
r = int.from_bytes(compact_signature[:32], byteorder="big")
s = int.from_bytes(compact_signature[32:], byteorder="big")
return r, s
def sig_string_from_r_and_s(r: int, s: int) -> bytes:
sig_string = (int.to_bytes(r, length=32, byteorder="big") +
int.to_bytes(s, length=32, byteorder="big"))
sig = create_string_buffer(64)
ret = _libsecp256k1.secp256k1_ecdsa_signature_parse_compact(_libsecp256k1.ctx, sig, sig_string)
if not ret:
raise Exception("Bad signature")
ret = _libsecp256k1.secp256k1_ecdsa_signature_normalize(_libsecp256k1.ctx, sig, sig)
compact_signature = create_string_buffer(64)
_libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(_libsecp256k1.ctx, compact_signature, sig)
return bytes(compact_signature)
def _x_and_y_from_pubkey_bytes(pubkey: bytes) -> Tuple[int, int]:
assert isinstance(pubkey, bytes), f'pubkey must be bytes, not {type(pubkey)}'
pubkey_ptr = create_string_buffer(64)
ret = _libsecp256k1.secp256k1_ec_pubkey_parse(
_libsecp256k1.ctx, pubkey_ptr, pubkey, len(pubkey))
if not ret:
raise InvalidECPointException('public key could not be parsed or is invalid')
pubkey_serialized = create_string_buffer(65)
pubkey_size = c_size_t(65)
_libsecp256k1.secp256k1_ec_pubkey_serialize(
_libsecp256k1.ctx, pubkey_serialized, byref(pubkey_size), pubkey_ptr, SECP256K1_EC_UNCOMPRESSED)
pubkey_serialized = bytes(pubkey_serialized)
assert pubkey_serialized[0] == 0x04, pubkey_serialized
x = int.from_bytes(pubkey_serialized[1:33], byteorder='big', signed=False)
y = int.from_bytes(pubkey_serialized[33:65], byteorder='big', signed=False)
return x, y
class InvalidECPointException(Exception):
"""e.g. not on curve, or infinity"""
@functools.total_ordering
class ECPubkey(object):
def __init__(self, b: Optional[bytes]):
if b is not None:
assert isinstance(b, (bytes, bytearray)), f'pubkey must be bytes-like, not {type(b)}'
if isinstance(b, bytearray):
b = bytes(b)
self._x, self._y = _x_and_y_from_pubkey_bytes(b)
else:
self._x, self._y = None, None
@classmethod
def from_sig_string(cls, sig_string: bytes, recid: int, msg_hash: bytes) -> 'ECPubkey':
assert_bytes(sig_string)
if len(sig_string) != 64:
raise Exception(f'wrong encoding used for signature? len={len(sig_string)} (should be 64)')
if recid < 0 or recid > 3:
raise ValueError('recid is {}, but should be 0 <= recid <= 3'.format(recid))
sig65 = create_string_buffer(65)
ret = _libsecp256k1.secp256k1_ecdsa_recoverable_signature_parse_compact(
_libsecp256k1.ctx, sig65, sig_string, recid)
if not ret:
raise Exception('failed to parse signature')
pubkey = create_string_buffer(64)
ret = _libsecp256k1.secp256k1_ecdsa_recover(_libsecp256k1.ctx, pubkey, sig65, msg_hash)
if not ret:
raise InvalidECPointException('failed to recover public key')
return ECPubkey._from_libsecp256k1_pubkey_ptr(pubkey)
@classmethod
def from_signature65(cls, sig: bytes, msg_hash: bytes) -> Tuple['ECPubkey', bool]:
if len(sig) != 65:
raise Exception(f'wrong encoding used for signature? len={len(sig)} (should be 65)')
nV = sig[0]
if nV < 27 or nV >= 35:
raise Exception("Bad encoding")
if nV >= 31:
compressed = True
nV -= 4
else:
compressed = False
recid = nV - 27
return cls.from_sig_string(sig[1:], recid, msg_hash), compressed
@classmethod
def from_x_and_y(cls, x: int, y: int) -> 'ECPubkey':
_bytes = (b'\x04'
+ int.to_bytes(x, length=32, byteorder='big', signed=False)
+ int.to_bytes(y, length=32, byteorder='big', signed=False))
return ECPubkey(_bytes)
def get_public_key_bytes(self, compressed=True):
if self.is_at_infinity(): raise Exception('point is at infinity')
x = int.to_bytes(self.x(), length=32, byteorder='big', signed=False)
y = int.to_bytes(self.y(), length=32, byteorder='big', signed=False)
if compressed:
header = b'\x03' if self.y() & 1 else b'\x02'
return header + x
else:
header = b'\x04'
return header + x + y
def get_public_key_hex(self, compressed=True):
return bh2u(self.get_public_key_bytes(compressed))
def point(self) -> Tuple[int, int]:
return self.x(), self.y()
def x(self) -> int:
return self._x
def y(self) -> int:
return self._y
def _to_libsecp256k1_pubkey_ptr(self):
pubkey = create_string_buffer(64)
public_pair_bytes = self.get_public_key_bytes(compressed=False)
ret = _libsecp256k1.secp256k1_ec_pubkey_parse(
_libsecp256k1.ctx, pubkey, public_pair_bytes, len(public_pair_bytes))
if not ret:
raise Exception('public key could not be parsed or is invalid')
return pubkey
@classmethod
def _from_libsecp256k1_pubkey_ptr(cls, pubkey) -> 'ECPubkey':
pubkey_serialized = create_string_buffer(65)
pubkey_size = c_size_t(65)
_libsecp256k1.secp256k1_ec_pubkey_serialize(
_libsecp256k1.ctx, pubkey_serialized, byref(pubkey_size), pubkey, SECP256K1_EC_UNCOMPRESSED)
return ECPubkey(bytes(pubkey_serialized))
def __repr__(self):
if self.is_at_infinity():
return f"<ECPubkey infinity>"
return f"<ECPubkey {self.get_public_key_hex()}>"
def __mul__(self, other: int):
if not isinstance(other, int):
raise TypeError('multiplication not defined for ECPubkey and {}'.format(type(other)))
other %= CURVE_ORDER
if self.is_at_infinity() or other == 0:
return POINT_AT_INFINITY
pubkey = self._to_libsecp256k1_pubkey_ptr()
ret = _libsecp256k1.secp256k1_ec_pubkey_tweak_mul(_libsecp256k1.ctx, pubkey, other.to_bytes(32, byteorder="big"))
if not ret:
return POINT_AT_INFINITY
return ECPubkey._from_libsecp256k1_pubkey_ptr(pubkey)
def __rmul__(self, other: int):
return self * other
def __add__(self, other):
if not isinstance(other, ECPubkey):
raise TypeError('addition not defined for ECPubkey and {}'.format(type(other)))
if self.is_at_infinity(): return other
if other.is_at_infinity(): return self
pubkey1 = self._to_libsecp256k1_pubkey_ptr()
pubkey2 = other._to_libsecp256k1_pubkey_ptr()
pubkey_sum = create_string_buffer(64)
pubkey1 = cast(pubkey1, c_char_p)
pubkey2 = cast(pubkey2, c_char_p)
array_of_pubkey_ptrs = (c_char_p * 2)(pubkey1, pubkey2)
ret = _libsecp256k1.secp256k1_ec_pubkey_combine(_libsecp256k1.ctx, pubkey_sum, array_of_pubkey_ptrs, 2)
if not ret:
return POINT_AT_INFINITY
return ECPubkey._from_libsecp256k1_pubkey_ptr(pubkey_sum)
def __eq__(self, other) -> bool:
if not isinstance(other, ECPubkey):
return False
return self.point() == other.point()
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.point())
def __lt__(self, other):
if not isinstance(other, ECPubkey):
raise TypeError('comparison not defined for ECPubkey and {}'.format(type(other)))
return (self.x() or 0) < (other.x() or 0)
def verify_message_for_address(self, sig65: bytes, message: bytes, algo=lambda x: sha256d(msg_magic(x))) -> None:
assert_bytes(message)
h = algo(message)
public_key, compressed = self.from_signature65(sig65, h)
# check public key
if public_key != self:
raise Exception("Bad signature")
# check message
self.verify_message_hash(sig65[1:], h)
# TODO return bool instead of raising
def verify_message_hash(self, sig_string: bytes, msg_hash: bytes) -> None:
assert_bytes(sig_string)
if len(sig_string) != 64:
raise Exception(f'wrong encoding used for signature? len={len(sig_string)} (should be 64)')
if not (isinstance(msg_hash, bytes) and len(msg_hash) == 32):
raise Exception("msg_hash must be bytes, and 32 bytes exactly")
sig = create_string_buffer(64)
ret = _libsecp256k1.secp256k1_ecdsa_signature_parse_compact(_libsecp256k1.ctx, sig, sig_string)
if not ret:
raise Exception("Bad signature")
ret = _libsecp256k1.secp256k1_ecdsa_signature_normalize(_libsecp256k1.ctx, sig, sig)
pubkey = self._to_libsecp256k1_pubkey_ptr()
if 1 != _libsecp256k1.secp256k1_ecdsa_verify(_libsecp256k1.ctx, sig, msg_hash, pubkey):
raise Exception("Bad signature")
def encrypt_message(self, message: bytes, magic: bytes = b'BIE1') -> bytes:
"""
ECIES encryption/decryption methods; AES-128-CBC with PKCS7 is used as the cipher; hmac-sha256 is used as the mac
"""
assert_bytes(message)
ephemeral = ECPrivkey.generate_random_key()
ecdh_key = (self * ephemeral.secret_scalar).get_public_key_bytes(compressed=True)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
ciphertext = aes_encrypt_with_iv(key_e, iv, message)
ephemeral_pubkey = ephemeral.get_public_key_bytes(compressed=True)
encrypted = magic + ephemeral_pubkey + ciphertext
mac = hmac_oneshot(key_m, encrypted, hashlib.sha256)
return base64.b64encode(encrypted + mac)
@classmethod
def order(cls):
return CURVE_ORDER
def is_at_infinity(self):
return self == POINT_AT_INFINITY
@classmethod
def is_pubkey_bytes(cls, b: bytes):
try:
ECPubkey(b)
return True
except:
return False
GENERATOR = ECPubkey(bytes.fromhex('0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798'
'483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8'))
CURVE_ORDER = 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFE_BAAEDCE6_AF48A03B_BFD25E8C_D0364141
POINT_AT_INFINITY = ECPubkey(None)
def msg_magic(message: bytes) -> bytes:
from .bitcoin import var_int
length = bfh(var_int(len(message)))
return b"\x15Qtum Signed Message:\n" + length + message
def verify_signature(pubkey: bytes, sig: bytes, h: bytes) -> bool:
try:
ECPubkey(pubkey).verify_message_hash(sig, h)
except:
return False
return True
def verify_message_with_address(address: str, sig65: bytes, message: bytes, *, net=None):
from .bitcoin import pubkey_to_address
assert_bytes(sig65, message)
if net is None: net = constants.net
try:
h = sha256d(msg_magic(message))
public_key, compressed = ECPubkey.from_signature65(sig65, h)
# check public key using the address
pubkey_hex = public_key.get_public_key_hex(compressed)
for txin_type in ['p2pkh','p2wpkh','p2wpkh-p2sh']:
addr = pubkey_to_address(txin_type, pubkey_hex, net=net)
if address == addr:
break
else:
raise Exception("Bad signature")
# check message
public_key.verify_message_hash(sig65[1:], h)
return True
except Exception as e:
_logger.info(f"Verification error: {repr(e)}")
return False
def is_secret_within_curve_range(secret: Union[int, bytes]) -> bool:
if isinstance(secret, bytes):
secret = string_to_number(secret)
return 0 < secret < CURVE_ORDER
class ECPrivkey(ECPubkey):
def __init__(self, privkey_bytes: bytes):
assert_bytes(privkey_bytes)
if len(privkey_bytes) != 32:
raise Exception('unexpected size for secret. should be 32 bytes, not {}'.format(len(privkey_bytes)))
secret = string_to_number(privkey_bytes)
if not is_secret_within_curve_range(secret):
raise InvalidECPointException('Invalid secret scalar (not within curve order)')
self.secret_scalar = secret
pubkey = GENERATOR * secret
super().__init__(pubkey.get_public_key_bytes(compressed=False))
@classmethod
def from_secret_scalar(cls, secret_scalar: int):
secret_bytes = int.to_bytes(secret_scalar, length=32, byteorder='big', signed=False)
return ECPrivkey(secret_bytes)
@classmethod
def from_arbitrary_size_secret(cls, privkey_bytes: bytes):
"""This method is only for legacy reasons. Do not introduce new code that uses it.
Unlike the default constructor, this method does not require len(privkey_bytes) == 32,
and the secret does not need to be within the curve order either.
"""
return ECPrivkey(cls.normalize_secret_bytes(privkey_bytes))
@classmethod
def normalize_secret_bytes(cls, privkey_bytes: bytes) -> bytes:
scalar = string_to_number(privkey_bytes) % CURVE_ORDER
if scalar == 0:
raise Exception('invalid EC private key scalar: zero')
privkey_32bytes = int.to_bytes(scalar, length=32, byteorder='big', signed=False)
return privkey_32bytes
def __repr__(self):
return f"<ECPrivkey {self.get_public_key_hex()}>"
@classmethod
def generate_random_key(cls):
randint = randrange(CURVE_ORDER)
ephemeral_exponent = int.to_bytes(randint, length=32, byteorder='big', signed=False)
return ECPrivkey(ephemeral_exponent)
def get_secret_bytes(self) -> bytes:
return int.to_bytes(self.secret_scalar, length=32, byteorder='big', signed=False)
def sign(self, msg_hash: bytes, sigencode=None) -> bytes:
if not (isinstance(msg_hash, bytes) and len(msg_hash) == 32):
raise Exception("msg_hash to be signed must be bytes, and 32 bytes exactly")
if sigencode is None:
sigencode = sig_string_from_r_and_s
privkey_bytes = self.secret_scalar.to_bytes(32, byteorder="big")
nonce_function = None
sig = create_string_buffer(64)
def sign_with_extra_entropy(extra_entropy):
ret = _libsecp256k1.secp256k1_ecdsa_sign(
_libsecp256k1.ctx, sig, msg_hash, privkey_bytes,
nonce_function, extra_entropy)
if not ret:
raise Exception('the nonce generation function failed, or the private key was invalid')
compact_signature = create_string_buffer(64)
_libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(_libsecp256k1.ctx, compact_signature, sig)
r = int.from_bytes(compact_signature[:32], byteorder="big")
s = int.from_bytes(compact_signature[32:], byteorder="big")
return r, s
r, s = sign_with_extra_entropy(extra_entropy=None)
counter = 0
while r >= 2**255: # grind for low R value https://github.com/bitcoin/bitcoin/pull/13666
counter += 1
extra_entropy = counter.to_bytes(32, byteorder="little")
r, s = sign_with_extra_entropy(extra_entropy=extra_entropy)
sig_string = sig_string_from_r_and_s(r, s)
self.verify_message_hash(sig_string, msg_hash)
sig = sigencode(r, s)
return sig
def sign_transaction(self, hashed_preimage: bytes) -> bytes:
return self.sign(hashed_preimage, sigencode=der_sig_from_r_and_s)
def sign_message(self, message: bytes, is_compressed: bool, algo=lambda x: sha256d(msg_magic(x))) -> bytes:
def bruteforce_recid(sig_string):
for recid in range(4):
sig65 = construct_sig65(sig_string, recid, is_compressed)
try:
self.verify_message_for_address(sig65, message, algo)
return sig65, recid
except Exception as e:
continue
else:
raise Exception("error: cannot sign message. no recid fits..")
message = to_bytes(message, 'utf8')
msg_hash = algo(message)
sig_string = self.sign(msg_hash, sigencode=sig_string_from_r_and_s)
sig65, recid = bruteforce_recid(sig_string)
return sig65
def decrypt_message(self, encrypted: Union[str, bytes], magic: bytes=b'BIE1') -> bytes:
encrypted = base64.b64decode(encrypted) # type: bytes
if len(encrypted) < 85:
raise Exception('invalid ciphertext: length')
magic_found = encrypted[:4]
ephemeral_pubkey_bytes = encrypted[4:37]
ciphertext = encrypted[37:-32]
mac = encrypted[-32:]
if magic_found != magic:
raise Exception('invalid ciphertext: invalid magic bytes')
try:
ephemeral_pubkey = ECPubkey(ephemeral_pubkey_bytes)
except InvalidECPointException as e:
raise Exception('invalid ciphertext: invalid ephemeral pubkey') from e
ecdh_key = (ephemeral_pubkey * self.secret_scalar).get_public_key_bytes(compressed=True)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
if mac != hmac_oneshot(key_m, encrypted[:-32], hashlib.sha256):
raise InvalidPassword()
return aes_decrypt_with_iv(key_e, iv, ciphertext)
def construct_sig65(sig_string: bytes, recid: int, is_compressed: bool) -> bytes:
comp = 4 if is_compressed else 0
return bytes([27 + recid + comp]) + sig_string
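# --- Editor's note: illustrative sketch, not part of the original module ---
# Round-tripping the ECIES helpers defined above: encrypt against a public key,
# decrypt with the matching private key.  The function below is hypothetical
# and only wires together methods that already exist in this file.
def _example_ecies_roundtrip(message: bytes = b"hello") -> bool:
    privkey = ECPrivkey.generate_random_key()
    pubkey = ECPubkey(privkey.get_public_key_bytes(compressed=True))
    ciphertext = pubkey.encrypt_message(message)  # base64-encoded "BIE1" blob
    return privkey.decrypt_message(ciphertext) == message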
|
py | 1a40e671f5040633d1674e60d2161ac7127b2f54 |
# finding the count of even numbers between 0 and 10.
#x=10%2
#print("x",x)
#y=7%2
#print("y",y)
# important note: spaces and tabs are significant in python!!!!
nums = [0,1,2,3,4,5,6,7,8,9,10]
count=0
for item in nums:
print("ev even a yan na:", item)
if item%2==0: # heger cift sayi ye yan na!
#bele cift sayi ya!
print("bele even a:", item)
count=count+1 # yek zede bike! mana wina ew e ku tu dijmeri.
print ("count:",count)
else:
print ("na ne even a, count zede neke! count=",count)
|
py | 1a40e6f538c74b213e02edaacd704dcff47157fe | from parameterized import parameterized
from test_plus.test import TestCase
from ...generic.tests.test_views import (
AuthorshipViewSetMixin,
GenericViewSetMixin,
OrderingViewSetMixin,
)
from ..factories import InstitutionFactory
from ..serializers import InstitutionSerializer
class InstitutionViewSetTestCase(
AuthorshipViewSetMixin, GenericViewSetMixin, OrderingViewSetMixin, TestCase
):
basename = "institution"
serializer_class = InstitutionSerializer
factory_class = InstitutionFactory
queries_less_than_limit = 11
ordering_fields = [
"comment",
"-comment",
"created_on",
"created_by__username",
"-created_by__username,comment",
]
def validate_item(self, item):
self.assertEqual(item["name"], self.obj.name)
self.assertEqual(item["comment"], self.obj.comment)
for i, tag in enumerate(item["tags"]):
self.assertEqual(tag, self.obj.tags.all()[i].name)
@parameterized.expand(
[
("CAT", ["CAT", "CATASTROPHE"]),
("cat", ["CAT", "CATASTROPHE"]),
("KITTY", ["KITTY"]),
("KIT", ["KITTY"]),
("INVALID", []),
]
)
def test_should_filter_by_name(self, query, expected_names):
InstitutionFactory(name="KITTY")
InstitutionFactory(name="CAT")
InstitutionFactory(name="CATASTROPHE")
self.login_required()
response = self.client.get(
self.get_url(name="list"),
content_type="application/json",
data={"query": query},
)
self.assertEqual(response.status_code, 200, response.json())
names = [item["name"] for item in response.json()["results"]]
self.assertCountEqual(expected_names, names)
|
py | 1a40ea27f8f8c1749fa2bbe39ea729fb7392c04b | #------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# Name: TestModelLowestPoint.py
# Description: Automatic Test of Lowest Point Model
# Requirements: ArcGIS Desktop Standard with Spatial Analyst Extension
#------------------------------------------------------------------------------
import arcpy
import os
import sys
import traceback
import TestUtilities
def RunTest():
try:
arcpy.AddMessage("Starting Test: LowestPoint")
if arcpy.CheckExtension("Spatial") == "Available":
arcpy.CheckOutExtension("Spatial")
else:
# Raise a custom exception
raise Exception("LicenseError")
# WORKAROUND
print("Creating New Scratch Workspace (Workaround)")
TestUtilities.createScratch()
# Verify the expected configuration exists
inputPolygonFC = os.path.join(TestUtilities.inputGDB, "samplePolygonArea")
inputSurface = os.path.join(TestUtilities.inputGDB, "Jbad_SRTM_USGS_EROS")
outputPointsFC = os.path.join(TestUtilities.outputGDB, "LowestPoint")
toolbox = TestUtilities.toolbox
# Check For Valid Input
objects2Check = []
objects2Check.extend([inputPolygonFC, inputSurface, toolbox])
for object2Check in objects2Check :
desc = arcpy.Describe(object2Check)
if desc == None :
raise Exception("Bad Input")
else :
print("Valid Object: " + desc.Name)
# Set environment settings
print("Running from: " + str(TestUtilities.currentPath))
print("Geodatabase path: " + str(TestUtilities.geodatabasePath))
arcpy.env.overwriteOutput = True
arcpy.env.scratchWorkspace = TestUtilities.scratchGDB
arcpy.ImportToolbox(toolbox, "VandR")
inputFeatureCount = int(arcpy.GetCount_management(inputPolygonFC).getOutput(0))
print("Input FeatureClass: " + str(inputPolygonFC))
print("Input Feature Count: " + str(inputFeatureCount))
if (inputFeatureCount < 1) :
print("Invalid Input Feature Count: " + str(inputFeatureCount))
########################################################3
# Execute the Model under test:
arcpy.LowestPoint_VandR(inputPolygonFC, inputSurface, outputPointsFC)
########################################################3
# Verify the results
outputFeatureCount = int(arcpy.GetCount_management(outputPointsFC).getOutput(0))
print("Output FeatureClass: " + str(outputPointsFC))
print("Output Feature Count: " + str(outputFeatureCount))
        if (outputFeatureCount < 1) :
print("Invalid Output Feature Count: " + str(outputFeatureCount))
raise Exception("Test Failed")
# WORKAROUND: delete scratch db
print("Deleting Scratch Workspace (Workaround)")
TestUtilities.deleteScratch()
print("Test Successful")
except arcpy.ExecuteError:
# Get the tool error messages
msgs = arcpy.GetMessages()
arcpy.AddError(msgs)
# return a system error code
sys.exit(-1)
except Exception as e:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Return python error messages for use in script tool or Python Window
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
# return a system error code
sys.exit(-1)
finally:
        # Check in the Spatial Analyst extension
arcpy.CheckInExtension("Spatial")
RunTest() |
py | 1a40ea74ac546102e2b6bbeeb183104322c9292c | '''
FRD Net, the function 'detect' in darknet has been modified to be able to receive cv2.imread as an input
see darknet.py for more information
'''
from os.path import splitext, basename, isdir
from os import makedirs, remove
import sys
import cv2
import numpy as np
import traceback
from src import FRD
from src.draw_BB import draw_bb
from WPOD_src.drawing_utils import draw_losangle
from WPOD_src.keras_utils import load_model, detect_lp
from WPOD_src.label import Label, lwrite, lread, Shape
from WPOD_src.utils import crop_region, image_files_from_folder, im2single
from darknet.python.darknet import detect
import src.quadrilateral_calculation as qucal
import darknet.python.darknet as dn
if __name__ == '__main__':
# vehicle detection
input_dir = 'samples/overlap_case'
output_dir = 'output'
vehicle_threshold = .5
vehicle_weights = 'data/vehicle-detector/yolo-voc.weights'
vehicle_netcfg = 'data/vehicle-detector/yolo-voc.cfg'
vehicle_dataset = 'data/vehicle-detector/voc.data'
vehicle_net = dn.load_net(vehicle_netcfg, vehicle_weights, 0)
vehicle_meta = dn.load_meta(vehicle_dataset)
imgs_paths = image_files_from_folder(input_dir)
imgs_paths.sort()
if not isdir(output_dir):
makedirs(output_dir)
print '\tSearching for vehicles using YOLO...'
for i, img_path in enumerate(imgs_paths):
print '\tScanning %s' % img_path
img = cv2.imread(img_path)
bname = basename(splitext(img_path)[0])
R, _ = detect(vehicle_net, vehicle_meta, img, thresh=vehicle_threshold)
R = [r for r in R if r[0] in ['car', 'bus']]
print '\t\t%d cars found' % len(R)
if len(R):
WH = np.array(img.shape[1::-1], dtype=float)
Lcars = []
for i, r in enumerate(R):
cx, cy, w, h = (np.array(r[2]) / np.concatenate((WH, WH))).tolist()
tl = np.array([cx - w / 2., cy - h / 2.])
br = np.array([cx + w / 2., cy + h / 2.])
label = Label(0, tl, br)
Lcars.append(label)
lwrite('%s/%s_cars.txt' % (output_dir, bname), Lcars)
# license plate detection
try:
# colors are BGR in opencv
YELLOW = (0, 255, 255)
RED = (0, 0, 255)
PINK = (232, 28, 232)
input_dir = output_dir
lp_threshold = 0.5
wpod_net_path = "data/lp-detector/wpod-net_update1.h5"
wpod_net = load_model(wpod_net_path)
print 'Searching for license plates using WPOD-NET'
for i, img_path in enumerate(imgs_paths):
print '\t Processing %s' % img_path
bname = splitext(basename(img_path))[0]
img = cv2.imread(img_path)
label_path = '%s/%s_cars.txt' % (output_dir, bname)
plates = []
car_labels = lread(label_path)
# remove the LP position information txt
remove('%s/%s_cars.txt' % (output_dir, bname))
for j, car_label in enumerate(car_labels):
car = crop_region(img, car_label)
ratio = float(max(car.shape[:2])) / min(car.shape[:2])
side = int(ratio * 288.)
bound_dim = min(side + (side % (2 ** 4)), 608)
print "\t\tvehicle %d, Bound dim: %d, ratio: %f" % (j, bound_dim, ratio)
Llp, LlpImgs, _ = detect_lp(wpod_net, im2single(car), bound_dim, 2 ** 4, (240, 80), lp_threshold)
if len(LlpImgs):
Ilp = LlpImgs[0]
Ilp = cv2.cvtColor(Ilp, cv2.COLOR_BGR2GRAY)
Ilp = cv2.cvtColor(Ilp, cv2.COLOR_GRAY2BGR)
s = Shape(Llp[0].pts)
# s.pts is the points for LP, it is a numpy array with shape(2, 4)
# this part is used to reconstruct the coordinates of LP into original image pixel scale
# also append j into the plates_cor to record its corresponding car
pts = s.pts * car_label.wh().reshape(2, 1) + car_label.tl().reshape(2, 1)
ptspx = pts * np.array(img.shape[1::-1], dtype=float).reshape(2, 1)
plates.append([j, ptspx])
# draw_losangle(img, ptspx, RED, 3)
# cv2.imwrite('%s/%s_lp.png' % (output_dir, bname), Ilp * 255.)
# writeShapes('%s/%s_lp.txt' % (output_dir, bname), [s])
# this part is used to detect the overlapped LP
plates_cor = [i[1] for i in plates]
non_overlap_plates = []
cars_processed = []
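            # Strategy for overlapping plates: run front/rear detection (FRD) on the union of the two
            # car boxes, then assign each plate to the car whose front/rear box covers/overlaps it best;
            # all remaining (non-overlapping) plates are drawn unchanged further below.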
if len(plates) > 1 and qucal.overlap(np.array(plates_cor)):
FRD_record = open(output_dir + '/%s.txt' % bname, 'w')
for ele in qucal.overlap(np.array(plates_cor)):
print '\t\t\toverlapped LP found:', ele.couple()
FRD_record.write('%s %s\n' % ('overlapped LP found:', ele.couple()))
car_1 = plates[ele.couple()[0]][0]
car_2 = plates[ele.couple()[1]][0]
cars_processed.append(car_1)
cars_processed.append(car_2)
print '\t\t\trelated car:', car_1, 'with', car_2
FRD_record.write('%s %d %s %d\n' % ('related car:', car_1, 'with', car_2))
uni_area = qucal.union_area(np.array([car_labels[car_1].tl(), car_labels[car_1].br()]),
np.array([car_labels[car_2].tl(), car_labels[car_2].br()]))
uni_img = crop_region(img, uni_area)
try:
frs, cate = FRD.fr_detect(uni_img)
fr_lst = []
for fr in frs:
fr_lst.append(Label(tl=fr.tl()*uni_area.wh() + uni_area.tl(),
br=fr.br()*uni_area.wh() + uni_area.tl()))
for k, fr in enumerate(fr_lst):
owner_car = None
if qucal.FRCar(fr, car_labels[car_1]).cover_rate() >= \
qucal.FRCar(fr, car_labels[car_2]).cover_rate():
print '\t\t\tfr:', k, 'car:', car_1, 'has better cover rate'
FRD_record.write('%s %d %s %d %s \n' % ('fr:', k, 'car:', car_1, 'has better cover rate'))
owner_car = car_1
non_overlap_plates.append(ele.larger_plate)
if qucal.overlap(np.array([ele.larger_plate, fr.quadrilateral_format() *
np.array(img.shape[1::-1], dtype=float).reshape(2, 1)])):
print '\t\t\tthis plate belongs to car:', car_1
FRD_record.write('%s %d\n' % ('this plate belongs to car:', car_1))
else:
print '\t\t\tfr:', k, 'car:', car_2, 'has better cover rate'
FRD_record.write('%s %d %s %d %s \n' % ('fr:', k, 'car:', car_2, 'has better cover rate'))
owner_car = car_2
non_overlap_plates.append(ele.larger_plate)
if qucal.overlap(np.array([ele.larger_plate, fr.quadrilateral_format() *
np.array(img.shape[1::-1], dtype=float).reshape(2, 1)])):
print '\t\t\tthis plate belongs to car:', car_2
FRD_record.write('%s %d\n' % ('this plate belongs to car:', car_2))
# draw front & rear BB
draw_bb(img, fr, cate=cate[k], index=str(owner_car), text_color=(255, 255, 255))
except:
traceback.print_exc()
FRD_record.close()
# put the other plates into the list
for plate in plates:
if plate[0] in cars_processed:
continue
else:
non_overlap_plates.append(plate[1])
for plate_cor in non_overlap_plates:
# draw plates
draw_losangle(img, plate_cor, RED, 3)
for j, car_label in enumerate(car_labels):
# draw car BB
draw_bb(img, car_label, cate='car', index=str(j), bg_color=YELLOW, text_color=(0, 0, 0))
cv2.imwrite('%s/%s_output.png' % (output_dir, bname), img)
except:
traceback.print_exc()
sys.exit(1)
sys.exit(0)
|
py | 1a40ea9ee373a21e1e018d8ae6c4c732cc651b9f | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-04 20:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("events", "0003_auto_20170903_2206")]
operations = [
migrations.AlterField(
model_name="registration",
name="admin_reason",
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name="registration",
name="feedback",
field=models.CharField(blank=True, max_length=255),
),
]
|
py | 1a40eb093dadaffc1a6f84d91b19a043b2d87604 | import bondhon
from docx import Document
def handle_paragraph(from_encoding: str, to_encoding: str, p):
inline = p.runs
for i in range(len(inline)):
inline[i].text = bondhon.convert(from_encoding, to_encoding, inline[i].text)
def handle_table(from_encoding, to_encoding, table):
for row in table.rows:
for cell in row.cells:
convert_document(from_encoding, to_encoding, cell)
def convert_document(from_encoding: str, to_encoding: str, document: Document):
print(document)
for p in document.paragraphs:
handle_paragraph(from_encoding, to_encoding, p)
for table in document.tables:
handle_table(from_encoding, to_encoding, table)
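# Usage sketch (file names are placeholders; the encoding labels accepted by
# bondhon.convert depend on the installed bondhon version):
#
#   doc = Document('input.docx')
#   convert_document('bijoy', 'unicode', doc)
#   doc.save('output.docx')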
|
py | 1a40ebd591fee8f54dc4fe1d13de25f082fed597 | import math
import time
import datetime
class LoadBar:
"""
"""
def __init__(self, max=100, size=20, head='.', body='.', border_left='[', border_right=']', show_step=True,
show_percentage=True, show_eta=True, title=None, show_total_time=True, show_time=False):
"""
:param max: int: Max value of the load
"""
self.loading = False
self.max = max
self.size = size
self.head = head
self.body = body
self.border_left = border_left
self.border_right = border_right
self.show_step = show_step
self.show_percentage = show_percentage
# ----- ETA -----
self.show_eta = show_eta
self.eta = None
self.eta_last_i_t = None
self.start_time = None
self.stop_time = None
self.show_time = show_time
self.show_total_time = show_total_time or show_eta or show_time
# ----- End ETA -----
self.title = title
self._i = 0 # State of the progress
@property
def i(self):
return self._i
@i.setter
def i(self, i):
if self.use_time:
# Do some work to see how long it is gonna last
if self.eta_last_i_t is not None:
if self.eta_last_i_t[0] > i:
# Don't want to go backward
self.eta = None
self.eta_last_i_t = None
elif self.eta_last_i_t[0] < i:
# Do nothing if this is the same
t = time.time()
eta = (t - self.eta_last_i_t[1]) * self.max / (i - self.eta_last_i_t[0])
self.eta = eta if self.eta is None else 0.5 * eta + 0.5 * self.eta
self.eta_last_i_t = (i, t)
else:
# First iteration, I have to set up for the next one
self.eta_last_i_t = (i, time.time())
self._i = i
@property
def use_time(self):
return self.show_eta or self.show_total_time
def start(self, end=''):
"""
:return:
"""
self.loading = True
if self.use_time:
self.start_time = time.time()
self.update(step=0, end=end)
def update(self, step=None, to_add=None, end='', start='\r'):
"""
:param start:
:param end:
:param step:
:param to_add:
:return:
"""
if step is None:
to_add = 1 if to_add is None else to_add
self.i = self.i + to_add
else:
self.i = step
l = list()
if self.title is not None: l.append(self.title)
if self.show_step: l.append(self._get_step())
if self.show_percentage: l.append(self._get_percentage())
l.append(self._get_bar())
if self.show_time or (self.show_total_time and not self.loading): l.append(self._get_time())
if self.show_eta and self.loading: l.append(self._get_eta())
s = ' '.join(l)
self._print(s, end=end, start=start)
def end(self):
self.loading = False
if self.use_time:
self.stop_time = time.time()
self.update(step=self.max, end='\n')
def _print(self, to_print, end='', flush=True, start='\r'):
"""
Rewrite print function with default args
:param to_print:
:param end:
:param flush:
:param start
:return:
"""
# \r used to put the cursor at the beginning of the line
print(f'{start}{to_print}', end=end, flush=flush)
def _get_bar(self):
done = int(min(self.i, self.max) * self.size // self.max)
todo = self.size - done
todo_head = min(todo, 1) # 1 or 0
todo_blank = todo - todo_head
return f'{self.border_left}{self.body * done}{self.head * todo_head}{" " * todo_blank}{self.border_right}'
def _get_step(self):
if not self.show_step:
return ''
digit_nb = int(1 + math.floor(math.log10(self.max)))
return '{0:{1}}'.format(self.i, digit_nb) + f'/{self.max}'
def _get_percentage(self):
if not self.show_percentage:
return ''
percentage = self.i * 100 / self.max
percentage_string = f'{percentage:3.0f}%'
if self.show_step:
percentage_string = f'({percentage_string})'
return percentage_string
def _get_time(self):
if self.loading:
if not self.show_time:
return ''
else:
current_time = time.time() - self.start_time
current_time = datetime.timedelta(seconds=int(current_time))
return f'Time {current_time}'
else:
if not self.show_total_time:
return ''
if self.start_time is not None and self.stop_time is not None:
total_time = int(self.stop_time - self.start_time)
total_time = datetime.timedelta(seconds=total_time)
return f'Time {total_time}'
def _get_eta(self):
eta = '-:--:--' # if self.eta is None
if self.loading:
if not self.show_eta:
return ''
if self.eta is not None:
eta = self.eta * (self.max - self.i) / self.max
eta = datetime.timedelta(seconds=int(eta))
return f'ETA {eta}'
else:
return ''
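# Minimal usage sketch (illustrative values, not part of the class above;
# `time` is already imported at the top of this module):
#
#   bar = LoadBar(max=50, title='Download')
#   bar.start()
#   for _ in range(50):
#       time.sleep(0.1)  # stand-in for real work
#       bar.update()     # advances by one step
#   bar.end()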
|
py | 1a40ec7420fa9badb3f32eafd690160fcf1e8d8f | import base64
import json
import os
import tempfile
import zipfile
none = "d3043820717d74d9a17694c176d39733"
# region Application
class Application:
def __init__(self, name):
self.name = name
# endregion
# region Environment
class Environment:
def __init__(self, name, application_id, providers=none, locations=none):
self.name = name
self.application_id = application_id
self.preferences = {'providers': providers, 'locations': locations}
# endregion
# region Environment
class Function:
def __init__(
self,
name,
environment_id,
directory,
handler,
runtime,
memory,
timeout):
self.name = name
self.environment_id = environment_id
self.directory = directory
self.handler = handler
self.runtime = runtime
self.memory = memory
self.timeout = timeout
# endregion
class ApplicationCreationRequest:
def __init__(self, application):
self.application = application
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
class EnvironmentCreationRequest:
def __init__(self, environment):
self.environment = environment
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
class FunctionCreationRequest:
def __init__(self, function, print_output=True):
self.should_print_output = print_output
self.function = self.rebuildFunctionInlineCode(function)
def print_output(self, output):
if self.should_print_output is True:
print(output)
def rebuildFunctionInlineCode(self, function):
directory = function.directory
handler = function.handler
with tempfile.NamedTemporaryFile() as temp:
self.zip(directory, temp.name)
temp.seek(0)
base64content = base64.b64encode(temp.read())
function.code = {'source': base64content, 'handler': handler}
del function.directory
del function.handler
return function
def zip(self, src, dst):
zf = zipfile.ZipFile(dst, "w", zipfile.ZIP_DEFLATED)
abs_src = os.path.abspath(src)
for dirname, subdirs, files in os.walk(src):
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = absname[len(abs_src) + 1:]
self.print_output(
"collecting file {}".format(
os.path.join(
dirname, filename)))
zf.write(absname, arcname)
zf.close()
def toJSON(self):
del self.should_print_output
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
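# Usage sketch (all literal values below are placeholders):
#
#   fn = Function(name='my-fn', environment_id='env-123', directory='./fn_code',
#                 handler='handler.main', runtime='python3.6', memory=128, timeout=30)
#   payload = FunctionCreationRequest(fn).toJSON()  # zips ./fn_code and inlines it as base64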
|
py | 1a40ecfd095406d8f419fb3c12b409e37d1b0a87 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import os
import sys
from setuptools import setup, find_packages
# pylint: disable=redefined-builtin
here = os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name
with open(os.path.join(here, "README.rst"), encoding="utf-8") as fid:
long_description = fid.read() # pylint: disable=invalid-name
with open(os.path.join(here, "requirements.txt"), encoding="utf-8") as fid:
install_requires = [line for line in fid.read().splitlines() if line.strip()]
setup(
name="aas-core-meta",
version="2021.11.20a2",
description="Provide meta-models for Asset Administration Shell information model.",
long_description=long_description,
url="https://github.com/aas-core-works/aas-core-meta",
author="Nico Braunisch, Marko Ristin, Robert Lehmann, Marcin Sadurski, Manuel Sauer",
author_email="[email protected]",
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.8",
],
license="License :: OSI Approved :: MIT License",
keywords="asset administration shell,design-by-contract,meta-model",
packages=find_packages(exclude=["tests"]),
install_requires=install_requires,
# fmt: off
extras_require={
"dev": [
"black==21.11b0",
"mypy==0.910",
],
},
# fmt: on
py_modules=["aas_core_meta"],
package_data={"aas_core_meta": ["py.typed"]},
data_files=[(".", ["LICENSE", "README.rst", "requirements.txt"])],
)
|
py | 1a40ed1a17dea5fd3094e5c2c3a166e8618f7b3c | import argparse
from io import BytesIO
from urllib.parse import unquote_plus
from urllib.request import urlopen
from flask import Flask, request, send_file
from waitress import serve
from ..bg import remove
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def index():
file_content = ""
if request.method == "POST":
if "file" not in request.files:
return {"error": "missing post form param 'file'"}, 400
file_content = request.files["file"].read()
if request.method == "GET":
url = request.args.get("url", type=str)
if url is None:
return {"error": "missing query param 'url'"}, 400
file_content = urlopen(unquote_plus(url)).read()
if file_content == "":
return {"error": "File content is empty"}, 400
alpha_matting = "a" in request.values
af = request.values.get("af", type=int, default=240)
ab = request.values.get("ab", type=int, default=10)
ae = request.values.get("ae", type=int, default=10)
model = request.args.get("model", type=str, default="u2net")
if model not in ("u2net", "u2netp"):
return {"error": "invalid query param 'model'"}, 400
try:
return send_file(
BytesIO(
remove(
file_content,
model_name=model,
alpha_matting=alpha_matting,
alpha_matting_foreground_threshold=af,
alpha_matting_background_threshold=ab,
alpha_matting_erode_structure_size=ae
)
),
mimetype="image/png",
)
except Exception as e:
app.logger.exception(e, exc_info=True)
return {"error": "oops, something went wrong!"}, 500
def main():
ap = argparse.ArgumentParser()
ap.add_argument(
"-a",
"--addr",
default="0.0.0.0",
type=str,
help="The IP address to bind to.",
)
ap.add_argument(
"-p",
"--port",
default=5000,
type=int,
help="The port to bind to.",
)
args = ap.parse_args()
serve(app, host=args.addr, port=args.port)
if __name__ == "__main__":
main()
|
py | 1a40ef92eab63ba9383949c889af98b0548e1735 | import argparse
import json
import os
import random
import time
import numpy as np
import torch.distributed as dist
import torch.utils.data.distributed
from apex import amp
from apex.parallel import DistributedDataParallel
from warpctc_pytorch import CTCLoss
from data.data_loader import AudioDataLoader, SpectrogramDataset, BucketingSampler, DistributedBucketingSampler
from decoder import GreedyDecoder
from logger import VisdomLogger, TensorBoardLogger
from model import DeepSpeech, supported_rnns
from test import evaluate
from utils import reduce_tensor, check_loss
parser = argparse.ArgumentParser(description='DeepSpeech training')
parser.add_argument('--train-manifest', metavar='DIR',
help='path to train manifest csv', default='data/cv-valid-train_manifest_en.csv')
parser.add_argument('--val-manifest', metavar='DIR',
help='path to validation manifest csv', default='data/cv-valid-test_manifest_en.csv')
parser.add_argument('--sample-rate', default=16000, type=int, help='Sample rate')
parser.add_argument('--batch-size', default=20, type=int, help='Batch size for training')
parser.add_argument('--num-workers', default=4, type=int, help='Number of workers used in data-loading')
parser.add_argument('--labels-path', default='labels.json', help='Contains all characters for transcription')
parser.add_argument('--window-size', default=.02, type=float, help='Window size for spectrogram in seconds')
parser.add_argument('--window-stride', default=.01, type=float, help='Window stride for spectrogram in seconds')
parser.add_argument('--window', default='hamming', help='Window type for spectrogram generation')
parser.add_argument('--hidden-size', default=800, type=int, help='Hidden size of RNNs')
parser.add_argument('--hidden-layers', default=5, type=int, help='Number of RNN layers')
parser.add_argument('--rnn-type', default='gru', help='Type of the RNN. rnn|gru|lstm are supported')
parser.add_argument('--epochs', default=70, type=int, help='Number of training epochs')
parser.add_argument('--cuda', dest='cuda', action='store_true', help='Use cuda to train model')
parser.add_argument('--lr', '--learning-rate', default=3e-4, type=float, help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--max-norm', default=400, type=int, help='Norm cutoff to prevent explosion of gradients')
parser.add_argument('--learning-anneal', default=1.1, type=float, help='Annealing applied to learning rate every epoch')
parser.add_argument('--silent', dest='silent', action='store_true', help='Turn off progress tracking per iteration')
parser.add_argument('--checkpoint', dest='checkpoint', action='store_true', help='Enables checkpoint saving of model')
parser.add_argument('--checkpoint-per-batch', default=0, type=int, help='Save checkpoint per batch. 0 means never save')
parser.add_argument('--visdom', dest='visdom', action='store_true', help='Turn on visdom graphing')
parser.add_argument('--tensorboard', dest='tensorboard', action='store_true', help='Turn on tensorboard graphing')
parser.add_argument('--log-dir', default='visualize/deepspeech_final', help='Location of tensorboard log')
parser.add_argument('--log-params', dest='log_params', action='store_true', help='Log parameter values and gradients')
parser.add_argument('--id', default='Deepspeech training', help='Identifier for visdom/tensorboard run')
parser.add_argument('--save-folder', default='models/', help='Location to save epoch models')
parser.add_argument('--model-path', default='models/deepspeech_final_cv1_252hr.pth',
help='Location to save best validation model')
parser.add_argument('--continue-from', default='', help='Continue from checkpoint model')
parser.add_argument('--finetune', dest='finetune', action='store_true',
help='Finetune the model from checkpoint "continue_from"')
parser.add_argument('--augment', dest='augment', action='store_true', help='Use random tempo and gain perturbations.')
parser.add_argument('--noise-dir', default=None,
help='Directory to inject noise into audio. If default, noise Inject not added')
parser.add_argument('--noise-prob', default=0.4, help='Probability of noise being added per sample')
parser.add_argument('--noise-min', default=0.0,
help='Minimum noise level to sample from. (1.0 means all noise, not original signal)', type=float)
parser.add_argument('--noise-max', default=0.5,
help='Maximum noise levels to sample from. Maximum 1.0', type=float)
parser.add_argument('--no-shuffle', dest='no_shuffle', action='store_true',
help='Turn off shuffling and sample from dataset based on sequence length (smallest to largest)')
parser.add_argument('--no-sortaGrad', dest='no_sorta_grad', action='store_true',
help='Turn off ordering of dataset on sequence length for the first epoch.')
parser.add_argument('--no-bidirectional', dest='bidirectional', action='store_false', default=True,
help='Turn off bi-directional RNNs, introduces lookahead convolution')
parser.add_argument('--dist-url', default='tcp://127.0.0.1:1550', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--rank', default=0, type=int,
help='The rank of this process')
parser.add_argument('--gpu-rank', default=None,
help='If using distributed parallel for multi-gpu, sets the GPU for the process')
parser.add_argument('--seed', default=123456, type=int, help='Seed to generators')
parser.add_argument('--opt-level', type=str)
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
torch.manual_seed(123456)
torch.cuda.manual_seed_all(123456)
def to_np(x):
return x.cpu().numpy()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if __name__ == '__main__':
args = parser.parse_args()
# Set seeds for determinism
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
args.distributed = args.world_size > 1
main_proc = True
device = torch.device("cuda" if args.cuda else "cpu")
if args.distributed:
if args.gpu_rank:
torch.cuda.set_device(int(args.gpu_rank))
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
main_proc = args.rank == 0 # Only the first proc should save models
save_folder = args.save_folder
os.makedirs(save_folder, exist_ok=True) # Ensure save folder exists
loss_results, cer_results, wer_results = torch.Tensor(args.epochs), torch.Tensor(args.epochs), torch.Tensor(
args.epochs)
best_wer = None
if main_proc and args.visdom:
visdom_logger = VisdomLogger(args.id, args.epochs)
if main_proc and args.tensorboard:
tensorboard_logger = TensorBoardLogger(args.id, args.log_dir, args.log_params)
avg_loss, start_epoch, start_iter, optim_state = 0, 0, 0, None
if args.continue_from: # Starting from previous model
print("Loading checkpoint model %s" % args.continue_from)
package = torch.load(args.continue_from, map_location=lambda storage, loc: storage)
model = DeepSpeech.load_model_package(package)
labels = model.labels
audio_conf = model.audio_conf
if not args.finetune: # Don't want to restart training
optim_state = package['optim_dict']
start_epoch = int(package.get('epoch', 1)) - 1 # Index start at 0 for training
start_iter = package.get('iteration', None)
if start_iter is None:
start_epoch += 1 # We saved model after epoch finished, start at the next epoch.
start_iter = 0
else:
start_iter += 1
avg_loss = int(package.get('avg_loss', 0))
loss_results, cer_results, wer_results = package['loss_results'], package['cer_results'], \
package['wer_results']
best_wer = wer_results[start_epoch]
if main_proc and args.visdom: # Add previous scores to visdom graph
visdom_logger.load_previous_values(start_epoch, package)
if main_proc and args.tensorboard: # Previous scores to tensorboard logs
tensorboard_logger.load_previous_values(start_epoch, package)
else:
with open(args.labels_path) as label_file:
labels = str(''.join(json.load(label_file)))
audio_conf = dict(sample_rate=args.sample_rate,
window_size=args.window_size,
window_stride=args.window_stride,
window=args.window,
noise_dir=args.noise_dir,
noise_prob=args.noise_prob,
noise_levels=(args.noise_min, args.noise_max))
rnn_type = args.rnn_type.lower()
assert rnn_type in supported_rnns, "rnn_type should be either lstm, rnn or gru"
model = DeepSpeech(rnn_hidden_size=args.hidden_size,
nb_layers=args.hidden_layers,
labels=labels,
rnn_type=supported_rnns[rnn_type],
audio_conf=audio_conf,
bidirectional=args.bidirectional)
decoder = GreedyDecoder(labels)
train_dataset = SpectrogramDataset(audio_conf=audio_conf, manifest_filepath=args.train_manifest, labels=labels,
normalize=True, augment=args.augment)
test_dataset = SpectrogramDataset(audio_conf=audio_conf, manifest_filepath=args.val_manifest, labels=labels,
normalize=True, augment=False)
if not args.distributed:
train_sampler = BucketingSampler(train_dataset, batch_size=args.batch_size)
else:
train_sampler = DistributedBucketingSampler(train_dataset, batch_size=args.batch_size,
num_replicas=args.world_size, rank=args.rank)
train_loader = AudioDataLoader(train_dataset,
num_workers=args.num_workers, batch_sampler=train_sampler)
test_loader = AudioDataLoader(test_dataset, batch_size=args.batch_size,
num_workers=args.num_workers)
if (not args.no_shuffle and start_epoch != 0) or args.no_sorta_grad:
print("Shuffling batches for the following epochs")
train_sampler.shuffle(start_epoch)
model = model.to(device)
parameters = model.parameters()
optimizer = torch.optim.SGD(parameters, lr=args.lr,
momentum=args.momentum, nesterov=True, weight_decay=1e-5)
if optim_state is not None:
optimizer.load_state_dict(optim_state)
model, optimizer = amp.initialize(model, optimizer,
opt_level=args.opt_level,
keep_batchnorm_fp32=args.keep_batchnorm_fp32,
loss_scale=args.loss_scale)
if args.distributed:
model = DistributedDataParallel(model)
print(model)
print("Number of parameters: %d" % DeepSpeech.get_param_size(model))
criterion = CTCLoss()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
for epoch in range(start_epoch, args.epochs):
model.train()
end = time.time()
start_epoch_time = time.time()
for i, (data) in enumerate(train_loader, start=start_iter):
if i == len(train_sampler):
break
inputs, targets, input_percentages, target_sizes = data
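            # recover each utterance's true spectrogram length (in frames) from the padded batch width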
input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
# measure data loading time
data_time.update(time.time() - end)
inputs = inputs.to(device)
out, output_sizes = model(inputs, input_sizes)
out = out.transpose(0, 1) # TxNxH
float_out = out.float() # ensure float32 for loss
loss = criterion(float_out, targets, output_sizes, target_sizes).to(device)
loss = loss / inputs.size(0) # average the loss by minibatch
if args.distributed:
loss = loss.to(device)
loss_value = reduce_tensor(loss, args.world_size).item()
else:
loss_value = loss.item()
# Check to ensure valid loss was calculated
valid_loss, error = check_loss(loss, loss_value)
if valid_loss:
optimizer.zero_grad()
# compute gradient
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_norm)
optimizer.step()
else:
print(error)
print('Skipping grad update')
loss_value = 0
avg_loss += loss_value
losses.update(loss_value, inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if not args.silent:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
(epoch + 1), (i + 1), len(train_sampler), batch_time=batch_time, data_time=data_time, loss=losses))
if args.checkpoint_per_batch > 0 and i > 0 and (i + 1) % args.checkpoint_per_batch == 0 and main_proc:
file_path = '%s/deepspeech_checkpoint_epoch_%d_iter_%d.pth' % (save_folder, epoch + 1, i + 1)
print("Saving checkpoint model to %s" % file_path)
torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, iteration=i,
loss_results=loss_results,
wer_results=wer_results, cer_results=cer_results, avg_loss=avg_loss),
file_path)
del loss, out, float_out
avg_loss /= len(train_sampler)
epoch_time = time.time() - start_epoch_time
print('Training Summary Epoch: [{0}]\t'
'Time taken (s): {epoch_time:.0f}\t'
'Average Loss {loss:.3f}\t'.format(epoch + 1, epoch_time=epoch_time, loss=avg_loss))
start_iter = 0 # Reset start iteration for next epoch
with torch.no_grad():
wer, cer, output_data = evaluate(test_loader=test_loader,
device=device,
model=model,
decoder=decoder,
target_decoder=decoder)
loss_results[epoch] = avg_loss
wer_results[epoch] = wer
cer_results[epoch] = cer
print('Validation Summary Epoch: [{0}]\t'
'Average WER {wer:.3f}\t'
'Average CER {cer:.3f}\t'.format(
epoch + 1, wer=wer, cer=cer))
values = {
'loss_results': loss_results,
'cer_results': cer_results,
'wer_results': wer_results
}
if args.visdom and main_proc:
visdom_logger.update(epoch, values)
if args.tensorboard and main_proc:
tensorboard_logger.update(epoch, values, model.named_parameters())
values = {
'Avg Train Loss': avg_loss,
'Avg WER': wer,
'Avg CER': cer
}
if main_proc and args.checkpoint:
file_path = '%s/deepspeech_%d.pth.tar' % (save_folder, epoch + 1)
torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, loss_results=loss_results,
wer_results=wer_results, cer_results=cer_results),
file_path)
# anneal lr
for g in optimizer.param_groups:
g['lr'] = g['lr'] / args.learning_anneal
print('Learning rate annealed to: {lr:.6f}'.format(lr=g['lr']))
if main_proc and (best_wer is None or best_wer > wer):
print("Found better validated model, saving to %s" % args.model_path)
torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, loss_results=loss_results,
wer_results=wer_results, cer_results=cer_results)
, args.model_path)
best_wer = wer
avg_loss = 0
if not args.no_shuffle:
print("Shuffling batches...")
train_sampler.shuffle(epoch)
|
py | 1a40effde14f63d46a048bb3eb1e65002660b4cd | """
stdint
======
Although Python has native support for arbitrary-precision integers,
Javascript by default uses 64-bit floats as the only numeric type,
meaning they cannot exactly represent integers wider than 53 bits.
Therefore, in Javascript, 64-bit integers are stored as an array
of 2 numbers. Likewise, 128-bit integers are stored as an array of 4
numbers. These functions handle the conversion of native Python integers
to and from a Javascript-like notation, to simplify integration with
data transfer objects.
This also provides routines to convert to and from fixed-width
integers in both catbuffer and data-transfer objects, as well
as extract high- and low-bit patterns from the types.
The module is named after <stdint.h>, which describes fixed-width
(standard) integers in C, even though it has no relationship
in terms of functionality.
"""
from __future__ import annotations
import typing
__all__ = [
# DTO Types
'I8DTOType',
'U8DTOType',
'U16DTOType',
'U24DTOType',
'U32DTOType',
'U64DTOType',
'U128DTOType',
# Byte sizes
'I8_BYTES',
'U8_BYTES',
'U16_BYTES',
'U24_BYTES',
'U32_BYTES',
'U64_BYTES',
'U128_BYTES',
# I8
# 'i8_high',
# 'i8_low',
'i8_iter_from_catbuffer',
'i8_iter_from_dto',
'i8_iter_to_catbuffer',
'i8_iter_to_dto',
'i8_from_catbuffer',
'i8_from_dto',
'i8_to_catbuffer',
'i8_to_dto',
# U8
'u8_high',
'u8_low',
'u8_iter_from_catbuffer',
'u8_iter_from_dto',
'u8_iter_to_catbuffer',
'u8_iter_to_dto',
'u8_from_catbuffer',
'u8_from_dto',
'u8_to_catbuffer',
'u8_to_dto',
# U16
'u16_high',
'u16_low',
'u16_iter_from_catbuffer',
'u16_iter_from_dto',
'u16_iter_to_catbuffer',
'u16_iter_to_dto',
'u16_from_catbuffer',
'u16_from_dto',
'u16_to_catbuffer',
'u16_to_dto',
# U24
'u24_high',
'u24_low',
'u24_iter_from_catbuffer',
'u24_iter_from_dto',
'u24_iter_to_catbuffer',
'u24_iter_to_dto',
'u24_from_catbuffer',
'u24_from_dto',
'u24_to_catbuffer',
'u24_to_dto',
# U32
'u32_high',
'u32_low',
'u32_iter_from_catbuffer',
'u32_iter_from_dto',
'u32_iter_to_catbuffer',
'u32_iter_to_dto',
'u32_from_catbuffer',
'u32_from_dto',
'u32_to_catbuffer',
'u32_to_dto',
# U64
'u64_high',
'u64_low',
'u64_iter_from_catbuffer',
'u64_iter_from_dto',
'u64_iter_to_catbuffer',
'u64_iter_to_dto',
'u64_from_catbuffer',
'u64_from_dto',
'u64_to_catbuffer',
'u64_to_dto',
# U128
'u128_high',
'u128_low',
'u128_iter_from_catbuffer',
'u128_iter_from_dto',
'u128_iter_to_catbuffer',
'u128_iter_to_dto',
'u128_from_catbuffer',
'u128_from_dto',
'u128_to_catbuffer',
'u128_to_dto',
]
U4_BITS = 4
U8_BITS = 8
U16_BITS = 16
U24_BITS = 24
U32_BITS = 32
U64_BITS = 64
U128_BITS = 128
U8_BYTES = U8_BITS // 8
U16_BYTES = U16_BITS // 8
U24_BYTES = U24_BITS // 8
U32_BYTES = U32_BITS // 8
U64_BYTES = U64_BITS // 8
U128_BYTES = U128_BITS // 8
U4_MAX = 0xF
U8_MAX = 0xFF
U16_MAX = 0xFFFF
U24_MAX = 0xFFFFFF
U32_MAX = 0xFFFFFFFF
U64_MAX = 0xFFFFFFFFFFFFFFFF
U128_MAX = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
I8_BITS = 8
I8_BYTES = I8_BITS // 8
I8_MAX = 0x7F
I8_MIN = -0x80
U8DTOType = int
U16DTOType = int
U24DTOType = int
U32DTOType = int
U64DTOType = typing.Sequence[U32DTOType]
U128DTOType = typing.Sequence[U64DTOType]
YieldIntType = typing.Generator[int, None, None]
YieldBytesType = typing.Generator[bytes, None, None]
I8DTOType = int
# HELPERS
def check_overflow(within_range: bool):
"""Raise exception if overflow."""
if not within_range:
raise OverflowError
def high(max: int, bits: int, mask: int) -> typing.Callable[[int], int]:
def wrapper(value: int) -> int:
check_overflow(0 <= value <= max)
return (value >> bits) & mask
wrapper.__name__ = f'u{2*bits}_high'
wrapper.__qualname__ = wrapper.__name__
wrapper.__doc__ = f'Get high {bits} from {2*bits}-bit integer.'
wrapper.__module__ = __name__
return wrapper
def low(max: int, bits: int, mask: int) -> typing.Callable[[int], int]:
def wrapper(value: int) -> int:
check_overflow(0 <= value <= max)
return value & mask
wrapper.__name__ = f'u{2*bits}_low'
wrapper.__qualname__ = wrapper.__name__
wrapper.__doc__ = f'Get low {bits} from {2*bits}-bit integer.'
wrapper.__module__ = __name__
return wrapper
def to_catbuffer_impl(size: int, signed: bool = False) -> typing.Callable[[int], bytes]:
def wrapper(value: int, signed: bool = False) -> bytes:
return value.to_bytes(size, 'little', signed=signed)
return wrapper
def to_catbuffer(bits: int, signed: bool = False) -> typing.Callable[[int], bytes]:
cb = to_catbuffer_impl(bits // 8, signed=signed)
def wrapper(value: int, signed: bool = False) -> bytes:
return cb(value)
wrapper.__name__ = f'u{bits}_to_catbuffer'
wrapper.__qualname__ = wrapper.__name__
wrapper.__doc__ = f'Convert {bits}-bit integer to catbuffer.'
wrapper.__module__ = __name__
return wrapper
def iter_to_catbuffer(bits: int, signed: bool = False):
cb = to_catbuffer_impl(bits // 8, signed=signed)
def wrapper(iterable, signed: bool = False):
for value in iterable:
yield cb(value)
wrapper.__name__ = f'u{bits}_iter_to_catbuffer'
wrapper.__qualname__ = wrapper.__name__
wrapper.__doc__ = f'Iteratively convert {bits}-bit integers to catbuffer.'
wrapper.__module__ = __name__
return wrapper
def from_catbuffer_impl(bits: int, signed: bool = False) -> typing.Callable[[bytes], int]:
def wrapper(catbuffer: bytes, signed: bool = False) -> int:
return int.from_bytes(catbuffer, 'little', signed=signed)
return wrapper
def from_catbuffer(bits: int, signed: bool = False) -> typing.Callable[[bytes], int]:
size = bits // 8
cb = from_catbuffer_impl(size, signed=signed)
def wrapper(catbuffer: bytes, signed: bool = False) -> int:
if len(catbuffer) > size:
raise OverflowError('bytes too big to convert')
return cb(catbuffer)
wrapper.__name__ = f'u{bits}_from_catbuffer'
wrapper.__qualname__ = wrapper.__name__
wrapper.__doc__ = f'Convert catbuffer to {bits}-bit integer.'
wrapper.__module__ = __name__
return wrapper
def iter_from_catbuffer(bits: int, signed: bool = False) -> typing.Callable[[bytes], YieldIntType]:
size = bits // 8
cb = from_catbuffer_impl(size, signed=signed)
def wrapper(catbuffer: bytes, signed: bool = False) -> YieldIntType:
length = len(catbuffer)
if length % size != 0:
raise ValueError(f'iter from_catbuffer requires multiple of {size}.')
for i in range(0, length, size):
start = i
stop = start + size
yield cb(catbuffer[start:stop])
wrapper.__name__ = f'u{bits}_iter_from_catbuffer'
wrapper.__qualname__ = wrapper.__name__
wrapper.__doc__ = f'Iteratively convert catbuffer to {bits}-bit integers.'
wrapper.__module__ = __name__
return wrapper
def iter_to_dto(bits: int, cb):
def wrapper(iterable):
for value in iterable:
yield cb(value)
wrapper.__name__ = f'u{bits}_iter_to_dto'
wrapper.__qualname__ = wrapper.__name__
wrapper.__doc__ = f'Iteratively convert {bits}-bit integers to DTO.'
wrapper.__module__ = __name__
return wrapper
def iter_from_dto(bits: int, cb):
def wrapper(iterable):
for value in iterable:
yield cb(value)
wrapper.__name__ = f'u{bits}_iter_from_dto'
wrapper.__qualname__ = wrapper.__name__
wrapper.__doc__ = f'Iteratively convert DTOs to {bits}-bit integers.'
wrapper.__module__ = __name__
return wrapper
# UINT8
def u8_to_dto(value: int) -> U8DTOType:
"""Convert 8-bit int to DTO."""
check_overflow(0 <= value <= U8_MAX)
return value
def u8_from_dto(dto: U8DTOType) -> int:
"""Convert DTO to 8-bit int."""
check_overflow(0 <= dto <= U8_MAX)
return dto
u8_high = high(U8_MAX, U4_BITS, U4_MAX)
u8_low = low(U8_MAX, U4_BITS, U4_MAX)
u8_to_catbuffer = to_catbuffer(U8_BITS)
u8_from_catbuffer = from_catbuffer(U8_BITS)
u8_iter_to_catbuffer = iter_to_catbuffer(U8_BITS)
u8_iter_from_catbuffer = iter_from_catbuffer(U8_BITS)
u8_iter_to_dto = iter_to_dto(U8_BITS, u8_to_dto)
u8_iter_from_dto = iter_from_dto(U8_BITS, u8_from_dto)
# INT8
def i8_to_dto(value: int) -> I8DTOType:
"""Convert 8-bit int to DTO."""
check_overflow(I8_MIN <= value <= I8_MAX)
return value
def i8_from_dto(dto: I8DTOType) -> int:
"""Convert DTO to 8-bit int."""
check_overflow(I8_MIN <= dto <= I8_MAX)
return dto
# i8_high = high(I8_MAX, I4_BITS, I4_MAX)
# i8_low = low(I8_MAX, I4_BITS, I4_MAX)
i8_to_catbuffer = to_catbuffer(I8_BITS, signed=True)
i8_from_catbuffer = from_catbuffer(I8_BITS, signed=True)
i8_iter_to_catbuffer = iter_to_catbuffer(I8_BITS, signed=True)
i8_iter_from_catbuffer = iter_from_catbuffer(I8_BITS, signed=True)
i8_iter_to_dto = iter_to_dto(I8_BITS, i8_to_dto)
i8_iter_from_dto = iter_from_dto(I8_BITS, i8_from_dto)
# UINT16
def u16_to_dto(value: int) -> U16DTOType:
"""Convert 16-bit int to DTO."""
check_overflow(0 <= value <= U16_MAX)
return value
def u16_from_dto(dto: U16DTOType) -> int:
"""Convert DTO to 16-bit int."""
check_overflow(0 <= dto <= U16_MAX)
return dto
u16_high = high(U16_MAX, U8_BITS, U8_MAX)
u16_low = low(U16_MAX, U8_BITS, U8_MAX)
u16_to_catbuffer = to_catbuffer(U16_BITS)
u16_from_catbuffer = from_catbuffer(U16_BITS)
u16_iter_to_catbuffer = iter_to_catbuffer(U16_BITS)
u16_iter_from_catbuffer = iter_from_catbuffer(U16_BITS)
u16_iter_to_dto = iter_to_dto(U16_BITS, u16_to_dto)
u16_iter_from_dto = iter_from_dto(U16_BITS, u16_from_dto)
# UINT24
def u24_to_dto(value: int) -> U24DTOType:
"""Convert 24-bit int to DTO."""
check_overflow(0 <= value <= U24_MAX)
return value
def u24_from_dto(dto: U24DTOType) -> int:
"""Convert DTO to 24-bit int."""
check_overflow(0 <= dto <= U24_MAX)
return dto
u24_high = high(U24_MAX, U8_BITS, U8_MAX)
u24_low = low(U24_MAX, U8_BITS, U8_MAX)
u24_to_catbuffer = to_catbuffer(U24_BITS)
u24_from_catbuffer = from_catbuffer(U24_BITS)
u24_iter_to_catbuffer = iter_to_catbuffer(U24_BITS)
u24_iter_from_catbuffer = iter_from_catbuffer(U24_BITS)
u24_iter_to_dto = iter_to_dto(U24_BITS, u24_to_dto)
u24_iter_from_dto = iter_from_dto(U24_BITS, u24_from_dto)
# UINT32
def u32_to_dto(value: int) -> U32DTOType:
"""Convert 32-bit int to DTO."""
check_overflow(0 <= value <= U32_MAX)
return value
def u32_from_dto(dto: U32DTOType) -> int:
"""Convert DTO to 32-bit int."""
check_overflow(0 <= dto <= U32_MAX)
return dto
u32_high = high(U32_MAX, U16_BITS, U16_MAX)
u32_low = low(U32_MAX, U16_BITS, U16_MAX)
u32_to_catbuffer = to_catbuffer(U32_BITS)
u32_from_catbuffer = from_catbuffer(U32_BITS)
u32_iter_to_catbuffer = iter_to_catbuffer(U32_BITS)
u32_iter_from_catbuffer = iter_from_catbuffer(U32_BITS)
u32_iter_to_dto = iter_to_dto(U32_BITS, u32_to_dto)
u32_iter_from_dto = iter_from_dto(U32_BITS, u32_from_dto)
# UINT64
def u64_to_dto(value: int) -> U64DTOType:
"""Convert 64-bit int to DTO."""
check_overflow(0 <= value <= U64_MAX)
return [u64_low(value), u64_high(value)]
def u64_from_dto(dto: U64DTOType) -> int:
"""Convert DTO to 64-bit int."""
if not (
len(dto) == 2
and dto[0] <= U32_MAX
and dto[1] <= U32_MAX
):
raise ArithmeticError
return (dto[0]) | (dto[1] << U32_BITS)
u64_high = high(U64_MAX, U32_BITS, U32_MAX)
u64_low = low(U64_MAX, U32_BITS, U32_MAX)
u64_to_catbuffer = to_catbuffer(U64_BITS)
u64_from_catbuffer = from_catbuffer(U64_BITS)
u64_iter_to_catbuffer = iter_to_catbuffer(U64_BITS)
u64_iter_from_catbuffer = iter_from_catbuffer(U64_BITS)
u64_iter_to_dto = iter_to_dto(U64_BITS, u64_to_dto)
u64_iter_from_dto = iter_from_dto(U64_BITS, u64_from_dto)
# UINT128
def u128_to_dto(value: int) -> U128DTOType:
"""Convert 128-bit int to DTO."""
check_overflow(0 <= value <= U128_MAX)
low = u128_low(value)
high = u128_high(value)
return [u64_to_dto(low), u64_to_dto(high)]
def u128_from_dto(dto: U128DTOType) -> int:
"""Convert DTO to 128-bit int."""
if len(dto) != 2:
raise ArithmeticError
low = u64_from_dto(dto[0])
high = u64_from_dto(dto[1])
return low | (high << U64_BITS)
u128_high = high(U128_MAX, U64_BITS, U64_MAX)
u128_low = low(U128_MAX, U64_BITS, U64_MAX)
u128_to_catbuffer = to_catbuffer(U128_BITS)
u128_from_catbuffer = from_catbuffer(U128_BITS)
u128_iter_to_catbuffer = iter_to_catbuffer(U128_BITS)
u128_iter_from_catbuffer = iter_from_catbuffer(U128_BITS)
u128_iter_to_dto = iter_to_dto(U128_BITS, u128_to_dto)
u128_iter_from_dto = iter_from_dto(U128_BITS, u128_from_dto)
|
py | 1a40f0652e55f445c1e9da38af82548b0c36905b | import os
import pytest
import ray
from ray import serve
if os.environ.get("RAY_SERVE_INTENTIONALLY_CRASH", False):
serve.controller._CRASH_AFTER_CHECKPOINT_PROBABILITY = 0.5
@pytest.fixture(scope="session")
def _shared_serve_instance():
ray.init(num_cpus=36)
serve.init()
yield
@pytest.fixture
def serve_instance(_shared_serve_instance):
serve.init()
yield
# Re-init if necessary.
serve.init()
controller = serve.api._get_controller()
# Clear all state between tests to avoid naming collisions.
for endpoint in ray.get(controller.get_all_endpoints.remote()):
serve.delete_endpoint(endpoint)
for backend in ray.get(controller.get_all_backends.remote()):
serve.delete_backend(backend)
|
py | 1a40f133c64b426a6433130c8939c8ec0c0c24b1 | import mock
import zeit.cms.browser.interfaces
import zeit.cms.browser.listing
import zeit.cms.content.interfaces
import zeit.cms.interfaces
import zeit.cms.testing
import zope.component
import zope.publisher.browser
class HitColumnTest(zeit.cms.testing.ZeitCmsTestCase):
def test_sort_key(self):
class FakeAccessCounter(object):
hits = 5
total_hits = 19
def __init__(self, context):
pass
zope.component.getSiteManager().registerAdapter(
FakeAccessCounter, (zeit.cms.interfaces.ICMSContent,),
zeit.cms.content.interfaces.IAccessCounter)
listrep = zope.component.queryMultiAdapter(
(self.repository['testcontent'],
zope.publisher.browser.TestRequest()),
zeit.cms.browser.interfaces.IListRepresentation)
column = zeit.cms.browser.listing.HitColumn()
self.assertEqual((19, 5), column.getSortKey(listrep, formatter=None))
class ListingTest(zeit.cms.testing.ZeitCmsBrowserTestCase):
def test_columns_ignore_exceptions(self):
with mock.patch(
'zeit.cms.testcontenttype.testcontenttype.'
                'ExampleContentType.authors', new_callable=mock.PropertyMock) as author:
author.side_effect = RuntimeError('provoked')
b = self.browser
b.handleErrors = False
with self.assertNothingRaised():
b.open('http://localhost/++skin++vivi/repository')
# Check that the cells are present but empty.
self.assertEllipsis(
'...<td> <span class="filename">testcontent</span> </td>'
' <td> 2008 ... </td> <td> </td> <td> </td> <td> </td>...',
b.contents)
|
py | 1a40f185807b2fb353ddd2692b74a4388f2a206f | import sys
from typing import Any
from typing import List
from kurobako import problem
from naslib.utils import get_dataset_api
op_names = [
"skip_connect",
"none",
"nor_conv_3x3",
"nor_conv_1x1",
"avg_pool_3x3",
]
edge_num = 4 * 3 // 2
max_epoch = 199
prune_start_epoch = 10
prune_epoch_step = 10
class NASLibProblemFactory(problem.ProblemFactory):
def __init__(self, dataset: str) -> None:
"""Creates ProblemFactory for NASBench201.
Args:
dataset:
Accepts one of "cifar10", "cifar100" or "ImageNet16-120".
"""
self._dataset = dataset
if dataset == "cifar10":
self._dataset = "cifar10-valid" # Set name used in dataset API
self._dataset_api = get_dataset_api("nasbench201", dataset)
def specification(self) -> problem.ProblemSpec:
params = [
problem.Var(f"x{i}", problem.CategoricalRange(op_names)) for i in range(edge_num)
]
return problem.ProblemSpec(
name=f"NASBench201-{self._dataset}",
params=params,
values=[problem.Var("value")],
steps=list(range(prune_start_epoch, max_epoch, prune_epoch_step)) + [max_epoch],
)
def create_problem(self, seed: int) -> problem.Problem:
return NASLibProblem(self._dataset, self._dataset_api)
class NASLibProblem(problem.Problem):
def __init__(self, dataset: str, dataset_api: Any) -> None:
super().__init__()
self._dataset = dataset
self._dataset_api = dataset_api
def create_evaluator(self, params: List[float]) -> problem.Evaluator:
ops = [op_names[int(x)] for x in params]
arch_str = "|{}~0|+|{}~0|{}~1|+|{}~0|{}~1|{}~2|".format(*ops)
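        # e.g. ops == ['nor_conv_3x3'] * 6 yields the NAS-Bench-201 string
        # '|nor_conv_3x3~0|+|nor_conv_3x3~0|nor_conv_3x3~1|+|nor_conv_3x3~0|nor_conv_3x3~1|nor_conv_3x3~2|'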
return NASLibEvaluator(
self._dataset_api["nb201_data"][arch_str][self._dataset]["eval_acc1es"]
)
class NASLibEvaluator(problem.Evaluator):
def __init__(self, learning_curve: List[float]) -> None:
self._current_step = 0
self._lc = learning_curve
def current_step(self) -> int:
return self._current_step
def evaluate(self, next_step: int) -> List[float]:
self._current_step = next_step
return [-self._lc[next_step]]
if __name__ == "__main__":
if len(sys.argv) < 1 + 2:
print("Usage: python3 nas_bench_suite/problems.py <search_space> <dataset>")
print("Example: python3 nas_bench_suite/problems.py nasbench201 cifar10")
exit(1)
search_space_name = sys.argv[1]
# We currently do not support other benchmarks.
assert search_space_name == "nasbench201"
dataset = sys.argv[2]
runner = problem.ProblemRunner(NASLibProblemFactory(dataset))
runner.run()
|
py | 1a40f1ba763efbc29dce13c7d4cf158c8309b6fd | # Anything Cryptography
from hashlib import md5, sha256, sha512
from pyAesCrypt import encryptFile, decryptFile
from os import remove
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.fernet import Fernet
import base64
def gen_key(master, salt):
# Generate Fernet key
password = master.encode()
salt = salt.encode()
kdf = PBKDF2HMAC(
algorithm=hashes.SHA512(),
length=32,
salt=salt,
iterations=100000,
backend=default_backend()
)
key = base64.urlsafe_b64encode(kdf.derive(password))
return key
def hash_rounding(word, algo="sha512", rounds=100, salt="@8fh::=G=-,./~~}%]"):
    # Repeatedly hash and reverse the given word (with its salt) to make it harder to crack
if algo == "md5":
algo = md5
elif algo == "sha256":
algo = sha256
else:
algo = sha512
hash = word
for round in range(rounds):
        # hash the password with its salt and reverse it to make it harder to crack
hash = algo((hash + salt).encode()).hexdigest()[::-1]
return hash
def hash_pass(word, algo="sha512", salt="Gu6#&3_==[';;/~~"):
# Hashes a given password
hash = word
hash = hash_rounding(hash, algo="md5", salt=salt) + hash_rounding(hash, algo="sha256", salt=salt) + hash_rounding(hash, algo="sha512", salt=salt)
hash = hash_rounding(hash, algo=algo)
return hash
def encrypt_pass(plain_pass, master_password):
# Encrypt a password with Fernet
key = gen_key(master_password, hash_pass(master_password))
encryptor = Fernet(key)
del key
hashed_pass = encryptor.encrypt(plain_pass.encode())
return hashed_pass.decode()
def decrypt_pass(hashed_pass, master_password):
# Decrypts an encrypted password with Fernet
key = gen_key(master_password, hash_pass(master_password))
decryptor = Fernet(key)
del key
dehashed_pass = decryptor.decrypt(hashed_pass.encode())
return dehashed_pass.decode()
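# Round-trip sketch for the helpers above (the literal strings are placeholders):
#
#   token = encrypt_pass('s3cret-password', 'master-key')  # Fernet token as str
#   assert decrypt_pass(token, 'master-key') == 's3cret-password'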
def encryptDB(file, master, inplace=True):
# Encrypt the database with AES256-CBC
try:
encryptFile(file, file+".aes", master, 64 * 1024)
if inplace:
remove(file)
return True
except:
return False
def decryptDB(file, master, inplace=True):
# decrypt the AES256-CBC encrypted database
try:
decryptFile(file+".aes", file, master, 64 * 1024)
if inplace:
remove(file+".aes")
return True
except:
return False
|
py | 1a40f23daf0727f7e9da18eb72c0c9cf731df69f | def to_openmm_Topology(item, selection='all', frame_indices='all', syntaxis='MolSysMT'):
from molsysmt.tools.openmm_Modeller import is_openmm_Modeller
from molsysmt.basic import convert
if not is_openmm_Modeller(item):
raise ValueError
tmp_item = convert(item, to_form='openmm.Topology', selection=selection,
frame_indices=frame_indices, syntaxis=syntaxis)
return tmp_item
|
py | 1a40f2a35694266d68408943084b7f531f0140b3 | import re
from django import forms
from django.core.validators import RegexValidator
regex_validator_open = RegexValidator(
regex=re.compile("open", flags=re.ASCII),
message="You can't use open function",
inverse_match=True,
)
regex_validator_eval = RegexValidator(
regex=re.compile("eval", flags=re.ASCII),
message="You can't use eval function",
inverse_match=True,
)
regex_validator_exec = RegexValidator(
regex=re.compile("exec", flags=re.ASCII),
message="You can't use exec function",
inverse_match=True,
)
regex_validator_os = RegexValidator(
regex=re.compile(r"[%0-9\b]?os[\b%0-9]?", flags=re.ASCII),
message="You can't use os module",
inverse_match=True,
)
regex_validator_subprocess = RegexValidator(
regex=re.compile("subprocess", flags=re.ASCII),
message="You can't use subprocess module",
inverse_match=True,
)
regex_validator_pathlib = RegexValidator(
regex=re.compile("pathlib", flags=re.ASCII),
message="You can't use pathlib module",
inverse_match=True,
)
regex_validator_fileinput = RegexValidator(
regex=re.compile("fileinput", flags=re.ASCII),
message="You can't use fileinput module",
inverse_match=True,
)
regex_validator_shutil = RegexValidator(
regex=re.compile("shutil", flags=re.ASCII),
message="You can't use shutil module",
inverse_match=True,
)
regex_validator_parent_path = RegexValidator(
regex=re.compile(r"\.\.[/\\]{1}", flags=re.ASCII),
message="You can't go to the parent path",
inverse_match=True,
)
regex_validator_ftp = RegexValidator(
regex=re.compile(r".?ftp.?", flags=re.ASCII),
message="You can't use ftp protocol",
inverse_match=True,
)
class PythonInterpreterForm(forms.Form):
"""Form for the main page.
Attributes:
user_code: field for input user's code;
std_io: field for standard input-output;
timeout: field for setup of server's response timeout.
"""
user_code = forms.CharField(
widget=forms.Textarea,
label=False,
initial="# Type your Python code here and push Launch button.\n",
validators=[
regex_validator_open,
regex_validator_eval,
regex_validator_exec,
regex_validator_os,
regex_validator_subprocess,
regex_validator_pathlib,
regex_validator_fileinput,
regex_validator_shutil,
regex_validator_parent_path,
regex_validator_ftp,
],
)
std_io = forms.CharField(widget=forms.Textarea, label=False, required=False)
timeout = forms.IntegerField(max_value=20, label="Timeout, sec", initial=5)
|
py | 1a40f44cc3fe1c4e08a1cb4327d5c4086fc1eb65 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..ukftractography import UKFTractography
def test_UKFTractography_inputs():
input_map = dict(
Ql=dict(argstr='--Ql %f', ),
Qm=dict(argstr='--Qm %f', ),
Qw=dict(argstr='--Qw %f', ),
Rs=dict(argstr='--Rs %f', ),
args=dict(argstr='%s', ),
dwiFile=dict(
argstr='--dwiFile %s',
extensions=None,
),
environ=dict(
nohash=True,
usedefault=True,
),
freeWater=dict(argstr='--freeWater ', ),
fullTensorModel=dict(argstr='--fullTensorModel ', ),
labels=dict(
argstr='--labels %s',
sep=',',
),
maskFile=dict(
argstr='--maskFile %s',
extensions=None,
),
maxBranchingAngle=dict(argstr='--maxBranchingAngle %f', ),
maxHalfFiberLength=dict(argstr='--maxHalfFiberLength %f', ),
minBranchingAngle=dict(argstr='--minBranchingAngle %f', ),
minFA=dict(argstr='--minFA %f', ),
minGA=dict(argstr='--minGA %f', ),
numTensor=dict(argstr='--numTensor %s', ),
numThreads=dict(argstr='--numThreads %d', ),
recordCovariance=dict(argstr='--recordCovariance ', ),
recordFA=dict(argstr='--recordFA ', ),
recordFreeWater=dict(argstr='--recordFreeWater ', ),
recordLength=dict(argstr='--recordLength %f', ),
recordNMSE=dict(argstr='--recordNMSE ', ),
recordState=dict(argstr='--recordState ', ),
recordTensors=dict(argstr='--recordTensors ', ),
recordTrace=dict(argstr='--recordTrace ', ),
seedFALimit=dict(argstr='--seedFALimit %f', ),
seedsFile=dict(
argstr='--seedsFile %s',
extensions=None,
),
seedsPerVoxel=dict(argstr='--seedsPerVoxel %d', ),
stepLength=dict(argstr='--stepLength %f', ),
storeGlyphs=dict(argstr='--storeGlyphs ', ),
tracts=dict(
argstr='--tracts %s',
hash_files=False,
),
tractsWithSecondTensor=dict(
argstr='--tractsWithSecondTensor %s',
hash_files=False,
),
writeAsciiTracts=dict(argstr='--writeAsciiTracts ', ),
writeUncompressedTracts=dict(argstr='--writeUncompressedTracts ', ),
)
inputs = UKFTractography.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_UKFTractography_outputs():
output_map = dict(
tracts=dict(extensions=None, ),
tractsWithSecondTensor=dict(extensions=None, ),
)
outputs = UKFTractography.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
py | 1a40f49bed9cea5052aa9077f7ea6a64b454d965 | import random
import time
import warnings
import sys
import argparse
import shutil
import torch
import torch.backends.cudnn as cudnn
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR, MultiStepLR
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, ToPILImage
sys.path.append('../../..')
from dalib.adaptation.keypoint_detection.regda import PoseResNet as RegDAPoseResNet, \
PseudoLabelGenerator, RegressionDisparity
import common.vision.models as models
from common.vision.models.keypoint_detection.pose_resnet import Upsampling, PoseResNet
from common.vision.models.keypoint_detection.loss import JointsKLLoss
import common.vision.datasets.keypoint_detection as datasets
import common.vision.transforms.keypoint_detection as T
from common.vision.transforms import Denormalize
from common.utils.data import ForeverDataIterator
from common.utils.meter import AverageMeter, ProgressMeter, AverageMeterDict
from common.utils.metric.keypoint_detection import accuracy
from common.utils.logger import CompleteLogger
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args: argparse.Namespace):
logger = CompleteLogger(args.log, args.phase)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
cudnn.benchmark = True
# Data loading code
normalize = T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
train_transform = T.Compose([
T.RandomRotation(args.rotation),
T.RandomResizedCrop(size=args.image_size, scale=args.resize_scale),
T.ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25),
T.GaussianBlur(),
T.ToTensor(),
normalize
])
val_transform = T.Compose([
T.Resize(args.image_size),
T.ToTensor(),
normalize
])
image_size = (args.image_size, args.image_size)
heatmap_size = (args.heatmap_size, args.heatmap_size)
source_dataset = datasets.__dict__[args.source]
train_source_dataset = source_dataset(root=args.source_root, transforms=train_transform,
image_size=image_size, heatmap_size=heatmap_size)
train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,
shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True)
val_source_dataset = source_dataset(root=args.source_root, split='test', transforms=val_transform,
image_size=image_size, heatmap_size=heatmap_size)
val_source_loader = DataLoader(val_source_dataset, batch_size=args.batch_size, shuffle=False, pin_memory=True)
target_dataset = datasets.__dict__[args.target]
train_target_dataset = target_dataset(root=args.target_root, transforms=train_transform,
image_size=image_size, heatmap_size=heatmap_size)
train_target_loader = DataLoader(train_target_dataset, batch_size=args.batch_size,
shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True)
val_target_dataset = target_dataset(root=args.target_root, split='test', transforms=val_transform,
image_size=image_size, heatmap_size=heatmap_size)
val_target_loader = DataLoader(val_target_dataset, batch_size=args.batch_size, shuffle=False, pin_memory=True)
print("Source train:", len(train_source_loader))
print("Target train:", len(train_target_loader))
print("Source test:", len(val_source_loader))
print("Target test:", len(val_target_loader))
train_source_iter = ForeverDataIterator(train_source_loader)
train_target_iter = ForeverDataIterator(train_target_loader)
# create model
backbone = models.__dict__[args.arch](pretrained=True)
upsampling = Upsampling(backbone.out_features)
num_keypoints = train_source_dataset.num_keypoints
model = RegDAPoseResNet(backbone, upsampling, 256, num_keypoints, num_head_layers=args.num_head_layers, finetune=True).to(device)
# define loss function
criterion = JointsKLLoss()
pseudo_label_generator = PseudoLabelGenerator(num_keypoints, args.heatmap_size, args.heatmap_size)
regression_disparity = RegressionDisparity(pseudo_label_generator, JointsKLLoss(epsilon=1e-7))
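    # Note: regression_disparity is later called with mode='min' (steps A and C in train()) to shrink
    # the gap between the main head and the adversarial head, and with mode='max' (step B) to widen
    # it on target data; pseudo_label_generator supplies the pseudo labels used in that comparison.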
# define optimizer and lr scheduler
optimizer_f = SGD([
{'params': backbone.parameters(), 'lr': 0.1},
{'params': upsampling.parameters(), 'lr': 0.1},
], lr=0.1, momentum=args.momentum, weight_decay=args.wd, nesterov=True)
optimizer_h = SGD(model.head.parameters(), lr=1., momentum=args.momentum, weight_decay=args.wd, nesterov=True)
optimizer_h_adv = SGD(model.head_adv.parameters(), lr=1., momentum=args.momentum, weight_decay=args.wd, nesterov=True)
lr_decay_function = lambda x: args.lr * (1. + args.lr_gamma * float(x)) ** (-args.lr_decay)
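    # lr_decay_function implements the inverse decay lr(x) = lr * (1 + lr_gamma * x) ** (-lr_decay),
    # where x counts scheduler steps (one per training iteration below). With the defaults
    # lr_gamma=0.0001 and lr_decay=0.75, the base rate roughly halves after ~15,000 iterations.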
lr_scheduler_f = LambdaLR(optimizer_f, lr_decay_function)
lr_scheduler_h = LambdaLR(optimizer_h, lr_decay_function)
lr_scheduler_h_adv = LambdaLR(optimizer_h_adv, lr_decay_function)
start_epoch = 0
if args.resume is None:
if args.pretrain is None:
# first pretrain the backbone and upsampling
print("Pretraining the model on source domain.")
args.pretrain = logger.get_checkpoint_path('pretrain')
pretrained_model = PoseResNet(backbone, upsampling, 256, num_keypoints, True).to(device)
optimizer = SGD(pretrained_model.get_parameters(lr=args.lr), momentum=args.momentum, weight_decay=args.wd, nesterov=True)
lr_scheduler = MultiStepLR(optimizer, args.lr_step, args.lr_factor)
best_acc = 0
for epoch in range(args.pretrain_epochs):
lr_scheduler.step()
print(lr_scheduler.get_lr())
pretrain(train_source_iter, pretrained_model, criterion, optimizer, epoch, args)
source_val_acc = validate(val_source_loader, pretrained_model, criterion, None, args)
# remember best acc and save checkpoint
if source_val_acc['all'] > best_acc:
best_acc = source_val_acc['all']
torch.save(
{
'model': pretrained_model.state_dict()
}, args.pretrain
)
print("Source: {} best: {}".format(source_val_acc['all'], best_acc))
# load from the pretrained checkpoint
pretrained_dict = torch.load(args.pretrain, map_location='cpu')['model']
model_dict = model.state_dict()
        # remove keys from the pretrained dict that don't appear in the model dict
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model.load_state_dict(pretrained_dict, strict=False)
else:
# optionally resume from a checkpoint
checkpoint = torch.load(args.resume, map_location='cpu')
model.load_state_dict(checkpoint['model'])
optimizer_f.load_state_dict(checkpoint['optimizer_f'])
optimizer_h.load_state_dict(checkpoint['optimizer_h'])
optimizer_h_adv.load_state_dict(checkpoint['optimizer_h_adv'])
lr_scheduler_f.load_state_dict(checkpoint['lr_scheduler_f'])
lr_scheduler_h.load_state_dict(checkpoint['lr_scheduler_h'])
lr_scheduler_h_adv.load_state_dict(checkpoint['lr_scheduler_h_adv'])
start_epoch = checkpoint['epoch'] + 1
# define visualization function
tensor_to_image = Compose([
Denormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
ToPILImage()
])
def visualize(image, keypoint2d, name, heatmaps=None):
"""
Args:
image (tensor): image in shape 3 x H x W
keypoint2d (tensor): keypoints in shape K x 2
            name: name of the saving image
            heatmaps (tensor, optional): predicted heatmaps (currently unused)
"""
train_source_dataset.visualize(tensor_to_image(image),
keypoint2d, logger.get_image_path("{}.jpg".format(name)))
if args.phase == 'test':
# evaluate on validation set
source_val_acc = validate(val_source_loader, model, criterion, None, args)
target_val_acc = validate(val_target_loader, model, criterion, visualize, args)
print("Source: {:4.3f} Target: {:4.3f}".format(source_val_acc['all'], target_val_acc['all']))
for name, acc in target_val_acc.items():
print("{}: {:4.3f}".format(name, acc))
return
# start training
best_acc = 0
print("Start regression domain adaptation.")
for epoch in range(start_epoch, args.epochs):
logger.set_epoch(epoch)
print(lr_scheduler_f.get_lr(), lr_scheduler_h.get_lr(), lr_scheduler_h_adv.get_lr())
# train for one epoch
train(train_source_iter, train_target_iter, model, criterion, regression_disparity,
optimizer_f, optimizer_h, optimizer_h_adv, lr_scheduler_f, lr_scheduler_h, lr_scheduler_h_adv,
epoch, visualize if args.debug else None, args)
# evaluate on validation set
source_val_acc = validate(val_source_loader, model, criterion, None, args)
target_val_acc = validate(val_target_loader, model, criterion, visualize if args.debug else None, args)
# remember best acc and save checkpoint
torch.save(
{
'model': model.state_dict(),
'optimizer_f': optimizer_f.state_dict(),
'optimizer_h': optimizer_h.state_dict(),
'optimizer_h_adv': optimizer_h_adv.state_dict(),
'lr_scheduler_f': lr_scheduler_f.state_dict(),
'lr_scheduler_h': lr_scheduler_h.state_dict(),
'lr_scheduler_h_adv': lr_scheduler_h_adv.state_dict(),
'epoch': epoch,
'args': args
}, logger.get_checkpoint_path(epoch)
)
if target_val_acc['all'] > best_acc:
shutil.copy(logger.get_checkpoint_path(epoch), logger.get_checkpoint_path('best'))
best_acc = target_val_acc['all']
print("Source: {:4.3f} Target: {:4.3f} Target(best): {:4.3f}".format(source_val_acc['all'], target_val_acc['all'], best_acc))
for name, acc in target_val_acc.items():
print("{}: {:4.3f}".format(name, acc))
logger.close()
def pretrain(train_source_iter, model, criterion, optimizer,
epoch: int, args: argparse.Namespace):
batch_time = AverageMeter('Time', ':4.2f')
data_time = AverageMeter('Data', ':3.1f')
losses_s = AverageMeter('Loss (s)', ":.2e")
acc_s = AverageMeter("Acc (s)", ":3.2f")
progress = ProgressMeter(
args.iters_per_epoch,
[batch_time, data_time, losses_s, acc_s],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i in range(args.iters_per_epoch):
optimizer.zero_grad()
x_s, label_s, weight_s, meta_s = next(train_source_iter)
x_s = x_s.to(device)
label_s = label_s.to(device)
weight_s = weight_s.to(device)
# measure data loading time
data_time.update(time.time() - end)
# compute output
y_s = model(x_s)
loss_s = criterion(y_s, label_s, weight_s)
# compute gradient and do SGD step
loss_s.backward()
optimizer.step()
# measure accuracy and record loss
_, avg_acc_s, cnt_s, pred_s = accuracy(y_s.detach().cpu().numpy(),
label_s.detach().cpu().numpy())
acc_s.update(avg_acc_s, cnt_s)
losses_s.update(loss_s, cnt_s)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def train(train_source_iter, train_target_iter, model, criterion, regression_disparity,
optimizer_f, optimizer_h, optimizer_h_adv, lr_scheduler_f, lr_scheduler_h, lr_scheduler_h_adv,
epoch: int, visualize, args: argparse.Namespace):
batch_time = AverageMeter('Time', ':4.2f')
data_time = AverageMeter('Data', ':3.1f')
losses_s = AverageMeter('Loss (s)', ":.2e")
losses_gf = AverageMeter('Loss (t, false)', ":.2e")
losses_gt = AverageMeter('Loss (t, truth)', ":.2e")
acc_s = AverageMeter("Acc (s)", ":3.2f")
acc_t = AverageMeter("Acc (t)", ":3.2f")
acc_s_adv = AverageMeter("Acc (s, adv)", ":3.2f")
acc_t_adv = AverageMeter("Acc (t, adv)", ":3.2f")
progress = ProgressMeter(
args.iters_per_epoch,
[batch_time, data_time, losses_s, losses_gf, losses_gt, acc_s, acc_t, acc_s_adv, acc_t_adv],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i in range(args.iters_per_epoch):
x_s, label_s, weight_s, meta_s = next(train_source_iter)
x_t, label_t, weight_t, meta_t = next(train_target_iter)
x_s = x_s.to(device)
label_s = label_s.to(device)
weight_s = weight_s.to(device)
x_t = x_t.to(device)
label_t = label_t.to(device)
weight_t = weight_t.to(device)
# measure data loading time
data_time.update(time.time() - end)
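        # The three steps below follow the RegDA-style minimax scheme: (A) fit both regressors on the
        # labeled source batch, (B) update only the adversarial head to maximize the prediction
        # disparity on the target batch, and (C) update only the feature extractor to minimize it.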
# Step A train all networks to minimize loss on source domain
optimizer_f.zero_grad()
optimizer_h.zero_grad()
optimizer_h_adv.zero_grad()
y_s, y_s_adv = model(x_s)
loss_s = criterion(y_s, label_s, weight_s) + \
args.margin * args.trade_off * regression_disparity(y_s, y_s_adv, weight_s, mode='min')
loss_s.backward()
optimizer_f.step()
optimizer_h.step()
optimizer_h_adv.step()
# Step B train adv regressor to maximize regression disparity
optimizer_h_adv.zero_grad()
y_t, y_t_adv = model(x_t)
loss_ground_false = args.trade_off * regression_disparity(y_t, y_t_adv, weight_t, mode='max')
loss_ground_false.backward()
optimizer_h_adv.step()
# Step C train feature extractor to minimize regression disparity
optimizer_f.zero_grad()
y_t, y_t_adv = model(x_t)
loss_ground_truth = args.trade_off * regression_disparity(y_t, y_t_adv, weight_t, mode='min')
loss_ground_truth.backward()
optimizer_f.step()
# do update step
model.step()
lr_scheduler_f.step()
lr_scheduler_h.step()
lr_scheduler_h_adv.step()
# measure accuracy and record loss
_, avg_acc_s, cnt_s, pred_s = accuracy(y_s.detach().cpu().numpy(),
label_s.detach().cpu().numpy())
acc_s.update(avg_acc_s, cnt_s)
_, avg_acc_t, cnt_t, pred_t = accuracy(y_t.detach().cpu().numpy(),
label_t.detach().cpu().numpy())
acc_t.update(avg_acc_t, cnt_t)
_, avg_acc_s_adv, cnt_s_adv, pred_s_adv = accuracy(y_s_adv.detach().cpu().numpy(),
label_s.detach().cpu().numpy())
acc_s_adv.update(avg_acc_s_adv, cnt_s)
_, avg_acc_t_adv, cnt_t_adv, pred_t_adv = accuracy(y_t_adv.detach().cpu().numpy(),
label_t.detach().cpu().numpy())
acc_t_adv.update(avg_acc_t_adv, cnt_t)
losses_s.update(loss_s, cnt_s)
losses_gf.update(loss_ground_false, cnt_s)
losses_gt.update(loss_ground_truth, cnt_s)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
if visualize is not None:
visualize(x_s[0], pred_s[0] * args.image_size / args.heatmap_size, "source_{}_pred".format(i))
visualize(x_s[0], meta_s['keypoint2d'][0], "source_{}_label".format(i))
visualize(x_t[0], pred_t[0] * args.image_size / args.heatmap_size, "target_{}_pred".format(i))
visualize(x_t[0], meta_t['keypoint2d'][0], "target_{}_label".format(i))
visualize(x_s[0], pred_s_adv[0] * args.image_size / args.heatmap_size, "source_adv_{}_pred".format(i))
visualize(x_t[0], pred_t_adv[0] * args.image_size / args.heatmap_size, "target_adv_{}_pred".format(i))
def validate(val_loader, model, criterion, visualize, args: argparse.Namespace):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.2e')
acc = AverageMeterDict(val_loader.dataset.keypoints_group.keys(), ":3.2f")
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, acc['all']],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (x, label, weight, meta) in enumerate(val_loader):
x = x.to(device)
label = label.to(device)
weight = weight.to(device)
# compute output
y = model(x)
loss = criterion(y, label, weight)
# measure accuracy and record loss
losses.update(loss.item(), x.size(0))
acc_per_points, avg_acc, cnt, pred = accuracy(y.cpu().numpy(),
label.cpu().numpy())
group_acc = val_loader.dataset.group_accuracy(acc_per_points)
acc.update(group_acc, x.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
if visualize is not None:
                    visualize(x[0], pred[0] * args.image_size / args.heatmap_size, "val_{}_pred".format(i))
                    visualize(x[0], meta['keypoint2d'][0], "val_{}_label".format(i))
return acc.average()
if __name__ == '__main__':
architecture_names = sorted(
name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name])
)
dataset_names = sorted(
name for name in datasets.__dict__
if not name.startswith("__") and callable(datasets.__dict__[name])
)
    parser = argparse.ArgumentParser(description='RegDA for Keypoint Detection Domain Adaptation')
# dataset parameters
parser.add_argument('source_root', help='root path of the source dataset')
parser.add_argument('target_root', help='root path of the target dataset')
parser.add_argument('-s', '--source', help='source domain(s)')
parser.add_argument('-t', '--target', help='target domain(s)')
parser.add_argument('--resize-scale', nargs='+', type=float, default=(0.6, 1.3),
                        help='scale range for the RandomResizedCrop augmentation')
parser.add_argument('--rotation', type=int, default=180,
help='rotation range of the RandomRotation augmentation')
parser.add_argument('--image-size', type=int, default=256,
help='input image size')
parser.add_argument('--heatmap-size', type=int, default=64,
help='output heatmap size')
# model parameters
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet101',
choices=architecture_names,
help='backbone architecture: ' +
' | '.join(architecture_names) +
' (default: resnet101)')
parser.add_argument("--pretrain", type=str, default=None,
help="Where restore pretrained model parameters from.")
parser.add_argument("--resume", type=str, default=None,
help="where restore model parameters from.")
parser.add_argument('--num-head-layers', type=int, default=2)
parser.add_argument('--margin', type=float, default=4., help="margin gamma")
parser.add_argument('--trade-off', default=1., type=float,
help='the trade-off hyper-parameter for transfer loss')
# training parameters
parser.add_argument('-b', '--batch-size', default=32, type=int,
metavar='N',
help='mini-batch size (default: 32)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=0.0001, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--lr-gamma', default=0.0001, type=float)
parser.add_argument('--lr-decay', default=0.75, type=float, help='parameter for lr scheduler')
    parser.add_argument('--lr-step', default=[45, 60], type=int, nargs='+', help='milestone epochs for the pretraining lr scheduler')
parser.add_argument('--lr-factor', default=0.1, type=float, help='parameter for lr scheduler')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--pretrain_epochs', default=70, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-i', '--iters-per-epoch', default=500, type=int,
help='Number of iterations per epoch')
parser.add_argument('-p', '--print-freq', default=100, type=int,
metavar='N', help='print frequency (default: 100)')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument("--log", type=str, default='src_only',
help="Where to save logs, checkpoints and debugging images.")
parser.add_argument("--phase", type=str, default='train', choices=['train', 'test'],
help="When phase is 'test', only test the model.")
parser.add_argument('--debug', action="store_true",
help='In the debug mode, save images and predictions')
args = parser.parse_args()
print(args)
main(args)
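# Illustrative invocation only (script name, paths and dataset names are placeholders; use datasets
# defined in common.vision.datasets.keypoint_detection):
#   python regda.py data/source data/target -s SourceDataset -t TargetDataset --log logs/regda --debug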
|
py | 1a40f60604bfea022cbf06bf6c1f004f6ba348a9 | import abc
class Prediction(object):
"""Prediction is an abstract class."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def as_array(self):
return
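# Illustrative only -- a minimal concrete implementation would simply return its stored values:
#
#   class ArrayPrediction(Prediction):
#       def __init__(self, values):
#           self._values = values
#
#       def as_array(self):
#           return self._values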
|
py | 1a40f63135d5445c9995f99ee48c24aeb414217e | from django.contrib import admin
from fahari.common.admin import BaseAdmin
from .models import SheetToDBMappingsMetadata, StockVerificationReceiptsAdapter
@admin.register(SheetToDBMappingsMetadata)
class StockToDBMappingsMetadata(BaseAdmin):
list_display = ("name", "version")
@admin.register(StockVerificationReceiptsAdapter)
class StockVerificationReceiptsAdapterAdmin(BaseAdmin):
list_display = ("county", "position")
readonly_fields = BaseAdmin.readonly_fields + ("target_model",)
|
py | 1a40f659fafc60d067407cbb8e216ad748e8cf0b | # -*- coding: utf-8 -*-
#
# wxcast: A Python API and cli to collect weather information.
#
# Copyright (c) 2021 Sean Marlow
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import click
from collections import OrderedDict
from wxcast import api
from wxcast import utils
def print_license(ctx, param, value):
"""
Eager option to print license information and exit.
"""
if not value or ctx.resilient_parsing:
return
click.echo(
'wxcast Copyright (C) 2021 Sean Marlow. (MIT License)\n\n'
'See LICENSE for more information.'
)
ctx.exit()
@click.group()
@click.version_option()
@click.option(
'--license',
expose_value=False,
is_eager=True,
is_flag=True,
callback=print_license,
help='Display license information and exit.'
)
def main():
"""
Retrieve the latest weather information in your terminal.
Data provided by NWS and AVWX.
NWS: https://forecast-v3.weather.gov/documentation \n
AVWX: https://avwx.rest/
"""
pass
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.argument('location')
def forecast(no_color, location):
"""
Retrieve current 7 day forecast for given location.
Location can be a city, address or zip/postal code.
Examples:
wxcast forecast denver
wxcast forecast "denver, co"
:param location: Location string to get forecast for.
:param no_color: If True do not style string output.
"""
try:
response = api.get_seven_day_forecast(location)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
data = OrderedDict(
(d['name'], d['detailedForecast']) for d in response
)
utils.echo_dict(data, no_color)
@click.command()
@click.option(
'-d', '--decoded',
is_flag=True,
help='Decode raw metar to string format.'
)
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.option(
'-t',
'--temp-unit',
default='C',
type=click.Choice(['C', 'F']),
help='Unit of measurement for temperature values. '
'Default: (C).'
)
@click.argument('icao')
def metar(decoded, no_color, temp_unit, icao):
"""
Retrieve the latest METAR given an airport ICAO code.
Example: wxcast metar -d KSLC
:param decoded: Flag to decode the METAR output.
:param no_color: If True do not style string output.
    :param temp_unit: Unit of measurement for temperature values (C or F).
    :param icao: The airport ICAO code to retrieve METAR for.
"""
try:
response = api.get_metar(icao, temp_unit, decoded)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
if decoded:
click.echo(
''.join([
utils.style_string(
'At ', no_color, fg='green'
),
utils.style_string(
response['time'], no_color, fg='blue'
),
utils.style_string(
' the conditions are:', no_color, fg='green'
),
'\n'
])
)
spaces = utils.get_max_key(response)
try:
# Try to convert elevation to ft and meters.
response['elevation'] = '{}ft ({}m)'.format(
int(float(response['elevation']) * 3.28084),
response['elevation']
)
            except Exception:  # KeyError/ValueError if elevation is missing or not numeric
pass
utils.echo_dict(response, no_color, spaces=spaces)
else:
utils.echo_style(response, no_color, fg='blue')
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.option(
'-t',
'--temp-unit',
default='C',
type=click.Choice(['C', 'F']),
help='Unit of measurement for temperature values. '
'Default: (C).'
)
@click.argument('station_id')
def conditions(no_color, temp_unit, station_id):
"""
Retrieve the latest conditions given a weather station id.
Example: wxcast conditions KDTW
:param no_color: If True do not style string output.
    :param temp_unit: Unit of measurement for temperature values (C or F).
    :param station_id: The weather station id to retrieve conditions for.
"""
try:
response = api.get_metar(station_id, temp_unit, decoded=True)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
        response.pop('station', None)
        response.pop('type', None)
response.pop('sea level pressure', None)
response.pop('remarks', None)
response.pop('elevation', None)
utils.echo_dict(response, no_color)
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
def offices(no_color):
"""
Retrieve the available weather forecast offices (WFO).
Example: wxcast offices
:param no_color: If True do not style string output.
"""
try:
response = api.get_wfo_list()
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
utils.echo_dict(response, no_color)
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.argument('wfo')
def products(no_color, wfo):
"""
Retrieve the available text products for a given wfo.
Example: wxcast products slc
:param no_color: If True do not style string output.
:param wfo: The weather forecast office abbreviation (BOU).
"""
try:
response = api.get_wfo_products(wfo)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
utils.echo_dict(response, no_color)
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.argument('wfo')
@click.argument('product')
def text(no_color, wfo, product):
"""
Retrieve the NWS text product.
Example: wxcast text slc afd
:param no_color: If True do not style string output.
:param wfo: The weather forecast office abbreviation (BOU).
:param product: The text product to retrieve.
"""
try:
response = api.get_nws_product(wfo, product)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
click.echo_via_pager(response)
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.argument('wfo')
def office(no_color, wfo):
"""
Retrieve information for a given wfo.
    Example: wxcast office slc
:param no_color: If True do not style string output.
:param wfo: The weather forecast office abbreviation (BOU).
"""
try:
response = api.get_wfo_info(wfo)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
utils.echo_dict(response, no_color)
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.argument('wfo')
def stations(no_color, wfo):
"""
Retrieve a list of stations for a given wfo.
    Example: wxcast stations slc
:param no_color: If True do not style string output.
:param wfo: The weather forecast office abbreviation (BOU).
"""
try:
response = api.get_stations_for_wfo(wfo)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
utils.echo_style('\n'.join(response), no_color)
@click.command()
@click.option(
'--no-color',
is_flag=True,
help='Remove ANSI color and styling from output.'
)
@click.argument('station_id')
def station(no_color, station_id):
"""
Retrieve info for a weather station.
Example: wxcast station kbna
:param no_color: If True do not style string output.
:param station_id: The weather station id.
"""
try:
response = api.get_station_info(station_id)
except Exception as e:
utils.echo_style(str(e), no_color, fg='red')
else:
try:
# Try to convert elevation to ft and meters.
response['elevation'] = '{}ft ({}m)'.format(
int(float(response['elevation']) * 3.28084),
response['elevation']
)
        except Exception:  # KeyError/ValueError if elevation is missing or not numeric
pass
utils.echo_dict(response, no_color)
main.add_command(metar)
main.add_command(text)
main.add_command(offices)
main.add_command(products)
main.add_command(forecast)
main.add_command(office)
main.add_command(stations)
main.add_command(station)
main.add_command(conditions)
|
py | 1a40f73685bf6dc7c77e209221b870f24226394d | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from googleapiclient.discovery import build
from httplib2 import Http
import json
from oauth2client import service_account
from google.oauth2 import service_account as google_service_account
import googleapiclient.http
from googleapiclient._auth import authorized_http
import dateparser
import io
import os
# @@@@@@@@ GLOBALS @@@@@@@@
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/ediscovery', 'https://www.googleapis.com/auth/devstorage.full_control']
DEMISTO_MATTER = 'test_search_phishing'
ADMIN_EMAIL = demisto.params()['gsuite_credentials']['identifier'].encode('utf-8')
PRIVATE_KEY_CONTENT = demisto.params()['auth_json'].encode('utf-8')
USE_SSL = not demisto.params().get('insecure', False)
# @@@@@@@@ HELPER FUNCS @@@@@@@@
def validate_input_values(arguments_values_to_verify, available_values):
for value in arguments_values_to_verify:
if value not in available_values:
return_error(
'Argument: \'{}\' is not one of the possible values: {}'.format(value, ', '.join(available_values)))
def get_credentials(additional_scopes=None, delegated_user=ADMIN_EMAIL):
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
if delegated_user == 'me':
delegated_user = ADMIN_EMAIL
scopes = SCOPES
if additional_scopes is not None:
scopes += additional_scopes
try:
json_keyfile = json.loads(PRIVATE_KEY_CONTENT)
if not isinstance(json_keyfile, dict):
json_keyfile = json.loads(json_keyfile)
cred = service_account.ServiceAccountCredentials.from_json_keyfile_dict(json_keyfile,
scopes=scopes)
delegated_creds = cred.create_delegated(delegated_user)
except Exception as e:
LOG('An error occurred in the \'get_credentials\' function.')
err_msg = 'An error occurred while trying to construct an OAuth2 ' \
'ServiceAccountCredentials object - {}'.format(str(e))
return_error(err_msg)
return delegated_creds
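# Note: create_delegated() relies on the service account having G Suite domain-wide delegation for
# the scopes above; without it, impersonating ADMIN_EMAIL (or any other user) will fail when the
# Vault API is called.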
def connect():
creds = get_credentials()
try:
service = build('vault', 'v1', http=creds.authorize(Http(disable_ssl_certificate_validation=(not USE_SSL))))
except Exception as e:
LOG('There was an error creating the Vault service in the \'connect\' function.')
err_msg = 'There was an error creating the Vault service - {}'.format(str(e))
return_error(err_msg)
return service
def is_matter_exist(service, matter_name): # Not needed at the moment
"""
Searches for existence of a matter by its name
Note - this is case-sensitive
:param service: Vault service object
:param matter_name: name of the matter to be searched
:return: True if exists, False otherwise.
"""
existing_matters = get_open_matters(service)
if any(matter_name == matter['name'] for matter in existing_matters):
return True
return False
def get_open_matters(service):
""" Gets first 10 matters """
open_matters = service.matters().list(state='OPEN').execute()
return open_matters
def get_matter_by_id(service, matter_id):
matter = service.matters().get(matterId=matter_id).execute()
return matter
def get_matters_by_state(service, state):
state = state.upper()
matter_state = state if state in ('OPEN', 'CLOSED', 'DELETED') else 'STATE_UNSPECIFIED'
matter_list = service.matters().list(state=matter_state).execute()
return matter_list
def delete_matter(service, matter_id):
_ = service.matters().delete(matterId=matter_id).execute()
    return get_matter_by_id(service, matter_id)  # Note - this is different from the other state updates
def close_matter(service, matter_id):
close_response = service.matters().close(matterId=matter_id, body={}).execute()
return close_response['matter']
def reopen_matter(service, matter_id):
reopen_response = service.matters().reopen(matterId=matter_id, body={}).execute()
return reopen_response['matter']
def undelete_matter(service, matter_id):
undeleted_matter = service.matters().undelete(matterId=matter_id, body={}).execute()
return undeleted_matter
def add_held_account(service, matter_id, hold_id, account_id):
held_account = {'accountId': account_id}
return service.matters().holds().accounts().create(matterId=matter_id, holdId=hold_id, body=held_account).execute()
def remove_held_account(service, matter_id, hold_id, account_id):
return service.matters().holds().accounts().delete(matterId=matter_id, holdId=hold_id,
accountId=account_id).execute()
def remove_hold(service, matter_id, hold_id):
return service.matters().holds().delete(matterId=matter_id, holdId=hold_id).execute()
def list_holds(service, matter_id):
"""
Return a list of existing holds
"""
done_paginating = False
response = service.matters().holds().list(matterId=matter_id).execute()
# append first page:
    the_holds = response.get('holds', [])
# Keep paginating and appending:
while not done_paginating:
if 'nextPageToken' in response:
            response = service.matters().holds().list(matterId=matter_id, pageSize=10, pageToken=response['nextPageToken']).execute()
            the_holds.extend(response.get('holds', []))
else:
done_paginating = True
return the_holds
def timeframe_to_utc_zulu_range(timeframe_str):
"""
Converts a time-frame to UTC Zulu format that can be used for startTime and endTime in various Google Vault requests.
"""
try:
parsed_str = dateparser.parse(timeframe_str)
end_time = datetime.utcnow().isoformat() + 'Z' # Current time
start_time = parsed_str.isoformat() + 'Z'
return (start_time, end_time)
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to parse date correctly: {}'.format(err_msg))
else:
raise ex
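# Example (illustrative): a relative expression such as '10 days ago' -- anything dateparser can
# parse -- is turned into a (start_time, end_time) pair of ISO-8601 strings with a trailing 'Z',
# where end_time is the current UTC time.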
def create_hold_query(hold_name, corpus, accounts, terms, time_frame="", start_time="", end_time=""):
"""
Creates the query that will be used to request the creation of a new hold. Returns the ready-to-be-sent request.
"""
# --- Sanitizing Input ---
corpus = corpus.upper()
if time_frame:
start_time, end_time = timeframe_to_utc_zulu_range(time_frame) # Making it UTC Zulu format
elif start_time:
if not end_time:
end_time = datetime.utcnow().isoformat() + 'Z' # End time will be now, if no end time was given
    if isinstance(accounts, (str, unicode)):
accounts = accounts.split(',')
# --- Building Request ---
request = {}
mail_query = {} # type: Dict[Any, Any]
accounts_for_query = []
if not terms:
if start_time and end_time:
mail_query = {'startTime': start_time, 'endTime': end_time}
else:
if start_time and end_time:
mail_query = {'startTime': start_time, 'endTime': end_time, 'terms': terms}
# --- Building all small parts into big request object ---
request['name'] = hold_name
request['corpus'] = corpus
if mail_query:
request['query'] = {'mailQuery': mail_query} # Adding the ready mail query
for acc_id in accounts:
accounts_for_query.append({'accountId': acc_id})
request['accounts'] = accounts_for_query
return request
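# Sketch of the hold request body built above (values are illustrative):
#   {
#       "name": "my-hold",
#       "corpus": "MAIL",
#       "query": {"mailQuery": {"startTime": "...Z", "endTime": "...Z", "terms": "subject:phish"}},
#       "accounts": [{"accountId": "<account-id-1>"}, {"accountId": "<account-id-2>"}]
#   }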
def create_hold_mail_accounts(service, matter_id, request_body):
"""
Creates a hold in Google Vault
"""
return service.matters().holds().create(matterId=matter_id, body=request_body).execute()
def create_export(service, matter, request_body):
"""
Creates an export in the given matter, with the given request_body (which is the actual JSON for the request).
"""
return service.matters().exports().create(matterId=matter, body=request_body).execute()
def create_mail_export_query(export_name, emails, time_frame, start_time, end_time, terms, org_unit="",
export_pst='True', export_mbox='False', search_method='All Accounts',
include_drafts='True', data_scope='All Data'):
"""
Creates the query that will be used in the request to create a mail export
"""
org_unit_id = org_unit
# --- Sanitizing Input ---
exclude_drafts = 'false'
if time_frame:
start_time, end_time = timeframe_to_utc_zulu_range(time_frame) # Making it UTC Zulu format
elif start_time:
if not end_time:
end_time = datetime.utcnow().isoformat() + 'Z' # End time will be now, if no end time was given
if isinstance(emails, (str, unicode)):
if ',' in emails:
emails = emails.split(',')
else:
emails = [emails]
if str(include_drafts).upper() == 'FALSE':
exclude_drafts = 'true'
if data_scope.upper() == 'HELD DATA':
data_scope = 'HELD_DATA'
if data_scope.upper() == 'ALL DATA':
data_scope = 'ALL_DATA'
if data_scope.upper() == 'UNPROCESSED DATA':
data_scope = 'UNPROCESSED_DATA'
if search_method.upper() == 'ORGANIZATIONAL UNIT(REQUIRES OU ARGUMENT)':
search_method = 'ORG_UNIT'
if search_method.upper() == 'ALL ACCOUNTS':
search_method = 'ENTIRE_ORG'
if search_method.upper() == 'SPECIFIC ACCOUNTS(REQUIRES EMAILS ARGUMENT)':
search_method = 'ACCOUNT'
# --- Building Request ---
request = {}
query = {}
emails_for_query = []
account_info = {'emails': []} # type: Dict[Any, Any]
org_unit_info = {'orgUnitId': org_unit_id}
corpus = 'MAIL'
export_format = 'PST' # Default
if export_mbox.upper() == 'TRUE':
export_format = 'MBOX'
mail_options = {
'exportFormat': export_format
}
# --- Building all small parts into big request object ---
query['dataScope'] = data_scope
query['searchMethod'] = search_method
query['corpus'] = corpus
query['mailOptions'] = {'excludeDrafts': exclude_drafts}
if start_time and end_time:
query['startTime'] = start_time
query['endTime'] = end_time
if terms:
query['terms'] = terms
if emails: # If user specified emails
for email in emails: # Go over all of them
emails_for_query.append(email) # Add them to the list
account_info['emails'] = emails_for_query # Add the list to the account_info dictionary
query['accountInfo'] = account_info # Add the account_info dictionary into the query object
if search_method == 'ORG_UNIT':
query['orgUnitInfo'] = org_unit_info
request['query'] = query # Adding query AFTER IT'S COMPLETED
request['exportOptions'] = {'mailOptions': mail_options}
request['name'] = export_name
return request
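# Sketch of the mail export request body built above (values are illustrative):
#   {
#       "name": "my-export",
#       "query": {
#           "dataScope": "ALL_DATA", "searchMethod": "ACCOUNT", "corpus": "MAIL",
#           "mailOptions": {"excludeDrafts": "false"},
#           "accountInfo": {"emails": ["user@example.com"]}
#       },
#       "exportOptions": {"mailOptions": {"exportFormat": "PST"}}
#   }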
def create_drive_export_query(export_name, emails, team_drives, time_frame, start_time, end_time, terms, org_unit="",
search_method='Specific Accounts(requires emails argument)', include_teamdrives='True',
data_scope='All Data'):
"""
    Creates the query that will be used in the request to create a drive export
"""
org_unit_id = org_unit
# --- Sanitizing Input ---
if time_frame:
start_time, end_time = timeframe_to_utc_zulu_range(time_frame) # Making it UTC Zulu format
elif start_time:
if not end_time:
end_time = datetime.utcnow().isoformat() + 'Z' # End time will be now, if no end time was given
if isinstance(emails, (str, unicode)): # If emails were specified, making it a list:
if ',' in emails:
emails = emails.split(',')
else:
emails = [emails]
if isinstance(team_drives, (str, unicode)): # If team_drives were specified, making it a list:
if ',' in team_drives:
team_drives = team_drives.split(',')
else:
team_drives = [team_drives]
    if str(include_teamdrives).upper() == 'FALSE':
        include_teamdrives = 'false'
    else:
        include_teamdrives = 'true'
if data_scope.upper() == 'HELD DATA':
data_scope = 'HELD_DATA'
if data_scope.upper() == 'ALL DATA':
data_scope = 'ALL_DATA'
if data_scope.upper() == 'UNPROCESSED DATA':
data_scope = 'UNPROCESSED_DATA'
if search_method.upper() == 'ORGANIZATIONAL UNIT(REQUIRES OU ARGUMENT)':
search_method = 'ORG_UNIT'
if search_method.upper() == 'SPECIFIC ACCOUNTS(REQUIRES EMAILS ARGUMENT)':
search_method = 'ACCOUNT'
if search_method.upper() == 'TEAM DRIVE':
search_method = 'TEAM_DRIVE'
# --- Building Request ---
request = {}
query = {}
emails_for_query = []
teamdrives_for_query = []
account_info = {'emails': []} # type: Dict[Any, Any]
teamdrive_info = {'teamDriveIds': []} # type: Dict[Any, Any]
org_unit_info = {'orgUnitId': org_unit_id}
corpus = 'DRIVE'
# --- Building all small parts into big request object ---
query['dataScope'] = data_scope
query['searchMethod'] = search_method
query['corpus'] = corpus
query['driveOptions'] = {'includeTeamDrives': include_teamdrives}
if start_time and end_time:
query['startTime'] = start_time
query['endTime'] = end_time
if terms:
query['terms'] = terms
if emails: # If user specified emails
for email in emails: # Go over all of them
emails_for_query.append(email) # Add them to the list
account_info['emails'] = emails_for_query # Add the list to the account_info dictionary
if team_drives and include_teamdrives.upper() == 'TRUE': # If user specified team_drives and not emails
for teamdrive_id in team_drives:
teamdrives_for_query.append(teamdrive_id)
teamdrive_info['teamDriveIds'] = teamdrives_for_query
if search_method == 'ORG_UNIT':
query['orgUnitInfo'] = org_unit_info
if search_method == 'TEAM_DRIVE':
query['teamDriveInfo'] = teamdrive_info
if search_method == 'ACCOUNT':
# Add the account_info dictionary into the query object.
# This line SHOULD NOT exist if the user wants to use team_drives.
query['accountInfo'] = account_info
request['query'] = query # Adding query AFTER IT'S COMPLETED
request['name'] = export_name
return request
def create_groups_export_query(export_name, emails, time_frame, start_time, end_time, terms, search_method,
export_pst='True', export_mbox='False', data_scope='All Data'):
"""
Creates the query that will be used in the request to create a groups export
"""
# --- Sanitizing Input ---
if time_frame:
start_time, end_time = timeframe_to_utc_zulu_range(time_frame) # Making it UTC Zulu format
elif start_time:
if not end_time:
end_time = datetime.utcnow().isoformat() + 'Z' # End time will be now, if no end time was given
if isinstance(emails, (str, unicode)):
if ',' in emails:
emails = emails.split(',')
else:
emails = [emails]
if data_scope.upper() == 'HELD DATA':
data_scope = 'HELD_DATA'
if data_scope.upper() == 'ALL DATA':
data_scope = 'ALL_DATA'
if data_scope.upper() == 'UNPROCESSED DATA':
data_scope = 'UNPROCESSED_DATA'
# --- Building Request ---
request = {}
query = {}
emails_for_query = []
account_info = {'emails': []} # type: Dict[Any, Any]
corpus = 'GROUPS'
export_format = 'PST' # Default
if export_mbox.upper() == 'TRUE':
export_format = 'MBOX'
groups_options = {
'exportFormat': export_format
}
# --- Building all small parts into big request object ---
query['dataScope'] = data_scope
query['searchMethod'] = search_method
query['corpus'] = corpus
if start_time and end_time:
query['startTime'] = start_time
query['endTime'] = end_time
if terms:
query['terms'] = terms
if emails: # If user specified emails
for email in emails: # Go over all of them
emails_for_query.append(email) # Add them to the list
account_info['emails'] = emails_for_query # Add the list to the account_info dictionary
query['accountInfo'] = account_info # Add the account_info dictionary into the query object
request['query'] = query # Adding query AFTER IT'S COMPLETED
request['exportOptions'] = {'groupsOptions': groups_options}
request['name'] = export_name
return request
def get_export_by_id(service, matter_id, export_id):
return service.matters().exports().get(matterId=matter_id, exportId=export_id).execute()
def list_held_accounts(service, matter_id, hold_id):
return service.matters().holds().accounts().list(matterId=matter_id, holdId=hold_id).execute()['accounts']
def remove_held_accounts(service, matter_id, hold_id):
pass
def download_storage_object(object_ID, bucket_name):
service = connect_to_storage()
req = service.objects().get_media(bucket=bucket_name, object=object_ID) # pylint: disable=no-member
out_file = io.BytesIO()
downloader = googleapiclient.http.MediaIoBaseDownload(out_file, req)
done = False
while not done:
done = downloader.next_chunk()[1]
return out_file
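# The returned io.BytesIO buffer holds the whole object in memory; callers can obtain the raw bytes
# with out_file.getvalue().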
def get_storage_credentials():
try:
privateKeyJson = json.loads(PRIVATE_KEY_CONTENT)
if not isinstance(privateKeyJson, dict):
privateKeyJson = json.loads(privateKeyJson)
crads = google_service_account.Credentials.from_service_account_info(privateKeyJson, scopes=SCOPES,
subject=ADMIN_EMAIL)
except Exception as e:
LOG('An error occurred in the \'get_storage_credentials\' function.')
err_msg = 'An error occurred while trying to construct an OAuth2 ' \
'Storage Credentials object - {}'.format(str(e))
return_error(err_msg)
return crads
def connect_to_storage():
try:
creds = get_storage_credentials()
ptth = authorized_http(creds)
ptth.disable_ssl_certificate_validation = (not USE_SSL)
service = build('storage', 'v1', http=ptth)
except Exception as e:
LOG('There was an error creating the Storage service in the \'connect_to_storage\' function.')
err_msg = 'There was an error creating the Storage service - {}'.format(str(e))
return_error(err_msg)
return service
def get_object_mame_by_type(objectsArr, extension):
for file in objectsArr:
objName = str(file.get('objectName'))
if (objName.endswith(extension)):
return objName
def build_key_val_pair(tagDict):
    demisto.info('tag name (key): ')
    demisto.info(tagDict['@TagName'])
    demisto.info('tag value: ')
    demisto.info(tagDict['@TagValue'])
key = filter(str.isalnum, str(tagDict['@TagName']))
value = tagDict['@TagValue'].encode('utf-8')
keyValPair = {key: value}
return keyValPair
def build_document_dict(document):
file_info = document['Files']['File']['ExternalFile']
newDocumentDict = {
'DocType': os.path.splitext(file_info['@FileName'])[1][1:].strip().lower(),
'MD5': file_info['@Hash']
}
tags = document['Tags']['Tag']
for currentTagDict in tags:
newDocumentDict.update(build_key_val_pair(currentTagDict))
return newDocumentDict
def build_dict_list(documentsArr):
documentsDictList = []
for document in documentsArr:
currentDocumentDict = build_document_dict(document)
documentsDictList.append(currentDocumentDict)
return documentsDictList
def get_current_matter_from_context(matter_id):
context_matter = demisto.dt(demisto.context(), 'GoogleVault.Matter(val.MatterID === "{0}")'.format(matter_id))
context_matter = context_matter[0] if type(context_matter) is list else context_matter
if not context_matter:
context_matter = {
'MatterID': matter_id,
'Export': []
}
return context_matter
def populate_matter_with_export(current_matter, current_export):
# add new export to matter
exports = current_matter.get('Export', [])
if type(exports) is dict:
exports = [exports]
# remove duplicate export after new updated exports were entered
filtered_export = list(filter(lambda export:
export['ExportID'] != current_export['ExportID'],
exports))
filtered_export.append(current_export)
current_matter['Export'] = filtered_export
return current_matter
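# Together, the two helpers above keep the GoogleVault.Matter context entry in the shape the export
# commands below rely on, e.g. (illustrative values):
#   {"MatterID": "<matter-id>",
#    "Export": [{"MatterID": "<matter-id>", "ExportID": "<export-id>", "Name": "my-export", "CreateTime": "..."}]}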
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ ACTUAL FUNCS @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
def list_matters_command():
"""
Lists all matters in the project, with their corresponding state.
"""
try:
service = connect()
state = demisto.args().get('state', 'STATE_UNSPECIFIED')
validate_input_values([state], ['All', 'Open', 'Closed', 'Deleted', 'STATE_UNSPECIFIED', ''])
matters = (get_matters_by_state(service, state))['matters']
if not matters:
demisto.results('No matters found.')
else:
output = []
context_output = []
for matter in matters:
output.append({
'Matter Name': matter.get('name'),
'Matter ID': matter.get('matterId'),
'Matter State': matter.get('state')
})
context_output.append({
'Name': matter.get('name'),
'MatterID': matter.get('matterId'),
'State': matter.get('state') # Getting new state
})
markdown = '' # Use this to add extra line
title = ""
if state == 'All' or not state:
title = 'Here are all your matters'
else:
title = 'Here are your {} matters'.format(state.lower())
markdown += tableToMarkdown(title, output, ['Matter Name', 'Matter ID', 'Matter State'])
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': matters,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter(val.MatterID === obj.MatterID)': context_output
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to list matters. Error: {}'.format(err_msg))
else:
raise ex
def create_matter_command():
try:
service = connect()
matter_name = demisto.getArg('name')
matter_description = demisto.getArg('description')
matter_content = {
'name': matter_name,
'description': matter_description,
}
matter = service.matters().create(body=matter_content).execute() # pylint: disable=no-member
markdown = ""
if matter_description:
markdown = 'Matter: {} was created successfully with description: {}.\nID: {}.'.format(matter_name,
matter_description,
matter.get(
'matterId'))
else:
markdown = 'Matter: {} was created successfully without a description.\nID: {}.'.format(matter_name,
matter.get(
'matterId'))
title = 'Matter creation successful.'
markdown_matter = []
markdown_matter.append({
'Matter Name': matter.get('name'),
'Matter ID': matter.get('matterId'),
'Matter State': matter.get('state')
})
markdown += tableToMarkdown(title, markdown_matter, ['Matter Name', 'Matter ID',
'Matter State']) # Why is the title displayed in a weird way?
output_context = []
output_context.append({
'Name': matter.get('name'),
'MatterID': matter.get('matterId'),
'State': matter.get('state')
})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': matter,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter(val.MatterID === obj.MatterID)': output_context
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to create matter. Error: {}'.format(err_msg))
else:
raise ex
def update_matter_state_command():
"""
* Note: This updates context only if a change in the current state was successful
"""
try:
service = connect()
matter_id = demisto.getArg('matterID')
wanted_state = demisto.getArg('state')
validate_input_values([wanted_state], ['CLOSE', 'DELETE', 'REOPEN', 'UNDELETE'])
matter_found = get_matter_by_id(service, matter_id)
current_state = matter_found.get('state')
if current_state: # if a matter was found with that ID:
context_output = []
result_of_update = ""
# Dealing with CLOSE:
if wanted_state == 'CLOSE':
if current_state == 'DELETED':
result_of_update = 'Matter is deleted and so it cannot be closed. It is possible to re-open it ' \
'and then close.'
elif current_state == 'CLOSED':
demisto.results('Matter is already closed.')
elif current_state == 'OPEN':
try:
close_response = close_matter(service, matter_id)
result_of_update = 'Matter was successfully closed.'
except Exception as ex:
if 'Matters have users on hold' in str(ex):
demisto.log('{}'.format(ex))
return_error('The matter has holds that prevent it from being closed.')
elif 'Quota exceeded for quota metric' in str(ex):
return_error('Quota for Google Vault API exceeded')
else:
raise ex
# Dealing with DELETE:
elif wanted_state == 'DELETE':
if current_state == 'OPEN':
try:
# Todo: check if contains holds. If it does, return error to user
close_response = close_matter(service, matter_id) # noqa: F841
_ = delete_matter(service, matter_id)
result_of_update = 'Matter was {} and is now DELETED.'.format(current_state)
except Exception as ex:
if 'Matters have users on hold' in str(ex):
demisto.log('{}'.format(ex))
return_error('The matter has holds that prevent it from being deleted.')
elif 'Quota exceeded for quota metric' in str(ex):
return_error('Quota for Google Vault API exceeded')
else:
raise ex
elif current_state == 'CLOSED':
try:
_ = delete_matter(service, matter_id)
                        result_of_update = 'Matter was {} and is now DELETED.'.format(current_state)
except Exception as ex:
if 'Matters have users on hold' in str(ex):
demisto.log('{}'.format(ex))
return_error('The matter has holds that prevent it from being deleted.')
elif 'Quota exceeded for quota metric' in str(ex):
return_error('Quota for Google Vault API exceeded')
else:
raise ex
elif current_state == 'DELETED':
demisto.results('Matter is already deleted.')
# Dealing with REOPEN:
elif wanted_state == 'REOPEN':
if current_state == 'OPEN':
demisto.results('Matter is already open.')
elif current_state == 'CLOSED':
_ = reopen_matter(service, matter_id)
result_of_update = 'Matter was {} and is now OPEN.'.format(current_state)
elif current_state == 'DELETED':
_ = undelete_matter(service, matter_id)
_ = reopen_matter(service, matter_id)
result_of_update = 'Matter was {} and is now OPEN.'.format(current_state)
# Dealing with UNDELETE:
elif wanted_state == 'UNDELETE':
if current_state == 'OPEN':
demisto.results('Matter is already open.')
elif current_state == 'CLOSED':
demisto.results('Matter is closed at the moment.')
elif current_state == 'DELETED':
_ = undelete_matter(service, matter_id)
result_of_update = 'Matter was {} and is now CLOSED.'.format(current_state)
if result_of_update: # If an update was done then update context:
context_output.append({
'Name': matter_found.get('name'),
'MatterID': matter_found.get('matterId'),
'State': get_matter_by_id(service, matter_id).get('state') # Getting new state
})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': result_of_update,
'EntryContext': {
'GoogleVault.Matter(val.MatterID === obj.MatterID)': context_output
}
})
else:
demisto.results('No matter was found with that ID.') # Todo: never gets here. Gotta catch the exception
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to update matter. Error: {}'.format(err_msg))
else:
raise ex
def add_account_to_hold_command(): # Todo: Not sure if context is good (It works, but maybe not according to conventions)
try:
service = connect()
matter_id = demisto.getArg('matterID')
hold_id = demisto.getArg('holdID')
account_id = demisto.getArg('accountID')
_ = add_held_account(service, matter_id, hold_id, account_id)
msg_to_usr = 'Account {} was successfully added to hold {} in matter {}'.format(account_id, hold_id, matter_id)
context_output = []
context_output.append({
'ID': hold_id,
'matterID': matter_id,
'HeldAccount': {
'accountID': account_id,
'IsHeld': True
}
})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': msg_to_usr,
'EntryContext': {
'GoogleVault.Hold(val.ID === obj.ID)': context_output
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to add account to hold. Error: {}'.format(err_msg))
else:
raise ex
def search_matter_command():
"""
* This can be highly optimized. What it currently does is search ALL matters and then filter by name / ID
* If a matter with an ID is found, there's no need to keep on searching. This can be optimized too.
* Note - this is case INSENSITIVE. Searching for 'MatTER1' will find 'matter1' too.
"""
try:
service = connect()
wanted_name = demisto.getArg('matterName')
wanted_id = demisto.getArg('matterID')
if wanted_name or wanted_id:
if wanted_name:
wanted_name = wanted_name.lower()
if wanted_id:
wanted_id = wanted_id.lower()
else:
demisto.results('No name or ID were specified. Please specify at least one of them.')
sys.exit(0)
matters = get_matters_by_state(service, state='STATE_UNSPECIFIED')['matters']
output = []
markdown_matters = []
found_anything = False
for matter in matters:
if matter.get('name').lower() == wanted_name or matter.get('matterId').lower() == wanted_id:
found_anything = True
markdown_matters.append({
'Matter Name': matter.get('name'),
'Matter ID': matter.get('matterId'),
'Matter State': matter.get('state')
})
output.append({
'Name': matter.get('name'),
'MatterID': matter.get('matterId'),
'State': matter.get('state')
})
if not found_anything: # If finished for loop through matters and no matter was found
demisto.results('No matters found.')
else:
markdown = '' # Use this to add extra line
if wanted_name:
title = 'Here are matters that have the name {}'.format(wanted_name)
else:
title = 'Here is the matter with ID {}'.format(wanted_id)
markdown += tableToMarkdown(title, markdown_matters, ['Matter Name', 'Matter ID', 'Matter State'])
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': markdown_matters,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter(val.MatterID === obj.MatterID)': output
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to search matter. Error: {}'.format(err_msg))
else:
raise ex
def remove_account_from_hold_command():
try:
service = connect()
matter_id = demisto.getArg('matterID')
hold_id = demisto.getArg('holdID')
account_id = demisto.getArg('accountID')
_ = remove_held_account(service, matter_id, hold_id, account_id)
msg_to_usr = 'Account {} was successfully removed from hold {} in matter {}'.format(account_id, hold_id,
matter_id)
context_output = []
context_output.append({
'matterID': matter_id,
'ID': hold_id,
'HeldAccount': { # Does this allow only 1 HeldAccount to exist in a hold?
'ID': account_id,
'IsHeld': False
},
})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': msg_to_usr,
'EntryContext': {
'GoogleVault.Hold(val.ID === obj.ID)': context_output
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to remove account from hold. Error: {}'.format(err_msg))
else:
raise ex
def delete_hold_command():
try:
service = connect()
matter_id = demisto.getArg('matterID')
hold_id = demisto.getArg('holdID')
_ = remove_hold(service, matter_id, hold_id)
msg_to_usr = 'Hold {} was successfully deleted from matter {}'.format(hold_id, matter_id)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': msg_to_usr,
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to delete hold. Error: {}'.format(err_msg))
else:
raise ex
def list_holds_command():
try:
service = connect()
matter_id = demisto.getArg('matterID')
holds = list_holds(service, matter_id)
if not holds:
demisto.results('No holds found.')
else:
output = []
context_output = []
for hold in holds:
output.append({
'Matter ID': matter_id,
'Hold Name': hold.get('name'),
'Hold ID': hold.get('holdId')
})
context_output.append({
'name': hold.get('name'),
'ID': hold.get('holdId'),
'MatterID': matter_id
})
markdown = '' # Use this to add extra line
title = 'Here are all the holds under matter {}.'.format(matter_id)
markdown += tableToMarkdown(title, output, ['Hold Name', 'Hold ID', 'Matter ID'])
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': holds,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Hold(val.ID === obj.ID)': context_output
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to list holds. Error: {}'.format(err_msg))
else:
raise ex
def create_hold_command():
service = connect()
matter_id = demisto.getArg('matterID')
hold_name = demisto.getArg('holdName')
corpus = demisto.getArg('corpus')
accounts = demisto.getArg('accountID')
time_frame = demisto.getArg('timeFrame')
start_time = demisto.getArg('startTime')
end_time = demisto.getArg('endTime')
terms = demisto.getArg('terms')
validate_input_values([corpus], ['Mail', 'Drive', 'Groups'])
    query = create_hold_query(hold_name, corpus, accounts, terms, time_frame, start_time, end_time)
try:
response = create_hold_mail_accounts(service, matter_id, query)
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to create hold. Error: {}'.format(err_msg))
else:
raise ex
hold_id = response['holdId']
output = []
context_output = []
output.append({
'Hold Name': hold_name,
'Hold ID': hold_id
})
context_output.append({
'name': hold_name,
'ID': hold_id,
'matterID': matter_id
})
markdown = '' # Use this to add extra line
title = 'Here are the details of your newly created hold:'
markdown += tableToMarkdown(title, output, ['Hold Name', 'Hold ID'])
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': {'Hold Name': hold_name, 'Hold ID': hold_id},
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Hold(val.ID === obj.ID)': context_output
}
})
def create_mail_export_command():
"""
Creates a mail export in Google Vault
"""
service = connect()
matter_id = demisto.getArg('matterID')
export_name = demisto.getArg('exportName')
data_scope = demisto.getArg('dataScope')
search_method = demisto.getArg('searchMethod')
emails = demisto.getArg('emails')
include_drafts = demisto.getArg('includeDrafts')
start_time = demisto.getArg('startTime')
end_time = demisto.getArg('endTime')
time_frame = demisto.getArg('timeFrame')
terms = demisto.getArg('terms')
export_pst = demisto.getArg('exportPST')
export_mbox = demisto.getArg('exportMBOX')
org_unit = demisto.getArg('ou')
validate_input_values([include_drafts, export_pst, export_mbox], ['true', 'false', ''])
validate_input_values([data_scope], ['All Data', 'Held Data', 'Unprocessed Data'])
validate_input_values([search_method], ['All Accounts', 'Specific Accounts(requires emails argument)',
'Organizational Unit(requires ou argument)'])
query = create_mail_export_query(export_name, emails, time_frame, start_time, end_time, terms, org_unit, export_pst,
export_mbox, search_method, include_drafts, data_scope)
try:
response = create_export(service, matter_id, query)
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to create export. Error: {}'.format(err_msg))
else:
raise ex
create_time = response.get('createTime')
export_id = response.get('id')
title = 'A new export has been created successfully:\n'
output_for_markdown = { # This one is for tableToMarkdown to correctly map
'Matter ID': matter_id,
'Export ID': export_id,
'Export Name': export_name,
'Created Time': create_time
}
markdown = tableToMarkdown(title, output_for_markdown, ['Matter ID', 'Export ID', 'Export Name', 'Created Time'])
new_export = {
'MatterID': matter_id,
'ExportID': export_id,
'Name': export_name,
'CreateTime': create_time
}
context_matter = get_current_matter_from_context(matter_id)
new_matter = populate_matter_with_export(context_matter, new_export)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': response,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter(val.MatterID === "{0}")'.format(matter_id): new_matter
}
})
def create_drive_export_command():
service = connect()
matter_id = demisto.getArg('matterID')
export_name = demisto.getArg('exportName')
data_scope = demisto.getArg('dataScope')
search_method = demisto.getArg('searchMethod')
emails = demisto.getArg('emails')
org_unit = demisto.getArg('ou')
team_drives = demisto.getArg('teamDrive')
include_teamdrives = demisto.getArg('includeTeamDrives')
time_frame = demisto.getArg('timeFrame')
start_time = demisto.getArg('startTime')
end_time = demisto.getArg('endTime')
terms = demisto.getArg('terms')
validate_input_values([include_teamdrives], ['true', 'false', ''])
validate_input_values([data_scope], ['All Data', 'Held Data', 'Unprocessed Data'])
validate_input_values([search_method], ['Team Drive', 'Specific Accounts(requires emails argument)',
'Organizational Unit(requires ou argument)'])
query = create_drive_export_query(export_name, emails, team_drives, time_frame, start_time, end_time, terms,
org_unit, search_method, include_teamdrives, data_scope)
try:
response = create_export(service, matter_id, query)
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to create export. Error: {}'.format(err_msg))
else:
raise ex
create_time = response.get('createTime')
export_id = response.get('id')
new_export = {
'MatterID': matter_id,
'ExportID': export_id,
'Name': export_name,
'CreateTime': create_time
}
context_matter = get_current_matter_from_context(matter_id)
new_matter = populate_matter_with_export(context_matter, new_export)
title = 'A new export has been created successfully:\n'
output_for_markdown = { # This one is for tableToMarkdown to correctly map
'Matter ID': matter_id,
'Export ID': export_id,
'Export Name': export_name,
'Created Time': create_time
}
markdown = tableToMarkdown(title, output_for_markdown, ['Matter ID', 'Export ID', 'Export Name', 'Created Time'])
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': response,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter(val.MatterID === "{0}")'.format(matter_id): new_matter
}
})
def create_groups_export_command():
service = connect()
matter_id = demisto.getArg('matterID')
export_name = demisto.getArg('exportName')
data_scope = demisto.getArg('dataScope')
search_method = 'ACCOUNT' # Hard-coded only for groups export
emails = demisto.getArg('groups')
start_time = demisto.getArg('startTime')
end_time = demisto.getArg('endTime')
time_frame = demisto.getArg('timeFrame')
terms = demisto.getArg('terms')
export_pst = demisto.getArg('exportPST')
export_mbox = demisto.getArg('exportMBOX')
validate_input_values([export_pst, export_mbox], ['true', 'false', ''])
validate_input_values([data_scope], ['All Data', 'Held Data', 'Unprocessed Data'])
query = create_groups_export_query(export_name, emails, time_frame, start_time, end_time, terms, search_method,
export_pst, export_mbox, data_scope)
try:
response = create_export(service, matter_id, query)
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to create export. Error: {}'.format(err_msg))
else:
raise ex
create_time = response.get('createTime')
export_id = response.get('id')
new_export = {
'MatterID': matter_id,
'ExportID': export_id,
'Name': export_name,
'CreateTime': create_time
}
context_matter = get_current_matter_from_context(matter_id)
new_matter = populate_matter_with_export(context_matter, new_export)
title = 'A new export has been created successfully:\n'
output_for_markdown = { # This one is for tableToMarkdown to correctly map
'Matter ID': matter_id,
'Export ID': export_id,
'Export Name': export_name,
'Created Time': create_time
}
markdown = tableToMarkdown(title, output_for_markdown, ['Matter ID', 'Export ID', 'Export Name', 'Created Time'])
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': response,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter(val.MatterID === "{0}")'.format(matter_id): new_matter
}
})
def get_multiple_exports_command():
export_IDs = argToList(demisto.getArg('exportIDS'))
matter_id = demisto.getArg('matterId')
id_concatenation = demisto.getArg('queryIDS')
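    # queryIDS is an alternative to matterId/exportIDS: one argument carrying both IDs as "<MatterID>#<ExportID>"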
if id_concatenation:
if '#' not in id_concatenation:
return_error(
                'Should enter a concatenation of MatterID and ExportID with a "#" delimiter, such as: <Matter_ID>#<ExportID>')
matter_id, export_id = id_concatenation.split('#')
export_IDs = [export_id]
if not (matter_id and export_IDs):
        return_error('Missing parameter MatterID or ExportID')
current_matter = get_current_matter_from_context(matter_id)
for export_id in export_IDs:
new_export = get_export_command(export_id, matter_id)
current_matter = populate_matter_with_export(current_matter, new_export)
demisto.results({
'ContentsFormat': formats['text'],
'Contents': '',
'Type': entryTypes['note'],
'EntryContext': {
'GoogleVault.Matter(val.MatterID === "{0}")'.format(matter_id): current_matter
}
})
def get_export_command(export_id, matter_id):
service = connect()
try:
response = get_export_by_id(service, matter_id, export_id)
export_name = response.get('name')
export_status = response.get('status')
create_time = response.get('createTime')
bucket_name = response.get('cloudStorageSink').get('files')[0].get(
'bucketName') if export_status == 'COMPLETED' else ''
zip_object_name = get_object_mame_by_type(response.get('cloudStorageSink').get('files'),
'.zip') if export_status == 'COMPLETED' else ''
xml_object_name = get_object_mame_by_type(response.get('cloudStorageSink').get('files'),
'.xml') if export_status == 'COMPLETED' else ''
        title = 'Your export details:\n'
output_for_markdown = { # This one is for tableToMarkdown to correctly map
'Matter ID': matter_id,
'Export ID': export_id,
'Export Name': export_name,
'Status': export_status,
'Created Time': create_time,
'Bucket Name(for download)': bucket_name,
'Download ID': zip_object_name,
'View ID': xml_object_name
}
        if export_status == 'COMPLETED':
headers = ['Matter ID', 'Export ID', 'Export Name', 'Status', 'Created Time', 'Bucket Name(for download)',
'Download ID', 'View ID']
else:
headers = ['Matter ID', 'Export ID', 'Export Name', 'Status', 'Created Time']
markdown = tableToMarkdown(title, output_for_markdown, headers)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': response,
'HumanReadable': markdown,
})
export_status = {
'MatterID': matter_id,
'ExportID': export_id,
'ExportName': export_name,
'Status': export_status,
'BucketName': bucket_name,
'DownloadID': zip_object_name,
'ViewID': xml_object_name
}
return export_status
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to get export. Error: {}'.format(err_msg))
else:
raise ex
def download_export_command():
try:
bucket_name = demisto.getArg('bucketName')
download_ID = demisto.getArg('downloadID')
out_file = download_storage_object(download_ID, bucket_name)
demisto.results(fileResult(demisto.uniqueFile() + '.zip', out_file.getvalue()))
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to download export. Error: {}'.format(err_msg))
else:
raise ex
def download_and_sanitize_export_results(object_ID, bucket_name, max_results):
out_file = download_storage_object(object_ID, bucket_name)
out_file_json = json.loads(xml2json(out_file.getvalue()))
if not out_file_json['Root']['Batch'].get('Documents'):
demisto.results('The export given contains 0 documents')
sys.exit(0)
documents = out_file_json['Root']['Batch']['Documents']['Document']
    if isinstance(documents, dict):
documents = [documents]
dictList = build_dict_list(documents)
if len(dictList) > max_results:
return dictList[0:max_results]
return dictList
def get_drive_results_command():
try:
max_results = int(demisto.getArg('maxResult'))
view_ID = demisto.getArg('viewID')
bucket_name = demisto.getArg('bucketName')
output = download_and_sanitize_export_results(view_ID, bucket_name, max_results)
if not (output[0].get('Author') or output[0].get('Collaborators') or output[0].get('Title')):
return_error(
'Error displaying results: Corpus of the invoked command and the supplied ViewID does not match')
        markedown_output = list(map(lambda document: {
'Title': document.get('Title'),
'Author': document.get('Author'),
'Collaborators': document.get('Collaborators'),
'Others': document.get('Others'),
'DateCreated': document.get('DateCreated'),
'DateModified': document.get('DateModified'),
'DocType': document.get('DocType'),
'MD5': document.get('MD5'),
        }, output))
title = 'Your DRIVE inquiry details\n'
headers = ['Title', 'Author', 'Collaborators', 'Others', 'Labels', 'Viewers', 'DateCreated', 'DateModified',
'DocType', 'MD5']
markdown = tableToMarkdown(title, markedown_output, headers)
exportID = str(view_ID).split('/')[1]
contextOutput = {'ExportID': exportID, 'Results': markedown_output}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': contextOutput,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter.Export(val.ExportID === obj.ExportID)': contextOutput
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to display export result. Error: {}'.format(err_msg))
else:
raise ex
def get_mail_and_groups_results_command(inquiryType):
try:
max_results = int(demisto.getArg('maxResult'))
view_ID = demisto.getArg('viewID')
bucket_name = demisto.getArg('bucketName')
output = download_and_sanitize_export_results(view_ID, bucket_name, max_results)
if not (output[0].get('From') or output[0].get('To') or output[0].get('Subject')):
return_error(
'Error displaying results: Corpus of the invoked command and the supplied ViewID does not match')
        markedown_output = list(map(lambda document: {
'From': document.get('From'),
'To': document.get('To'),
'CC': document.get('CC'),
'BCC': document.get('BCC'),
'Subject': document.get('Subject'),
'DateSent': document.get('DateSent'),
'DateReceived': document.get('DateReceived'),
        }, output))
title = 'Your {} inquiry details\n'.format(inquiryType)
headers = ['Subject', 'From', 'To', 'CC', 'BCC', 'DateSent']
markdown = tableToMarkdown(title, markedown_output, headers)
exportID = str(view_ID).split('/')[1]
contextOutput = {'ExportID': exportID, 'Results': markedown_output}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': contextOutput,
'HumanReadable': markdown,
'EntryContext': {
'GoogleVault.Matter.Export(val.ExportID === obj.ExportID)': contextOutput
}
})
except Exception as ex:
err_msg = str(ex)
if 'Quota exceeded for quota metric' in err_msg:
err_msg = 'Quota for Google Vault API exceeded'
return_error('Unable to display export result. Error: {}'.format(err_msg))
else:
raise ex
def test_module():
"""
This is the call made when pressing the integration test button.
"""
try:
service = connect()
get_matters_by_state(service, 'STATE_UNSPECIFIED')
demisto.results('ok')
sys.exit(0)
except Exception as ex:
if 'Quota exceeded for quota metric' in str(ex):
return_error('Quota for Google Vault API exceeded')
else:
return_error(str(ex))
def main():
"""Main Execution Block"""
try:
handle_proxy()
# @@@@@@@@ DEMISTO COMMANDS @@@@@@@@
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
test_module()
elif demisto.command() == 'gvault-list-matters':
list_matters_command()
elif demisto.command() == 'gvault-create-matter':
create_matter_command()
elif demisto.command() == 'gvault-matter-update-state':
update_matter_state_command()
elif demisto.command() == 'gvault-add-heldAccount':
add_account_to_hold_command()
elif demisto.command() == 'gvault-get-matter':
search_matter_command()
elif demisto.command() == 'gvault-remove-heldAccount':
remove_account_from_hold_command()
elif demisto.command() == 'gvault-delete-hold':
delete_hold_command()
elif demisto.command() == 'gvault-list-holds':
list_holds_command()
elif demisto.command() == 'gvault-create-hold':
create_hold_command()
elif demisto.command() == 'gvault-create-export-mail':
create_mail_export_command()
elif demisto.command() == 'gvault-create-export-drive':
create_drive_export_command()
elif demisto.command() == 'gvault-create-export-groups':
create_groups_export_command()
elif demisto.command() == 'gvault-export-status':
get_multiple_exports_command()
elif demisto.command() == 'gvault-download-results':
download_export_command()
elif demisto.command() == 'gvault-get-drive-results':
get_drive_results_command()
elif demisto.command() == 'gvault-get-mail-results':
get_mail_and_groups_results_command('MAIL')
elif demisto.command() == 'gvault-get-groups-results':
get_mail_and_groups_results_command('GROUPS')
except Exception as e:
return_error(str(e))
# python2 uses __builtin__ python3 uses builtins
if __name__ == '__builtin__' or __name__ == 'builtins':
main()
|
py | 1a40f73e12e81550b5123c396890518904e07caf | from armulator.armv6.opcodes.abstract_opcodes.ldr_register_thumb import LdrRegisterThumb
from armulator.armv6.opcodes.opcode import Opcode
from armulator.armv6.shift import SRType
class LdrRegisterThumbT2(LdrRegisterThumb, Opcode):
def __init__(self, instruction, m, t, n, shift_t, shift_n):
Opcode.__init__(self, instruction)
LdrRegisterThumb.__init__(self, m, t, n, shift_t, shift_n)
def is_pc_changing_opcode(self):
return self.t == 15
@staticmethod
def from_bitarray(instr, processor):
rm = instr[28:32]
imm2 = instr[26:28]
rt = instr[16:20]
rn = instr[12:16]
if rm.uint in (13, 15) or (rt.uint == 15 and processor.in_it_block() and not processor.last_in_it_block()):
            print("unpredictable")
else:
return LdrRegisterThumbT2(instr, **{"m": rm.uint, "t": rt.uint, "n": rn.uint,
"shift_t": SRType.SRType_LSL, "shift_n": imm2.uint})
|
py | 1a40f75ac4b55923e37b6a614dd2a840256bc3fc | import torch
from torch import nn
import torch.nn.functional as F
class EmbedVector(nn.Module):
def __init__(self, config):
super(EmbedVector, self).__init__()
self.config = config
target_size = config.label
self.embed = nn.Embedding(config.words_num, config.words_dim)
        if not config.train_embed:
self.embed.weight.requires_grad = False
if config.qa_mode.upper() == 'LSTM':
self.lstm = nn.LSTM(input_size=config.words_dim,
hidden_size=config.hidden_size,
num_layers=config.num_layer,
dropout=config.rnn_dropout,
bidirectional=True)
elif config.qa_mode.upper() == 'GRU':
self.gru = nn.GRU(input_size=config.words_dim,
hidden_size=config.hidden_size,
num_layers=config.num_layer,
dropout=config.rnn_dropout,
bidirectional=True)
self.dropout = nn.Dropout(p=config.rnn_fc_dropout)
self.nonlinear = nn.Tanh()
#self.attn = nn.Sequential(
# nn.Linear(config.hidden_size * 2 + config.words_dim, config.hidden_size),
# self.nonlinear,
# nn.Linear(config.hidden_size, 1)
#)
self.hidden2tag = nn.Sequential(
#nn.Linear(config.hidden_size * 2 + config.words_dim, config.hidden_size * 2),
nn.Linear(config.hidden_size * 2, config.hidden_size * 2),
nn.BatchNorm1d(config.hidden_size * 2),
self.nonlinear,
self.dropout,
nn.Linear(config.hidden_size * 2, target_size)
)
def forward(self, x):
# x = (sequence length, batch_size, dimension of embedding)
text = x.text
x = self.embed(text)
num_word, batch_size, words_dim = x.size()
# h0 / c0 = (layer*direction, batch_size, hidden_dim)
if self.config.qa_mode.upper() == 'LSTM':
outputs, (ht, ct) = self.lstm(x)
elif self.config.qa_mode.upper() == 'GRU':
outputs, ht = self.gru(x)
else:
print("Wrong Entity Prediction Mode")
exit(1)
outputs = outputs.view(-1, outputs.size(2))
#x = x.view(-1, words_dim)
#attn_weights = F.softmax(self.attn(torch.cat((x, outputs), 1)), dim=0)
#attn_applied = torch.bmm(torch.diag(attn_weights[:, 0]).unsqueeze(0), outputs.unsqueeze(0))
#outputs = torch.cat((x, attn_applied.squeeze(0)), 1)
tags = self.hidden2tag(outputs).view(num_word, batch_size, -1)
scores = nn.functional.normalize(torch.mean(tags, dim=0), dim=1)
return scores |
py | 1a40f834356abfea18709ddec61980f35b5f1623 | STEMS = [
('кон', ['конят', 'коня']),
('стол', ['столът']),
('хълм', ['хълма']),
('кола', ['колата', 'колите']),
('колело', ['колелото']),
('маса', ['маси']),
('стол', ['столове']),
('легло', ['легла']),
('чайник', ['чайници']),
('апарат', ['апарати']),
('дърво', ['дървета']),
('цвете', ['цветя']),
('самурай', ['самураи']),
('батерия', ['батерии']),
('чайник', ['чайниците']),
('метър', ['метри', 'метра', 'метрите']),
('километър', ['километри', 'километра', 'километрите']),
('квадратен', ['квадратна', 'квадратно', 'квадратни']),
('вървя', ['вървиш', 'върви', 'вървим', 'вървите', 'вървят']),
('мета', ['метат', 'метеш', 'мете', 'метем', 'метете']),
('рисувам', ['рисуват', 'рисуваш', 'рисува', 'рисуваме', 'рисувате']),
('стрелям', ['стрелят', 'стреляш', 'стреля', 'стреляме', 'стреляте']),
('чета', ['четоха', 'четох', 'чете', 'четохме', 'четохте']),
('говоря', ['говориха', 'говорих', 'говори', 'говорихме', 'говорихме']),
('рисувам', ['рисуваха', 'рисувах', 'рисува', 'рисувахме', 'рисувахте']),
('стрелям', ['стреляха', 'стрелях', 'стреля', 'стреляхме', 'стреляхте']),
('чета', ['четяха', 'четях', 'четеше', 'четяхме', 'четяхте']),
('говоря', ['говореха', 'говорех', 'говореше', 'говорехме', 'говорехте']),
(None, ['я']),
('отивам', ['отиваха', 'отиваше', 'отивах', 'отивахме', 'отивахте']),
('стрелям', ['стреляше']),
('чета', ['чели', 'чел', 'чела', 'чело']),
('чеша', ['чесали', 'чесал', 'чесала', 'чесало']),
('рисувам', ['рисували', 'рисувал', 'рисувала', 'рисувало']),
('стрелям', ['стреляли', 'стрелял', 'стреляла', 'стреляло']),
('говоря', ['говорили', 'говорил', 'говорила', 'говорило']),
('вампир', ['вампирката', 'вампир', 'вампирка']),
('красив', ['красивият', 'красива', 'красивата', 'красиви', 'красивите']),
('гладен', ['гладният', 'гладната', 'гладните', 'гладното']),
('археолог', ['археолози']),
('космически', ['космическа']),
('отивам', ['отишли', 'отишъл', 'отишла', 'отишло', 'отидохме', 'отидоха', 'отидохте']),
]
|
py | 1a40fa5ac1cecce6bafea8ac13466c680bb52ee3 | import sys
import palmettopy.exceptions
from palmettopy.palmetto import Palmetto
words = ["cherry", "pie", "cr_eam", "apple", "orange", "banana",
"pineapple", "plum", "pig", "cra_cker", "so_und", "kit"]
palmetto = Palmetto()
try:
result = palmetto.get_df_for_words(words)
sys.exit(0)
except palmettopy.exceptions.EndpointDown:
sys.exit(1)
|
py | 1a40fb383216d03be4db04a425e1880db31d9774 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from delegation_set import *
from health_check import *
from query_log import *
from record import *
from zone import *
from zone_association import *
from get_zone import *
|
py | 1a40fbeec47f467339597f4a1c8a838b58586621 | import re
import vbox.base
from . import (
base,
props,
exceptions,
)
class HostDevice(base.SubEntity):
state = property(lambda s: s.getPayload()["Current State"].lower())
product = property(lambda s: s.getPayload()["Product"])
manufacturer = property(lambda s: s.getPayload()["Manufacturer"])
productId = property(lambda s: int(s.getPayload()["ProductId"].split()[0], 16))
vendorId = property(lambda s: int(s.getPayload()["VendorId"].split()[0], 16))
def __init__(self, parent, uuid):
super(HostDevice, self).__init__(parent)
self.UUID = uuid
def _getPayload():
for rec in self.source.getHostDevices():
if rec["UUID"] == self.UUID:
return dict(rec)
raise KeyError(self.UUID)
self.getPayload = base.ProxyRefreshTrail(
_getPayload, depends=(self.source.getHostDevices, )
)
def __repr__(self):
try:
payload = self.getPayload()
except KeyError:
payload = None
return "<{} payload={}>".format(self.__class__.__name__, payload)
class VmDevice(base.SubEntity):
UUID = property(lambda s: s.source.info["USBAttachedUUID" + s.idx])
vendorId = property(lambda s: int(s.source.info["USBAttachedVendorId" + s.idx], 16))
productId = property(lambda s: int(s.source.info["USBAttachedProductId" + s.idx], 16))
revisionId = property(lambda s: int(s.source.info["USBAttachedRevision" + s.idx], 16))
manufacturer = property(lambda s: s.source.info["USBAttachedManufacturer" + s.idx])
product = property(lambda s: s.source.info["USBAttachedProduct" + s.idx])
address = property(lambda s: s.source.info["USBAttachedAddress" + s.idx])
state = property(lambda s: "attached")
def __init__(self, parent, idx):
super(VmDevice, self).__init__(parent)
self.idx = idx
class VmUsb(base.SubEntity):
enabled = props.OnOff(**props.modify("usb"))
ehci = props.OnOff(**props.modify("usbehci")) # Enables/disable usb 2.0
def attach(self, device):
if device.state == "attached":
raise Exception("This USB device is already attached.")
target = device.UUID
self.source.usbAttach(target)
# notify the device backend that it should be refreshed.
device.source.clearCache()
for el in self.devices:
if el.UUID == target:
return el
else:
raise Exception("Device {!r} that was previously attached is now lost.".format(target))
@props.SourceProperty
def devices(self):
matcher = re.compile(r"^USBAttachedUUID(\d+)$")
foundIds = []
for key in self.source.info.iterkeys():
match = matcher.match(key)
if match:
foundIds.append(match.group(1))
return [
VmDevice(self, uuid)
for uuid in foundIds
]
class Library(base.Library):
@props.SourceProperty
def hostDevices(self):
return [
HostDevice(self, rec["UUID"])
for rec in self.source.getHostDevices()
] |
py | 1a40fc391712dd7f8277b2c785a035d0d573527b | # print() is the function we use when we want to print something to the output
print("My name is Dhruv")
#You will notice something strange if you try to print any directory
#print("C:\Users\dhruv\Desktop\dhruv.github.io")
#Yes unicodeescape error
# Remember I told you about escape characters in the previous tutorial
# yes, that is what is causing the problem here
# now place "r" at the start of the string
print(r"C:\Users\dhruv\Desktop\dhruv.github.io")
# yes, it is printed
# so what does r mean? r means raw string
# it means "take the string as it is; give no special meaning to escape characters in this particular string"
# One amazing thing you can do is store strings in variables
# You can also add and multiply strings
myname = "Dhruv "
print(myname + "Patel")
print(myname * 5)
# now press run
# Do check my shell file for reference
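# A small extra example (not part of the original tutorial) showing the difference between
# an escape sequence and a raw string
print("Line1\nLine2")      # \n is treated as a newline escape here
print(r"Line1\nLine2")     # the raw string keeps the backslash and the "n" exactly as typed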
|
py | 1a40fd5b5f7de3090f47dd00b63d7b078dc161c5 | name = "sas7bdat_converter"
|
py | 1a40fe550be61c13074ccd41e6aa51f5011b28b8 | """
The tests in this package are to ensure the proper resultant dtypes of
set operations.
"""
import numpy as np
import pytest
from pandas.core.dtypes.common import is_dtype_equal
import pandas as pd
from pandas import Float64Index, Int64Index, RangeIndex, UInt64Index
import pandas._testing as tm
from pandas.api.types import pandas_dtype
COMPATIBLE_INCONSISTENT_PAIRS = {
(Int64Index, RangeIndex): (tm.makeIntIndex, tm.makeRangeIndex),
(Float64Index, Int64Index): (tm.makeFloatIndex, tm.makeIntIndex),
(Float64Index, RangeIndex): (tm.makeFloatIndex, tm.makeIntIndex),
(Float64Index, UInt64Index): (tm.makeFloatIndex, tm.makeUIntIndex),
}
def test_union_same_types(index):
# Union with a non-unique, non-monotonic index raises error
# Only needed for bool index factory
idx1 = index.sort_values()
idx2 = index.sort_values()
assert idx1.union(idx2).dtype == idx1.dtype
def test_union_different_types(index, index_fixture2):
# This test only considers combinations of indices
# GH 23525
idx1, idx2 = index, index_fixture2
type_pair = tuple(sorted([type(idx1), type(idx2)], key=lambda x: str(x)))
if type_pair in COMPATIBLE_INCONSISTENT_PAIRS:
pytest.xfail("This test only considers non compatible indexes.")
if any(isinstance(idx, pd.MultiIndex) for idx in (idx1, idx2)):
        pytest.xfail("This test doesn't consider MultiIndexes.")
if is_dtype_equal(idx1.dtype, idx2.dtype):
pytest.xfail("This test only considers non matching dtypes.")
# A union with a CategoricalIndex (even as dtype('O')) and a
# non-CategoricalIndex can only be made if both indices are monotonic.
# This is true before this PR as well.
# Union with a non-unique, non-monotonic index raises error
# This applies to the boolean index
idx1 = idx1.sort_values()
idx2 = idx2.sort_values()
assert idx1.union(idx2).dtype == np.dtype("O")
assert idx2.union(idx1).dtype == np.dtype("O")
@pytest.mark.parametrize("idx_fact1,idx_fact2", COMPATIBLE_INCONSISTENT_PAIRS.values())
def test_compatible_inconsistent_pairs(idx_fact1, idx_fact2):
# GH 23525
idx1 = idx_fact1(10)
idx2 = idx_fact2(20)
res1 = idx1.union(idx2)
res2 = idx2.union(idx1)
assert res1.dtype in (idx1.dtype, idx2.dtype)
assert res2.dtype in (idx1.dtype, idx2.dtype)
@pytest.mark.parametrize(
"left, right, expected",
[
("int64", "int64", "int64"),
("int64", "uint64", "object"),
("int64", "float64", "float64"),
("uint64", "float64", "float64"),
("uint64", "uint64", "uint64"),
("float64", "float64", "float64"),
("datetime64[ns]", "int64", "object"),
("datetime64[ns]", "uint64", "object"),
("datetime64[ns]", "float64", "object"),
("datetime64[ns, CET]", "int64", "object"),
("datetime64[ns, CET]", "uint64", "object"),
("datetime64[ns, CET]", "float64", "object"),
("Period[D]", "int64", "object"),
("Period[D]", "uint64", "object"),
("Period[D]", "float64", "object"),
],
)
def test_union_dtypes(left, right, expected):
left = pandas_dtype(left)
right = pandas_dtype(right)
a = pd.Index([], dtype=left)
b = pd.Index([], dtype=right)
result = (a | b).dtype
assert result == expected
|
py | 1a40feb3e8f4657f0c48f9b6284b5a50d08228e2 | import sys, imp
###
def DoImport(name, paths, namesysprefix):
# Fast path: see if the module has already been imported.
try:
return sys.modules[namesysprefix + name]
except KeyError:
pass
fp, pathname, description = imp.find_module(name, paths)
try:
return imp.load_module(namesysprefix + name, fp, pathname, description)
finally:
# Since we may exit via an exception, close fp explicitly.
if fp:
fp.close()
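# Illustrative usage (an assumption, not part of the original helper): load "plugin_foo" from a
# plugins directory under a "loaded_" name prefix; repeated calls hit the sys.modules fast path.
# mod = DoImport("plugin_foo", ["./plugins"], "loaded_")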
|
py | 1a41000bcbf7153b612072b8d0253a21293e1dcf | # coding:utf-8
from schemaobject.collections import OrderedDict
def column_schema_builder(table):
"""
    Returns a dictionary loaded with all of the columns available in the table.
``table`` must be an instance of TableSchema.
.. note::
This function is automatically called for you and set to
``schema.databases[name].tables[name].columns``
when you create an instance of SchemaObject
"""
conn = table.parent.parent.connection
cols = OrderedDict()
sql = """
SELECT TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT,
IS_NULLABLE, COLUMN_TYPE, COLUMN_KEY, CHARACTER_MAXIMUM_LENGTH,
CHARACTER_SET_NAME, COLLATION_NAME, EXTRA, COLUMN_COMMENT
FROM information_schema.COLUMNS
WHERE TABLE_SCHEMA='%s'
AND TABLE_NAME='%s'
ORDER BY ORDINAL_POSITION
"""
columns = conn.execute(sql % (table.parent.name, table.name))
if not columns:
return cols
for col in columns:
field = col['COLUMN_NAME']
column = ColumnSchema(name=field, parent=table)
column.ordinal_position = col['ORDINAL_POSITION']
column.field = col['COLUMN_NAME']
column.type = col['COLUMN_TYPE']
column.charset = col['CHARACTER_SET_NAME']
column.collation = col['COLLATION_NAME']
column.key = col['COLUMN_KEY']
column.default = col['COLUMN_DEFAULT']
column.extra = col['EXTRA']
column.comment = col['COLUMN_COMMENT']
if col['IS_NULLABLE'] == "YES":
column.null = True
else:
column.null = False
cols[field] = column
return cols
class ColumnSchema(object):
"""
Object representation of a single column.
Supports equality and inequality comparison of ColumnSchema.
``name`` is the column name.
``parent`` is an instance of TableSchema
.. note::
ColumnSchema objects are automatically created for you by column_schema_builder
and loaded under ``schema.databases[name].tables[name].columns``
.. note::
Attributes ``key``, ``comment`` are ignored in ``__eq__``, ``__neq__`` comparisons.
Example
>>> schema.databases['sakila'].tables['rental'].columns.keys()
['rental_id', 'rental_date', 'inventory_id', 'customer_id', 'return_date', 'staff_id', 'last_update']
Column Attributes
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].name
'rental_id'
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].field
'rental_id'
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].ordinal_position
1L
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].type
'INT(11)'
>>> schema.databases['sakila'].tables['staff'].columns['password'].charset
'utf8'
>>> schema.databases['sakila'].tables['staff'].columns['password'].collation
'utf8_bin'
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].null
False
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].key
'PRI'
>>> schema.databases['sakila'].tables['rental'].columns['last_update'].default
'CURRENT_TIMESTAMP'
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].extra
'auto_increment'
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].comment
''
"""
def __init__(self, name, parent):
self.parent = parent
self.name = name
self.field = name # alias for name, following mysql spec
self.ordinal_position = 0
self.type = None
self.charset = None
self.collation = None
self.null = None
self.key = None
self.default = None
self.extra = None
self.comment = None
def define(self, after=None, with_comment=False):
"""
Generate the SQL for this column definition.
``after`` is the name(string) of the column this should appear after.
If ``after`` is None, ``FIRST`` is used.
``with_comment`` boolean, add column comment to sql statement
>>> schema.databases['sakila'].tables['rental'].columns['last_update'].define(after="staff_id")
'`last_update` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP AFTER `staff_id`'
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].define()
'`rental_id` INT(11) NOT NULL auto_increment FIRST'
"""
sql = ["`%s` %s" % (self.field, self.type)]
if (self.collation and
self.charset and
(
self.parent.options['charset'].value != self.charset or
self.parent.options['collation'].value != self.collation
)):
sql.append("CHARACTER SET %s COLLATE %s" % (self.charset, self.collation))
if not self.null:
sql.append("NOT NULL")
else:
sql.append("NULL")
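        # Python 2/3 compatibility shim for the isinstance() check below:
        # Python 3 has no ``basestring`` builtin, so fall back to ``str``.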
try:
basestring
except NameError:
basestring = str
if self.default is not None and isinstance(self.default, (str, basestring)) \
and self.default != 'CURRENT_TIMESTAMP':
sql.append("DEFAULT '%s'" % self.default)
elif self.default is not None:
sql.append("DEFAULT %s" % self.default)
if self.extra:
sql.append(self.extra)
if with_comment and self.comment:
sql.append("COMMENT '%s'" % self.comment)
if after:
sql.append("AFTER `%s`" % after)
else:
sql.append("FIRST")
return ' '.join(sql)
def create(self, *args, **kwargs):
"""
Generate the SQL to create (ADD) this column.
``after`` is the name(string) of the column this should appear after.
If ``after`` is None, ``FIRST`` is used.
``with_comment`` boolean, add column comment to sql statement
>>> schema.databases['sakila'].tables['rental'].columns['last_update'].create(after="staff_id")
'ADD COLUMN `last_update` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP AFTER `staff_id`'
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].create()
'ADD COLUMN `rental_id` INT(11) NOT NULL auto_increment FIRST'
"""
return "ADD COLUMN %s" % self.define(*args, **kwargs)
def modify(self, *args, **kwargs):
"""
Generate the SQL to modify this column.
``after`` is the name(string) of the column this should appear after.
        If ``after`` is None, ``FIRST`` is used.
``with_comment`` boolean, add column comment to sql statement
>>> schema.databases['sakila'].tables['rental'].columns['customer_id'].define(after="inventory_id")
'`customer_id` SMALLINT(5) UNSIGNED NOT NULL AFTER `inventory_id`'
>>> schema.databases['sakila'].tables['rental'].columns['customer_id'].default = 123
>>> schema.databases['sakila'].tables['rental'].columns['customer_id'].modify(after="inventory_id")
'MODIFY COLUMN `customer_id` SMALLINT(5) UNSIGNED NOT NULL DEFAULT 123 AFTER `inventory_id`'
"""
return "MODIFY COLUMN %s" % self.define(*args, **kwargs)
def drop(self):
"""
Generate the SQL to drop this column::
>>> schema.databases['sakila'].tables['rental'].columns['rental_id'].drop()
'DROP COLUMN `rental_id`'
"""
return "DROP COLUMN `%s`" % self.field
def __eq__(self, other):
if not isinstance(other, ColumnSchema):
return False
return ((self.field == other.field)
and (self.type == other.type)
and (self.null == other.null)
and (self.default == other.default)
and (self.extra == other.extra)
and (self.collation == other.collation))
def __ne__(self, other):
return not self.__eq__(other)
|
py | 1a410108cbda37abde53138ec6f61421fcce08fb | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
from tests.pruning.helpers import BigPruningTestModel, get_basic_pruning_config, \
PruningTestModelConcat, PruningTestModelEltwise
from tests.test_helpers import load_exported_onnx_version
def find_value_by_name_in_list(obj_list, name):
for obj in obj_list:
if obj.name == name:
return obj
return None
def check_bias_and_weight_shape(node_name, onnx_model_proto, weight_shape, bias_shape):
node_weight = find_value_by_name_in_list(onnx_model_proto.graph.initializer, node_name + '.weight')
node_bias = find_value_by_name_in_list(onnx_model_proto.graph.initializer, node_name + '.bias')
assert node_weight.dims == weight_shape
assert node_bias.dims == bias_shape
def test_pruning_export_simple_model(tmp_path):
model = BigPruningTestModel()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config['compression']['pruning_init'] = 0.5
nncf_config['compression']['algorithm'] = 'filter_pruning'
onnx_model_proto = load_exported_onnx_version(nncf_config, model,
path_to_storage_dir=tmp_path)
# Check that conv2 + BN were pruned by output filters
# WARNING: starting from at least torch 1.7.0, torch.onnx.export will fuses BN into previous
# convs if torch.onnx.export is done with `training=False`, so this test might fail.
check_bias_and_weight_shape('nncf_module.conv2', onnx_model_proto, [16, 16, 3, 3], [16])
check_bias_and_weight_shape('nncf_module.bn', onnx_model_proto, [16], [16])
# Check that up was pruned by input filters
check_bias_and_weight_shape('nncf_module.up', onnx_model_proto, [16, 32, 3, 3], [32])
# Check that conv3 was pruned by input filters
check_bias_and_weight_shape('nncf_module.conv3', onnx_model_proto, [1, 32, 5, 5], [1])
@pytest.mark.parametrize(('prune_first', 'prune_last', 'ref_shapes'),
[(False, True, [[[16, 1, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 16, 2, 2], [16]],
[[8, 32, 3, 3], [8]]]),
(True, True, [[[8, 1, 2, 2], [8]], [[16, 8, 2, 2], [16]], [[16, 8, 2, 2], [16]],
[[8, 32, 3, 3], [8]]]),
(False, False, [[[16, 1, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 16, 2, 2], [16]],
[[16, 32, 3, 3], [16]]]),
(True, False, [[[8, 1, 2, 2], [8]], [[16, 8, 2, 2], [16]], [[16, 8, 2, 2], [16]],
[[16, 32, 3, 3], [16]]]),
]
)
def test_pruning_export_concat_model(tmp_path, prune_first, prune_last, ref_shapes):
model = PruningTestModelConcat()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config['compression']['algorithm'] = 'filter_pruning'
nncf_config['compression']['params']['prune_first_conv'] = prune_first
nncf_config['compression']['params']['prune_last_conv'] = prune_last
nncf_config['compression']['pruning_init'] = 0.5
onnx_model_proto = load_exported_onnx_version(nncf_config, model,
path_to_storage_dir=tmp_path)
for i in range(1, 5):
conv_name = "nncf_module.conv{}".format(i)
check_bias_and_weight_shape(conv_name, onnx_model_proto, *ref_shapes[i - 1])
@pytest.mark.parametrize(('prune_first', 'prune_last', 'ref_shapes'),
[(False, True, [[[16, 1, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 16, 2, 2], [16]],
[[8, 16, 3, 3], [8]]]),
(True, True, [[[8, 1, 2, 2], [8]], [[16, 8, 2, 2], [16]], [[16, 8, 2, 2], [16]],
[[8, 16, 3, 3], [8]]]),
(False, False, [[[16, 1, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 16, 2, 2], [16]],
[[16, 16, 3, 3], [16]]]),
(True, False, [[[8, 1, 2, 2], [8]], [[16, 8, 2, 2], [16]], [[16, 8, 2, 2], [16]],
[[16, 16, 3, 3], [16]]]),
]
)
def test_pruning_export_eltwise_model(tmp_path, prune_first, prune_last, ref_shapes):
model = PruningTestModelEltwise()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config['compression']['algorithm'] = 'filter_pruning'
nncf_config['compression']['params']['prune_first_conv'] = prune_first
nncf_config['compression']['params']['prune_last_conv'] = prune_last
nncf_config['compression']['pruning_init'] = 0.5
onnx_model_proto = load_exported_onnx_version(nncf_config, model,
path_to_storage_dir=tmp_path)
for i in range(1, 5):
conv_name = "nncf_module.conv{}".format(i)
check_bias_and_weight_shape(conv_name, onnx_model_proto, *ref_shapes[i - 1])
|
py | 1a410235f7dab6c28c6304ea5e17ccbf722e23ad | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017, 2018, 2019, Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
from ht import *
from fluids.numerics import assert_close, assert_close1d, assert_close2d
import pytest
def test_conv_jacket():
# actual example
h = Lehrer(2.5, 0.6, 0.65, 0.6, 0.025, 995.7, 4178.1, 0.615, 798E-6, 355E-6, dT=20.)
assert_close(h, 2922.128124761829)
# no wall correction
h = Lehrer(2.5, 0.6, 0.65, 0.6, 0.025, 995.7, 4178.1, 0.615, 798E-6, dT=20.)
assert_close(h, 2608.8602693706853)
# with isobaric expansion, all cases
h = Lehrer(m=2.5, Dtank=0.6, Djacket=0.65, H=0.6, Dinlet=0.025, dT=20., rho=995.7, Cp=4178.1, k=0.615, mu=798E-6, muw=355E-6, inlettype='radial', isobaric_expansion=0.000303)
assert_close(h, 3269.4389632666557)
h = Lehrer(m=2.5, Dtank=0.6, Djacket=0.65, H=0.6, Dinlet=0.025, dT=20., rho=995.7, Cp=4178.1, k=0.615, mu=798E-6, muw=355E-6, inlettype='radial', inletlocation='top', isobaric_expansion=0.000303)
assert_close(h, 2566.1198726589996)
h = Lehrer(m=2.5, Dtank=0.6, Djacket=0.65, H=0.6, Dinlet=0.025, dT=-20., rho=995.7, Cp=4178.1, k=0.615, mu=798E-6, muw=355E-6, inlettype='radial', isobaric_expansion=0.000303)
assert_close(h, 3269.4389632666557)
h = Lehrer(m=2.5, Dtank=0.6, Djacket=0.65, H=0.6, Dinlet=0.025, dT=-20., rho=995.7, Cp=4178.1, k=0.615, mu=798E-6, muw=355E-6, inlettype='radial', inletlocation='bottom', isobaric_expansion=0.000303)
assert_close(h, 2566.1198726589996)
### Stein Schmidt
h = Stein_Schmidt(2.5, 0.6, 0.65, 0.6, 0.025, 995.7, 4178.1, 0.615, 798E-6, 355E-6, 971.8)
assert_close(h, 5695.204169808863)
h = Stein_Schmidt(2.5, 0.6, 0.65, 0.6, 0.025, 995.7, 4178.1, 0.615, 798E-6, 355E-6, 971.8, inlettype='radial')
assert_close(h, 1217.1449686341773)
h = Stein_Schmidt(2.5, 0.6, 0.65, 0.6, 0.025, 995.7, 4178.1, 0.615, 798E-6, 355E-6, 971.8, inletlocation='top')
assert_close(h, 5675.841635061595)
h = Stein_Schmidt(2.5, 0.6, 0.65, 0.6, 0.025, 995.7, 4178.1, 0.615, 798E-6, 355E-6, 971.8, inletlocation='bottom')
assert_close(h, 5695.2041698088633)
h = Stein_Schmidt(2.5, 0.6, 0.65, 0.6, 0.025, 971.8, 4178.1, 0.615, 798E-6, 355E-6, 995.7, inletlocation='bottom')
assert_close(h, 5694.9722658952096)
h = Stein_Schmidt(2.5, 0.6, 0.65, 0.6, 0.025, 971.8, 4178.1, 0.615, 798E-6, 355E-6, 995.7, inletlocation='top')
assert_close(h, 5676.0744960391157)
h = Stein_Schmidt(2.5, 0.6, 0.65, 0.6, 0.025, 971.8, 4178.1, 0.615, 798E-6, 355E-6)
assert_close(h, 5685.532991556428)
h = Stein_Schmidt(.1, 0.6, 0.65, 0.6, 0.025, 971.8, 4178.1, 0.615, 798E-6)
assert_close(h, 151.78819106776797) |
py | 1a41041f212897bbc357533e83f340b9f0827848 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
from typing import Dict
from logging import getLogger
import onnx
from onnx import helper
from .onnx_model import OnnxModel
from .fusion_base import Fusion
from .fusion_utils import FusionUtils
logger = getLogger(__name__)
class FusionEmbedLayerNoMask(Fusion):
"""
Embed Layer Normalization will fuse embeddings and mask processing into one node.
The embeddings before conversion:
(input_ids) --------> Gather ----------+ (segment_ids)
| | |
| v v
+--> Shape --> Expand -> Gather---->Add Gather
| ^ | |
| | v v
+---(optional graph) SkipLayerNormalization
Optional graph is used to generate position list (0, 1, ...) per batch. It can be a constant in some model.
(input_ids) --> Gather -----+ Slice
| |
v v
(segment_ids)--> Gather --->Add Reshape
| |
v v
SkipLayerNormalization
"""
def __init__(self, model: OnnxModel, description='no mask'):
super().__init__(model, "EmbedLayerNormalization", "SkipLayerNormalization", description)
self.utils = FusionUtils(model)
self.attention = None
def match_segment_path(self, normalize_node, input_name_to_nodes, output_name_to_node, input_ids_cast_node):
segment_ids = None
segment_embedding_gather = None
segment_embedding_path = self.model.match_parent_path(normalize_node, ['Gather'], [1])
if segment_embedding_path is None:
segment_embedding_path = self.model.match_parent_path(normalize_node, ['Add', 'Gather'], [0, 1])
if segment_embedding_path is None:
logger.info("Segment embedding is not found. Embed layer cannot be fused.")
return
_, segment_embedding_gather = segment_embedding_path
else:
segment_embedding_gather = segment_embedding_path[0]
segment_ids = segment_embedding_gather.input[1]
self.nodes_to_remove.extend(segment_embedding_path)
if self.model.find_graph_input(segment_ids):
casted, segment_ids = self.utils.cast_graph_input_to_int32(segment_ids)
else:
segment_ids, segment_ids_cast_node = self.utils.cast_input_to_int32(segment_ids)
# Cast might be removed by OnnxRuntime.
_, segment_id_path, _ = self.model.match_parent_paths(
segment_ids_cast_node,
[(['ConstantOfShape', 'Concat', 'Unsqueeze', 'Gather', 'Shape', 'Cast'], [0, 0, 1, 0, 0, 0]),
(['ConstantOfShape', 'Concat', 'Unsqueeze', 'Gather', 'Shape'], [0, 0, 1, 0, 0])], output_name_to_node)
if segment_id_path and input_ids_cast_node and input_ids_cast_node.input[0] == segment_id_path[-1].input[0]:
logger.debug("Simplify semgent id path...")
self.model.add_node(
helper.make_node('Shape', inputs=[input_ids_cast_node.input[0]], outputs=["input_shape"]))
self.model.add_node(
helper.make_node('ConstantOfShape',
inputs=["input_shape"],
outputs=["zeros_for_input_shape"],
value=helper.make_tensor("value", onnx.TensorProto.INT32, [1], [1])))
segment_ids = "zeros_for_input_shape"
return segment_ids, segment_embedding_gather
def fuse(self, node, input_name_to_nodes, output_name_to_node):
is_distill = False
if self.model.match_parent_path(node, ['Add', 'Gather'], [0, 0]) is None and self.model.match_parent_path(
node, ['Gather'], [0]) is None:
logger.debug(
"Failed to match path SkipLayerNormalization[0] <-- Add <-- Gather or SkipLayerNormalization[0] <-- Gather"
)
return
self.attention = self.model.find_first_child_by_type(node, 'Attention', input_name_to_nodes, recursive=False)
if self.attention is None:
# In case user disables attention fusion, check whether subgraph looks like Attention.
if node.output[0] not in input_name_to_nodes:
return
children = input_name_to_nodes[node.output[0]]
children_types = sorted([child.op_type for child in children])
if children_types != ['MatMul', 'MatMul', 'MatMul', 'SkipLayerNormalization'] and children_types != [
'MatMul', 'MatMul', 'MatMul', 'Shape', 'Shape', 'SkipLayerNormalization'
]:
logger.debug("No Attention like subgraph in children of SkipLayerNormalization")
return
# Assume the order of embeddings are word_embedding + position_embedding + segment_embedding
normalize_node = node
add_node = None
word_embedding_path = self.model.match_parent_path(normalize_node, ['Add', 'Gather'], [0, 0])
if word_embedding_path is not None:
add_node, word_embedding_gather = word_embedding_path
else:
word_embedding_path = self.model.match_parent_path(normalize_node, ['Gather'], [0])
if word_embedding_path is not None:
word_embedding_gather = word_embedding_path[0]
is_distill = True
from packaging.version import Version
import onnxruntime
if Version(onnxruntime.__version__) <= Version("1.4.0"):
logger.warning(
'Please install onnxruntime with version > 1.4.0 for embedlayer fusion support for distilbert')
return
else:
logger.info("Word embedding path is not found. Embed layer cannot be fused.")
return
input_ids = word_embedding_gather.input[1]
position_embedding_expand = None
position_embedding_shape = None
position_embedding_path = self.model.match_parent_path(normalize_node, ['Gather', 'Expand'],
[1, 1]) # for distill-bert
if position_embedding_path is not None:
position_embedding_weight_node, position_embedding_expand = position_embedding_path
else:
position_embedding_path = self.model.match_parent_path(normalize_node, ['Reshape', 'Slice'], [1, 0])
if position_embedding_path is not None:
_, position_embedding_weight_node = position_embedding_path
else:
position_embedding_path = self.model.match_parent_path(add_node, ['Gather', 'Expand', 'Shape'],
[1, 1, 1])
if position_embedding_path is not None:
position_embedding_weight_node, position_embedding_expand, position_embedding_shape = position_embedding_path
else:
position_embedding_path = self.model.match_parent_path(
add_node, ['Gather', 'Expand', 'Concat', 'Unsqueeze', 'Gather', 'Shape'], [1, 1, 1, 1, 0, 0])
if position_embedding_path is not None:
position_embedding_weight_node, position_embedding_expand, _, _, _, position_embedding_shape = position_embedding_path
else:
# Here we will not try to get exact match. Instead, we only try identify position embedding weights.
position_embedding_path = self.model.match_parent_path(add_node, ['Gather', 'Expand'], [1, 1])
if position_embedding_path is not None:
position_embedding_weight_node, position_embedding_expand = position_embedding_path
else:
logger.info("Position embedding path is not found. Embed layer cannot be fused.")
return
if position_embedding_shape is not None and position_embedding_shape.input[0] != input_ids:
logger.info("position and word embedding is expected to be applied on same input")
return
if position_embedding_expand and position_embedding_shape:
input_parent = self.model.get_parent(position_embedding_shape, 0, output_name_to_node)
subgraph_nodes = self.model.get_parent_subgraph_nodes(position_embedding_expand,
[input_parent] if input_parent else [],
output_name_to_node)
self.nodes_to_remove.extend(subgraph_nodes)
self.nodes_to_remove.extend(word_embedding_path)
self.nodes_to_remove.extend(position_embedding_path)
self.nodes_to_remove.extend([normalize_node])
# Cast input_ids and segment_ids to int32.
input_ids_cast_node = None
if self.model.find_graph_input(input_ids):
casted, input_ids = self.utils.cast_graph_input_to_int32(input_ids)
else:
input_ids, input_ids_cast_node = self.utils.cast_input_to_int32(input_ids)
node_name = self.model.create_node_name('EmbedLayerNormalization')
output_name = node_name + "_output"
embed_node_inputs = None
if is_distill == False:
segment_path = self.match_segment_path(normalize_node, input_name_to_nodes, output_name_to_node,
input_ids_cast_node)
if segment_path is None:
return
else:
segment_ids, segment_embedding_gather = segment_path
embed_node_inputs = [
input_ids,
segment_ids,
word_embedding_gather.input[0],
position_embedding_weight_node.input[0],
segment_embedding_gather.input[0],
normalize_node.input[2],
normalize_node.input[3] # gamma and beta
]
else:
embed_node_inputs = [
input_ids,
'',
word_embedding_gather.input[0],
position_embedding_weight_node.input[0],
'',
normalize_node.input[2],
normalize_node.input[3] # gamma and beta
]
embed_node = helper.make_node('EmbedLayerNormalization',
embed_node_inputs,
outputs=[node_name + "_output", node_name + "_dummy_mask_index"],
name=node_name)
embed_node.domain = "com.microsoft"
# Pass attribute "epsilon" from normalize node to EmbedLayerNormalization.
for att in normalize_node.attribute:
if att.name == 'epsilon':
embed_node.attribute.extend([att])
# Set default value to 1e-12 if no attribute is found.
# OnnxRuntime 1.2.0 or older has no epsilon attribute. The optimized model can only work for 1.3.0 or later.
if len(embed_node.attribute) == 0:
embed_node.attribute.extend([helper.make_attribute("epsilon", 1.0E-12)])
self.model.replace_input_of_all_nodes(normalize_node.output[0], output_name)
self.nodes_to_add.append(embed_node)
class FusionEmbedLayerNormalization(FusionEmbedLayerNoMask):
def __init__(self, model: OnnxModel):
super().__init__(model, "with mask")
def fuse(self, node, input_name_to_nodes, output_name_to_node):
old_count = len(self.nodes_to_add)
super().fuse(node, input_name_to_nodes, output_name_to_node)
if len(self.nodes_to_add) == old_count:
return
if self.attention is not None:
mask_index = self.attention.input[3]
if mask_index in output_name_to_node:
node = output_name_to_node[mask_index]
if node.op_type == "ReduceSum":
embed_node = self.nodes_to_add.pop()
mask_input_name = node.input[0]
self.nodes_to_remove.extend([node])
embed_node.input.append(mask_input_name)
embed_node.output[1] = mask_index
self.nodes_to_add.append(embed_node)
self.prune_graph = True
|
py | 1a4104496d6ceed8764df21764e1813632bcdde4 | #!/usr/bin/python
import pickle
import numpy
import _pickle as cPickle
from sklearn.model_selection import cross_validate
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.model_selection import train_test_split
def preprocess(words_file = "../tools/word_data.pkl", authors_file="../tools/email_authors.pkl"):
"""
this function takes a pre-made list of email texts (by default word_data.pkl)
and the corresponding authors (by default email_authors.pkl) and performs
a number of preprocessing steps:
-- splits into training/testing sets (10% testing)
-- vectorizes into tfidf matrix
-- selects/keeps most helpful features
after this, the feaures and labels are put into numpy arrays, which play nice with sklearn functions
4 objects are returned:
-- training/testing features
-- training/testing labels
"""
### the words (features) and authors (labels), already largely preprocessed
### this preprocessing will be repeated in the text learning mini-project
authors_file_handler = open(authors_file, "rb")
authors = pickle.load(authors_file_handler)
authors_file_handler.close()
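    # The word_data pickle may have Windows (CRLF) line endings; rewrite it with Unix (LF)
    # endings into word_data_unix.pkl before loading it with cPickle.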
original = words_file
destination = "word_data_unix.pkl"
content = ''
outsize = 0
with open(original, 'rb') as infile:
content = infile.read()
with open(destination, 'wb') as output:
for line in content.splitlines():
outsize = outsize + len(line) + 1
            output.write(line + b'\n')
words_file_handler = open(destination, "rb")
word_data = cPickle.load(words_file_handler)
words_file_handler.close()
### test_size is the percentage of events assigned to the test set
### (remainder go into training)
features_train, features_test, labels_train, labels_test = train_test_split(word_data, authors, test_size=0.1, random_state=42)
### text vectorization--go from strings to lists of numbers
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
features_train_transformed = vectorizer.fit_transform(features_train)
features_test_transformed = vectorizer.transform(features_test)
### feature selection, because text is super high dimensional and
### can be really computationally chewy as a result
# percentile can be changed from 10 to 1, for example.
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(features_train_transformed, labels_train)
features_train_transformed = selector.transform(features_train_transformed).toarray()
features_test_transformed = selector.transform(features_test_transformed).toarray()
### info on the data
print ("no. of Chris training emails:", sum(labels_train))
print ("no. of Sara training emails:", len(labels_train)-sum(labels_train))
return features_train_transformed, features_test_transformed, labels_train, labels_test
|
py | 1a41044ea9ee5ee72d71093b431384267d8b5530 | import django
from gui.lnd_deps import router_pb2 as lnr
from gui.lnd_deps import router_pb2_grpc as lnrouter
from gui.lnd_deps.lnd_connect import lnd_connect
from lndg import settings
from os import environ
from time import sleep
environ['DJANGO_SETTINGS_MODULE'] = 'lndg.settings'
django.setup()
from gui.models import Channels, FailedHTLCs
def main():
try:
connection = lnd_connect(settings.LND_DIR_PATH, settings.LND_NETWORK, settings.LND_RPC_SERVER)
routerstub = lnrouter.RouterStub(connection)
for response in routerstub.SubscribeHtlcEvents(lnr.SubscribeHtlcEventsRequest()):
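            # event_type 3 is a forwarded HTLC (FORWARD in lnd's HtlcEvent enum); only failed forwards are recorded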
if response.event_type == 3 and str(response.link_fail_event) != '':
in_chan_id = response.incoming_channel_id
out_chan_id = response.outgoing_channel_id
in_chan = Channels.objects.filter(chan_id=in_chan_id)[0] if Channels.objects.filter(chan_id=in_chan_id).exists() else None
out_chan = Channels.objects.filter(chan_id=out_chan_id)[0] if Channels.objects.filter(chan_id=out_chan_id).exists() else None
in_chan_alias = in_chan.alias if in_chan is not None else None
out_chan_alias = out_chan.alias if out_chan is not None else None
out_chan_liq = out_chan.local_balance if out_chan is not None else None
out_chan_pending = out_chan.pending_outbound if out_chan is not None else None
amount = int(response.link_fail_event.info.outgoing_amt_msat/1000)
wire_failure = response.link_fail_event.wire_failure
failure_detail = response.link_fail_event.failure_detail
                missed_fee = 0 if out_chan is None else round(((amount/1000000) * out_chan.local_fee_rate) + (out_chan.local_base_fee/1000), 3)
FailedHTLCs(amount=amount, chan_id_in=in_chan_id, chan_id_out=out_chan_id, chan_in_alias=in_chan_alias, chan_out_alias=out_chan_alias, chan_out_liq=out_chan_liq, chan_out_pending=out_chan_pending, wire_failure=wire_failure, failure_detail=failure_detail, missed_fee=missed_fee).save()
except Exception as e:
print('Error while running failed HTLC stream: ' + str(e))
sleep(20)
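# Hedged sketch of the missed_fee arithmetic used above (assumed units: amount in sats,
# local_fee_rate in ppm, local_base_fee in msat); defined only for illustration, the stream
# loop above does the same math inline.
def _estimate_missed_fee(amount_sats, fee_rate_ppm, base_fee_msat):
    return round((amount_sats / 1000000) * fee_rate_ppm + base_fee_msat / 1000, 3)
# e.g. _estimate_missed_fee(100000, 500, 1000) == 51.0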
if __name__ == '__main__':
main() |
py | 1a4104964d485e7d1d1c7325bb968bce16bddb9a | """
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import print_function
import struct
import traceback
import os
from binaryninja.architecture import Architecture
from binaryninja.lowlevelil import LowLevelILLabel, LLIL_TEMP
from binaryninja.function import RegisterInfo, InstructionInfo, InstructionTextToken
from binaryninja.binaryview import BinaryView
from binaryninja.plugin import PluginCommand
from binaryninja.interaction import AddressField, ChoiceField, get_form_input
from binaryninja.types import Symbol
from binaryninja.log import log_error
from binaryninja.enums import (Endianness, BranchType, InstructionTextTokenType,
LowLevelILOperation, LowLevelILFlagCondition, FlagRole, SegmentFlag,
ImplicitRegisterExtend, SymbolType)
# Shift syles
SHIFT_SYLE_ARITHMETIC = 0
SHIFT_SYLE_LOGICAL = 1
SHIFT_SYLE_ROTATE_WITH_EXTEND = 2
SHIFT_SYLE_ROTATE = 3
ShiftStyle = [
'as', # SHIFT_SYLE_ARITHMETIC
'ls', # SHIFT_SYLE_LOGICAL
'rox', # SHIFT_SYLE_ROTATE_WITH_EXTEND
'ro' # SHIFT_SYLE_ROTATE
]
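# The decoder below builds shift/rotate mnemonics from this table plus a direction suffix:
# for example ShiftStyle[1] + 'r' gives 'lsr' and ShiftStyle[0] + 'l' gives 'asl'.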
# Condition codes
CONDITION_TRUE = 0
CONDITION_FALSE = 1
CONDITION_HIGH = 2
CONDITION_LESS_OR_SAME = 3
CONDITION_CARRY_CLEAR = 4
CONDITION_CARRY_SET = 5
CONDITION_NOT_EQUAL = 6
CONDITION_EQUAL = 7
CONDITION_OVERFLOW_CLEAR = 8
CONDITION_OVERFLOW_SET = 9
CONDITION_PLUS = 10
CONDITION_MINUS = 11
CONDITION_GREATER_OR_EQUAL = 12
CONDITION_LESS_THAN = 13
CONDITION_GREATER_THAN = 14
CONDITION_LESS_OR_EQUAL = 15
Condition = [
't', # CONDITION_TRUE
'f', # CONDITION_FALSE
'hi', # CONDITION_HIGH
'ls', # CONDITION_LESS_OR_SAME
'cc', # CONDITION_CARRY_CLEAR
'cs', # CONDITION_CARRY_SET
'ne', # CONDITION_NOT_EQUAL
'eq', # CONDITION_EQUAL
'vc', # CONDITION_OVERFLOW_CLEAR
'vs', # CONDITION_OVERFLOW_SET
'pl', # CONDITION_PLUS
'mi', # CONDITION_MINUS
'ge', # CONDITION_GREATER_OR_EQUAL
'lt', # CONDITION_LESS_THAN
'gt', # CONDITION_GREATER_THAN
'le' # CONDITION_LESS_OR_EQUAL
]
# Registers
REGISTER_D0 = 0
REGISTER_D1 = 1
REGISTER_D2 = 2
REGISTER_D3 = 3
REGISTER_D4 = 4
REGISTER_D5 = 5
REGISTER_D6 = 6
REGISTER_D7 = 7
REGISTER_A0 = 8
REGISTER_A1 = 9
REGISTER_A2 = 10
REGISTER_A3 = 11
REGISTER_A4 = 12
REGISTER_A5 = 13
REGISTER_A6 = 14
REGISTER_A7 = 15
Registers = [
'd0', # REGISTER_D0
'd1', # REGISTER_D1
'd2', # REGISTER_D2
'd3', # REGISTER_D3
'd4', # REGISTER_D4
'd5', # REGISTER_D5
'd6', # REGISTER_D6
'd7', # REGISTER_D7
'a0', # REGISTER_A0
'a1', # REGISTER_A1
'a2', # REGISTER_A2
'a3', # REGISTER_A3
'a4', # REGISTER_A4
'a5', # REGISTER_A5
'a6', # REGISTER_A6
'sp' # REGISTER_A7
]
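# Index math used throughout the decoder: Registers[n] is d0-d7 for n in 0-7 and a0-a6/sp for
# n in 8-15, so expressions like Registers[(instruction & 7) + 8] pick the address register
# encoded in an instruction's low three bits.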
# Sizes
SIZE_BYTE = 0
SIZE_WORD = 1
SIZE_LONG = 2
SizeSuffix = [
'.b', # SIZE_BYTE
'.w', # SIZE_WORD
'.l', # SIZE_LONG
]
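# The size codes double as shift amounts: the operand width in bytes is (1 << size), so
# SIZE_BYTE -> 1, SIZE_WORD -> 2 and SIZE_LONG -> 4, which is the idiom the operand classes
# below rely on (e.g. il.load(1 << self.size, ...)).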
# Operands
class OpRegisterDirect:
def __init__(self, size, reg):
self.size = size
self.reg = reg
def __repr__(self):
return "OpRegisterDirect(%d, %s)" % (self.size, self.reg)
def format(self, addr):
# a0, d0
return [
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg)
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return None
def get_source_il(self, il):
if self.reg == 'ccr':
c = il.flag_bit(1, 'c', 0)
v = il.flag_bit(1, 'v', 1)
z = il.flag_bit(1, 'z', 2)
n = il.flag_bit(1, 'n', 3)
x = il.flag_bit(1, 'x', 4)
return il.or_expr(1, il.or_expr(1, il.or_expr(1, il.or_expr(1, c, v), z), n), x)
else:
return il.reg(1 << self.size, self.reg)
def get_dest_il(self, il, value, flags=0):
if self.reg == 'ccr':
return None
else:
# return il.set_reg(1 << self.size, self.reg, value)
# if self.size == SIZE_BYTE:
# if self.reg[0] == 'a' or self.reg == 'sp':
# return None
# else:
# return il.set_reg(1, self.reg+'.b', value, flags)
# elif self.size == SIZE_WORD:
# return il.set_reg(2, self.reg+'.w', value, flags)
# else:
# return il.set_reg(4, self.reg, value, flags)
if self.size == SIZE_BYTE:
if self.reg[0] == 'a' or self.reg == 'sp':
return None
else:
return il.set_reg(4, self.reg, il.or_expr(4, il.and_expr(4, il.const(4, 0xffffff00), il.reg(4, self.reg)), il.and_expr(4, il.const(4, 0xff), value)), flags)
elif self.size == SIZE_WORD:
if self.reg[0] == 'a' or self.reg == 'sp':
return il.set_reg(4, self.reg, il.sign_extend(4, value), flags)
else:
return il.set_reg(4, self.reg, il.or_expr(4, il.and_expr(4, il.const(4, 0xffff0000), il.reg(4, self.reg)), il.and_expr(4, il.const(4, 0xffff), value)), flags)
else:
return il.set_reg(4, self.reg, value, flags)
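# Worked example of the partial-register writes above: a byte store of 0xAA into d0 holding
# 0x12345678 leaves d0 == 0x123456AA, while a word store into an address register sign-extends
# its value to 32 bits, matching 68000 register-write semantics.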
class OpRegisterDirectPair:
def __init__(self, size, reg1, reg2):
self.size = size
self.reg1 = reg1
self.reg2 = reg2
def __repr__(self):
return "OpRegisterDirectPair(%d, %s, %s)" % (self.size, self.reg1, self.reg2)
def format(self, addr):
# d0:d1
return [
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg1),
InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ":"),
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg2)
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return None
def get_source_il(self, il):
return (il.reg(1 << self.size, self.reg1), il.reg(1 << self.size, self.reg2))
def get_dest_il(self, il, values, flags=0):
return (il.set_reg(1 << self.size, self.reg1, values[0], flags), il.set_reg(1 << self.size, self.reg2, values[1], flags))
class OpRegisterMovemList:
def __init__(self, size, regs):
self.size = size
self.regs = regs
def __repr__(self):
return "OpRegisterMovemList(%d, %s)" % (self.size, repr(self.regs))
def format(self, addr):
# d0-d7/a0/a2/a4-a7
if len(self.regs) == 0:
return []
tokens = [InstructionTextToken(InstructionTextTokenType.RegisterToken, self.regs[0])]
last = self.regs[0]
first = None
for reg in self.regs[1:]:
if Registers[Registers.index(last)+1] == reg and reg != 'a0':
if first is None:
first = last
last = reg
else:
if first is not None:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, "-"))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, last))
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, "/"))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, reg))
first = None
last = reg
if first is not None:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, "-"))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, last))
return tokens
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return None
def get_source_il(self, il):
return [il.reg(1 << self.size, reg) for reg in self.regs]
def get_dest_il(self, il, values, flags=0):
return [il.set_reg(1 << self.size, reg, val, flags) for reg, val in zip(self.regs, values)]
class OpRegisterIndirect:
def __init__(self, size, reg):
self.size = size
self.reg = reg
def __repr__(self):
return "OpRegisterIndirect(%d, %s)" % (self.size, self.reg)
def format(self, addr):
# (a0)
return [
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")")
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return il.reg(4, self.reg)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpRegisterIndirectPair:
def __init__(self, size, reg1, reg2):
self.size = size
self.reg1 = reg1
self.reg2 = reg2
def __repr__(self):
return "OpRegisterIndirectPair(%d, %s, %s)" % (self.size, self.reg1, self.reg2)
def format(self, addr):
# d0:d1
return [
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg1),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")"),
InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ":"),
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg2),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")")
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return (il.reg(4, self.reg1), il.reg(4, self.reg2))
def get_source_il(self, il):
return (il.load(1 << self.size, il.reg(4, self.reg1)), il.load(1 << self.size, il.reg(4, self.reg2)))
def get_dest_il(self, il, values, flags=0):
#return (il.store(1 << self.size, il.reg(4, self.reg1), values[0], flags), il.store(1 << self.size, il.reg(4, self.reg2), values[1], flags))
return (il.store(1 << self.size, il.reg(4, self.reg1), values[0]), il.store(1 << self.size, il.reg(4, self.reg2), values[1]))
class OpRegisterIndirectPostincrement:
def __init__(self, size, reg):
self.size = size
self.reg = reg
def __repr__(self):
return "OpRegisterIndirectPostincrement(%d, %s)" % (self.size, self.reg)
def format(self, addr):
# (a0)+
return [
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")"),
InstructionTextToken(InstructionTextTokenType.TextToken, "+")
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return il.set_reg(4,
self.reg,
il.add(4,
il.reg(4, self.reg),
il.const(4, 1 << self.size)
)
)
def get_address_il(self, il):
return il.reg(4, self.reg)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpRegisterIndirectPredecrement:
def __init__(self, size, reg):
self.size = size
self.reg = reg
def __repr__(self):
return "OpRegisterIndirectPredecrement(%d, %s)" % (self.size, self.reg)
def format(self, addr):
# -(a0)
return [
InstructionTextToken(InstructionTextTokenType.TextToken, "-"),
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")")
]
def get_pre_il(self, il):
return il.set_reg(4,
self.reg,
il.sub(4,
il.reg(4, self.reg),
il.const(4, 1 << self.size)
)
)
def get_post_il(self, il):
return None
def get_address_il(self, il):
return il.reg(4, self.reg)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpRegisterIndirectDisplacement:
def __init__(self, size, reg, offset):
self.size = size
self.reg = reg
self.offset = offset
def __repr__(self):
return "OpRegisterIndirectDisplacement(%d, %s, 0x%x)" % (self.size, self.reg, self.offset)
def format(self, addr):
if self.reg == 'pc':
return [
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.PossibleAddressToken, "${:08x}".format(addr+2+self.offset), addr+2+self.offset, 4),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")")
]
else:
# $1234(a0)
return [
InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:04x}".format(self.offset), self.offset, 2),
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")")
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
if self.reg == 'pc':
return il.const(4, il.current_address+2+self.offset)
else:
return il.add(4,
il.reg(4, self.reg),
il.const(2, self.offset)
)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
if self.reg == 'pc':
return None
else:
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpRegisterIndirectIndex:
def __init__(self, size, reg, offset, ireg, ireg_long, scale):
self.size = size
self.reg = reg
self.offset = offset
self.ireg = ireg
self.ireg_long = ireg_long
self.scale = scale
def __repr__(self):
return "OpRegisterIndirectIndex(%d, %s, 0x%x, %s, %d, %d)" % (self.size, self.reg, self.offset, self.ireg, self.ireg_long, self.scale)
def format(self, addr):
# $1234(a0,a1.l*4)
tokens = []
if self.offset != 0:
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:x}".format(self.offset), self.offset))
tokens.append(InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg))
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, self.ireg))
tokens.append(InstructionTextToken(InstructionTextTokenType.TextToken, "."))
tokens.append(InstructionTextToken(InstructionTextTokenType.TextToken, "l" if self.ireg_long else 'w'))
if self.scale != 1:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, "*"))
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "{}".format(self.scale), self.scale))
tokens.append(InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")"))
return tokens
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return il.add(4,
il.add(4,
il.const(4, il.current_address+2) if self.reg == 'pc' else il.reg(4, self.reg),
il.const(4, self.offset)
),
il.mult(4,
il.reg(4 if self.ireg_long else 2, self.ireg),
il.const(1, self.scale)
)
)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
if self.reg == 'pc':
return None
else:
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpMemoryIndirect:
def __init__(self, size, reg, offset, outer_displacement):
self.size = size
self.reg = reg
self.offset = offset
self.outer_displacement = outer_displacement
def __repr__(self):
return "OpRegisterIndirectIndex(%d, %s, %d, %d)" % (self.size, self.reg, self.offset, self.outer_displacement)
def format(self, addr):
# ([$1234,a0],$1234)
tokens = []
tokens.append(InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("))
tokens.append(InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "["))
if self.offset != 0:
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:x}".format(self.offset), self.offset))
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg))
tokens.append(InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, "]"))
if self.outer_displacement != 0:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:x}".format(self.outer_displacement), self.outer_displacement))
tokens.append(InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")"))
return tokens
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return il.add(4,
il.load(4,
il.add(4,
il.const(4, il.current_address+2) if self.reg == 'pc' else il.reg(4, self.reg),
il.const(4, self.offset)
),
),
il.const(4, self.outer_displacement)
)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
if self.reg == 'pc':
return None
else:
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpMemoryIndirectPostindex:
def __init__(self, size, reg, offset, ireg, ireg_long, scale, outer_displacement):
self.size = size
self.reg = reg
self.offset = offset
self.ireg = ireg
self.ireg_long = ireg_long
self.scale = scale
self.outer_displacement = outer_displacement
def __repr__(self):
return "OpRegisterIndirectIndex(%d, %s, 0x%x, %s, %d, %d, 0x%x)" % (self.size, self.reg, self.offset, self.ireg, self.ireg_long, self.scale, self.outer_displacement)
def format(self, addr):
# ([$1234,a0],a1.l*4,$1234)
tokens = []
tokens.append(InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("))
tokens.append(InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "["))
if self.offset != 0:
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:x}".format(self.offset), self.offset))
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg))
tokens.append(InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, "]"))
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, self.ireg))
tokens.append(InstructionTextToken(InstructionTextTokenType.TextToken, "."))
tokens.append(InstructionTextToken(InstructionTextTokenType.TextToken, "l" if self.ireg_long else 'w'))
if self.scale != 1:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, "*"))
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "{}".format(self.scale), self.scale))
if self.outer_displacement != 0:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:x}".format(self.outer_displacement), self.outer_displacement))
tokens.append(InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")"))
return tokens
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return il.add(4,
il.load(4,
il.add(4,
il.const(4, il.current_address+2) if self.reg == 'pc' else il.reg(4, self.reg),
il.const(4, self.offset)
)
),
il.add(4,
il.mult(4,
il.reg(4 if self.ireg_long else 2, self.ireg),
il.const(1, self.scale)
),
il.const(4, self.outer_displacement)
)
)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
if self.reg == 'pc':
return None
else:
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpMemoryIndirectPreindex:
def __init__(self, size, reg, offset, ireg, ireg_long, scale, outer_displacement):
self.size = size
self.reg = reg
self.offset = offset
self.ireg = ireg
self.ireg_long = ireg_long
self.scale = scale
self.outer_displacement = outer_displacement
def __repr__(self):
return "OpRegisterIndirectIndex(%d, %s, 0x%x, %s, %d, %d, 0x%x)" % (self.size, self.reg, self.offset, self.ireg, self.ireg_long, self.scale, self.outer_displacement)
def format(self, addr):
# ([$1234,a0,a1.l*4],$1234)
tokens = []
tokens.append(InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("))
tokens.append(InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "["))
if self.offset != 0:
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:x}".format(self.offset), self.offset))
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, self.reg))
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.RegisterToken, self.ireg))
tokens.append(InstructionTextToken(InstructionTextTokenType.TextToken, "."))
tokens.append(InstructionTextToken(InstructionTextTokenType.TextToken, "l" if self.ireg_long else 'w'))
if self.scale != 1:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, "*"))
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "{}".format(self.scale), self.scale))
tokens.append(InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, "]"))
if self.outer_displacement != 0:
tokens.append(InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ","))
tokens.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:x}".format(self.outer_displacement), self.outer_displacement))
tokens.append(InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")"))
return tokens
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return il.add(4,
il.load(4,
il.add(4,
il.add(4,
il.const(4, il.current_address+2) if self.reg == 'pc' else il.reg(4, self.reg),
il.const(4, self.offset)
),
il.mult(4,
il.reg(4 if self.ireg_long else 2, self.ireg),
il.const(1, self.scale)
)
)
),
il.const(4, self.outer_displacement)
)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
if self.reg == 'pc':
return None
else:
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpAbsolute:
def __init__(self, size, address, address_size):
self.size = size
self.address = address
self.address_size = address_size
def __repr__(self):
return "OpAbsolute(%d, 0x%x, %d)" % (self.size, self.address, self.address_size)
def format(self, addr):
# ($1234).w
return [
InstructionTextToken(InstructionTextTokenType.BeginMemoryOperandToken, "("),
InstructionTextToken(InstructionTextTokenType.PossibleAddressToken, "${:0{}x}".format(self.address, 1 << self.address_size), self.address, 1 << self.address_size),
InstructionTextToken(InstructionTextTokenType.EndMemoryOperandToken, ")"+SizeSuffix[self.address_size])
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return il.sign_extend(4,
il.const(1 << self.address_size, self.address)
)
def get_source_il(self, il):
return il.load(1 << self.size, self.get_address_il(il))
def get_dest_il(self, il, value, flags=0):
#return il.store(1 << self.size, self.get_address_il(il), value, flags)
return il.expr(LowLevelILOperation.LLIL_STORE, self.get_address_il(il).index, value.index, size=1 << self.size, flags=flags)
class OpImmediate:
def __init__(self, size, value):
self.size = size
self.value = value
def __repr__(self):
return "OpImmediate(%d, 0x%x)" % (self.size, self.value)
def format(self, addr):
# #$1234
return [
InstructionTextToken(InstructionTextTokenType.TextToken, "#"),
#InstructionTextToken(InstructionTextTokenType.PossibleAddressToken, "${:0{}x}".format(self.value, 1 << self.size), self.value, 1 << self.size)
InstructionTextToken(InstructionTextTokenType.IntegerToken, "${:0{}x}".format(self.value, 1 << self.size), self.value, 1 << self.size)
]
def get_pre_il(self, il):
return None
def get_post_il(self, il):
return None
def get_address_il(self, il):
return None
def get_source_il(self, il):
return il.const(1 << self.size, self.value)
def get_dest_il(self, il, value, flags=0):
return None
# condition mapping to LLIL flag conditions
ConditionMapping = {
# 'hi': LowLevelILFlagCondition.
# 'ls': LowLevelILFlagCondition.
# 'cc': LowLevelILFlagCondition.
# 'cs': LowLevelILFlagCondition.
'ne': LowLevelILFlagCondition.LLFC_NE,
'eq': LowLevelILFlagCondition.LLFC_E,
'vc': LowLevelILFlagCondition.LLFC_NO,
'vs': LowLevelILFlagCondition.LLFC_O,
'pl': LowLevelILFlagCondition.LLFC_POS,
'mi': LowLevelILFlagCondition.LLFC_NEG,
    'ge': LowLevelILFlagCondition.LLFC_SGE,
    'lt': LowLevelILFlagCondition.LLFC_SLT,
    'gt': LowLevelILFlagCondition.LLFC_SGT,
    'le': LowLevelILFlagCondition.LLFC_SLE,
}
class M68000(Architecture):
name = "M68000"
address_size = 4
default_int_size = 4
max_instr_length = 22
endianness = Endianness.BigEndian
regs = {
'd0': RegisterInfo('d0', 4),
'd1': RegisterInfo('d1', 4),
'd2': RegisterInfo('d2', 4),
'd3': RegisterInfo('d3', 4),
'd4': RegisterInfo('d4', 4),
'd5': RegisterInfo('d5', 4),
'd6': RegisterInfo('d6', 4),
'd7': RegisterInfo('d7', 4),
'a0': RegisterInfo('a0', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'a1': RegisterInfo('a1', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'a2': RegisterInfo('a2', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'a3': RegisterInfo('a3', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'a4': RegisterInfo('a4', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'a5': RegisterInfo('a5', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'a6': RegisterInfo('a6', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'sp': RegisterInfo('sp', 4, extend=ImplicitRegisterExtend.SignExtendToFullWidth),
'sr': RegisterInfo('sr', 2),
        'ccr': RegisterInfo('sr', 1), # condition codes are the low byte of sr
# control registers
# MC68010/MC68020/MC68030/MC68040/CPU32
'sfc': RegisterInfo('sfc', 4),
'dfc': RegisterInfo('dfc', 4),
'usp': RegisterInfo('usp', 4),
'vbr': RegisterInfo('vbr', 4),
# MC68020/MC68030/MC68040
'cacr': RegisterInfo('cacr', 4),
'caar': RegisterInfo('caar', 4),
'msp': RegisterInfo('msp', 4),
'isp': RegisterInfo('isp', 4),
# MC68040/MC68LC040
'tc': RegisterInfo('tc', 4),
'itt0': RegisterInfo('itt0', 4),
'itt1': RegisterInfo('itt1', 4),
'dtt0': RegisterInfo('dtt0', 4),
'dtt1': RegisterInfo('dtt1', 4),
'mmusr': RegisterInfo('mmusr', 4),
'urp': RegisterInfo('urp', 4),
'srp': RegisterInfo('srp', 4),
# MC68EC040
'iacr0': RegisterInfo('iacr0', 4),
'iacr1': RegisterInfo('iacr1', 4),
'dacr0': RegisterInfo('dacr0', 4),
'dacr1': RegisterInfo('dacr1', 4),
}
stack_pointer = 'sp'
flags = ['x', 'n', 'z', 'v', 'c']
flag_write_types = ['', '*', 'nzvc']
flags_written_by_flag_write_types = {
'*': ['x', 'n', 'z', 'v', 'c'],
'nzvc': ['n', 'z', 'v', 'c'],
}
flag_roles = {
'x': FlagRole.SpecialFlagRole,
'n': FlagRole.NegativeSignFlagRole,
'z': FlagRole.ZeroFlagRole,
'v': FlagRole.OverflowFlagRole,
'c': FlagRole.CarryFlagRole,
}
flags_required_for_flag_condition = {
# LowLevelILFlagCondition. ['c', 'z'], # hi
# LowLevelILFlagCondition. ['c', 'z'], # ls
# LowLevelILFlagCondition. ['c'], # cc
# LowLevelILFlagCondition. ['c'], # cs
LowLevelILFlagCondition.LLFC_NE: ['z'], # ne
LowLevelILFlagCondition.LLFC_E: ['z'], # eq
LowLevelILFlagCondition.LLFC_NO: ['v'], # vc
LowLevelILFlagCondition.LLFC_O: ['v'], # vs
LowLevelILFlagCondition.LLFC_POS: ['n'], # pl
LowLevelILFlagCondition.LLFC_NEG: ['n'], # mi
        LowLevelILFlagCondition.LLFC_SGE: ['n', 'v'], # ge
        LowLevelILFlagCondition.LLFC_SLT: ['n', 'v'], # lt
        LowLevelILFlagCondition.LLFC_SGT: ['n', 'v', 'z'], # gt
        LowLevelILFlagCondition.LLFC_SLE: ['n', 'v', 'z'], # le
}
control_registers = {
}
memory_indirect = False
movem_store_decremented = False
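    # control_registers maps the 12-bit movec register field to a register name. It is left
    # empty here because the plain 68000 has no movec; a 68010-style subclass would presumably
    # populate it, e.g. {0x000: 'sfc', 0x001: 'dfc', 0x800: 'usp', 0x801: 'vbr'}.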
def decode_effective_address(self, mode, register, data, size=None):
mode &= 0x07
register &= 0x07
reg = None
if mode == 0:
# data register direct
return (OpRegisterDirect(size, Registers[register]), 0)
elif mode == 1:
# address register direct
return (OpRegisterDirect(size, Registers[register+8]), 0)
elif mode == 2:
# address register indirect
return (OpRegisterIndirect(size, Registers[register+8]), 0)
elif mode == 3:
# address register indirect with postincrement
return (OpRegisterIndirectPostincrement(size, Registers[register+8]), 0)
elif mode == 4:
# address register indirect with predecrement
return (OpRegisterIndirectPredecrement(size, Registers[register+8]), 0)
elif mode == 5:
# address register indirect with displacement
return (OpRegisterIndirectDisplacement(size, Registers[register+8], struct.unpack_from('>h', data, 0)[0]), 2)
elif mode == 6:
# extended addressing mode
reg = Registers[register+8]
elif mode == 7:
if register == 0:
# absolute short
val = struct.unpack_from('>H', data, 0)[0]
if val & 0x8000:
val |= 0xffff0000
return (OpAbsolute(size, val, 1), 2)
if register == 1:
# absolute long
return (OpAbsolute(size, struct.unpack_from('>L', data, 0)[0], 2), 4)
elif register == 2:
# program counter indirect with displacement
return (OpRegisterIndirectDisplacement(size, 'pc', struct.unpack_from('>h', data, 0)[0]), 2)
elif register == 3:
# extended addressing mode
reg = 'pc'
elif register == 4:
# immediate
                if size is None:
                    # unspecified length
                    return (OpImmediate(size, None), None)
                elif size == SIZE_BYTE:
                    # byte
                    return (OpImmediate(size, struct.unpack_from('>b', data, 1)[0]), 2)
                elif size == SIZE_WORD:
                    # word
                    return (OpImmediate(size, struct.unpack_from('>h', data, 0)[0]), 2)
                elif size == SIZE_LONG:
                    # long
                    return (OpImmediate(size, struct.unpack_from('>l', data, 0)[0]), 4)
if reg is not None:
extra = struct.unpack_from('>H', data, 0)[0]
# index register
xn = Registers[extra >> 12]
# index register size
index_size = (extra >> 11) & 1
# index register scale
scale = 1 << ((extra >> 9) & 3)
length = 2
if extra & 0x0100:
# full extension word
bd = 0
od = 0
# base displacement
if not (extra >> 7) & 1:
if (extra >> 4) & 3 == 2:
# word base displacement
bd = struct.unpack_from('>h', data, length)[0]
length += 2
elif (extra >> 4) & 3 == 3:
# long base displacement
bd = struct.unpack_from('>L', data, length)[0]
length += 4
# outer displacement
if extra & 3 == 2:
# word outer displacement
od = struct.unpack_from('>h', data, length)[0]
length += 2
elif extra & 3 == 3:
# long outer displacement
od = struct.unpack_from('>L', data, length)[0]
length += 4
# suppress index register
if extra & 7 == 0:
return (OpRegisterIndirectIndex(size, reg, bd, xn, index_size, scale), length)
elif (extra >> 6) & 1:
return (OpMemoryIndirect(size, reg, bd, od), length)
elif (extra >> 2) & 1:
return (OpMemoryIndirectPostindex(size, reg, bd, xn, index_size, scale, od), length)
else:
return (OpMemoryIndirectPreindex(size, reg, bd, xn, index_size, scale, od), length)
else:
# brief extension word
# 8 bit displacement
d8 = extra & 0xff
if d8 & 0x80:
d8 -= 256
return (OpRegisterIndirectIndex(size, reg, d8, xn, index_size, scale), length)
return (None, None)
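    # Decoding sketch (hypothetical input): mode=5, register=0 with extension bytes b'\x00\x10'
    # returns (OpRegisterIndirectDisplacement(size, 'a0', 0x10), 2), i.e. "$0010(a0)" plus the
    # two extension bytes consumed from the stream.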
def decode_instruction(self, data, addr):
error_value = (None, None, None, None, None, None)
if len(data) < 2:
return error_value
instruction = struct.unpack_from('>H', data)[0]
msb = instruction >> 8
operation_code = msb >> 4
#print((hex(addr), hex(instruction)))
instr = None
length = None
size = None
source = None
dest = None
third = None
if operation_code == 0x0:
            # Bit manipulation/MOVEP/Immediate
if instruction & 0xf9c0 == 0x00c0:
# rtm, callm, chk2, cmp2
if instruction & 0xfff0 == 0x06c0:
instr = 'rtm'
dest = OpRegisterDirect(SIZE_LONG, Registers[instruction & 15])
length = 2
elif instruction & 0xffc0 == 0x06c0:
instr = 'callm'
source = OpImmediate(SIZE_BYTE, struct.unpack_from('>B', data, 3)[0])
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[4:], SIZE_BYTE) # check
length = 4+extra_dest
else:
size = (instruction >> 9) & 3
extra = struct.unpack_from('>H', data, 2)[0]
if extra & 0x0800:
instr = 'chk2'
else:
instr = 'cmp2'
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[4:], SIZE_BYTE) # check
dest = OpRegisterDirect(size, Registers[(instruction >> 12) & 15])
length = 4+extra_source
elif instruction & 0xffc0 in (0x0ac0, 0x0cc0, 0x0ec0):
if instruction & 0xf9ff == 0x08fc:
instr = 'cas2'
size = ((instruction >> 9) & 3) - 1
extra1 = struct.unpack_from('>H', data, 2)[0]
extra2 = struct.unpack_from('>H', data, 4)[0]
source = OpRegisterDirectPair(size, Registers[extra1 & 7], Registers[extra2 & 7])
dest = OpRegisterDirectPair(size, Registers[(extra1 >> 6) & 7], Registers[(extra2 >> 6) & 7])
third = OpRegisterIndirectPair(size, Registers[(extra1 >> 12) & 15], Registers[(extra2 >> 12) & 15])
length = 6
else:
instr = 'cas'
size = ((instruction >> 9) & 3) - 1
extra = struct.unpack_from('>H', data, 2)[0]
source = OpRegisterDirect(size, Registers[extra & 7])
dest = OpRegisterDirect(size, Registers[(extra >> 6) & 7])
third, extra_third = self.decode_effective_address(instruction >> 3, instruction, data[4:], size)
length = 4+extra_third
elif msb in (0x00, 0x02, 0x04, 0x06, 0x0a, 0x0c):
# ORI, ANDI, SUBI, ADDI, EORI, CMPI
if msb == 0x00:
instr = 'ori'
elif msb == 0x02:
instr = 'andi'
elif msb == 0x04:
instr = 'subi'
elif msb == 0x06:
instr = 'addi'
elif msb == 0x0a:
instr = 'eori'
elif msb == 0x0c:
instr = 'cmpi'
size = (instruction >> 6) & 0x03
source, extra_source = self.decode_effective_address(7, 4, data[2:], size)
if instruction & 0x00ff == 0x003c:
dest = OpRegisterDirect(size, 'ccr')
extra_dest = 0
elif instruction & 0x00ff == 0x007c:
dest = OpRegisterDirect(size, 'sr')
extra_dest = 0
else:
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_source:], size)
if dest is None:
instr = None
else:
length = 2+extra_source+extra_dest
elif msb == 0x08:
# btst, bchg, bclr, bset with constant
if instruction & 0xffc0 == 0x0800:
instr = 'btst'
elif instruction & 0xffc0 == 0x0840:
instr = 'bchg'
elif instruction & 0xffc0 == 0x0880:
instr = 'bclr'
elif instruction & 0xffc0 == 0x08C0:
instr = 'bset'
source = OpImmediate(SIZE_BYTE, struct.unpack_from('>B', data, 3)[0])
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[4:], SIZE_BYTE)
if isinstance(dest, OpRegisterDirect):
dest.size = SIZE_LONG
if dest is None:
instr = None
else:
length = 4+extra_dest
elif msb & 0xf1 == 0x01:
# movep, btst, bchg, bclr, bset with register
if instruction & 0xf138 == 0x0108:
instr = 'movep'
size = ((instruction >> 6) & 1) + 1
source, extra_source = self.decode_effective_address(5, instruction, data[2:], SIZE_BYTE) # check
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
length = 2+extra_source
if instruction & 0x0080:
source, dest = dest, source
else:
if instruction & 0xf1c0 == 0x0100:
instr = 'btst'
elif instruction & 0xf1c0 == 0x0140:
instr = 'bchg'
elif instruction & 0xf1c0 == 0x0180:
instr = 'bclr'
elif instruction & 0xf1c0 == 0x01c0:
instr = 'bset'
source = OpRegisterDirect(SIZE_BYTE, Registers[(instruction >> 9) & 7]) # check
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2:], SIZE_BYTE)
if isinstance(dest, OpRegisterDirect):
dest.size = SIZE_LONG
if dest is None:
instr = None
else:
length = 2+extra_dest
elif instruction & 0xff00 == 0x0e00:
instr = 'moves'
extra = struct.unpack_from('>H', data, 2)[0]
size = (instruction >> 6) & 3
dest = OpRegisterDirect(size, Registers[extra >> 12])
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[4:], size)
if extra & 0x0800:
source, dest = dest, source
length = 4+extra_source
elif operation_code in (0x1, 0x2, 0x3):
# move
instr = 'move'
if operation_code == 0x1:
# Move byte
size = SIZE_BYTE
elif operation_code == 0x2:
# Move long
size = SIZE_LONG
elif operation_code == 0x3:
# Move word
size = SIZE_WORD
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
if source is None:
instr = None
else:
dest, extra_dest = self.decode_effective_address(instruction >> 6, instruction >> 9, data[2+extra_source:], size)
if dest is None or isinstance(dest, OpImmediate):
instr = None
else:
if isinstance(dest, OpRegisterDirect) and (dest.reg[0] == 'a' or dest.reg == 'sp'):
instr = 'movea'
length = 2+extra_source+extra_dest
elif operation_code == 0x4:
# Miscellaneous
extra_source = 0
extra_dest = 0
size = None
skip_ea = False
if instruction & 0xf100 == 0x4100:
# lea, extb, chk
if instruction & 0xf1c0 == 0x41c0:
if instruction & 0x0038:
instr = 'lea'
dest = OpRegisterDirect(SIZE_LONG, Registers[((instruction >> 9) & 7) + 8])
else:
instr = 'extb'
size = SIZE_LONG
else:
instr = 'chk'
if instruction & 0x0080:
size = SIZE_WORD
else:
size = SIZE_LONG
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
elif msb == 0x40:
# move from sr, negx
if instruction & 0xffc0 == 0x40c0:
# move from sr
instr = 'move'
size = SIZE_WORD
source = OpRegisterDirect(size, 'sr')
else:
instr = 'negx'
size = instruction >> 6
elif msb == 0x42:
                # move from ccr, clr
if instruction & 0xffc0 == 0x42c0:
                    # move from ccr
instr = 'move'
size = SIZE_WORD
source = OpRegisterDirect(size, 'ccr')
else:
instr = 'clr'
size = instruction >> 6
elif msb == 0x44:
                # move to ccr, neg
if instruction & 0xffc0 == 0x44c0:
                    # move to ccr
instr = 'move'
size = SIZE_WORD
dest = OpRegisterDirect(size, 'ccr')
else:
instr = 'neg'
size = instruction >> 6
elif msb == 0x46:
                # move to sr, not
if instruction & 0xffc0 == 0x46c0:
                    # move to sr
instr = 'move'
size = SIZE_WORD
dest = OpRegisterDirect(size, 'sr')
else:
instr = 'not'
size = instruction >> 6
elif msb in (0x48, 0x4c):
# link, nbcd, movem, ext, swap, bkpt, pea, divs, divu, divsl, divul, muls, mulu
if instruction & 0xfff8 == 0x4808:
instr = 'link'
size = SIZE_LONG
dest, extra_dest = self.decode_effective_address(7, 4, data[2:], size)
elif instruction & 0xffc0 == 0x4800:
instr = 'nbcd'
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_source:], SIZE_BYTE)
skip_ea = True
elif instruction & 0xfb80 == 0x4880:
if instruction & 0x0040:
size = SIZE_LONG
else:
size = SIZE_WORD
if instruction & 0x0038:
instr = 'movem'
extra_source = 2
extra = struct.unpack_from('>H', data, 2)[0]
reg_list = []
if instruction & 0x0038 == 0x0020:
for k in range(16):
if extra << k & 0x8000:
reg_list.append(Registers[k])
else:
for k in range(16):
if extra >> k & 0x0001:
reg_list.append(Registers[k])
source = OpRegisterMovemList(size, reg_list)
else:
instr = 'ext'
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_source:], size)
skip_ea = True
if instruction & 0x0400:
source, dest = dest, source
elif instruction & 0xfff8 == 0x4840:
instr = 'swap'
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_source:], SIZE_LONG)
skip_ea = True
elif instruction & 0xfff8 == 0x4848:
instr = 'bkpt'
source = OpImmediate(SIZE_BYTE, instruction & 7)
skip_ea = True
elif instruction & 0xffc0 == 0x4840:
instr = 'pea'
size = SIZE_LONG
elif msb == 0x4c:
size = SIZE_LONG
extra_dest = 2
extra = struct.unpack_from('>H', data, 2)[0]
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_dest:], size)
dh = Registers[extra & 7]
dl = Registers[(extra >> 12) & 7]
dest = OpRegisterDirect(size, dl)
if instruction & 0x0040:
if extra & 0x0800:
instr = 'divs'
else:
instr = 'divu'
if extra & 0x0400:
dest = OpRegisterDirectPair(size, dh, dl)
elif dh != dl:
dest = OpRegisterDirectPair(size, dh, dl)
instr += 'l'
else:
if extra & 0x0800:
instr = 'muls'
else:
instr = 'mulu'
if extra & 0x0400:
dest = OpRegisterDirectPair(size, dh, dl)
skip_ea = True
elif msb == 0x4a:
# bgnd, illegal, tas, tst
if instruction == 0x4afa:
instr = 'bgnd'
skip_ea = True
elif instruction == 0x4afc:
instr = 'illegal'
skip_ea = True
elif instruction & 0xffc0 == 0x4ac0:
instr = 'tas'
skip_ea = True
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2:], SIZE_BYTE)
else:
instr = 'tst'
size = instruction >> 6
elif msb == 0x4e:
# trap, link, unlk, move, reset, nop, stop, rte, rtd, rts, trapv, rtr, movec, jsr, jmp
if instruction & 0xfff0 == 0x4e40:
instr = 'trap'
length = 2
source = OpImmediate(SIZE_BYTE, instruction & 15)
skip_ea = True
elif instruction & 0xfff0 == 0x4e50:
if instruction & 0xfff8 == 0x4e50:
instr = 'link'
dest, extra_dest = self.decode_effective_address(7, 4, data[2:], 1)
else:
instr = 'unlk'
source = OpRegisterDirect(SIZE_LONG, Registers[(instruction & 7) + 8])
skip_ea = True
elif instruction & 0xfff0 == 0x4e60:
instr = 'move'
size = SIZE_LONG
source = OpRegisterDirect(SIZE_LONG, Registers[(instruction & 7) + 8])
dest = OpRegisterDirect(size, 'usp')
if instruction & 0x08:
source, dest = dest, source
skip_ea = True
elif instruction == 0x4e70:
instr = 'reset'
skip_ea = True
elif instruction == 0x4e71:
instr = 'nop'
skip_ea = True
elif instruction == 0x4e72:
instr = 'stop'
source = OpImmediate(SIZE_WORD, struct.unpack_from(">H", data, 2)[0])
extra_source = 2
skip_ea = True
elif instruction == 0x4e73:
instr = 'rte'
skip_ea = True
elif instruction == 0x4e74:
instr = 'rtd'
dest, extra_dest = self.decode_effective_address(7, 4, data[2:], SIZE_WORD)
skip_ea = True
elif instruction == 0x4e75:
instr = 'rts'
skip_ea = True
elif instruction == 0x4e76:
instr = 'trapv'
skip_ea = True
elif instruction == 0x4e77:
instr = 'rtr'
skip_ea = True
elif instruction & 0xfffe == 0x4e7A:
instr = 'movec'
size = SIZE_LONG
extended = struct.unpack_from('>H', data, 2)[0]
control_reg = self.control_registers.get(extended & 0x0fff, None)
reg = (extended >> 12) & 15
if control_reg is None:
instr = None
else:
source = OpRegisterDirect(size, control_reg)
dest = OpRegisterDirect(size, Registers[reg])
if instruction & 1:
source, dest = dest, source
extra_source = 2
skip_ea = True
elif instruction & 0xff80 == 0x4e80:
if instruction & 0xffc0 == 0x4e80:
instr = 'jsr'
else:
instr = 'jmp'
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_source:], SIZE_LONG)
skip_ea = True
if instr is not None:
if size is not None:
size &= 3
if skip_ea:
pass
elif dest is None:
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_source:], size)
else:
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2+extra_dest:], size)
if extra_source is None or extra_dest is None:
instr = None
else:
length = 2+extra_source+extra_dest
elif operation_code == 0x5:
# ADDQ/SUBQ/Scc/DBcc/TRAPcc
if instruction & 0xf0c0 == 0x50c0:
if instruction & 0xf0f8 == 0x50c8:
instr = 'db'+Condition[(instruction >> 8) & 0xf]
source = OpRegisterDirect(SIZE_WORD, Registers[instruction & 7])
dest = OpRegisterIndirectDisplacement(SIZE_LONG, 'pc', struct.unpack_from('>h', data, 2)[0])
length = 4
elif instruction & 0xf0ff in (0x50fa, 0x50fb, 0x50fc):
instr = 'trap'+Condition[(instruction >> 8) & 0xf]
if instruction & 7 == 2:
length = 4
source = OpImmediate(SIZE_WORD, struct.unpack_from('>H', data, 2)[0])
elif instruction & 7 == 3:
length = 6
source = OpImmediate(SIZE_LONG, struct.unpack_from('>L', data, 2)[0])
elif instruction & 7 == 4:
length = 2
else:
instr = 's'+Condition[(instruction >> 8) & 0xf]
size = SIZE_BYTE
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
length = 2+extra_dest
else:
if instruction & 0x0100:
instr = 'subq'
else:
instr = 'addq'
val = (instruction >> 9) & 7
if val == 0:
val = 8
size = (instruction >> 6) & 3
source = OpImmediate(SIZE_BYTE, val)
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
length = 2+extra_dest
elif operation_code == 0x6:
# Bcc/BSR/BRA
if msb == 0x60:
instr = 'bra'
elif msb == 0x61:
instr = 'bsr'
else:
instr = 'b'+Condition[(instruction >> 8) & 0xf]
val = instruction & 0xff
if val == 0:
val = struct.unpack_from('>h', data, 2)[0]
length = 4
elif val == 0xff:
                val = struct.unpack_from('>l', data, 2)[0]
length = 6
else:
if val & 0x80:
val -= 256
length = 2
dest = OpRegisterIndirectDisplacement(SIZE_LONG, 'pc', val)
elif operation_code == 0x7:
# MOVEQ
instr = 'moveq'
size = SIZE_LONG
val = instruction & 0xff
if val & 0x80:
val |= 0xffffff00
source = OpImmediate(size, val)
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
length = 2
elif operation_code == 0x8:
# OR/DIV/SBCD
if instruction & 0xf0c0 == 0x80c0:
if instruction & 0x0100:
instr = 'divs'
else:
instr = 'divu'
size = SIZE_WORD
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
length = 2+extra_source
elif instruction & 0xf1f0 == 0x8100:
instr = 'sbcd'
length = 2
dest = OpRegisterDirect(SIZE_BYTE, Registers[(instruction >> 9) & 7])
source = OpRegisterDirect(SIZE_BYTE, Registers[instruction & 7])
if instruction & 8:
dest = OpRegisterIndirectPredecrement(SIZE_BYTE, Registers[((instruction >> 9) & 7) + 8])
source = OpRegisterIndirectPredecrement(SIZE_BYTE, Registers[(instruction & 7) + 8])
elif instruction & 0xf130 == 0x8100:
if instruction & 0x0040:
instr = 'pack'
if instruction & 8:
dest = OpRegisterIndirectPredecrement(SIZE_BYTE, Registers[((instruction >> 9) & 7) + 8])
source = OpRegisterIndirectPredecrement(SIZE_WORD, Registers[(instruction & 7) + 8])
else:
dest = OpRegisterDirect(SIZE_BYTE, Registers[(instruction >> 9) & 7])
source = OpRegisterDirect(SIZE_WORD, Registers[instruction & 7])
else:
instr = 'unpk'
if instruction & 8:
dest = OpRegisterIndirectPredecrement(SIZE_WORD, Registers[((instruction >> 9) & 7) + 8])
source = OpRegisterIndirectPredecrement(SIZE_BYTE, Registers[(instruction & 7) + 8])
else:
dest = OpRegisterDirect(SIZE_WORD, Registers[(instruction >> 9) & 7])
source = OpRegisterDirect(SIZE_BYTE, Registers[instruction & 7])
length = 4
third = OpImmediate(SIZE_WORD, struct.unpack_from(">H", data, 2)[0])
else:
instr = 'or'
opmode = (instruction >> 6) & 0x7
size = (instruction >> 6) & 3
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
if opmode & 4:
source, dest = dest, source
length = 2+extra_source
elif operation_code == 0x9:
# SUB/SUBA/SUBX
instr = 'sub'
opmode = (instruction >> 6) & 0x7
if opmode in (0x03, 0x07):
instr = 'suba'
if opmode == 0x03:
size = SIZE_WORD
else:
size = SIZE_LONG
dest = OpRegisterDirect(SIZE_LONG, Registers[((instruction >> 9) & 7) + 8])
else:
size = (instruction >> 6) & 3
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
if instr == 'sub' and opmode & 4:
if isinstance(source, OpRegisterDirect):
instr = 'subx'
if source.reg[0] == 'a' or source.reg == 'sp':
source = OpRegisterIndirectPredecrement(size, source.reg)
dest = OpRegisterIndirectPredecrement(size, dest.reg)
else:
source, dest = dest, source
length = 2+extra_source
elif operation_code == 0xa:
# (unassigned, reserved)
pass
elif operation_code == 0xb:
# CMP/EOR
instr = 'cmp'
opmode = (instruction >> 6) & 0x7
if opmode in (0x03, 0x07):
instr = 'cmpa'
if opmode == 0x03:
size = SIZE_WORD
else:
size = SIZE_LONG
dest = OpRegisterDirect(size, Registers[((instruction >> 9) & 7) + 8])
else:
size = (instruction >> 6) & 3
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
if instr == 'cmp' and opmode & 4:
if instruction & 0x0038 == 0x0008:
instr = 'cmpm'
source = OpRegisterIndirectPostincrement(size, Registers[instruction & 15])
dest = OpRegisterIndirectPostincrement(size, Registers[((instruction >> 9) & 7) + 8])
else:
source, dest = dest, source
instr = 'eor'
length = 2+extra_source
elif operation_code == 0xc:
# AND/MUL/ABCD/EXG
if instruction & 0xf0c0 == 0xc0c0:
if instruction & 0x0100:
instr = 'muls'
else:
instr = 'mulu'
size = SIZE_WORD
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
length = 2+extra_source
elif instruction & 0xf130 == 0xc100:
if instruction & 0xf1f0 == 0xc100:
instr = 'abcd'
if instruction & 0x0008:
source = OpRegisterIndirectPredecrement(SIZE_BYTE, Registers[(instruction & 7) + 8])
dest = OpRegisterIndirectPredecrement(SIZE_BYTE, Registers[((instruction >> 9) & 7) + 8])
else:
source = OpRegisterDirect(SIZE_BYTE, Registers[instruction & 7])
dest = OpRegisterDirect(SIZE_BYTE, Registers[(instruction >> 9) & 7])
else:
instr = 'exg'
size = SIZE_LONG
source = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
dest = OpRegisterDirect(size, Registers[instruction & 7])
                    if instruction & 0xf1f8 == 0xc148:
                        # exg between two address registers
                        source = OpRegisterDirect(size, Registers[((instruction >> 9) & 7) + 8])
                        dest = OpRegisterDirect(size, Registers[(instruction & 7) + 8])
                    if instruction & 0xf1f8 == 0xc188:
                        # exg between a data register and an address register
                        dest = OpRegisterDirect(size, Registers[(instruction & 7) + 8])
length = 2
else:
instr = 'and'
opmode = (instruction >> 6) & 0x7
size = (instruction >> 6) & 3
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
if opmode & 4:
source, dest = dest, source
length = 2+extra_source
elif operation_code == 0xd:
# ADD/ADDA/ADDX
instr = 'add'
opmode = (instruction >> 6) & 0x7
if opmode in (0x03, 0x07):
instr = 'adda'
if opmode == 0x03:
size = SIZE_WORD
else:
size = SIZE_LONG
dest = OpRegisterDirect(SIZE_LONG, Registers[((instruction >> 9) & 7) + 8])
else:
size = (instruction >> 6) & 3
dest = OpRegisterDirect(size, Registers[(instruction >> 9) & 7])
source, extra_source = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
if instr == 'add' and opmode & 4:
if isinstance(source, OpRegisterDirect):
instr = 'addx'
if source.reg[0] == 'a' or source.reg == 'sp':
source = OpRegisterIndirectPredecrement(size, source.reg)
dest = OpRegisterIndirectPredecrement(size, dest.reg)
else:
source, dest = dest, source
length = 2+extra_source
elif operation_code == 0xe:
# shift/rotate/bit field
if instruction & 0xF8C0 == 0xE0C0:
# shift/rotate
size = SIZE_WORD
direction = (instruction >> 8) & 1
style = (instruction >> 9) & 3
dest, extra_dest = self.decode_effective_address(instruction >> 3, instruction, data[2:], size)
instr = ShiftStyle[style]
if direction:
instr += 'l'
else:
instr += 'r'
length = 2+extra_dest
elif instruction & 0xF8C0 == 0xE8C0:
# bit field instructions
# TODO
pass
else:
# shift/rotate
size = (instruction >> 6) & 3
direction = (instruction >> 8) & 1
style = (instruction >> 3) & 3
if (instruction >> 5) & 1:
source = OpRegisterDirect(SIZE_LONG, Registers[(instruction >> 9) & 7])
else:
val = (instruction >> 9) & 7
if val == 0:
val = 8
source = OpImmediate(SIZE_BYTE, val)
dest = OpRegisterDirect(size, Registers[instruction & 7])
instr = ShiftStyle[style]
if direction:
instr += 'l'
else:
instr += 'r'
length = 2
elif operation_code == 0xf:
# coprocessor instructions
# TODO
pass
if instr is None:
log_error('Bad opcode 0x{:x} at 0x{:x}'.format(instruction, addr))
return error_value
#print((instr, length, size, source, dest, third))
return instr, length, size, source, dest, third
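    # Example of the return shape: the two bytes 0x4e75 decode as
    # ('rts', 2, None, None, None, None) -- mnemonic, instruction length in bytes, size code,
    # then the source/dest/third operands.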
def generate_instruction_il(self, il, instr, length, size, source, dest, third):
size_bytes = None
if size is not None:
size_bytes = 1 << size
if instr in ('move', 'moveq'):
if instr == 'move' and isinstance(dest, OpRegisterDirect) and dest.reg in ('ccr', 'sr'):
il.append(il.set_reg(1, LLIL_TEMP(0), source.get_source_il(il)))
il.append(il.set_flag('c', il.test_bit(1, il.reg(1, LLIL_TEMP(0)), il.const(1, 0x01))))
il.append(il.set_flag('v', il.test_bit(1, il.reg(1, LLIL_TEMP(0)), il.const(1, 0x02))))
il.append(il.set_flag('z', il.test_bit(1, il.reg(1, LLIL_TEMP(0)), il.const(1, 0x04))))
il.append(il.set_flag('n', il.test_bit(1, il.reg(1, LLIL_TEMP(0)), il.const(1, 0x08))))
il.append(il.set_flag('x', il.test_bit(1, il.reg(1, LLIL_TEMP(0)), il.const(1, 0x10))))
else:
flags = 'nzvc'
if ((isinstance(source, OpRegisterDirect) and source.reg in ('usp', 'ccr', 'sr')) or
(isinstance(dest, OpRegisterDirect) and dest.reg in ('usp', 'ccr', 'sr'))):
# move to/from control registers do not set flags
flags = 0
il.append(
dest.get_dest_il(il,
source.get_source_il(il),
flags
)
)
elif instr in ('movea', 'movec'):
# dest.size = SIZE_LONG
# il.append(
# dest.get_dest_il(il,
# il.sign_extend(4,
# source.get_source_il(il)
# )
# )
# )
il.append(
dest.get_dest_il(il,
source.get_source_il(il)
)
)
elif instr == 'clr':
il.append(
dest.get_dest_il(il,
il.const(4, 0),
'nzvc'
)
)
elif instr in ('add', 'addi', 'addq'):
il.append(
dest.get_dest_il(il,
il.add(size_bytes,
dest.get_source_il(il),
source.get_source_il(il),
flags='*'
)
)
)
elif instr == 'adda':
dest.size = SIZE_LONG
il.append(
dest.get_dest_il(il,
il.add(4,
dest.get_source_il(il),
il.sign_extend(4,
source.get_source_il(il)
)
)
)
)
elif instr == 'addx':
il.append(
dest.get_dest_il(il,
il.add(size_bytes,
il.add(size_bytes,
dest.get_source_il(il),
source.get_source_il(il),
flags='*'
),
il.flag('x'),
flags='*'
)
)
)
elif instr in ('sub', 'subi', 'subq'):
il.append(
dest.get_dest_il(il,
il.sub(size_bytes,
source.get_source_il(il),
dest.get_source_il(il),
flags='*'
)
)
)
elif instr == 'suba':
dest.size = SIZE_LONG
il.append(
dest.get_dest_il(il,
il.sub(4,
dest.get_source_il(il),
il.sign_extend(4,
source.get_source_il(il)
)
)
)
)
elif instr == 'subx':
il.append(
dest.get_dest_il(il,
il.sub(size_bytes,
il.sub(size_bytes,
dest.get_source_il(il),
source.get_source_il(il),
flags='*'
),
il.flag('x'),
flags='*'
)
)
)
elif instr == 'neg':
il.append(
dest.get_dest_il(il,
il.neg_expr(size_bytes,
dest.get_source_il(il),
flags='*'
)
)
)
elif instr == 'negx':
il.append(
dest.get_dest_il(il,
il.sub(size_bytes,
il.neg_expr(size_bytes,
dest.get_source_il(il),
flags='*'
),
il.flag('x'),
flags='*'
)
)
)
elif instr == 'abcd':
# TODO
il.append(il.unimplemented())
elif instr == 'sbcd':
# TODO
il.append(il.unimplemented())
elif instr == 'nbcd':
# TODO
il.append(il.unimplemented())
elif instr == 'pack':
il.append(
il.set_reg(2,
LLIL_TEMP(0),
il.add(2,
source.get_source_il(il),
third.get_source_il(il)
)
)
)
il.append(
dest.get_dest_il(il,
il.or_expr(1,
il.and_expr(2,
il.reg(2, LLIL_TEMP(0)),
il.const(2, 0x000F)
),
il.logical_shift_right(2,
il.and_expr(2,
il.reg(2, LLIL_TEMP(0)),
il.const(2, 0x0F00)
),
il.const(1, 4)
)
)
)
)
elif instr == 'unpk':
il.append(
il.set_reg(1,
LLIL_TEMP(0),
source.get_source_il(il)
)
)
il.append(
dest.get_dest_il(il,
il.add(2,
il.or_expr(2,
il.and_expr(2,
il.reg(1, LLIL_TEMP(0)),
il.const(1, 0x0F)
),
il.shift_left(2,
il.and_expr(2,
il.reg(1, LLIL_TEMP(0)),
il.const(1, 0xF0)
),
il.const(1, 4)
)
),
third.get_source_il(il)
)
)
)
elif instr in ('muls', 'mulu'):
if isinstance(dest, OpRegisterDirectPair):
il.append(
il.set_reg_split(4,
dest.reg1,
dest.reg2,
il.mult(4,
source.get_source_il(il),
dest.get_source_il(il)[0],
flags='nzvc'
)
)
)
else:
il.append(
il.set_reg(4,
dest.reg,
il.mult(4,
source.get_source_il(il),
dest.get_source_il(il),
flags='nzvc'
)
)
)
elif instr == 'divs':
if size == 1:
dividend_il = dest.get_source_il(il)
divisor_il = source.get_source_il(il)
dest.size = SIZE_LONG
il.append(
dest.get_dest_il(il,
il.or_expr(4,
il.shift_left(4, il.mod_signed(2, dividend_il, divisor_il), il.const(1, 16)),
il.div_signed(2, dividend_il, divisor_il, flags='nzvc')
)
)
)
elif isinstance(dest, OpRegisterDirect):
dividend_il = dest.get_source_il(il)
divisor_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.div_signed(4, dividend_il, divisor_il, flags='nzvc')
)
)
else:
dividend_il = il.or_expr(8, il.shift_left(8, il.reg(4, dest.reg1), il.const(1, 32)), il.reg(4, dest.reg2))
divisor_il = source.get_source_il(il)
il.append(
il.set_reg(4,
LLIL_TEMP(0),
il.mod_signed(4, dividend_il, divisor_il)
)
)
il.append(
il.set_reg(4,
dest.reg2,
il.div_signed(4, dividend_il, divisor_il, flags='nzvc')
)
)
il.append(
il.set_reg(4,
dest.reg1,
il.reg(4, LLIL_TEMP(0))
)
)
elif instr == 'divsl':
dividend_il = il.reg(4, dest.reg2)
divisor_il = source.get_source_il(il)
il.append(
il.set_reg(4,
dest.reg1,
il.mod_signed(4, dividend_il, divisor_il)
)
)
il.append(
il.set_reg(4,
dest.reg2,
il.div_signed(4, dividend_il, divisor_il, flags='nzvc')
)
)
elif instr == 'divu':
if size == 1:
dividend_il = dest.get_source_il(il)
divisor_il = source.get_source_il(il)
dest.size = SIZE_LONG
il.append(
dest.get_dest_il(il,
il.or_expr(4,
il.shift_left(4, il.mod_unsigned(2, dividend_il, divisor_il), il.const(1, 16)),
il.div_unsigned(2, dividend_il, divisor_il, flags='nzvc')
)
)
)
elif isinstance(dest, OpRegisterDirect):
dividend_il = dest.get_source_il(il)
divisor_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.div_unsigned(4, dividend_il, divisor_il, flags='nzvc')
)
)
else:
dividend_il = il.or_expr(8, il.shift_left(8, il.reg(4, dest.reg1), il.const(1, 32)), il.reg(4, dest.reg2))
divisor_il = source.get_source_il(il)
il.append(
il.set_reg(4,
LLIL_TEMP(0),
il.mod_unsigned(4, dividend_il, divisor_il)
)
)
il.append(
il.set_reg(4,
dest.reg2,
il.div_unsigned(4, dividend_il, divisor_il, flags='nzvc')
)
)
il.append(
il.set_reg(4,
dest.reg1,
il.reg(4, LLIL_TEMP(0))
)
)
elif instr == 'divul':
dividend_il = il.reg(4, dest.reg2)
divisor_il = source.get_source_il(il)
il.append(
il.set_reg(4,
dest.reg1,
il.mod_unsigned(4, dividend_il, divisor_il)
)
)
il.append(
il.set_reg(4,
dest.reg2,
il.div_unsigned(4, dividend_il, divisor_il, flags='nzvc')
)
)
elif instr == 'cas':
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
il.append(
il.sub(size_bytes,
third.get_source_il(il),
source.get_source_il(il),
flags='nzvc'
)
)
equal = LowLevelILLabel()
not_equal = LowLevelILLabel()
il.append(
il.if_expr(il.flag_condition(LowLevelILFlagCondition.LLFC_E), equal, not_equal)
)
il.mark_label(equal)
il.append(
third.get_dest_il(il,
dest.get_source_il(il)
)
)
il.append(
il.goto(skip)
)
il.mark_label(not_equal)
il.append(
source.get_dest_il(il,
third.get_source_il(il)
)
)
if not skip_label_found:
il.mark_label(skip)
elif instr == 'cas2':
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
il.append(
il.sub(size_bytes,
third.get_source_il(il)[0],
source.get_source_il(il)[0],
flags='nzvc'
)
)
equal = LowLevelILLabel()
not_equal = LowLevelILLabel()
check2 = LowLevelILLabel()
il.append(
il.if_expr(il.flag_condition(LowLevelILFlagCondition.LLFC_E), check2, not_equal)
)
il.mark_label(check2)
il.append(
il.sub(size_bytes,
third.get_source_il(il)[1],
source.get_source_il(il)[1],
flags='nzvc'
)
)
il.append(
il.if_expr(il.flag_condition(LowLevelILFlagCondition.LLFC_E), equal, not_equal)
)
il.mark_label(equal)
for it in third.get_dest_il(il,
dest.get_source_il(il)
):
il.append(it)
il.append(
il.goto(skip)
)
il.mark_label(not_equal)
for it in source.get_dest_il(il,
third.get_source_il(il)
):
il.append(it)
il.append(
il.goto(skip)
)
if not skip_label_found:
il.mark_label(skip)
elif instr == 'chk':
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
trap = LowLevelILLabel()
check = LowLevelILLabel()
il.append(
il.if_expr(
il.compare_unsigned_less_than(size_bytes,
dest.get_source_il(il),
il.const(size_bytes, 0)
),
trap,
check
)
)
il.mark_label(check)
il.append(
il.if_expr(
il.compare_unsigned_greater_than(size_bytes,
dest.get_source_il(il),
source.get_source_il(il)
),
trap,
skip
)
)
il.mark_label(trap)
il.append(
il.system_call()
)
il.append(
il.goto(skip)
)
if not skip_label_found:
il.mark_label(skip)
elif instr == 'chk2':
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
trap = LowLevelILLabel()
check = LowLevelILLabel()
il.append(
il.set_reg(4,
LLIL_TEMP(0),
source.get_address_il(il)
)
)
il.append(
il.if_expr(
il.compare_unsigned_less_than(size_bytes,
dest.get_source_il(il),
il.load(size_bytes,
il.reg(4, LLIL_TEMP(0))
)
),
trap,
check
)
)
il.mark_label(check)
il.append(
il.if_expr(
il.compare_unsigned_greater_than(size_bytes,
dest.get_source_il(il),
il.load(size_bytes,
il.add(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, size_bytes)
)
)
),
trap,
skip
)
)
il.mark_label(trap)
il.append(
il.system_call()
)
il.append(
il.goto(skip)
)
if not skip_label_found:
il.mark_label(skip)
elif instr == 'bchg':
bit_number_il = il.mod_unsigned(1,
source.get_source_il(il),
il.const(1, 8 << dest.size)
)
il.append(
il.set_flag('z',
il.compare_not_equal(4,
il.test_bit(4,
dest.get_source_il(il),
il.shift_left(4,
il.const(4, 1),
bit_number_il
)
),
il.const(4, 0)
)
)
)
il.append(
dest.get_dest_il(il,
il.xor_expr(4,
dest.get_source_il(il),
il.shift_left(4,
il.const(4, 1),
bit_number_il
)
)
)
)
elif instr == 'bclr':
bit_number_il = il.mod_unsigned(1,
source.get_source_il(il),
il.const(1, 8 << dest.size)
)
il.append(
il.set_flag('z',
il.compare_not_equal(4,
il.test_bit(4,
dest.get_source_il(il),
il.shift_left(4,
il.const(4, 1),
bit_number_il
)
),
il.const(4, 0)
)
)
)
il.append(
dest.get_dest_il(il,
il.and_expr(4,
dest.get_source_il(il),
il.not_expr(4,
il.shift_left(4,
il.const(4, 1),
bit_number_il
)
)
)
)
)
elif instr == 'bset':
bit_number_il = il.mod_unsigned(1,
source.get_source_il(il),
il.const(1, 8 << dest.size)
)
il.append(
il.set_flag('z',
il.compare_not_equal(4,
il.test_bit(4,
dest.get_source_il(il),
il.shift_left(4,
il.const(4, 1),
bit_number_il
)
),
il.const(4, 0)
)
)
)
il.append(
dest.get_dest_il(il,
il.or_expr(4,
dest.get_source_il(il),
il.shift_left(4,
il.const(4, 1),
bit_number_il
)
)
)
)
elif instr == 'btst':
bit_number_il = il.mod_unsigned(1,
source.get_source_il(il),
il.const(1, 8 << dest.size)
)
il.append(
il.set_flag('z',
il.compare_not_equal(4,
il.test_bit(4,
dest.get_source_il(il),
il.shift_left(4,
il.const(4, 1),
bit_number_il
)
),
il.const(4, 0)
)
)
)
elif instr in ('asl', 'lsl'):
source_il = il.const(1, 1)
if source is not None:
source_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.shift_left(size_bytes,
dest.get_source_il(il),
source_il,
flags='*'
)
)
)
elif instr == 'asr':
source_il = il.const(1, 1)
if source is not None:
source_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.arith_shift_right(size_bytes,
dest.get_source_il(il),
source_il,
flags='*'
)
)
)
elif instr == 'lsr':
source_il = il.const(1, 1)
if source is not None:
source_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.logical_shift_right(size_bytes,
dest.get_source_il(il),
source_il,
flags='*'
)
)
)
elif instr == 'rol':
source_il = il.const(1, 1)
if source is not None:
source_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.rotate_left(size_bytes,
dest.get_source_il(il),
source_il,
flags='*'
)
)
)
elif instr == 'ror':
source_il = il.const(1, 1)
if source is not None:
source_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.rotate_right(size_bytes,
dest.get_source_il(il),
source_il,
flags='*'
)
)
)
elif instr == 'roxl':
source_il = il.const(1, 1)
if source is not None:
source_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.rotate_left_carry(size_bytes,
dest.get_source_il(il),
source_il,
flags='*'
)
)
)
elif instr == 'roxr':
source_il = il.const(1, 1)
if source is not None:
source_il = source.get_source_il(il)
il.append(
dest.get_dest_il(il,
il.rotate_right_carry(size_bytes,
dest.get_source_il(il),
source_il,
flags='*'
)
)
)
elif instr in ('cmp', 'cmpi', 'cmpm'):
il.append(
il.sub(size_bytes,
dest.get_source_il(il),
source.get_source_il(il),
flags='nzvc'
)
)
elif instr == 'cmpa':
dest.size = SIZE_LONG
il.append(
il.sub(4,
dest.get_source_il(il),
il.sign_extend(4,
source.get_source_il(il)
),
flags='nzvc'
)
)
elif instr == 'cmp2':
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
check = LowLevelILLabel()
il.append(
il.set_reg(4,
LLIL_TEMP(0),
source.get_address_il(il)
)
)
il.append(
il.sub(size_bytes,
dest.get_source_il(il),
il.load(size_bytes,
il.reg(4, LLIL_TEMP(0))
),
flags='nzvc'
)
)
il.append(
il.if_expr(
il.flag_condition(LowLevelILFlagCondition.LLFC_ULT),
skip,
check
)
)
il.mark_label(check)
il.append(
il.sub(size_bytes,
dest.get_source_il(il),
il.load(size_bytes,
il.add(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, size_bytes)
)
),
flags='nzvc'
)
)
il.append(
il.goto(skip)
)
if not skip_label_found:
il.mark_label(skip)
elif instr == 'tas':
il.append(
il.set_reg(1, LLIL_TEMP(0), dest.get_source_il(il), flags='nzvc')
)
il.append(
dest.get_dest_il(il,
il.or_expr(1,
il.reg(1, LLIL_TEMP(0)),
il.const(1, 0x80)
)
)
)
elif instr == 'tst':
il.append(
il.sub(size_bytes,
dest.get_source_il(il),
il.const(4, 0),
flags='nzvc'
)
)
elif instr in ('and', 'andi'):
if instr == 'andi' and isinstance(dest, OpRegisterDirect) and dest.reg in ('ccr', 'sr'):
if not source.value & 0x01: il.append(il.set_flag('c', il.const(1, 0)))
if not source.value & 0x02: il.append(il.set_flag('v', il.const(1, 0)))
if not source.value & 0x04: il.append(il.set_flag('z', il.const(1, 0)))
if not source.value & 0x08: il.append(il.set_flag('n', il.const(1, 0)))
                if not source.value & 0x10: il.append(il.set_flag('x', il.const(1, 0)))
else:
il.append(
dest.get_dest_il(il,
il.and_expr(size_bytes,
dest.get_source_il(il),
source.get_source_il(il),
flags='nzvc'
)
)
)
elif instr in ('or', 'ori'):
if instr == 'ori' and isinstance(dest, OpRegisterDirect) and dest.reg in ('ccr', 'sr'):
if source.value & 0x01: il.append(il.set_flag('c', il.const(1, 1)))
if source.value & 0x02: il.append(il.set_flag('v', il.const(1, 1)))
if source.value & 0x04: il.append(il.set_flag('z', il.const(1, 1)))
if source.value & 0x08: il.append(il.set_flag('n', il.const(1, 1)))
                if source.value & 0x10: il.append(il.set_flag('x', il.const(1, 1)))
else:
il.append(
dest.get_dest_il(il,
il.or_expr(size_bytes,
dest.get_source_il(il),
source.get_source_il(il),
flags='nzvc'
)
)
)
elif instr in ('eor', 'eori'):
if instr == 'eori' and isinstance(dest, OpRegisterDirect) and dest.reg in ('ccr', 'sr'):
if source.value & 0x01: il.append(il.set_flag('c', il.xor_expr(1, il.flag('c'), il.const(1, 1))))
if source.value & 0x02: il.append(il.set_flag('v', il.xor_expr(1, il.flag('v'), il.const(1, 1))))
if source.value & 0x04: il.append(il.set_flag('z', il.xor_expr(1, il.flag('z'), il.const(1, 1))))
if source.value & 0x08: il.append(il.set_flag('n', il.xor_expr(1, il.flag('n'), il.const(1, 1))))
                if source.value & 0x10: il.append(il.set_flag('x', il.xor_expr(1, il.flag('x'), il.const(1, 1))))
else:
il.append(
dest.get_dest_il(il,
il.xor_expr(size_bytes,
dest.get_source_il(il),
source.get_source_il(il),
flags='nzvc'
)
)
)
elif instr == 'not':
il.append(
dest.get_dest_il(il,
il.not_expr(size_bytes,
dest.get_source_il(il),
flags='nzvc'
)
)
)
elif instr == 'swap':
il.append(
dest.get_dest_il(il,
il.rotate_right(4,
dest.get_source_il(il),
il.const(1, 16)
)
)
)
elif instr == 'exg':
il.append(
il.set_reg(4, LLIL_TEMP(0), source.get_source_il(il))
)
il.append(
source.get_dest_il(il, dest.get_source_il(il))
)
il.append(
dest.get_dest_il(il, il.reg(4, LLIL_TEMP(0)))
)
elif instr == 'ext':
reg = dest.reg
if dest.size == 1:
il.append(
il.set_reg(2,
reg,
il.sign_extend(4,
il.reg(1, reg),
flags='nzvc'
)
)
)
else:
il.append(
il.set_reg(4,
reg,
il.sign_extend(4,
il.reg(2, reg),
flags='nzvc'
)
)
)
elif instr == 'extb':
reg = dest.reg
il.append(
il.set_reg(4,
reg,
il.sign_extend(4,
il.reg(1, reg),
flags='nzvc'
)
)
)
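        # movem stores/loads a register list; for the predecrement form, whether the
        # address register is updated before or after the transfers is controlled by
        # movem_store_decremented (the behaviour differs between 68000/010 and 68020+).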
elif instr == 'movem':
if isinstance(source, OpRegisterMovemList):
if isinstance(dest, OpRegisterIndirectPredecrement):
il.append(
il.set_reg(4, LLIL_TEMP(0), dest.get_address_il(il))
)
if self.movem_store_decremented:
il.append(
il.set_reg(4,
dest.reg,
il.sub(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, len(source.regs)*size_bytes)
)
)
)
for k in range(len(source.regs)):
il.append(
il.store(size_bytes,
il.sub(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, (k+1)*size_bytes)
),
il.reg(size_bytes, source.regs[len(source.regs)-1-k])
)
)
if not self.movem_store_decremented:
il.append(
il.set_reg(4,
dest.reg,
il.sub(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, len(source.regs)*size_bytes)
)
)
)
else:
il.append(
il.set_reg(4, LLIL_TEMP(0), dest.get_address_il(il))
)
for k in range(len(source.regs)):
il.append(
il.store(size_bytes,
il.add(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, k*size_bytes)
),
il.reg(size_bytes, source.regs[k])
)
)
else:
il.append(
il.set_reg(4, LLIL_TEMP(0), source.get_address_il(il))
)
for k in range(len(dest.regs)):
il.append(
il.set_reg(size_bytes,
dest.regs[k],
il.load(size_bytes,
il.add(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, k*size_bytes)
)
)
)
)
if isinstance(source, OpRegisterIndirectPostincrement):
il.append(
il.set_reg(4,
source.reg,
il.add(4,
il.reg(4, LLIL_TEMP(0)),
il.const(4, len(dest.regs)*size_bytes)
)
)
)
elif instr == 'lea':
il.append(
dest.get_dest_il(il, source.get_address_il(il))
)
elif instr == 'pea':
il.append(
il.push(4, dest.get_address_il(il))
)
elif instr == 'link':
source.size = SIZE_LONG
il.append(
il.push(4, source.get_source_il(il))
)
il.append(
source.get_dest_il(il, il.reg(4, "sp"))
)
il.append(
il.set_reg(4,
"sp",
il.add(4,
il.reg(4, "sp"),
il.sign_extend(4, dest.get_source_il(il))
)
)
)
elif instr == 'unlk':
il.append(
il.set_reg(4, "sp", source.get_source_il(il))
)
il.append(
source.get_dest_il(il, il.pop(4))
)
elif instr in ('jmp', 'bra'):
# TODO labels
il.append(
il.jump(dest.get_address_il(il))
)
elif instr in ('jsr', 'bsr'):
# TODO labels
il.append(
il.call(dest.get_address_il(il))
)
elif instr == 'callm':
# TODO
il.append(il.unimplemented())
elif instr in ('bhi', 'bls', 'bcc', 'bcs', 'bne', 'beq', 'bvc', 'bvs',
'bpl', 'bmi', 'bge', 'blt', 'bgt', 'ble'):
flag_cond = ConditionMapping.get(instr[1:], None)
dest_il = dest.get_address_il(il)
cond_il = None
if flag_cond is not None:
cond_il = il.flag_condition(flag_cond)
if cond_il is None:
il.append(il.unimplemented())
else:
t = il.get_label_for_address(Architecture['M68000'], il[dest_il].value)
indirect = False
if t is None:
t = LowLevelILLabel()
indirect = True
f_label_found = True
f = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if f is None:
f = LowLevelILLabel()
f_label_found = False
il.append(
il.if_expr(cond_il, t, f)
)
if indirect:
il.mark_label(t)
il.append(il.jump(dest_il))
if not f_label_found:
il.mark_label(f)
elif instr in ('dbt', 'dbf', 'dbhi', 'dbls', 'dbcc', 'dbcs', 'dbne',
'dbeq', 'dbvc', 'dbvs', 'dbpl', 'dbmi', 'dbge', 'dblt',
'dbgt', 'dble'):
flag_cond = ConditionMapping.get(instr[2:], None)
dest_il = dest.get_address_il(il)
cond_il = None
if flag_cond is not None:
cond_il = il.flag_condition(flag_cond)
elif instr == 'dbt':
cond_il = il.const(1, 1)
elif instr == 'dbf':
cond_il = il.const(1, 0)
if cond_il is None:
il.append(il.unimplemented())
else:
branch = il.get_label_for_address(Architecture['M68000'], il[dest_il].value)
indirect = False
if branch is None:
branch = LowLevelILLabel()
indirect = True
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
decrement = LowLevelILLabel()
il.append(
il.if_expr(cond_il, skip, decrement)
)
il.mark_label(decrement)
il.append(
il.set_reg(2,
LLIL_TEMP(0),
il.sub(2,
source.get_source_il(il),
il.const(2, 1)
)
)
)
il.append(
source.get_dest_il(il, il.reg(2, LLIL_TEMP(0)))
)
il.append(
il.if_expr(
il.compare_equal(2,
il.reg(2, LLIL_TEMP(0)),
il.const(2, -1)
),
skip,
branch
)
)
if indirect:
il.mark_label(branch)
il.append(il.jump(dest_il))
if not skip_label_found:
il.mark_label(skip)
elif instr in ('st', 'sf', 'shi', 'sls', 'scc', 'scs', 'sne', 'seq',
'svc', 'svs', 'spl', 'smi', 'sge', 'slt', 'sgt', 'sle'):
flag_cond = ConditionMapping.get(instr[1:], None)
cond_il = None
if flag_cond is not None:
cond_il = il.flag_condition(flag_cond)
elif instr == 'st':
cond_il = il.const(1, 1)
elif instr == 'sf':
cond_il = il.const(1, 0)
if cond_il is None:
il.append(il.unimplemented())
else:
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
set_dest = LowLevelILLabel()
clear_dest = LowLevelILLabel()
il.append(
il.if_expr(cond_il, set_dest, clear_dest)
)
il.mark_label(set_dest)
il.append(
dest.get_dest_il(il, il.const(1, 1))
)
il.append(
il.goto(skip)
)
il.mark_label(clear_dest)
il.append(
dest.get_dest_il(il, il.const(1, 0))
)
il.append(
il.goto(skip)
)
if not skip_label_found:
il.mark_label(skip)
elif instr == 'rtd':
il.append(
il.set_reg(4,
LLIL_TEMP(0),
il.pop(4)
)
)
il.append(
il.set_reg(4,
"sp",
il.add(4,
il.reg(4, "sp"),
source.get_source_il(il)
)
)
)
il.append(
il.ret(
il.reg(4, LLIL_TEMP(0))
)
)
elif instr == 'rte':
il.append(
il.set_reg(2,
"sr",
il.pop(2)
)
)
il.append(
il.ret(
il.pop(4)
)
)
elif instr == 'rtm':
# TODO
il.append(il.unimplemented())
elif instr == 'rtr':
il.append(
il.set_reg(2,
"ccr",
il.pop(2)
)
)
il.append(
il.ret(
il.pop(4)
)
)
elif instr == 'rts':
il.append(
il.ret(
il.pop(4)
)
)
elif instr in ('trapv', 'trapt', 'trapf', 'traphi', 'trapls', 'trapcc',
'trapcs', 'trapne', 'trapeq', 'trapvc', 'trapvs', 'trappl',
'trapmi', 'trapge', 'traplt', 'trapgt', 'traple'):
flag_cond = ConditionMapping.get(instr[4:], None)
cond_il = None
if flag_cond is not None:
cond_il = il.flag_condition(flag_cond)
elif instr == 'trapt':
cond_il = il.const(1, 1)
elif instr == 'trapf':
cond_il = il.const(1, 0)
elif instr == 'trapv':
cond_il = il.flag_condition(LowLevelILFlagCondition.LLFC_O)
if cond_il is None:
il.append(il.unimplemented())
else:
skip_label_found = True
skip = il.get_label_for_address(Architecture['M68000'], il.current_address+length)
if skip is None:
skip = LowLevelILLabel()
skip_label_found = False
trap = LowLevelILLabel()
il.append(
il.if_expr(cond_il, trap, skip)
)
il.mark_label(trap)
il.append(
il.system_call()
)
il.append(
il.goto(skip)
)
if not skip_label_found:
il.mark_label(skip)
elif instr in ('trap', 'illegal', 'bkpt'):
il.append(il.system_call())
elif instr in ('bgnd', 'nop', 'reset', 'stop'):
il.append(il.nop())
else:
il.append(il.unimplemented())
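    # Binary Ninja hook: report instruction length and any branch targets so the
    # CFG builder can follow jumps, calls, conditional branches and returns.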
def perform_get_instruction_info(self, data, addr):
instr, length, size, source, dest, third = self.decode_instruction(data, addr)
if instr is None:
return None
result = InstructionInfo()
result.length = length
if instr in ('rtd', 'rte', 'rtr', 'rts'):
result.add_branch(BranchType.FunctionReturn)
elif instr in ('jmp', 'jsr',
'bra', 'bsr', 'bhi', 'bls', 'bcc', 'bcs', 'bne', 'beq',
'bvc', 'bvs', 'bpl', 'bmi', 'bge', 'blt', 'bgt', 'ble',
'dbt', 'dbf', 'dbhi', 'dbls', 'dbcc', 'dbcs', 'dbne',
'dbeq', 'dbvc', 'dbvs', 'dbpl', 'dbmi', 'dbge', 'dblt',
'dbgt', 'dble'):
conditional = False
call = False
branch_dest = None
bt = BranchType.UnresolvedBranch
if instr in ('jmp', 'bra'):
bt = BranchType.UnconditionalBranch
elif instr in ('jsr', 'bsr'):
call = True
bt = BranchType.CallDestination
else:
conditional = True
if isinstance(dest, OpAbsolute):
branch_dest = dest.address
elif isinstance(dest, OpRegisterIndirect):
if dest.reg == 'pc':
branch_dest = addr+2
else:
bt = BranchType.IndirectBranch
elif isinstance(dest, OpRegisterIndirectDisplacement):
if dest.reg == 'pc':
branch_dest = addr+2+dest.offset
else:
bt = BranchType.IndirectBranch
if conditional:
if instr[0:2] == 'db':
result.add_branch(BranchType.TrueBranch, addr+length)
result.add_branch(BranchType.FalseBranch, branch_dest)
else:
result.add_branch(BranchType.TrueBranch, branch_dest)
result.add_branch(BranchType.FalseBranch, addr+length)
else:
if call and bt == BranchType.IndirectBranch:
# don't branch at all for indirect calls
pass
elif bt == BranchType.IndirectBranch or bt == BranchType.UnresolvedBranch or branch_dest is None:
result.add_branch(bt)
else:
result.add_branch(bt, branch_dest)
return result
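    # Binary Ninja hook: produce the disassembly text (mnemonic plus size suffix
    # and comma-separated operands) for one instruction.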
def perform_get_instruction_text(self, data, addr):
instr, length, size, source, dest, third = self.decode_instruction(data, addr)
if instr is None:
return None
if size is not None:
instr += SizeSuffix[size]
tokens = [InstructionTextToken(InstructionTextTokenType.InstructionToken, "%-10s" % instr)]
if source is not None:
tokens += source.format(addr)
if dest is not None:
if source is not None:
tokens += [InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ',')]
tokens += dest.format(addr)
if third is not None:
if source is not None or dest is not None:
tokens += [InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ',')]
tokens += third.format(addr)
return tokens, length
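    # Binary Ninja hook: emit LLIL for one instruction. Predecrement/postincrement
    # addressing modes are expanded around the main IL, except for movem, which
    # handles the address register update itself.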
def perform_get_instruction_low_level_il(self, data, addr, il):
instr, length, size, source, dest, third = self.decode_instruction(data, addr)
if instr is None:
return None
if instr == 'movem':
# movem overrides default predecrement/postincrement IL generation
self.generate_instruction_il(il, instr, length, size, source, dest, third)
else:
# predecrement
if source is not None:
pre_il = source.get_pre_il(il)
if pre_il is not None:
il.append(pre_il)
if dest is not None:
pre_il = dest.get_pre_il(il)
if pre_il is not None:
il.append(pre_il)
if third is not None:
pre_il = third.get_pre_il(il)
if pre_il is not None:
il.append(pre_il)
self.generate_instruction_il(il, instr, length, size, source, dest, third)
# postincrement
if source is not None:
post_il = source.get_post_il(il)
if post_il is not None:
il.append(post_il)
if dest is not None:
post_il = dest.get_post_il(il)
if post_il is not None:
il.append(post_il)
if third is not None:
post_il = third.get_post_il(il)
if post_il is not None:
il.append(post_il)
return length
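    # Binary patching helpers. The first-byte checks below rely on the 68k opcode
    # layout: 0x60-0x6f covers BRA/BSR/Bcc, and 0x4e with the high bit of the
    # second byte set covers JMP/JSR.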
def perform_is_never_branch_patch_available(self, data, addr):
data = bytearray(data)
if data[0] & 0xf0 == 0x60:
# BRA, BSR, Bcc
return True
if data[0] == 0x4e and data[1] & 0x80 == 0x80:
# JMP, JSR
return True
return False
def perform_is_invert_branch_patch_available(self, data, addr):
data = bytearray(data)
if data[0] & 0xf0 == 0x60 and data[0] & 0xfe != 0x60:
# Bcc
return True
return False
def perform_is_always_branch_patch_available(self, data, addr):
data = bytearray(data)
if data[0] & 0xf0 == 0x60 and data[0] & 0xfe != 0x60:
# Bcc
return True
return False
def perform_is_skip_and_return_zero_patch_available(self, data, addr):
return self.perform_skip_and_return_value(data, addr)
def perform_is_skip_and_return_value_patch_available(self, data, addr):
data = bytearray(data)
if data[0] == 0x61:
# BSR
return True
if data[0] == 0x4e and data[1] & 0xc0 == 0x80:
# JSR
return True
return False
def perform_convert_to_nop(self, data, addr):
count = int(len(data)/2)
if count*2 != len(data):
return None
return b'\x4e\x71' * count
def perform_never_branch(self, data, addr):
data = bytearray(data)
if data[0] & 0xf0 == 0x60:
# BRA, BSR, Bcc
return self.perform_convert_to_nop(data, addr)
if data[0] == 0x4e and data[1] & 0x80 == 0x80:
# JMP, JSR
return self.perform_convert_to_nop(data, addr)
return None
def perform_invert_branch(self, data, addr):
data = bytearray(data)
if data[0] & 0xf0 == 0x60 and data[0] & 0xfe != 0x60:
# Bcc
return bytearray([data[0]^1])+data[1:]
return None
def perform_always_branch(self, data, addr):
data = bytearray(data)
if data[0] & 0xf0 == 0x60 and data[0] & 0xfe != 0x60:
# Bcc
return b'\x60'+data[1:]
return None
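    # Replace a call with code that loads `value` into d0: "moveq #value,d0" (0x70xx)
    # when the value fits in a signed byte, otherwise "move.l #value,d0" (0x203c plus
    # a 32-bit immediate), padding the rest of the patched range with NOPs (0x4e71).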
def perform_skip_and_return_value(self, data, addr, value=0):
count = int(len(data)/2)
if count*2 != len(data):
return None
data = bytearray(data)
ok = False
if data[0] == 0x61:
# BSR
ok = True
if data[0] == 0x4e and data[1] & 0xc0 == 0x80:
# JSR
ok = True
if not ok:
return None
if value > 0x80000000:
value = value - 0x100000000
if value >= -128 and value <= 127 and len(data) >= 2:
value = value & 0xff
            return b'\x70'+struct.pack('>B', value)+b'\x4e\x71'*(count-1)
if len(data) >= 6:
return b'\x20\x3C'+struct.pack('>l', value)+b'\x4e\x71'*(count-3)
return None
class M68008(M68000):
name = "M68008"
class M68010(M68000):
name = "M68010"
control_registers = {
0x000: 'sfc',
0x001: 'dfc',
0x800: 'usp',
0x801: 'vbr',
}
# add BKPT, MOVE from CCR, MOVEC, MOVES, RTD
class M68020(M68010):
name = "M68020"
control_registers = {
0x000: 'sfc',
0x001: 'dfc',
0x800: 'usp',
0x801: 'vbr',
0x002: 'cacr',
0x802: 'caar',
0x803: 'msp',
0x804: 'isp',
}
memory_indirect = True
movem_store_decremented = True
# add BFCHG, BFCLR, BFEXTS, BFEXTU, BFFO, BFINS, BFSET, BFTST, CALLM, CAS, CAS2, CHK2, CMP2, cpBcc, cpDBcc, cpGEN, cpRESTORE, cpSAVE, cpScc, cpTRAPcc
# DIVSL, DIVUL, EXTB, PACK, RTM, TRAPcc, UNPK
# add memory indirect addressing
class M68030(M68020):
name = "M68030"
# remove CALLM, RTM
# add PFLUSH, PFLUSHA, PLOAD, PMOVE, PTEST
class M68040(M68030):
name = "M68040"
control_registers = {
0x000: 'sfc',
0x001: 'dfc',
0x800: 'usp',
0x801: 'vbr',
0x002: 'cacr',
0x803: 'msp',
0x804: 'isp',
0x003: 'tc',
0x004: 'itt0',
0x005: 'itt1',
0x006: 'dtt0',
0x007: 'dtt1',
0x805: 'mmusr',
0x806: 'urp',
0x807: 'srp',
}
# remove cpBcc, cpDBcc, cpGEN, cpRESTORE, cpSAVE, cpScc, cpTRAPcc, PFLUSHA, PLOAD, PMOVE
# add CINV, CPUSH, floating point, MOVE16
class M68LC040(M68040):
name = "M68LC040"
class M68EC040(M68040):
name = "M68EC040"
control_registers = {
0x000: 'sfc',
0x001: 'dfc',
0x800: 'usp',
0x801: 'vbr',
0x002: 'cacr',
0x803: 'msp',
0x804: 'isp',
0x004: 'iacr0',
0x005: 'iacr1',
0x006: 'dacr0',
0x007: 'dacr1'
}
class M68330(M68010):
name = "M68330"
movem_store_decremented = True
# AKA CPU32
# add BGND, CHK2, CMP2, DIVSL, DIVUL, EXTB, LPSTOP, TBLS, TBLSN, TBLU, TBLUN, TRAPcc
class M68340(M68330):
name = "M68340"
def create_vector_table(view, addr, size=256):
vectors = {
0: 'reset_initial_interrupt_stack_pointer',
1: 'reset_initial_program_counter',
2: 'access_fault',
3: 'address_error',
4: 'illegal_instruction',
5: 'integer_divide_by_zero',
6: 'chk_chk2_instruction',
7: 'ftrapcc_trapcc_trapv_instruction',
8: 'privilege_violation',
9: 'trace',
10: 'line_1010_emulator',
11: 'line_1111_emulator',
# 12 unassigned_reserved
13: 'coprocessor_protocol_violation',
14: 'format_error',
15: 'uninitialized_interrupt',
# 16-23 unassigned_reserved
24: 'spurious_interrupt',
25: 'level_1_interrupt_autovector',
26: 'level_2_interrupt_autovector',
27: 'level_3_interrupt_autovector',
28: 'level_4_interrupt_autovector',
29: 'level_5_interrupt_autovector',
30: 'level_6_interrupt_autovector',
31: 'level_7_interrupt_autovector',
32: 'trap_0_instruction',
33: 'trap_1_instruction',
34: 'trap_2_instruction',
35: 'trap_3_instruction',
36: 'trap_4_instruction',
37: 'trap_5_instruction',
38: 'trap_6_instruction',
39: 'trap_7_instruction',
40: 'trap_8_instruction',
41: 'trap_9_instruction',
42: 'trap_10_instruction',
43: 'trap_11_instruction',
44: 'trap_12_instruction',
45: 'trap_13_instruction',
46: 'trap_14_instruction',
47: 'trap_15_instruction',
48: 'fp_branch_or_set_on_unordered_condition',
49: 'fp_inexact_result',
50: 'fp_divide_by_zero',
51: 'fp_underflow',
52: 'fp_operand_error',
53: 'fp_overflow',
54: 'fp_signaling_nan',
55: 'fp_unimplemented_data_type',
56: 'mmu_configuration_error',
57: 'mmu_illegal_operation_error',
58: 'mmu_access_level_violation_error',
# 59-63 unassigned_reserved
}
for k in range(0, 192):
vectors[k+64] = 'user_%d' % k
t = view.parse_type_string("void *")[0]
for k in range(size):
name = vectors.get(k, 'unassigned_reserved')
view.define_user_symbol(Symbol(SymbolType.DataSymbol, addr+4*k, "_vector_%d_%s" % (k, name)))
view.define_user_data_var(addr+4*k, t)
value = struct.unpack(">L", view.read(addr+4*k, 4))[0]
if k > 0:
view.define_user_symbol(Symbol(SymbolType.FunctionSymbol, value, "vector_%d_%s" % (k, name)))
view.add_entry_point(value)
def prompt_create_vector_table(view, addr=None):
architectures = ['M68000', 'M68008', 'M68010', 'M68020', 'M68030', 'M68040', 'M68LC040', 'M68EC040', 'M68330', 'M68340']
size_choices = ['Full (256)', 'MMU (59)', 'FP (56)', 'Traps (48)', 'Interrupts (32)']
size_raw = [256, 59, 56, 48, 32]
if addr is None:
addr = 0
need_arch = True
if view.platform is not None and view.platform.arch.name in architectures:
# 68k arch already selected
need_arch = False
address_field = AddressField('Address', view, addr)
arch_field = ChoiceField('Architecture', architectures)
size_field = ChoiceField('Table size', size_choices)
res = False
if need_arch:
res = get_form_input([address_field, arch_field, size_field], 'Create M68k vector table')
else:
res = get_form_input([address_field, size_field], 'Create M68k vector table')
if res:
address = address_field.result
size = size_raw[size_field.result]
if need_arch:
arch = architectures[arch_field.result]
view.platform = Architecture[arch].standalone_platform
create_vector_table(view, address, size)
#PluginCommand.register("Create M68k vector table", "Create M68k vector table", prompt_create_vector_table)
PluginCommand.register_for_address("Create M68k vector table", "Create M68k vector table", prompt_create_vector_table)
M68000.register()
M68008.register()
M68010.register()
M68020.register()
M68030.register()
M68040.register()
M68LC040.register()
M68EC040.register()
M68330.register()
M68340.register()
|
py | 1a4105ad71311c7d4ce3c72af3483de90d72f0ab | import torch
from torch.utils.data import DataLoader, TensorDataset
from argparse import Namespace
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import h5py
import json
import os
def load_data_1scale(hdf5_file, ndata, batch_size, singlescale=True):
with h5py.File(hdf5_file, 'r') as f:
x_data = f['train'][:ndata]
    data_tuple = (torch.FloatTensor(x_data), )  # only one scale is loaded here, so there is no second tensor
data_loader = DataLoader(TensorDataset(*data_tuple),
batch_size=batch_size, shuffle=True, drop_last=True)
return data_loader
def load_data_2scales(hdf5_file,hdf5_file1, ndata, batch_size, singlescale=False):
with h5py.File(hdf5_file, 'r') as f:
x2_data = f['train'][:ndata]
with h5py.File(hdf5_file1, 'r') as f:
x1_data = f['train'][:ndata]
    data_tuple = (torch.FloatTensor(x2_data), ) if singlescale else (
        torch.FloatTensor(x2_data), torch.FloatTensor(x1_data))
data_loader = DataLoader(TensorDataset(*data_tuple),
batch_size=batch_size, shuffle=True, drop_last=True)
print(f'Loaded dataset: {hdf5_file}')
return data_loader
def load_data_3scales(hdf5_file,hdf5_file1,hdf5_file2, ndata, batch_size, singlescale=False):
with h5py.File(hdf5_file, 'r') as f:
x3_data = f['train'][:ndata]
with h5py.File(hdf5_file1, 'r') as f:
x2_data = f['train'][:ndata]
with h5py.File(hdf5_file2, 'r') as f:
x1_data = f['train'][:ndata]
    data_tuple = (torch.FloatTensor(x3_data), ) if singlescale else (
        torch.FloatTensor(x3_data), torch.FloatTensor(x2_data), torch.FloatTensor(x1_data))
data_loader = DataLoader(TensorDataset(*data_tuple),
batch_size=batch_size, shuffle=True, drop_last=True)
return data_loader
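# Example usage (file names and sizes are illustrative, not from the original project):
#   loader = load_data_2scales('scale2.h5', 'scale1.h5', ndata=4096, batch_size=32)
#   for x2, x1 in loader:
#       ...  # tensors come back in the same order as the files passed in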
|
py | 1a41068f762470cb97b6edc0585178fa3c9b69fa | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from bills import views
from django.conf.urls import url
router = DefaultRouter()
router.register('headbill', views.HeadBillViewSet)
router.register('relationshipTaxProduct', views.RelationshipTaxProductViewSet)
router.register('billdetail', views.BillDetailViewSet)
app_name = 'bills'
urlpatterns = [
# url(r'^customer/$', views.customer_list),
# url(r'^customer/(?P<pk>[0-9]+)$', views.customer_detail)
# path('customers/',views.customer_list),
# path('customers/<int:pk>',views.customer_detail),
path('customers/',views.CustomerListView.as_view()),
path('customers/<int:pk>',views.CustomerDetailView.as_view()),
path('subscriptions/',views.SubscriptionListView.as_view()),
path('subscriptions/<int:pk>',views.SubscriptionDetailView.as_view()),
path("products/", views.ProductList.as_view()),
path("products/<int:pk>", views.ProductDetail.as_view()),
path("taxestype/", views.TaxTypeList.as_view()),
path("taxestype/<int:pk>", views.TaxTypeDetail.as_view()),
path("bill/<int:pk>", views.BillDetail_list),
path('api2/', include(router.urls)),
]
|
py | 1a41074aa5e4a66faac56dea1d3953f42d6eb73f | import os
import re
import subprocess
import sys
from setuptools import Extension, setup, find_packages
from setuptools.command.build_ext import build_ext
# Convert distutils Windows platform specifiers to CMake -A arguments
PLAT_TO_CMAKE = {
'win32': 'Win32',
'win-amd64': 'x64',
'win-arm32': 'ARM',
'win-arm64': 'ARM64',
}
base_path = os.path.abspath(os.path.dirname(__file__))
# A CMakeExtension needs a sourcedir instead of a file list.
# The name must be the _single_ output extension from the CMake build.
# If you need multiple extensions, see scikit-build.
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection & inclusion of auxiliary 'native' libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
debug = int(os.environ.get('DEBUG', 0)) if self.debug is None else self.debug
cfg = 'Debug' if debug else 'Release'
# CMake lets you override the generator - we need to check this.
# Can be set with Conda-Build, for example.
cmake_generator = os.environ.get('CMAKE_GENERATOR', '')
# Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON
# EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code
# from Python.
cmake_args = [
f'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}',
f'-DPYTHON_EXECUTABLE={sys.executable}',
f'-DCMAKE_BUILD_TYPE={cfg}', # not used on MSVC, but no harm
]
manylinux_inside = os.environ.get('MANYLINUX_INSIDE')
if manylinux_inside:
# using gcc 7.5 instead of default (Debian 9) 6.3
cmake_args.extend(
[
'-DCMAKE_C_COMPILER=/usr/local/bin/gcc',
'-DCMAKE_CXX_COMPILER=/usr/local/bin/g++',
]
)
build_args = []
# Adding CMake arguments set as environment variable
# (needed e.g. to build for ARM OSx on conda-forge)
if 'CMAKE_ARGS' in os.environ:
cmake_args += [item for item in os.environ['CMAKE_ARGS'].split(' ') if item]
# In this example, we pass in the version to C++. You might not need to.
# cmake_args += [f'-DEXAMPLE_VERSION_INFO={self.distribution.get_version()}']
if self.compiler.compiler_type != 'msvc':
# Using Ninja-build since it a) is available as a wheel and b)
# multithreads automatically. MSVC would require all variables be
# exported for Ninja to pick it up, which is a little tricky to do.
# Users can override the generator with CMAKE_GENERATOR in CMake
# 3.15+.
if not cmake_generator:
try:
import ninja # noqa: F401
cmake_args += ['-GNinja']
except ImportError:
pass
else:
# Single config generators are handled 'normally'
single_config = any(x in cmake_generator for x in {'NMake', 'Ninja'})
# CMake allows an arch-in-generator style for backward compatibility
contains_arch = any(x in cmake_generator for x in {'ARM', 'Win64'})
# Specify the arch if using MSVC generator, but only if it doesn't
# contain a backward-compatibility arch spec already in the
# generator name.
if not single_config and not contains_arch:
cmake_args += ['-A', PLAT_TO_CMAKE[self.plat_name]]
# Multi-config generators have a different way to specify configs
if not single_config:
cmake_args += [f'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}']
build_args += ['--config', cfg]
if sys.platform.startswith('darwin'):
# Cross-compile support for macOS - respect ARCHFLAGS if set
archs = re.findall(r'-arch (\S+)', os.environ.get('ARCHFLAGS', ''))
if archs:
cmake_args += ['-DCMAKE_OSX_ARCHITECTURES={}'.format(';'.join(archs))]
# Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
# across all generators.
if 'CMAKE_BUILD_PARALLEL_LEVEL' not in os.environ:
# self.parallel is a Python 3 only way to set parallel jobs by hand
# using -j in the build_ext call, not supported by pip or PyPA-build.
if hasattr(self, 'parallel') and self.parallel:
# CMake 3.12+ only.
build_args += [f'-j{self.parallel}']
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
with open(os.path.join(base_path, 'CMakeLists.txt'), 'r', encoding='utf-8') as f:
regex = re.compile(r'VERSION "([A-Za-z0-9.]+)"$', re.MULTILINE)
version = re.findall(regex, f.read())[0]
if version.count('.') == 3:
major, minor, path_, tweak = version.split('.')
version = f'{major}.{minor}.{path_}.dev{tweak}'
with open(os.path.join(base_path, 'README.md'), 'r', encoding='utf-8') as f:
readme = f.read()
setup(
    name='wrtc',  # 'webrtc' isn't allowed on PyPI for some reason, but this name appears to be free
version=version,
author='Il`ya Semyonov',
author_email='[email protected]',
license='BSD 3-Clause',
url='https://github.com/MarshalX/python-webrtc',
description='a Python extension that provides bindings to WebRTC M92',
long_description=readme,
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 1 - Planning',
'Natural Language :: English',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Operating System :: MacOS',
'Operating System :: Unix',
'Topic :: Internet',
'Topic :: Multimedia',
'Topic :: Multimedia :: Video',
'Topic :: Multimedia :: Video :: Capture',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Capture/Recording',
'Topic :: Communications',
'Topic :: Communications :: Internet Phone',
'Topic :: Communications :: Telephony',
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
'Programming Language :: C++',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
"Programming Language :: Python :: Implementation",
"Programming Language :: Python :: Implementation :: CPython",
],
python_requires='~=3.7',
package_dir={'': 'python-webrtc/python'},
packages=find_packages(where='python-webrtc/python'),
ext_modules=[CMakeExtension('wrtc')],
# TODO add stub
cmdclass={'build_ext': CMakeBuild},
zip_safe=False,
project_urls={
'Author': 'https://github.com/MarshalX',
'Tracker': 'https://github.com/MarshalX/python-webrtc/issues',
'Source': 'https://github.com/MarshalX/python-webrtc',
},
)
|
py | 1a41083655fc292a423e854e37abc7138844b0eb | #
# Cormorant training script for the residue deletion dataset
#
import logging
import torch
from cormorant.data.collate import collate_activity
from cormorant.data.utils import initialize_datasets
from cormorant.engine import Engine
from cormorant.engine import init_argparse, init_file_paths, init_logger, init_cuda
from cormorant.engine import init_optimizer, init_scheduler
from cormorant.models import CormorantLEP
from cormorant.models.autotest import cormorant_tests
from torch.utils.data import DataLoader
# This makes printing tensors more readable.
torch.set_printoptions(linewidth=1000, threshold=100000)
logger = logging.getLogger('')
def main():
# Initialize arguments -- Just
args = init_argparse('lep')
# Initialize file paths
args = init_file_paths(args)
# Initialize logger
init_logger(args)
# Initialize dataloader
args, datasets, num_species, charge_scale = initialize_datasets(args, args.datadir, 'lep',
force_download=args.force_download,
ignore_check=args.ignore_check
)
# Construct PyTorch dataloaders from datasets
dataloaders = {split: DataLoader(dataset,
batch_size=args.batch_size,
shuffle=args.shuffle if (split == 'train') else False,
num_workers=args.num_workers,
collate_fn=collate_activity)
for split, dataset in datasets.items()}
# Initialize device and data type
device, dtype = init_cuda(args)
# Initialize model
model = CormorantLEP(args.maxl, args.max_sh, args.num_cg_levels, args.num_channels, num_species,
args.cutoff_type, args.hard_cut_rad, args.soft_cut_rad, args.soft_cut_width,
args.weight_init, args.level_gain, args.charge_power, args.basis_set,
charge_scale, args.gaussian_mask,
num_classes = args.num_classes,
cgprod_bounded = args.cgprod_bounded,
cg_agg_normalization = args.cg_agg_normalization,
cg_pow_normalization = args.cg_pow_normalization,
device = device, dtype = dtype)
# Initialize the scheduler and optimizer
optimizer = init_optimizer(args, model)
scheduler, restart_epochs = init_scheduler(args, optimizer)
# Define cross-entropy as the loss function.
loss_fn = torch.nn.functional.cross_entropy
# Apply the covariance and permutation invariance tests
print('Files:',dataloaders['train'])
cormorant_tests(model, dataloaders['train'], args, charge_scale=charge_scale, siamese=True)
# Instantiate the training class
trainer = Engine(args, dataloaders, model, loss_fn, optimizer, scheduler, restart_epochs, device, dtype, task='classification', clip_value=None)
print('Initialized a',trainer.task,'trainer.')
# Load from checkpoint file. If no checkpoint file exists, automatically does nothing.
trainer.load_checkpoint()
# Train model.
trainer.train()
# Test predictions on best model and also last checkpointed model.
trainer.evaluate()
if __name__ == '__main__':
main()
|
py | 1a4109fa828408a8dab3e3e370680e335498f3fb | #!/usr/bin/env python
# OpenVirteX control script
# Heavily based on FlowVisor's fvctl
#import python utilities to parse arguments
import sys
from optparse import OptionParser
import urllib2
import json
import getpass
VERSION = '0.1'
SUPPORTED_PROTO = ['tcp']
def getUrl(opts, path):
return URL % (opts.host, opts.port, path)
def buildRequest(data, url, cmd):
j = { "id" : "ovxctl", "method" : cmd , "jsonrpc" : "2.0" }
h = {"Content-Type" : "application/json-rpc"}
if data is not None:
j['params'] = data
return urllib2.Request(url, json.dumps(j), h)
def pa_none(args, cmd):
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=USAGE.format(cmd), description=ldesc)
(options, args) = parser.parse_args(args)
return (options, args)
#Create calls
def pa_addControllers(args, cmd):
usage = "%s <tenant_id> <vdpid> <ctrlUrls>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_addControllers(gopts, opts, args):
if len(args) != 3:
print "addControllers: Must specify tenant id, virtual dpid, controller list"
sys.exit()
req = { "controllerUrls" : buildControllerList(args[2]), \
"tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":",""), 16) }
resp = connect(gopts, "tenant", "addControllers", data=req, passwd=getPasswd(gopts))
if resp:
print "Added controllers %s to switch %s" % (args[2], args[1])
print resp
def pa_createNetwork(args, cmd):
usage = "%s <protocol> <controller_urls> <ip_network> <ip_mask>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
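# Controller URLs are given as a comma separated list of proto:host:port entries,
# e.g. "tcp:192.0.2.1:6633,tcp:192.0.2.2:6633" (addresses here are only an example).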
def buildControllerList(ctrls):
if ctrls.lower() == "none":
return []
l = ctrls.split(',')
controllerUrls = []
for ctrl in l:
parts = ctrl.split(":")
if len(parts) < 3:
print "%s is not a valid controller url" % ctrl
sys.exit()
if parts[0] not in SUPPORTED_PROTO:
print "%s in %s is not a supported protocol" % (parts[0], ctrl)
sys.exit()
try:
int(parts[2])
except:
print "%s in %s is not a valid port number" % (parts[2], ctrl)
sys.exit()
controllerUrls.append(ctrl)
return controllerUrls
def do_createNetwork(gopts, opts, args):
if len(args) != 3:
print "createNetwork : Must specify controllerUrls, network_ip, network_mask"
sys.exit()
req = { "controllerUrls" : buildControllerList(args[0]), \
"networkAddress" : args[1], "mask" : int(args[2]) }
network_id = connect(gopts, "tenant", "createNetwork", data=req, passwd=getPasswd(gopts))
if network_id:
print "Virtual network has been created (network_id %s)." % str(network_id)
def pa_createSwitch(args, cmd):
usage = "%s [options] <tenant_id> <physical_dpids> <plumbing_ids>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
parser.add_option("-d", "--dpid", dest="dpid", type="str", default="0",
help="Specify the DPID for this switch")
return parser.parse_args(args)
def do_createSwitch(gopts, opts, args):
if len(args) != 3:
print ("createSwitch : must specify: " +
"virtual tenant_id, a comma separated list of physical dpids " +
"(e.g. 00:00:00:00:00:00:00:01) which will be associated to the virtual switch, " +
"and a comma separated list of plumbing switch ids")
sys.exit()
dpids = [int(dpid.replace(":", ""), 16) for dpid in args[1].split(',')]
plumbingIds = [int(plumbingId) for plumbingId in args[2].split(',')]
req = { "tenantId" : int(args[0]), "dpids" : dpids, "plumbingSwitchIds" : plumbingIds, "dpid" : int(opts.dpid.replace(":", ""), 16) }
reply = connect(gopts, "tenant", "createSwitch", data=req, passwd=getPasswd(gopts))
switchId = reply.get('vdpid')
if switchId:
switch_name = '00:' + ':'.join([("%x" % switchId)[i:i+2] for i in range(0, len(("%x" % switchId)), 2)])
print "Virtual switch has been created (tenant_id %s, switch_id %s)" % (args[0], switch_name)
def pa_createPort(args, cmd):
usage = "%s <tenant_id> <physical_dpid> <physical_port>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_createPort(gopts, opts, args):
if len(args) != 3:
print ("createPort : must specify: " +
"virtual tenant_id, physical dpid " +
"(e.g. 00:00:00:00:00:00:00:01) and physical port")
sys.exit()
req = { "tenantId" : int(args[0]), "dpid" : int(args[1].replace(":", ""), 16), "port" : int(args[2]) }
reply = connect(gopts, "tenant", "createPort", data=req, passwd=getPasswd(gopts))
switchId = reply.get('vdpid')
portId = reply.get('vport')
if switchId and portId:
switch_name = '00:' + ':'.join([("%x" %int(switchId))[i:i+2] for i in range(0, len(("%x" %int(switchId))), 2)])
print "Virtual port has been created (tenant_id %s, switch_id %s, port_id %s)" % (args[0], switch_name, portId)
def pa_setInternalRouting(args, cmd):
usage = "%s <tenant_id> <virtual_dpid> <routing_algorithm> <backup_routes_num>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_setInternalRouting(gopts, opts, args):
if len(args) != 4:
print ("setInternalRouting : Must specify virtual tenant_id, virtual switch_id, " +
"algorithm (spf, manual) and number of backup routes")
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16),
"algorithm" : args[2], "backup_num" : int(args[3]) }
reply = connect(gopts, "tenant", "setInternalRouting", data=req, passwd=getPasswd(gopts))
tenantId = reply.get('tenantId')
switchId = reply.get('vdpid')
if tenantId and switchId:
print "Routing has be set for big switch (tenant_id %s, switch_id %s)" % (switchId, tenantId)
def pa_connectHost(args, cmd):
usage = "%s <tenant_id> <vitual_dpid> <virtual_port> <host_mac>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_connectHost(gopts, opts, args):
if len(args) != 4:
print "connectHost : Must specify virtual tenant_id, virtual switch_id, virtual port_id and host MAC address"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16),
"vport" : int(args[2]), "mac" : args[3] }
reply = connect(gopts, "tenant", "connectHost", data=req, passwd=getPasswd(gopts))
hostId = reply.get('hostId')
if hostId:
print "Host (host_id %s) has been connected to virtual port" % (hostId)
def pa_connectLink(args, cmd):
usage = "%s <tenant_id> <src_virtual_dpid> <src_virtual_port> <dst_virtual_dpid> <dst_virtual_port>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_connectLink(gopts, opts, args):
if len(args) != 7:
print ("connectLink : Must specify tenant_id, src_virtual_dpid, src_virtual_port, dst_virtual_dpid, dst_virtual_port, "
+ "algorithm (spf, manual), number of backup routes")
sys.exit()
req = { "tenantId" : int(args[0]), "srcDpid" : int(args[1].replace(":", ""), 16),
"srcPort" : int(args[2]), "dstDpid" : int(args[3].replace(":", ""), 16),
"dstPort" : int(args[4]), "algorithm" : args[5], "backup_num" : int(args[6]) }
reply = connect(gopts, "tenant", "connectLink", data=req, passwd=getPasswd(gopts))
linkId = reply.get('linkId')
if linkId:
print "Virtual link (link_id %s) has been created" % (linkId)
def pa_setLinkPath(args, cmd):
usage = "%s <tenant_id> <link_id> <physical_path> <priority>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_setLinkPath(gopts, opts, args):
if len(args) != 4:
print "setLinkPath : Must specify tenant_id, link_id, the physical path that connect the end-points and the priority [0-255]"
sys.exit()
req = { "tenantId" : int(args[0]), "linkId" : int(args[1]), "path" : translate_path(args[2]), "priority" : int(args[3]) }
reply = connect(gopts, "tenant", "setLinkPath", data=req, passwd=getPasswd(gopts))
linkId = reply.get('linkId')
if linkId:
print "Virtual link (link_id %s) path has been set" % (linkId)
def pa_connectRoute(args, cmd):
usage = "%s <tenant_id> <virtual_dpid> <src_virtual_port> <dst_virtual_port> <physical_path> <priority>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_connectRoute(gopts, opts, args):
if len(args) != 6:
print ("connectRoute : Must specify tenant_id, virtual_dpid, src_virtual_port, dst_virtual_port, " +
"the physical path that connect the end-points and the priority [0-255]")
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16),
"srcPort" : int(args[2]), "dstPort" : int(args[3]),
"path" : translate_path(args[4]), "priority" : int(args[5]) }
reply = connect(gopts, "tenant", "connectRoute", data=req, passwd=getPasswd(gopts))
routeId = reply.get('routeId')
if routeId:
print "Big-switch internal route (route_id %s) has been created" % (routeId)
#Remove calls
def pa_removeNetwork(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_removeNetwork(gopts, opts, args):
if len(args) != 1:
print "removeNetwork : Must specify a virtual tenant_id"
sys.exit()
req = { "tenantId" : int(args[0]) }
result = connect(gopts, "tenant", "removeNetwork", data=req, passwd=getPasswd(gopts))
print "Network (tenant_id %s) has been removed" % (args[0])
def pa_removeSwitch(args, cmd):
usage = "%s <tenant_id> <virtual_dpid>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_removeSwitch(gopts, opts, args):
if len(args) != 2:
print "removeSwitch : Must specify a virtual tenant_id and a virtual switch_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16) }
result = connect(gopts, "tenant", "removeSwitch", data=req, passwd=getPasswd(gopts))
print "Switch (switch_id %s) has been removed" % (args[1])
def pa_removePort(args, cmd):
usage = "%s <tenant_id> <virtual_dpid> <virtual_port>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_removePort(gopts, opts, args):
if len(args) != 3:
print "removePort : Must specify a virtual tenant_id, a virtual switch_id and a virtual port_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16), "vport" : int(args[2])}
result = connect(gopts, "tenant", "removePort", data=req, passwd=getPasswd(gopts))
print "Port (port_id %s) has been removed from virtual switch (switch_id %s)" % (args[2], args[1])
def pa_disconnectHost(args, cmd):
usage = "%s <tenant_id> <host_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_disconnectHost(gopts, opts, args):
if len(args) != 2:
print "disconnectHost : Must specify a a virtual tenant_id and a host_id"
sys.exit()
req = { "tenantId" : int(args[0]), "hostId" : int(args[1]) }
result = connect(gopts, "tenant", "disconnectHost", data=req, passwd=getPasswd(gopts))
print "Host (host_id %s) has been disconnected from the virtual network (tenant_id %s)" % (args[1], args[0])
def pa_disconnectLink(args, cmd):
usage = "%s <tenant_id> <link_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_disconnectLink(gopts, opts, args):
if len(args) != 2:
print "disconnectLink : Must specify a a virtual tenant_id and a link_id"
sys.exit()
req = { "tenantId" : int(args[0]), "linkId" : int(args[1]) }
result = connect(gopts, "tenant", "disconnectLink", data=req, passwd=getPasswd(gopts))
print "Link (link_id %s) has been disconnected from the virtual network (tenant_id %s)" % (args[1], args[0])
def pa_disconnectRoute(args, cmd):
usage = "%s <tenant_id> <route_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_disconnectRoute(gopts, opts, args):
if len(args) != 3:
print "disconnectRoute : Must specify a virtual tenant_id, switch_id and a route_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16) , "routeId" : int(args[2]) }
result = connect(gopts, "tenant", "disconnectRoute", data=req, passwd=getPasswd(gopts))
print "Route (route_id %s) in virtual big-switch (switch_id %s) has been disconnected from the virtual network (tenant_id %s)" % (args[2], args[1], args[0])
#Runtime operations
def pa_startNetwork(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_startNetwork(gopts, opts, args):
if len(args) != 1:
print "startNetwork : Must specify a tenant_id"
sys.exit()
req = { "tenantId" : int(args[0]) }
result = connect(gopts, "tenant", "startNetwork", data=req, passwd=getPasswd(gopts))
if result:
print "Network (tenant_id %s) has been booted" % (args[0])
def pa_startSwitch(args, cmd):
usage = "%s <tenant_id> <virtual_dpid>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_startSwitch(gopts, opts, args):
if len(args) != 2:
print "startSwitch : Must specify a tenant_id and a virtual switch_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16)}
result = connect(gopts, "tenant", "startSwitch", data=req, passwd=getPasswd(gopts))
if result:
print "Switch (switch_id %s) has been booted in virtual network (tenant_id %s)" % (args[1], args[0])
def pa_startPort(args, cmd):
usage = "%s <tenant_id> <virtual_dpid>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_startPort(gopts, opts, args):
if len(args) != 3:
print "startPort : Must specify a tenant_id, a virtual switch_id and a virtual port_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16), "vport" : int(args[2])}
reply = connect(gopts, "tenant", "startPort", data=req, passwd=getPasswd(gopts))
tenantId = reply.get('tenantId')
switchId = reply.get('vdpid')
portId = reply.get('vport')
    if tenantId and switchId and portId:
print "Port (port_id %s) has been started in virtual switch (tenant_id %s, switch_id %s)" % (portId, tenantId, switchId)
def pa_stopNetwork(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_stopNetwork(gopts, opts, args):
if len(args) != 1:
print "stopNetwork : Must specify a tenant_id"
sys.exit()
req = { "tenantId" : int(args[0]) }
result = connect(gopts, "tenant", "stopNetwork", data=req, passwd=getPasswd(gopts))
if result:
print "Network (tenant_id %s) has been shutdown" % (args[0])
def pa_stopSwitch(args, cmd):
usage = "%s <tenant_id> <virtual_dpid>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_stopSwitch(gopts, opts, args):
if len(args) != 2:
print "stopSwitch : Must specify a tenant_id and a virtual switch_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16)}
result = connect(gopts, "tenant", "stopSwitch", data=req, passwd=getPasswd(gopts))
if result:
print "Switch (switch_id %s) has been shutdown in virtual network (tenant_id %s)" % (args[1], args[0])
def pa_stopPort(args, cmd):
usage = "%s <tenant_id> <virtual_dpid>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_stopPort(gopts, opts, args):
if len(args) != 3:
print "stopPort : Must specify a tenant_id, a virtual switch_id and a virtual port_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16), "vport" : int(args[2])}
result = connect(gopts, "tenant", "stopPort", data=req, passwd=getPasswd(gopts))
if result:
print "Port (port_id %s) has been shutdown in virtual switch (tenant_id %s, switch_id %s)" % (args[2], args[0], args[1])
def pa_getPhysicalFlowtable(args, cmd):
usage = "%s [<physical_dpid>]" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getPhysicalFlowtable(gopts, opts, args):
if len(args) > 1:
print "getPhysicalFlowtable : May specify optional physical dpid"
sys.exit()
req = {}
if len(args) == 1:
req["dpid"] = int(args[0].replace(":", ""), 16)
result = connect(gopts, "status", "getPhysicalFlowtable", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getPhysicalHosts(args, cmd):
usage = "%s" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getPhysicalHosts(gopts, opts, args):
if len(args) > 0:
print "getPhysicalHosts : No arguments"
sys.exit()
req = {}
result = connect(gopts, "status", "getPhysicalHosts", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getPhysicalTopology(args, cmd):
usage = "%s" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getPhysicalTopology(gopts, opts, args):
if len(args) > 0:
print "getPhysicalTopology : No arguments"
sys.exit()
req = {}
result = connect(gopts, "status", "getPhysicalTopology", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_listVirtualNetworks(args, cmd):
usage = "%s" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_listVirtualNetworks(gopts, opts, args):
if len(args) > 0:
print "listVirtualNetworks : No arguments"
sys.exit()
req = {}
result = connect(gopts, "status", "listVirtualNetworks", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualAddressMapping(args, cmd):
usage = "%s <tenant_id> <virtual_dpid>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualAddressMapping(gopts, opts, args):
if len(args) != 1:
print "getVirtualAddressMapping : Must specify a tenant_id"
sys.exit()
req = { "tenantId" : int(args[0]) }
result = connect(gopts, "status", "getVirtualAddressMapping", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualFlowtable(args, cmd):
usage = "%s <tenant_id> [<virtual_dpid>]" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualFlowtable(gopts, opts, args):
if (len(args) == 0) or (len(args) > 2):
print "getVirtualFlowtable : Must specify a tenant_id, and optional virtual switch_id"
sys.exit()
req = { "tenantId" : int(args[0]) }
if len(args) == 2:
req["vdpid"] = int(args[1].replace(":", ""), 16)
result = connect(gopts, "status", "getVirtualFlowtable", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualHosts(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualHosts(gopts, opts, args):
if len(args) != 1:
print "getVirtualHosts : Must specify a tenant_id"
sys.exit()
req = { "tenantId": int(args[0]) }
result = connect(gopts, "status", "getVirtualHosts", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualLinkMapping(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualLinkMapping(gopts, opts, args):
if len(args) != 1:
print "getVirtualHosts : Must specify a tenant_id"
sys.exit()
req = { "tenantId": int(args[0]) }
result = connect(gopts, "status", "getVirtualLinkMapping", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualSwitchMapping(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualSwitchMapping(gopts, opts, args):
if len(args) != 1:
print "getVirtualSwitchMapping : Must specify a tenant_id"
sys.exit()
req = { "tenantId": int(args[0]) }
result = connect(gopts, "status", "getVirtualSwitchMapping", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualTopology(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualTopology(gopts, opts, args):
if len(args) != 1:
print "getVirtualTopology : Must specify a tenant_id"
sys.exit()
req = { "tenantId": int(args[0]) }
result = connect(gopts, "status", "getVirtualTopology", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
# Other methods
def translate_path(path_string):
hop_list = path_string.split(",")
path = ""
for hop in hop_list:
src, dst = hop.split("-")
src_dpid, src_port = src.split("/")
dst_dpid, dst_port = dst.split("/")
src_long_dpid = int(src_dpid.replace(":", ""), 16)
dst_long_dpid = int(dst_dpid.replace(":", ""), 16)
path = path + str(src_long_dpid) + "/" + str(src_port) + "-" + str(dst_long_dpid) + "/" + str(dst_port) + ","
if len(path) > 0:
path.rstrip(",")
return path
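# Illustrative behaviour of translate_path (example values only, not from the original source):
# "00:00:00:00:00:00:00:01/1-00:00:00:00:00:00:00:02/1" is rewritten hop by hop into "1/1-2/1",
# i.e. each colon-separated dpid becomes its integer value and the trailing comma is stripped above.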
def pa_createPolicy(args, cmd):
usage = "%s <physical_dpid> <plumbing_id> <policy>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_createPolicy(gopts, opts, args):
if len(args) != 3:
print ("createPolicy : Must specify a physical switch dpid, a plumbing switch id, and a policy")
sys.exit()
req = { "dpid":int(args[0].replace(":", ""), 16), "plumbingSwitchId":int(args[1]), "policy":args[2] }
print req
result = connect(gopts, "tenant", "createPolicy", data=req, passwd=getPasswd(gopts))
print "Policy has been created"
def pa_createACL(args, cmd):
usage = "%s <tenant_id> <acl_match> <acl_action>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_createACL(gopts, opts, args):
if len(args) != 3:
print ("createACL : Must specify a tenant ID, acl on match, and acl on action")
sys.exit()
req = { "tenantId":int(args[0]), "aclMatch":args[1], "aclAction":args[2] }
print req
result = connect(gopts, "tenant", "createACL", data=req, passwd=getPasswd(gopts))
print "acl has been created"
def pa_startComposition(args, cmd):
usage = "%s" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_startComposition(gopts, opts, args):
req = {}
result = connect(gopts, "tenant", "startComposition", data=req, passwd=getPasswd(gopts))
print "Composition has been started"
def pa_stopComposition(args, cmd):
usage = "%s" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_stopComposition(gopts, opts, args):
req = {}
result = connect(gopts, "tenant", "stopComposition", data=req, passwd=getPasswd(gopts))
print "Composition has been stopped"
def pa_setComposeAlgo(args, cmd):
usage = "%s <algo>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_setComposeAlgo(gopts, opts, args):
if len(args) != 1:
print ("setComposeAlgo : Must specify an algorithm")
sys.exit()
req = { "algo" : args[0] }
result = connect(gopts, "tenant", "setComposeAlgo", data=req, passwd=getPasswd(gopts))
print "Algorithm '%s' has been set" % args[0]
def pa_createMultiSwitch(args, cmd):
usage = "%s [options] <tenant_id> <physical_dpid> <number_of_baby_switches>" % \
USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_createMultiSwitch(gopts, opts, args):
if len(args) != 3:
print ("createMultiSwitch : must specify: " +
"virtual tenant_id, physical_dpid (e.g. 00:00:00:00:00:00:00:01)," +
"number_of_baby_switches")
sys.exit()
req = { "tenantId" : int(args[0]), "physicalDpid" : int(args[1].replace(":", ""), 16),
"numberOfBabySwitches" : int(args[2]) };
print "req: " + str(req)
reply = connect(gopts, "tenant", "createMultiSwitch", data=req, passwd=getPasswd(gopts))
switchId = reply.get('vdpid')
babyDpids = reply.get("babyDpids")
if switchId and babyDpids:
switch_name = '00:' + ':'.join([("%x" % switchId)[i:i+2] for i in range(0, len(("%x" % switchId)), 2)])
print "Virtual switch has been created (tenant_id %s, switch_id %s) with internal baby switches %s" \
% (args[0], switch_name, babyDpids)
def pa_createPlumbingSwitch(args, cmd):
usage = "%s [options] <physical_switch_dpid> <number_of_plumbing_switches>" % \
USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_createPlumbingSwitch(gopts, opts, args):
if len(args) != 2:
print ("createPlumbingSwitch : must specify: " +
"physical_switch_dpid (e.g. 00:00:00:00:00:00:00:01)," +
"number_of_plumbing_switches")
sys.exit()
req = { "physicalDpid" : int(args[0].replace(":", ""), 16),
"numberOfPlumbingSwitches" : int(args[1]) };
reply = connect(gopts, "tenant", "createPlumbingSwitch", data=req, passwd=getPasswd(gopts))
print "Plumbing switches have been created"
def pa_createPlumbingPort(args, cmd):
usage = "%s [options] <physical_dpid> <plumbing_id> <physical_port>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_createPlumbingPort(gopts, opts, args):
if len(args) != 3:
print ("createPlumbingPort : must specify: " +
"physical_dpid, plumbing_id and physical_port")
sys.exit()
req = {"physicalDpid" : int(args[0].replace(":", ""), 16),
"plumbingSwitchId" : int(args[1]),
"physicalPort" : int(args[2]) }
reply = connect(gopts, "tenant", "createPlumbingPort", data=req, passwd=getPasswd(gopts))
print "Plumbing port has been created"
def pa_createPlumbingLink(args, cmd):
usage = "%s <physical_dpid> <src_id> <src_port> <dst_id> <dst_port>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_createPlumbingLink(gopts, opts, args):
if len(args) != 5:
print ("connectLink : Must specify physical_dpid, src_id, src_port, dst_id, dst_port")
sys.exit()
req = { "physicalDpid" : int(args[0].replace(":", ""), 16),
"srcPlumbingSwitchId" : int(args[1]), "srcPort" : int(args[2]),
"dstPlumbingSwitchId" : int(args[3]), "dstPort" : int(args[4]) }
reply = connect(gopts, "tenant", "createPlumbingLink", data=req, passwd=getPasswd(gopts))
print "Plumbing link has been created"
def pa_startExpr(args, cmd):
usage = "%s <expr>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_startExpr(gopts, opts, args):
if len(args) != 1:
print ("createExpr : Must specify an experiment")
sys.exit()
req = { "expr" : args[0] }
result = connect(gopts, "tenant", "startExpr", data=req, passwd=getPasswd(gopts))
print "Expr '%s' has been started" % args[0]
def pa_help(args, cmd):
usage = "%s <cmd>" % USAGE.format(cmd)
parser = OptionParser(usage=usage)
return parser.parse_args(args)
def do_help(gopts, opts, args):
if len(args) != 1:
raise IndexError
try:
(pa, func) = CMDS[args[0]]
pa(['--help'], args[0])
except KeyError, e:
print "Invalid command : %s is an unknown command." % args[0]
sys.exit()
def connect(opts, path, cmd, data=None, passwd=None):
try:
url = getUrl(opts, path)
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, url, opts.ovx_user, passwd)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
req = buildRequest(data, url, cmd)
#ph = urllib2.urlopen(req)
ph = opener.open(req)
return parseResponse(ph.read())
except urllib2.URLError as e:
print e
sys.exit(1)
except urllib2.HTTPError as e:
if e.code == 401:
print "Authentication failed: invalid password"
sys.exit(1)
elif e.code == 504:
print "HTTP Error 504: Gateway timeout"
sys.exit(1)
else:
print e
except RuntimeError as e:
print e
def parseResponse(data):
j = json.loads(data)
if 'error' in j:
print j
sys.exit(1)
return j['result']
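# Note: as handled in parseResponse above, every response is expected to be a JSON object
# carrying either an 'error' key (which aborts the CLI) or a 'result' key (which is returned).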
def printVersion(option, opt, value, parser):
"""Print ovxctl version and exit"""
print "ovxctl-%s" % VERSION
sys.exit()
def printHelp (option, opt, value, parser):
"""Print ovxctl help and exit"""
cmds = [x for x in CMDS.iterkeys()]
cmds.remove('help')
cmds.sort()
print parser.format_help().strip()
print "\n Available commands are: "
for x in cmds:
(sdesc, ldesc) = DESCS[x]
print " {0:25} {1:10}".format(x, sdesc)
print "\n See '%s help <command>' for more info on a specific command." % sys.argv[0]
sys.exit()
CMDS = {
'addControllers': (pa_addControllers, do_addControllers),
'createNetwork': (pa_createNetwork, do_createNetwork),
'createSwitch': (pa_createSwitch, do_createSwitch),
'createPort': (pa_createPort, do_createPort),
'setInternalRouting': (pa_setInternalRouting, do_setInternalRouting),
'connectHost': (pa_connectHost, do_connectHost),
'connectLink': (pa_connectLink, do_connectLink),
'setLinkPath': (pa_setLinkPath, do_setLinkPath),
'connectRoute': (pa_connectRoute, do_connectRoute),
'removeNetwork': (pa_removeNetwork, do_removeNetwork),
'removeSwitch': (pa_removeSwitch, do_removeSwitch),
'removePort': (pa_removePort, do_removePort),
'disconnectHost': (pa_disconnectHost, do_disconnectHost),
'disconnectLink': (pa_disconnectLink, do_disconnectLink),
'disconnectRoute': (pa_disconnectRoute, do_disconnectRoute),
'startNetwork': (pa_startNetwork, do_startNetwork),
'startSwitch': (pa_startSwitch, do_startSwitch),
'startPort': (pa_startPort, do_startPort),
'stopNetwork': (pa_stopNetwork, do_stopNetwork),
'stopSwitch': (pa_stopSwitch, do_stopSwitch),
'stopPort': (pa_stopPort, do_stopPort),
'getPhysicalFlowtable': (pa_getPhysicalFlowtable, do_getPhysicalFlowtable),
'getPhysicalHosts': (pa_getPhysicalHosts, do_getPhysicalHosts),
'getPhysicalTopology': (pa_getPhysicalTopology, do_getPhysicalTopology),
'listVirtualNetworks': (pa_listVirtualNetworks, do_listVirtualNetworks),
'getVirtualAddressMapping': (pa_getVirtualAddressMapping, do_getVirtualAddressMapping),
'getVirtualFlowtable': (pa_getVirtualFlowtable, do_getVirtualFlowtable),
'getVirtualHosts': (pa_getVirtualHosts, do_getVirtualHosts),
'getVirtualLinkMapping': (pa_getVirtualLinkMapping, do_getVirtualLinkMapping),
'getVirtualSwitchMapping': (pa_getVirtualSwitchMapping, do_getVirtualSwitchMapping),
'getVirtualTopology': (pa_getVirtualTopology, do_getVirtualTopology),
'createPolicy': (pa_createPolicy, do_createPolicy),
'createACL': (pa_createACL, do_createACL),
'startComposition': (pa_startComposition, do_startComposition),
'stopComposition': (pa_stopComposition, do_stopComposition),
'setComposeAlgo': (pa_setComposeAlgo, do_setComposeAlgo),
'createMultiSwitch': (pa_createMultiSwitch, do_createMultiSwitch),
'createPlumbingSwitch': (pa_createPlumbingSwitch, do_createPlumbingSwitch),
'createPlumbingPort': (pa_createPlumbingPort, do_createPlumbingPort),
'createPlumbingLink': (pa_createPlumbingLink, do_createPlumbingLink),
'startExpr': (pa_startExpr, do_startExpr),
'help' : (pa_help, do_help)
}
DESCS = {
'addControllers' : ("Adds controllers to a virtual switch",
("Adds the specified list of controllers to a given virtual switch.\n"
"ExampleL addController <tenantId> <vdpid> <ctrlUrls>")),
'createNetwork' : ("Creates a virtual network",
("Creates a virtual network. Input: protocol, controllerIP, controller port, ip address, mask. "
"\nExample: createNetwork tcp 1.1.1.1 6634 192.168.1.0 24")),
'createSwitch' : ("Create virtual switch",
("Create a virtual switch. Must specify a tenant_id, a list of the "
"physical_dpids that will be part of the virtual switch, "
"and a list of the plumbing switch ids."
"\nExample: createSwitch 1 00:00:00:00:00:00:00:01,00:00:00:00:00:00:00:02 0,0")),
'createPort' : ("Create virtual port",
("Create a virtual port. Must specify a tenant_id, a physical_dpid and a physical_port."
"\nExample: createPort 1 00:00:00:00:00:00:00:01 1")),
'setInternalRouting' : ("Set big-switch internal routing mechanism",
("Set big-switch internal routing mechanism. Must specify a tenant_id, a virtual switch_id, the routing type (spf, manual) "
"and the number (0-255) of the backup paths that have to be computed."
"\nExample: setInternalRouting 1 00:00:00:00:00:00:00:01 spf 128")),
'connectHost' : ("Connect host to a virtual port",
("Connect host to a virtual port. Must specify a tenant_id, a virtual switch_id, a virtual port_id and the host MAC address."
"\nExample: connectHost 1 00:a4:23:05:00:00:00:01 1 00:00:00:00:00:01")),
'connectLink' : ("Connect two virtual ports through a virtual link",
("Connect two virtual ports through a virtual link. Must specify a tenant_id, a virtual src_switch_id, a virtual src_port_id, "
"a virtual dst_switch_id, a virtual dst_port_id, the routing type (spf, manual) and the number (0-255) of the backup paths that have to be computed."
"\nExample: connectLink 1 00:a4:23:05:00:00:00:01 1 00:a4:23:05:00:00:00:02 1 spf 1")),
'setLinkPath' : ("Set the physical path of a virtual link",
("Set the physical path of a virtual link. Must specify a tenant_id, a virtual link_id, a physical path and a priority (0-255)."
"\nExample: connectLink 1 1 00:00:00:00:00:00:00:01/1-00:00:00:00:00:00:00:02/1,"
"00:00:00:00:00:00:00:2/2-00:00:00:00:00:00:00:3/1 128")),
'connectRoute' : ("Connect two virtual ports inside a virtual big-switch",
("Connect two virtual ports inside a virtual big-switch. Must specify a tenant_id, a virtual switch_id, a virtual src_port_id, "
"a virtual dst_port_id, a physical path and a priority (0-255)."
"\nExample: connectRoute 1 00:a4:23:05:00:00:00:01 1 2 00:00:00:00:00:00:00:01/1-00:00:00:00:00:00:00:02/1,"
"00:00:00:00:00:00:00:2/2-00:00:00:00:00:00:00:3/1 128")),
'removeNetwork' : ("Remove a virtual network",
("Remove a virtual network. Must specify a tenant_id."
"\nExample: removeNetwork 1")),
'removeSwitch' : ("Remove virtual switch",
("Remove a virtual switch. Must specify a tenant_id and a virtual switch_id."
"\nExample: removeSwitch 1 00:a4:23:05:00:00:00:01")),
'removePort' : ("Remove virtual port",
("Remove a virtual port. Must specify a tenant_id, a virtual switch_id and a virtual port_id."
"\nExample: removePort 1 00:a4:23:05:00:00:00:01 1")),
'disconnectHost' : ("Disconnect host from a virtual port",
("Disconnect host from a virtual port. Must specify a tenant_id and the host_id."
"\nExample: disconnectHost 1 1")),
'disconnectLink' : ("Disconnect link between two virtual ports",
("Disconnect link between two virtual ports. Must specify a tenant_id and the link_id."
"\nExample: disconnectLink 1 1")),
'disconnectRoute' : ("Disconnect big-switch internal route between two virtual ports",
("Disconnect big-switch internal route between two virtual ports. Must specify a tenant_id and the route_id."
"\nExample: disconnectRoute 1 00:a4:23:05:00:00:00:01 1")),
'startNetwork' : ("Start a virtual network",
("Start a virtual network. Must specify a tenant_id."
"\nExample: startNetwork 1")),
'startSwitch' : ("Start a virtual switch",
("Start a virtual switch. Must specify a tenant_id and a virtual switch_id."
"\nExample: startSwitch 1 00:a4:23:05:00:00:00:01")),
'startPort' : ("Start a virtual port",
("Start a virtual port. Must specify a tenant_id, a virtual switch_id and a virtual port_id."
"\nExample: startPort 1 00:a4:23:05:00:00:00:01 1")),
'stopNetwork' : ("Stop a virtual network",
("Stop a virtual network. Must specify a tenant_id."
"\nExample: stopNetwork 1")),
'stopSwitch' : ("Shutdown a virtual switch",
("Shutdown a virtual switch. Must specify a tenant_id and a virtual switch_id."
"\nExample: stopSwitch 1 00:a4:23:05:00:00:00:01")),
'stopPort' : ("Shutdown a virtual port",
("Shutdown a virtual port. Must specify a tenant_id, a virtual switch_id and a virtual port_id."
"\nExample: stopPort 1 00:a4:23:05:00:00:00:01 1")),
# Monitoring API - admin only
'getPhysicalFlowtable' : ("Get the physical flowtable of a specified switch or all switches",
("Get the physical flowtable of a specified switch or all switches. Specify optional physical switch_id."
"\nExample: getPhysicalFlowtable 00:00:00:00:00:00:00:01")),
'getPhysicalHosts' : ("Get a list of physical hosts",
("Get a list of physical hosts."
"\nExample: getPhysicalHosts")),
'getPhysicalTopology': ("Get the physical topology",
("Get the physical topology."
"\nExample: getPhysicalTopology")),
'listVirtualNetworks': ("Get a list of all virtual network tenant ID's",
("Get a list of all virtual network tenant ID's."
"\nExample: listVirtualNetworks")),
# Monitoring API - tenant restricted
'getVirtualAddressMapping' : ("Get the virtual to physical address mapping for a specified virtual network",
("Get the virtual to physical address mapping. Must specify a virtual network tenant_id."
"\nExample: getVirtualAddressMapping 1")),
'getVirtualFlowtable' : ("Get the flowtable in the specified virtual network",
("Get the flowtable in the specified virtual network. Must specify a virtual switch_id, optional virtual switch_id."
"\nExample: getVirtualFlowtable 00:a4:23:05:00:00:00:01")),
'getVirtualHosts' : ("Get list of hosts in virtual network",
("Get list of hosts in virtual network. Must specify a tenant_id",
"\nExample: getVirtualHosts 1")),
'getVirtualLinkMapping' : ("Get the virtual to physical link mapping",
("Get the virtual to physical link mapping. Must specify a tenant_id.",
"\nExample: getVirtualLinkMapping 1")),
'getVirtualSwitchMapping' : ("Get the virtual to physical switch mapping",
("Get the virtual to physical switch mapping. Must specify a tenant_id.",
"\nExample: getVirtualSwitchMapping 1")),
'getVirtualTopology' : ("Get the virtual topology",
("Get the virtual topology. Must specify a tenant_id.",
"\nExample: getVirtualTopology 1")),
'createPolicy' : ("Create controller policy",
("Create controller policy. Must specify a physical switch dpid, plumbing switch id, and a policy.",
"\nExample: createPolicy 00:00:00:00:00:00:00:01 1 1+2")),
'createACL' : ("Create ACL policy",
("Create ACL policy. Must specify a tenantId, acl on match, and acl on action",
"\nExample: createACL 1 srcip:exact,dstip:exact output,mod:dstip")),
'startComposition' : ("Start composition",
("Start composition.",
"\nExample: startComposition")),
'stopComposition' : ("Stop composition",
("Stop composition.",
"\nExample: stopComposition")),
'setComposeAlgo' : ("Set composition algorihm",
("Set composition algorithm. Must specify an algorithm.",
"\nExample: setComposeAlgo strawman/incremental")),
'createMultiSwitch' : ("Create virtual multi switch",
("Create a virtual multi switch. Must specify a tenant_id, the dpid of the " +
"physical switch corresponding to this multi switch, and the number of " +
"baby switches internal to this multi switch." +
"\nExample: createMultiSwitch 1 00:00:00:00:00:00:00:01 4")),
'createPlumbingSwitch' : ("Create plumbing switch",
("Create virtual plumbing switch. Must specify the dpid of the "
"physical switch, and the number of plumbing switches"
"\nExample: createPlumbingSwitch 00:00:00:00:00:00:00:01 2")),
'createPlumbingPort' : ("Create plumbing port on plumbing switch",
("Create a plumbing port on a plumbing switch. Must specify "
"the dpid of the physical switch, the id of the plumbing switch, and the number of the "
"corresponding physical port (0 means not mapped to physical port."
"\nExample: createPlumbingPort 00:00:00:00:00:00:00:01 1 0\n"
" createPlumbingPort 00:00:00:00:00:00:00:01 2 3")),
'createPlumbingLink' : ("Connect two plumbing ports through a plumbing link",
("Connect two plumbing ports through a plumbing link. Must specify a "
"physical_dpid, a plumbing src_switch_id, a plumbing src_port_id, "
"a plumbing dst_switch_id and a plumbing dst_port_id"
"\nExample: connectPlumbingLink 00:00:00:00:00:00:00:01 0 1 1 2")),
'startExpr' : ("Start experiment",
("Start experiment. Must specify an experiment."
"\nExample: startExpr parallel/sequential/gateway")),
}
USAGE="%prog {}"
URL = "http://%s:%s/%s"
def getPasswd(opts):
if opts.no_passwd:
return ""
else:
return getpass.getpass("Password: ")
def addCommonOpts (parser):
parser.add_option("-h", "--hostname", dest="host", default="localhost",
help="Specify the OpenVirteX host; default='localhost'")
parser.add_option("-p", "--port", dest="port", default="8080",
help="Specify the OpenVirteX web port; default=8080")
parser.add_option("-u", "--user", dest="ovx_user", default="admin",
help="OpenVirtex admin user; default='admin'")
parser.add_option("-n", "--no-passwd", action="store_true", dest="no_passwd", default=False,
help="Run ovxctl with no password; default false")
parser.add_option("-v", "--version", action="callback", callback=printVersion)
parser.add_option("--help", action="callback", callback=printHelp)
def parse_global_args (arglist):
usage = "%s [options] command [command_args]" % sys.argv[0]
args = []
while (len(arglist) != 0 and arglist[0] not in CMDS):
args.append(arglist[0])
arglist.pop(0)
parser = OptionParser(add_help_option=False, usage=usage)
addCommonOpts(parser)
(opts, pargs) = parser.parse_args(args)
return (opts, arglist, parser)
if __name__ == '__main__':
try:
(gopts, rargs, parser) = parse_global_args(sys.argv[1:])
if len(rargs) < 1:
raise IndexError
(parse_args, do_func) = CMDS[rargs[0]]
(opts, args) = parse_args(rargs[1:], rargs[0])
do_func(gopts, opts, args)
sys.exit(0)
except ValueError, e:
print "The argument types being sent to the function %s are incorrect. Please double check them." % sys.argv[1]
except IndexError, e:
print "%s is an unknown command" % sys.argv[-1]
except Exception, e:
print "uknown error"
printHelp(None,None,None,parser)
|
py | 1a410a1977091d4d1f349ce80ac03445c7b08807 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
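# Packaging sketch (assumes the standard setuptools workflow; commands are illustrative,
# not part of the original file):
#   python setup.py sdist bdist_wheel
#   pip install .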
setuptools.setup(
name="PacaPy-raul-guajardo",
version="0.0.1",
author="Raul Guajardo",
author_email="[email protected]",
description="A package designed as a wrapper over Alpaca API for my general use.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/raulguajardo/PacaPy",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
) |
py | 1a410b575c4efaf3bdaf7e00f193e4b5aa456ff5 | # coding=utf-8
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image
from PIL import ImageDraw
import os.path as osp
import numpy as np
import json
class CPDataset(data.Dataset):
"""Dataset for CP-VTON+.
"""
def __init__(self, opt):
super(CPDataset, self).__init__()
# base setting
self.opt = opt
self.root = opt.dataroot
self.datamode = opt.datamode # train or test or self-defined
self.stage = opt.stage # GMM or TOM
self.data_list = opt.data_list
self.fine_height = opt.fine_height
self.fine_width = opt.fine_width
self.radius = opt.radius
self.data_path = osp.join(opt.dataroot, opt.datamode)
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
self.transformmask = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# load data list
im_names = []
c_names = []
with open(osp.join(opt.dataroot, opt.data_list), 'r') as f:
for line in f.readlines():
im_name, c_name = line.strip().split()
im_names.append(im_name)
c_names.append(c_name)
self.im_names = im_names
self.c_names = c_names
def name(self):
return "CPDataset"
def __getitem__(self, index):
c_name = self.c_names[index]
im_name = self.im_names[index]
if self.stage == 'GMM':
c = Image.open(osp.join(self.data_path, 'cloth', c_name))
cm = Image.open(osp.join(self.data_path, 'cloth-mask', c_name)).convert('L')
else:
c = Image.open(osp.join(self.data_path, 'warp-cloth', im_name)) # c_name, if that is used when saved
cm = Image.open(osp.join(self.data_path, 'warp-mask', im_name)).convert('L') # c_name, if that is used when saved
c = self.transform(c) # [-1,1]
cm_array = np.array(cm)
cm_array = (cm_array >= 128).astype(np.float32)
cm = torch.from_numpy(cm_array) # [0,1]
cm.unsqueeze_(0)
# person image
im = Image.open(osp.join(self.data_path, 'image', im_name))
im = self.transform(im) # [-1,1]
"""
LIP labels
[(0, 0, 0), # 0=Background
(128, 0, 0), # 1=Hat
(255, 0, 0), # 2=Hair
(0, 85, 0), # 3=Glove
(170, 0, 51), # 4=SunGlasses
(255, 85, 0), # 5=UpperClothes
(0, 0, 85), # 6=Dress
(0, 119, 221), # 7=Coat
(85, 85, 0), # 8=Socks
(0, 85, 85), # 9=Pants
(85, 51, 0), # 10=Jumpsuits
(52, 86, 128), # 11=Scarf
(0, 128, 0), # 12=Skirt
(0, 0, 255), # 13=Face
(51, 170, 221), # 14=LeftArm
(0, 255, 255), # 15=RightArm
(85, 255, 170), # 16=LeftLeg
(170, 255, 85), # 17=RightLeg
(255, 255, 0), # 18=LeftShoe
(255, 170, 0) # 19=RightShoe
(170, 170, 50) # 20=Skin/Neck/Chest (Newly added after running dataset_neck_skin_correction.py)
]
"""
# load parsing image
parse_name = im_name.replace('.jpg', '.png')
im_parse = Image.open(
# osp.join(self.data_path, 'image-parse', parse_name)).convert('L')
osp.join(self.data_path, 'image-parse-new', parse_name)).convert('L') # updated new segmentation
parse_array = np.array(im_parse)
im_mask = Image.open(
osp.join(self.data_path, 'image-mask', parse_name)).convert('L')
mask_array = np.array(im_mask)
# parse_shape = (parse_array > 0).astype(np.float32) # CP-VTON body shape
# Get shape from body mask (CP-VTON+)
## parse_shape = (mask_array > 0).astype(np.float32) can only detect white background
parse_shape = (parse_array > 0).astype(np.float32)
if self.stage == 'GMM':
parse_head = (parse_array == 1).astype(np.float32) + \
(parse_array == 4).astype(np.float32) + \
(parse_array == 13).astype(
np.float32) # CP-VTON+ GMM input (reserved regions)
else:
parse_head = (parse_array == 1).astype(np.float32) + \
(parse_array == 2).astype(np.float32) + \
(parse_array == 4).astype(np.float32) + \
(parse_array == 9).astype(np.float32) + \
(parse_array == 12).astype(np.float32) + \
(parse_array == 13).astype(np.float32) + \
(parse_array == 16).astype(np.float32) + \
(parse_array == 17).astype(
np.float32) # CP-VTON+ TOM input (reserved regions)
parse_cloth = (parse_array == 5).astype(np.float32) + \
(parse_array == 6).astype(np.float32) + \
(parse_array == 7).astype(np.float32) # upper-clothes labels
# shape downsample
parse_shape_ori = Image.fromarray((parse_shape*255).astype(np.uint8))
parse_shape = parse_shape_ori.resize(
(self.fine_width//16, self.fine_height//16), Image.BILINEAR)
parse_shape = parse_shape.resize(
(self.fine_width, self.fine_height), Image.BILINEAR)
parse_shape_ori = parse_shape_ori.resize(
(self.fine_width, self.fine_height), Image.BILINEAR)
shape_ori = self.transformmask(parse_shape_ori) # [-1,1]
shape = self.transformmask(parse_shape)# [-1,1]
phead = torch.from_numpy(parse_head) # [0,1]
# phand = torch.from_numpy(parse_hand) # [0,1]
pcm = torch.from_numpy(parse_cloth) # [0,1]
# upper cloth
im_c = im * pcm + (1 - pcm) # [-1,1], fill 1 for other parts
im_h = im * phead - (1 - phead) # [-1,1], fill 0 for other parts
# load pose points
pose_name = im_name.replace('.jpg', '_keypoints.json')
with open(osp.join(self.data_path, 'pose', pose_name), 'r') as f:
pose_label = json.load(f)
pose_data = pose_label[0]["keypoints"]
pose_data = np.array(pose_data)
pose_data = pose_data.reshape((-1, 3))
point_num = pose_data.shape[0]
pose_map = torch.zeros(point_num, self.fine_height, self.fine_width)
r = self.radius
im_pose = Image.new('L', (self.fine_width, self.fine_height))
pose_draw = ImageDraw.Draw(im_pose)
for i in range(point_num):
one_map = Image.new('L', (self.fine_width, self.fine_height))
draw = ImageDraw.Draw(one_map)
pointx = pose_data[i, 0]
pointy = pose_data[i, 1]
if pointx > 1 and pointy > 1:
draw.rectangle((pointx-r, pointy-r, pointx +
r, pointy+r), 'white', 'white')
pose_draw.rectangle(
(pointx-r, pointy-r, pointx+r, pointy+r), 'white', 'white')
one_map = self.transformmask(one_map)
pose_map[i] = one_map[0]
# just for visualization
im_pose = self.transformmask(im_pose)
# cloth-agnostic representation
agnostic = torch.cat([shape, im_h, pose_map], 0)
if self.stage == 'GMM':
im_g = Image.open('grid.png')
im_g = self.transform(im_g)
else:
im_g = ''
pcm.unsqueeze_(0) # CP-VTON+
result = {
'c_name': c_name, # for visualization
'im_name': im_name, # for visualization or ground truth
'cloth': c, # for input
'cloth_mask': cm, # for input
'image': im, # for visualization
'agnostic': agnostic, # for input
'parse_cloth': im_c, # for ground truth
'shape': shape, # for visualization
'head': im_h, # for visualization
'pose_image': im_pose, # for visualization
'grid_image': im_g, # for visualization
'parse_cloth_mask': pcm, # for CP-VTON+, TOM input
'shape_ori': shape_ori, # original body shape without resize
}
return result
def __len__(self):
return len(self.im_names)
class CPDataLoader(object):
def __init__(self, opt, dataset):
super(CPDataLoader, self).__init__()
if opt.shuffle:
train_sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
train_sampler = None
self.data_loader = torch.utils.data.DataLoader(
dataset, batch_size=opt.batch_size, shuffle=(
train_sampler is None),
num_workers=opt.workers, pin_memory=True, sampler=train_sampler)
self.dataset = dataset
self.data_iter = self.data_loader.__iter__()
def next_batch(self):
try:
batch = self.data_iter.__next__()
except StopIteration:
self.data_iter = self.data_loader.__iter__()
batch = self.data_iter.__next__()
return batch
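# Sketch of a typical consumer loop (illustrative; key names match the dict returned
# by CPDataset.__getitem__ above):
#   loader = CPDataLoader(opt, CPDataset(opt))
#   batch = loader.next_batch()
#   agnostic, cloth = batch['agnostic'], batch['cloth']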
if __name__ == "__main__":
print("Check the dataset for geometric matching module!")
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--dataroot", default="data")
parser.add_argument("--datamode", default="train")
parser.add_argument("--stage", default="GMM")
parser.add_argument("--data_list", default="train_pairs.txt")
parser.add_argument("--fine_width", type=int, default=192)
parser.add_argument("--fine_height", type=int, default=256)
parser.add_argument("--radius", type=int, default=3)
parser.add_argument("--shuffle", action='store_true',
help='shuffle input data')
parser.add_argument('-b', '--batch-size', type=int, default=4)
parser.add_argument('-j', '--workers', type=int, default=1)
opt = parser.parse_args()
dataset = CPDataset(opt)
data_loader = CPDataLoader(opt, dataset)
print('Size of the dataset: %05d, dataloader: %04d'
% (len(dataset), len(data_loader.data_loader)))
first_item = dataset.__getitem__(0)
first_batch = data_loader.next_batch()
from IPython import embed
embed() |
py | 1a410c34c8a0a79b948f4ce7f79ec008fbdd5b60 | import sys
from time import time
import pandas as pd
from pandas import DataFrame
import numpy as np
import matplotlib.pyplot as plt
import itertools
import matplotlib as mpl
from scipy import linalg
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn import mixture
np.random.seed(42)
###Get command line arguments
clusterType = sys.argv[1] #Clustering algorithm
fileID = sys.argv[2]; #fileID
set = sys.argv[3]; #Set
numSpeakers = sys.argv[4]; #Number of Speakers
blockLength = sys.argv[5]; #Block length
hopLength = sys.argv[6]; #Hop length
thresholdOrder = sys.argv[7] #Adaptive Threshold order
extraid = int(sys.argv[8]); #extraid
gmm_co_var_type = sys.argv[9]; #'full' or 'tied'
estimated_labels = [];
###Prepare output file path
outputRoot = "/Users/avrosh/Documents/Coursework/7100_Spring_16/Dataset/dataset/"+set+"/"+"set"+set+"_S"+numSpeakers+"_"+hopLength+"_"+blockLength+"_"+fileID+"_"+thresholdOrder
if extraid != 0:
outputRoot = outputRoot + "_" + str(extraid)
outputRoot = outputRoot + "_" + clusterType + ".csv"
# print outputRoot
txtResultFile = open(outputRoot, "w")
###Prepare input file path
path = "/Users/avrosh/Documents/Coursework/7100_Spring_16/Dataset/dataset/"+set+"/features/set"+set+"_"+hopLength+"_"+blockLength+"_S"+numSpeakers+"_"+fileID+"_"+thresholdOrder
if extraid != 0:
path = path + "_" + str(extraid)
path = path + ".csv"
#print path
f = open(path)
f.readline()
###Read data
data = np.loadtxt(fname = f, delimiter=',')
all_labels = data[:,0]
labels = all_labels[all_labels != 0]
#labels = data[:,0]
#print labels
#normalize data
features = data[data[:,0] != 0]
features = scale(features[:,1:])
unscaled_features = features[:,1:]
#features = data[:,1:]
#print features
n_samples, n_features = features.shape
n_speakers = int(numSpeakers)
#speaker_ids = np.unique(labels)
#print speaker_ids
print ("n_speakers %d \nn_samples %d \nn_features %d" % (int(n_speakers),int(n_samples),int(n_features)))
sample_size = 300
print(79 * '_')
###Method
def visualize_gmm(data,gmm):
##Visualize data
reduced_data = PCA(n_components=2).fit_transform(data)
gmm.fit(reduced_data)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm','k'])
global estimated_labels
# print estimated_labels
estimated_speaker_ids = np.unique(estimated_labels)
for speaker in estimated_speaker_ids:
speaker_labels = np.argwhere(labels==speaker)
plt.scatter(reduced_data[speaker_labels,0],
reduced_data[speaker_labels,1],
color=next(color_iter))
for i, (clf, title) in enumerate([(gmm, 'Clustered using GMM (showing PCA reduced plot)')]):
splot = plt.subplot(1, 1, 1 + i)
Y_ = clf.predict(reduced_data)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
# if not np.any(Y_ == i):
# continue
# plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# print X[Y_ == i, 0]
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-6, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.legend(["Ellipses are clusters, dots are short blocks of audio"])
plt.show()
###Method
def visualize_kmeans(data):
########################################################################
#Visualize data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++',n_clusters=n_speakers,n_init=10)
kmeans.fit(reduced_data)
#step size of mesh
h = .02
#Plot the decision boundary
x_min, x_max = reduced_data[:,0].min() - 1, reduced_data[:,0].max() + 1
y_min, y_max = reduced_data[:,1].min() - 1, reduced_data[:,1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
#Obtain labels for each point in mesh
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
#Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
#Colour Cycler
colorcycler = itertools.cycle(['r', 'g', 'b', 'y','c','k','w','m'])
estimated_speaker_ids = np.unique(Z)
for speaker in estimated_speaker_ids:
speaker_labels = np.argwhere(labels==speaker)
# for every_speaker in speaker_labels:
# j = j + 1
# txtResultFile.write("{0},{1}".format(np.int_(speaker),np.int_(every_speaker)))
# if i==len(speaker_ids):
# if j<len(speaker_labels):
# txtResultFile.write(",")
# else:
# txtResultFile.write(",")
plt.scatter(reduced_data[speaker_labels,0],
reduced_data[speaker_labels,1],
color=next(colorcycler))
#plt.plot(reduced_data[:,0], reduced_data[:,1], 'k.',markersize=2)
#plt.plot(reduced_data[:,0],reduced_data[:,1],'g^', reduced_data[:,0])
#plot the centroids as white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:,0],centroids[:,1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the speakers (PCA-reduced data)')
plt.xlim(x_min,x_max)
plt.ylim(y_min,y_max)
plt.xticks(())
plt.yticks(())
plt.show()
###Method
def cluster(estimator, name, data):
t0 = time()
estimator.fit(data)
global estimated_labels
estimated_labels = estimator.predict(data)
# print estimated_labels
# homogeneity_score = metrics.homogeneity_score(labels,estimated_labels)
# completeness_score = metrics.completeness_score(labels, estimated_labels)
# v_measure_score = metrics.v_measure_score(labels, estimated_labels)
# adjusted_rand_score = metrics.adjusted_rand_score(labels, estimated_labels)
# adjusted_mutual_info_score = metrics.adjusted_mutual_info_score(labels, estimated_labels)
## silhouette_score = metrics.silhouette_score(features, estimated_labels,
## metric='euclidean',
## sample_size=sample_size)
i=0
j=0
for label in all_labels:
i = i + 1;
# txtResultFile.write("{0}".format(label))
# txtResultFile.write(",")
if label == 0:
txtResultFile.write("{0}".format(-1))
else:
txtResultFile.write("{0}".format(estimated_labels[j]))
j = j + 1
if i<len(all_labels):
txtResultFile.write("\n")
# print('Name: % 9s \n'
# 'Time: %.2fs \n'
# 'Homogeneity Score: %.3f \n'
# 'Completeness Score: %.3f \n'
# 'V Measure score: %.3f \n'
# 'Adjusted rand score: %.3f \n'
# 'Adjusted Mutual Info score: %.3f \n'
# % (name, (time()-t0),
# homogeneity_score,
# completeness_score,
# v_measure_score,
# adjusted_rand_score,
# adjusted_mutual_info_score))
print(79 * '_')
#KMeans
if (clusterType == "kmeans"):
cluster(KMeans(init='k-means++', n_clusters=n_speakers, n_init=10),
name='k-means++',
data=features)
visualize_kmeans(features)
##KMeans with random initialization
if (clusterType == "kmeans-rand"):
cluster(KMeans(init='random', n_clusters=n_speakers, n_init=10),
name='Random',
data=features)
visualize_kmeans(features)
#
##KMeans PCA
#in this case the seeding of the centers is deterministic, hence we run the algorithm only once
if (clusterType == "kmeans-pca"):
pca = PCA(n_components=n_speakers).fit(features)
cluster(KMeans(init=pca.components_, n_clusters=n_speakers, n_init=1),
name='PCA-based',
data=features)
visualize_kmeans(features)
##GMM
# Fit a mixture of Gaussians with EM using five components
if (clusterType == "gmm"):
gmm = mixture.GMM(n_components=int(n_speakers), covariance_type=gmm_co_var_type)
cluster(gmm,
name='gmm',
data=features)
visualize_gmm(features,gmm)
##GMM-PCA
# Fit a mixture of Gaussians with EM using five components
if (clusterType == "gmm-pca"):
reduced_data = PCA(n_components=10).fit_transform(unscaled_features)
reduced_data = scale(reduced_data)
gmm = mixture.GMM(n_components=n_speakers, covariance_type=gmm_co_var_type)
cluster(gmm,
name='gmm-pca',
data=reduced_data)
visualize_gmm(reduced_data,gmm)
###Close output file
txtResultFile.close()
sys.exit()
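# Invocation sketch (argument order matches the sys.argv parsing at the top of this script;
# the script name and concrete values below are illustrative only):
#   python <this_script> kmeans 01 1 4 1024 512 3 0 full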
|
py | 1a410d266e134694d5764e9734bb7e3bfb49027f | '''
If we want to add a single element to an existing set, we can use the .add() operation.
It adds the element to the set and returns 'None'.
Example
>>> s = set('HackerRank')
>>> s.add('H')
>>> print s
set(['a', 'c', 'e', 'H', 'k', 'n', 'r', 'R'])
>>> print s.add('HackerRank')
None
>>> print s
set(['a', 'c', 'e', 'HackerRank', 'H', 'k', 'n', 'r', 'R'])
Task
Apply your knowledge of the .add() operation to help your friend Rupal.
Rupal has a huge collection of country stamps. She decided to count the total number of distinct country stamps in her collection. She asked for your help. You pick the stamps one by one from a stack of country stamps.
Find the total number of distinct country stamps.
Input Format
The first line contains an integer N , the total number of country stamps.
The next N lines contains the name of the country where the stamp is from.
Output Format
Output the total number of distinct country stamps on a single line.
Sample Input
7
UK
China
USA
France
New Zealand
UK
France
Sample Output
5
Explanation
UK and France repeat twice. Hence, the total number of distinct country stamps is 5 (five).
'''
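# An equivalent one-liner (assuming the same stdin format described above) would be:
#   print(len({input() for _ in range(int(input()))}))
# The reference solution below builds the set explicitly with .add().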
s = set('')
for _ in range(int(input())):
s.add(str(input()))
print(len(s)) |
py | 1a410e85ee3511d7d6fef3c33781ca335ba8858d | from sys import argv
script, filename = argv
print "We're goin to erase %r." % filename
print "If you don't want that, hit CTRL-C (^C)."
print "If you do want that, hit RETURN."
raw_input("?")
print "Opening the file..."
target = open(filename, 'w')
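# Note: opening with mode 'w' already truncates the file, so the explicit truncate()
# below is redundant; it is kept here as in the original exercise.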
print "Truncating the file. Goodbye!"
target.truncate()
print "Now I'm going to ask you for three lines."
line1 = raw_input("line 1: ")
line2 = raw_input("line 2: ")
line3 = raw_input("line 3: ")
print "I'm going to write these to the file."
target.write("%s\n%s\n%s\n" % (line1, line2, line3))
print "And finally, we close it."
target.close() |
py | 1a410f58dc5f210865cbc454476f329a9d8b290b | import os
from . import CONFIG_DIR, ensure_config_dir
import json
class TokenNotFoundError(Exception):
pass
TOKENS_PATH = os.path.join(CONFIG_DIR, "tokens.json")
def load_tokens_dict():
if not os.path.isfile(TOKENS_PATH):
return {}
with open(TOKENS_PATH) as file:
return json.load(file)
def save_token(lms, identifier, token):
ensure_config_dir()
tokens = load_tokens_dict()
if lms not in tokens:
tokens[lms] = {}
tokens[lms][identifier] = token
with open(TOKENS_PATH, "w") as file:
json.dump(tokens, file, indent=2)
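# Usage sketch (the "canvas"/"alice" identifiers below are hypothetical examples):
#   save_token("canvas", "alice", "abc123")
#   token = get_token("canvas", "alice")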
def get_token(lms, identifier):
tokens = load_tokens_dict()
if lms not in tokens or identifier not in tokens[lms]:
raise Exception("Token not found")
return tokens[lms][identifier]
def get_or_prompt_token(console, lms, identifier):
try:
return get_token(lms, identifier)
except BaseException as e:
pass
token = console.get(f"Enter {lms} token")
if token is None or token == "":
raise TokenNotFoundError(f"Couldn't get token for {lms}:{identifier}")
token = token.strip()
save = console.ask(f"Save token [red]{TOKENS_PATH}[/red]?", default=True)
if save:
save_token(lms, identifier, token)
return token |
py | 1a41101860b91760ea62c578ddd4f73642288a58 | # class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkGraphLayout(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkGraphLayout(), 'Processing.',
('vtkAbstractGraph',), ('vtkAbstractGraph',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
py | 1a41102d1c9c409296a127cdf1e010438186e1f1 | from django.conf.urls import url
import polygon.contest.views as v
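# Reversal sketch (illustrative; the final prefix depends on where this urlconf is included):
#   reverse('contest_problem_manage', kwargs={'pk': 1})  # -> '.../1/problems/'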
urlpatterns = [
url(r'^$', v.ContestList.as_view(), name='contest_list'),
url(r'^create/$', v.ContestCreate.as_view(), name='contest_create'),
url(r'^(?P<pk>\d+)/visible/$', v.ContestToggleVisible.as_view(), name='contest_toggle_visible'),
url(r'^(?P<pk>\d+)/meta/$', v.ContestEdit.as_view(), name='contest_meta'),
url(r'^(?P<pk>\d+)/access/$', v.ContestAccessManage.as_view(), name='contest_access_manage'),
url(r'^(?P<pk>\d+)/authors/$', v.ContestAuthorsManage.as_view(), name='contest_author_manage'),
url(r'^(?P<pk>\d+)/problems/$', v.ContestProblemManage.as_view(), name='contest_problem_manage'),
url(r'^(?P<pk>\d+)/problems/create/$', v.ContestProblemCreate.as_view(), name='contest_problem_create'),
url(r'^(?P<pk>\d+)/problems/reorder/$', v.ContestProblemReorder.as_view(), name='contest_problem_reorder'),
url(r'^(?P<pk>\d+)/problems/readjust/$', v.ContestProblemChangeWeight.as_view(),
name='contest_problem_readjust_point'),
url(r'^(?P<pk>\d+)/problems/identifier/change/$', v.ContestProblemChangeIdentifier.as_view(),
name='contest_problem_readjust_identifier'),
url(r'^(?P<pk>\d+)/problems/delete/$', v.ContestProblemDelete.as_view(), name='contest_problem_delete'),
url(r'^(?P<pk>\d+)/invitation/$', v.ContestInvitationList.as_view(), name='contest_invitation'),
url(r'^(?P<pk>\d+)/invitation/create/$', v.ContestInvitationCreate.as_view(),
name='contest_invitation_create'),
url(r'^(?P<pk>\d+)/invitation/(?P<invitation_pk>\d+)/delete/$', v.ContestInvitationDelete.as_view(),
name='contest_invitation_delete'),
url(r'^(?P<pk>\d+)/invitation/(?P<invitation_pk>\d+)/assign/$', v.ContestInvitationAssign.as_view(),
name='contest_invitation_assign'),
url(r'^(?P<pk>\d+)/invitation/download/$', v.ContestInvitationCodeDownload.as_view(),
name='contest_invitation_download'),
url(r'^(?P<pk>\d+)/participants/$', v.ContestParticipantList.as_view(), name='contest_participant'),
url(r'^(?P<pk>\d+)/participants/(?P<participant_pk>\d+)/change/$',
v.ContestParticipantCommentUpdate.as_view(), name='contest_participant_change'),
url(r'^(?P<pk>\d+)/participants/(?P<participant_pk>\d+)/star/$', v.ContestParticipantStarToggle.as_view(),
name='contest_participant_star_toggle'),
url(r'^(?P<pk>\d+)/participants/create/$', v.ContestParticipantCreate.as_view(),
name='contest_participant_create'),
url(r'^(?P<pk>\d+)/participants/download/$', v.ContestParticipantsNoteDownload.as_view(),
name='contest_participant_download'),
url(r'^(?P<pk>\d+)/status/$', v.ContestStatusBackend.as_view(), name='contest_status'),
url(r'^(?P<pk>\d+)/rejudge/$', v.RejudgeContestProblemSubmission.as_view(), name='contest_rejudge'),
url(r'^(?P<pk>\d+)/disable/(?P<participant_pk>\d+)/$', v.ContestAccountDisable.as_view(), name='contest_account_disable'),
url(r'^(?P<pk>\d+)/anticheat/$', v.ContestAntiCheatStatus.as_view(), name='contest_anti_cheat_status'),
url(r'^(?P<pk>\d+)/anticheat/start/$', v.ContestAntiCheatAnalysisStart.as_view(), name='contest_anti_cheat_start'),
url(r'^(?P<pk>\d+)/anticheat/report/(?P<submission_pk>\d+)/$', v.ContestAntiCheatReport.as_view(), name='contest_anti_cheat_report'),
]
|
py | 1a41105aa9b99671334724431ac19074bd2b22ea | # Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import inspect
import pytest
import pyarmnn as ann
import numpy as np
import pyarmnn._generated.pyarmnn as generated
def test_activation_descriptor_default_values():
desc = ann.ActivationDescriptor()
assert desc.m_Function == ann.ActivationFunction_Sigmoid
assert desc.m_A == 0
assert desc.m_B == 0
def test_argminmax_descriptor_default_values():
desc = ann.ArgMinMaxDescriptor()
assert desc.m_Function == ann.ArgMinMaxFunction_Min
assert desc.m_Axis == -1
def test_batchnormalization_descriptor_default_values():
desc = ann.BatchNormalizationDescriptor()
assert desc.m_DataLayout == ann.DataLayout_NCHW
np.allclose(0.0001, desc.m_Eps)
def test_batchtospacend_descriptor_default_values():
desc = ann.BatchToSpaceNdDescriptor()
assert desc.m_DataLayout == ann.DataLayout_NCHW
assert [1, 1] == desc.m_BlockShape
assert [(0, 0), (0, 0)] == desc.m_Crops
def test_batchtospacend_descriptor_assignment():
desc = ann.BatchToSpaceNdDescriptor()
desc.m_BlockShape = (1, 2, 3)
ololo = [(1, 2), (3, 4)]
size_1 = len(ololo)
desc.m_Crops = ololo
assert size_1 == len(ololo)
desc.m_DataLayout = ann.DataLayout_NHWC
assert ann.DataLayout_NHWC == desc.m_DataLayout
assert [1, 2, 3] == desc.m_BlockShape
assert [(1, 2), (3, 4)] == desc.m_Crops
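# Note: assigning Python lists/tuples to m_BlockShape/m_Crops converts them to the
# underlying vectors, as exercised above; the parametrized tests below check that
# ill-typed inputs raise TypeError instead.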
@pytest.mark.parametrize("input_shape, value, vtype", [([-1], -1, 'int'), (("one", "two"), "'one'", 'str'),
([1.33, 4.55], 1.33, 'float'),
([{1: "one"}], "{1: 'one'}", 'dict')], ids=lambda x: str(x))
def test_batchtospacend_descriptor_rubbish_assignment_shape(input_shape, value, vtype):
desc = ann.BatchToSpaceNdDescriptor()
with pytest.raises(TypeError) as err:
desc.m_BlockShape = input_shape
assert "Failed to convert python input value {} of type '{}' to C type 'j'".format(value, vtype) in str(err.value)
@pytest.mark.parametrize("input_crops, value, vtype", [([(1, 2), (3, 4, 5)], '(3, 4, 5)', 'tuple'),
([(1, 'one')], "(1, 'one')", 'tuple'),
([-1], -1, 'int'),
([(1, (1, 2))], '(1, (1, 2))', 'tuple'),
([[1, [1, 2]]], '[1, [1, 2]]', 'list')
], ids=lambda x: str(x))
def test_batchtospacend_descriptor_rubbish_assignment_crops(input_crops, value, vtype):
desc = ann.BatchToSpaceNdDescriptor()
with pytest.raises(TypeError) as err:
desc.m_Crops = input_crops
assert "Failed to convert python input value {} of type '{}' to C type".format(value, vtype) in str(err.value)
def test_batchtospacend_descriptor_empty_assignment():
desc = ann.BatchToSpaceNdDescriptor()
desc.m_BlockShape = []
assert [] == desc.m_BlockShape
def test_batchtospacend_descriptor_ctor():
desc = ann.BatchToSpaceNdDescriptor([1, 2, 3], [(4, 5), (6, 7)])
assert desc.m_DataLayout == ann.DataLayout_NCHW
assert [1, 2, 3] == desc.m_BlockShape
assert [(4, 5), (6, 7)] == desc.m_Crops
def test_convolution2d_descriptor_default_values():
desc = ann.Convolution2dDescriptor()
assert desc.m_PadLeft == 0
assert desc.m_PadTop == 0
assert desc.m_PadRight == 0
assert desc.m_PadBottom == 0
assert desc.m_StrideX == 0
assert desc.m_StrideY == 0
assert desc.m_DilationX == 1
assert desc.m_DilationY == 1
assert desc.m_BiasEnabled == False
assert desc.m_DataLayout == ann.DataLayout_NCHW
def test_depthtospace_descriptor_default_values():
desc = ann.DepthToSpaceDescriptor()
assert desc.m_BlockSize == 1
assert desc.m_DataLayout == ann.DataLayout_NHWC
def test_depthwise_convolution2d_descriptor_default_values():
desc = ann.DepthwiseConvolution2dDescriptor()
assert desc.m_PadLeft == 0
assert desc.m_PadTop == 0
assert desc.m_PadRight == 0
assert desc.m_PadBottom == 0
assert desc.m_StrideX == 0
assert desc.m_StrideY == 0
assert desc.m_DilationX == 1
assert desc.m_DilationY == 1
assert desc.m_BiasEnabled == False
assert desc.m_DataLayout == ann.DataLayout_NCHW
def test_detectionpostprocess_descriptor_default_values():
desc = ann.DetectionPostProcessDescriptor()
assert desc.m_MaxDetections == 0
assert desc.m_MaxClassesPerDetection == 1
assert desc.m_DetectionsPerClass == 1
assert desc.m_NmsScoreThreshold == 0
assert desc.m_NmsIouThreshold == 0
assert desc.m_NumClasses == 0
assert desc.m_UseRegularNms == False
assert desc.m_ScaleH == 0
assert desc.m_ScaleW == 0
assert desc.m_ScaleX == 0
assert desc.m_ScaleY == 0
def test_fakequantization_descriptor_default_values():
desc = ann.FakeQuantizationDescriptor()
np.allclose(6, desc.m_Max)
np.allclose(-6, desc.m_Min)
def test_fully_connected_descriptor_default_values():
desc = ann.FullyConnectedDescriptor()
assert desc.m_BiasEnabled == False
assert desc.m_TransposeWeightMatrix == False
def test_instancenormalization_descriptor_default_values():
desc = ann.InstanceNormalizationDescriptor()
assert desc.m_Gamma == 1
assert desc.m_Beta == 0
assert desc.m_DataLayout == ann.DataLayout_NCHW
np.allclose(1e-12, desc.m_Eps)
def test_lstm_descriptor_default_values():
desc = ann.LstmDescriptor()
assert desc.m_ActivationFunc == 1
assert desc.m_ClippingThresCell == 0
assert desc.m_ClippingThresProj == 0
assert desc.m_CifgEnabled == True
assert desc.m_PeepholeEnabled == False
assert desc.m_ProjectionEnabled == False
assert desc.m_LayerNormEnabled == False
def test_l2normalization_descriptor_default_values():
desc = ann.L2NormalizationDescriptor()
assert desc.m_DataLayout == ann.DataLayout_NCHW
    assert np.allclose(1e-12, desc.m_Eps)
def test_mean_descriptor_default_values():
desc = ann.MeanDescriptor()
assert desc.m_KeepDims == False
def test_normalization_descriptor_default_values():
desc = ann.NormalizationDescriptor()
assert desc.m_NormChannelType == ann.NormalizationAlgorithmChannel_Across
assert desc.m_NormMethodType == ann.NormalizationAlgorithmMethod_LocalBrightness
assert desc.m_NormSize == 0
assert desc.m_Alpha == 0
assert desc.m_Beta == 0
assert desc.m_K == 0
assert desc.m_DataLayout == ann.DataLayout_NCHW
def test_origin_descriptor_default_values():
desc = ann.ConcatDescriptor()
assert 0 == desc.GetNumViews()
assert 0 == desc.GetNumDimensions()
assert 1 == desc.GetConcatAxis()
def test_origin_descriptor_incorrect_views():
desc = ann.ConcatDescriptor(2, 2)
with pytest.raises(RuntimeError) as err:
desc.SetViewOriginCoord(1000, 100, 1000)
assert "Failed to set view origin coordinates." in str(err.value)
def test_origin_descriptor_ctor():
desc = ann.ConcatDescriptor(2, 2)
value = 5
for i in range(desc.GetNumViews()):
for j in range(desc.GetNumDimensions()):
desc.SetViewOriginCoord(i, j, value+i)
desc.SetConcatAxis(1)
assert 2 == desc.GetNumViews()
assert 2 == desc.GetNumDimensions()
assert [5, 5] == desc.GetViewOrigin(0)
assert [6, 6] == desc.GetViewOrigin(1)
assert 1 == desc.GetConcatAxis()
def test_pad_descriptor_default_values():
desc = ann.PadDescriptor()
assert desc.m_PadValue == 0
def test_permute_descriptor_default_values():
pv = ann.PermutationVector((0, 2, 3, 1))
desc = ann.PermuteDescriptor(pv)
assert desc.m_DimMappings.GetSize() == 4
assert desc.m_DimMappings[0] == 0
assert desc.m_DimMappings[1] == 2
assert desc.m_DimMappings[2] == 3
assert desc.m_DimMappings[3] == 1
def test_pooling_descriptor_default_values():
desc = ann.Pooling2dDescriptor()
assert desc.m_PoolType == ann.PoolingAlgorithm_Max
assert desc.m_PadLeft == 0
assert desc.m_PadTop == 0
assert desc.m_PadRight == 0
assert desc.m_PadBottom == 0
assert desc.m_PoolHeight == 0
assert desc.m_PoolWidth == 0
assert desc.m_StrideX == 0
assert desc.m_StrideY == 0
assert desc.m_OutputShapeRounding == ann.OutputShapeRounding_Floor
assert desc.m_PaddingMethod == ann.PaddingMethod_Exclude
assert desc.m_DataLayout == ann.DataLayout_NCHW
def test_reshape_descriptor_default_values():
desc = ann.ReshapeDescriptor()
# check the empty Targetshape
assert desc.m_TargetShape.GetNumDimensions() == 0
def test_slice_descriptor_default_values():
    desc = ann.SliceDescriptor()
    # A default-constructed SliceDescriptor carries empty begin/size vectors.
    assert [] == desc.m_Begin
    assert [] == desc.m_Size
def test_resize_descriptor_default_values():
desc = ann.ResizeDescriptor()
assert desc.m_TargetWidth == 0
assert desc.m_TargetHeight == 0
assert desc.m_Method == ann.ResizeMethod_NearestNeighbor
assert desc.m_DataLayout == ann.DataLayout_NCHW
assert desc.m_BilinearAlignCorners == False
def test_spacetobatchnd_descriptor_default_values():
desc = ann.SpaceToBatchNdDescriptor()
assert desc.m_DataLayout == ann.DataLayout_NCHW
def test_spacetodepth_descriptor_default_values():
desc = ann.SpaceToDepthDescriptor()
assert desc.m_BlockSize == 1
assert desc.m_DataLayout == ann.DataLayout_NHWC
def test_stack_descriptor_default_values():
desc = ann.StackDescriptor()
assert desc.m_Axis == 0
assert desc.m_NumInputs == 0
# check the empty Inputshape
assert desc.m_InputShape.GetNumDimensions() == 0
def test_slice_descriptor_assignment():
desc = ann.SliceDescriptor()
desc.m_Begin = [1, 2, 3, 4, 5]
desc.m_Size = (1, 2, 3, 4)
assert [1, 2, 3, 4, 5] == desc.m_Begin
assert [1, 2, 3, 4] == desc.m_Size
def test_slice_descriptor_ctor():
desc = ann.SliceDescriptor([1, 2, 3, 4, 5], (1, 2, 3, 4))
assert [1, 2, 3, 4, 5] == desc.m_Begin
assert [1, 2, 3, 4] == desc.m_Size
def test_strided_slice_descriptor_default_values():
desc = ann.StridedSliceDescriptor()
desc.m_Begin = [1, 2, 3, 4, 5]
desc.m_End = [6, 7, 8, 9, 10]
desc.m_Stride = (10, 10)
desc.m_BeginMask = 1
desc.m_EndMask = 2
desc.m_ShrinkAxisMask = 3
desc.m_EllipsisMask = 4
desc.m_NewAxisMask = 5
assert [1, 2, 3, 4, 5] == desc.m_Begin
assert [6, 7, 8, 9, 10] == desc.m_End
assert [10, 10] == desc.m_Stride
assert 1 == desc.m_BeginMask
assert 2 == desc.m_EndMask
assert 3 == desc.m_ShrinkAxisMask
assert 4 == desc.m_EllipsisMask
assert 5 == desc.m_NewAxisMask
def test_strided_slice_descriptor_ctor():
desc = ann.StridedSliceDescriptor([1, 2, 3, 4, 5], [6, 7, 8, 9, 10], (10, 10))
desc.m_Begin = [1, 2, 3, 4, 5]
desc.m_End = [6, 7, 8, 9, 10]
desc.m_Stride = (10, 10)
assert [1, 2, 3, 4, 5] == desc.m_Begin
assert [6, 7, 8, 9, 10] == desc.m_End
assert [10, 10] == desc.m_Stride
def test_softmax_descriptor_default_values():
desc = ann.SoftmaxDescriptor()
assert desc.m_Axis == -1
    assert np.allclose(1.0, desc.m_Beta)
def test_space_to_batch_nd_descriptor_default_values():
desc = ann.SpaceToBatchNdDescriptor()
assert [1, 1] == desc.m_BlockShape
assert [(0, 0), (0, 0)] == desc.m_PadList
assert ann.DataLayout_NCHW == desc.m_DataLayout
def test_space_to_batch_nd_descriptor_assigned_values():
desc = ann.SpaceToBatchNdDescriptor()
desc.m_BlockShape = (90, 100)
desc.m_PadList = [(1, 2), (3, 4)]
assert [90, 100] == desc.m_BlockShape
assert [(1, 2), (3, 4)] == desc.m_PadList
assert ann.DataLayout_NCHW == desc.m_DataLayout
def test_space_to_batch_nd_descriptor_ctor():
desc = ann.SpaceToBatchNdDescriptor((1, 2, 3), [(1, 2), (3, 4)])
assert [1, 2, 3] == desc.m_BlockShape
assert [(1, 2), (3, 4)] == desc.m_PadList
assert ann.DataLayout_NCHW == desc.m_DataLayout
def test_transpose_convolution2d_descriptor_default_values():
    desc = ann.TransposeConvolution2dDescriptor()
assert desc.m_PadLeft == 0
assert desc.m_PadTop == 0
assert desc.m_PadRight == 0
assert desc.m_PadBottom == 0
assert desc.m_StrideX == 0
assert desc.m_StrideY == 0
assert desc.m_BiasEnabled == False
assert desc.m_DataLayout == ann.DataLayout_NCHW
def test_view_descriptor_default_values():
desc = ann.SplitterDescriptor()
assert 0 == desc.GetNumViews()
assert 0 == desc.GetNumDimensions()
def test_elementwise_unary_descriptor_default_values():
desc = ann.ElementwiseUnaryDescriptor()
assert desc.m_Operation == ann.UnaryOperation_Abs
def test_view_descriptor_incorrect_input():
desc = ann.SplitterDescriptor(2, 3)
with pytest.raises(RuntimeError) as err:
desc.SetViewOriginCoord(1000, 100, 1000)
assert "Failed to set view origin coordinates." in str(err.value)
with pytest.raises(RuntimeError) as err:
desc.SetViewSize(1000, 100, 1000)
assert "Failed to set view size." in str(err.value)
def test_view_descriptor_ctor():
desc = ann.SplitterDescriptor(2, 3)
value_size = 1
value_orig_coord = 5
for i in range(desc.GetNumViews()):
for j in range(desc.GetNumDimensions()):
desc.SetViewOriginCoord(i, j, value_orig_coord+i)
desc.SetViewSize(i, j, value_size+i)
assert 2 == desc.GetNumViews()
assert 3 == desc.GetNumDimensions()
assert [5, 5] == desc.GetViewOrigin(0)
assert [6, 6] == desc.GetViewOrigin(1)
assert [1, 1] == desc.GetViewSizes(0)
assert [2, 2] == desc.GetViewSizes(1)
def test_createdescriptorforconcatenation_ctor():
input_shape_vector = [ann.TensorShape((2, 1)), ann.TensorShape((3, 1)), ann.TensorShape((4, 1))]
desc = ann.CreateDescriptorForConcatenation(input_shape_vector, 0)
assert 3 == desc.GetNumViews()
assert 0 == desc.GetConcatAxis()
assert 2 == desc.GetNumDimensions()
c = desc.GetViewOrigin(1)
d = desc.GetViewOrigin(0)
def test_createdescriptorforconcatenation_wrong_shape_for_axis():
input_shape_vector = [ann.TensorShape((1, 2)), ann.TensorShape((3, 4)), ann.TensorShape((5, 6))]
with pytest.raises(RuntimeError) as err:
desc = ann.CreateDescriptorForConcatenation(input_shape_vector, 0)
assert "All inputs to concatenation must be the same size along all dimensions except the concatenation dimension" in str(
err.value)
@pytest.mark.parametrize("input_shape_vector", [([-1, "one"]),
([1.33, 4.55]),
([{1: "one"}])], ids=lambda x: str(x))
def test_createdescriptorforconcatenation_rubbish_assignment_shape_vector(input_shape_vector):
with pytest.raises(TypeError) as err:
desc = ann.CreateDescriptorForConcatenation(input_shape_vector, 0)
assert "in method 'CreateDescriptorForConcatenation', argument 1 of type 'std::vector< armnn::TensorShape,std::allocator< armnn::TensorShape > >'" in str(
err.value)
generated_classes = inspect.getmembers(generated, inspect.isclass)
generated_classes_names = list(map(lambda x: x[0], generated_classes))
@pytest.mark.parametrize("desc_name", ['ActivationDescriptor',
'ArgMinMaxDescriptor',
'PermuteDescriptor',
'SoftmaxDescriptor',
'ConcatDescriptor',
'SplitterDescriptor',
'Pooling2dDescriptor',
'FullyConnectedDescriptor',
'Convolution2dDescriptor',
'DepthwiseConvolution2dDescriptor',
'DetectionPostProcessDescriptor',
'NormalizationDescriptor',
'L2NormalizationDescriptor',
'BatchNormalizationDescriptor',
'InstanceNormalizationDescriptor',
'BatchToSpaceNdDescriptor',
'FakeQuantizationDescriptor',
'ResizeDescriptor',
'ReshapeDescriptor',
'SpaceToBatchNdDescriptor',
'SpaceToDepthDescriptor',
'LstmDescriptor',
'MeanDescriptor',
'PadDescriptor',
'SliceDescriptor',
'StackDescriptor',
'StridedSliceDescriptor',
'TransposeConvolution2dDescriptor',
'ElementwiseUnaryDescriptor'])
class TestDescriptorMassChecks:
def test_desc_implemented(self, desc_name):
assert desc_name in generated_classes_names
def test_desc_equal(self, desc_name):
desc_class = next(filter(lambda x: x[0] == desc_name, generated_classes))[1]
assert desc_class() == desc_class()
|
py | 1a4111ab6b3bb61c9e3d7501e6b67d78c8fdf79c | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
class MPNNLSTM(nn.Module):
r"""An implementation of the Message Passing Neural Network with Long Short Term Memory.
For details see this paper: `"Transfer Graph Neural Networks for Pandemic Forecasting." <https://arxiv.org/abs/2009.08388>`_
Args:
in_channels (int): Number of input features.
hidden_size (int): Dimension of hidden representations.
out_channels (int): Number of output features.
num_nodes (int): Number of nodes in the network.
window (int): Number of past samples included in the input.
dropout (float): Dropout rate.
"""
def __init__(self, in_channels: int, hidden_size: int ,
out_channels: int, num_nodes: int, window: int, dropout: float):
super(MPNNLSTM, self).__init__()
self.window = window
self.num_nodes = num_nodes
self.hidden_size = hidden_size
self.dropout = dropout
self.in_channels = in_channels
self.out_channels = out_channels
self._create_parameters_and_layers()
def _create_parameters_and_layers(self):
self._convolution_1 = GCNConv(self.in_channels, self.hidden_size)
self._convolution_2 = GCNConv(self.hidden_size, self.hidden_size)
self._batch_norm_1 = nn.BatchNorm1d(self.hidden_size)
self._batch_norm_2 = nn.BatchNorm1d(self.hidden_size)
self._recurrent_1 = nn.LSTM(2*self.hidden_size, self.hidden_size, 1)
self._recurrent_2 = nn.LSTM(self.hidden_size, self.hidden_size, 1)
def _graph_convolution_1(self, X, edge_index, edge_weight):
X = F.relu(self._convolution_1(X, edge_index, edge_weight))
X = self._batch_norm_1(X)
X = F.dropout(X, p=self.dropout, training=self.training)
return X
def _graph_convolution_2(self, X, edge_index, edge_weight):
X = F.relu(self._convolution_2(X, edge_index, edge_weight))
X = self._batch_norm_2(X)
X = F.dropout(X, p=self.dropout, training=self.training)
return X
def forward(self, X: torch.FloatTensor, edge_index: torch.LongTensor,
edge_weight: torch.FloatTensor) -> torch.FloatTensor:
"""
Making a forward pass through the whole architecture.
Arg types:
* **X** *(PyTorch FloatTensor)* - Node features.
* **edge_index** *(PyTorch LongTensor)* - Graph edge indices.
            * **edge_weight** *(PyTorch FloatTensor)* - Edge weight vector.
Return types:
* **H** *(PyTorch FloatTensor)* - The hidden representation of size 2*nhid+in_channels+window-1 for each node.
"""
R = list()
S = X.view(-1, self.window, self.num_nodes, self.in_channels)
S = torch.transpose(S, 1, 2)
S = S.reshape(-1, self.window, self.in_channels)
O = [S[:,0,:]]
for l in range(1, self.window):
O.append(S[:, l, self.in_channels-1].unsqueeze(1))
S = torch.cat(O, dim=1)
X = self._graph_convolution_1(X, edge_index, edge_weight)
R.append(X)
X = self._graph_convolution_2(X, edge_index, edge_weight)
R.append(X)
X = torch.cat(R, dim=1)
X = X.view(-1, self.window, self.num_nodes, X.size(1))
X = torch.transpose(X, 0, 1)
X = X.contiguous().view(self.window, -1, X.size(3))
X, (H_1, C_1) = self._recurrent_1(X)
X, (H_2, C_2) = self._recurrent_2(X)
H = torch.cat([H_1[0, :, :], H_2[0, :, :], S], dim=1)
return H
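# A minimal, hypothetical usage sketch (not part of the original model code): it
# only illustrates the expected tensor shapes. The random graph, window=1 and the
# sizes below are assumptions chosen for brevity, not values from the paper.
if __name__ == "__main__":
    num_nodes, window, in_channels, hidden_size = 20, 1, 8, 32
    model = MPNNLSTM(in_channels, hidden_size, out_channels=1,
                     num_nodes=num_nodes, window=window, dropout=0.5)
    X = torch.rand(num_nodes * window, in_channels)       # stacked node features
    edge_index = torch.randint(0, num_nodes, (2, 60))      # random edge list
    edge_weight = torch.rand(edge_index.size(1))           # one weight per edge
    H = model(X, edge_index, edge_weight)
    print(H.shape)  # (num_nodes, 2 * hidden_size + in_channels + window - 1)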
|
py | 1a41125ad7a08a33372071c6c0a23713b05fe61d | import os
import unittest
from digiroad.connection.PostgisServiceProvider import PostgisServiceProvider
from digiroad.entities import Point
from digiroad.logic.Operations import Operations
from digiroad.util import CostAttributes, FileActions
class PostgisServiceProviderTest(unittest.TestCase):
def setUp(self):
self.postgisServiceProvider = PostgisServiceProvider()
self.fileActions = FileActions()
self.operations = Operations(self.fileActions)
self.dir = os.getcwd()
def test_createATemporaryTable(self):
tableName = "temporalTable"
columns = {
"uuid": "uuid",
"ykr_from_id": "INTEGER",
"ykr_to_id": "INTEGER",
"travel_time": "DOUBLE PRECISION",
"travel_time_difference": "DOUBLE PRECISION",
"geometry": "GEOMETRY",
}
        connection = self.postgisServiceProvider.getConnection()
        try:
            self.postgisServiceProvider.createTemporaryTable(
                con=connection,
                tableName=tableName,
                columns=columns
            )
        finally:
            connection.close()
def test_getUUIDCode(self):
uuid = self.postgisServiceProvider.getUUID(con=self.postgisServiceProvider.getConnection())
print(uuid)
self.assertIsNotNone(uuid)
def test_bucle(self):
arrayList = [0, 1, 2, 3, 4, 5, 6, 7, 8]
expected = [[0, 3], [4, 7], [8, 8]]
jump = 4
self.assertEqual(expected, self.getModules(arrayList, jump))
arrayList = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
expected = [[0, 3], [4, 7], [8, 9]]
self.assertEqual(expected, self.getModules(arrayList, jump))
arrayList = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
expected = [[0, 3], [4, 7], [8, 10]]
self.assertEqual(expected, self.getModules(arrayList, jump))
arrayList = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
expected = [[0, 3], [4, 7], [8, 11]]
self.assertEqual(expected, self.getModules(arrayList, jump))
arrayList = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
expected = [[0, 3], [4, 7], [8, 11], [12, 12]]
self.assertEqual(expected, self.getModules(arrayList, jump))
arrayList = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
expected = [[0, 2], [3, 5], [6, 8], [9, 11], [12, 12]]
jump = 3
self.assertEqual(expected, self.getModules(arrayList, jump))
def getModules(self, arrayList, jump):
counter = 0
intervals = []
while counter < len(arrayList):
if counter + jump > len(arrayList):
jump = len(arrayList) % jump
intervals.append([counter, counter + jump - 1])
counter = counter + jump
print(intervals)
return intervals
|
py | 1a411266911a23385907509af9c4415aefca681b | edge_url = 'https://zjbisc.demo.featurehub.io'
client_eval_key = 'default/806d0fe8-2842-4d17-9e1f-1c33eedc5f31/wFk8qfmJEkLnkkJ8A8ZPTbgZPHPTJJ*heGuAGL6U8EKOUXbbRCL'
|
py | 1a411377590e1da98d0b9a908ee823076bac0bb3 | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from base64 import b64decode
from glob import glob
import imghdr
import os
import requests
import warnings
def acquire_note(directory, div):
if div.find('div').text is not None:
with open(os.path.join(directory, f'curation_notes.txt'), 'w', encoding='utf-8') as out:
out.write(div.find('div').text.strip())
else:
warnings.warn(f'Curation note could not be parsed for {model_id}', UserWarning)
def acquire_image(directory, div):
if div.find('img')['src'] is not None:
for img_file in div.find_all('img'):
img_format = img_file['src'].partition('/')[2].partition(';')[0]
img_data = b64decode(img_file['src'].partition(',')[2])
img_filename = os.path.join(directory, f'curation_image.{img_format}')
with open(img_filename, 'wb') as out:
out.write(img_data)
correct_img_format = imghdr.what(img_filename)
if correct_img_format != img_format:
os.rename(img_filename, os.path.join(directory, f'curation_image.{correct_img_format}'))
else:
warnings.warn(f'Curation image could not be found for {model_id}', UserWarning)
for model_path in glob(os.path.join('manual-fixes', '*')):
# acquire the raw HTML
model_id = os.path.basename(model_path)
url = f'https://www.ebi.ac.uk/biomodels/{model_id}#Curation'
response = requests.get(url, headers={'accept': 'application/html'})
soup = BeautifulSoup(response.text, 'lxml')
# acquire and export the curation notes
curation_divs = soup.find_all(class_='small-12 medium-6 large-6 columns')
for div in curation_divs:
if div.find('strong'):
acquire_note(model_path, div)
if div.find('img'):
acquire_image(model_path, div)
|
py | 1a4113fa1064dc578b39f4c28f06a46cbb7b6f70 |
default_app_config = 'django_business_rules.apps.DbrAppConfig'
|
py | 1a4114219dc7e1cd1a312a35751ea7ef41205c5e | import os
import sys
import time
import shlex
import signal
import logging
import traceback
import subprocess as sp
import multiprocessing as mp
import path
import daemon
import packaging
logger = logging.getLogger(__name__)
def status(module_name):
"""
Return the status of the module *module_name*
A module is considered as running if its pid file exists.
Return true if the module is running else false.
"""
pid_file = path.pid_file(module_name)
return os.path.exists(pid_file)
def status_all():
"""
Return the status of all the installed modules. See the above function
*status* for more details.
Return a dictionary from name to status (as boolean).
"""
modules = packaging.get_installed_modules()
return {name: status(name) for name in modules}
def execm(module_name, daemonize=True):
"""
Start a new module identified by its name *module_name*. The current
processus is killed at the end of the module when it's not a daemon. If it
is, the current processus is killed immediately. Use *invoke*
instead if you want to create a new killable process.
"""
child_proc = None
def signal_handler(signum, frame):
"""
Signal handler. If no child was created, it does nothing.
Else, it broadcasts the signal to the child.
"""
logger.info('Received signal %s, broadcasting it to child' % signum)
if child_proc is not None:
child_proc.send_signal(signum)
child_proc.wait()
# Check that only one instance is running at the same time
pid_file = path.pid_file(module_name)
if os.path.exists(pid_file):
        raise RuntimeError('A pid file already exists for this module')
# Get the start command from the configuration file
module_config = packaging.get_config(module_name)
    if 'start' not in module_config:
        raise RuntimeError(
            'Missing "start" entry in the module\'s configuration file')
start_cmd = module_config['start']
# Add our bin directory to the PATH variable
os.environ['PATH'] = path.bin_directory() + ':' + os.environ['PATH']
# Daemon or not Daemon ?
if daemonize:
# Create a daemon
daemon.possess_me()
# Redirect stdout and stderr into a log file
sys.stdout = open(path.log_file(module_name), 'a')
sys.stderr = sys.stdout
# Change the directory to the module directory
os.chdir(path.module_directory(module_name))
# Prepare to receive signal SIGINT and SIGTERM
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
return_code = 0
try:
# Write the new daemon pid in a new file
with open(pid_file, 'w') as f:
f.write(str(os.getpid()))
f.flush()
except (OSError, IOError) as e:
return_code = 1
else:
# Execute the start command
logger.info('Starting the module `%s`', module_name)
try:
child_proc = sp.Popen(shlex.split(start_cmd))
except OSError as e:
logger.exception(e)
return_code = 1
else:
return_code = child_proc.wait()
finally:
# Remove the pid file and return the corresponding code
logger.info('Shutting down the module `%s`', module_name)
os.remove(pid_file)
sys.exit(return_code)
sys.exit(0)
def invoke(module_name, daemonize=True):
"""
As exec_module, execute a module but fork before to keep the current
process active. To see if the module is really running, use the
*status* function.
"""
if status(module_name):
raise RuntimeError('Module `%s` is already running' % module_name)
proc = mp.Process(target=execm, args=(module_name, daemonize))
proc.start()
proc.join()
def invoke_all():
"""
Invoke all installed modules as daemon. Doesn't check if the modules are
correctly launch. Return the list of pid of the new processes.
"""
modules = packaging.get_installed_modules()
for name in modules:
try:
invoke(name, True)
time.sleep(0.1)
except RuntimeError as e:
logger.exception(e)
def stop(module_name):
"""
Stop the *module_name* module.
"""
if not status(module_name):
raise RuntimeError('Module `%s` is not running' % module_name)
remove_file = False
pid = 0
pid_file = path.pid_file(module_name)
with open(pid_file, 'r') as f:
try:
pid = int(f.readline())
except ValueError:
remove_file = True
if pid != 0:
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno == 3: # No such process
remove_file = True
else:
raise e
if remove_file:
os.remove(pid_file)
def stop_all():
"""
Stop all the running modules
"""
modules = packaging.get_installed_modules()
for name in modules:
try:
stop(name)
except RuntimeError:
pass # Ignore if we try to stop a stopped module
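# --- Hypothetical usage sketch (not part of the original module) ---
# Assuming this file is importable as `manager` and a module named "example" has
# been installed through the `packaging` helpers (both names are placeholders),
# a typical session looks like:
#
#     import manager
#     manager.invoke("example", daemonize=True)   # fork, daemonize, write pid file
#     print(manager.status_all())                 # e.g. {"example": True}
#     manager.stop("example")                     # send SIGTERM via the pid file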
|
py | 1a4114e80ac03f8ffc1c80781bb3f522e734dbec | #!/usr/bin/env python3
import argparse
import json
from pathlib import Path
from typing import NamedTuple
import subprocess as sp
from dataset_utils import rm_imgs_without_labels
LABEL_MAP = {
"car": 0,
"bus": 1,
"person": 2,
"bike": 3,
"truck": 4,
"motor": 5,
"train": 6,
"rider": 7,
"traffic sign": 8,
"traffic light": 9,
}
IMG_WIDTH = 1280
IMG_HEIGHT = 720
class Arguments(NamedTuple):
data_path: Path
train_labels: Path
val_labels: Path
output_dir: Path
def parse_args() -> argparse.Namespace:
ap = argparse.ArgumentParser(
description="""
Create a dataset of images and labels, along with a corresponding
bdd100k.data file, a train.txt, and a validation.txt that can be inputed
into darknet to train a YOLO model on the BDD100k dataset.
WARNING: This will copy the images in the dataset to a different directory.
I am OK with this as storage is cheap on my PC, but modify this if you don't
like it.
"""
)
ap.add_argument(
"--data-path",
help="Path to BDD dataset root (e.g. bdd100k/images/100k). Should contain the directories `train`, `test`, and `val` with .jpg images",
)
ap.add_argument(
"--train-labels",
help="Path to BDD100k training labels JSON file (e.g. bdd100k_labels_images_train.json)",
)
ap.add_argument(
"--val-labels",
help="Path to BDD100k validation labels JSON file (e.g. bdd100k_labels_images_val.json)",
)
ap.add_argument(
"--output-dir",
help="Path to output the YOLO compatible dataset and other darknet helper files",
)
return ap.parse_args()
def validate_args(args: argparse.Namespace) -> Arguments:
data_path = Path(args.data_path).absolute().resolve()
assert data_path.is_dir(), "Given data path is not a directory"
assert (
data_path / "train"
).is_dir(), "Given data path doesn't contain a subdirectory `train`"
assert (
data_path / "val"
).is_dir(), "Given data path doesn't contain a subdirectory `val`"
assert (
data_path / "test"
).is_dir(), "Given data path doesn't contain a subdirectory `test`"
train_labels = Path(args.train_labels).absolute().resolve()
assert (
train_labels.is_file()
), "Given training labels path is either not a file or doesn't exist"
val_labels = Path(args.val_labels).absolute().resolve()
assert (
val_labels.is_file()
), "Given validation labels path is either not a file or doesn't exist"
output_dir = Path(args.output_dir).absolute().resolve()
if output_dir.is_dir():
import sys
print(
"[WARNING] Output directory already exists, contents may be overwritten",
file=sys.stderr,
)
output_dir.mkdir(parents=True, exist_ok=True)
return Arguments(
data_path=data_path,
train_labels=train_labels,
val_labels=val_labels,
output_dir=output_dir,
)
def box2d_to_yolo(box2d):
x1 = box2d["x1"] / IMG_WIDTH
x2 = box2d["x2"] / IMG_WIDTH
y1 = box2d["y1"] / IMG_HEIGHT
y2 = box2d["y2"] / IMG_HEIGHT
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
width = abs(x2 - x1)
height = abs(y2 - y1)
return cx, cy, width, height
def label2txt(labels_json: Path, output_dir: Path):
"""
This function converts the labels into a .txt file with the same name as the image.
It extracts the bounding box, class info from the .json file and converts it into
the darknet format.
The darknet format is
<object id> <x> <y> <width> <height>
"""
assert labels_json.is_file(), "Labels JSON file doesn't exist"
assert output_dir.is_dir(), "Output directory doesn't exist"
frames = json.load(open(labels_json, "r"))
for frame in frames:
img_name = Path(frame["name"])
assert img_name.suffix == ".jpg"
frame_file = output_dir / (img_name.with_suffix(".txt"))
# Creates, opens, and adds to a txt file with the name of each image.jpg
with open(frame_file, "w+") as f:
# For each sub label of each image, get the box2d variable
# Get the relative center point compared to the image size 1280/720
for label in frame["labels"]:
if "box2d" not in label:
continue
box2d = label["box2d"]
if box2d["x1"] >= box2d["x2"] or box2d["y1"] >= box2d["y2"]:
continue
cx, cy, width, height = box2d_to_yolo(box2d)
lbl = LABEL_MAP[label["category"]]
f.write("{} {} {} {} {}\n".format(lbl, cx, cy, width, height))
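# Illustrative example (not taken from the dataset): a hypothetical "car" box of
# 200x100 px centred at (640, 360) in a 1280x720 frame is written as roughly
#   0 0.5 0.5 0.15625 0.13889
# following the "<object id> <cx> <cy> <width> <height>" format described above,
# with all coordinates normalised by IMG_WIDTH and IMG_HEIGHT.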
if __name__ == "__main__":
args = validate_args(parse_args())
# First, copy each data directory over to the output directory.
for dir in ["train", "val", "test"]:
src = args.data_path / dir
dst = args.output_dir / dir
dst.mkdir(parents=True, exist_ok=True)
cp_cmd = [
"rsync",
"-a",
str(src) + "/", # Trailing slash needed for rsync
str(dst),
]
print("-- Copying the data over to {}".format(dst))
print("> {}".format(" ".join(cp_cmd)))
proc = sp.Popen(cp_cmd, stdout=sp.DEVNULL)
if dir == "train" or dir == "val":
print("-- Generating labels at that dir in parallel")
if dir == "train":
label2txt(args.train_labels, dst)
if dir == "val":
label2txt(args.val_labels, dst)
proc.wait()
print("-- Done copying")
if dir == "train" or dir == "val":
print("-- Removing images without corresponding labels")
rm_imgs_without_labels(dst)
# Create names file
names = [''] * len(LABEL_MAP)
for label, num in LABEL_MAP.items():
names[num] = label
names_file = args.output_dir / "bdd100k.names"
with open(names_file, "w+") as f:
f.write("\n".join(names))
|
py | 1a41153381b7663912ae7513821143c79b4d3261 | #!/usr/bin/env python3
"""Python S3 Manager"""
import sys
import os
import pandas as pd
import boto3
from botocore.exceptions import ClientError
from shapely.geometry import box
class s3UploadDownload:
"""
A class to upload/pull files to/from S3.
"""
def __init__(self, bucket_name=None):
"""
constructor of the class.
"""
session = boto3.Session(
aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY')
)
self.client = session.client('s3')
self.resource = session.resource('s3')
self.bucket_name = bucket_name
def pull_file(self, file_name):
"""
The function to download a file on the S3 bucket to the local instance.
Parameters:
file_name: name of the file on S3 bucket to pull.
"""
self.check_bucket_exists()
try:
self.client.download_file(self.bucket_name, file_name, file_name)
except ClientError:
return False
return True
def put_file(self, file_name, object_name=None):
"""
The function to upload a file to the S3 bucket.
Parameters:
file_name: name and path of the file to upload
"""
if object_name is None:
object_name = file_name
self.check_bucket_exists()
try:
self.client.upload_file(file_name, self.bucket_name, object_name)
except ClientError:
return False
return True
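    def check_bucket_exists(self):
        """
        Minimal sketch of the bucket check referenced by pull_file/put_file;
        the original implementation is not included in this snippet. It raises
        if the configured bucket cannot be reached with the current credentials.
        """
        try:
            self.client.head_bucket(Bucket=self.bucket_name)
        except ClientError:
            raise ValueError(
                "Bucket {} does not exist or is not accessible".format(
                    self.bucket_name))
# Hypothetical usage (bucket and file names below are placeholders):
#   s3 = s3UploadDownload(bucket_name="my-example-bucket")
#   s3.put_file("results.csv")                   # upload with the same key
#   s3.pull_file("results.csv")                  # download back to the cwd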
|
py | 1a41158cc7a8395e8bc7c86245880ba86d1c3dfa | from urllib import request
import xml.etree.ElementTree as ET
import os
wnid = "n02783161"
dirpath = os.path.join("raw_images",wnid)
if os.path.isdir(dirpath) == False:
os.mkdir(dirpath)
IMG_LIST_URL="http://www.image-net.org/api/text/imagenet.synset.geturls.getmapping?wnid={}"
url = IMG_LIST_URL.format(wnid)
with request.urlopen(url) as response:
html = response.read()
data = html.decode()
data = data.split()
fnames = data[::2]
urls = data[1::2]
files = os.listdir(os.path.join("bbox","Annotation",wnid))
annotated_index = [fnames.index(f.split('.')[0]) for f in files]
print(annotated_index)
# iterate only over annotated_index (images that have bounding-box annotations) instead of all fnames
#for i in range(len(fnames)):
for i in annotated_index:
try:
print("Found:",urls[i],fnames[i])
with request.urlopen(urls[i]) as response:
img = response.read()
with open(os.path.join(dirpath,fnames[i]),'wb') as f:
f.write(img)
except:
print("Not Found:" + urls[i]) |
py | 1a4115cfcfa4cada59c49abd49bbcfdcebd6c396 | #!/usr/bin/env python
#
# Use the raw transactions API to spend BONTEs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bontecoind or bontecoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bontecoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bontecoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bontecoin")
return os.path.expanduser("~/.bontecoin")
def read_bitcoin_config(dbdir):
"""Read the bontecoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bontecoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bontecoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 51475 if testnet else 51473
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bontecoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bontecoind):
info = bontecoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bontecoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bontecoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bontecoind):
address_summary = dict()
address_to_account = dict()
for info in bontecoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bontecoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bontecoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bontecoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bontecoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bontecoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bontecoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bontecoind.createrawtransaction(inputs, outputs)
signed_rawtx = bontecoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bontecoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bontecoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bontecoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bontecoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bontecoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        if kb > 1 and (total_in-total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get BONTEs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send BONTEs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bontecoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bontecoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bontecoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bontecoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bontecoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bontecoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bontecoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
py | 1a4115e7855ca6b6edbd8caf5bf6f02e9defe4b2 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trapped ion devices, gates, and compiling utilties."""
from cirq.ion.ion_gates import (
ms,
)
from cirq.ion.ion_decomposition import (
two_qubit_matrix_to_ion_operations,
)
from cirq.ion.ion_device import (
IonDevice,
)
from cirq.ion.convert_to_ion_gates import (
ConvertToIonGates,
)
|
py | 1a4116f1ffb30e3884e2b19a130beb43c60e42f5 | """
A theano / pylearn2 wrapper for cuda-convnet's convFilterActs function.
"""
__authors__ = "David Warde-Farley and Ian Goodfellow"
__copyright__ = "Copyright 2010-2013, Universite de Montreal"
__credits__ = ["David Warde-Farley and Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
"""
This module may contain code copied directly or modified from cuda-convnet.
The copyright and licensing notice for this code is reproduced below:
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
"""
from theano.gradient import DisconnectedType
from theano.gof import Apply
from theano.sandbox.cuda import CudaNdarrayType
from theano.sandbox.cuda.basic_ops import as_cuda_ndarray_variable
from pylearn2.sandbox.cuda_convnet.base_acts import BaseActs
from pylearn2.sandbox.cuda_convnet.base_acts import UnimplementedError
# Must delay import to avoid circular import problem
FilterActs = None
WeightActs = None
class ImageActs(BaseActs):
"""
Transpose of FilterActs.
This is intended to be a very low-level, performance-oriented op.
It will not try to fix the input for you. That would slow it down.
The input must be in the right format. If not, it raises an exception.
Currently, this op must be inserted manually, not by optimizations.
Note that below the term "input" refers to the input to FilterActs.
This op does the tranpose of that, so its output is sized like
FilterActs' input.
* hid_acts: (output channels, rows, cols, batch_size)
* filters: (input channels, filter rows, filter cols, output channels).
Rows must be the same as cols. Output channels must be a multiple
of 16.
* output: (input channels, input rows, input cols, batch size)
Notes
-----
All of these convolution routines are optimized for the case when
the number of images (i.e. the minibatch size) is a multiple of 128.
Other batch sizes will work, but Alex "made no attempt whatsoever
to make them work fast."
"""
# __eq__ and __hash__ are defined in BaseActs.
# If you add an __init__ method that adds new members to ImageActs,
# you may need to implement a new version of __eq__ and __hash__
# in ImageActs, that considers these parameters.
def make_node(self, hid_acts, filters, output_shape=None):
"""
.. todo::
WRITEME
Parameters
----------
hid_acts : WRITEME
filters : WRITEME
output_shape : 2-element TensorVariable, optional
The spatial shape of the image
"""
if not isinstance(hid_acts.type, CudaNdarrayType):
raise TypeError("ImageActs: expected hid_acts.type to be CudaNdarrayType, "
"got " + str(hid_acts.type))
if not isinstance(filters.type, CudaNdarrayType):
raise TypeError("ImageActs: expected filters.type to be CudaNdarrayType, "
"got " + str(filters.type))
if output_shape is None:
if self.stride != 1:
raise ValueError("You must specify an output_shape for ImageActs if the stride is not 1.")
hid_shape = hid_acts.shape[1:3]
kernel_shape = filters.shape[1:3]
output_shape = hid_shape + kernel_shape - 2 * self.pad - 1
assert hid_acts.ndim == 4
assert filters.ndim == 4
channels_broadcastable = filters.type.broadcastable[3]
batch_broadcastable = hid_acts.type.broadcastable[3]
# Computing whether the rows and columns are broadcastable requires doing
# arithmetic on quantities that are known only at runtime, like the specific
# shape of the image and kernel
rows_broadcastable = False
cols_broadcastable = False
targets_broadcastable = (channels_broadcastable, rows_broadcastable,
cols_broadcastable, batch_broadcastable)
targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
targets = targets_type()
return Apply(self, [hid_acts, filters, output_shape], [targets])
def flops(self, inputs, outputs):
""" Useful with the hack in profilemode to print the MFlops"""
hid_acts, filters, output_shape = inputs
out, = outputs
assert hid_acts[0] == filters[3]
flops = (hid_acts[3] * filters[0] * hid_acts[0] *
filters[1] * filters[2] *
hid_acts[1] * hid_acts[2] * 2)
return flops
def connection_pattern(self, node):
"""
.. todo::
WRITEME
"""
return [[1], [1], [0]]
def grad(self, inputs, g_outputs):
"""
.. todo::
WRITEME
"""
hid_acts, filters, output_shape = inputs
g_images, = g_outputs
g_images = as_cuda_ndarray_variable(g_images)
assert not isinstance(g_images, list)
global FilterActs
global WeightActs
if FilterActs is None:
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from pylearn2.sandbox.cuda_convnet.weight_acts import WeightActs
g_filters = WeightActs(stride=self.stride,
partial_sum=self.partial_sum, pad=self.pad)(
g_images, hid_acts, filters.shape[1:3])[0]
assert not isinstance(g_filters, list)
g_hid_acts = FilterActs(stride=self.stride, pad=self.pad,
partial_sum=self.partial_sum)(g_images, filters)
return [g_hid_acts, g_filters, DisconnectedType()()]
def c_code(self, node, name, inputs, outputs, sub):
"""
.. todo::
WRITEME
"""
hid_acts, filters, output_shape = inputs
targets, = outputs
fail = sub['fail']
# convFilterActs will multiply targets by scaleTargets
# then add scaleOutput * (the convolution value)
# We could make use of this to implement an inplace
# addconv op but for this op we just want to compute
# the convolution so we set them to 0 and 1 respectively
# Note: there is another version of convFilterActs that
# does not take these arguments, but it is just a wrapper
# around the version that does take them, so we save
# a function call by using the version that we use.
basic_setup = """
#define scaleTargets 0
#define scaleOutput 1
"""
if self.dense_connectivity:
basic_setup += """
#define numGroups 1
"""
basic_setup += """
#define paddingStart (-%d)
""" % self.pad
basic_setup += """
#define moduleStride %d
""" % self.stride
if self.copy_non_contiguous:
raise UnimplementedError()
else:
basic_setup += "#define IMAGEACTS_COPY_NON_CONTIGUOUS 0\n"
# The amount of braces that must be closed at the end
num_braces = 0
# Convert images int nv_hid_acts, an NVMatrix, for compatibility
# with the cuda-convnet functions
setup_nv_hid_acts = self._argument_contiguity_check("hid_acts") + """
if (%(hid_acts)s->nd != 4)
{
PyErr_Format(PyExc_ValueError,
"hid_acts must have nd=4, got nd=%%i", %(hid_acts)s->nd);
%(fail)s;
}
{ //setup_nv_hid_acts brace 1
const int *hid_act_dims = CudaNdarray_HOST_DIMS(%(hid_acts)s);
const int numFilters = hid_act_dims[0];
const int hidActsSizeY = hid_act_dims[1];
const int hidActsSizeX = hid_act_dims[2];
//printf("hidActs shape: %%d %%d\\n", hidActsSizeY, hidActsSizeX);
const int batch_size = hid_act_dims[3];
NVMatrix nv_hid_acts(%(hid_acts)s, numFilters * hidActsSizeY *
hidActsSizeX, batch_size, "image_acts:nv_hid_acts");
int img_channels = -1;
"""
num_braces += 1
# Convert filters into nv_filters, an NVMatrix, for compatibility
# with the cuda-convnet functions
setup_nv_filters = self._argument_contiguity_check("filters") + """
if (%(filters)s->nd != 4)
{
PyErr_Format(PyExc_ValueError,
"filters must have nd=4, got nd=%%i", %(filters)s->nd);
%(fail)s;
}
{ // setup_nv_filters brace 1
const int * filters_dims = CudaNdarray_HOST_DIMS(%(filters)s);
const int filter_channels = filters_dims[0];
const int filter_rows = filters_dims[1];
const int filter_cols = filters_dims[2];
const int num_filters = filters_dims[3];
if ((num_filters %% (numGroups * 16)) != 0)
{
PyErr_Format(PyExc_ValueError,
"Each group must have a multiple of 16 channels, but num_filters %%%% (numGroups * 16) = %%d %%%% ( %%d * 16) = %%d.",
num_filters, numGroups, num_filters %% (numGroups * 16));
%(fail)s;
}
if (filter_rows != filter_cols)
{
PyErr_Format(PyExc_ValueError,
"filter must be square, but have shape (%%d, %%d).",
filter_rows, filter_cols);
%(fail)s;
}
else if (moduleStride > filter_rows) {
PyErr_Format(PyExc_ValueError,
"stride %%d greater than filter size (%%d, %%d)",
moduleStride, filter_rows, filter_cols);
%(fail)s;
}
{ // setup_nv_filters brace 2
NVMatrix nv_filters(%(filters)s, filter_channels * filter_rows *
filter_cols, num_filters, "img_acts:nv_filters");
"""
num_braces += 2
#target_rows = "(hidActsSizeY + filter_rows + 2 * paddingStart) * moduleStride - 1"
#target_cols = "(hidActsSizeX + filter_cols + 2 * paddingStart) * moduleStride - 1"
setup_nv_targets = """
#define numModulesY hid_act_dims[1]
#define numModulesX hid_act_dims[2]
npy_intp *shape_dims = PyArray_DIMS(%(output_shape)s);
npy_intp target_rows, target_cols;
PyArrayObject *casted_shape;
PyArray_Descr *intp_dtype;
if (PyArray_NDIM(%(output_shape)s) != 1) {
PyErr_Format(PyExc_ValueError,
"output shape must be a vector, got %%d-tensor",
PyArray_NDIM(%(output_shape)s));
%(fail)s;
}
else if (shape_dims[0] != 2)
{
PyErr_Format(PyExc_ValueError,
"output shape must be length 2, got %%d",
(int)shape_dims[0]);
%(fail)s;
}
else if ((PyArray_DESCR(%(output_shape)s))->kind != 'i' &&
(PyArray_DESCR(%(output_shape)s))->kind != 'u')
{
PyErr_SetString(PyExc_TypeError,
"output shape must have integer or uint dtype");
%(fail)s;
}
intp_dtype = PyArray_DescrFromType(NPY_INTP);
casted_shape = (PyArrayObject *)PyArray_CastToType(%(output_shape)s,
intp_dtype, 0);
target_rows = *((npy_intp *)PyArray_GETPTR1(casted_shape, 0));
target_cols = *((npy_intp *)PyArray_GETPTR1(casted_shape, 1));
{
int target_dims [] = {
filter_channels,
target_rows,
target_cols,
batch_size };
#define filterSize filter_rows
#define MAX_ROWS (paddingStart + (numModulesY-1) * moduleStride + filterSize)
if ((target_rows > MAX_ROWS)
|| (paddingStart + (numModulesX-1) * moduleStride + filterSize < target_cols))
{
PyErr_Format(PyExc_ValueError, "pylearn2.sandbox.cuda_convnet.image_acts.ImageActs: incompatible target image size (%%d, %%d), maximum (%%d, %%d)",
(int)target_rows, (int)target_cols,
(int)MAX_ROWS,
(int)(paddingStart + (numModulesX-1) * moduleStride + filterSize));
%(fail)s;
}
if (CudaNdarray_prep_output(& %(targets)s, 4, target_dims))
{
%(fail)s;
}
{ // setup_nv_filters brace # 1
const int imgSizeY = (int)target_rows;
const int imgSizeX = (int)target_cols;
NVMatrix nv_targets(%(targets)s, target_dims[0] * target_dims[1]
* target_dims[2], target_dims[3], "image_acts: nv_targets");
"""
num_braces += 2
# note: numFilters is not specified here. it is determined by
# nv_filters.getNumCols()
#
# note: the size of the filters is determined by dividing
# nv_filters.getNumRows() by numFilterColors
#
do_convolution = """
convImgActs(nv_hid_acts, nv_filters, nv_targets,
imgSizeY, imgSizeX, numModulesY,
paddingStart, moduleStride, filter_channels,
numGroups);
"""
braces = '}' * num_braces
rval = basic_setup + \
setup_nv_hid_acts + \
setup_nv_filters + \
setup_nv_targets + \
do_convolution + \
braces
rval = rval % locals()
return rval
def c_code_cache_version(self):
"""
.. todo::
WRITEME
"""
return (9,)
|
py | 1a4118450c3e5e3ef9047d2bacd61c92cc023af0 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from scipy.ndimage import gaussian_filter
import numpy as np
class AffinityRefinementOperation(metaclass=abc.ABCMeta):
def check_input(self, X):
"""Check the input to the refine() method.
Args:
X: the input to the refine() method
Raises:
TypeError: if X has wrong type
ValueError: if X has wrong shape, etc.
"""
if not isinstance(X, np.ndarray):
raise TypeError("X must be a numpy array")
shape = X.shape
if len(shape) != 2:
raise ValueError("X must be 2-dimensional")
if shape[0] != shape[1]:
raise ValueError("X must be a square matrix")
@abc.abstractmethod
def refine(self, X):
"""Perform the refinement operation.
Args:
X: the affinity matrix, of size (n_samples, n_samples)
Returns:
a matrix of the same size as X
"""
pass
class CropDiagonal(AffinityRefinementOperation):
"""Crop the diagonal.
Replace diagonal element by the max non-diagonal value of row.
After this operation, the matrix has similar properties to a standard
Laplacian matrix.
This also helps to avoid the bias during Gaussian blur and normalization.
"""
def refine(self, X):
self.check_input(X)
Y = np.copy(X)
np.fill_diagonal(Y, 0.0)
di = np.diag_indices(Y.shape[0])
Y[di] = Y.max(axis=1)
return Y
class GaussianBlur(AffinityRefinementOperation):
"""Apply Gaussian blur."""
def __init__(self, sigma=1):
self.sigma = sigma
def refine(self, X):
self.check_input(X)
return gaussian_filter(X, sigma=self.sigma)
class RowWiseThreshold(AffinityRefinementOperation):
"""Apply row wise thresholding."""
def __init__(self,
p_percentile=0.95,
thresholding_soft_multiplier=0.01,
thresholding_with_row_max=False):
self.p_percentile = p_percentile
self.multiplier = thresholding_soft_multiplier
self.thresholding_with_row_max = thresholding_with_row_max
def refine(self, X):
self.check_input(X)
Y = np.copy(X)
if self.thresholding_with_row_max:
# row_max based thresholding
row_max = Y.max(axis=1)
row_max = np.expand_dims(row_max, axis=1)
is_smaller = Y < (row_max * self.p_percentile)
else:
# percentile based thresholding
row_percentile = np.percentile(Y, self.p_percentile * 100, axis=1)
row_percentile = np.expand_dims(row_percentile, axis=1)
is_smaller = Y < row_percentile
Y = (Y * np.invert(is_smaller)) + (Y * self.multiplier * is_smaller)
return Y
class Symmetrize(AffinityRefinementOperation):
"""The Symmetrization operation."""
def refine(self, X):
self.check_input(X)
return np.maximum(X, np.transpose(X))
class Diffuse(AffinityRefinementOperation):
"""The diffusion operation."""
def refine(self, X):
self.check_input(X)
return np.matmul(X, np.transpose(X))
class RowWiseNormalize(AffinityRefinementOperation):
"""The row wise max normalization operation."""
def refine(self, X):
self.check_input(X)
Y = np.copy(X)
row_max = Y.max(axis=1)
Y /= np.expand_dims(row_max, axis=1)
return Y
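# A minimal, hypothetical usage sketch (not part of the original module): the
# chain below mirrors a typical refinement pipeline applied to a raw affinity
# matrix before spectral clustering; the ordering and parameters are assumptions.
if __name__ == "__main__":
    affinity = np.random.rand(6, 6)
    affinity = (affinity + affinity.T) / 2  # start from a symmetric matrix
    pipeline = [
        CropDiagonal(),
        GaussianBlur(sigma=1),
        RowWiseThreshold(p_percentile=0.95),
        Symmetrize(),
        Diffuse(),
        RowWiseNormalize(),
    ]
    for operation in pipeline:
        affinity = operation.refine(affinity)
    print(affinity.shape)  # still (6, 6)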
|
py | 1a411856a8b2cf4dbeed9d0f28532c2ea8f8323a | PHASES = ["build", "test"]
CUDA_VERSIONS = [
"102",
"111",
"113",
"115",
]
ROCM_VERSIONS = [
"4.3.1",
"4.5.2",
]
ROCM_VERSION_LABELS = ["rocm" + v for v in ROCM_VERSIONS]
GPU_VERSIONS = [None] + ["cuda" + v for v in CUDA_VERSIONS] + ROCM_VERSION_LABELS
STANDARD_PYTHON_VERSIONS = [
"3.7",
"3.8",
"3.9",
"3.10"
]
|
py | 1a411906817516f33aca16d826d42105bf59cd77 | import sys
sys.path.append('./')
import unittest
from Added import Added
class AddedTest(unittest.TestCase):
def test_add(self):
added = Added()
expected = added.add(1,2)
self.assertEqual(3, expected)
if __name__ == '__main__':
unittest.main()
|
py | 1a4119281b2555c44da7fcab6c73f620dc9fda99 | # flake8: noqa
from catalyst_rl.contrib.rl import *
|
py | 1a411a3a21312b6b8fd9d0fd5578b96405717e4d | import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name="size",
parent_name="densitymapbox.colorbar.tickfont",
**kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
|
py | 1a411ac930b729a07a34650af7c7f1554eafab86 | import smtplib
import socket
import os
import subprocess
import datetime
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
# PATH_TO_BE_MONITOR is the path to be monitored, e.g. "/"
# If the current percentage of free space under {PATH_TO_BE_MONITOR} is lower than {MIN_FREE_PERCENT},
# then a disk sweep will be performed and the contacts in the {TO_EMAILS} will get warned
PATH_TO_BE_MONITOR = "/path/to/be/monitored"
# PATHS_CAN_BE_CLEAN contains the paths could be swept
# These paths are typically paths to logs and tmp images you want to clean after some days
PATHS_CAN_BE_CLEAN = ["/path/can/be/deleted/1", "/path/can/be/deleted/2"]
# EXPIRY_DAYS is the expiry time of files to be swept.
# The files under {PATH_TO_BE_MONITOR} with modified-date more than {EXPIRY_DAYS} days ago will be deleted
EXPIRY_DAYS = 30
# TO_EMAILS is the list of person you want to notify upon disk-full
TO_EMAILS = ["[email protected]", "[email protected]"]
# TOOL_NAME is the name of this script, which will be shown as the 'From' bar in the warn email
TOOL_NAME = "system-monitor"
# MIN_FREE_PERCENT is the minimum percentage of free space required on the specified disk.
# see the description of PATH_TO_BE_MONITOR
MIN_FREE_PERCENT = 10
# PATH_LOGS is the path to the logs
# will be created if it does not exist
PATH_LOGS = "/data/disc-sweeper"
# EMAIL_SMTP_SERVER is your SMTP server
EMAIL_SMTP_SERVER = 'smtp.163.com'
# EMAIL_ACCOUNT is your account to be used by this script for sending the email
EMAIL_ACCOUNT = "[email protected]"
# EMAIL_PASSWD is the password of your email account
# if you use Gmail, please enable 'https://myaccount.google.com/lesssecureapps'
# if you use 163 to send your email, please set the 'authorization code' first, and use it as your password
EMAIL_PASSWD = "your-password"
def sendToEmail(fromaddr, toaddrs, subject, body):
account = EMAIL_ACCOUNT
password = EMAIL_PASSWD
# fill the msg body
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = ', '.join(toaddrs)
msg['Subject'] = subject
msg.attach(MIMEText(body, 'plain'))
text = msg.as_string()
# login your smtp server using your account
    server = smtplib.SMTP_SSL(EMAIL_SMTP_SERVER, 465, timeout=15)  # implicit-SSL SMTP is normally served on port 465 (587 expects STARTTLS)
server.login(account, password)
# send the email and logout
server.sendmail(account, toaddrs, text)
server.quit()
def notify_disc_full(freePercent):
toaddr = TO_EMAILS
fromaddr = TOOL_NAME
hostname = socket.gethostname()
hostip = socket.gethostbyname(hostname)
subject = "Warn: Disk of {mon_path} on {host} [{ip}] is nearly full! Only {free}% free now".format(
mon_path=PATH_TO_BE_MONITOR,
host = hostname,
ip = hostip,
free = freePercent
)
df_res = subprocess.check_output(['df', '-h', PATH_TO_BE_MONITOR])
du_res = subprocess.check_output(['du', '-h', '-d', '1', PATH_TO_BE_MONITOR])
# customize your email body here
body = """
The free disk space on {mon_path} is lower than {min_free_percent}% !
A disc auto-clean process will be performed on following paths:
{clean_path}
======================================
df -h {mon_path}
{df}
du -hd 1 {mon_path}
{du}
======================================
//NOTE:
DO NOT REPLY!
This is an email sent by a script automatically
    System administrators:
[email protected];
[email protected];
""".format(
df = df_res,
du = du_res,
clean_path = '\n'.join(PATHS_CAN_BE_CLEAN),
mon_path=PATH_TO_BE_MONITOR,
min_free_percent = MIN_FREE_PERCENT
)
sendToEmail(fromaddr, toaddr, subject, body)
def cleanDisc(path, mtime):
subprocess.call(["mkdir", "-p", PATH_LOGS])
    f = open(os.path.join(PATH_LOGS, "sweep-" + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") + ".log"), "w")
findCmd = ["find"] + path + ["-mindepth", "1", "-mtime", "+"+str(mtime), "-delete", "-print"]
subprocess.call(findCmd, stdout=f, stderr=f)
f.close()
def getFreeDiscPercentage(path):
s = os.statvfs(path)
total_disc = s.f_frsize * s.f_blocks
avail_disc = s.f_frsize * s.f_bavail
return round((float(avail_disc)/total_disc)*100, 2)
## program entrance
freeSpacePercent = getFreeDiscPercentage(PATH_TO_BE_MONITOR)
if freeSpacePercent < MIN_FREE_PERCENT:
    print "disc is nearly full! At least", MIN_FREE_PERCENT, "% free space is needed."
notify_disc_full(freeSpacePercent)
cleanDisc(PATHS_CAN_BE_CLEAN, EXPIRY_DAYS)
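# One possible way to run this monitor periodically (illustrative only, not part
# of the original deployment): add a crontab entry that invokes it hourly, e.g.
#   0 * * * * /usr/bin/python /path/to/this_script.py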
|
py | 1a411c0febefd2b048ce4afb0ff475b9ecc775ea | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class IS_IS_Level_1_MCAST_Link_State_PDU(Base):
__slots__ = ()
_SDM_NAME = 'isisL1McastLinkStatePDU'
_SDM_ATT_MAP = {
'Common header': 'isisL1McastLinkStatePDU.header.commonHeaderL1',
'Fixed header': 'isisL1McastLinkStatePDU.header.fixedHeaderL1LSP',
'TLV header': 'isisL1McastLinkStatePDU.header.variableHeaderL1LSP',
}
def __init__(self, parent):
super(IS_IS_Level_1_MCAST_Link_State_PDU, self).__init__(parent)
@property
def Common_header(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Common header']))
@property
def Fixed_header(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Fixed header']))
@property
def TLV_header(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TLV header']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
|
py | 1a411c29ae9b3461e919a74498d2c32f7a840ad3 | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import pdb
import torch
import numpy as np
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], 'pretrained_model/encoder'))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
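# A small smoke-test sketch added for illustration (not part of the original
# file): build a randomly initialised ResNet-18 and push a dummy batch through
# it. The 224x224 input size is assumed because of the fixed 7x7 average pool above.
if __name__ == "__main__":
    net = resnet18(pretrained=False, num_classes=10)
    dummy = torch.randn(2, 3, 224, 224)   # batch of 2 RGB images
    logits = net(dummy)
    print(logits.shape)                   # expected: torch.Size([2, 10])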
|
py | 1a411da500c32b9c682e14ddf84c6add09e3cff2 | def modifyGlobal(value):
global globe
globe = value
modifyGlobal(10)
print(globe)
modifyGlobal(11)
print(globe) |
py | 1a411dcdba014916bea8097a7e1313b1e6d569fb | import math
import torch
from torch.distributions import constraints
from pyro.nn import PyroModule, pyro_method, PyroParam
root_three = math.sqrt(3.0)
root_five = math.sqrt(5.0)
five_thirds = 5.0 / 3.0
class MaternKernel(PyroModule):
"""
Provides the building blocks for representing univariate Gaussian Processes (GPs)
with Matern kernels as state space models.
:param float nu: The order of the Matern kernel (one of 0.5, 1.5 or 2.5)
:param int num_gps: the number of GPs
:param torch.Tensor length_scale_init: optional `num_gps`-dimensional vector of initializers
for the length scale
:param torch.Tensor kernel_scale_init: optional `num_gps`-dimensional vector of initializers
for the kernel scale
**References**
[1] `Kalman Filtering and Smoothing Solutions to Temporal Gaussian Process Regression Models`,
Jouni Hartikainen and Simo Sarkka.
[2] `Stochastic Differential Equation Methods for Spatio-Temporal Gaussian Process Regression`,
Arno Solin.
"""
def __init__(self, nu=1.5, num_gps=1, length_scale_init=None, kernel_scale_init=None):
if nu not in [0.5, 1.5, 2.5]:
raise NotImplementedError("The only supported values of nu are 0.5, 1.5 and 2.5")
self.nu = nu
self.state_dim = {0.5: 1, 1.5: 2, 2.5: 3}[nu]
self.num_gps = num_gps
if length_scale_init is None:
length_scale_init = torch.ones(num_gps)
assert length_scale_init.shape == (num_gps,)
if kernel_scale_init is None:
kernel_scale_init = torch.ones(num_gps)
assert kernel_scale_init.shape == (num_gps,)
super().__init__()
self.length_scale = PyroParam(length_scale_init, constraint=constraints.positive)
self.kernel_scale = PyroParam(kernel_scale_init, constraint=constraints.positive)
if self.state_dim > 1:
for x in range(self.state_dim):
for y in range(self.state_dim):
mask = torch.zeros(self.state_dim, self.state_dim)
mask[x, y] = 1.0
self.register_buffer("mask{}{}".format(x, y), mask)
@pyro_method
def transition_matrix(self, dt):
"""
Compute the (exponentiated) transition matrix of the GP latent space.
The resulting matrix has layout (num_gps, old_state, new_state), i.e. this
matrix multiplies states from the right.
See section 5 in reference [1] for details.
:param float dt: the time interval over which the GP latent space evolves.
:returns torch.Tensor: a 3-dimensional tensor of transition matrices of shape
(num_gps, state_dim, state_dim).
"""
if self.nu == 0.5:
rho = self.length_scale.unsqueeze(-1).unsqueeze(-1)
return torch.exp(-dt / rho)
elif self.nu == 1.5:
rho = self.length_scale.unsqueeze(-1).unsqueeze(-1)
dt_rho = dt / rho
trans = (1.0 + root_three * dt_rho) * self.mask00 + \
(-3.0 * dt_rho / rho) * self.mask01 + \
dt * self.mask10 + \
(1.0 - root_three * dt_rho) * self.mask11
return torch.exp(-root_three * dt_rho) * trans
elif self.nu == 2.5:
rho = self.length_scale.unsqueeze(-1).unsqueeze(-1)
dt_rho = root_five * dt / rho
dt_rho_sq = dt_rho.pow(2.0)
dt_rho_cu = dt_rho.pow(3.0)
dt_rho_qu = dt_rho.pow(4.0)
dt_sq = dt ** 2.0
trans = (1.0 + dt_rho + 0.5 * dt_rho_sq) * self.mask00 + \
(-0.5 * dt_rho_cu / dt) * self.mask01 + \
((0.5 * dt_rho_qu - dt_rho_cu) / dt_sq) * self.mask02 + \
((dt_rho + 1.0) * dt) * self.mask10 + \
(1.0 + dt_rho - dt_rho_sq) * self.mask11 + \
((dt_rho_cu - 3.0 * dt_rho_sq) / dt) * self.mask12 + \
(0.5 * dt_sq) * self.mask20 + \
((1.0 - 0.5 * dt_rho) * dt) * self.mask21 + \
(1.0 - 2.0 * dt_rho + 0.5 * dt_rho_sq) * self.mask22
return torch.exp(-dt_rho) * trans
@pyro_method
def stationary_covariance(self):
"""
Compute the stationary state covariance. See Eqn. 3.26 in reference [2].
:returns torch.Tensor: a 3-dimensional tensor of covariance matrices of shape
(num_gps, state_dim, state_dim).
"""
if self.nu == 0.5:
sigmasq = self.kernel_scale.pow(2).unsqueeze(-1).unsqueeze(-1)
return sigmasq
elif self.nu == 1.5:
sigmasq = self.kernel_scale.pow(2).unsqueeze(-1).unsqueeze(-1)
rhosq = self.length_scale.pow(2).unsqueeze(-1).unsqueeze(-1)
p_infinity = self.mask00 + (3.0 / rhosq) * self.mask11
return sigmasq * p_infinity
elif self.nu == 2.5:
sigmasq = self.kernel_scale.pow(2).unsqueeze(-1).unsqueeze(-1)
rhosq = self.length_scale.pow(2).unsqueeze(-1).unsqueeze(-1)
p_infinity = 0.0
p_infinity = self.mask00 + \
(five_thirds / rhosq) * (self.mask11 - self.mask02 - self.mask20) + \
(25.0 / rhosq.pow(2.0)) * self.mask22
return sigmasq * p_infinity
@pyro_method
def process_covariance(self, A):
"""
Given a transition matrix `A` computed with `transition_matrix` compute the
the process covariance as described in Eqn. 3.11 in reference [2].
:returns torch.Tensor: a batched covariance matrix of shape (num_gps, state_dim, state_dim)
"""
assert A.shape[-3:] == (self.num_gps, self.state_dim, self.state_dim)
p = self.stationary_covariance()
q = p - torch.matmul(A.transpose(-1, -2), torch.matmul(p, A))
return q
@pyro_method
def transition_matrix_and_covariance(self, dt):
"""
Get the transition matrix and process covariance corresponding to a time interval `dt`.
:param float dt: the time interval over which the GP latent space evolves.
:returns tuple: (`transition_matrix`, `process_covariance`) both 3-dimensional tensors of
shape (num_gps, state_dim, state_dim)
"""
trans_matrix = self.transition_matrix(dt)
process_covar = self.process_covariance(trans_matrix)
return trans_matrix, process_covar
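# A brief usage sketch added for illustration (not from the original file):
# instantiate the kernel for two Matern-3/2 GPs and inspect the state-space
# matrices for one discretisation step.
if __name__ == "__main__":
    kernel = MaternKernel(nu=1.5, num_gps=2)
    trans, covar = kernel.transition_matrix_and_covariance(dt=0.5)
    # both tensors have shape (num_gps, state_dim, state_dim) == (2, 2, 2)
    print(trans.shape, covar.shape)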
|
py | 1a411debd67f6794daaa591f653e4eec9d4108d0 | import torch
import torch.nn as nn
import numpy as np
from mushroom_rl.policy.torch_policy import TorchPolicy, GaussianTorchPolicy
def abstract_method_tester(f, *args):
try:
f(*args)
except NotImplementedError:
pass
else:
assert False
class Network(nn.Module):
def __init__(self, input_shape, output_shape, n_features, **kwargs):
super(Network, self).__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self._h1 = nn.Linear(n_input, n_features)
self._h2 = nn.Linear(n_features, n_features)
self._h3 = nn.Linear(n_features, n_output)
nn.init.xavier_uniform_(self._h1.weight,
gain=nn.init.calculate_gain('tanh'))
nn.init.xavier_uniform_(self._h2.weight,
gain=nn.init.calculate_gain('tanh'))
nn.init.xavier_uniform_(self._h3.weight,
gain=nn.init.calculate_gain('linear'))
def forward(self, state, **kwargs):
features1 = torch.tanh(self._h1(torch.squeeze(state, -1).float()))
features2 = torch.tanh(self._h2(features1))
a = self._h3(features2)
return a
def test_torch_policy():
tmp = TorchPolicy(False)
abstract_method_tester(tmp.draw_action_t, None)
abstract_method_tester(tmp.log_prob_t, None, None)
abstract_method_tester(tmp.entropy_t, None)
abstract_method_tester(tmp.distribution_t, None)
abstract_method_tester(tmp.set_weights, None)
abstract_method_tester(tmp.get_weights)
abstract_method_tester(tmp.parameters)
tmp.reset()
tmp.use_cuda
def test_gaussian_torch_policy():
np.random.seed(88)
torch.manual_seed(88)
pi = GaussianTorchPolicy(Network, (3,), (2,), n_features=50)
state = np.random.rand(3)
action = pi.draw_action(state)
action_test = np.array([-0.21276927, 0.27437747])
assert np.allclose(action, action_test)
p_sa = pi(state, action)
p_sa_test = 0.07710557966732147
assert np.allclose(p_sa, p_sa_test)
entropy = pi.entropy()
entropy_test = 2.837877
assert np.allclose(entropy, entropy_test)
|
py | 1a411e0900ad1e0d37dff3492fd594ce2159f56f | import gym
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import numpy as np
import random
from matplotlib import pyplot as plt
from custom_gym.cartpole import CustomCartPoleEnv
import timeit
# AGENT/NETWORK HYPERPARAMETERS
EPSILON_INITIAL = 1.0 # exploration rate
EPSILON_DECAY = 0.997
EPSILON_MIN = 0.01
ALPHA = 0.001 # learning rate
GAMMA = 0.95 # discount factor
TAU = 0.3 # target network soft update hyperparameter
EXPERIENCE_REPLAY_BATCH_SIZE = 20
AGENT_MEMORY_LIMIT = 10000
STEPS_BEFORE_REPLAY = 10
OBSERVATION_SPACE_DIMS = 4
ACTION_SPACE = [0,1]
def create_dqn(action_space, observation_space):
nn = Sequential()
nn.add(Dense(128, input_dim=OBSERVATION_SPACE_DIMS, activation="relu"))
nn.add(Dense(128, activation='relu'))
nn.add(Dense(len(ACTION_SPACE), activation='linear'))
nn.compile(loss='mse', optimizer=Adam(lr=ALPHA))
return nn
class DoubleDQNAgent(object):
def __init__(self, action_space, observation_space):
self.memory = []
self.action_space = action_space
self.observation_space = observation_space
self.online_network = create_dqn(action_space, observation_space)
self.target_network = create_dqn(action_space, observation_space)
self.epsilon = EPSILON_INITIAL
self.has_talked = False
def act(self, state):
if self.epsilon > np.random.rand():
# explore
return np.random.choice(self.action_space)
else:
# exploit
state = self._reshape_state_for_net(state)
q_values = self.online_network.predict(state)[0]
return np.argmax(q_values)
def experience_replay(self):
minibatch = random.sample(self.memory, EXPERIENCE_REPLAY_BATCH_SIZE)
minibatch_new_q_values = []
for state, action, reward, next_state, done in minibatch:
state = self._reshape_state_for_net(state)
experience_new_q_values = self.online_network.predict(state)[0]
if done:
q_update = reward
else:
next_state = self._reshape_state_for_net(next_state)
# using online network to SELECT action
online_net_selected_action = np.argmax(self.online_network.predict(next_state))
# using target network to EVALUATE action
target_net_evaluated_q_value = self.target_network.predict(next_state)[0][online_net_selected_action]
q_update = reward + GAMMA * target_net_evaluated_q_value
experience_new_q_values[action] = q_update
minibatch_new_q_values.append(experience_new_q_values)
minibatch_states = np.array([state for state,_,_,_,_ in minibatch])
minibatch_new_q_values = np.array(minibatch_new_q_values)
self.online_network.fit(minibatch_states, minibatch_new_q_values, verbose=False, epochs=1)
def update_target_network(self):
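        # Polyak (soft) update of the target network: every target weight is moved
        # a fraction TAU towards its online-network counterpart,
        # i.e. target <- (1 - TAU) * target + TAU * online.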
q_network_theta = self.online_network.get_weights()
target_network_theta = self.target_network.get_weights()
counter = 0
for q_weight, target_weight in zip(q_network_theta,target_network_theta):
target_weight = target_weight * (1-TAU) + q_weight * TAU
target_network_theta[counter] = target_weight
counter += 1
self.target_network.set_weights(target_network_theta)
def remember(self, state, action, reward, next_state, done):
if len(self.memory) > AGENT_MEMORY_LIMIT:
self.memory.pop(0)
experience = (state, action, reward, next_state, done)
self.memory.append(experience)
def update_epsilon(self):
self.epsilon = max(self.epsilon * EPSILON_DECAY, EPSILON_MIN)
def _reshape_state_for_net(self, state):
return np.reshape(state,(1, OBSERVATION_SPACE_DIMS))
def save_model(self):
self.online_network.save_weights('./normal_model/weights_online')
self.target_network.save_weights('./normal_model/weights_target')
def load_model(self):
try:
self.target_network.load_weights('./normal_model/weights_target')
self.online_network.load_weights('./normal_model/weights_online')
except:
pass
def test_agent():
    env = CustomCartPoleEnv(mode=0, render_mode='no')  # set render_mode='human' to visualise episodes
trials = []
NUMBER_OF_TRIALS=5
MAX_TRAINING_EPISODES = 500
MAX_STEPS_PER_EPISODE = 400
observation_space = env.observation_space.shape[0]
action_space = env.action_space.n
log_list = list()
for trial_index in range(NUMBER_OF_TRIALS):
agent = DoubleDQNAgent(action_space, observation_space)
#agent.load_model()
trial_episode_scores = []
s = 0
for episode_index in range(1, MAX_TRAINING_EPISODES+1):
state = env.reset()
episode_score = 0
steps =0
for _ in range(MAX_STEPS_PER_EPISODE):
action = agent.act(state)
next_state, reward, done, _ = env.step(action)
episode_score += reward
s+=1
steps+=1
agent.remember(state, action, reward, next_state, done)
#print(state)
state = next_state
if s > STEPS_BEFORE_REPLAY and len(agent.memory) > 2*EXPERIENCE_REPLAY_BATCH_SIZE:
agent.experience_replay()
agent.update_target_network()
agent.save_model()
s=0
if done:
break
trial_episode_scores.append(episode_score)
agent.update_epsilon()
last_100_avg = np.mean(trial_episode_scores[-100:])
tmp = "Run: " + str(episode_index) + ", steps_done: " + str(steps) + ", avg_points_per_step: " + str(episode_score/steps) + ", exploration: " + str(agent.epsilon) + ", score: " + str(episode_score) +", avg_last_100_score: " + str(last_100_avg)+"\n"
log_list.append(tmp)
if len(log_list)>10:
with open("log3.log", "a") as myfile:
for log in log_list:
myfile.write(log)
log_list = list()
trials.append(np.array(trial_episode_scores))
return np.array(trials)
def plot_individual_trial(trial):
plt.plot(trial)
plt.ylabel('points in Episode')
plt.xlabel('Episode')
plt.title('Double DQN points in Select Trial')
plt.show()
if __name__ == '__main__':
trials = test_agent()
# print 'Saving', file_name
np.save('double_dqn_cartpole_trials.npy', trials)
trials = np.load('double_dqn_cartpole_trials.npy')
plot_individual_trial(trials[1]) |
py | 1a411e1890af918ada0e7f0d487ac3510563fa50 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 6 23:45:59 2017
@author: yxl
"""
import os, sys, os.path as osp
from glob import glob
from sciapp.action import Macros, Widget, Report
from .. import root_dir
from .manager import DocumentManager, DictManager
from codecs import open
def get_path(root, path):
for i in range(10,0,-1):
if not '../'*i in path: continue
s = root
for j in range(i):s=os.path.dirname(s)
path = path.replace('../'*i, s+'/')
return path.replace('\\\\','\\').replace('\\','/')
def extend_plugins(path, lst, err):
rst = []
for i in lst:
if isinstance(i, tuple) or i=='-': rst.append(i)
elif i[-3:] == 'rpt':
pt = os.path.join(root_dir,path)
rst.append(Report(i[:-4], pt+'/'+i))
elif i[-3:] in {'.md', '.mc', '.wf'}:
p = os.path.join(os.path.join(root_dir, path), i).replace('\\','/')
rst.append(Macros(i[:-3], ['Open>{"path":"%s"}'%p]))
elif i[-6:] in ['wgt.py', 'gts.py']:
try:
rpath = path.replace('/', '.').replace('\\','.')
plg = __import__('imagepy.'+ rpath+'.'+i[:-3],'','',[''])
if hasattr(plg, 'wgts'):
rst.extend([j if j=='-' else Widget(j) for j in plg.wgts])
else:
rst.append(Widget(plg.Plugin))
except Exception as e:
err.append((path, i, sys.exc_info()[1]))
else:
try:
rpath = path.replace('/', '.').replace('\\','.')
plg = __import__('imagepy.'+ rpath+'.'+i[:-3],'','',[''])
if hasattr(plg, 'plgs'):
rst.extend([j for j in plg.plgs])
for p in plg.plgs:
if not isinstance(p, str): pass
else:
rst.append(plg.Plugin)
except Exception as e:
err.append((path, i, sys.exc_info()[1]))
return rst
def sort_plugins(catlog, lst):
rst = []
for i in catlog:
if i=='-':rst.append('-')
for j in lst:
if j[:-3]==i or j[:-4]==i or j[0].title==i:
lst.remove(j)
rst.append(j)
rst.extend(lst)
return rst
def build_plugins(path, err='root'):
root = err=='root'
if root: err=[]
subtree = []
cont = os.listdir(path)
for i in cont:
subp = os.path.join(path,i)
if os.path.isdir(subp):
sub = build_plugins(subp, err)
if len(sub)!=0:subtree.append(sub[:2])
elif i[-6:] in ('plg.py', 'lgs.py', 'wgt.py', 'gts.py'):
subtree.append(i)
elif i[-3:] in ('.mc', '.md', '.wf', 'rpt'):
subtree.append(i)
if len(subtree)==0:return []
path = path[path.index(root_dir)+len(root_dir)+1:]
rpath = path.replace('/', '.').replace('\\','.')
pg = __import__('imagepy.'+rpath,'','',[''])
pg.title = os.path.basename(path)
if hasattr(pg, 'catlog'):
subtree = sort_plugins(pg.catlog, subtree)
subtree = extend_plugins(path, subtree, err)
return pg, subtree, err
def extend_tools(path, lst, err):
rst = []
for i in lst:
if i[-3:] in ('.mc', '.md', '.wf', 'rpt'):
p = os.path.join(os.path.join(root_dir,path), i).replace('\\','/')
rst.append((Macros(i[:-3], ['Open>{"path":"%s"}'%p]),
os.path.join(root_dir, path)+'/'+i[:-3]+'.gif'))
else:
try:
rpath = path.replace('/', '.').replace('\\','.')
plg = __import__('imagepy.'+rpath+'.'+i,'','',[''])
if hasattr(plg, 'plgs'):
for i,j in plg.plgs: rst.append((i, path+'/'+j))
else: rst.append((plg.Plugin,
os.path.join(root_dir, path)+'/'+i.split('_')[0]+'.gif'))
except Exception as e:
err.append((path, i, sys.exc_info()[1]))
return rst
def sort_tools(catlog, lst):
rst = []
for i in catlog:
if i=='-':rst.append('-')
for j in lst:
if j==i or j[0].title==i or j[:-3]==i:
lst.remove(j)
rst.append(j)
rst.extend(lst)
return rst
def build_tools(path, err='root'):
root = err=='root'
if root: err=[]
subtree = []
cont = os.listdir(os.path.join(root_dir, path))
for i in cont:
subp = os.path.join(path,i)
if root and os.path.isdir(os.path.join(root_dir, subp)):
sub = build_tools(subp, err)
if len(sub)!=0:subtree.append(sub[:2])
elif not root:
if i[len(i)-7:] in ('_tol.py', 'tols.py'):
subtree.append(i[:-3])
elif i[-3:] in ('.mc', '.md', '.wf', 'rpt'):
subtree.append(i)
if len(subtree)==0:return []
rpath = path.replace('/', '.').replace('\\','.')
#rpath = rpath[rpath.index('imagepy.'):]
pg = __import__('imagepy.' + rpath,'','',[''])
pg.title = os.path.basename(path)
if hasattr(pg, 'catlog'):
subtree = sort_tools(pg.catlog, subtree)
if not root:subtree = extend_tools(path, subtree, err)
return pg, subtree, err
def extend_widgets(path, lst, err):
rst = []
for i in lst:
try:
rpath = path.replace('/', '.').replace('\\','.')
plg = __import__('imagepy.'+rpath+'.'+i,'','',[''])
rst.append(plg.Plugin)
except Exception as e:
err.append((path, i, sys.exc_info()[1]))
return rst
def sort_widgets(catlog, lst):
rst = []
for i in catlog:
if i=='-':rst.append('-')
for j in lst:
if j==i or j[:-3]==i or j[0].title==i:
lst.remove(j)
rst.append(j)
rst.extend(lst)
return rst
def build_widgets(path, err='root'):
root = err=='root'
if root: err=[]
subtree = []
cont = os.listdir(os.path.join(root_dir, path))
for i in cont:
subp = os.path.join(path,i)
if root and os.path.isdir(os.path.join(root_dir, subp)):
sub = build_widgets(subp, err)
if len(sub)!=0:subtree.append(sub[:2])
elif not root:
if i[len(i)-7:] in ('_wgt.py', 'wgts.py'):
subtree.append(i[:-3])
#print('====', subtree)
if len(subtree)==0:return []
rpath = path.replace('/', '.').replace('\\','.')
#rpath = rpath[rpath.index('imagepy.'):]
pg = __import__('imagepy.' + rpath,'','',[''])
pg.title = os.path.basename(path)
if hasattr(pg, 'catlog'):
subtree = sort_widgets(pg.catlog, subtree)
if not root: subtree = extend_widgets(path, subtree, err)
return pg, subtree, err
def build_document(path):
docs = []
for lang in [osp.split(i)[1] for i in glob(path+'/*') if osp.isdir(i)]:
for dirpath, dirnames, filenames in os.walk(path+'/'+lang):
for filename in filenames:
if filename[-3:] != '.md': continue
docs.append(os.path.join(dirpath, filename))
with open(docs[-1], encoding='utf-8') as f:
DocumentManager.add(filename[:-3], f.read(), lang)
return docs
def build_dictionary(path):
for lang in [osp.split(i)[1] for i in glob(path+'/*') if osp.isdir(i)]:
for dirpath, dirnames, filenames in os.walk(path+'/'+lang):
for filename in filenames:
if filename[-3:] != 'dic': continue
with open(os.path.join(dirpath, filename), encoding='utf-8') as f:
lines = f.read().replace('\r','').split('\n')
dic = []
for line in lines:
if line == '':
dic[-1] = (dic[-1][0][0], dict(dic[-1]))
elif line[0] == '\t':
dic[-1].append(line[1:].split('::'))
else:
dic.append([line.split('::')])
if isinstance(dic[-1], list):
dic[-1] = (dic[-1][0][0], dict(dic[-1]))
dic = dict(dic)
for i in dic:
obj = DictManager.get(i, tag=lang)
if not obj is None: obj.update(dic[i])
else: DictManager.add(i, dic[i], lang)
common = DictManager.get('common', tag=lang)
if common is None: return
objs = DictManager.gets(tag=lang)
for i in objs: i[1].update(common)
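# Format sketch for the *.dic files parsed above (inferred from the parsing
# logic, not from any official spec): each unindented line opens a group as
# "key::translation", tab-indented lines add per-field translations to that
# group, and a blank line closes the group, e.g.
#
#     Gaussian Blur::<translated title>
#     <TAB>sigma::<translated label>
#     <TAB>OK::<translated label>
#
# A group named "common" is merged into every other group of the same language.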
if __name__ == "__main__":
print (os.getcwd())
os.chdir('../../')
data = build_tools('tools') |
py | 1a411eb73cb58c09e850aed6984678681f8ac919 | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/xtark/ros_ws/src/third_packages/teb_local_planner/msg/TrajectoryPointMsg.msg;/home/xtark/ros_ws/src/third_packages/teb_local_planner/msg/TrajectoryMsg.msg;/home/xtark/ros_ws/src/third_packages/teb_local_planner/msg/FeedbackMsg.msg"
services_str = ""
pkg_name = "teb_local_planner"
dependencies_str = "geometry_msgs;std_msgs;costmap_converter"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "teb_local_planner;/home/xtark/ros_ws/src/third_packages/teb_local_planner/msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg;costmap_converter;/home/xtark/ros_ws/src/third_packages/costmap_converter/msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
py | 1a411ed1113224cb0ae52233377155dbd2784378 | """distutils.ccompiler
Contains CCompiler, an abstract base class that defines the interface
for the Distutils compiler abstraction model."""
import sys, os, re
from distutils.errors import *
from distutils.spawn import spawn
from distutils.file_util import move_file
from distutils.dir_util import mkpath
from distutils.dep_util import newer_pairwise, newer_group
from distutils.util import split_quoted, execute
from distutils import log
class CCompiler:
"""Abstract base class to define the interface that must be implemented
by real compiler classes. Also has some utility methods used by
several compiler classes.
The basic idea behind a compiler abstraction class is that each
instance can be used for all the compile/link steps in building a
single project. Thus, attributes common to all of those compile and
link steps -- include directories, macros to define, libraries to link
against, etc. -- are attributes of the compiler instance. To allow for
variability in how individual files are treated, most of those
attributes may be varied on a per-compilation or per-link basis.
"""
# 'compiler_type' is a class attribute that identifies this class. It
# keeps code that wants to know what kind of compiler it's dealing with
# from having to import all possible compiler classes just to do an
# 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
# should really, really be one of the keys of the 'compiler_class'
# dictionary (see below -- used by the 'new_compiler()' factory
# function) -- authors of new compiler interface classes are
# responsible for updating 'compiler_class'!
compiler_type = None
# XXX things not handled by this compiler abstraction model:
# * client can't provide additional options for a compiler,
# e.g. warning, optimization, debugging flags. Perhaps this
# should be the domain of concrete compiler abstraction classes
# (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
# class should have methods for the common ones.
    # * can't completely override the include or library search
# path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
# I'm not sure how widely supported this is even by Unix
# compilers, much less on other platforms. And I'm even less
# sure how useful it is; maybe for cross-compiling, but
# support for that is a ways off. (And anyways, cross
# compilers probably have a dedicated binary with the
# right paths compiled in. I hope.)
# * can't do really freaky things with the library list/library
# dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
# different versions of libfoo.a in different locations. I
# think this is useless without the ability to null out the
# library search path anyways.
# Subclasses that rely on the standard filename generation methods
# implemented below should override these; see the comment near
# those methods ('object_filenames()' et. al.) for details:
src_extensions = None # list of strings
obj_extension = None # string
static_lib_extension = None
shared_lib_extension = None # string
static_lib_format = None # format string
shared_lib_format = None # prob. same as static_lib_format
exe_extension = None # string
# Default language settings. language_map is used to detect a source
# file or Extension target language, checking source filenames.
# language_order is used to detect the language precedence, when deciding
# what language to use when mixing source types. For example, if some
# extension has two files with ".c" extension, and one with ".cpp", it
# is still linked as c++.
language_map = {".c" : "c",
".cc" : "c++",
".cpp" : "c++",
".cxx" : "c++",
".m" : "objc",
}
language_order = ["c++", "objc", "c"]
def __init__(self, verbose=0, dry_run=0, force=0):
self.dry_run = dry_run
self.force = force
self.verbose = verbose
# 'output_dir': a common output directory for object, library,
# shared object, and shared library files
self.output_dir = None
# 'macros': a list of macro definitions (or undefinitions). A
# macro definition is a 2-tuple (name, value), where the value is
# either a string or None (no explicit value). A macro
# undefinition is a 1-tuple (name,).
self.macros = []
# 'include_dirs': a list of directories to search for include files
self.include_dirs = []
# 'libraries': a list of libraries to include in any link
# (library names, not filenames: eg. "foo" not "libfoo.a")
self.libraries = []
# 'library_dirs': a list of directories to search for libraries
self.library_dirs = []
# 'runtime_library_dirs': a list of directories to search for
# shared libraries/objects at runtime
self.runtime_library_dirs = []
# 'objects': a list of object files (or similar, such as explicitly
# named library files) to include on any link
self.objects = []
for key in self.executables.keys():
self.set_executable(key, self.executables[key])
def set_executables(self, **kwargs):
"""Define the executables (and options for them) that will be run
to perform the various stages of compilation. The exact set of
executables that may be specified here depends on the compiler
class (via the 'executables' class attribute), but most will have:
compiler the C/C++ compiler
linker_so linker used to create shared objects and libraries
linker_exe linker used to create binary executables
archiver static library creator
On platforms with a command-line (Unix, DOS/Windows), each of these
is a string that will be split into executable name and (optional)
list of arguments. (Splitting the string is done similarly to how
Unix shells operate: words are delimited by spaces, but quotes and
backslashes can override this. See
'distutils.util.split_quoted()'.)
"""
# Note that some CCompiler implementation classes will define class
# attributes 'cpp', 'cc', etc. with hard-coded executable names;
# this is appropriate when a compiler class is for exactly one
# compiler/OS combination (eg. MSVCCompiler). Other compiler
# classes (UnixCCompiler, in particular) are driven by information
# discovered at run-time, since there are many different ways to do
# basically the same things with Unix C compilers.
for key in kwargs:
if key not in self.executables:
raise ValueError("unknown executable '%s' for class %s" %
(key, self.__class__.__name__))
self.set_executable(key, kwargs[key])
def set_executable(self, key, value):
if isinstance(value, str):
setattr(self, key, split_quoted(value))
else:
setattr(self, key, value)
def _find_macro(self, name):
i = 0
for defn in self.macros:
if defn[0] == name:
return i
i += 1
return None
def _check_macro_definitions(self, definitions):
"""Ensures that every element of 'definitions' is a valid macro
definition, ie. either (name,value) 2-tuple or a (name,) tuple. Do
nothing if all definitions are OK, raise TypeError otherwise.
"""
for defn in definitions:
if not (isinstance(defn, tuple) and
(len(defn) in (1, 2) and
(isinstance (defn[1], str) or defn[1] is None)) and
isinstance (defn[0], str)):
raise TypeError(("invalid macro definition '%s': " % defn) + \
"must be tuple (string,), (string, string), or " + \
"(string, None)")
# -- Bookkeeping methods -------------------------------------------
def define_macro(self, name, value=None):
"""Define a preprocessor macro for all compilations driven by this
compiler object. The optional parameter 'value' should be a
string; if it is not supplied, then the macro will be defined
without an explicit value and the exact outcome depends on the
compiler used (XXX true? does ANSI say anything about this?)
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
self.macros.append((name, value))
def undefine_macro(self, name):
"""Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
'define_macro()' and undefined by 'undefine_macro()' the last call
takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to 'compile()'), then that
takes precedence.
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
undefn = (name,)
self.macros.append(undefn)
def add_include_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
header files. The compiler is instructed to search directories in
the order in which they are supplied by successive calls to
'add_include_dir()'.
"""
self.include_dirs.append(dir)
def set_include_dirs(self, dirs):
"""Set the list of directories that will be searched to 'dirs' (a
list of strings). Overrides any preceding calls to
        'add_include_dir()'; subsequent calls to 'add_include_dir()' add
to the list passed to 'set_include_dirs()'. This does not affect
any list of standard include directories that the compiler may
search by default.
"""
self.include_dirs = dirs[:]
def add_library(self, libname):
"""Add 'libname' to the list of libraries that will be included in
all links driven by this compiler object. Note that 'libname'
should *not* be the name of a file containing a library, but the
name of the library itself: the actual filename will be inferred by
the linker, the compiler, or the compiler class (depending on the
platform).
The linker will be instructed to link against libraries in the
order they were supplied to 'add_library()' and/or
'set_libraries()'. It is perfectly valid to duplicate library
names; the linker will be instructed to link against libraries as
many times as they are mentioned.
"""
self.libraries.append(libname)
def set_libraries(self, libnames):
"""Set the list of libraries to be included in all links driven by
this compiler object to 'libnames' (a list of strings). This does
not affect any standard system libraries that the linker may
include by default.
"""
self.libraries = libnames[:]
def add_library_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
libraries specified to 'add_library()' and 'set_libraries()'. The
linker will be instructed to search for libraries in the order they
are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
"""
self.library_dirs.append(dir)
def set_library_dirs(self, dirs):
"""Set the list of library search directories to 'dirs' (a list of
strings). This does not affect any standard library search path
that the linker may search by default.
"""
self.library_dirs = dirs[:]
def add_runtime_library_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
shared libraries at runtime.
"""
self.runtime_library_dirs.append(dir)
def set_runtime_library_dirs(self, dirs):
"""Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
"""
self.runtime_library_dirs = dirs[:]
def add_link_object(self, object):
"""Add 'object' to the list of object files (or analogues, such as
explicitly named library files or the output of "resource
compilers") to be included in every link driven by this compiler
object.
"""
self.objects.append(object)
def set_link_objects(self, objects):
"""Set the list of object files (or analogues) to be included in
every link to 'objects'. This does not affect any standard object
files that the linker may include by default (such as system
libraries).
"""
self.objects = objects[:]
# -- Private utility methods --------------------------------------
# (here for the convenience of subclasses)
# Helper method to prep compiler in subclass compile() methods
def _setup_compile(self, outdir, macros, incdirs, sources, depends,
extra):
"""Process arguments and decide which source files to compile."""
if outdir is None:
outdir = self.output_dir
elif not isinstance(outdir, str):
raise TypeError("'output_dir' must be a string or None")
if macros is None:
macros = self.macros
elif isinstance(macros, list):
macros = macros + (self.macros or [])
else:
raise TypeError("'macros' (if supplied) must be a list of tuples")
if incdirs is None:
incdirs = self.include_dirs
elif isinstance(incdirs, (list, tuple)):
incdirs = list(incdirs) + (self.include_dirs or [])
else:
raise TypeError(
"'include_dirs' (if supplied) must be a list of strings")
if extra is None:
extra = []
# Get the list of expected output (object) files
objects = self.object_filenames(sources, strip_dir=0,
output_dir=outdir)
assert len(objects) == len(sources)
pp_opts = gen_preprocess_options(macros, incdirs)
build = {}
for i in range(len(sources)):
src = sources[i]
obj = objects[i]
ext = os.path.splitext(src)[1]
self.mkpath(os.path.dirname(obj))
build[obj] = (src, ext)
return macros, objects, extra, pp_opts, build
def _get_cc_args(self, pp_opts, debug, before):
# works for unixccompiler, cygwinccompiler
cc_args = pp_opts + ['-c']
if debug:
cc_args[:0] = ['-g']
if before:
cc_args[:0] = before
return cc_args
def _fix_compile_args(self, output_dir, macros, include_dirs):
"""Typecheck and fix-up some of the arguments to the 'compile()'
method, and return fixed-up values. Specifically: if 'output_dir'
is None, replaces it with 'self.output_dir'; ensures that 'macros'
is a list, and augments it with 'self.macros'; ensures that
'include_dirs' is a list, and augments it with 'self.include_dirs'.
Guarantees that the returned values are of the correct type,
i.e. for 'output_dir' either string or None, and for 'macros' and
'include_dirs' either list or None.
"""
if output_dir is None:
output_dir = self.output_dir
elif not isinstance(output_dir, str):
raise TypeError("'output_dir' must be a string or None")
if macros is None:
macros = self.macros
elif isinstance(macros, list):
macros = macros + (self.macros or [])
else:
raise TypeError("'macros' (if supplied) must be a list of tuples")
if include_dirs is None:
include_dirs = self.include_dirs
elif isinstance(include_dirs, (list, tuple)):
include_dirs = list(include_dirs) + (self.include_dirs or [])
else:
raise TypeError(
"'include_dirs' (if supplied) must be a list of strings")
return output_dir, macros, include_dirs
    def _prep_compile(self, sources, output_dir, depends=None):
        """Decide which source files must be recompiled.
Determine the list of object files corresponding to 'sources',
and figure out which ones really need to be recompiled.
Return a list of all object files and a dictionary telling
which source files can be skipped.
"""
# Get the list of expected output (object) files
objects = self.object_filenames(sources, output_dir=output_dir)
assert len(objects) == len(sources)
# Return an empty dict for the "which source files can be skipped"
# return value to preserve API compatibility.
return objects, {}
def _fix_object_args(self, objects, output_dir):
"""Typecheck and fix up some arguments supplied to various methods.
Specifically: ensure that 'objects' is a list; if output_dir is
None, replace with self.output_dir. Return fixed versions of
'objects' and 'output_dir'.
"""
if not isinstance(objects, (list, tuple)):
raise TypeError("'objects' must be a list or tuple of strings")
objects = list(objects)
if output_dir is None:
output_dir = self.output_dir
elif not isinstance(output_dir, str):
raise TypeError("'output_dir' must be a string or None")
return (objects, output_dir)
def _fix_lib_args(self, libraries, library_dirs, runtime_library_dirs):
"""Typecheck and fix up some of the arguments supplied to the
'link_*' methods. Specifically: ensure that all arguments are
lists, and augment them with their permanent versions
(eg. 'self.libraries' augments 'libraries'). Return a tuple with
fixed versions of all arguments.
"""
if libraries is None:
libraries = self.libraries
elif isinstance(libraries, (list, tuple)):
libraries = list (libraries) + (self.libraries or [])
else:
raise TypeError(
"'libraries' (if supplied) must be a list of strings")
if library_dirs is None:
library_dirs = self.library_dirs
elif isinstance(library_dirs, (list, tuple)):
library_dirs = list (library_dirs) + (self.library_dirs or [])
else:
raise TypeError(
"'library_dirs' (if supplied) must be a list of strings")
if runtime_library_dirs is None:
runtime_library_dirs = self.runtime_library_dirs
elif isinstance(runtime_library_dirs, (list, tuple)):
runtime_library_dirs = (list(runtime_library_dirs) +
(self.runtime_library_dirs or []))
else:
raise TypeError("'runtime_library_dirs' (if supplied) "
"must be a list of strings")
return (libraries, library_dirs, runtime_library_dirs)
def _need_link(self, objects, output_file):
"""Return true if we need to relink the files listed in 'objects'
to recreate 'output_file'.
"""
if self.force:
return True
else:
if self.dry_run:
newer = newer_group (objects, output_file, missing='newer')
else:
newer = newer_group (objects, output_file)
return newer
def detect_language(self, sources):
"""Detect the language of a given file, or list of files. Uses
language_map, and language_order to do the job.
"""
if not isinstance(sources, list):
sources = [sources]
lang = None
index = len(self.language_order)
for source in sources:
base, ext = os.path.splitext(source)
extlang = self.language_map.get(ext)
try:
extindex = self.language_order.index(extlang)
if extindex < index:
lang = extlang
index = extindex
except ValueError:
pass
return lang
# -- Worker methods ------------------------------------------------
# (must be implemented by subclasses)
def preprocess(self, source, output_file=None, macros=None,
include_dirs=None, extra_preargs=None, extra_postargs=None):
"""Preprocess a single C/C++ source file, named in 'source'.
Output will be written to file named 'output_file', or stdout if
'output_file' not supplied. 'macros' is a list of macro
definitions as for 'compile()', which will augment the macros set
with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
list of directory names that will be added to the default list.
Raises PreprocessError on failure.
"""
pass
def compile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
"""Compile one or more source files.
'sources' must be a list of filenames, most likely C/C++
files, but in reality anything that can be handled by a
particular compiler and compiler class (eg. MSVCCompiler can
handle resource files in 'sources'). Return a list of object
filenames, one per source filename in 'sources'. Depending on
the implementation, not all source files will necessarily be
compiled, but all corresponding object filenames will be
returned.
If 'output_dir' is given, object files will be put under it, while
retaining their original path component. That is, "foo/bar.c"
normally compiles to "foo/bar.o" (for a Unix implementation); if
'output_dir' is "build", then it would compile to
"build/foo/bar.o".
'macros', if given, must be a list of macro definitions. A macro
definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
The former defines a macro; if the value is None, the macro is
defined without an explicit value. The 1-tuple case undefines a
macro. Later definitions/redefinitions/ undefinitions take
precedence.
'include_dirs', if given, must be a list of strings, the
directories to add to the default include file search path for this
compilation only.
'debug' is a boolean; if true, the compiler will be instructed to
output debug symbols in (or alongside) the object file(s).
'extra_preargs' and 'extra_postargs' are implementation- dependent.
On platforms that have the notion of a command-line (e.g. Unix,
DOS/Windows), they are most likely lists of strings: extra
command-line arguments to prepend/append to the compiler command
line. On other platforms, consult the implementation class
documentation. In any event, they are intended as an escape hatch
for those occasions when the abstract compiler framework doesn't
cut the mustard.
'depends', if given, is a list of filenames that all targets
depend on. If a source file is older than any file in
depends, then the source file will be recompiled. This
supports dependency tracking, but only at a coarse
granularity.
Raises CompileError on failure.
"""
# A concrete compiler class can either override this method
# entirely or implement _compile().
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# Return *all* object filenames, not just the ones we just built.
return objects
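    # Illustrative call shape (added as a sketch; file names and macro values are
    # hypothetical): define NDEBUG without a value, define VERSION, undefine DEBUG.
    #     objects = compiler.compile(["foo.c", "bar.c"],
    #                                macros=[("NDEBUG", None), ("VERSION", '"1.0"'), ("DEBUG",)],
    #                                include_dirs=["include"])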
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile 'src' to product 'obj'."""
# A concrete compiler class that does not override compile()
# should implement _compile().
pass
def create_static_lib(self, objects, output_libname, output_dir=None,
debug=0, target_lang=None):
"""Link a bunch of stuff together to create a static library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects', the extra object files supplied to
'add_link_object()' and/or 'set_link_objects()', the libraries
supplied to 'add_library()' and/or 'set_libraries()', and the
libraries supplied as 'libraries' (if any).
'output_libname' should be a library name, not a filename; the
filename will be inferred from the library name. 'output_dir' is
the directory where the library file will be put.
'debug' is a boolean; if true, debugging information will be
included in the library (note that on most platforms, it is the
compile step where this matters: the 'debug' flag is included here
just for consistency).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LibError on failure.
"""
pass
# values for target_desc parameter in link()
SHARED_OBJECT = "shared_object"
SHARED_LIBRARY = "shared_library"
EXECUTABLE = "executable"
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
"""Link a bunch of stuff together to create an executable or
shared library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects'. 'output_filename' should be a filename. If
'output_dir' is supplied, 'output_filename' is relative to it
(i.e. 'output_filename' can provide directory components if
needed).
'libraries' is a list of libraries to link against. These are
library names, not filenames, since they're translated into
filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
on Unix and "foo.lib" on DOS/Windows). However, they can include a
directory component, which means the linker will look in that
specific directory rather than searching all the normal locations.
'library_dirs', if supplied, should be a list of directories to
search for libraries that were specified as bare library names
(ie. no directory component). These are on top of the system
default and those supplied to 'add_library_dir()' and/or
'set_library_dirs()'. 'runtime_library_dirs' is a list of
directories that will be embedded into the shared library and used
to search for other shared libraries that *it* depends on at
run-time. (This may only be relevant on Unix.)
'export_symbols' is a list of symbols that the shared library will
export. (This appears to be relevant only on Windows.)
'debug' is as for 'compile()' and 'create_static_lib()', with the
slight distinction that it actually matters on most platforms (as
opposed to 'create_static_lib()', which includes a 'debug' flag
mostly for form's sake).
'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
of course that they supply command-line arguments for the
particular linker being used).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LinkError on failure.
"""
raise NotImplementedError
# Old 'link_*()' methods, rewritten to use the new 'link()' method.
def link_shared_lib(self,
objects,
output_libname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_LIBRARY, objects,
self.library_filename(output_libname, lib_type='shared'),
output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_shared_object(self,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_OBJECT, objects,
output_filename, output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_executable(self,
objects,
output_progname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
target_lang=None):
self.link(CCompiler.EXECUTABLE, objects,
self.executable_filename(output_progname), output_dir,
libraries, library_dirs, runtime_library_dirs, None,
debug, extra_preargs, extra_postargs, None, target_lang)
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options() function; there is
# no appropriate default implementation so subclasses should
# implement all of these.
def library_dir_option(self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for libraries.
"""
raise NotImplementedError
def runtime_library_dir_option(self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for runtime libraries.
"""
raise NotImplementedError
def library_option(self, lib):
"""Return the compiler option to add 'lib' to the list of libraries
linked into the shared library or executable.
"""
raise NotImplementedError
def has_function(self, funcname, includes=None, include_dirs=None,
libraries=None, library_dirs=None):
"""Return a boolean indicating whether funcname is supported on
the current platform. The optional arguments can be used to
augment the compilation environment.
"""
# this can't be included at module scope because it tries to
# import math which might not be available at that point - maybe
# the necessary logic should just be inlined?
import tempfile
if includes is None:
includes = []
if include_dirs is None:
include_dirs = []
if libraries is None:
libraries = []
if library_dirs is None:
library_dirs = []
fd, fname = tempfile.mkstemp(".c", funcname, text=True)
f = os.fdopen(fd, "w")
try:
for incl in includes:
f.write("""#include "%s"\n""" % incl)
f.write("""\
int main (int argc, char **argv) {
%s();
}
""" % funcname)
finally:
f.close()
try:
objects = self.compile([fname], include_dirs=include_dirs)
except CompileError:
return False
try:
self.link_executable(objects, "a.out",
libraries=libraries,
library_dirs=library_dirs)
except (LinkError, TypeError):
return False
return True
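    # Illustrative usage sketch (not part of the original module); the probed
    # function and library names are assumptions chosen for the example:
    #
    #     cc = new_compiler()
    #     if cc.has_function('clock_gettime', libraries=['rt']):
    #         print('clock_gettime() is available on this platform')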
    def find_library_file(self, dirs, lib, debug=0):
"""Search the specified list of directories for a static or shared
library file 'lib' and return the full path to that file. If
'debug' true, look for a debugging version (if that makes sense on
the current platform). Return None if 'lib' wasn't found in any of
the specified directories.
"""
raise NotImplementedError
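    # Illustrative expected behaviour (assuming a Unix-style subclass):
    #
    #     cc.find_library_file(['/usr/lib', '/usr/local/lib'], 'm')
    #     # -> e.g. '/usr/lib/libm.so' or '/usr/lib/libm.a', or None if absent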
# -- Filename generation methods -----------------------------------
# The default implementation of the filename generating methods are
# prejudiced towards the Unix/DOS/Windows view of the world:
# * object files are named by replacing the source file extension
# (eg. .c/.cpp -> .o/.obj)
# * library files (shared or static) are named by plugging the
# library name and extension into a format string, eg.
    #     "lib%s%s" % (lib_name, ".a") for Unix static libraries
# * executables are named by appending an extension (possibly
# empty) to the program name: eg. progname + ".exe" for
# Windows
#
# To reduce redundant code, these methods expect to find
# several attributes in the current object (presumably defined
# as class attributes):
# * src_extensions -
# list of C/C++ source file extensions, eg. ['.c', '.cpp']
# * obj_extension -
# object file extension, eg. '.o' or '.obj'
# * static_lib_extension -
# extension for static library files, eg. '.a' or '.lib'
# * shared_lib_extension -
# extension for shared library/object files, eg. '.so', '.dll'
# * static_lib_format -
# format string for generating static library filenames,
# eg. 'lib%s.%s' or '%s.%s'
# * shared_lib_format
# format string for generating shared library filenames
# (probably same as static_lib_format, since the extension
# is one of the intended parameters to the format string)
# * exe_extension -
# extension for executable files, eg. '' or '.exe'
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
raise UnknownFileError(
"unknown file type '%s' (from '%s')" % (ext, src_name))
if strip_dir:
base = os.path.basename(base)
obj_names.append(os.path.join(output_dir,
base + self.obj_extension))
return obj_names
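    # Illustrative example (assuming Unix-style src_extensions and a '.o'
    # obj_extension):
    #
    #     cc.object_filenames(['src/foo.c', 'bar.cpp'], output_dir='build')
    #     # -> ['build/src/foo.o', 'build/bar.o']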
def shared_object_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename(basename)
return os.path.join(output_dir, basename + self.shared_lib_extension)
def executable_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename(basename)
return os.path.join(output_dir, basename + (self.exe_extension or ''))
def library_filename(self, libname, lib_type='static', # or 'shared'
strip_dir=0, output_dir=''):
assert output_dir is not None
if lib_type not in ("static", "shared", "dylib", "xcode_stub"):
raise ValueError(
"'lib_type' must be \"static\", \"shared\", \"dylib\", or \"xcode_stub\"")
fmt = getattr(self, lib_type + "_lib_format")
ext = getattr(self, lib_type + "_lib_extension")
dir, base = os.path.split(libname)
filename = fmt % (base, ext)
if strip_dir:
dir = ''
return os.path.join(output_dir, dir, filename)
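    # Illustrative examples (assuming the Unix subclass's 'lib%s%s' format
    # strings and '.a'/'.so' extensions):
    #
    #     cc.library_filename('foo')                     # -> 'libfoo.a'
    #     cc.library_filename('foo', lib_type='shared')  # -> 'libfoo.so'
    #     cc.library_filename('sub/foo', strip_dir=1)    # -> 'libfoo.a'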
# -- Utility methods -----------------------------------------------
def announce(self, msg, level=1):
log.debug(msg)
def debug_print(self, msg):
from distutils.debug import DEBUG
if DEBUG:
print(msg)
def warn(self, msg):
sys.stderr.write("warning: %s\n" % msg)
def execute(self, func, args, msg=None, level=1):
execute(func, args, msg, self.dry_run)
def spawn(self, cmd):
spawn(cmd, dry_run=self.dry_run)
def move_file(self, src, dst):
return move_file(src, dst, dry_run=self.dry_run)
    def mkpath(self, name, mode=0o777):
mkpath(name, mode, dry_run=self.dry_run)
# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
# type for that platform. Keys are interpreted as re match
# patterns. Order is important; platform mappings are preferred over
# OS names.
_default_compilers = (
# Platform string mappings
# on a cygwin built python we can use gcc like an ordinary UNIXish
# compiler
('cygwin.*', 'unix'),
# OS name mappings
('posix', 'unix'),
('nt', 'msvc'),
)
def get_default_compiler(osname=None, platform=None):
"""Determine the default compiler to use for the given platform.
osname should be one of the standard Python OS names (i.e. the
ones returned by os.name) and platform the common value
returned by sys.platform for the platform in question.
The default values are os.name and sys.platform in case the
parameters are not given.
"""
if osname is None:
osname = os.name
if platform is None:
platform = sys.platform
for pattern, compiler in _default_compilers:
if re.match(pattern, platform) is not None or \
re.match(pattern, osname) is not None:
return compiler
# Default to Unix compiler
return 'unix'
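# Illustrative examples based on the mapping table above:
#
#     get_default_compiler('posix', 'linux')   # -> 'unix'
#     get_default_compiler('nt', 'win32')      # -> 'msvc'
#     get_default_compiler('posix', 'cygwin')  # -> 'unix' (platform match wins)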
# Map compiler types to (module_name, class_name) pairs -- ie. where to
# find the code that implements an interface to this compiler. (The module
# is assumed to be in the 'distutils' package.)
compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler',
"standard UNIX-style compiler"),
'msvc': ('_msvccompiler', 'MSVCCompiler',
"Microsoft Visual C++"),
'cygwin': ('cygwinccompiler', 'CygwinCCompiler',
"Cygwin port of GNU C Compiler for Win32"),
'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32"),
'bcpp': ('bcppcompiler', 'BCPPCompiler',
"Borland C++ Compiler"),
}
def show_compilers():
"""Print list of available compilers (used by the "--help-compiler"
options to "build", "build_ext", "build_clib").
"""
# XXX this "knows" that the compiler option it's describing is
# "--compiler", which just happens to be the case for the three
# commands that use it.
from distutils.fancy_getopt import FancyGetopt
compilers = []
for compiler in compiler_class.keys():
compilers.append(("compiler="+compiler, None,
compiler_class[compiler][2]))
compilers.sort()
pretty_printer = FancyGetopt(compilers)
pretty_printer.print_help("List of available compilers:")
def new_compiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0):
"""Generate an instance of some CCompiler subclass for the supplied
platform/compiler combination. 'plat' defaults to 'os.name'
(eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
for that platform. Currently only 'posix' and 'nt' are supported, and
the default compilers are "traditional Unix interface" (UnixCCompiler
class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
possible to ask for a Unix compiler object under Windows, and a
Microsoft compiler object under Unix -- if you supply a value for
'compiler', 'plat' is ignored.
"""
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError(msg)
try:
module_name = "distutils." + module_name
        __import__(module_name)
module = sys.modules[module_name]
klass = vars(module)[class_name]
except ImportError:
raise DistutilsModuleError(
"can't compile C/C++ code: unable to load module '%s'" % \
module_name)
except KeyError:
raise DistutilsModuleError(
"can't compile C/C++ code: unable to find class '%s' "
"in module '%s'" % (class_name, module_name))
# XXX The None is necessary to preserve backwards compatibility
# with classes that expect verbose to be the first positional
# argument.
return klass(None, dry_run, force)
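# Illustrative usage sketch (the source and program names are assumptions for
# the example):
#
#     cc = new_compiler()                 # platform default, e.g. UnixCCompiler
#     objs = cc.compile(['hello.c'])
#     cc.link_executable(objs, 'hello', libraries=['m'])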
def gen_preprocess_options(macros, include_dirs):
"""Generate C pre-processor options (-D, -U, -I) as used by at least
two types of compilers: the typical Unix compiler and Visual C++.
'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
means undefine (-U) macro 'name', and (name,value) means define (-D)
macro 'name' to 'value'. 'include_dirs' is just a list of directory
names to be added to the header file search path (-I). Returns a list
of command-line options suitable for either Unix compilers or Visual
C++.
"""
# XXX it would be nice (mainly aesthetic, and so we don't generate
# stupid-looking command lines) to go over 'macros' and eliminate
# redundant definitions/undefinitions (ie. ensure that only the
# latest mention of a particular macro winds up on the command
# line). I don't think it's essential, though, since most (all?)
# Unix C compilers only pay attention to the latest -D or -U
# mention of a macro on their command line. Similar situation for
# 'include_dirs'. I'm punting on both for now. Anyways, weeding out
# redundancies like this should probably be the province of
# CCompiler, since the data structures used are inherited from it
# and therefore common to all CCompiler classes.
pp_opts = []
for macro in macros:
if not (isinstance(macro, tuple) and 1 <= len(macro) <= 2):
raise TypeError(
"bad macro definition '%s': "
"each element of 'macros' list must be a 1- or 2-tuple"
% macro)
if len(macro) == 1: # undefine this macro
pp_opts.append("-U%s" % macro[0])
elif len(macro) == 2:
if macro[1] is None: # define with no explicit value
pp_opts.append("-D%s" % macro[0])
else:
# XXX *don't* need to be clever about quoting the
# macro value here, because we're going to avoid the
# shell at all costs when we spawn the command!
pp_opts.append("-D%s=%s" % macro)
for dir in include_dirs:
pp_opts.append("-I%s" % dir)
return pp_opts
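# Illustrative example of the mapping described above:
#
#     gen_preprocess_options([('NDEBUG', None), ('VERSION', '"1.0"'), ('TRACE',)],
#                            ['include'])
#     # -> ['-DNDEBUG', '-DVERSION="1.0"', '-UTRACE', '-Iinclude']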
def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
"""Generate linker options for searching library directories and
linking with specific libraries. 'libraries' and 'library_dirs' are,
respectively, lists of library names (not filenames!) and search
directories. Returns a list of command-line options suitable for use
with some compiler (depending on the two format strings passed in).
"""
lib_opts = []
for dir in library_dirs:
lib_opts.append(compiler.library_dir_option(dir))
for dir in runtime_library_dirs:
opt = compiler.runtime_library_dir_option(dir)
if isinstance(opt, list):
lib_opts = lib_opts + opt
else:
lib_opts.append(opt)
# XXX it's important that we *not* remove redundant library mentions!
# sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
# resolve all symbols. I just hope we never have to say "-lfoo obj.o
# -lbar" to get things to work -- that's certainly a possibility, but a
# pretty nasty way to arrange your C code.
for lib in libraries:
(lib_dir, lib_name) = os.path.split(lib)
if lib_dir:
lib_file = compiler.find_library_file([lib_dir], lib_name)
if lib_file:
lib_opts.append(lib_file)
else:
compiler.warn("no library file corresponding to "
"'%s' found (skipping)" % lib)
else:
            lib_opts.append(compiler.library_option(lib))
return lib_opts
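# Illustrative example (assuming a Unix-style compiler whose
# library_dir_option()/library_option() return '-L<dir>'/'-l<lib>' strings):
#
#     gen_lib_options(cc, ['/opt/lib'], [], ['m', 'pthread'])
#     # -> ['-L/opt/lib', '-lm', '-lpthread']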
|
py | 1a411ee9a3e03f0a9a4175cc5d51ebb63ba9a21b | import pytest
from thefrick.rules.tsuru_login import match, get_new_command
from thefrick.types import Command
error_msg = (
"Error: you're not authenticated or your session has expired.",
("You're not authenticated or your session has expired. "
"Please use \"login\" command for authentication."),
)
@pytest.mark.parametrize('command', [
Command('tsuru app-shell', error_msg[0]),
Command('tsuru app-log -f', error_msg[1]),
])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command', [
Command('tsuru', ''),
Command('tsuru app-restart', 'Error: unauthorized'),
Command('tsuru app-log -f', 'Error: unparseable data'),
])
def test_not_match(command):
assert not match(command)
@pytest.mark.parametrize('command, new_command', [
(Command('tsuru app-shell', error_msg[0]),
'tsuru login && tsuru app-shell'),
(Command('tsuru app-log -f', error_msg[1]),
'tsuru login && tsuru app-log -f'),
])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
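# Minimal sketch of the rule these tests exercise, inferred from the
# assertions above (an assumption, not necessarily the project's actual
# implementation):
#
#     def match(command):
#         return ('tsuru' in command.script
#                 and 'not authenticated' in command.output
#                 and 'session has expired' in command.output)
#
#     def get_new_command(command):
#         return 'tsuru login && {}'.format(command.script)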
|
py | 1a41206f68c09dd47e581162e31d0330d4d363db | # flake8: noqa
from .abstract import IFeatureExtractor
from .errors import FeatureTypeExtractionError, ScenariosTestFileNotFound
from .extractor import FeatureExtractor
from .types import FeatureTypeName
|
py | 1a4122a8cce4e45681742e97a6beddd2bb99c1af | from django.contrib import admin
from django.forms import ModelForm, ModelChoiceField, TextInput
from django.utils.translation import ugettext_lazy as _
from dal import autocomplete
from danceschool.core.models import Customer, Registration, TemporaryRegistration
from danceschool.core.admin import CustomerAdmin
from .models import VoucherCategory, Voucher, DanceTypeVoucher, ClassVoucher, SeriesCategoryVoucher, PublicEventCategoryVoucher, SessionVoucher, CustomerGroupVoucher, CustomerVoucher, VoucherUse, TemporaryVoucherUse, VoucherCredit
class CustomerVoucherInlineForm(ModelForm):
customer = ModelChoiceField(
queryset=Customer.objects.all(),
widget=autocomplete.ModelSelect2(
url='autocompleteCustomer',
attrs={
# This will set the input placeholder attribute:
'data-placeholder': _('Enter a customer name'),
# This will set the yourlabs.Autocomplete.minimumCharacters
# options, the naming conversion is handled by jQuery
'data-minimum-input-length': 2,
'data-max-results': 4,
'class': 'modern-style',
}
)
)
class Meta:
model = CustomerVoucher
exclude = []
class CustomerGroupVoucherInline(admin.StackedInline):
model = CustomerGroupVoucher
extra = 1
classes = ['collapse',]
class CustomerVoucherInline(admin.StackedInline):
model = CustomerVoucher
form = CustomerVoucherInlineForm
extra = 1
classes = ['collapse',]
class CustomerAdminWithVouchers(CustomerAdmin):
inlines = CustomerAdmin.inlines + [CustomerVoucherInline]
class RegistrationVoucherInline(admin.TabularInline):
model = VoucherUse
extra = 0
readonly_fields = ['voucher','amount']
# Prevents adding new voucher uses without going through
# the standard registration process
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class TemporaryRegistrationVoucherInline(admin.TabularInline):
model = TemporaryVoucherUse
extra = 0
readonly_fields = ['voucher','amount']
# Prevents adding new voucher uses without going through
# the standard registration process
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class DanceTypeVoucherInline(admin.StackedInline):
model = DanceTypeVoucher
extra = 1
classes = ['collapse',]
class ClassVoucherInline(admin.StackedInline):
model = ClassVoucher
extra = 1
classes = ['collapse',]
class SeriesCategoryVoucherInline(admin.StackedInline):
model = SeriesCategoryVoucher
extra = 1
classes = ['collapse',]
class PublicEventCategoryVoucherInline(admin.StackedInline):
model = PublicEventCategoryVoucher
extra = 1
classes = ['collapse',]
class EventSessionVoucherInline(admin.StackedInline):
model = SessionVoucher
extra = 1
classes = ['collapse',]
class VoucherUseInline(admin.TabularInline):
model = VoucherUse
extra = 0
readonly_fields = ['registration','amount']
# Prevents adding new voucher uses without going through
# the standard registration process.
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class VoucherCreditInlineForm(ModelForm):
class Meta:
widgets = {
'description': TextInput,
}
class VoucherCreditInline(admin.TabularInline):
model = VoucherCredit
extra = 1
fields = ['amount', 'description','creationDate']
readonly_fields = ['creationDate',]
form = VoucherCreditInlineForm
class VoucherAdmin(admin.ModelAdmin):
inlines = [DanceTypeVoucherInline,ClassVoucherInline,SeriesCategoryVoucherInline,PublicEventCategoryVoucherInline,EventSessionVoucherInline,CustomerGroupVoucherInline,CustomerVoucherInline,VoucherUseInline,VoucherCreditInline]
list_display = ['voucherId','name','category','amountLeft','maxAmountPerUse','expirationDate','isEnabled','restrictions']
list_filter = ['category','expirationDate','disabled','forFirstTimeCustomersOnly','forPreviousCustomersOnly']
search_fields = ['voucherId','name','description']
readonly_fields = ['refundAmount','creationDate']
actions = ['enableVoucher','disableVoucher']
fieldsets = (
(None, {
'fields': (('voucherId','category'),'name','description',('originalAmount','maxAmountPerUse'),),
}),
(_('Voucher Restrictions'), {
'fields': ('expirationDate',('singleUse','forFirstTimeCustomersOnly','forPreviousCustomersOnly','disabled')),
}),
(_('Other Info'), {
'classes': ('collapse',),
'fields': ('creationDate','refundAmount'),
}),
)
def isEnabled(self,obj):
return obj.disabled is False
isEnabled.short_description = _('Enabled')
isEnabled.boolean = True
def restrictions(self,obj):
text = []
if obj.singleUse:
text.append(_('Single use'))
if obj.forFirstTimeCustomersOnly:
text.append(_('First-time customer'))
if obj.forPreviousCustomersOnly:
text.append(_('Previous customer'))
if obj.customervoucher_set.all().exists():
text.append(_('Specific customer'))
if obj.classvoucher_set.all().exists():
text.append(_('Specific class'))
if obj.dancetypevoucher_set.all().exists():
text.append(_('Specific dance type/level'))
return ', '.join([str(x) for x in text])
restrictions.short_description = _('Restrictions')
def disableVoucher(self, request, queryset):
rows_updated = queryset.update(disabled=True)
if rows_updated == 1:
message_bit = "1 voucher was"
else:
message_bit = "%s vouchers were" % rows_updated
self.message_user(request, "%s successfully disabled." % message_bit)
disableVoucher.short_description = _('Disable selected Vouchers')
def enableVoucher(self, request, queryset):
rows_updated = queryset.update(disabled=False)
if rows_updated == 1:
message_bit = "1 voucher was"
else:
message_bit = "%s vouchers were" % rows_updated
self.message_user(request, "%s successfully enabled." % message_bit)
enableVoucher.short_description = _('Enable selected Vouchers')
# This adds inlines to Registration and TemporaryRegistration without subclassing
admin.site._registry[Registration].inlines.insert(0,RegistrationVoucherInline)
admin.site._registry[TemporaryRegistration].inlines.insert(0,TemporaryRegistrationVoucherInline)
admin.site.register(VoucherCategory)
admin.site.register(Voucher,VoucherAdmin)
admin.site.unregister(Customer)
admin.site.register(Customer,CustomerAdminWithVouchers)
|
py | 1a41244a0c5faa5186f73e7a4f043938eef88009 | from vpp_tunnel_interface import VppTunnelInterface
class VppIpsecTunInterface(VppTunnelInterface):
"""
VPP IPsec Tunnel interface
"""
def __init__(self, test, parent_if, local_spi,
remote_spi, crypto_alg, local_crypto_key, remote_crypto_key,
integ_alg, local_integ_key, remote_integ_key, is_ip6=False):
super(VppIpsecTunInterface, self).__init__(test, parent_if)
self.local_spi = local_spi
self.remote_spi = remote_spi
self.crypto_alg = crypto_alg
self.local_crypto_key = local_crypto_key
self.remote_crypto_key = remote_crypto_key
self.integ_alg = integ_alg
self.local_integ_key = local_integ_key
self.remote_integ_key = remote_integ_key
if is_ip6:
self.local_ip = self.parent_if.local_ip6
self.remote_ip = self.parent_if.remote_ip6
else:
self.local_ip = self.parent_if.local_ip4
self.remote_ip = self.parent_if.remote_ip4
def add_vpp_config(self):
r = self.test.vapi.ipsec_tunnel_if_add_del(
self.local_ip, self.remote_ip,
self.remote_spi, self.local_spi,
self.crypto_alg, self.local_crypto_key, self.remote_crypto_key,
self.integ_alg, self.local_integ_key, self.remote_integ_key)
self.set_sw_if_index(r.sw_if_index)
self.generate_remote_hosts()
self.test.registry.register(self, self.test.logger)
def remove_vpp_config(self):
self.test.vapi.ipsec_tunnel_if_add_del(
self.local_ip, self.remote_ip,
self.remote_spi, self.local_spi,
self.crypto_alg, self.local_crypto_key, self.remote_crypto_key,
self.integ_alg, self.local_integ_key, self.remote_integ_key,
is_add=0)
def object_id(self):
return "ipsec-tun-if-%d" % self._sw_if_index
class VppIpsecGRETunInterface(VppTunnelInterface):
"""
VPP IPsec GRE Tunnel interface
this creates headers
IP / ESP / IP / GRE / payload
i.e. it's GRE over IPSEC, rather than IPSEC over GRE.
"""
def __init__(self, test, parent_if, sa_out, sa_in):
super(VppIpsecGRETunInterface, self).__init__(test, parent_if)
self.sa_in = sa_in
self.sa_out = sa_out
def add_vpp_config(self):
r = self.test.vapi.ipsec_gre_tunnel_add_del(
self.parent_if.local_ip4n,
self.parent_if.remote_ip4n,
self.sa_out,
self.sa_in)
self.set_sw_if_index(r.sw_if_index)
self.generate_remote_hosts()
self.test.registry.register(self, self.test.logger)
def remove_vpp_config(self):
self.test.vapi.ipsec_gre_tunnel_add_del(
self.parent_if.local_ip4n,
self.parent_if.remote_ip4n,
self.sa_out,
self.sa_in,
is_add=0)
def query_vpp_config(self):
ts = self.test.vapi.ipsec_gre_tunnel_dump(sw_if_index=0xffffffff)
for t in ts:
if t.tunnel.sw_if_index == self._sw_if_index:
return True
return False
def __str__(self):
return self.object_id()
def object_id(self):
return "ipsec-gre-tun-if-%d" % self._sw_if_index
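    # Illustrative usage sketch inside a VPP test case (the fixture names and
    # SA ids are assumptions for the example):
    #
    #     tun = VppIpsecGRETunInterface(self, self.pg0, sa_out_id, sa_in_id)
    #     tun.add_vpp_config()       # creates the tunnel in VPP
    #     ...                        # exercise traffic over 'tun'
    #     tun.remove_vpp_config()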
|
py | 1a412561f384bee427e62f5c0ff46835671a7a9e | import cv2
import numpy as np
# Harris corner detection refined to sub-pixel accuracy on a single image.
f = 'image287.jpg'
img = cv2.imread(f)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
# Harris response map (block size 2, Sobel aperture 3, k = 0.04), dilated for marking.
dst = cv2.cornerHarris(gray, 2, 3, 0.04)
dst = cv2.dilate(dst, None)
# Threshold the response and find centroids of the connected corner regions.
ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)
dst = np.uint8(dst)
ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)
# Refine the centroids to sub-pixel corner locations.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners = cv2.cornerSubPix(gray, np.float32(centroids), (5, 5), (-1, -1), criteria)
# Draw centroids in red and refined corners in green, then show until Esc is pressed.
res = np.hstack((centroids, corners))
res = np.int0(res)
img[res[:, 1], res[:, 0]] = [0, 0, 255]
img[res[:, 3], res[:, 2]] = [0, 255, 0]
cv2.imshow('dst', img)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
|
py | 1a41264122caf32c5902840ece8aaf7e9535ed86 | from PyQt5.QtCore import QTimer, Qt
from PyQt5.QtWidgets import (QApplication, QVBoxLayout, QMainWindow, QTabWidget,
QPushButton, QWidget, QFileDialog)
from controller.config_controller import ConfigController
from widgets import (NewOrLoad, ExperimentConfigTab, CardSelectionsConfigTab,
InstructionsConfigTab)
ALIGN_RIGHT = 0x0002
class Application(object):
new_or_load = None
main_window = None
app = None
args = None
mode_select_widget = None
user_info_widget = None
experiment_window = None
controller = None
_startup_timer = None
def __init__(self, args):
self.args = args
self.controller = ConfigController()
def run(self):
self.app = QApplication(self.args)
self._startup_timer = QTimer().singleShot(0, self.on_started)
return self.app.exec_()
def on_started(self):
self.ask_new_or_load()
def ask_new_or_load(self):
self.new_or_load = NewOrLoad(self.controller)
self.new_or_load.accepted.connect(self.open_configurator)
def open_configurator(self):
cs_conf = CardSelectionsConfigTab(self.controller)
instr_conf = InstructionsConfigTab(self.controller)
exp_conf = ExperimentConfigTab(self.controller)
tabs = QTabWidget()
tabs.addTab(exp_conf, "Basic")
tabs.addTab(instr_conf, "Instructions")
tabs.addTab(cs_conf, "Cards")
save = QPushButton("Save and exit")
save.clicked.connect(self.on_save)
vbox = QVBoxLayout()
vbox.addWidget(tabs)
vbox.addWidget(save, alignment=Qt.AlignRight)
wrap = QWidget()
wrap.setLayout(vbox)
self.main_window = QMainWindow()
self.main_window.setCentralWidget(wrap)
self.main_window.setGeometry(300, 200, 500, 700)
self.main_window.show()
def on_save(self):
errors = self.controller.get_errors_list()
if errors:
            # Validation errors are currently ignored here; a fuller
            # implementation would report them to the user before saving.
            pass
else:
fname = QFileDialog.getSaveFileName(
self.main_window,
'Save as',
filter='Experiment Configuration (*.conf)')
fname = fname[0]
if not fname:
return
if not fname.endswith('.conf'):
fname += '.conf'
if self.controller.save(fname):
self.app.quit()
|
py | 1a4126beeef4fdc2e54fcf464e3098300d1b741e | # Generated by Django 2.2.7 on 2019-12-12 10:29
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import terra_layer.schema
class Migration(migrations.Migration):
dependencies = [
("terra_layer", "0044_order"),
]
operations = [
migrations.AlterModelOptions(
name="layer",
options={"ordering": ("order", "name")},
),
migrations.AddField(
model_name="scene",
name="tree",
field=django.contrib.postgres.fields.jsonb.JSONField(
default=list,
validators=[
terra_layer.schema.JSONSchemaValidator(
limit_value={
"$id": "http://terralego.com/scene_layertree.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {},
"items": {
"$id": "#/items",
"dependencies": {"group": ["children", "label"]},
"properties": {
"children": {"$ref": "#"},
"expanded": {
"$id": "#/items/properties/expanded",
"default": False,
"examples": [True],
"title": "The expanded status in admin. Not used yet",
"type": "boolean",
},
"geolayer": {
"$id": "#/items/properties/geolayer",
"default": 0,
"examples": [96],
"title": "The geolayer id",
"type": "integer",
},
"group": {
"$id": "#/items/properties/group",
"default": False,
"examples": [True],
"title": "The group name. Present if it's a group.",
"type": "boolean",
},
"label": {
"$id": "#/items/properties/label",
"default": "",
"examples": ["My Group"],
"pattern": "^(.*)$",
"title": "The group name",
"type": "string",
},
},
"required": [],
"title": "Layer tree item",
"type": "object",
},
"title": "Scene layer tree schema",
"type": "array",
}
)
],
),
),
migrations.AlterField(
model_name="layer",
name="group",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="layers",
to="terra_layer.LayerGroup",
),
),
migrations.AlterUniqueTogether(
name="layergroup",
unique_together=set(),
),
]
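    # Illustrative value that conforms to the layer-tree schema above (an
    # assumption, shown for documentation only):
    #
    #     [{"group": True, "label": "My Group", "expanded": False,
    #       "children": [{"geolayer": 96}]}]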
|