max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
python/ts/flint/utils.py | mattomatic/flint | 972 | 38501 | <filename>python/ts/flint/utils.py
#
# Copyright 2017 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
_UNIT_TO_JUNIT = {
"s": "SECONDS",
"ms": "MILLISECONDS",
"us": "MICROSECONDS",
"ns": "NANOSECONDS"
}
def jsc(sc):
"""Returns the underlying Scala SparkContext
:param sc: SparkContext
:return: :class:`py4j.java_gateway.JavaObject` (org.apache.spark.SparkContext)
"""
return sc._jsc.sc()
def jvm(sc):
"""Returns the Pyspark JVM handle
:param sc: SparkContext
    :return: :class:`py4j.java_gateway.JVMView`
    """
return sc._jvm
def scala_object(jpkg, obj):
return jpkg.__getattr__(obj + "$").__getattr__("MODULE$")
def scala_package_object(jpkg):
return scala_object(jpkg, "package")
def pyutils(sc):
"""Returns a handle to ``com.twosigma.flint.rdd.PythonUtils``
:param sc: SparkContext
:return: :class:`py4j.java_gateway.JavaPackage` (com.twosigma.flint.rdd.PythonUtils)
"""
return jvm(sc).com.twosigma.flint.rdd.PythonUtils
def copy_jobj(sc, obj):
"""Returns a Java object ``obj`` with an additional reference count
:param sc: Spark Context
:param obj: :class:`py4j.java_gateway.JavaObject`
:return: ``obj`` (:class:`py4j.java_gateway.JavaObject`) with an additional reference count
"""
return pyutils(sc).makeCopy(obj)
def to_list(lst):
"""Make sure the object is wrapped in a list
:return: a ``list`` object, either lst or lst in a list
"""
if isinstance(lst, str):
lst = [lst]
elif not isinstance(lst, list):
try:
lst = list(lst)
except TypeError:
lst = [lst]
return lst
def list_to_seq(sc, lst, preserve_none=False):
"""Shorthand for accessing PythonUtils Java Package
If lst is a Python None, returns a None or empty Scala Seq (depending on preserve_none)
If lst is a Python object, such as str, returns a Scala Seq containing the object
If lst is a Python tuple/list, returns a Scala Seq containing the objects in the tuple/list
:return: A copy of ``lst`` as a ``scala.collection.Seq``
"""
if lst is None:
if preserve_none:
return None
else:
lst = []
return jvm(sc).org.apache.spark.api.python.PythonUtils.toSeq(to_list(lst))
def py_col_to_scala_col(sc, py_col):
converters = {
list: list_to_seq,
tuple: list_to_seq
}
convert = converters.get(type(py_col))
if convert:
return convert(sc, py_col)
else:
return py_col
def junit(sc, unit):
"""Converts a Pandas unit to scala.concurrent.duration object
    :return: Scala equivalent of ``unit`` as a ``scala.concurrent.duration`` object
"""
if unit not in _UNIT_TO_JUNIT:
raise ValueError("unit must be in {}".format(_UNIT_TO_JUNIT.keys()))
return scala_package_object(jvm(sc).scala.concurrent.duration).__getattr__(_UNIT_TO_JUNIT[unit])()
def jschema(sc, schema):
"""Converts a Python schema (StructType) to a Scala schema ``org.apache.spark.sql.types.StructType``
:return: :class:``org.apache.spark.sql.types.StructType``
"""
import json
    return jvm(sc).org.apache.spark.sql.types.StructType.fromString(json.dumps(schema.jsonValue()))
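# --- Illustrative usage sketch (added; not part of the original module) ---
# to_list() is the normalization step the Scala conversion helpers above rely on:
# strings and scalars are wrapped, other iterables are materialized into a list.
if __name__ == "__main__":
    assert to_list("time") == ["time"]        # a str stays a single element
    assert to_list(("a", "b")) == ["a", "b"]  # tuples are converted to lists
    assert to_list(7) == [7]                  # non-iterables are wrapped
    assert to_list(["x"]) == ["x"]            # lists pass through unchanged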
|
sympy/diffgeom/tests/test_class_structure.py | shilpiprd/sympy | 8,323 | 38550 | <reponame>shilpiprd/sympy
from sympy.diffgeom import Manifold, Patch, CoordSystem, Point
from sympy import symbols, Function
from sympy.testing.pytest import warns_deprecated_sympy
m = Manifold('m', 2)
p = Patch('p', m)
a, b = symbols('a b')
cs = CoordSystem('cs', p, [a, b])
x, y = symbols('x y')
f = Function('f')
s1, s2 = cs.coord_functions()
v1, v2 = cs.base_vectors()
f1, f2 = cs.base_oneforms()
def test_point():
point = Point(cs, [x, y])
assert point != Point(cs, [2, y])
#TODO assert point.subs(x, 2) == Point(cs, [2, y])
#TODO assert point.free_symbols == set([x, y])
def test_subs():
assert s1.subs(s1, s2) == s2
assert v1.subs(v1, v2) == v2
assert f1.subs(f1, f2) == f2
assert (x*f(s1) + y).subs(s1, s2) == x*f(s2) + y
assert (f(s1)*v1).subs(v1, v2) == f(s1)*v2
assert (y*f(s1)*f1).subs(f1, f2) == y*f(s1)*f2
def test_deprecated():
with warns_deprecated_sympy():
cs_wname = CoordSystem('cs', p, ['a', 'b'])
assert cs_wname == cs_wname.func(*cs_wname.args)
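def test_subs_sketch():
    # Illustrative sketch (added; not in the original file): ordinary Symbol
    # substitution also works inside expressions built from coordinate functions.
    assert (x*f(s1)).subs(x, 2) == 2*f(s1)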
|
datrie/run_test.py | nikicc/anaconda-recipes | 130 | 38556 | <reponame>nikicc/anaconda-recipes
import string
import datrie
trie = datrie.Trie(string.ascii_lowercase)
trie[u'foo'] = 5
assert u'foo' in trie
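# Illustrative extras (added; not in the original recipe test): datrie also
# supports prefix-based queries over the stored keys.
trie[u'foobar'] = 7
assert sorted(trie.keys(u'foo')) == [u'foo', u'foobar']
assert u'foo' in trie.prefixes(u'foobar')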
|
tests/vision/metrics/vqa_test.py | shunk031/allennlp-models | 402 | 38559 | <reponame>shunk031/allennlp-models<filename>tests/vision/metrics/vqa_test.py
from typing import Any, Dict, List, Tuple, Union
import pytest
import torch
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
global_distributed_metric,
run_distributed_test,
)
from allennlp_models.vision import VqaMeasure
class VqaMeasureTest(AllenNlpTestCase):
@multi_device
def test_vqa(self, device: str):
vqa = VqaMeasure()
logits = torch.tensor(
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0]], device=device
)
labels = torch.tensor([[0], [3]], device=device)
label_weights = torch.tensor([[1 / 3], [2 / 3]], device=device)
vqa(logits, labels, label_weights)
vqa_score = vqa.get_metric()["score"]
assert vqa_score == pytest.approx((1 / 3) / 2)
@multi_device
def test_vqa_accumulates_and_resets_correctly(self, device: str):
vqa = VqaMeasure()
logits = torch.tensor(
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0]], device=device
)
labels = torch.tensor([[0], [3]], device=device)
labels2 = torch.tensor([[4], [4]], device=device)
label_weights = torch.tensor([[1 / 3], [2 / 3]], device=device)
vqa(logits, labels, label_weights)
vqa(logits, labels, label_weights)
vqa(logits, labels2, label_weights)
vqa(logits, labels2, label_weights)
vqa_score = vqa.get_metric(reset=True)["score"]
assert vqa_score == pytest.approx((1 / 3 + 1 / 3 + 0 + 0) / 8)
vqa(logits, labels, label_weights)
vqa_score = vqa.get_metric(reset=True)["score"]
assert vqa_score == pytest.approx((1 / 3) / 2)
@multi_device
def test_does_not_divide_by_zero_with_no_count(self, device: str):
vqa = VqaMeasure()
assert vqa.get_metric()["score"] == pytest.approx(0.0)
def test_distributed_accuracy(self):
logits = [
torch.tensor([[0.35, 0.25, 0.1, 0.1, 0.2]]),
torch.tensor([[0.1, 0.6, 0.1, 0.2, 0.0]]),
]
labels = [torch.tensor([[0]]), torch.tensor([[3]])]
label_weights = [torch.tensor([[1 / 3]]), torch.tensor([[2 / 3]])]
metric_kwargs = {"logits": logits, "labels": labels, "label_weights": label_weights}
desired_accuracy = {"score": (1 / 3) / 2}
run_distributed_test(
[-1, -1],
global_distributed_metric,
VqaMeasure(),
metric_kwargs,
desired_accuracy,
exact=False,
)
def test_distributed_accuracy_unequal_batches(self):
logits = [
torch.tensor([[0.35, 0.25, 0.1, 0.1, 0.2], [0.35, 0.25, 0.1, 0.1, 0.2]]),
torch.tensor([[0.1, 0.6, 0.1, 0.2, 0.0]]),
]
labels = [torch.tensor([[0], [0]]), torch.tensor([[3]])]
label_weights = [torch.tensor([[1], [1]]), torch.tensor([[1 / 3]])]
metric_kwargs = {"logits": logits, "labels": labels, "label_weights": label_weights}
desired_accuracy = {"score": (1 + 1 + 0) / 3}
run_distributed_test(
[-1, -1],
global_distributed_metric,
VqaMeasure(),
metric_kwargs,
desired_accuracy,
exact=False,
)
def test_multiple_distributed_runs(self):
logits = [
torch.tensor([[0.35, 0.25, 0.1, 0.1, 0.2]]),
torch.tensor([[0.1, 0.6, 0.1, 0.2, 0.0]]),
]
labels = [torch.tensor([[0]]), torch.tensor([[3]])]
label_weights = [torch.tensor([[1 / 3]]), torch.tensor([[2 / 3]])]
metric_kwargs = {"logits": logits, "labels": labels, "label_weights": label_weights}
desired_accuracy = {"score": (1 / 3) / 2}
run_distributed_test(
[-1, -1],
global_distributed_metric,
VqaMeasure(),
metric_kwargs,
desired_accuracy,
exact=True,
number_of_runs=200,
)
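# Note on the expected scores above (added commentary, not from the original file):
# an example contributes its label weight to the total when the argmax of its
# logits equals its label, and the metric averages over examples. In test_vqa,
# example 0 is correct with weight 1/3 and example 1 is not, giving
# (1/3 + 0) / 2 == (1 / 3) / 2.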
|
tests/epyccel/test_epyccel_transpose.py | dina-fouad/pyccel | 206 | 38562 | <reponame>dina-fouad/pyccel<filename>tests/epyccel/test_epyccel_transpose.py
# pylint: disable=missing-function-docstring, missing-module-docstring/
from numpy.random import randint
from pyccel.epyccel import epyccel
def test_transpose_shape(language):
def f1(x : 'int[:,:]'):
from numpy import transpose
y = transpose(x)
n, m = y.shape
return n, m, y[-1,0], y[0,-1]
def f2(x : 'int[:,:,:]'):
from numpy import transpose
y = transpose(x)
n, m, p = y.shape
return n, m, p, y[0,-1,0], y[0,0,-1], y[-1,-1,0]
x1 = randint(50, size=(2,5))
x2 = randint(50, size=(2,3,7))
f1_epyc = epyccel(f1, language=language)
assert f1( x1 ) == f1_epyc( x1 )
f2_epyc = epyccel(f2, language=language)
assert f2( x2 ) == f2_epyc( x2 )
def test_transpose_property(language):
def f1(x : 'int[:,:]'):
y = x.T
n, m = y.shape
return n, m, y[-1,0], y[0,-1]
def f2(x : 'int[:,:,:]'):
y = x.T
n, m, p = y.shape
return n, m, p, y[0,-1,0], y[0,0,-1], y[-1,-1,0]
x1 = randint(50, size=(2,5))
x2 = randint(50, size=(2,3,7))
f1_epyc = epyccel(f1, language=language)
assert f1( x1 ) == f1_epyc( x1 )
f2_epyc = epyccel(f2, language=language)
assert f2( x2 ) == f2_epyc( x2 )
def test_transpose_in_expression(language):
def f1(x : 'int[:,:]'):
from numpy import transpose
y = transpose(x)+3
n, m = y.shape
return n, m, y[-1,0], y[0,-1]
def f2(x : 'int[:,:,:]'):
y = x.T*3
n, m, p = y.shape
return n, m, p, y[0,-1,0], y[0,0,-1], y[-1,-1,0]
x1 = randint(50, size=(2,5))
x2 = randint(50, size=(2,3,7))
f1_epyc = epyccel(f1, language=language)
assert f1( x1 ) == f1_epyc( x1 )
f2_epyc = epyccel(f2, language=language)
assert f2( x2 ) == f2_epyc( x2 )
def test_mixed_order(language):
def f1(x : 'int[:,:]'):
from numpy import transpose, ones
n, m = x.shape
y = ones((m,n), order='F')
z = x+transpose(y)
n, m = z.shape
return n, m, z[-1,0], z[0,-1]
def f2(x : 'int[:,:]'):
from numpy import transpose, ones
n, m = x.shape
y = ones((m,n), order='F')
z = x.transpose()+y
n, m = z.shape
return n, m, z[-1,0], z[0,-1]
def f3(x : 'int[:,:,:]'):
from numpy import transpose, ones
n, m, p = x.shape
y = ones((p,m,n))
z = transpose(x)+y
n, m, p = z.shape
return n, m, p, z[0,-1,0], z[0,0,-1], z[-1,-1,0]
x1 = randint(50, size=(2,5))
x2 = randint(50, size=(2,3,7))
f1_epyc = epyccel(f1, language=language)
assert f1( x1 ) == f1_epyc( x1 )
f2_epyc = epyccel(f2, language=language)
assert f2( x1 ) == f2_epyc( x1 )
f3_epyc = epyccel(f3, language=language)
assert f3( x2 ) == f3_epyc( x2 )
def test_transpose_pointer(language):
def f1(x : 'int[:,:]'):
from numpy import transpose
y = transpose(x)
x[0,-1] += 22
n, m = y.shape
return n, m, y[-1,0], y[0,-1]
def f2(x : 'int[:,:,:]'):
y = x.T
x[0,-1,0] += 11
n, m, p = y.shape
return n, m, p, y[0,-1,0], y[0,0,-1], y[-1,-1,0]
x1 = randint(50, size=(2,5))
x1_copy = x1.copy()
x2 = randint(50, size=(2,3,7))
x2_copy = x2.copy()
f1_epyc = epyccel(f1, language=language)
assert f1( x1 ) == f1_epyc( x1_copy )
f2_epyc = epyccel(f2, language=language)
assert f2( x2 ) == f2_epyc( x2_copy )
def test_transpose_of_expression(language):
def f1(x : 'int[:,:]'):
from numpy import transpose
y = transpose(x*2)+3
n, m = y.shape
return n, m, y[-1,0], y[0,-1]
def f2(x : 'int[:,:,:]'):
y = (x*2).T*3
n, m, p = y.shape
return n, m, p, y[0,-1,0], y[0,0,-1], y[-1,-1,0]
x1 = randint(50, size=(2,5))
x2 = randint(50, size=(2,3,7))
f1_epyc = epyccel(f1, language=language)
assert f1( x1 ) == f1_epyc( x1 )
f2_epyc = epyccel(f2, language=language)
assert f2( x2 ) == f2_epyc( x2 )
def test_force_transpose(language):
def f1(x : 'int[:,:]'):
from numpy import transpose, empty
n,m = x.shape
y = empty((m,n))
y[:,:] = transpose(x)
n, m = y.shape
return n, m, y[-1,0], y[0,-1]
def f2(x : 'int[:,:,:]'):
from numpy import empty
n,m,p = x.shape
y = empty((p,m,n))
y[:,:,:] = x.transpose()
n, m, p = y.shape
return n, m, p, y[0,-1,0], y[0,0,-1], y[-1,-1,0]
x1 = randint(50, size=(2,5))
x2 = randint(50, size=(2,3,7))
f1_epyc = epyccel(f1, language=language)
assert f1( x1 ) == f1_epyc( x1 )
f2_epyc = epyccel(f2, language=language)
assert f2( x2 ) == f2_epyc( x2 )
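# Quick reference (added; illustrative, not part of the original test suite): the
# plain NumPy behaviour that the epyccel-compiled functions are compared against.
if __name__ == '__main__':
    import numpy as np
    a = np.arange(10).reshape(2, 5)
    assert a.T.shape == (5, 2)
    assert a.T[4, 0] == a[0, 4]  # transposing swaps the index order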
|
examples/example.py | jakevdp/Mmani | 303 | 38586 | <gh_stars>100-1000
import sys
import numpy as np
import scipy as sp
import scipy.sparse as sparse
from megaman.geometry import Geometry
from sklearn import datasets
from megaman.embedding import (Isomap, LocallyLinearEmbedding,
LTSA, SpectralEmbedding)
# Generate an example data set
N = 10
X, color = datasets.samples_generator.make_s_curve(N, random_state=0)
# Geometry is the main class that will Cache things like distance, affinity, and laplacian.
# you instantiate the Geometry class with the parameters & methods for the three main components:
# Adjacency: an NxN (sparse) pairwise matrix indicating neighborhood regions
# Affinity: an NxN (sparse) pairwise matrix indicating similarity between points
# Laplacian: an NxN (sparse) pairwise matrix containing geometric manifold information
radius = 5
adjacency_method = 'cyflann'
adjacency_kwds = {'radius':radius} # ignore distances above this radius
affinity_method = 'gaussian'
affinity_kwds = {'radius':radius} # A = exp(-||x - y||/radius^2)
laplacian_method = 'geometric'
laplacian_kwds = {'scaling_epps':radius} # scaling ensures convergence to Laplace-Beltrami operator
geom = Geometry(adjacency_method=adjacency_method, adjacency_kwds=adjacency_kwds,
affinity_method=affinity_method, affinity_kwds=affinity_kwds,
laplacian_method=laplacian_method, laplacian_kwds=laplacian_kwds)
# You can/should also use the set_data_matrix, set_adjacency_matrix, set_affinity_matrix
# to send your data set (in whichever form it takes) this way.
geom.set_data_matrix(X)
# You can get the distance, affinity etc with e.g: Geometry.get_distance_matrix()
# you can update the keyword arguments passed initially using these functions
adjacency_matrix = geom.compute_adjacency_matrix()
# by default this is pass-by-reference. Use copy=True to get a copied version.
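# Similarly (illustrative sketch, assuming the same Geometry API as above), the
# affinity and Laplacian matrices can be computed once the data matrix is set:
# affinity_matrix = geom.compute_affinity_matrix()
# laplacian_matrix = geom.compute_laplacian_matrix()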
# If you don't want to pre-compute a Geometry you can pass a dictionary or geometry
# arguments to one of the embedding classes.
geom = {'adjacency_method':adjacency_method, 'adjacency_kwds':adjacency_kwds,
'affinity_method':affinity_method, 'affinity_kwds':affinity_kwds,
'laplacian_method':laplacian_method, 'laplacian_kwds':laplacian_kwds}
# an example follows for creating each embedding into 2 dimensions.
n_components = 2
# LTSA
ltsa =LTSA(n_components=n_components, eigen_solver='arpack',
geom=geom)
embed_ltsa = ltsa.fit_transform(X)
# LLE
lle = LocallyLinearEmbedding(n_components=n_components, eigen_solver='arpack',
geom=geom)
embed_lle = lle.fit_transform(X)
# Isomap
isomap = Isomap(n_components=n_components, eigen_solver='arpack',
geom=geom)
embed_isomap = isomap.fit_transform(X)
# Spectral Embedding
spectral = SpectralEmbedding(n_components=n_components, eigen_solver='arpack',
geom=geom)
embed_spectral = spectral.fit_transform(X)
|
chrome/test/pyautolib/generate_docs.py | nagineni/chromium-crosswalk | 231 | 38593 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import pydoc
import shutil
import sys
def main():
parser = optparse.OptionParser()
parser.add_option('-w', '--write', dest='dir', metavar='FILE',
default=os.path.join(os.getcwd(), 'pyauto_docs'),
help=('Directory path to write all of the documentation. '
'Defaults to "pyauto_docs" in current directory.'))
parser.add_option('-p', '--pyautolib', dest='pyautolib', metavar='FILE',
default=os.getcwd(),
help='Location of pyautolib directory')
(options, args) = parser.parse_args()
if not os.path.isdir(options.dir):
os.makedirs(options.dir)
# Add these paths so pydoc can find everything
sys.path.append(os.path.join(options.pyautolib,
'../../../third_party/'))
sys.path.append(options.pyautolib)
# Get a snapshot of the current directory where pydoc will export the files
previous_contents = set(os.listdir(os.getcwd()))
pydoc.writedocs(options.pyautolib)
current_contents = set(os.listdir(os.getcwd()))
if options.dir == os.getcwd():
print 'Export complete, files are located in %s' % options.dir
return 1
new_files = current_contents.difference(previous_contents)
for file_name in new_files:
basename, extension = os.path.splitext(file_name)
if extension == '.html':
# Build the complete path
full_path = os.path.join(os.getcwd(), file_name)
existing_file_path = os.path.join(options.dir, file_name)
if os.path.isfile(existing_file_path):
os.remove(existing_file_path)
shutil.move(full_path, options.dir)
print 'Export complete, files are located in %s' % options.dir
return 0
if __name__ == '__main__':
sys.exit(main())
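# Example invocation (illustrative; not part of the original script):
#   ./generate_docs.py --pyautolib /path/to/pyautolib --write /tmp/pyauto_docs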
|
d2/detr/__init__.py | reubenwenisch/detr_custom | 8,849 | 38617 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .config import add_detr_config
from .detr import Detr
from .dataset_mapper import DetrDatasetMapper
|
code_legacy/PostfixLogSummary.py | rhymeswithmogul/starttls-everywhere | 339 | 38650 | <filename>code_legacy/PostfixLogSummary.py
#!/usr/bin/env python
import argparse
import collections
import os
import re
import sys
import time
import Config
TIME_FORMAT = "%b %d %H:%M:%S"
# TODO: There's more to be learned from postfix logs! Here's one sample
# observed during failures from the sender vagrant vm:
# Jun 6 00:21:31 precise32 postfix/smtpd[3648]: connect from localhost[127.0.0.1]
# Jun 6 00:21:34 precise32 postfix/smtpd[3648]: lost connection after STARTTLS from localhost[127.0.0.1]
# Jun 6 00:21:34 precise32 postfix/smtpd[3648]: disconnect from localhost[127.0.0.1]
# Jun 6 00:21:56 precise32 postfix/master[3001]: reload -- version 2.9.6, configuration /etc/postfix
# Jun 6 00:22:01 precise32 postfix/pickup[3674]: AF3B6480475: uid=0 from=<root>
# Jun 6 00:22:01 precise32 postfix/cleanup[3680]: AF3B6480475: message-id=<<EMAIL>>
# Jun 6 00:22:01 precise32 postfix/qmgr[3673]: AF3B6480475: from=<<EMAIL>>, size=576, nrcpt=1 (queue active)
# Jun 6 00:22:01 precise32 postfix/smtp[3682]: SSL_connect error to valid-example-recipient.com[192.168.33.7]:25: -1
# Jun 6 00:22:01 precise32 postfix/smtp[3682]: warning: TLS library problem: 3682:error:140740BF:SSL routines:SSL23_CLIENT_HELLO:no protocols available:s23_clnt.c:381:
# Jun 6 00:22:01 precise32 postfix/smtp[3682]: AF3B6480475: to=<<EMAIL>>, relay=valid-example-recipient.com[192.168.33.7]:25, delay=0.06, delays=0.03/0.03/0/0, dsn=4.7.5, status=deferred (Cannot start TLS: handshake failure)
#
# Also:
# Oct 10 19:12:13 sender postfix/smtp[1711]: 62D3F481249: to=<<EMAIL>>, relay=valid-example-recipient.com[192.168.33.7]:25, delay=0.07, delays=0.03/0.01/0.03/0, dsn=4.7.4, status=deferred (TLS is required, but was not offered by host valid-example-recipient.com[192.168.33.7])
def get_counts(input, config, earliest_timestamp):
seen_trusted = False
counts = collections.defaultdict(lambda: collections.defaultdict(int))
tls_deferred = collections.defaultdict(int)
# Typical line looks like:
# Jun 12 06:24:14 sender postfix/smtp[9045]: Untrusted TLS connection established to valid-example-recipient.com[192.168.33.7]:25: TLSv1.1 with cipher AECDH-AES256-SHA (256/256 bits)
  # ([^[]*) <--- any group of characters that is not "["
  # Log lines for when a message is deferred for a TLS-related reason. These
  # indicate a problem that should be alerted on.
deferred_re = re.compile("relay=([^[ ]*).* status=deferred.*TLS")
# Log lines for when a TLS connection was successfully established. These can
# indicate the difference between Untrusted, Trusted, and Verified certs.
connected_re = re.compile("([A-Za-z]+) TLS connection established to ([^[]*)")
mx_to_domain_mapping = config.get_mx_to_domain_policy_map()
timestamp = 0
for line in sys.stdin:
timestamp = time.strptime(line[0:15], TIME_FORMAT)
if timestamp < earliest_timestamp:
continue
deferred = deferred_re.search(line)
connected = connected_re.search(line)
if connected:
validation = connected.group(1)
mx_hostname = connected.group(2).lower()
if validation == "Trusted" or validation == "Verified":
seen_trusted = True
address_domains = config.get_address_domains(mx_hostname, mx_to_domain_mapping)
if address_domains:
domains_str = [ a.domain for a in address_domains ]
d = ', '.join(domains_str)
counts[d][validation] += 1
counts[d]["all"] += 1
elif deferred:
mx_hostname = deferred.group(1).lower()
tls_deferred[mx_hostname] += 1
return (counts, tls_deferred, seen_trusted, timestamp)
def print_summary(counts):
for mx_hostname, validations in counts.items():
for validation, validation_count in validations.items():
if validation == "all":
continue
print mx_hostname, validation, validation_count / validations["all"], "of", validations["all"]
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(description='Detect delivery problems'
' in Postfix log files that may be caused by security policies')
arg_parser.add_argument('-c', action="store_true", dest="cron", default=False)
arg_parser.add_argument("policy_file", nargs='?',
default=os.path.join("examples", "starttls-everywhere.json"),
help="STARTTLS Everywhere policy file")
args = arg_parser.parse_args()
config = Config.Config()
config.load_from_json_file(args.policy_file)
last_timestamp_processed = 0
timestamp_file = '/tmp/starttls-everywhere-last-timestamp-processed.txt'
if os.path.isfile(timestamp_file):
last_timestamp_processed = time.strptime(open(timestamp_file).read(), TIME_FORMAT)
(counts, tls_deferred, seen_trusted, latest_timestamp) = get_counts(sys.stdin, config, last_timestamp_processed)
with open(timestamp_file, "w") as f:
f.write(time.strftime(TIME_FORMAT, latest_timestamp))
# If not running in cron, print an overall summary of log lines seen from known hosts.
if not args.cron:
print_summary(counts)
if not seen_trusted:
print 'No Trusted connections seen! Probably need to install a CAfile.'
if len(tls_deferred) > 0:
print "Some mail was deferred due to TLS problems:"
for (k, v) in tls_deferred.iteritems():
print "%s: %s" % (k, v)
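# Example usage (illustrative; not part of the original script): feed Postfix logs
# on stdin and point the script at a policy file, e.g.
#   cat /var/log/mail.log | python PostfixLogSummary.py examples/starttls-everywhere.json
# Pass -c when running from cron to suppress the per-host summary output.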
|
experiments/plot.py | henrytseng/srcnn | 125 | 38662 | from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
results_dir = Path('results')
results_dir.mkdir(exist_ok=True)
# Performance plot
for scale in [3, 4]:
for test_set in ['Set5', 'Set14']:
time = []
psnr = []
model = []
for save_dir in sorted(Path('.').glob(f'*-sc{scale}')):
if 'bicubic' not in save_dir.stem:
model += [save_dir.stem.rsplit('-', 1)[0].upper()]
metrics_file = save_dir / f'test/{test_set}/metrics.csv'
metrics = pd.read_csv(str(metrics_file), index_col='name')
time += [metrics.time.average]
psnr += [metrics.psnr.average]
plt.figure()
plt.semilogx(time, psnr, '.')
plt.grid(True, which='both')
for x, y, s in zip(time, psnr, model):
if 'NS' in s:
s = s.split('-')[1]
plt.text(x, y, s)
plt.xlabel('Run time (sec)')
plt.ylabel('PSNR (dB)')
plt.title(f'Scale {scale} on {test_set}')
plt.savefig(str(results_dir / f'performance-sc{scale}-{test_set}.png'))
plt.close()
# History plot
for scale in [3, 4]:
plt.figure()
for save_dir in sorted(Path('.').glob(f'*-sc{scale}')):
if 'bicubic' not in save_dir.stem:
model = save_dir.stem.rsplit('-', 1)[0].upper()
history_file = save_dir / f'train/history.csv'
history = pd.read_csv(str(history_file))
plt.plot(history.epoch, history.val_psnr, label=model, alpha=0.8)
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Average test PSNR (dB)')
plt.savefig(str(results_dir / f'history-sc{scale}.png'))
plt.xlim(0, 500)
if scale == 3:
plt.ylim(31.5, 34.5)
if scale == 4:
plt.ylim(29, 32)
plt.savefig(str(results_dir / f'history-sc{scale}-zoom.png'))
plt.close()
|
dni/mlp.py | DingKe/pytorch_workplace | 184 | 38677 | <gh_stars>100-1000
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
# Hyper Parameters
input_size = 784
hidden_size = 256
dni_size = 1024
num_classes = 10
num_epochs = 50
batch_size = 500
learning_rate = 1e-3
use_cuda = torch.cuda.is_available()
# MNIST Dataset
train_dataset = dsets.MNIST(root='../data',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = dsets.MNIST(root='../data',
train=False,
transform=transforms.ToTensor())
# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
class DNI(nn.Module):
def __init__(self, input_size, hidden_size):
super(DNI, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.bn1 = nn.BatchNorm1d(hidden_size)
self.act1 = nn.ReLU()
self.fc2 = nn.Linear(hidden_size, input_size)
def forward(self, x):
out = self.fc1(x)
out = self.bn1(out)
out = self.act1(out)
out = self.fc2(out)
return out
def reset_parameters(self):
super(DNI, self).reset_parameters()
for param in self.fc2.parameters():
param.data.zero_()
dni = DNI(hidden_size, dni_size)
class Net1(nn.Module):
def __init__(self, input_size, hidden_size):
super(Net1, self).__init__()
self.mlp = nn.Sequential(nn.Linear(input_size, hidden_size),
nn.BatchNorm1d(hidden_size),
nn.ReLU())
def forward(self, x):
return self.mlp.forward(x)
net1 = Net1(input_size, hidden_size)
class Net2(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(Net2, self).__init__()
self.mlp = nn.Sequential()
self.mlp.add_module('fc1', nn.Linear(input_size, hidden_size))
self.mlp.add_module('bn1', nn.BatchNorm1d(hidden_size))
self.mlp.add_module('act1', nn.ReLU())
self.mlp.add_module('fc', nn.Linear(hidden_size, num_classes))
def forward(self, x):
return self.mlp.forward(x)
net2 = Net2(hidden_size, hidden_size, num_classes)
# Loss
xent = nn.CrossEntropyLoss()
mse = nn.MSELoss()
# Optimizers
opt_net1 = torch.optim.Adam(net1.parameters(), lr=learning_rate)
opt_net2 = torch.optim.Adam(net2.parameters(), lr=learning_rate)
opt_dni = torch.optim.Adam(dni.parameters(), lr=learning_rate)
if use_cuda:
net1.cuda()
net2.cuda()
dni.cuda()
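# Note (added commentary, not in the original file): in the loop below, h1/h2 are
# detached copies of the hidden activation h. The task loss provides the true
# gradient at h1, the DNI module predicts a synthetic gradient from h2 which is
# backpropagated through net1, and the DNI module itself is trained to regress
# onto that true gradient (regress_loss).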
# Train the Model
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
# Convert torch tensor to Variable
if use_cuda:
images = images.cuda()
labels = labels.cuda()
images = Variable(images.view(-1, 28 * 28))
labels = Variable(labels)
# Forward + Backward + Optimize
opt_net1.zero_grad() # zero the gradient buffer
opt_net2.zero_grad() # zero the gradient buffer
opt_dni.zero_grad() # zero the gradient buffer
# Forward, Stage1
h = net1(images)
h1 = Variable(h.data, requires_grad=True)
h2 = Variable(h.data, requires_grad=False)
# Forward, Stage2
outputs = net2(h1)
# Backward
loss = xent(outputs, labels)
loss.backward()
# Synthetic gradient and backward
grad = dni(h2)
h.backward(grad)
# regress
regress_loss = mse(grad, Variable(h1.grad.data))
regress_loss.backward()
# optimize
opt_net1.step()
opt_net2.step()
opt_dni.step()
if (i + 1) % 100 == 0:
print ('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'
% (epoch + 1, num_epochs, i + 1, len(train_dataset) // batch_size, loss.data[0]))
# Test the Model
correct = 0
total = 0
for images, labels in test_loader:
if use_cuda:
images = images.cuda()
labels = labels.cuda()
images = Variable(images.view(-1, 28 * 28))
outputs = net2(net1(images))
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
print('Accuracy of the network on the 10000 test images: %d %%' %
(100 * correct / total))
|
train-xception.py | jGsch/kaggle-dfdc | 124 | 38679 | <filename>train-xception.py<gh_stars>100-1000
import os
import csv
import shutil
import random
from PIL import Image
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
import xception_conf as config
from model_def import xception
from augmentation_utils import train_transform, val_transform
def save_checkpoint(path, state_dict, epoch=0, arch="", acc1=0):
new_state_dict = {}
for k, v in state_dict.items():
if k.startswith("module."):
k = k[7:]
if torch.is_tensor(v):
v = v.cpu()
new_state_dict[k] = v
torch.save({
"epoch": epoch,
"arch": arch,
"acc1": acc1,
"state_dict": new_state_dict,
}, path)
class DFDCDataset(Dataset):
def __init__(self, data_csv, required_set, data_root="",
ratio=(0.25, 0.05), stable=False, transform=None):
video_info = []
data_list = []
with open(data_csv) as fin:
reader = csv.DictReader(fin)
for row in reader:
if row["set_name"] == required_set:
label = int(row["is_fake"])
n_frame = int(row["n_frame"])
select_frame = round(n_frame * ratio[label])
for sample_idx in range(select_frame):
data_list.append((len(video_info), sample_idx))
video_info.append({
"name": row["name"],
"label": label,
"n_frame": n_frame,
"select_frame": select_frame,
})
self.stable = stable
self.data_root = data_root
self.video_info = video_info
self.data_list = data_list
self.transform = transform
def __getitem__(self, index):
video_idx, sample_idx = self.data_list[index]
info = self.video_info[video_idx]
if self.stable:
frame_idx = info["n_frame"] * sample_idx // info["select_frame"]
else:
frame_idx = random.randint(0, info["n_frame"] - 1)
image_path = os.path.join(self.data_root, info["name"],
"%03d.png" % frame_idx)
try:
img = Image.open(image_path).convert("RGB")
except OSError:
img = np.random.randint(0, 255, (320, 320, 3), dtype=np.uint8)
if self.transform is not None:
# img = self.transform(img)
result = self.transform(image=np.array(img))
img = result["image"]
return img, info["label"]
def __len__(self):
return len(self.data_list)
def main():
torch.backends.cudnn.benchmark = True
train_dataset = DFDCDataset(config.data_list, "train", config.data_root,
transform=train_transform)
val_dataset = DFDCDataset(config.data_list, "val", config.data_root,
transform=val_transform, stable=True)
kwargs = dict(batch_size=config.batch_size, num_workers=config.num_workers,
shuffle=True, pin_memory=True)
train_loader = DataLoader(train_dataset, **kwargs)
val_loader = DataLoader(val_dataset, **kwargs)
# Model initialization
model = xception(num_classes=2, pretrained=None)
if hasattr(config, "resume") and os.path.isfile(config.resume):
ckpt = torch.load(config.resume, map_location="cpu")
start_epoch = ckpt.get("epoch", 0)
best_acc = ckpt.get("acc1", 0.0)
model.load_state_dict(ckpt["state_dict"])
else:
start_epoch = 0
best_acc = 0.0
model = model.cuda()
model = nn.DataParallel(model)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(),
0.01, momentum=0.9, weight_decay=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.2)
os.makedirs(config.save_dir, exist_ok=True)
for epoch in range(config.n_epoches):
if epoch < start_epoch:
scheduler.step()
continue
print("Epoch {}".format(epoch + 1))
model.train()
loss_record = []
acc_record = []
for count, (inputs, labels) in enumerate(train_loader):
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
outputs = model(inputs)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iter_loss = loss.item()
loss_record.append(iter_loss)
preds = torch.argmax(outputs.data, 1)
iter_acc = torch.sum(preds == labels).item() / len(preds)
acc_record.append(iter_acc)
if count and count % 100 == 0:
print("T-Iter %d: loss=%.4f, acc=%.4f"
% (count, iter_loss, iter_acc))
epoch_loss = np.mean(loss_record)
epoch_acc = np.mean(acc_record)
print("Training: loss=%.4f, acc=%.4f" % (epoch_loss, epoch_acc))
model.eval()
loss_record = []
acc_record = []
with torch.no_grad():
for count, (inputs, labels) in enumerate(val_loader):
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
outputs = model(inputs)
preds = torch.argmax(outputs, 1)
loss = criterion(outputs, labels)
iter_loss = loss.item()
loss_record.append(iter_loss)
preds = torch.argmax(outputs.data, 1)
iter_acc = torch.sum(preds == labels).item() / len(preds)
acc_record.append(iter_acc)
if count and count % 100 == 0:
print("V-Iter %d: loss=%.4f, acc=%.4f"
% (count, iter_loss, iter_acc))
epoch_loss = np.mean(loss_record)
epoch_acc = np.mean(acc_record)
print("Validation: loss=%.4f, acc=%.4f" % (epoch_loss, epoch_acc))
scheduler.step()
ckpt_path = os.path.join(config.save_dir, "ckpt-%d.pth" % epoch)
save_checkpoint(
ckpt_path,
model.state_dict(),
epoch=epoch + 1,
acc1=epoch_acc)
if epoch_acc > best_acc:
print("Best accuracy!")
shutil.copy(ckpt_path,
os.path.join(config.save_dir, "best.pth"))
best_acc = epoch_acc
print()
if __name__ == "__main__":
main()
|
corehq/apps/hqadmin/management/commands/static_analysis.py | andyasne/commcare-hq | 471 | 38684 | import os
import re
import subprocess
from collections import Counter
from django.conf import settings
from django.core.management.base import BaseCommand
import datadog
from dimagi.ext.couchdbkit import Document
from corehq.feature_previews import all_previews
from corehq.toggles import all_toggles
class DatadogLogger:
def __init__(self, stdout):
self.stdout = stdout
self.datadog = os.environ.get("TRAVIS_EVENT_TYPE") == 'cron'
if self.datadog:
api_key = os.environ.get("DATADOG_API_KEY")
app_key = os.environ.get("DATADOG_APP_KEY")
assert api_key and app_key, "DATADOG_API_KEY and DATADOG_APP_KEY must both be set"
datadog.initialize(api_key=api_key, app_key=app_key)
self.metrics = []
def log(self, metric, value, tags=None):
self.stdout.write(f"{metric}: {value} {tags or ''}")
if self.datadog:
self.metrics.append({
'metric': metric,
'points': value,
'type': "gauge",
'host': "travis-ci.org",
'tags': [
"environment:travis",
f"travis_build:{os.environ.get('TRAVIS_BUILD_ID')}",
f"travis_number:{os.environ.get('TRAVIS_BUILD_NUMBER')}",
f"travis_job_number:{os.environ.get('TRAVIS_JOB_NUMBER')}",
] + (tags or []),
})
def send_all(self):
if self.datadog:
datadog.api.Metric.send(self.metrics)
self.metrics = []
class Command(BaseCommand):
help = ("Display a variety of code-quality metrics. This is run on every travis "
"build, but only submitted to datadog during the daily cron job.")
def handle(self, **options):
self.stdout.write("----------> Begin Static Analysis <----------")
self.logger = DatadogLogger(self.stdout)
self.show_couch_model_count()
self.show_custom_modules()
self.show_js_dependencies()
self.show_toggles()
self.show_complexity()
self.logger.send_all()
self.stdout.write("----------> End Static Analysis <----------")
def show_couch_model_count(self):
def all_subclasses(cls):
return set(cls.__subclasses__()).union([
s for c in cls.__subclasses__() for s in all_subclasses(c)
])
model_count = len(all_subclasses(Document))
self.logger.log("commcare.static_analysis.couch_model_count", model_count)
def show_custom_modules(self):
custom_module_count = len(set(settings.DOMAIN_MODULE_MAP.values()))
custom_domain_count = len(settings.DOMAIN_MODULE_MAP)
self.logger.log("commcare.static_analysis.custom_module_count", custom_module_count)
self.logger.log("commcare.static_analysis.custom_domain_count", custom_domain_count)
def show_js_dependencies(self):
proc = subprocess.Popen(["./scripts/codechecks/hqDefine.sh", "static-analysis"], stdout=subprocess.PIPE)
output = proc.communicate()[0].strip().decode("utf-8")
(step1, step2, step3) = output.split(" ")
self.logger.log("commcare.static_analysis.hqdefine_file_count", int(step1), tags=[
'status:unmigrated',
])
self.logger.log("commcare.static_analysis.hqdefine_file_count", int(step2), tags=[
'status:hqdefine_only',
])
self.logger.log("commcare.static_analysis.requirejs_file_count", int(step3), tags=[
'status:migrated',
])
def show_toggles(self):
counts = Counter(t.tag.name for t in all_toggles() + all_previews())
for tag, count in counts.items():
self.logger.log("commcare.static_analysis.toggle_count", count, [f"toggle_tag:{tag}"])
def show_complexity(self):
# We can use `--json` for more granularity, but it doesn't provide a summary
output = subprocess.run([
"radon", "cc", ".",
"--min=C",
"--total-average",
"--exclude=node_modules/*,staticfiles/*",
], stdout=subprocess.PIPE).stdout.decode('utf-8').strip()
raw_blocks, raw_complexity = output.split('\n')[-2:]
blocks_pattern = r'^(\d+) blocks \(classes, functions, methods\) analyzed.$'
blocks = int(re.match(blocks_pattern, raw_blocks).group(1))
self.logger.log("commcare.static_analysis.code_blocks", blocks)
complexity_pattern = r'^Average complexity: A \(([\d.]+)\)$'
complexity = round(float(re.match(complexity_pattern, raw_complexity).group(1)), 3)
self.logger.log("commcare.static_analysis.avg_complexity", complexity)
for grade in ["C", "D", "E", "F"]:
count = len(re.findall(f" - {grade}\n", output))
self.logger.log(
"commcare.static_analysis.complex_block_count",
count,
tags=[f"complexity_grade:{grade}"],
)
|
scripts/artifacts/installedappsGass.py | Krypterry/ALEAPP | 187 | 38720 | import sqlite3
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, open_sqlite_db_readonly
def get_installedappsGass(files_found, report_folder, seeker, wrap_text):
for file_found in files_found:
file_found = str(file_found)
if file_found.endswith('.db'):
db = open_sqlite_db_readonly(file_found)
cursor = db.cursor()
cursor.execute('''
SELECT
distinct(package_name)
FROM
app_info
''')
if 'user' in file_found:
usernum = file_found.split("/")
usernum = '_'+str(usernum[-4])
else:
usernum = ''
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
report = ArtifactHtmlReport('Installed Apps')
report.start_artifact_report(report_folder, f'Installed Apps (GMS){usernum}')
report.add_script()
data_headers = ('Bundle ID',) # Don't remove the comma, that is required to make this a tuple as there is only 1 element
data_list = []
for row in all_rows:
data_list.append((row[0],))
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'installed apps - GMS{usernum}'
tsv(report_folder, data_headers, data_list, tsvname)
else:
                logfunc(f'No Installed Apps data available{usernum}')
db.close()
|
RecoVertex/BeamSpotProducer/scripts/copyFromCastor.py | ckamtsikis/cmssw | 852 | 38797 | #!/usr/bin/env python
import sys,os,commands
from CommonMethods import *
def main():
if len(sys.argv) < 3:
error = "Usage: cpFromCastor fromDir toDir (optional filter)"
exit(error)
user = os.getenv("USER")
castorDir = "/castor/cern.ch/cms/store/caf/user/" + user + "/" + sys.argv[1] + "/"
filter = ""
if len(sys.argv) > 3:
filter = sys.argv[3]
fileList = ls(castorDir,filter)
destDir = sys.argv[2]
copiedFiles = cp(castorDir,destDir,fileList)
if len(copiedFiles) != len(fileList):
error = "ERROR: I couldn't copy all files from castor"
exit(error)
if __name__ == "__main__":
main()
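# Example invocation (illustrative; the directory names are hypothetical):
#   ./copyFromCastor.py BeamSpotResults /tmp/beamspot txt
# argv[1] is the subdirectory under the user's CAF castor area, argv[2] is the
# local destination, and the optional third argument filters file names.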
|
mmfashion/apis/test_fashion_recommender.py | RyanJiang0416/mmfashion | 952 | 38803 | <reponame>RyanJiang0416/mmfashion<gh_stars>100-1000
from __future__ import division
import torch
from mmcv.parallel import MMDataParallel
from ..datasets import build_dataloader
from .env import get_root_logger
def test_fashion_recommender(model,
dataset,
cfg,
distributed=False,
validate=False,
logger=None):
if logger is None:
logger = get_root_logger(cfg.log_level)
# start testing predictor
if distributed: # to do
_dist_test(model, dataset, cfg, validate=validate)
else:
_non_dist_test(model, dataset, cfg, validate=validate)
def _process_embeds(dataset, model, cfg):
data_loader = build_dataloader(
dataset,
cfg.data.imgs_per_gpu,
cfg.data.workers_per_gpu,
len(cfg.gpus.test),
dist=False,
shuffle=False)
print('dataloader built')
embeds = []
with torch.no_grad():
for data in data_loader:
embed = model(data['img'], return_loss=False)
embeds.append(embed.data.cpu())
embeds = torch.cat(embeds)
return embeds
def _non_dist_test(model, dataset, cfg, validate=False):
model = MMDataParallel(model, device_ids=cfg.gpus.test).cuda()
model.eval()
embeds = _process_embeds(dataset, model, cfg)
metric = model.module.triplet_net.metric_branch
# compatibility auc
auc = dataset.test_compatibility(embeds, metric)
# fill-in-blank accuracy
acc = dataset.test_fitb(embeds, metric)
print('Compat AUC: {:.2f} FITB: {:.1f}\n'.format(
round(auc, 2), round(acc * 100, 1)))
def _dist_test(model, dataset, cfg, validate=False):
raise NotImplementedError
|
context_cache/context_cache.py | tervay/the-blue-alliance | 266 | 38850 | from google.appengine.ext import ndb
CACHE_DATA = {}
def get(cache_key):
full_cache_key = '{}:{}'.format(cache_key, ndb.get_context().__hash__())
return CACHE_DATA.get(full_cache_key, None)
def set(cache_key, value):
full_cache_key = '{}:{}'.format(cache_key, ndb.get_context().__hash__())
CACHE_DATA[full_cache_key] = value
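# Illustrative usage (added; the key/value below are hypothetical): entries are
# scoped to the current ndb context, so repeated lookups within one request reuse
# the in-process dict.
#   set('event_2019cc', event)
#   event = get('event_2019cc')  # same request -> cached value, otherwise None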
|
src/beanmachine/applications/hme/interface.py | horizon-blue/beanmachine-1 | 177 | 38854 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import pandas as pd
from .configs import InferConfig, ModelConfig
from .null_mixture_model import NullMixtureMixedEffectModel
class HME:
"""The Hierarchical Mixed Effect model interface.
:param data: observed train data
:param model_config: HME model configuration parameters
"""
def __init__(self, data: pd.DataFrame, model_config: ModelConfig) -> None:
self.model = NullMixtureMixedEffectModel(data, model_config)
self.posterior_samples = None
self.posterior_diagnostics = None
def infer(self, infer_config: InferConfig) -> Tuple[pd.DataFrame]:
"""Performs MCMC posterior inference on HME model parameters and
returns MCMC samples for those parameters registered in the query.
:param infer_config: configuration settings of posterior inference
:return: posterior samples and their diagnostic summary statistics
"""
self.posterior_samples, self.posterior_diagnostics = self.model.infer(
infer_config
)
return self.posterior_samples, self.posterior_diagnostics
def predict(self, new_data: pd.DataFrame) -> pd.DataFrame:
"""Computes predictive distributions on the new test data according to
MCMC posterior samples.
:param new_data: test data for prediction
:return: predictive distributions on the new test data
"""
return self.model.predict(new_data, self.posterior_samples)
|
tests/utils/test_string_utils.py | PeterSulcs/mlflow | 10,351 | 38858 | <filename>tests/utils/test_string_utils.py
import pytest
from mlflow.utils.string_utils import strip_prefix, strip_suffix, is_string_type
@pytest.mark.parametrize(
"original,prefix,expected",
[("smoketest", "smoke", "test"), ("", "test", ""), ("", "", ""), ("test", "", "test")],
)
def test_strip_prefix(original, prefix, expected):
assert strip_prefix(original, prefix) == expected
@pytest.mark.parametrize(
"original,suffix,expected",
[("smoketest", "test", "smoke"), ("", "test", ""), ("", "", ""), ("test", "", "test")],
)
def test_strip_suffix(original, suffix, expected):
assert strip_suffix(original, suffix) == expected
def test_is_string_type():
assert is_string_type("validstring")
assert is_string_type("")
assert is_string_type((b"dog").decode("utf-8"))
assert not is_string_type(None)
assert not is_string_type(["teststring"])
assert not is_string_type([])
assert not is_string_type({})
assert not is_string_type({"test": "string"})
assert not is_string_type(12)
assert not is_string_type(12.7)
|
tests/data/samplecfg.py | glebshevchukk/gradslam | 1,048 | 38867 | import os
import sys
from gradslam.config import CfgNode as CN
cfg = CN()
cfg.TRAIN = CN()
cfg.TRAIN.HYPERPARAM_1 = 0.9
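# Illustrative usage (added; module path assumed from the file location):
#   from tests.data.samplecfg import cfg
#   assert cfg.TRAIN.HYPERPARAM_1 == 0.9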
|
tests/chainer_tests/functions_tests/math_tests/test_linear_interpolate.py | zaltoprofen/chainer | 3,705 | 38902 | <reponame>zaltoprofen/chainer
import numpy
from chainer import functions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'shape': [(3, 4), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestLinearInterpolate(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({
'atol': 1e-3, 'rtol': 1e-3})
self.check_backward_options.update({
'atol': 5e-4, 'rtol': 5e-3})
self.check_double_backward_options.update({
'atol': 5e-3, 'rtol': 5e-2})
def generate_inputs(self):
p = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
y = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return p, x, y
def forward(self, inputs, device):
p, x, y = inputs
ret = functions.linear_interpolate(p, x, y)
ret = functions.cast(ret, numpy.float64)
return ret,
def forward_expected(self, inputs):
p, x, y = inputs
expected = p * x + (1 - p) * y
expected = utils.force_array(expected, dtype=numpy.float64)
return expected,
testing.run_module(__name__, __file__)
|
regtests/list/slice.py | ahakingdom/Rusthon | 622 | 38934 | <reponame>ahakingdom/Rusthon
from runtime import *
"""list slice"""
class XXX:
def __init__(self):
self.v = range(10)
def method(self, a):
return a
def main():
a = range(10)[:-5]
assert( len(a)==5 )
assert( a[4]==4 )
print '--------'
b = range(10)[::2]
print b
assert( len(b)==5 )
assert( b[0]==0 )
assert( b[1]==2 )
assert( b[2]==4 )
assert( b[3]==6 )
assert( b[4]==8 )
#if BACKEND=='DART':
# print(b[...])
#else:
# print(b)
c = range(20)
d = c[ len(b) : ]
#if BACKEND=='DART':
# print(d[...])
#else:
# print(d)
assert( len(d)==15 )
x = XXX()
e = x.v[ len(b) : ]
assert( len(e)==5 )
f = x.method( x.v[len(b):] )
assert( len(f)==5 )
main()
|
selenium/load-html-from-string-instead-of-url/main.py | whitmans-max/python-examples | 140 | 38948 | #!/usr/bin/env python3
# date: 2019.11.24
import selenium.webdriver
driver = selenium.webdriver.Firefox()
html_content = """
<div class=div1>
<ul>
<li>
<a href='path/to/div1stuff/1'>Generic string 1</a>
<a href='path/to/div1stuff/2'>Generic string 2</a>
<a href='path/to/div1stuff/3'>Generic string 3</a>
</li>
</ul>
</div>
<div class=div2>
<ul>
<li>
<a href='path/to/div2stuff/1'>Generic string 1</a>
<a href='path/to/div2stuff/2'>Generic string 2</a>
<a href='path/to/div2stuff/3'>Generic string 3</a>
</li>
</ul>
</div>
"""
driver.get("data:text/html;charset=utf-8," + html_content)
elements = driver.find_elements_by_css_selector("div.div2 a")
for x in elements:
print(x.get_attribute('href'))
item = driver.find_element_by_xpath("//div[@class='div2']//a[contains(text(),'Generic string 2')]")
print(item.get_attribute('href'))
item.click()
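# Note (added commentary): the "data:text/html;charset=utf-8," URL lets Selenium
# render an in-memory HTML string directly, so selectors can be exercised against
# fixed markup without serving a page from a web server.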
|
gluoncv/data/video_custom/__init__.py | Kh4L/gluon-cv | 5,447 | 38951 | # pylint: disable=wildcard-import
"""
Customized data loader for video classification related tasks.
"""
from __future__ import absolute_import
from .classification import *
|
src/cowrie/output/csirtg.py | uwacyber/cowrie | 2,316 | 38997 | from __future__ import annotations
import os
from datetime import datetime
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
token = CowrieConfig.get("output_csirtg", "token", fallback="<PASSWORD>")
if token == "<PASSWORD>":
log.msg("output_csirtg: token not found in configuration file")
exit(1)
os.environ["CSIRTG_TOKEN"] = token
import csirtgsdk # noqa: E402
class Output(cowrie.core.output.Output):
"""
CSIRTG output
"""
def start(self):
"""
Start the output module.
Note that csirtsdk is imported here because it reads CSIRTG_TOKEN on import
Cowrie sets this environment variable.
"""
self.user = CowrieConfig.get("output_csirtg", "username")
self.feed = CowrieConfig.get("output_csirtg", "feed")
self.debug = CowrieConfig.getboolean("output_csirtg", "debug", fallback=False)
self.description = CowrieConfig.get("output_csirtg", "description")
self.context = {}
# self.client = csirtgsdk.client.Client()
def stop(self):
pass
def write(self, e):
"""
Only pass on connection events
"""
if e["eventid"] == "cowrie.session.connect":
self.submitIp(e)
def submitIp(self, e):
peerIP = e["src_ip"]
ts = e["timestamp"]
system = e.get("system", None)
if system not in [
"cowrie.ssh.factory.CowrieSSHFactory",
"cowrie.telnet.transport.HoneyPotTelnetFactory",
]:
return
today = str(datetime.now().date())
if not self.context.get(today):
self.context = {}
self.context[today] = set()
key = ",".join([peerIP, system])
if key in self.context[today]:
return
self.context[today].add(key)
tags = "scanner,ssh"
port = 22
if e["system"] == "cowrie.telnet.transport.HoneyPotTelnetFactory":
tags = "scanner,telnet"
port = 23
i = {
"user": self.user,
"feed": self.feed,
"indicator": peerIP,
"portlist": port,
"protocol": "tcp",
"tags": tags,
"firsttime": ts,
"lasttime": ts,
"description": self.description,
}
if self.debug is True:
log.msg(f"output_csirtg: Submitting {i!r} to CSIRTG")
ind = csirtgsdk.indicator.Indicator(i).submit()
if self.debug is True:
log.msg(f"output_csirtg: Submitted {ind!r} to CSIRTG")
log.msg("output_csirtg: submitted to csirtg at {} ".format(ind["location"]))
|
main/api/fields.py | lipis/gae-init-magic | 465 | 39012 | <gh_stars>100-1000
# coding: utf-8
import urllib
from flask_restful import fields
from flask_restful.fields import *
class BlobKey(fields.Raw):
def format(self, value):
return urllib.quote(str(value))
class Blob(fields.Raw):
def format(self, value):
return repr(value)
class DateTime(fields.DateTime):
def format(self, value):
return value.isoformat()
class GeoPt(fields.Raw):
def format(self, value):
return '%s,%s' % (value.lat, value.lon)
class Id(fields.Raw):
def output(self, key, obj):
try:
value = getattr(obj, 'key', None).id()
return super(Id, self).output(key, {'id': value})
except AttributeError:
return None
class Integer(fields.Integer):
def format(self, value):
if value > 9007199254740992 or value < -9007199254740992:
return str(value)
return value
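# Note on Integer.format above (added commentary): 9007199254740992 is 2**53.
# IEEE-754 doubles (JavaScript numbers) represent integers exactly only up to
# 2**53, so larger magnitudes are serialized as strings to avoid precision loss
# in JSON clients.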
class Key(fields.Raw):
def format(self, value):
return value.urlsafe()
|
xero/filesmanager.py | Ian2020/pyxero | 246 | 39015 | <filename>xero/filesmanager.py
from __future__ import unicode_literals
import os
import requests
from six.moves.urllib.parse import parse_qs
from .constants import XERO_FILES_URL
from .exceptions import (
XeroBadRequest,
XeroExceptionUnknown,
XeroForbidden,
XeroInternalError,
XeroNotAvailable,
XeroNotFound,
XeroNotImplemented,
XeroRateLimitExceeded,
XeroUnauthorized,
XeroUnsupportedMediaType,
)
class FilesManager(object):
DECORATED_METHODS = (
"get",
"all",
"create",
"save",
"delete",
"get_files",
"upload_file",
"get_association",
"get_associations",
"make_association",
"delete_association",
"get_content",
)
def __init__(self, name, credentials):
self.credentials = credentials
self.name = name
self.base_url = credentials.base_url + XERO_FILES_URL
for method_name in self.DECORATED_METHODS:
method = getattr(self, "_%s" % method_name)
setattr(self, method_name, self._get_data(method))
def _get_results(self, data):
response = data["Response"]
if self.name in response:
result = response[self.name]
elif "Attachments" in response:
result = response["Attachments"]
else:
return None
if isinstance(result, tuple) or isinstance(result, list):
return result
if isinstance(result, dict) and self.singular in result:
return result[self.singular]
def _get_data(self, func):
""" This is the decorator for our DECORATED_METHODS.
Each of the decorated methods must return:
uri, params, method, body, headers, singleobject
"""
def wrapper(*args, **kwargs):
uri, params, method, body, headers, singleobject, files = func(
*args, **kwargs
)
response = getattr(requests, method)(
uri,
data=body,
headers=headers,
auth=self.credentials.oauth,
params=params,
files=files,
)
if response.status_code == 200 or response.status_code == 201:
if response.headers["content-type"].startswith("application/json"):
return response.json()
else:
# return a byte string without doing any Unicode conversions
return response.content
# Delete will return a response code of 204 - No Content
elif response.status_code == 204:
return "Deleted"
elif response.status_code == 400:
raise XeroBadRequest(response)
elif response.status_code == 401:
raise XeroUnauthorized(response)
elif response.status_code == 403:
raise XeroForbidden(response)
elif response.status_code == 404:
raise XeroNotFound(response)
elif response.status_code == 415:
raise XeroUnsupportedMediaType(response)
elif response.status_code == 500:
raise XeroInternalError(response)
elif response.status_code == 501:
raise XeroNotImplemented(response)
elif response.status_code == 503:
# Two 503 responses are possible. Rate limit errors
# return encoded content; offline errors don't.
# If you parse the response text and there's nothing
# encoded, it must be a not-available error.
payload = parse_qs(response.text)
if payload:
raise XeroRateLimitExceeded(response, payload)
else:
raise XeroNotAvailable(response)
else:
raise XeroExceptionUnknown(response)
return wrapper
def _get(self, id, headers=None):
uri = "/".join([self.base_url, self.name, id])
return uri, {}, "get", None, headers, True, None
def _get_files(self, folderId):
"""Retrieve the list of files contained in a folder"""
uri = "/".join([self.base_url, self.name, folderId, "Files"])
return uri, {}, "get", None, None, False, None
def _get_associations(self, id):
uri = "/".join([self.base_url, self.name, id, "Associations"]) + "/"
return uri, {}, "get", None, None, False, None
def _get_association(self, fileId, objectId):
uri = "/".join([self.base_url, self.name, fileId, "Associations", objectId])
return uri, {}, "get", None, None, False, None
def _delete_association(self, fileId, objectId):
uri = "/".join([self.base_url, self.name, fileId, "Associations", objectId])
return uri, {}, "delete", None, None, False, None
def create_or_save(self, data, method="post", headers=None, summarize_errors=True):
if "Id" not in data:
uri = "/".join([self.base_url, self.name])
else:
uri = "/".join([self.base_url, self.name, data["Id"]])
body = data
if summarize_errors:
params = {}
else:
params = {"summarizeErrors": "false"}
return uri, params, method, body, headers, False, None
def _create(self, data):
return self.create_or_save(data, method="post")
def _save(self, data, summarize_errors=True):
return self.create_or_save(
data, method="put", summarize_errors=summarize_errors
)
def _delete(self, id):
uri = "/".join([self.base_url, self.name, id])
return uri, {}, "delete", None, None, False, None
def _upload_file(self, path, folderId=None):
if folderId is not None:
uri = "/".join([self.base_url, self.name, folderId])
else:
uri = "/".join([self.base_url, self.name])
filename = self.filename(path)
files = dict()
files[filename] = open(path, mode="rb")
return uri, {}, "post", None, None, False, files
def _get_content(self, fileId):
uri = "/".join([self.base_url, self.name, fileId, "Content"])
return uri, {}, "get", None, None, False, None
def _make_association(self, id, data):
uri = "/".join([self.base_url, self.name, id, "Associations"])
body = data
return uri, {}, "post", body, None, False, None
def _all(self):
uri = "/".join([self.base_url, self.name])
return uri, {}, "get", None, None, False, None
def filename(self, path):
head, tail = os.path.split(path)
return tail or os.path.basename(head)
|
hermione/module_templates/__IMPLEMENTED_BASE__/src/ml/preprocessing/preprocessing.py | RodrigoATorres/hermione | 183 | 39030 | <filename>hermione/module_templates/__IMPLEMENTED_BASE__/src/ml/preprocessing/preprocessing.py
import pandas as pd
from ml.preprocessing.normalization import Normalizer
from category_encoders import *
import logging
logging.getLogger().setLevel(logging.INFO)
class Preprocessing:
"""
Class to perform data preprocessing before training
"""
def clean_data(self, df: pd.DataFrame):
"""
Perform data cleansing.
Parameters
----------
df : pd.Dataframe
Dataframe to be processed
Returns
-------
pd.Dataframe
Cleaned Data Frame
"""
logging.info("Cleaning data")
df_copy = df.copy()
df_copy['Pclass'] = df_copy.Pclass.astype('object')
df_copy = df_copy.dropna()
return df_copy
def categ_encoding(self, df: pd.DataFrame):
"""
Perform encoding of the categorical variables
Parameters
----------
df : pd.Dataframe
Dataframe to be processed
Returns
-------
pd.Dataframe
Cleaned Data Frame
"""
logging.info("Category encoding")
df_copy = df.copy()
df_copy = pd.get_dummies(df_copy)
return df_copy
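# Illustrative usage (added; df_train is a hypothetical input DataFrame):
#   p = Preprocessing()
#   df_clean = p.clean_data(df_train)       # casts Pclass to object and drops NaNs
#   df_model = p.categ_encoding(df_clean)   # one-hot encodes the categorical columns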
|
DevTools/lineCount.py | spiiin/CadEditor | 164 | 39032 | #!/usr/bin/env python2
#Script for calculate LoC of all source files of project
import os,string
import sys
extension_list = ['h','hpp','cpp','c','pas','dpr','asm','py','q3asm','def','sh','bat','cs','java','cl','lisp','ui',"nut"]
comment_sims = {'asm' : ';', 'py' : '#', 'cl':';','lisp':';'}
source_files = { }
exclude_names = ["libs", "release", ".git"]
if len(sys.argv)!=2:
print "You must call script as 'lineCount.py <path_to_folder>'"
raw_input()
exit(-1)
path = sys.argv[1]
files_count = 0
def calc_files_count (arg,dirname, names):
global files_count
files_count+=len(names)
def calc_strings_count(arg, dirname, names):
#print "%32s"%dirname
if any(dirname.lower().find(exclude_name) != -1 for exclude_name in exclude_names):
return
for name in names:
full_name = os.path.join(dirname,name)
file_name,file_ext = os.path.splitext(full_name)
file_ext = file_ext[1:].lower()
if comment_sims.has_key(file_ext):
comment_sim = comment_sims[file_ext]
else:
comment_sim = "//"
if file_ext in extension_list:
#.designer.cs files don't count
if file_name.lower().find(".designer") != -1:
continue
f = file(full_name)
file_text = f.readlines()
empty_lines_count = 0
comment_lines = 0
for line in file_text :
line_without_spaces = line.lstrip(string.whitespace)
if line_without_spaces=="":
empty_lines_count += 1
elif line_without_spaces.startswith(comment_sim):
comment_lines +=1
source_files[full_name]= {"full" : len(file_text) ,"empty" :empty_lines_count, "comment":comment_lines}
f.close()
def calc(path_root):
os.path.walk(path_root,calc_files_count,0)
print "Found : %4i files"%files_count
print ""
#calculate line count
os.path.walk(path_root,calc_strings_count,0)
#convert to list and sort
lst = source_files.items()
lst.sort(key = lambda (key, val): val["full"])
strings_count=0
empty_lines_count=0
comment_lines_count=0
for name,val in lst:
l_f,l_e,l_c = val["full"],val["empty"],val["comment"]
dummy,short_name = os.path.split(name)
print "%-36s : %5i (%i/%i/%i)"%(short_name,l_f, l_f-l_c-l_e,l_c,l_e )
strings_count+=l_f
empty_lines_count+=l_e
comment_lines_count+=l_c
print "\nformat -\nfilename : full_lines_count (code_lines_count/comments_count/empty_lines_count)"
print 24*"-"
print "Found : %4i files"%files_count
print "Summary : %4i lines"%strings_count
print "Code : %4i lines"%(strings_count - comment_lines_count - empty_lines_count)
print "Comments: %4i lines"%comment_lines_count
print "Empty : %4i lines"%empty_lines_count
print 24*"-"
print "%s %s %s"%( "="*24, "================", "="*24)
print "%-24s %s %24s"%( "="*3, "Spiiin LineCounter", "="*3)
print "%s %s %s"%( "="*24, "================", "="*24)
calc(path)
raw_input() |
doc2json/jats2json/pmc_utils/back_tag_utils.py | josephcc/s2orc-doc2json | 132 | 39081 | <reponame>josephcc/s2orc-doc2json
from typing import Dict, List
def _wrap_text(tag):
return tag.text if tag else ''
def parse_authors(authors_tag) -> List:
"""The PMC XML has a slightly different format than authors listed in front tag."""
if not authors_tag:
return []
authors = []
for name_tag in authors_tag.find_all('name', recursive=False):
surname = name_tag.find('surname')
given_names = name_tag.find('given-names')
given_names = given_names.text.split(' ') if given_names else None
suffix = name_tag.find('suffix')
authors.append({
'first': given_names[0] if given_names else '',
'middle': given_names[1:] if given_names else [],
'last': surname.text if surname else '',
'suffix': suffix.text if suffix else ''
})
return authors
def parse_bib_entries(back_tag) -> Dict:
bib_entries = {}
# TODO: PMC2778891 does not have 'ref-list' in its back_tag. do we even need this, or can directly .find_all('ref')?
ref_list_tag = back_tag.find('ref-list')
if ref_list_tag:
for ref_tag in ref_list_tag.find_all('ref'):
# The ref ID and label are semantically swapped between CORD-19 and PMC, lol
ref_label = ref_tag['id']
ref_id = ref_tag.find('label')
authors_tag = ref_tag.find('person-group', {'person-group-type': 'author'})
year = ref_tag.find('year')
fpage = ref_tag.find('fpage')
lpage = ref_tag.find('lpage')
pages = f'{fpage.text}-{lpage.text}' if fpage and lpage else None
dois = [tag.text for tag in ref_tag.find_all('pub-id', {'pub-id-type': 'doi'})]
bib_entries[ref_label] = {
'ref_id': _wrap_text(ref_id),
'title': _wrap_text(ref_tag.find('article-title')),
'authors': parse_authors(authors_tag),
'year': int(year.text) if year and year.text.isdigit() else None,
'venue': _wrap_text(ref_tag.find('source')),
'volume': _wrap_text(ref_tag.find('volume')),
'issn': _wrap_text(ref_tag.find('issue')),
'pages': pages,
'other_ids': {
'DOI': dois,
}
}
return bib_entries |
matrixprofile/io/protobuf/protobuf_utils.py | MORE-EU/matrixprofile | 262 | 39103 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatibility boilerplate
import numpy as np
from matrixprofile import core
from matrixprofile.io.protobuf.proto_messages_pb2 import (
Location, Motif, MPFOutput
)
def get_matrix_attributes(matrix):
"""
Utility function to extract the rows, cols and flattened array from a
numpy array so it can be stored in the MPFOutput protobuf message.
Parameters
----------
matrix : np.ndarray
The numpy array to extract the attributes from.
Returns
-------
tuple :
A tuple containing the rows, cols and flattened array.
"""
if not core.is_array_like(matrix) or len(matrix) < 1:
return None, None, None
rows = matrix.shape[0]
cols = 0
if len(matrix.shape) > 1:
cols = matrix.shape[1]
return rows, cols, matrix.flatten()
def get_windows(profile):
"""
Utility function to format the windows from a profile structure ensuring
that the windows are in an array.
Parameters
----------
profile : dict
The MatrixProfile or PMP profile.
Returns
-------
list :
The window(s) in a list.
"""
windows = []
if core.is_mp_obj(profile):
windows.append(profile.get('w'))
elif core.is_pmp_obj(profile):
windows = profile.get('windows')
return windows
def get_proto_motif(motif):
"""
Utility function to convert a motif from a MatrixProfile or PMP structure
ensuring that it is compatible with the MPFOutput message.
Note
----
A single dimensional motif location will only have a row index and
a column index of 0.
Parameters
----------
motif : dict
The motif to convert.
Returns
-------
Motif :
The motif object for MPFOutput message.
"""
out_motif = Motif()
for indices in motif['motifs']:
tmp = Location()
tmp.row = 0
tmp.col = 0
# handle single integer location
if core.is_array_like(indices):
tmp.row = indices[0]
tmp.col = indices[1]
else:
tmp.row = indices
out_motif.motifs.append(tmp)
for neighbor in motif['neighbors']:
tmp = Location()
tmp.row = 0
tmp.col = 0
# handle single integer location
if core.is_array_like(neighbor):
tmp.row = neighbor[0]
tmp.col = neighbor[1]
else:
tmp.row = neighbor
out_motif.neighbors.append(tmp)
return out_motif
def get_proto_discord(discord):
"""
Utility function to convert a discord into the MPFOutput message
format.
Note
----
A single dimensional discord location will only have a row index and
a column index of 0.
Parameters
----------
discord : int or tuple
The discord with row, col index or single index.
Returns
-------
Location :
The Location message used in the MPFOutput protobuf message.
"""
out_discord = Location()
out_discord.row = 0
out_discord.col = 0
if core.is_array_like(discord):
out_discord.row = discord[0]
out_discord.col = discord[1]
else:
out_discord.row = discord
return out_discord
def profile_to_proto(profile):
"""
Utility function that takes a MatrixProfile or PMP profile data structure
and converts it to the MPFOutput protobuf message object.
Parameters
----------
profile : dict
The profile to convert.
Returns
-------
MPFOutput :
The MPFOutput protobuf message object.
"""
output = MPFOutput()
# add higher level attributes that work for PMP and MP
output.klass = profile.get('class')
output.algorithm = profile.get('algorithm')
output.metric = profile.get('metric')
output.sample_pct = profile.get('sample_pct')
# add time series data
ts = profile.get('data').get('ts')
query = profile.get('data').get('query')
rows, cols, data = get_matrix_attributes(ts)
output.ts.rows = rows
output.ts.cols = cols
output.ts.data.extend(data)
# add query data
query = profile.get('data').get('query')
rows, cols, data = get_matrix_attributes(query)
if rows and cols and core.is_array_like(data):
output.query.rows = rows
output.query.cols = cols
output.query.data.extend(data)
# add window(s)
output.windows.extend(get_windows(profile))
# add motifs
motifs = profile.get('motifs')
if not isinstance(motifs, type(None)):
for motif in motifs:
output.motifs.append(get_proto_motif(motif))
# add discords
discords = profile.get('discords')
if not isinstance(discords, type(None)):
for discord in discords:
output.discords.append(get_proto_discord(discord))
# add cmp
cmp = profile.get('cmp')
if not isinstance(cmp, type(None)):
rows, cols, data = get_matrix_attributes(cmp)
output.cmp.rows = rows
output.cmp.cols = cols
output.cmp.data.extend(data)
# add av
av = profile.get('av')
if not isinstance(av, type(None)):
rows, cols, data = get_matrix_attributes(av)
output.av.rows = rows
output.av.cols = cols
output.av.data.extend(data)
# add av_type
av_type = profile.get('av_type')
if not isinstance(av_type, type(None)) and len(av_type) > 0:
output.av_type = av_type
# add the matrix profile specific attributes
if core.is_mp_obj(profile):
output.mp.ez = profile.get('ez')
output.mp.join = profile.get('join')
# add mp
rows, cols, data = get_matrix_attributes(profile.get('mp'))
output.mp.mp.rows = rows
output.mp.mp.cols = cols
output.mp.mp.data.extend(data)
# add pi
rows, cols, data = get_matrix_attributes(profile.get('pi'))
output.mp.pi.rows = rows
output.mp.pi.cols = cols
output.mp.pi.data.extend(data)
# add lmp
rows, cols, data = get_matrix_attributes(profile.get('lmp'))
if rows and cols and core.is_array_like(data):
output.mp.lmp.rows = rows
output.mp.lmp.cols = cols
output.mp.lmp.data.extend(data)
# add lpi
rows, cols, data = get_matrix_attributes(profile.get('lpi'))
if rows and cols and core.is_array_like(data):
output.mp.lpi.rows = rows
output.mp.lpi.cols = cols
output.mp.lpi.data.extend(data)
# add rmp
rows, cols, data = get_matrix_attributes(profile.get('rmp'))
if rows and cols and core.is_array_like(data):
output.mp.rmp.rows = rows
output.mp.rmp.cols = cols
output.mp.rmp.data.extend(data)
# add rpi
rows, cols, data = get_matrix_attributes(profile.get('rpi'))
if rows and cols and core.is_array_like(data):
output.mp.rpi.rows = rows
output.mp.rpi.cols = cols
output.mp.rpi.data.extend(data)
# add the pan matrix profile specific attributes
elif core.is_pmp_obj(profile):
# add pmp
rows, cols, data = get_matrix_attributes(profile.get('pmp'))
output.pmp.pmp.rows = rows
output.pmp.pmp.cols = cols
output.pmp.pmp.data.extend(data)
# add pmpi
rows, cols, data = get_matrix_attributes(profile.get('pmpi'))
output.pmp.pmpi.rows = rows
output.pmp.pmpi.cols = cols
output.pmp.pmpi.data.extend(data)
else:
raise ValueError('Expecting Pan-MatrixProfile or MatrixProfile!')
return output
def to_mpf(profile):
"""
Converts a given profile object into MPF binary file format.
Parameters
----------
profile : dict_like
A MatrixProfile or Pan-MatrixProfile data structure.
Returns
-------
str :
The profile as a binary formatted string.
"""
obj = profile_to_proto(profile)
return obj.SerializeToString()
def from_proto_to_array(value):
"""
Utility function to convert a protobuf array back into the correct
dimensions.
Parameters
----------
value : array_like
The array to transform.
Returns
-------
np.ndarray :
The transformed array.
"""
if isinstance(value, type(None)) or len(value.data) < 1:
return None
shape = (value.rows, value.cols)
out = np.array(value.data)
if shape[1] > 0:
out = out.reshape(shape)
return out
def discords_from_proto(discords, is_one_dimensional=False):
"""
Utility function to transform discord locations back to single dimension
or multi-dimension location.
    Parameters
    ----------
discords : array_like
The protobuf formatted array.
is_one_dimensional : boolean
A flag to indicate if the original locations should be 1D.
Returns
-------
np.ndarray :
The transformed discord locations.
"""
out = []
for discord in discords:
if is_one_dimensional:
out.append(discord.row)
else:
out.append((discord.row, discord.col))
return np.array(out, dtype=int)
def motifs_from_proto(motifs, is_one_dimensional=False):
"""
Utility function to transform motif locations back to single dimension
or multi-dimension location.
    Parameters
    ----------
motifs : array_like
The protobuf formatted array.
is_one_dimensional : boolean
A flag to indicate if the original locations should be 1D.
Returns
-------
list :
The transformed motif locations.
"""
out = []
for motif in motifs:
tmp = {'motifs': [], 'neighbors': []}
for location in motif.motifs:
if is_one_dimensional:
tmp['motifs'].append(location.row)
else:
tmp['motifs'].append((location.row, location.col))
for neighbor in motif.neighbors:
if is_one_dimensional:
tmp['neighbors'].append(neighbor.row)
else:
tmp['neighbors'].append((neighbor.row, neighbor.col))
out.append(tmp)
return out
def from_mpf(profile):
"""
Converts binary formatted MPFOutput message into a profile data structure.
Parameters
----------
profile : str
The profile as a binary formatted MPFOutput message.
Returns
-------
profile : dict_like
A MatrixProfile or Pan-MatrixProfile data structure.
"""
obj = MPFOutput()
obj.ParseFromString(profile)
out = {}
is_one_dimensional = False
# load in all higher level attributes
out['class'] = obj.klass
out['algorithm'] = obj.algorithm
out['metric'] = obj.metric
out['sample_pct'] = obj.sample_pct
out['data'] = {
'ts': from_proto_to_array(obj.ts),
'query': from_proto_to_array(obj.query)
}
if obj.klass == 'MatrixProfile':
out['mp'] = from_proto_to_array(obj.mp.mp)
out['pi'] = from_proto_to_array(obj.mp.pi)
out['lmp'] = from_proto_to_array(obj.mp.lmp)
out['lpi'] = from_proto_to_array(obj.mp.lpi)
out['rmp'] = from_proto_to_array(obj.mp.rmp)
out['rpi'] = from_proto_to_array(obj.mp.rpi)
out['ez'] = obj.mp.ez
out['join'] = obj.mp.join
out['w'] = obj.windows[0]
is_one_dimensional = len(out['mp'].shape) == 1
elif obj.klass == 'PMP':
out['pmp'] = from_proto_to_array(obj.pmp.pmp)
out['pmpi'] = from_proto_to_array(obj.pmp.pmpi)
out['windows'] = np.array(obj.windows)
if not isinstance(obj.discords, type(None)) and len(obj.discords) > 0:
out['discords'] = discords_from_proto(
obj.discords, is_one_dimensional=is_one_dimensional)
if not isinstance(obj.motifs, type(None)) and len(obj.motifs) > 0:
out['motifs'] = motifs_from_proto(
obj.motifs, is_one_dimensional=is_one_dimensional)
if not isinstance(obj.cmp, type(None)) and len(obj.cmp.data) > 0:
out['cmp'] = from_proto_to_array(obj.cmp)
if not isinstance(obj.av, type(None)) and len(obj.av.data) > 0:
out['av'] = from_proto_to_array(obj.av)
if not isinstance(obj.av_type, type(None)) and len(obj.av_type) > 0:
out['av_type'] = obj.av_type
return out
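# Round-trip sketch (not part of the original module): serialize a computed
# profile to MPF bytes and parse it back. `matrixprofile.compute` and the
# window size used here are assumptions for illustration only.
if __name__ == '__main__':
    import matrixprofile as mp

    ts = np.random.uniform(size=1024)
    profile = mp.compute(ts, windows=32)

    restored = from_mpf(to_mpf(profile))
    print(restored['class'], restored['w'], restored['mp'].shape)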
|
moe/optimal_learning/python/interfaces/domain_interface.py | misokg/Cornell-MOE | 218 | 39133 | # -*- coding: utf-8 -*-
"""Interface for a domain: in/out test, random point generation, and update limiting (for constrained optimization)."""
from builtins import object
from abc import ABCMeta, abstractmethod, abstractproperty
from future.utils import with_metaclass
class DomainInterface(with_metaclass(ABCMeta, object)):
"""Interface for a domain: in/out test, random point generation, and update limiting (for constrained optimization)."""
@abstractproperty
def dim(self):
"""Return the number of spatial dimensions."""
pass
@abstractmethod
def check_point_inside(self, point):
r"""Check if a point is inside the domain/on its boundary or outside.
:param point: point to check
:type point: array of float64 with shape (dim)
:return: true if point is inside the domain
:rtype: bool
"""
pass
@abstractmethod
def get_bounding_box(self):
"""Return a list of ClosedIntervals representing a bounding box for this domain."""
pass
@abstractmethod
def get_constraint_list(self):
"""Return a list of lambda functions expressing the domain bounds as linear constraints. Used by COBYLA.
:return: a list of lambda functions corresponding to constraints
:rtype: array of lambda functions with shape (dim * 2)
"""
pass
@abstractmethod
def generate_random_point_in_domain(self, random_source=None):
"""Generate ``point`` uniformly at random such that ``self.check_point_inside(point)`` is True.
.. Note:: if you need multiple points, use generate_uniform_random_points_in_domain instead;
          depending on implementation, it may yield better distributions over many points. For example,
tensor product type domains use latin hypercube sampling instead of repeated random draws
which guarantees that no non-uniform clusters may arise (in subspaces) versus this method
which treats all draws independently.
:return: point in domain
:rtype: array of float64 with shape (dim)
"""
pass
@abstractmethod
def generate_uniform_random_points_in_domain(self, num_points, random_source):
r"""Generate AT MOST ``num_points`` uniformly distributed points from the domain.
.. NOTE::
The number of points returned may be LESS THAN ``num_points``!
Implementations may use rejection sampling. In such cases, generating the requested
number of points may be unreasonably slow, so implementers are allowed to generate
fewer than ``num_points`` results.
:param num_points: max number of points to generate
:type num_points: int >= 0
:param random_source:
:type random_source: callable yielding uniform random numbers in [0,1]
:return: uniform random sampling of points from the domain; may be fewer than ``num_points``!
:rtype: array of float64 with shape (num_points_generated, dim)
"""
pass
@abstractmethod
def compute_update_restricted_to_domain(self, max_relative_change, current_point, update_vector):
r"""Compute a new update so that CheckPointInside(``current_point`` + ``new_update``) is true.
Changes new_update_vector so that:
``point_new = point + new_update_vector``
has coordinates such that ``CheckPointInside(point_new)`` returns true.
``new_update_vector`` is a function of ``update_vector``.
``new_update_vector`` is just a copy of ``update_vector`` if ``current_point`` is already inside the domain.
.. NOTE::
We modify update_vector (instead of returning point_new) so that further update
limiting/testing may be performed.
:param max_relative_change: max change allowed per update (as a relative fraction of current distance to boundary)
:type max_relative_change: float64 in (0, 1]
:param current_point: starting point
:type current_point: array of float64 with shape (dim)
:param update_vector: proposed update
:type update_vector: array of float64 with shape (dim)
:return: new update so that the final point remains inside the domain
:rtype: array of float64 with shape (dim)
"""
pass
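# Minimal concrete sketch (not part of MOE): an axis-aligned unit hypercube
# implementing this interface. ClosedInterval is replaced by plain (min, max)
# tuples, numpy is used for sampling, and max_relative_change is ignored for
# brevity; all of these are simplifying assumptions.
import numpy


class UnitHypercubeDomain(DomainInterface):

    def __init__(self, dim):
        self._dim = dim

    @property
    def dim(self):
        return self._dim

    def check_point_inside(self, point):
        point = numpy.asarray(point)
        return bool(numpy.all((point >= 0.0) & (point <= 1.0)))

    def get_bounding_box(self):
        return [(0.0, 1.0)] * self._dim

    def get_constraint_list(self):
        constraints = []
        for i in range(self._dim):
            constraints.append(lambda x, i=i: x[i])        # x_i >= 0
            constraints.append(lambda x, i=i: 1.0 - x[i])  # x_i <= 1
        return constraints

    def generate_random_point_in_domain(self, random_source=None):
        return numpy.random.uniform(0.0, 1.0, size=self._dim)

    def generate_uniform_random_points_in_domain(self, num_points, random_source=None):
        return numpy.random.uniform(0.0, 1.0, size=(num_points, self._dim))

    def compute_update_restricted_to_domain(self, max_relative_change, current_point, update_vector):
        # Halve the proposed step until the updated point stays inside the box.
        current_point = numpy.asarray(current_point)
        update = numpy.asarray(update_vector, dtype=float)
        for _ in range(64):
            if self.check_point_inside(current_point + update):
                break
            update = update * 0.5
        return update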
|
RecoPPS/Local/python/ctppsDiamondLocalReconstruction_cff.py | ckamtsikis/cmssw | 852 | 39143 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
# reco hit production
from RecoPPS.Local.ctppsDiamondRecHits_cfi import ctppsDiamondRecHits
# local track fitting
from RecoPPS.Local.ctppsDiamondLocalTracks_cfi import ctppsDiamondLocalTracks
ctppsDiamondLocalReconstructionTask = cms.Task(
ctppsDiamondRecHits,
ctppsDiamondLocalTracks
)
ctppsDiamondLocalReconstruction = cms.Sequence(ctppsDiamondLocalReconstructionTask)
|
text/opencv_dnn_detect.py | kingemma/invoice | 1,017 | 39174 | <filename>text/opencv_dnn_detect.py
from config import yoloCfg,yoloWeights,opencvFlag
from config import AngleModelPb,AngleModelPbtxt
from config import IMGSIZE
from PIL import Image
import numpy as np
import cv2
if opencvFlag=='keras':
    ## load the frozen TensorFlow graph so the model can run on the GPU
import tensorflow as tf
from tensorflow.python.platform import gfile
config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=config)
with gfile.FastGFile(AngleModelPb, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sess.graph.as_default()
tf.import_graph_def(graph_def, name='')
inputImg = sess.graph.get_tensor_by_name('input_1:0')
predictions = sess.graph.get_tensor_by_name('predictions/Softmax:0')
keep_prob = tf.placeholder(tf.float32)
else:
    angleNet = cv2.dnn.readNetFromTensorflow(AngleModelPb,AngleModelPbtxt)  ## OpenCV DNN model for text orientation detection
    textNet = cv2.dnn.readNetFromDarknet(yoloCfg,yoloWeights)  ## YOLO model for text localization
def text_detect(img):
thresh=0
h,w = img.shape[:2]
    inputBlob = cv2.dnn.blobFromImage(img, scalefactor=0.00390625, size=IMGSIZE, swapRB=True, crop=False)
textNet.setInput(inputBlob)
pred = textNet.forward()
cx = pred[:,0]*w
cy = pred[:,1]*h
xmin = cx - pred[:,2]*w/2
xmax = cx + pred[:,2]*w/2
ymin = cy - pred[:,3]*h/2
ymax = cy + pred[:,3]*h/2
scores = pred[:,4]
indx = np.where(scores>thresh)[0]
scores = scores[indx]
boxes = np.array(list(zip(xmin[indx],ymin[indx],xmax[indx],ymax[indx])))
return boxes,scores
def angle_detect_dnn(img,adjust=True):
"""
    Detect the text orientation (returns 0, 90, 180 or 270 degrees).
"""
h,w = img.shape[:2]
ROTATE = [0,90,180,270]
if adjust:
thesh = 0.05
xmin,ymin,xmax,ymax = int(thesh*w),int(thesh*h),w-int(thesh*w),h-int(thesh*h)
        img = img[ymin:ymax, xmin:xmax]  ## trim the image borders
inputBlob = cv2.dnn.blobFromImage(img,
scalefactor=1.0,
size=(224, 224),
                                      swapRB=True,
                                      mean=[103.939, 116.779, 123.68], crop=False)
angleNet.setInput(inputBlob)
pred = angleNet.forward()
index = np.argmax(pred,axis=1)[0]
return ROTATE[index]
def angle_detect_tf(img,adjust=True):
"""
    Detect the text orientation (returns 0, 90, 180 or 270 degrees).
"""
h,w = img.shape[:2]
ROTATE = [0,90,180,270]
if adjust:
thesh = 0.05
xmin,ymin,xmax,ymax = int(thesh*w),int(thesh*h),w-int(thesh*w),h-int(thesh*h)
        img = img[ymin:ymax, xmin:xmax]  ## trim the image borders
img = cv2.resize(img,(224,224))
img = img[..., ::-1].astype(np.float32)
img[..., 0] -= 103.939
img[..., 1] -= 116.779
img[..., 2] -= 123.68
img = np.array([img])
out = sess.run(predictions, feed_dict={inputImg: img,
keep_prob: 0
})
index = np.argmax(out,axis=1)[0]
return ROTATE[index]
def angle_detect(img,adjust=True):
"""
    Detect the text orientation, dispatching to the Keras/TF or OpenCV DNN backend.
"""
if opencvFlag=='keras':
return angle_detect_tf(img,adjust=adjust)
else:
return angle_detect_dnn(img,adjust=adjust) |
016 3Sum Closest.py | ChiFire/legend_LeetCode | 872 | 39211 | <filename>016 3Sum Closest.py<gh_stars>100-1000
"""
Given an array S of n integers, find three integers in S such that the sum is closest to a given number, target. Return
the sum of the three integers. You may assume that each input would have exactly one solution.
For example, given array S = {-1 2 1 -4}, and target = 1.
The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).
"""
__author__ = 'Danyang'
class Solution:
    def threeSumClosest(self, num, target):
        """
        Three-pointer scanning algorithm
        Similar to 014 3Sum
        :param num: array of integers
        :param target: target sum
        :return: sum of the three integers closest to target
        """
        num.sort()
        min_distance = 1 << 32
        min_summation = 0
        for i, val in enumerate(num):
            j = i + 1
            k = len(num) - 1
            while j < k:
                summation = val + num[j] + num[k]
                if summation == target:
                    return summation
                if abs(target - summation) < min_distance:
                    min_distance = abs(target - summation)
                    min_summation = summation
                if summation > target:
                    k -= 1
                else:
                    j += 1
        return min_summation
if __name__=="__main__":
    print(Solution().threeSumClosest([1, 1, 1, 1], 0))
|
tests/spec/cms/blogs/test_blogs.py | fakepop/hubspot-api-python | 117 | 39227 | <reponame>fakepop/hubspot-api-python
from hubspot import HubSpot
from hubspot.discovery.cms.blogs.discovery import Discovery
def test_is_discoverable():
apis = HubSpot().cms
assert isinstance(apis.blogs, Discovery)
|
notebooks/pixel_cnn/pixelcnn_helpers.py | bjlkeng/sandbox | 158 | 39256 | <gh_stars>100-1000
import math
import numpy as np
from keras import backend as K
from keras.layers import Conv2D, Concatenate, Activation, Add
from keras.engine import InputSpec
def logsoftmax(x):
''' Numerically stable log(softmax(x)) '''
m = K.max(x, axis=-1, keepdims=True)
return x - m - K.log(K.sum(K.exp(x - m), axis=-1, keepdims=True))
def pixelcnn_loss(target, output, img_rows, img_cols, img_chns, n_components):
''' Keras PixelCNN loss function. Use a lambda to fill in the last few
parameters
Args:
img_rows, img_cols, img_chns: image dimensions
n_components: number of mixture components
Returns:
log-loss
'''
assert img_chns == 3
# Extract out each of the mixture parameters (multiple of 3 b/c of image channels)
output_m = output[:, :, :, :3*n_components]
output_invs = output[:, :, :, 3*n_components:6*n_components]
output_logit_weights = output[:, :, :, 6*(n_components):]
# Repeat the target to match the number of mixture component shapes
x = K.reshape(target, (-1, img_rows, img_cols, img_chns))
slices = []
for c in range(img_chns):
slices += [x[:, :, :, c:c+1]] * n_components
x = K.concatenate(slices, axis=-1)
x_decoded_m = output_m
x_decoded_invs = output_invs
x_logit_weights = output_logit_weights
# Pixels rescaled to be in [-1, 1] interval
offset = 1. / 127.5 / 2.
centered_mean = x - x_decoded_m
cdfminus_arg = (centered_mean - offset) * K.exp(x_decoded_invs)
cdfplus_arg = (centered_mean + offset) * K.exp(x_decoded_invs)
cdfminus_safe = K.sigmoid(cdfminus_arg)
cdfplus_safe = K.sigmoid(cdfplus_arg)
# Generate the PDF (logistic) in case the `m` is way off (cdf is too small)
# pdf = e^(-(x-m)/s) / {s(1 + e^{-(x-m)/s})^2}
# logpdf = -(x-m)/s - log s - 2 * log(1 + e^(-(x-m)/s))
# = -mid_in - invs - 2 * softplus(-mid_in)
mid_in = centered_mean * K.exp(x_decoded_invs)
log_pdf_mid = -mid_in - x_decoded_invs - 2. * K.tf.nn.softplus(-mid_in)
# Use trick from PixelCNN++ implementation to protect against edge/overflow cases
# In extreme cases (cdfplus_safe - cdf_minus_safe < 1e-5), use the
# log_pdf_mid and assume that density is 1 pixel width wide (1/127.5) as
# the density: log(pdf * 1/127.5) = log(pdf) - log(127.5)
# Add on line of best fit (see notebooks/blog post) to the difference between
# edge case and the standard case
edge_case = log_pdf_mid - np.log(127.5) + 2.04 * x_decoded_invs - 0.107
# ln (sigmoid(x)) = x - ln(e^x + 1) = x - softplus(x)
# ln (1 - sigmoid(x)) = ln(1 / (1 + e^x)) = -softplus(x)
log_cdfplus = cdfplus_arg - K.tf.nn.softplus(cdfplus_arg)
log_1minus_cdf = -K.tf.nn.softplus(cdfminus_arg)
log_ll = K.tf.where(x <= -0.999, log_cdfplus,
K.tf.where(x >= 0.999, log_1minus_cdf,
K.tf.where(cdfplus_safe - cdfminus_safe > 1e-5,
K.log(K.maximum(cdfplus_safe - cdfminus_safe, 1e-12)),
edge_case)))
# x_weights * [sigma(x+0.5...) - sigma(x-0.5 ...) ]
# = log x_weights + log (...)
# Compute log(softmax(.)) directly here, instead of doing 2-step to avoid overflow
pre_result = logsoftmax(x_logit_weights) + log_ll
result = []
for chn in range(img_chns):
chn_result = pre_result[:, :, :, chn*n_components:(chn+1)*n_components]
v = K.logsumexp(chn_result, axis=-1)
result.append(v)
result = K.batch_flatten(K.stack(result, axis=-1))
return -K.sum(result, axis=-1)
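# Hedged helper (not in the original notebook): Keras losses take (y_true,
# y_pred), so the remaining arguments of pixelcnn_loss are bound up front.
# The 32x32x3 image shape and 10 components in the example are assumptions.
def make_pixelcnn_loss(img_rows, img_cols, img_chns, n_components):
    """Return a Keras-compatible closure over pixelcnn_loss."""
    return lambda target, output: pixelcnn_loss(
        target, output, img_rows, img_cols, img_chns, n_components)
# e.g. model.compile(optimizer='adam', loss=make_pixelcnn_loss(32, 32, 3, 10))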
def sigmoid(x):
# Protect overflow
if x < -20:
return 0.0
elif x > 20:
return 1.0
return 1 / (1 + math.exp(-x))
def logistic_cdf(x, loc, scale):
return sigmoid((x - loc) / scale)
def compute_pvals(m, invs):
pvals = []
for i in range(256):
if i == 0:
pval = logistic_cdf((0.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs))
elif i == 255:
pval = 1. - logistic_cdf((254.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs))
else:
pval = (logistic_cdf((i + 0.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs))
- logistic_cdf((i - 0.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs)))
pvals.append(pval)
return pvals
def compute_mixture(ms, invs, weights, n_comps):
components = []
for i in range(n_comps):
pvals = compute_pvals(ms[i], invs[i])
arr = np.array(pvals)
components.append(weights[i] * arr)
return np.sum(components, axis=0)
class PixelConv2D(Conv2D):
def __init__(self, ptype, *args, **kwargs):
# ptype corresponds to pixel type and mask type, e.g. ra, ga, ba, rb, gb, bb
assert ptype[0] in ['r', 'g', 'b'], ptype
assert ptype[1] in ['a', 'b'], ptype
self.ptype = ptype
super(PixelConv2D, self).__init__(*args, **kwargs)
def build_mask(self, kernel_shape):
# kernel_shape = kern_dim x kern_dim x total_filters
# = kern_dim x kern_dim x r_g_b_filters x filters_per_channel
assert kernel_shape[0] == kernel_shape[1], \
"{} must be equal in first two dims".format(kernel_shape)
assert kernel_shape[0] % 2 == 1, \
"{} must be odd size in first two dims".format(kernel_shape)
assert kernel_shape[2] % 3 == 0, \
"{} must be divisible by 3".format(kernel_shape)
data = np.ones(kernel_shape)
data.shape
mid = data.shape[0] // 2
if self.ptype[0] == 'r':
filt_prev = 0
filt_thres = int(data.shape[2] / 3)
elif self.ptype[0] == 'g':
filt_prev = int(data.shape[2] / 3)
filt_thres = int(2 * data.shape[2] / 3)
else:
assert self.ptype[0] == 'b', self.ptype
filt_prev = int(2 * data.shape[2] / 3)
filt_thres = data.shape[2]
for k1 in range(data.shape[0]):
for k2 in range(data.shape[1]):
for chan in range(data.shape[2]):
if (self.ptype[1] == 'a'
and filt_prev <= chan < filt_thres
and k1 == mid and k2 == mid):
# Handle the only difference between 'a' and 'b' ptypes
data[k1, k2, chan, :] = 0
elif k1 > mid or (k1 >= mid and k2 > mid) or chan >= filt_thres:
# Turn off anything:
                        # a) Below the current pixel
                        # b) Past the current pixel (scanning left to right, top to bottom)
# c) In a later filter
data[k1, k2, chan, :] = 0
return K.constant(np.ravel(data), dtype='float32', shape=kernel_shape)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel_mask = self.build_mask(kernel_shape)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.filters,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
masked_kernel = self.kernel * self.kernel_mask
outputs = K.conv2d(
inputs,
masked_kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def conv_block(input_tensor, filters, kernel_size, name, is_first=False):
outs = []
for t in ['rb', 'gb', 'bb']:
if is_first:
t = t[0] + 'a'
x = PixelConv2D(t, filters, kernel_size,
name='res' + name + t, padding='same')(input_tensor)
x = Activation('relu')(x)
outs.append(x)
return Concatenate()(outs)
def resnet_block(input_tensor, filters, stage, block, kernel=3):
name_base = str(stage) + block + '_branch'
filters1, filters2, filters3 = filters
x = input_tensor
x = conv_block(x, filters1, (1, 1), name=name_base + '_a-1x1')
x = conv_block(x, filters2, (kernel, kernel),
name=name_base + '_b-{}x{}'.format(kernel, kernel))
x = conv_block(x, filters3, (1, 1), name=name_base + '_c-1x1')
x = Add()([x, input_tensor])
return x
def final_block(input_tensor, filters, in_filters, name, kernel_size=(1, 1)):
outs = []
for t in ['rb', 'gb', 'bb']:
x = PixelConv2D(t, filters, kernel_size,
name='final' + name + '_' + t,
padding='same')(input_tensor)
x = Activation('relu')(x)
outs.append(x)
return Concatenate()(outs)
|
terrascript/data/logicmonitor.py | hugovk/python-terrascript | 507 | 39260 | # terrascript/data/logicmonitor.py
import terrascript
class logicmonitor_collectors(terrascript.Data):
pass
class logicmonitor_dashboard(terrascript.Data):
pass
class logicmonitor_dashboard_group(terrascript.Data):
pass
class logicmonitor_device_group(terrascript.Data):
pass
__all__ = [
"logicmonitor_collectors",
"logicmonitor_dashboard",
"logicmonitor_dashboard_group",
"logicmonitor_device_group",
]
|
api/v2/serializers/fields/identity.py | simpsonw/atmosphere | 197 | 39292 | <reponame>simpsonw/atmosphere
from rest_framework import exceptions, serializers
from api.v2.serializers.summaries import IdentitySummarySerializer
from core.models import Identity
class IdentityRelatedField(serializers.RelatedField):
def get_queryset(self):
return Identity.objects.all()
def to_representation(self, identity):
serializer = IdentitySummarySerializer(identity, context=self.context)
return serializer.data
def to_internal_value(self, data):
queryset = self.get_queryset()
if isinstance(data, dict):
identity = data.get("id", None)
else:
identity = data
try:
return queryset.get(id=identity)
except:
raise exceptions.ValidationError(
"Identity with id '%s' does not exist." % identity
)
|
opps/core/tags/views.py | jeanmask/opps | 159 | 39311 | <reponame>jeanmask/opps
# -*- encoding: utf-8 -*-
from django.utils import timezone
from django.contrib.sites.models import get_current_site
from django.conf import settings
from haystack.query import SearchQuerySet
from opps.views.generic.list import ListView
from opps.containers.models import Container
from opps.channels.models import Channel
from .models import Tag
USE_HAYSTACK = getattr(settings, 'OPPS_TAGS_USE_HAYSTACK', False)
class TagList(ListView):
model = Container
def get_template_list(self, domain_folder="containers"):
templates = []
list_name = 'list_tags'
if self.request.GET.get('page') and\
self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP:
templates.append('{0}/{1}_paginated.html'.format(domain_folder,
list_name))
templates.append('{0}/{1}.html'.format(domain_folder, list_name))
return templates
def get_context_data(self, **kwargs):
context = super(TagList, self).get_context_data(**kwargs)
context['tag'] = self.kwargs['tag']
site = get_current_site(self.request)
context['channel'] = Channel.objects.get_homepage(site)
return context
def get_queryset(self):
self.site = get_current_site(self.request)
# without the long_slug, the queryset will cause an error
self.long_slug = 'tags'
self.tag = self.kwargs['tag']
if USE_HAYSTACK:
return self.get_queryset_from_haystack()
return self.get_queryset_from_db()
def get_queryset_from_haystack(self):
models = Container.get_children_models()
sqs = SearchQuerySet().models(*models).filter(
tags=self.tag).order_by('-date_available')
sqs.model = Container
return sqs
def get_queryset_from_db(self):
tags = Tag.objects.filter(slug=self.tag).values_list('name') or []
tags_names = []
if tags:
tags_names = [i[0] for i in tags]
ids = []
for tag in tags_names:
result = self.containers = self.model.objects.filter(
site_domain=self.site,
tags__contains=tag,
date_available__lte=timezone.now(),
published=True
)
if result.exists():
ids.extend([i.id for i in result])
# remove the repeated
ids = list(set(ids))
# grab the containers
self.containers = self.model.objects.filter(id__in=ids)
return self.containers
|
aiida/cmdline/groups/__init__.py | aiidateam/aiida_core | 153 | 39332 | <filename>aiida/cmdline/groups/__init__.py
# -*- coding: utf-8 -*-
"""Module with custom implementations of :class:`click.Group`."""
# AUTO-GENERATED
# yapf: disable
# pylint: disable=wildcard-import
from .dynamic import *
from .verdi import *
__all__ = (
'DynamicEntryPointCommandGroup',
'VerdiCommandGroup',
)
# yapf: enable
|
src/storage-preview/azext_storage_preview/tests/latest/test_storage_file_scenarios.py | haroonf/azure-cli-extensions | 207 | 39336 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
from azure.cli.testsdk import (ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest)
from ..storage_test_util import StorageScenarioMixin
class StorageFileShareScenarios(StorageScenarioMixin, ScenarioTest):
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_storage_file_upload_small_file_v2(self, resource_group, storage_account_info):
account_info = storage_account_info
share_name = self.create_share(account_info)
curr_dir = os.path.dirname(os.path.realpath(__file__))
local_file = os.path.join(curr_dir, 'upload_file').replace('\\', '\\\\')
local_file_name = 'upload_file'
self.storage_cmd('storage file upload -s {} --source "{}" '
'--content-cache-control no-cache '
'--content-disposition attachment '
'--content-encoding compress '
'--content-language en-US '
'--content-type "multipart/form-data;" '
'--metadata key=val ', account_info, share_name, local_file)
self.storage_cmd('storage file show -s {} -p "{}"', account_info, share_name, local_file_name) \
.assert_with_checks(JMESPathCheck('name', local_file_name),
JMESPathCheck('properties.contentSettings.cacheControl', 'no-cache'),
JMESPathCheck('properties.contentSettings.contentDisposition', 'attachment'),
JMESPathCheck('properties.contentSettings.contentEncoding', 'compress'),
JMESPathCheck('properties.contentSettings.contentLanguage', 'en-US'),
JMESPathCheck('properties.contentSettings.contentType', 'multipart/form-data;'),
JMESPathCheck('metadata', {'key': 'val'}))
dest_dir = 'dest_dir'
from azure.core.exceptions import ResourceNotFoundError
with self.assertRaises(ResourceNotFoundError):
self.storage_cmd('storage file upload -s {} --source "{}" -p {}',
account_info, share_name, local_file, dest_dir)
self.storage_cmd('storage directory create -s {} -n {}', account_info, share_name, dest_dir)
self.storage_cmd('storage file upload -s {} --source "{}" -p {}',
account_info, share_name, local_file, dest_dir)
self.storage_cmd('storage file show -s {} -p "{}"', account_info, share_name, dest_dir + '/' + local_file_name) \
.assert_with_checks(JMESPathCheck('name', local_file_name))
dest_file = 'dest_file.json'
self.storage_cmd('storage file upload -s {} --source "{}" -p {}',
account_info, share_name, local_file, dest_file)
self.storage_cmd('storage file show -s {} -p "{}"', account_info, share_name, dest_file) \
.assert_with_checks(JMESPathCheck('name', dest_file))
dest_path = dest_dir + '/' + dest_file
self.storage_cmd('storage file upload -s {} --source "{}" -p {}',
account_info, share_name, local_file, dest_path)
self.storage_cmd('storage file show -s {} -p "{}"', account_info, share_name, dest_path) \
.assert_with_checks(JMESPathCheck('name', dest_file))
sub_deep_path = dest_dir + '/' + 'sub_dir'
self.storage_cmd('storage directory create -s {} -n {}', account_info, share_name, sub_deep_path)
self.storage_cmd('storage file upload -s {} --source "{}" -p {}',
account_info, share_name, local_file, sub_deep_path)
self.storage_cmd('storage file show -s {} -p "{}"', account_info, share_name,
sub_deep_path + '/' + local_file_name). \
assert_with_checks(JMESPathCheck('name', local_file_name))
sub_deep_file = sub_deep_path + '/' + dest_file
self.storage_cmd('storage file upload -s {} --source "{}" -p {}',
account_info, share_name, local_file, sub_deep_file)
self.storage_cmd('storage file show -s {} -p "{}"', account_info, share_name,
sub_deep_file).assert_with_checks(JMESPathCheck('name', dest_file))
|
plugins/lucid/ui/explorer.py | gaasedelen/lucid | 342 | 39359 | import ctypes
import ida_ida
import ida_funcs
import ida_graph
import ida_idaapi
import ida_kernwin
import ida_hexrays
from PyQt5 import QtWidgets, QtGui, QtCore, sip
from lucid.ui.sync import MicroCursorHighlight
from lucid.ui.subtree import MicroSubtreeView
from lucid.util.python import register_callback, notify_callback
from lucid.util.hexrays import get_microcode, get_mmat, get_mmat_name, get_mmat_levels
from lucid.microtext import MicrocodeText, MicroInstructionToken, MicroOperandToken, AddressToken, BlockNumberToken, translate_mtext_position, remap_mtext_position
#------------------------------------------------------------------------------
# Microcode Explorer
#------------------------------------------------------------------------------
#
# The Microcode Explorer UI is mostly implemented following a standard
# Model-View-Controller pattern. This is a little abnormal for Qt, but
# I've come to appreciate it more for its portability and testability.
#
class MicrocodeExplorer(object):
"""
The controller component of the microcode explorer.
The role of the controller is to handle user gestures, map user actions to
model updates, and change views based on controls. In theory, the
controller should be able to drive the 'view' headlessly or simulate user
UI interaction.
"""
def __init__(self):
self.model = MicrocodeExplorerModel()
self.view = MicrocodeExplorerView(self, self.model)
self.view._code_sync.enable_sync(True) # XXX/HACK
def show(self, address=None):
"""
Show the microcode explorer.
"""
if address is None:
address = ida_kernwin.get_screen_ea()
self.select_function(address)
self.view.show()
def show_subtree(self, insn_token):
"""
Show the sub-instruction graph for the given instruction token.
"""
graph = MicroSubtreeView(insn_token.insn)
graph.show()
# TODO/HACK: this is dumb, but moving it breaks my centering code so
# i'll figure it out later...
gv = ida_graph.get_graph_viewer(graph.GetWidget())
ida_graph.viewer_set_titlebar_height(gv, 15)
#-------------------------------------------------------------------------
# View Toggles
#-------------------------------------------------------------------------
def set_highlight_mutual(self, status):
"""
Toggle the highlighting of lines containing the same active address.
"""
if status:
self.view._code_sync.hook()
else:
self.view._code_sync.unhook()
ida_kernwin.refresh_idaview_anyway()
def set_verbose(self, status):
"""
Toggle the verbosity of the printed microcode text.
"""
self.model.verbose = status
ida_kernwin.refresh_idaview_anyway()
#-------------------------------------------------------------------------
# View Controls
#-------------------------------------------------------------------------
def select_function(self, address):
"""
Switch the microcode view to the specified function.
"""
func = ida_funcs.get_func(address)
if not func:
return False
for maturity in get_mmat_levels():
mba = get_microcode(func, maturity)
mtext = MicrocodeText(mba, self.model.verbose)
self.model.update_mtext(mtext, maturity)
self.view.refresh()
ida_kernwin.refresh_idaview_anyway()
return True
def select_maturity(self, maturity_name):
"""
Switch the microcode view to the specified maturity level.
"""
self.model.active_maturity = get_mmat(maturity_name)
#self.view.refresh()
def select_address(self, address):
"""
Select a token in the microcode view matching the given address.
"""
tokens = self.model.mtext.get_tokens_for_address(address)
if not tokens:
return None
token_line_num, token_x = self.model.mtext.get_pos_of_token(tokens[0])
rel_y = self.model.current_position[2]
if self.model.current_position[2] == 0:
rel_y = 30
self.model.current_position = (token_line_num, token_x, rel_y)
return tokens[0]
def select_position(self, line_num, x, y):
"""
Select the given text position in the microcode view.
"""
self.model.current_position = (line_num, x, y)
#print(" - hovered token: %s" % self.model.current_token.text)
#print(" - hovered taddr: 0x%08X" % self.model.current_token.address)
#print(" - hovered laddr: 0x%08X" % self.model.current_address)
def activate_position(self, line_num, x, y):
"""
Activate (eg. double click) the given text position in the microcode view.
"""
token = self.model.mtext.get_token_at_position(line_num, x)
if isinstance(token, AddressToken):
ida_kernwin.jumpto(token.target_address, -1, 0)
return
if isinstance(token, BlockNumberToken) or (isinstance(token, MicroOperandToken) and token.mop.t == ida_hexrays.mop_b):
blk_idx = token.blk_idx if isinstance(token, BlockNumberToken) else token.mop.b
blk_token = self.model.mtext.blks[blk_idx]
blk_line_num, _ = self.model.mtext.get_pos_of_token(blk_token.lines[0])
self.model.current_position = (blk_line_num, 0, y)
self.view._code_view.Jump(*self.model.current_position)
return
class MicrocodeExplorerModel(object):
"""
The model component of the microcode explorer.
The role of the model is to encapsulate application state, respond to
state queries, and notify views of changes. Ideally, the model could be
serialized / unserialized to save and restore state.
"""
def __init__(self):
#
# 'mtext' is short for MicrocodeText objects (see microtext.py)
#
# this dictionary will contain a mtext object (the renderable text
# mapping of a given hexrays mba_t) for each microcode maturity level
# of the current function.
#
# at any given time, one mtext will be 'active' in the model, and
# therefore visible in the UI/Views
#
self._mtext = {x: None for x in get_mmat_levels()}
#
# there is a 'cursor' (ViewCursor) for each microcode maturity level /
# mtext object. cursors don't actually contain the 'position' in the
# rendered text (line_num, x), but also information to position the
# cursor within the line view (y)
#
self._view_cursors = {x: None for x in get_mmat_levels()}
#
# the currently active / selected maturity level of the model. this
# determines which mtext is currently visible / active in the
# microcode view, and which cursor will be used
#
self._active_maturity = ida_hexrays.MMAT_GENERATED
# this flag tracks the verbosity toggle state
self._verbose = False
#----------------------------------------------------------------------
# Callbacks
#----------------------------------------------------------------------
self._mtext_refreshed_callbacks = []
self._position_changed_callbacks = []
self._maturity_changed_callbacks = []
#-------------------------------------------------------------------------
# Read-Only Properties
#-------------------------------------------------------------------------
@property
def mtext(self):
"""
Return the microcode text mapping for the current maturity level.
"""
return self._mtext[self._active_maturity]
@property
def current_line(self):
"""
Return the line token at the current viewport cursor position.
"""
if not self.mtext:
return None
line_num, _, _ = self.current_position
return self.mtext.lines[line_num]
@property
def current_function(self):
"""
Return the current function address.
"""
if not self.mtext:
return ida_idaapi.BADADDR
return self.mtext.mba.entry_ea
@property
def current_token(self):
"""
Return the token at the current viewport cursor position.
"""
return self.mtext.get_token_at_position(*self.current_position[:2])
@property
def current_address(self):
"""
Return the address at the current viewport cursor position.
"""
return self.mtext.get_address_at_position(*self.current_position[:2])
@property
def current_cursor(self):
"""
Return the current viewport cursor.
"""
return self._view_cursors[self._active_maturity]
#-------------------------------------------------------------------------
# Mutable Properties
#-------------------------------------------------------------------------
@property
def current_position(self):
"""
Return the current viewport cursor position (line_num, view_x, view_y).
"""
return self.current_cursor.viewport_position
@current_position.setter
def current_position(self, value):
"""
Set the cursor position of the viewport.
"""
self._gen_cursors(value, self.active_maturity)
self._notify_position_changed()
@property
def verbose(self):
"""
Return the microcode verbosity status of the viewport.
"""
return self._verbose
@verbose.setter
def verbose(self, value):
"""
Set the verbosity of the microcode displayed by the viewport.
"""
if self._verbose == value:
return
# update the active verbosity setting
self._verbose = value
# verbosity must have changed, so force a mtext refresh
self.refresh_mtext()
@property
def active_maturity(self):
"""
Return the active microcode maturity level.
"""
return self._active_maturity
@active_maturity.setter
def active_maturity(self, new_maturity):
"""
Set the active microcode maturity level.
"""
self._active_maturity = new_maturity
self._notify_maturity_changed()
#----------------------------------------------------------------------
# Misc
#----------------------------------------------------------------------
def update_mtext(self, mtext, maturity):
"""
Set the mtext for a given microcode maturity level.
"""
self._mtext[maturity] = mtext
self._view_cursors[maturity] = ViewCursor(0, 0, 0)
def refresh_mtext(self):
"""
Regenerate the rendered text for all microcode maturity levels.
TODO: This is a bit sloppy, and is basically only used for the
verbosity toggle.
"""
for maturity, mtext in self._mtext.items():
if maturity == self.active_maturity:
new_mtext = MicrocodeText(mtext.mba, self.verbose)
self._mtext[maturity] = new_mtext
self.current_position = translate_mtext_position(self.current_position, mtext, new_mtext)
continue
mtext.refresh(self.verbose)
self._notify_mtext_refreshed()
def _gen_cursors(self, position, mmat_src):
"""
Generate the cursors for all levels from a source position and maturity.
"""
mmat_levels = get_mmat_levels()
mmat_first, mmat_final = mmat_levels[0], mmat_levels[-1]
# clear out all the existing cursor mappings
self._view_cursors = {x: None for x in mmat_levels}
# save the starting cursor
line_num, x, y = position
self._view_cursors[mmat_src] = ViewCursor(line_num, x, y, True)
# map the cursor backwards from the source maturity
mmat_lower = range(mmat_first, mmat_src)[::-1]
current_maturity = mmat_src
for next_maturity in mmat_lower:
self._transfer_cursor(current_maturity, next_maturity)
current_maturity = next_maturity
# map the cursor forward from the source maturity
mmat_higher = range(mmat_src+1, mmat_final + 1)
current_maturity = mmat_src
for next_maturity in mmat_higher:
self._transfer_cursor(current_maturity, next_maturity)
current_maturity = next_maturity
def _transfer_cursor(self, mmat_src, mmat_dst):
"""
Translate the cursor position from one maturity to the next.
"""
position = self._view_cursors[mmat_src].viewport_position
mapped = self._view_cursors[mmat_src].mapped
# attempt to translate the position in one mtext to another
projection = translate_mtext_position(position, self._mtext[mmat_src], self._mtext[mmat_dst])
# if translation failed, we will generate an approximate cursor
if not projection:
mapped = False
projection = remap_mtext_position(position, self._mtext[mmat_src], self._mtext[mmat_dst])
# save the generated cursor
line_num, x, y = projection
self._view_cursors[mmat_dst] = ViewCursor(line_num, x, y, mapped)
#----------------------------------------------------------------------
# Callbacks
#----------------------------------------------------------------------
def mtext_refreshed(self, callback):
"""
Subscribe a callback for mtext refresh events.
"""
register_callback(self._mtext_refreshed_callbacks, callback)
def _notify_mtext_refreshed(self):
"""
Notify listeners of a mtext refresh event.
"""
notify_callback(self._mtext_refreshed_callbacks)
def position_changed(self, callback):
"""
Subscribe a callback for cursor position changed events.
"""
register_callback(self._position_changed_callbacks, callback)
def _notify_position_changed(self):
"""
Notify listeners of a cursor position changed event.
"""
notify_callback(self._position_changed_callbacks)
def maturity_changed(self, callback):
"""
Subscribe a callback for maturity changed events.
"""
register_callback(self._maturity_changed_callbacks, callback)
def _notify_maturity_changed(self):
"""
Notify listeners of a maturity changed event.
"""
notify_callback(self._maturity_changed_callbacks)
#-----------------------------------------------------------------------------
# UI Components
#-----------------------------------------------------------------------------
class MicrocodeExplorerView(QtWidgets.QWidget):
"""
The view component of the Microcode Explorer.
"""
WINDOW_TITLE = "Microcode Explorer"
def __init__(self, controller, model):
super(MicrocodeExplorerView, self).__init__()
self.visible = False
# the backing model, and controller for this view (eg, mvc pattern)
self.model = model
self.controller = controller
# initialize the plugin UI
self._ui_init()
self._ui_init_signals()
#--------------------------------------------------------------------------
# Pseudo Widget Functions
#--------------------------------------------------------------------------
def show(self):
self.refresh()
# show the dockable widget
flags = ida_kernwin.PluginForm.WOPN_DP_RIGHT | 0x200 # WOPN_SZHINT
ida_kernwin.display_widget(self._twidget, flags)
ida_kernwin.set_dock_pos(self.WINDOW_TITLE, "IDATopLevelDockArea", ida_kernwin.DP_RIGHT)
self._code_sync.hook()
def _cleanup(self):
self.visible = False
self._twidget = None
self.widget = None
self._code_sync.unhook()
self._ui_hooks.unhook()
# TODO cleanup controller / model
#--------------------------------------------------------------------------
# Initialization - UI
#--------------------------------------------------------------------------
def _ui_init(self):
"""
Initialize UI elements.
"""
self._ui_init_widget()
# initialize our ui elements
self._ui_init_list()
self._ui_init_code()
self._ui_init_settings()
# layout the populated ui just before showing it
self._ui_layout()
def _ui_init_widget(self):
"""
Initialize an IDA widget for this UI control.
"""
# create a dockable widget, and save a reference to it for later use
self._twidget = ida_kernwin.create_empty_widget(self.WINDOW_TITLE)
# cast the IDA 'twidget' to a less opaque QWidget object
self.widget = ida_kernwin.PluginForm.TWidgetToPyQtWidget(self._twidget)
# hooks to help track the container/widget lifetime
class ExplorerUIHooks(ida_kernwin.UI_Hooks):
def widget_invisible(_, twidget):
if twidget == self._twidget:
self.visible = False
self._cleanup()
def widget_visible(_, twidget):
if twidget == self._twidget:
self.visible = True
# install the widget lifetime hooks
self._ui_hooks = ExplorerUIHooks()
self._ui_hooks.hook()
def _ui_init_list(self):
"""
Initialize the microcode maturity list.
"""
self._maturity_list = LayerListWidget()
def _ui_init_code(self):
"""
Initialize the microcode view(s).
"""
self._code_view = MicrocodeView(self.model)
self._code_sync = MicroCursorHighlight(self.controller, self.model)
self._code_sync.track_view(self._code_view.widget)
def _ui_init_settings(self):
"""
Initialize the explorer settings groupbox.
"""
self._checkbox_cursor = QtWidgets.QCheckBox("Highlight mutual")
self._checkbox_cursor.setCheckState(QtCore.Qt.Checked)
self._checkbox_verbose = QtWidgets.QCheckBox("Show use/def")
self._checkbox_sync = QtWidgets.QCheckBox("Sync hexrays")
self._checkbox_sync.setCheckState(QtCore.Qt.Checked)
self._groupbox_settings = QtWidgets.QGroupBox("Settings")
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self._checkbox_cursor)
layout.addWidget(self._checkbox_verbose)
layout.addWidget(self._checkbox_sync)
self._groupbox_settings.setLayout(layout)
def _ui_layout(self):
"""
Layout the major UI elements of the widget.
"""
layout = QtWidgets.QGridLayout()
# arrange the widgets in a 'grid' row col row span col span
layout.addWidget(self._code_view.widget, 0, 0, 0, 1)
layout.addWidget(self._maturity_list, 0, 1, 1, 1)
layout.addWidget(self._groupbox_settings, 1, 1, 1, 1)
# apply the layout to the widget
self.widget.setLayout(layout)
def _ui_init_signals(self):
"""
Connect UI signals.
"""
self._maturity_list.currentItemChanged.connect(lambda x, y: self.controller.select_maturity(x.text()))
self._code_view.connect_signals(self.controller)
self._code_view.OnClose = self.hide # HACK
# checkboxes
self._checkbox_cursor.stateChanged.connect(lambda x: self.controller.set_highlight_mutual(bool(x)))
self._checkbox_verbose.stateChanged.connect(lambda x: self.controller.set_verbose(bool(x)))
self._checkbox_sync.stateChanged.connect(lambda x: self._code_sync.enable_sync(bool(x)))
# model signals
self.model.mtext_refreshed(self.refresh)
self.model.maturity_changed(self.refresh)
#--------------------------------------------------------------------------
# Misc
#--------------------------------------------------------------------------
def refresh(self):
"""
Refresh the microcode explorer UI based on the model state.
"""
self._maturity_list.setCurrentRow(self.model.active_maturity - 1)
self._code_view.refresh()
class LayerListWidget(QtWidgets.QListWidget):
"""
The microcode maturity list widget
"""
def __init__(self):
super(LayerListWidget, self).__init__()
# populate the list widget with the microcode maturity levels
self.addItems([get_mmat_name(x) for x in get_mmat_levels()])
# select the first maturity level, by default
self.setCurrentRow(0)
# make the list widget a fixed size, slightly wider than it needs to be
width = self.sizeHintForColumn(0)
self.setMaximumWidth(int(width + width * 0.10))
def wheelEvent(self, event):
"""
Handle mouse wheel scroll events.
"""
y = event.angleDelta().y()
# scrolling down, clamp to last row
if y < 0:
next_row = min(self.currentRow()+1, self.count()-1)
# scrolling up, clamp to first row (0)
elif y > 0:
next_row = max(self.currentRow()-1, 0)
# horizontal scroll ? nothing to do..
else:
return
self.setCurrentRow(next_row)
class MicrocodeView(ida_kernwin.simplecustviewer_t):
"""
An IDA-based text area that will render the Hex-Rays microcode.
TODO: I'll probably rip this out in the future, as I'll have finer
control over the interaction / implementation if I just roll my own
microcode text widget.
For that reason, excuse its hacky-ness / lack of comments.
"""
def __init__(self, model):
super(MicrocodeView, self).__init__()
self.model = model
self.Create()
def connect_signals(self, controller):
self.controller = controller
self.OnCursorPosChanged = lambda: controller.select_position(*self.GetPos())
self.OnDblClick = lambda _: controller.activate_position(*self.GetPos())
self.model.position_changed(self.refresh_cursor)
def refresh(self):
self.ClearLines()
for line in self.model.mtext.lines:
self.AddLine(line.tagged_text)
self.refresh_cursor()
def refresh_cursor(self):
if not self.model.current_position:
return
self.Jump(*self.model.current_position)
def Create(self):
if not super(MicrocodeView, self).Create(None):
return False
self._twidget = self.GetWidget()
self.widget = ida_kernwin.PluginForm.TWidgetToPyQtWidget(self._twidget)
return True
def OnClose(self):
pass
def OnCursorPosChanged(self):
pass
def OnDblClick(self, shift):
pass
def OnPopup(self, form, popup_handle):
controller = self.controller
#
# so, i'm pretty picky about my UI / interactions. IDA puts items in
# the right click context menus of custom (code) viewers.
#
# these items aren't really relevant (imo) to the microcode viewer,
# so I do some dirty stuff here to filter them out and ensure only
# my items will appear in the context menu.
#
# there's only one right click context item right now, but in the
# future i'm sure there will be more.
#
class FilterMenu(QtCore.QObject):
def __init__(self, qmenu):
                super(FilterMenu, self).__init__()
self.qmenu = qmenu
def eventFilter(self, obj, event):
if event.type() != QtCore.QEvent.Polish:
return False
for action in self.qmenu.actions():
if action.text() in ["&Font...", "&Synchronize with"]: # lol..
                        self.qmenu.removeAction(action)
self.qmenu.removeEventFilter(self)
self.qmenu = None
return True
p_qmenu = ctypes.cast(int(popup_handle), ctypes.POINTER(ctypes.c_void_p))[0]
qmenu = sip.wrapinstance(int(p_qmenu), QtWidgets.QMenu)
self.filter = FilterMenu(qmenu)
qmenu.installEventFilter(self.filter)
# only handle right clicks on lines containing micro instructions
ins_token = self.model.mtext.get_ins_for_line(self.model.current_line)
if not ins_token:
return False
class MyHandler(ida_kernwin.action_handler_t):
def activate(self, ctx):
controller.show_subtree(ins_token)
def update(self, ctx):
return ida_kernwin.AST_ENABLE_ALWAYS
# inject the 'View subtree' action into the right click context menu
desc = ida_kernwin.action_desc_t(None, 'View subtree', MyHandler())
ida_kernwin.attach_dynamic_action_to_popup(form, popup_handle, desc, None)
return True
#-----------------------------------------------------------------------------
# Util
#-----------------------------------------------------------------------------
class ViewCursor(object):
"""
    Lightweight container describing a cursor location (line number, x, y) within a code view.
"""
def __init__(self, line_num, x, y, mapped=True):
self.line_num = line_num
self.x = x
self.y = y
self.mapped = mapped
@property
def text_position(self):
return (self.line_num, self.x)
@property
def viewport_position(self):
return (self.line_num, self.x, self.y)
|
third_party/libtcod/.ci/conan_build.py | csb6/libtcod-ada | 686 | 39363 | <reponame>csb6/libtcod-ada<filename>third_party/libtcod/.ci/conan_build.py<gh_stars>100-1000
#!/usr/bin/env python3
"""Build script for conan-package-tools:
https://github.com/conan-io/conan-package-tools
"""
import os
import subprocess
from cpt.packager import ConanMultiPackager
try:
version = subprocess.check_output(
["git", "describe", "--abbrev=0"], universal_newlines=True
)
except subprocess.CalledProcessError:
version = "0.0"
if __name__ == "__main__":
if "CI" in os.environ:
os.environ["CONAN_SYSREQUIRES_MODE"] = "enabled"
# Fix GitHub Actions version tag.
if os.environ.get("GITHUB_REF", "").startswith("refs/tags/"):
version = os.environ["GITHUB_REF"].replace("refs/tags/", "")
builder = ConanMultiPackager(
username="hexdecimal",
channel="conan",
upload="https://api.bintray.com/conan/hexdecimal/conan",
upload_only_when_tag=True,
reference="libtcod/" + version,
remotes=[
"https://conan.bintray.com",
"https://api.bintray.com/conan/bincrafters/public-conan",
],
cppstds=["14"],
visual_runtimes=["MD", "MDd"],
# test_folder="tests/",
build_policy="missing",
upload_dependencies="all",
)
builder.add_common_builds(pure_c=False)
builder.run()
|
scripts/mnpr_system.py | semontesdeoca/MNPR | 218 | 39390 | """
@license: MIT
@repository: https://github.com/semontesdeoca/MNPR
_
_ __ ___ _ __ _ __ _ __ ___ _ _ ___| |_ ___ _ __ ___
| '_ ` _ \| '_ \| '_ \| '__| / __| | | / __| __/ _ \ '_ ` _ \
| | | | | | | | | |_) | | \__ \ |_| \__ \ || __/ | | | | |
|_| |_| |_|_| |_| .__/|_| |___/\__, |___/\__\___|_| |_| |_|
|_| |___/
@summary: MNPR related functions
"""
from __future__ import print_function
import os
import traceback
import maya.cmds as cmds
import maya.mel as mel
import coopLib as lib
import mnpr_info
import mnpr_runner
import mnpr_matPresets
mnpr_info.loadPlugin()
dx2sfxAttr = {"xUseColorTexture": "Albedo_Texture",
"xColorTint": "Color_Tint",
"xUseNormalTexture": "Normal_Map",
"xFlipU": "Invert_U",
"xFlipV": "Invert_V",
"xBumpDepth": "Bump_Depth",
"xUseSpecularTexture": "Specular_Map",
"xSpecular": "Specular_Roll_Off",
"xSpecDiffusion": "Specular_Diffusion",
"xSpecTransparency": "Specular_Transparency",
"xUseShadows": "",
"xShadowDepthBias": "",
"xDiffuseFactor": "Diffuse_Factor",
"xShadeColor": "Shade_Color",
"xShadeWrap": "Shade_Wrap",
"xUseOverrideShade": "Shade_Override",
"xDilute": "Dilute_Paint",
"xCangiante": "Cangiante",
"xDiluteArea": "Dilute_Area",
"xHighArea": "Highlight_Roll_Off",
"xHighTransparency": "Highlight_Transparency",
"xAtmosphereColor": "",
"xRangeStart": "",
"xRangeEnd": "",
"xDarkEdges": "",
"xMainTex": "Albedo_Texture_File",
"xNormalTex": "Normal_Map_File",
"xSpecTex": "Specular_Map_File"
}
def check():
"""Makes sure everything is running right"""
print("SYSTEM CHECK FOR {0}".format(mnpr_info.prototype))
# check viewport
viewport = lib.getActiveModelPanel()
cmds.modelEditor(viewport, dtx=True, e=True) # display textures
# plugin needs to be loaded
mnpr_info.loadRenderer()
# 3rd party plugins must be loaded
cmds.loadPlugin('shaderFXPlugin', quiet=True)
if cmds.about(nt=True, q=True):
        cmds.loadPlugin('dx11Shader', quiet=True)  # deprecated (only shaderFXPlugin in the future)
cmds.loadPlugin('glslShader', quiet=True) # deprecated (only shaderFXPlugin in the future)
# viewport renderer must be set
mel.eval("setRendererAndOverrideInModelPanel vp2Renderer {0} {1};".format(mnpr_info.prototype, viewport))
# modify color of heads up display
cmds.displayColor("headsUpDisplayLabels", 2, dormant=True)
cmds.displayColor("headsUpDisplayValues", 2, dormant=True)
# make sure a config node exists
if not cmds.objExists(mnpr_info.configNode):
selected = cmds.ls(sl=True, l=True)
selectConfig()
cmds.select(selected, r=True)
lib.printInfo("-> SYSTEM CHECK SUCCESSFUL")
def changeStyle():
"""Resets MNPR to load a new style"""
# reset stylization
cmds.mnpr(resetStylization=True)
# delete old config node
if cmds.objExists(mnpr_info.configNode):
cmds.delete(mnpr_info.configNode)
# flush undo
cmds.flushUndo()
print("style deleted")
# deregister node
cmds.mnpr(rn=False)
# register node
cmds.mnpr(rn=True)
# create new config node
selectConfig()
# refresh AETemplate
mnpr_runner.reloadConfig()
# set new media type
mnpr_info.media = cmds.mnpr(style=True, q=True)
# rebuild opened UI's
import mnpr_UIs
if cmds.window(mnpr_UIs.BreakdownUI.windowTitle, exists=True):
mnpr_runner.openOverrideSettings(rebuild=True)
import mnpr_FX
if cmds.window(mnpr_FX.MNPR_FX_UI.windowTitle, exists=True):
mnpr_runner.openPaintFX(rebuild=True)
lib.printInfo("Style changed")
def togglePlugin(force=""):
"""
Toggles active or forces desired plugin prototype
Args:
force (str): plugin name to force
"""
if force:
unloadPlugin(mnpr_info.prototype)
mnpr_info.prototype = force
check()
else:
# toggle loaded prototype
if cmds.pluginInfo(mnpr_info.prototype, loaded=True, q=True):
unloadPlugin(mnpr_info.prototype)
else:
check()
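# A hypothetical usage sketch (not part of the original module): these calls assume an
# interactive Maya session with MNPR installed and show the two ways togglePlugin() is
# meant to be driven -- toggling the active prototype, or forcing a named one.
#   togglePlugin()              # toggle the currently loaded prototype on/off
#   togglePlugin(force="MNPR")  # "MNPR" is an assumed prototype name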
def unloadPlugin(plugin):
"""
Unloads plugin and cleans scene from plugin traces
Args:
plugin (str): name of plugin to be unloaded
"""
# check which prototype is active
if cmds.pluginInfo(plugin, loaded=True, q=True):
# remove traces and unload
if cmds.objExists(mnpr_info.configNode):
cmds.delete(mnpr_info.configNode) # delete config node
cmds.flushUndo() # clear undo queue
cmds.unloadPlugin(plugin) # unload plugin
lib.printInfo("->PLUGIN SUCCESSFULLY UNLOADED")
def showShaderAttr():
""" Select material and show in attribute editor """
if cmds.ls(sl=True):
cmds.hyperShade(smn=True)
mel.eval("openAEWindow")
else:
cmds.warning("Select object with shader")
def refreshShaders():
""" Refreshes object-space plugin shaders """
shaderDir = systemDir("shaders")
if os.name == 'nt' and mnpr_info.backend == 'dx11':
shaderFile = os.path.join(shaderDir, "PrototypeC.fx")
if not os.path.isfile(shaderFile):
shaderFile = os.path.join(shaderDir, "prototypeC.fxo")
shaders = cmds.ls(type="dx11Shader")
else:
shaderFile = os.path.join(shaderDir, "PrototypeC.ogsfx")
shaders = cmds.ls(type="GLSLShader")
for shader in shaders:
cmds.setAttr("{0}.shader".format(shader), shaderFile, type="string")
lib.printInfo('Shaders refreshed')
return True
def updateShaderFX():
""" Updates shaderFX shaders"""
shaderDir = systemDir("shaders")
materials = cmds.ls(type="ShaderfxShader")
counter = 0
for mat in materials:
counter += 1
# get materials attributes
matAttrs = {}
mnpr_matPresets.getMaterialAttrs(mat, matAttrs)
# load new graph
shaderFile = os.path.join(shaderDir, "{0}.sfx".format(matAttrs["graph"]))
cmds.shaderfx(sfxnode=mat, loadGraph=shaderFile)
# set attributes
mnpr_matPresets.setMaterialAttrs(mat, matAttrs)
print("{0} has been updated to the latest version".format(mat))
print("{0}/{1} materials updated".format(counter, len(materials)))
lib.printInfo('Shaders updated')
def dx112glsl():
""" Converts dx11 materials to glsl materials """
check()
dx11Shaders = cmds.ls(type="dx11Shader")
print(dx11Shaders)
for dx11Shader in dx11Shaders:
print("Transfering {0} shader".format(dx11Shader))
# get all attributes
attributes = cmds.listAttr(dx11Shader, ud=True, st="x*", k=True)
print(attributes)
# get all connected nodes
connectedNodes = cmds.listConnections(dx11Shader, t="file", c=True, p=True)
print(connectedNodes)
# get all shapes
cmds.select(dx11Shader, r=True)
cmds.hyperShade(objects="")
shapes = cmds.ls(sl=True)
print(shapes)
# create glsl shader
shader = cmds.shadingNode('GLSLShader', asShader=True, n="{0}_GL".format(dx11Shader))
cmds.select(shapes, r=True)
cmds.hyperShade(assign=shader)
print(">>> Shader {0} created".format(shader))
# assign attributes
shaderFile = os.path.join(mnpr_info.environment,"shaders","PrototypeC.ogsfx")
cmds.setAttr("{0}.shader".format(shader), shaderFile, type="string")
print("Setting attributes for {0}".format(shader))
for attr in attributes:
value = cmds.getAttr("{0}.{1}".format(dx11Shader, attr))
try:
if type(value) == type([]):
cmds.setAttr("{0}.{1}".format(shader, attr), value[0][0], value[0][1], value[0][2], typ="double3")
else:
cmds.setAttr("{0}.{1}".format(shader, attr), value)
except:
print("Found problemt when setting {0}.{1}, skipping for now".format(shader, attr))
# connect nodes
if connectedNodes:
for i in range(0, len(connectedNodes), 2):
inputAttr = connectedNodes[i].split(".")[1]
cmds.connectAttr(connectedNodes[i+1], "{0}.{1}".format(shader, inputAttr))
# set control sets
if cmds.attributeQuery("Color0_Source", node=shader, ex=True):
cmds.setAttr("{0}.Color0_Source".format(shader), "color:controlSetA", type="string" )
if cmds.attributeQuery("Color1_Source", node=shader, ex=True):
cmds.setAttr("{0}.Color1_Source".format(shader), "color:controlSetB", type="string" )
if cmds.attributeQuery("Color2_Source", node=shader, ex=True):
cmds.setAttr("{0}.Color2_Source".format(shader), "color:controlSetC", type="string" )
# delete dx11 shader
#cmds.delete(dx11Shader)
def dx112sfx(graph="mnpr_uber"):
"""
Converts dx11 materials to shaderFX materials
Args:
graph (str): ShaderFX graph name (filename)
"""
check()
dx11Shaders = cmds.ls(type="dx11Shader")
prototypeCNodes = []
for dx11Shader in dx11Shaders:
shaderPath = cmds.getAttr("{0}.shader".format(dx11Shader))
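        # NOTE: the substring check below intentionally matches both "PrototypeC" and
        # "prototypeC" shader paths, regardless of the capitalization on disk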
if "rototypeC" not in shaderPath:
continue
prototypeCNodes.append(dx11Shader)
print("Converting {0} shader".format(dx11Shader))
# get all attributes
attributes = cmds.listAttr(dx11Shader, ud=True, st="x*", k=True)
print(attributes)
# get all connected nodes
connectedNodes = cmds.listConnections(dx11Shader, t="file", c=True)
print(connectedNodes)
# get all shapes
cmds.select(dx11Shader, r=True)
cmds.hyperShade(objects="")
shapes = cmds.ls(sl=True)
print(shapes)
# create shaderFX shader
shader = cmds.shadingNode('ShaderfxShader', asShader=True, name="{0}".format(dx11Shader.replace("_WC", "_SFX")))
cmds.select(shapes, r=True)
cmds.hyperShade(assign=shader)
shaderFile = os.path.join(mnpr_info.environment, "shaders", "{0}.sfx".format(graph))
cmds.shaderfx(sfxnode=shader, loadGraph=shaderFile)
print(">>> Shader {0} created".format(shader))
# assign settings
vtxControl = bool(cmds.getAttr("{0}.{1}".format(dx11Shader, "xUseControl")))
if vtxControl:
nodeId = cmds.shaderfx(sfxnode=shader, getNodeIDByName="vtxControls")
cmds.shaderfx(sfxnode=shader, edit_bool=(nodeId, "value", vtxControl))
shadows = bool(cmds.getAttr("{0}.{1}".format(dx11Shader, "xUseShadows")))
if not shadows:
nodeId = cmds.shaderfx(sfxnode=shader, getNodeIDByName="Shadow")
cmds.shaderfx(sfxnode=shader, edit_bool=(nodeId, "value", shadows))
specularity = bool(cmds.getAttr("{0}.{1}".format(dx11Shader, "xSpecular")))
if specularity:
nodeId = cmds.shaderfx(sfxnode=shader, getNodeIDByName="Specularity")
cmds.shaderfx(sfxnode=shader, edit_bool=(nodeId, "value", specularity))
# assign attributes
print("Setting attributes for {0}".format(shader))
for attr in attributes:
value = cmds.getAttr("{0}.{1}".format(dx11Shader, attr))
if attr in dx2sfxAttr:
lib.setAttr(shader, dx2sfxAttr[attr], value)
# assign textures
if connectedNodes:
for i in range(0, len(connectedNodes), 2):
textureDir = cmds.getAttr("{0}.{1}".format(connectedNodes[i+1], "fileTextureName"))
attr = connectedNodes[i].split(".")[1]
lib.setAttr(shader, dx2sfxAttr[attr], textureDir)
# delete prototypeC shaders
cmds.delete(prototypeCNodes)
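# A hypothetical usage sketch (not part of the original module): run from Maya's script
# editor to convert every PrototypeC dx11 material in the open scene to a ShaderFX material
# built from the default "mnpr_uber" graph.
#   dx112sfx(graph="mnpr_uber")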
def systemDir(folder=''):
"""
Returns the system directory
Args:
folder (str): folder to append to system directory
Returns:
(str): path to system directory
"""
rootDir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
return os.path.join(rootDir, folder)
def selectConfig():
"""Select configuration node and re-check connections"""
# delete old configuration nodes
if cmds.objExists("NPRConfig"):
cmds.delete("NPRConfig")
if not cmds.objExists(mnpr_info.configNode):
print(mnpr_info.configNode)
cmds.createNode("mnprConfig", n=mnpr_info.configNode)
cmds.connectAttr("{0}.evaluate".format(mnpr_info.configNode), "persp.visibility", f=True)
mel.eval("AttributeEditor")
lib.printInfo("-> CONFIG NODE CREATED AND CONNECTED")
else:
cmds.select(mnpr_info.configNode)
mel.eval("AttributeEditor")
lib.printInfo("Selected {0} configuration node".format(mnpr_info.prototype))
def optimizePerformance():
"""Function to optimize performance by disabling some Maya functions"""
cmds.evaluationManager(mode="off") # set up animation evaluation to DG
def renderFrame(saveDir, width, height, renderSize=1, imgFormat=".jpg", override=mnpr_info.prototype):
"""
Renders current frame in the viewport
Args:
saveDir (str): save directory
width (int): width in pixels
height (int): height in pixels
renderSize (float): render size (factor)
        imgFormat (str): image format (.jpg, .exr, etc.)
override (str): name of desired override (if any)
"""
check() # check that everything is in order
renderSize = resolutionCheck(width, height, renderSize) # make sure resolution is reasonable
# get working values to be changed
workingRenderSize = cmds.getAttr("{0}.renderScale".format(mnpr_info.configNode))
workingColorDepth = cmds.getAttr("{0}.colorDepth".format(mnpr_info.configNode))
# set desired attributes
if workingColorDepth != 2:
lib.setAttr(mnpr_info.configNode, "colorDepth", 2)
if renderSize != workingRenderSize:
lib.setAttr(mnpr_info.configNode, "renderScale", renderSize)
# prepare renderer
cmds.mnpr(g=True) # enable mnprGamma
mnprOperations = len(cmds.mnpr(lsO=True))
cmds.mnpr(renderOperation=mnprOperations-1, s=0) # HUD
cmds.mnpr(renderOperation=mnprOperations-2, s=0) # UI
cmds.refresh()
    # render frame
    screenshotPath = None  # guard against the except branches leaving this undefined
    try:
screenshotPath = lib.screenshot(saveDir, width, height, format=imgFormat, override=override) # render the frame
    except OSError:  # WindowsError is only defined on Windows; OSError covers it on all platforms
print("Screenshot saving has been canceled")
except:
traceback.print_exc()
if screenshotPath:
# bring everything back to normal
cmds.mnpr(renderOperation=mnprOperations-1, s=1) # HUD
cmds.mnpr(renderOperation=mnprOperations-2, s=1) # UI
lib.setAttr(mnpr_info.configNode, "renderScale", workingRenderSize)
lib.setAttr(mnpr_info.configNode, "colorDepth", workingColorDepth)
cmds.mnpr(g=False)
cmds.refresh()
return screenshotPath
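# A hypothetical usage sketch (not part of the original module): the directory, resolution
# and factor below are made-up values illustrating a 2x supersampled .jpg capture of the
# current viewport.
#   renderFrame("C:/renders", 1920, 1080, renderSize=2, imgFormat=".jpg")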
def playblast(saveDir, width, height, renderCamera, modelPanel, renderSize=1):
"""
Playblasts the timeslider
Args:
saveDir (str): save directory with *.mov extension
width (int): width in pixels
        height (int): height in pixels
        renderCamera (str): camera to playblast from
        modelPanel (str): modelPanel to playblast from
        renderSize (float): render size (factor)
"""
check() # check that everything is in order
renderSize = resolutionCheck(width, height, renderSize) # make sure resolution is reasonable
aPlayBackSliderPython = mel.eval('$tmpVar=$gPlayBackSlider')
audioNode = cmds.timeControl(aPlayBackSliderPython, q=True, s=True) # get audio node
# get working values to be changed
workingRenderSize = cmds.getAttr("{0}.renderScale".format(mnpr_info.configNode))
workingColorDepth = cmds.getAttr("{0}.colorDepth".format(mnpr_info.configNode))
workingCamera = cmds.modelEditor(modelPanel, cam=True, q=True)
workingCameraShape = cmds.listRelatives(workingCamera, s=True)
if workingCameraShape:
workingCameraShape = workingCameraShape[0]
else:
# we already have the shape
workingCameraShape = workingCamera
# set desired attributes
cmds.mnpr(g=True)
mnprOperations = len(cmds.mnpr(lsO=True))
cmds.mnpr(renderOperation=mnprOperations-1, s=0) # HUD
cmds.mnpr(renderOperation=mnprOperations-2, s=0) # UI
cmds.modelEditor(modelPanel, cam=renderCamera, e=True) # change modelPanel
lib.setAttr(mnpr_info.configNode, "renderScale", renderSize)
lib.setAttr(mnpr_info.configNode, "colorDepth", 2) # needs to be 32bit to avoid artefacts
cmds.refresh()
# try playblasting
try:
cmds.playblast(f=saveDir, format="qt", w=width, h=height, percent=100, qlt=100, v=True, fo=True, os=True,
s=audioNode, compression="PNG")
except RuntimeError:
try:
cmds.playblast(f=saveDir, format="avi", w=width, h=height, percent=100, qlt=100, v=True, fo=True, os=True,
s=audioNode)
except RuntimeError:
cmds.error("Video cannot be playblasted as qt or avi, please check the installed codecs.")
# bring everything back to normal
cmds.mnpr(renderOperation=mnprOperations-1, s=1) # HUD
cmds.mnpr(renderOperation=mnprOperations-2, s=1) # UI
cmds.modelEditor(modelPanel, cam=workingCameraShape, e=True)
lib.setAttr(mnpr_info.configNode, "renderScale", workingRenderSize)
lib.setAttr(mnpr_info.configNode, "colorDepth", workingColorDepth)
cmds.mnpr(g=False)
cmds.refresh()
lib.printInfo("Video has been successfully playblasted to: {0}".format(saveDir))
def resolutionCheck(width, height, renderSize=1.0):
"""
Checks if resolution is between reasonable hardware limitations
Args:
width (int): viewport width
height (int): viewport height
renderSize (float): render size (factor)
Returns:
        renderSize (float): viable render size (factor)
"""
if (width*renderSize > 16384) or (height*renderSize > 16384):
cmds.warning("Resolution too high to supersample, reducing render size")
return resolutionCheck(width, height, renderSize/2.0)
else:
if (width * height * pow(renderSize, 2)) > 150000000:
confirm = cmds.confirmDialog(title='Crash Warning',
                                         message='Rendering a frame at such a high resolution might take a long time and can even crash Maya.\nWould you like to continue anyway?',
icn="warning", button=['Yes', 'No'], defaultButton='Yes',
cancelButton='No', dismissString='No', ma='center')
if confirm == 'No':
cmds.error("Frame capture cancelled by user")
return renderSize
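# Worked example (made-up values): a 1920x1080 viewport with a 16x render scale exceeds the
# 16384 px axis limit, so the factor is halved recursively (16 -> 8) before being returned.
#   resolutionCheck(1920, 1080, renderSize=16.0)  # returns 8.0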
def updateAE():
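    """Refreshes the Attribute Editor templates"""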
mel.eval("refreshEditorTemplates;")
return True
|
python/dgl/contrib/data/__init__.py | ketyi/dgl | 9,516 | 39392 | <filename>python/dgl/contrib/data/__init__.py
from __future__ import absolute_import
from . import knowledge_graph as knwlgrh
def load_data(dataset, bfs_level=3, relabel=False):
if dataset in ['aifb', 'mutag', 'bgs', 'am']:
return knwlgrh.load_entity(dataset, bfs_level, relabel)
elif dataset in ['FB15k', 'wn18', 'FB15k-237']:
return knwlgrh.load_link(dataset)
else:
raise ValueError('Unknown dataset: {}'.format(dataset))
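# A hypothetical usage sketch (not part of the original module); dataset files are typically
# fetched on first use:
#   data = load_data('aifb', bfs_level=3)  # entity classification dataset
#   data = load_data('FB15k-237')          # link prediction dataset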
|
tests/chem/test_mol.py | ShantamShorewala/aizynthfinder | 219 | 39400 | import pytest
from rdkit import Chem
from aizynthfinder.chem import MoleculeException, Molecule
def test_no_input():
with pytest.raises(MoleculeException):
Molecule()
def test_create_with_mol():
rd_mol = Chem.MolFromSmiles("O")
mol = Molecule(rd_mol=rd_mol)
assert mol.smiles == "O"
def test_create_with_smiles():
mol = Molecule(smiles="O")
assert Chem.MolToSmiles(mol.rd_mol) == "O"
def test_inchi():
mol = Molecule(smiles="O")
assert mol.inchi == "InChI=1S/H2O/h1H2"
def test_inchi_key():
mol = Molecule(smiles="O")
assert mol.inchi_key == "<KEY>"
def test_fingerprint():
mol = Molecule(smiles="O")
assert sum(mol.fingerprint(2)) == 1
assert sum(mol.fingerprint(2, 10)) == 1
def test_sanitize():
mol = Molecule(smiles="O", sanitize=True)
assert Chem.MolToSmiles(mol.rd_mol) == "O"
mol = Molecule(smiles="c1ccccc1(C)(C)")
with pytest.raises(MoleculeException):
mol.sanitize()
mol.sanitize(raise_exception=False)
assert mol.smiles == "CC1(C)CCCCC1"
def test_equality():
mol1 = Molecule(smiles="CCCCO")
mol2 = Molecule(smiles="OCCCC")
assert mol1 == mol2
def test_basic_equality():
mol1 = Molecule(smiles="CC[C@@H](C)O") # R-2-butanol
mol2 = Molecule(smiles="CC[C@H](C)O") # S-2-butanol
assert mol1 != mol2
assert mol1.basic_compare(mol2)
def test_has_atom_mapping():
mol1 = Molecule(smiles="CCCCO")
mol2 = Molecule(smiles="C[C:5]CCO")
assert not mol1.has_atom_mapping()
assert mol2.has_atom_mapping()
def test_remove_atom_mapping():
mol = Molecule(smiles="C[C:5]CCO")
assert mol.has_atom_mapping()
mol.remove_atom_mapping()
assert not mol.has_atom_mapping()
|
tests/test_tree.py | tgragnato/geneva | 1,182 | 39444 | import logging
import os
from scapy.all import IP, TCP
import actions.tree
import actions.drop
import actions.tamper
import actions.duplicate
import actions.utils
import layers.packet
def test_init():
"""
Tests initialization
"""
print(actions.action.Action.get_actions("out"))
def test_count_leaves():
"""
Tests leaf count is correct.
"""
a = actions.tree.ActionTree("out")
logger = logging.getLogger("test")
assert not a.parse("TCP:reserved:0tamper{TCP:flags:replace:S}-|", logger), "Tree parsed malformed DNA"
a.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|", logger)
duplicate = actions.duplicate.DuplicateAction()
duplicate2 = actions.duplicate.DuplicateAction()
drop = actions.drop.DropAction()
assert a.count_leaves() == 1
assert a.remove_one()
a.add_action(duplicate)
assert a.count_leaves() == 1
duplicate.left = duplicate2
assert a.count_leaves() == 1
duplicate.right = drop
assert a.count_leaves() == 2
def test_check():
"""
Tests action tree check function.
"""
a = actions.tree.ActionTree("out")
logger = logging.getLogger("test")
a.parse("[TCP:flags:RA]-tamper{TCP:flags:replace:S}-|", logger)
p = layers.packet.Packet(IP()/TCP(flags="A"))
assert not a.check(p, logger)
p = layers.packet.Packet(IP(ttl=64)/TCP(flags="RA"))
assert a.check(p, logger)
assert a.remove_one()
assert a.check(p, logger)
a.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|", logger)
assert a.check(p, logger)
a.parse("[IP:ttl:64]-tamper{TCP:flags:replace:S}-|", logger)
assert a.check(p, logger)
p = layers.packet.Packet(IP(ttl=15)/TCP(flags="RA"))
assert not a.check(p, logger)
def test_scapy():
"""
Tests misc. scapy aspects relevant to strategies.
"""
a = actions.tree.ActionTree("out")
logger = logging.getLogger("test")
a.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|", logger)
p = layers.packet.Packet(IP()/TCP(flags="A"))
assert a.check(p, logger)
packets = a.run(p, logger)
assert packets[0][TCP].flags == "S"
p = layers.packet.Packet(IP()/TCP(flags="A"))
assert a.check(p, logger)
a.parse("[TCP:reserved:0]-tamper{TCP:chksum:corrupt}-|", logger)
packets = a.run(p, logger)
assert packets[0][TCP].chksum
assert a.check(p, logger)
def test_str():
"""
Tests string representation.
"""
logger = logging.getLogger("test")
t = actions.trigger.Trigger("field", "flags", "TCP")
a = actions.tree.ActionTree("out", trigger=t)
assert str(a).strip() == "[%s]-|" % str(t)
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
assert a.add_action(tamper)
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}-|"
# Tree will not add a duplicate action
assert not a.add_action(tamper)
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}-|"
assert a.add_action(tamper2)
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},)-|"
assert a.add_action(actions.duplicate.DuplicateAction())
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"
drop = actions.drop.DropAction()
assert a.add_action(drop)
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate(drop,),),)-|" or \
str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate(,drop),),)-|"
assert a.remove_action(drop)
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"
# Cannot remove action that is not present
assert not a.remove_action(drop)
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"
a = actions.tree.ActionTree("out", trigger=t)
orig = "[TCP:urgptr:15963]-duplicate(,drop)-|"
a.parse(orig, logger)
assert a.remove_one()
assert orig != str(a)
assert str(a) in ["[TCP:urgptr:15963]-drop-|", "[TCP:urgptr:15963]-duplicate-|"]
def test_pretty_print_send():
t = actions.trigger.Trigger("field", "flags", "TCP")
a = actions.tree.ActionTree("out", trigger=t)
duplicate = actions.duplicate.DuplicateAction()
a.add_action(duplicate)
correct_string = "TCP:flags:0\nduplicate\n├── ===> \n└── ===> "
assert a.pretty_print() == correct_string
def test_pretty_print(logger):
"""
Print complex tree, although difficult to test
"""
t = actions.trigger.Trigger("field", "flags", "TCP")
a = actions.tree.ActionTree("out", trigger=t)
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
duplicate = actions.duplicate.DuplicateAction()
duplicate2 = actions.duplicate.DuplicateAction()
duplicate3 = actions.duplicate.DuplicateAction()
duplicate4 = actions.duplicate.DuplicateAction()
duplicate5 = actions.duplicate.DuplicateAction()
drop = actions.drop.DropAction()
drop2 = actions.drop.DropAction()
drop3 = actions.drop.DropAction()
drop4 = actions.drop.DropAction()
duplicate.left = duplicate2
duplicate.right = duplicate3
duplicate2.left = tamper
duplicate2.right = drop
duplicate3.left = duplicate4
duplicate3.right = drop2
duplicate4.left = duplicate5
duplicate4.right = drop3
duplicate5.left = drop4
duplicate5.right = tamper2
a.add_action(duplicate)
correct_string = "TCP:flags:0\nduplicate\n├── duplicate\n│ ├── tamper{TCP:flags:replace:S}\n│ │ └── ===> \n│ └── drop\n└── duplicate\n ├── duplicate\n │ ├── duplicate\n │ │ ├── drop\n │ │ └── tamper{TCP:flags:replace:R}\n │ │ └── ===> \n │ └── drop\n └── drop"
assert a.pretty_print() == correct_string
assert a.pretty_print(visual=True)
assert os.path.exists("tree.png")
os.remove("tree.png")
a.parse("[TCP:flags:0]-|", logger)
a.pretty_print(visual=True) # Empty action tree
assert not os.path.exists("tree.png")
def test_pretty_print_order():
"""
Tests the left/right ordering by reading in a new tree
"""
logger = logging.getLogger("test")
a = actions.tree.ActionTree("out")
assert a.parse("[TCP:flags:A]-duplicate(tamper{TCP:flags:replace:R}(tamper{TCP:chksum:replace:14239},),duplicate(tamper{TCP:flags:replace:S}(tamper{TCP:chksum:replace:14239},),))-|", logger)
correct_pretty_print = "TCP:flags:A\nduplicate\n├── tamper{TCP:flags:replace:R}\n│ └── tamper{TCP:chksum:replace:14239}\n│ └── ===> \n└── duplicate\n ├── tamper{TCP:flags:replace:S}\n │ └── tamper{TCP:chksum:replace:14239}\n │ └── ===> \n └── ===> "
assert a.pretty_print() == correct_pretty_print
def test_parse():
"""
Tests string parsing.
"""
logger = logging.getLogger("test")
t = actions.trigger.Trigger("field", "flags", "TCP")
a = actions.tree.ActionTree("out", trigger=t)
base_t = actions.trigger.Trigger("field", "flags", "TCP")
base_a = actions.tree.ActionTree("out", trigger=base_t)
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
tamper3 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper4 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
a.parse("[TCP:flags:0]-|", logger)
assert str(a) == str(base_a)
assert len(a) == 0
base_a.add_action(tamper)
assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}-|", logger)
assert str(a) == str(base_a)
assert len(a) == 1
assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},)-|", logging.getLogger("test"))
base_a.add_action(tamper2)
assert str(a) == str(base_a)
assert len(a) == 2
base_a.add_action(tamper3)
base_a.add_action(tamper4)
assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},),),)-|", logging.getLogger("test"))
assert str(a) == str(base_a)
assert len(a) == 4
base_t = actions.trigger.Trigger("field", "flags", "TCP")
base_a = actions.tree.ActionTree("out", trigger=base_t)
duplicate = actions.duplicate.DuplicateAction()
assert a.parse("[TCP:flags:0]-duplicate-|", logger)
base_a.add_action(duplicate)
assert str(a) == str(base_a)
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
tamper3 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="A")
tamper4 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
duplicate.left = tamper
assert a.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},)-|", logger)
assert str(a) == str(base_a)
duplicate.right = tamper2
assert a.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},tamper{TCP:flags:replace:R})-|", logger)
assert str(a) == str(base_a)
tamper2.left = tamper3
assert a.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},tamper{TCP:flags:replace:R}(tamper{TCP:flags:replace:A},))-|", logger)
assert str(a) == str(base_a)
strategy = actions.utils.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},tamper{TCP:flags:replace:R})-| \/", logger)
assert strategy
assert len(strategy.out_actions[0]) == 3
assert len(strategy.in_actions) == 0
assert not a.parse("[]", logger) # No valid trigger
assert not a.parse("[TCP:flags:0]-", logger) # No valid ending "|"
assert not a.parse("[TCP:]-|", logger) # invalid trigger
assert not a.parse("[TCP:flags:0]-foo-|", logger) # Non-existent action
assert not a.parse("[TCP:flags:0]--|", logger) # Empty action
assert not a.parse("[TCP:flags:0]-duplicate(,,,)-|", logger) # Bad tree
assert not a.parse("[TCP:flags:0]-duplicate()))-|", logger) # Bad tree
assert not a.parse("[TCP:flags:0]-duplicate(((()-|", logger) # Bad tree
assert not a.parse("[TCP:flags:0]-duplicate(,))))-|", logger) # Bad tree
assert not a.parse("[TCP:flags:0]-drop(duplicate,)-|", logger) # Terminal action with children
assert not a.parse("[TCP:flags:0]-drop(duplicate,duplicate)-|", logger) # Terminal action with children
assert not a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(,duplicate)-|", logger) # Non-branching action with right child
assert not a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(drop,duplicate)-|", logger) # Non-branching action with children
def test_tree():
"""
Tests basic tree functionality.
"""
t = actions.trigger.Trigger(None, None, None)
a = actions.tree.ActionTree("out", trigger=t)
tamper = actions.tamper.TamperAction()
tamper2 = actions.tamper.TamperAction()
duplicate = actions.duplicate.DuplicateAction()
a.add_action(None)
a.add_action(tamper)
assert a.get_slots() == 1
a.add_action(tamper2)
assert a.get_slots() == 1
a.add_action(duplicate)
assert a.get_slots() == 2
t = actions.trigger.Trigger(None, None, None)
a = actions.tree.ActionTree("out", trigger=t)
drop = actions.drop.DropAction()
a.add_action(drop)
assert a.get_slots() == 0
add_success = a.add_action(tamper)
assert not add_success
assert a.get_slots() == 0
rep = ""
for s in a.string_repr(a.action_root):
rep += s
assert rep == "drop"
print(str(a))
assert a.parse("[TCP:flags:A]-duplicate(tamper{TCP:seq:corrupt},)-|", logging.getLogger("test"))
for act in a:
print(str(a))
assert len(a) == 2
assert a.get_slots() == 2
for _ in range(100):
assert str(a.get_rand_action("out", request="DropAction")) == "drop"
def test_remove():
"""
Tests remove
"""
t = actions.trigger.Trigger(None, None, None)
a = actions.tree.ActionTree("out", trigger=t)
tamper = actions.tamper.TamperAction()
tamper2 = actions.tamper.TamperAction()
tamper3 = actions.tamper.TamperAction()
assert not a.remove_action(tamper)
a.add_action(tamper)
assert a.remove_action(tamper)
a.add_action(tamper)
a.add_action(tamper2)
a.add_action(tamper3)
assert a.remove_action(tamper2)
assert tamper2 not in a
assert tamper.left == tamper3
assert not tamper.right
assert len(a) == 2
a = actions.tree.ActionTree("out", trigger=t)
duplicate = actions.duplicate.DuplicateAction()
tamper = actions.tamper.TamperAction()
tamper2 = actions.tamper.TamperAction()
tamper3 = actions.tamper.TamperAction()
a.add_action(tamper)
assert a.action_root == tamper
duplicate.left = tamper2
duplicate.right = tamper3
a.add_action(duplicate)
assert len(a) == 4
assert a.remove_action(duplicate)
assert duplicate not in a
assert tamper.left == tamper2
assert not tamper.right
assert len(a) == 2
a.parse("[TCP:flags:A]-|", logging.getLogger("test"))
assert not a.remove_one(), "Cannot remove one with no action root"
def test_len():
"""
Tests length calculation.
"""
t = actions.trigger.Trigger(None, None, None)
a = actions.tree.ActionTree("out", trigger=t)
tamper = actions.tamper.TamperAction()
tamper2 = actions.tamper.TamperAction()
assert len(a) == 0, "__len__ returned wrong length"
a.add_action(tamper)
assert len(a) == 1, "__len__ returned wrong length"
a.add_action(tamper)
assert len(a) == 1, "__len__ returned wrong length"
a.add_action(tamper2)
assert len(a) == 2, "__len__ returned wrong length"
duplicate = actions.duplicate.DuplicateAction()
a.add_action(duplicate)
assert len(a) == 3, "__len__ returned wrong length"
def test_contains():
"""
Tests contains method
"""
t = actions.trigger.Trigger(None, None, None)
a = actions.tree.ActionTree("out", trigger=t)
tamper = actions.tamper.TamperAction()
tamper2 = actions.tamper.TamperAction()
tamper3 = actions.tamper.TamperAction()
assert not a.contains(tamper), "contains incorrect behavior"
assert not a.contains(tamper2), "contains incorrect behavior"
a.add_action(tamper)
assert a.contains(tamper), "contains incorrect behavior"
assert not a.contains(tamper2), "contains incorrect behavior"
add_success = a.add_action(tamper)
assert not add_success, "added duplicate action"
assert a.contains(tamper), "contains incorrect behavior"
assert not a.contains(tamper2), "contains incorrect behavior"
a.add_action(tamper2)
assert a.contains(tamper), "contains incorrect behavior"
assert a.contains(tamper2), "contains incorrect behavior"
a.remove_action(tamper2)
assert a.contains(tamper), "contains incorrect behavior"
assert not a.contains(tamper2), "contains incorrect behavior"
a.add_action(tamper2)
assert a.contains(tamper), "contains incorrect behavior"
assert a.contains(tamper2), "contains incorrect behavior"
remove_success = a.remove_action(tamper)
assert remove_success
assert not a.contains(tamper), "contains incorrect behavior"
assert a.contains(tamper2), "contains incorrect behavior"
a.add_action(tamper3)
assert a.contains(tamper3), "contains incorrect behavior"
assert len(a) == 2, "len incorrect return"
remove_success = a.remove_action(tamper2)
assert remove_success
def test_iter():
"""
Tests iterator.
"""
t = actions.trigger.Trigger(None, None, None)
a = actions.tree.ActionTree("out", trigger=t)
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
assert a.add_action(tamper)
assert a.add_action(tamper2)
assert not a.add_action(tamper)
for node in a:
print(node)
def test_run():
"""
Tests running packets through the chain.
"""
logger = logging.getLogger("test")
t = actions.trigger.Trigger(None, None, None)
a = actions.tree.ActionTree("out", trigger=t)
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
duplicate = actions.duplicate.DuplicateAction()
duplicate2 = actions.duplicate.DuplicateAction()
drop = actions.drop.DropAction()
packet = layers.packet.Packet(IP()/TCP())
a.add_action(tamper)
packets = a.run(packet, logging.getLogger("test"))
assert len(packets) == 1
assert None not in packets
assert packets[0].get("TCP", "flags") == "S"
a.add_action(tamper2)
print(str(a))
packet = layers.packet.Packet(IP()/TCP())
assert not a.add_action(tamper), "tree added duplicate action"
packets = a.run(packet, logging.getLogger("test"))
assert len(packets) == 1
assert None not in packets
assert packets[0].get("TCP", "flags") == "R"
print(str(a))
a.remove_action(tamper2)
a.remove_action(tamper)
a.add_action(duplicate)
packet = layers.packet.Packet(IP()/TCP(flags="RA"))
packets = a.run(packet, logging.getLogger("test"))
assert len(packets) == 2
assert None not in packets
assert packets[0][TCP].flags == "RA"
assert packets[1][TCP].flags == "RA"
print(str(a))
duplicate.left = tamper
duplicate.right = tamper2
packet = layers.packet.Packet(IP()/TCP(flags="RA"))
print("ABUT TO RUN")
packets = a.run(packet, logging.getLogger("test"))
assert len(packets) == 2
assert None not in packets
print(str(a))
print(str(packets[0]))
print(str(packets[1]))
assert packets[0][TCP].flags == "S"
assert packets[1][TCP].flags == "R"
print(str(a))
tamper.left = duplicate2
packet = layers.packet.Packet(IP()/TCP(flags="RA"))
packets = a.run(packet, logging.getLogger("test"))
assert len(packets) == 3
assert None not in packets
assert packets[0][TCP].flags == "S"
assert packets[1][TCP].flags == "S"
assert packets[2][TCP].flags == "R"
print(str(a))
tamper2.left = drop
packet = layers.packet.Packet(IP()/TCP(flags="RA"))
packets = a.run(packet, logging.getLogger("test"))
assert len(packets) == 2
assert None not in packets
assert packets[0][TCP].flags == "S"
assert packets[1][TCP].flags == "S"
print(str(a))
assert a.remove_action(duplicate2)
tamper.left = actions.drop.DropAction()
packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    packets = a.run(packet, logger)
assert len(packets) == 0
print(str(a))
a.parse("[TCP:flags:A]-duplicate(tamper{TCP:flags:replace:R}(tamper{TCP:chksum:replace:14239},),duplicate(tamper{TCP:flags:replace:S},))-|", logger)
packet = layers.packet.Packet(IP()/TCP(flags="A"))
assert a.check(packet, logger)
packets = a.run(packet, logger)
assert len(packets) == 3
assert packets[0][TCP].flags == "R"
assert packets[1][TCP].flags == "S"
assert packets[2][TCP].flags == "A"
def test_index():
"""
Tests index
"""
a = actions.tree.ActionTree("out")
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
tamper3 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="F")
assert a.add_action(tamper)
assert a[0] == tamper
assert not a[1]
assert a.add_action(tamper2)
assert a[0] == tamper
assert a[1] == tamper2
assert a[-1] == tamper2
assert not a[10]
assert a.add_action(tamper3)
assert a[-1] == tamper3
assert not a[-11]
def test_mate():
"""
Tests mate primitive
"""
logger = logging.getLogger("test")
t = actions.trigger.Trigger("field", "flags", "TCP")
a = actions.tree.ActionTree("out", trigger=t)
assert not a.choose_one()
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
duplicate = actions.duplicate.DuplicateAction()
duplicate2 = actions.duplicate.DuplicateAction()
drop = actions.drop.DropAction()
other_a = actions.tree.ActionTree("out", trigger=t)
assert not a.mate(other_a), "Can't mate empty trees"
assert a.add_action(tamper)
assert other_a.add_action(tamper2)
assert a.choose_one() == tamper
assert other_a.choose_one() == tamper2
assert a.get_parent(tamper) == (None, None)
assert other_a.get_parent(tamper2) == (None, None)
assert a.add_action(duplicate)
assert a.get_parent(duplicate) == (tamper, "left")
duplicate.right = drop
assert a.get_parent(drop) == (duplicate, "right")
assert other_a.add_action(duplicate2)
# Test mating a full tree with a full tree
assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate(,drop),)-|"
assert str(other_a) == "[TCP:flags:0]-tamper{TCP:flags:replace:R}(duplicate,)-|"
assert a.swap(duplicate, other_a, duplicate2)
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate,)-|"
assert str(other_a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:R}(duplicate(,drop),)-|"
assert len(a) == 2
assert len(other_a) == 3
assert duplicate2 not in other_a
assert duplicate not in a
assert tamper.left == duplicate2
assert tamper2.left == duplicate
assert other_a.get_parent(duplicate) == (tamper2, "left")
assert a.get_parent(duplicate2) == (tamper, "left")
assert other_a.get_parent(drop) == (duplicate, "right")
assert a.get_parent(None) == (None, None)
# Test mating two trees with just root nodes
t = actions.trigger.Trigger("field", "flags", "TCP")
a = actions.tree.ActionTree("out", trigger=t)
assert not a.choose_one()
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
duplicate = actions.duplicate.DuplicateAction()
duplicate2 = actions.duplicate.DuplicateAction()
drop = actions.drop.DropAction()
other_a = actions.tree.ActionTree("out", trigger=t)
assert not a.mate(other_a)
assert a.add_action(duplicate)
assert other_a.add_action(duplicate2)
assert a.mate(other_a)
assert a.action_root == duplicate2
assert other_a.action_root == duplicate
assert not duplicate.left and not duplicate.right
assert not duplicate2.left and not duplicate2.right
# Confirm that no nodes have been aliased or connected between the trees
for node in a:
for other_node in other_a:
assert not node.left == other_node
assert not node.right == other_node
# Test mating two trees where one is empty
assert a.remove_action(duplicate2)
# This should swap the duplicate action to be the action root of the other tree
assert str(a) == "[TCP:flags:0]-|"
assert str(other_a) == "[TCP:flags:0]-duplicate-|"
assert a.mate(other_a)
assert not other_a.action_root
assert a.action_root == duplicate
assert len(a) == 1
assert len(other_a) == 0
# Confirm that no nodes have been aliased or connected between the trees
for node in a:
for other_node in other_a:
if other_node:
assert not node.left == other_node
assert not node.right == other_node
assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate(,drop),)-|", logger)
drop = a.action_root.left.right
assert str(drop) == "drop"
# Note that this will return a valid ActionTree, but because it is empty,
# it is technically a False-y value, as it's length is 0
assert other_a.parse("[TCP:flags:0]-|", logger) == other_a
a.swap(drop, other_a, None)
assert other_a.action_root == drop
assert not a.action_root.left.right
assert str(other_a) == "[TCP:flags:0]-drop-|"
assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate,)-|"
other_a.swap(drop, a, a.action_root.left)
# Confirm that no nodes have been aliased or connected between the trees
for node in a:
for other_node in other_a:
if other_node:
assert not node.left == other_node
assert not node.right == other_node
assert str(other_a) == "[TCP:flags:0]-duplicate-|"
assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(drop,)-|"
a.parse("[TCP:flags:0]-drop-|", logger)
other_a.parse("[TCP:flags:0]-duplicate(drop,drop)-|", logger)
a_drop = a.action_root
other_duplicate = other_a.action_root
a.swap(a_drop, other_a, other_duplicate)
print(str(a))
print(str(other_a))
assert str(other_a) == "[TCP:flags:0]-drop-|"
assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
duplicate = actions.duplicate.DuplicateAction()
duplicate2 = actions.duplicate.DuplicateAction()
drop = actions.drop.DropAction()
drop2 = actions.drop.DropAction()
drop3 = actions.drop.DropAction()
a = actions.tree.ActionTree("out", trigger=t)
a.add_action(duplicate)
a.add_action(drop)
a.add_action(drop2)
assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
assert a.get_slots() == 0
other_a = actions.tree.ActionTree("out", trigger=t)
other_a.add_action(drop3)
a.swap(drop, other_a, drop3)
assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
a.swap(drop3, other_a, drop)
assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
assert a.mate(other_a)
def test_choose_one():
"""
Tests choose_one functionality
"""
a = actions.tree.ActionTree("out")
drop = actions.drop.DropAction()
assert not a.choose_one()
assert a.add_action(drop)
assert a.choose_one() == drop
assert a.remove_action(drop)
assert not a.choose_one()
duplicate = actions.duplicate.DuplicateAction()
a.add_action(duplicate)
assert a.choose_one() == duplicate
duplicate.left = drop
assert a.choose_one() in [duplicate, drop]
# Make sure that both actions get chosen
chosen = set()
for i in range(0, 10000):
act = a.choose_one()
chosen.add(act)
assert chosen == set([duplicate, drop])
|
ide/tests/test_import_archive.py | Ramonrlb/cloudpebble | 147 | 39474 | """ These tests check basic operation of ide.tasks.archive.do_import_archive """
import mock
from django.core.exceptions import ValidationError
from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException
from ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings
from ide.models.project import Project
from utils.fakes import FakeS3
__author__ = 'joe'
fake_s3 = FakeS3()
@mock.patch('ide.models.s3file.s3', fake_s3)
class TestImportArchive(CloudpebbleTestCase):
def setUp(self):
self.login()
@staticmethod
def make_resource_spec(name='IMAGE_BLAH'):
return {
'resources': {
'media': [{
'file': 'images/blah.png',
'name': name,
'type': 'bitmap'
}]
}
}
def test_import_basic_bundle_with_appinfo(self):
""" Check that a minimal bundle imports without error """
bundle = build_bundle({
'src/main.c': '',
'appinfo.json': make_appinfo()
})
do_import_archive(self.project_id, bundle)
def test_throws_with_invalid_appinfo(self):
""" Check that appinfo validation is performed with a few invalid values """
invalid_things = [
('projectType', 'invalid'),
('sdkVersion', '1'),
('versionLabel', '01.0'),
]
for k, v in invalid_things:
bundle = build_bundle({
'src/main.c': '',
'appinfo.json': make_appinfo({k: v})
})
with self.assertRaises(ValidationError):
do_import_archive(self.project_id, bundle)
def test_import_basic_bundle_with_npm_manifest(self):
""" Check that archives with package.json can be imported """
bundle = build_bundle({
'src/main.c': '',
'package.json': make_package(package_options={'name': 'myproject'})
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.app_long_name, 'test')
self.assertEqual(project.app_short_name, 'myproject')
def test_import_package_with_dependencies(self):
""" Check that dependencies in a package.json file are imported into the database """
deps = {
'some_package': '3.14.15',
'another': 'http://blah.com/package.git',
}
bundle = build_bundle({
'src/main.c': '',
'package.json': make_package(package_options={
'dependencies': deps
})
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
actual_deps = {d.name: d.version for d in project.dependencies.all()}
self.assertDictEqual(actual_deps, deps)
def test_import_package_with_keywords(self):
""" Check that keywords in a package.json file are imported into the database """
keywords = ['pebbles', 'watch', 'bunnies']
bundle = build_bundle({
'src/main.c': '',
'package.json': make_package(package_options={
'keywords': keywords
})
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(set(keywords), set(project.keywords))
def test_import_appinfo_with_resources(self):
""" Check that a resource can be imported in an appinfo.json project """
bundle = build_bundle({
'src/main.c': '',
'resources/images/blah.png': 'contents!',
'appinfo.json': make_appinfo(options=self.make_resource_spec())
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.resources.get().variants.get().get_contents(), 'contents!')
def test_import_package_with_resources(self):
""" Check that a resource can be imported in an package.json project """
bundle = build_bundle({
'src/main.c': '',
'resources/images/blah.png': 'contents!',
'package.json': make_package(pebble_options=self.make_resource_spec())
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.resources.get().variants.get().get_contents(), 'contents!')
def test_throws_with_local_file_dependencies(self):
""" Throw if any dependencies reference local files """
bad_versions = [
'file:security/breach',
'/security/breach',
'./security/breach',
'../security/breach',
'~/security/breach'
]
for version in bad_versions:
bundle = build_bundle({
'src/main.c': '',
'package.json': make_package(package_options={
'dependencies': {'some_package': version}
})
})
with self.assertRaises(ValidationError):
do_import_archive(self.project_id, bundle)
def test_throws_if_sdk2_project_has_array_appkeys(self):
""" Throw when trying to import an sdk 2 project with array appkeys """
bundle = build_bundle({
'src/main.c': '',
'appinfo.json': make_appinfo(options={'appKeys': [], 'sdkVersion': '2'})
})
with self.assertRaises(ValidationError):
do_import_archive(self.project_id, bundle)
def test_invalid_resource_id(self):
""" Check that invalid characters are banned from resource IDs """
bundle = build_bundle({
'src/main.c': '',
'resources/images/blah.png': 'contents!',
'package.json': make_package(pebble_options=self.make_resource_spec("<>"))
})
with self.assertRaises(ValidationError):
do_import_archive(self.project_id, bundle)
def test_import_json_file(self):
""" Check that json files are correctly imported """
bundle = build_bundle({
'src/js/test.json': '{}',
'src/main.c': '',
'package.json': make_package()
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.source_files.filter(file_name='test.json').count(), 1)
def test_import_rocky(self):
""" Check that json files are correctly imported """
bundle = build_bundle({
'src/rocky/index.js': '',
'src/common/lib.js': '',
'src/pkjs/app.js': '',
'package.json': make_package(pebble_options={'projectType': 'rocky'})
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.source_files.filter(file_name='index.js', target='app').count(), 1)
self.assertEqual(project.source_files.filter(file_name='lib.js', target='common').count(), 1)
self.assertEqual(project.source_files.filter(file_name='app.js', target='pkjs').count(), 1)
@mock.patch('ide.models.s3file.s3', fake_s3)
class TestImportLibrary(CloudpebbleTestCase):
def setUp(self):
self.login(type='package')
def test_import_basic_library(self):
""" Try importing a basic library """
bundle = build_bundle({
'include/my-lib.h': '',
'package.json': make_package(pebble_options={'projectType': 'package'}),
'src/c/my-lib.c': '',
'src/c/my-priv.h': '',
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
files = {f.file_name: f for f in project.source_files.all()}
self.assertSetEqual(set(files.keys()), {'my-lib.h', 'my-lib.c', 'my-priv.h'})
self.assertEqual(files['my-lib.h'].target, 'public')
self.assertEqual(files['my-lib.c'].target, 'app')
self.assertEqual(files['my-priv.h'].target, 'app')
def test_import_library_with_resources(self):
""" Try importing a basic library with resources """
bundle = build_bundle({
'package.json': make_package(pebble_options={
'projectType': 'package',
'resources': {'media': [{
'type': 'bitmap',
'name': 'MY_RES1',
'file': 'res1.png'
}, {
'type': 'bitmap',
'name': 'MY_RES2',
'file': 'res2.png'
}]}
}),
'src/resources/res1.png': '',
'src/resources/res2.png': '',
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertSetEqual({f.file_name for f in project.resources.all()}, {'res1.png', 'res2.png'})
|
fairlearn/metrics/__init__.py | alliesaizan/fairlearn | 1,142 | 39491 | <filename>fairlearn/metrics/__init__.py
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
"""Functionality for computing metrics, with a particular focus on disaggregated metrics.
For our purpose, a metric is a function with signature
``f(y_true, y_pred, ....)``
where ``y_true`` are the set of true values and ``y_pred`` are
values predicted by a machine learning algorithm. Other
arguments may be present (most often sample weights), which will
affect how the metric is calculated.
This module provides the concept of a *disaggregated metric*.
This is a metric where in addition to ``y_true`` and ``y_pred``
values, the user provides information about group membership
for each sample.
For example, a user could provide a 'Gender' column, and the
disaggregated metric would contain separate results for the subgroups
'male', 'female' and 'nonbinary' indicated by that column.
The underlying metric function is evaluated for each of these three
subgroups.
This extends to multiple grouping columns, calculating the metric
for each combination of subgroups.
"""
import sys as _sys
from ._metric_frame import MetricFrame # noqa: F401
from ._make_derived_metric import make_derived_metric # noqa: F401
from ._generated_metrics import _generated_metric_dict
from ._disparities import ( # noqa: F401
demographic_parity_difference,
demographic_parity_ratio,
equalized_odds_difference,
equalized_odds_ratio)
from ._extra_metrics import ( # noqa: F401
true_positive_rate,
true_negative_rate,
false_positive_rate,
false_negative_rate,
_balanced_root_mean_squared_error,
mean_prediction,
selection_rate,
_mean_overprediction,
_mean_underprediction,
count)
# Add the generated metrics of the form
# `<metric>_{difference,ratio,group_min,group_max}`
_module_obj = _sys.modules[__name__]
for _name, _func in _generated_metric_dict.items():
setattr(_module_obj, _name, _func)
# ============================================
# Build list of items to be listed in the docs
_core = [
"MetricFrame",
"make_derived_metric"
]
_disparities = [
"demographic_parity_difference",
"demographic_parity_ratio",
"equalized_odds_difference",
"equalized_odds_ratio"
]
_extra_metrics = [
"true_positive_rate",
"true_negative_rate",
"false_positive_rate",
"false_negative_rate",
"mean_prediction",
"selection_rate",
"count"
]
__all__ = _core + _disparities + _extra_metrics + list(sorted(_generated_metric_dict.keys()))
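# ---------------------------------------------------------------------------
# Illustrative usage sketch (comment only, not executed on import). Keyword
# names follow recent fairlearn releases and may differ slightly between
# versions; y_true, y_pred and gender_column are placeholders.
#
#   from sklearn.metrics import accuracy_score
#   mf = MetricFrame(metrics=accuracy_score,
#                    y_true=y_true,
#                    y_pred=y_pred,
#                    sensitive_features=gender_column)
#   mf.overall    # accuracy over the whole dataset
#   mf.by_group   # accuracy disaggregated by each subgroup of the column
# ---------------------------------------------------------------------------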
|
Projects/Healthcare/breast-cancer/src/data_loading/augmentations.py | DanielMabadeje/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 3,266 | 39506 | # Copyright (C) 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>
#
# This file is part of breast_cancer_classifier.
#
# breast_cancer_classifier is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# breast_cancer_classifier is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with breast_cancer_classifier. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
import cv2
import numpy as np
from src.constants import VIEWS
def shift_window_inside_image(start, end, image_axis_size, input_axis_size):
"""
If the window goes outside the bounds of the image, shift it so that it fits inside the image.
"""
if start < 0:
start = 0
end = start + input_axis_size
elif end > image_axis_size:
end = image_axis_size
start = end - input_axis_size
return start, end
def zero_pad_and_align_window(image_axis_size, input_axis_size, max_crop_and_size_noise, bidirectional):
"""
Adds zero padding to the image if the cropped image is smaller than the required window size.
"""
pad_width = input_axis_size - image_axis_size + max_crop_and_size_noise * (2 if bidirectional else 1)
assert (pad_width >= 0)
if bidirectional:
pad_front = int(pad_width / 2)
start = max_crop_and_size_noise
else:
start, pad_front = 0, 0
pad_back = pad_width - pad_front
end = start + input_axis_size
return start, end, pad_front, pad_back
def simple_resize(image_to_resize, size):
"""
Resizes image to the required size
"""
image_resized = cv2.resize(image_to_resize, (size[1], size[0]), interpolation=cv2.INTER_CUBIC)
if len(image_to_resize.shape) == 3 and len(image_resized.shape) == 2 and image_to_resize.shape[2] == 1:
image_resized = np.expand_dims(image_resized, 2)
return image_resized
def crop_image(image, input_size, borders):
"""
Crops image to the required size using window location
"""
cropped_image = image[borders[0]: borders[1], borders[2]: borders[3]]
if ((borders[1] - borders[0]) != input_size[0]) or ((borders[3] - borders[2]) != input_size[1]):
cropped_image = simple_resize(cropped_image, input_size)
return cropped_image
def window_location_at_center_point(input_size, center_y, center_x):
"""
Calculates window location (top, bottom, left, right)
given center point and size of augmentation window
"""
half_height = input_size[0] // 2
half_width = input_size[1] // 2
top = center_y - half_height
bottom = center_y + input_size[0] - half_height
left = center_x - half_width
right = center_x + input_size[1] - half_width
return top, bottom, left, right
def sample_crop_best_center(image, input_size, random_number_generator, max_crop_noise, max_crop_size_noise,
best_center, view):
"""
Crops using the best center point and ideal window size.
Pads small images to have enough room for crop noise and size noise.
Applies crop noise to the location of the window borders.
"""
max_crop_noise = np.array(max_crop_noise)
crop_noise_multiplier = np.zeros(2, dtype=np.float32)
if max_crop_noise.any():
# there is no point in sampling crop_noise_multiplier if it's going to be multiplied by (0, 0)
crop_noise_multiplier = random_number_generator.uniform(low=-1.0, high=1.0, size=2)
center_y, center_x = best_center
# get the window around the center point. The window might be outside of the image.
top, bottom, left, right = window_location_at_center_point(input_size, center_y, center_x)
pad_y_top, pad_y_bottom, pad_x_right = 0, 0, 0
if VIEWS.is_cc(view):
if image.shape[0] < input_size[0] + (max_crop_noise[0] + max_crop_size_noise) * 2:
# Image is smaller than window size + noise margin in y direction.
# CC view: pad at both top and bottom
top, bottom, pad_y_top, pad_y_bottom = zero_pad_and_align_window(image.shape[0], input_size[0],
max_crop_noise[0] + max_crop_size_noise,
True)
elif VIEWS.is_mlo(view):
if image.shape[0] < input_size[0] + max_crop_noise[0] + max_crop_size_noise:
# Image is smaller than window size + noise margin in y direction.
# MLO view: only pad at the bottom
top, bottom, _, pad_y_bottom = zero_pad_and_align_window(image.shape[0], input_size[0],
max_crop_noise[0] + max_crop_size_noise, False)
else:
raise KeyError("Unknown view", view)
if image.shape[1] < input_size[1] + max_crop_noise[1] + max_crop_size_noise:
# Image is smaller than window size + noise margin in x direction.
left, right, _, pad_x_right = zero_pad_and_align_window(image.shape[1], input_size[1],
max_crop_noise[1] + max_crop_size_noise, False)
# Pad image if necessary by allocating new memory and copying contents over
if pad_y_top > 0 or pad_y_bottom > 0 or pad_x_right > 0:
new_zero_array = np.zeros((
image.shape[0] + pad_y_top + pad_y_bottom,
image.shape[1] + pad_x_right, image.shape[2]), dtype=image.dtype)
new_zero_array[pad_y_top: image.shape[0] + pad_y_top, 0: image.shape[1]] = image
image = new_zero_array
# if window is drawn outside of image, shift it to be inside the image.
top, bottom = shift_window_inside_image(top, bottom, image.shape[0], input_size[0])
left, right = shift_window_inside_image(left, right, image.shape[1], input_size[1])
if top == 0:
# there is nowhere to shift upwards, we only apply noise downwards
crop_noise_multiplier[0] = np.abs(crop_noise_multiplier[0])
elif bottom == image.shape[0]:
# there is nowhere to shift down, we only apply noise upwards
crop_noise_multiplier[0] = -np.abs(crop_noise_multiplier[0])
# else: we do nothing to the noise multiplier
if left == 0:
# there is nowhere to shift left, we only apply noise to move right
crop_noise_multiplier[1] = np.abs(crop_noise_multiplier[1])
elif right == image.shape[1]:
# there is nowhere to shift right, we only apply noise to move left
crop_noise_multiplier[1] = -np.abs(crop_noise_multiplier[1])
# else: we do nothing to the noise multiplier
borders = np.array((top, bottom, left, right), dtype=np.int32)
# Calculate maximum amount of how much the window can move for cropping noise
top_margin = top
bottom_margin = image.shape[0] - bottom
left_margin = left
right_margin = image.shape[1] - right
if crop_noise_multiplier[0] >= 0:
vertical_margin = bottom_margin
else:
vertical_margin = top_margin
if crop_noise_multiplier[1] >= 0:
horizontal_margin = right_margin
else:
horizontal_margin = left_margin
if vertical_margin < max_crop_noise[0]:
max_crop_noise[0] = vertical_margin
if horizontal_margin < max_crop_noise[1]:
max_crop_noise[1] = horizontal_margin
crop_noise = np.round(max_crop_noise * crop_noise_multiplier)
crop_noise = np.array((crop_noise[0], crop_noise[0], crop_noise[1], crop_noise[1]), dtype=np.int32)
borders = borders + crop_noise
# this is to make sure that the cropping window isn't outside of the image
assert (borders[0] >= 0) and (borders[1] <= image.shape[0]) and (borders[2] >= 0) and (borders[3] <= image.shape[
1]), "Centre of the crop area is sampled such that the borders are outside of the image. Borders: " + str(
borders) + ', image shape: ' + str(image.shape)
# return the padded image and cropping window information
return image, borders
def sample_crop(image, input_size, borders, random_number_generator, max_crop_size_noise):
"""
Applies size noise to the window borders.
"""
size_noise_multiplier = random_number_generator.uniform(low=-1.0, high=1.0, size=4)
top_margin = borders[0]
bottom_margin = image.shape[0] - borders[1]
left_margin = borders[2]
right_margin = image.shape[1] - borders[3]
max_crop_size_noise = min(max_crop_size_noise, top_margin, bottom_margin, left_margin, right_margin)
if input_size[0] >= input_size[1]:
max_crop_size_vertical_noise = max_crop_size_noise
max_crop_size_horizontal_noise = np.round(max_crop_size_noise * (input_size[1] / input_size[0]))
elif input_size[0] < input_size[1]:
max_crop_size_vertical_noise = np.round(max_crop_size_noise * (input_size[0] / input_size[1]))
max_crop_size_horizontal_noise = max_crop_size_noise
else:
raise RuntimeError()
max_crop_size_noise = np.array((max_crop_size_vertical_noise, max_crop_size_vertical_noise,
max_crop_size_horizontal_noise, max_crop_size_horizontal_noise),
dtype=np.int32)
size_noise = np.round(max_crop_size_noise * size_noise_multiplier)
size_noise = np.array(size_noise, dtype=np.int32)
borders = borders + size_noise
# this is to make sure that the cropping window isn't outside of the image
assert (borders[0] >= 0) and (borders[1] <= image.shape[0]) and (borders[2] >= 0) and (borders[3] <= image.shape[
1]), "Center of the crop area is sampled such that the borders are outside of the image. Borders: " + str(
borders) + ', image shape: ' + str(image.shape)
# Sanity check: make sure that the top border is above the bottom border.
assert borders[1] > borders[0], "Bottom above the top. Top: " + str(borders[0]) + ', bottom: ' + str(borders[1])
# Sanity check: make sure that the left border is to the left of the right border.
assert borders[3] > borders[2], "Left on the right. Left: " + str(borders[2]) + ', right: ' + str(borders[3])
return borders
def random_augmentation_best_center(image, input_size, random_number_generator, max_crop_noise=(0, 0),
max_crop_size_noise=0, auxiliary_image=None,
best_center=None, view=""):
"""
Crops augmentation window from a given image
by applying noise in location and size of the window.
"""
joint_image = np.expand_dims(image, 2)
if auxiliary_image is not None:
joint_image = np.concatenate([joint_image, auxiliary_image], axis=2)
joint_image, borders = sample_crop_best_center(joint_image, input_size, random_number_generator, max_crop_noise,
max_crop_size_noise, best_center, view)
borders = sample_crop(joint_image, input_size, borders, random_number_generator, max_crop_size_noise)
sampled_joint_image = crop_image(joint_image, input_size, borders)
if auxiliary_image is None:
return sampled_joint_image[:, :, 0], None
else:
return sampled_joint_image[:, :, 0], sampled_joint_image[:, :, 1:]
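# ---------------------------------------------------------------------------
# Minimal self-check (illustrative only): exercises the pure window helpers
# above on a toy image size; it does not touch the view-specific sampling
# logic, and the numbers below can be verified by hand.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # A 100x100 window centered at (20, 30) initially sticks out of a 500x400
    # image and is shifted back inside along both axes.
    top, bottom, left, right = window_location_at_center_point((100, 100), 20, 30)
    assert (top, bottom, left, right) == (-30, 70, -20, 80)
    assert shift_window_inside_image(top, bottom, 500, 100) == (0, 100)
    assert shift_window_inside_image(left, right, 400, 100) == (0, 100)
    # Cropping with borders that already match the window size returns the
    # window without resizing.
    crop = crop_image(np.zeros((500, 400, 1), dtype=np.float32), (100, 100), (0, 100, 0, 100))
    assert crop.shape == (100, 100, 1)
    print("augmentation helper self-check passed")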
|
data_pipeline/_kafka_producer.py | poros/data_pipeline | 110 | 39512 | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import time
from collections import defaultdict
from collections import namedtuple
from contextlib import contextmanager
from cached_property import cached_property
from kafka import create_message
from kafka import KafkaClient
from kafka.common import ProduceRequest
from data_pipeline._position_data_tracker import PositionDataTracker
from data_pipeline._producer_retry import RetryHandler
from data_pipeline._retry_util import ExpBackoffPolicy
from data_pipeline._retry_util import MaxRetryError
from data_pipeline._retry_util import Predicate
from data_pipeline._retry_util import retry_on_condition
from data_pipeline._retry_util import RetryPolicy
from data_pipeline.config import get_config
from data_pipeline.envelope import Envelope
_EnvelopeAndMessage = namedtuple("_EnvelopeAndMessage", ["envelope", "message"])
logger = get_config().logger
# prepare needs to be in the module top level so it can be serialized for
# multiprocessing
def _prepare(envelope_and_message):
try:
kwargs = {}
if envelope_and_message.message.keys:
kwargs['key'] = envelope_and_message.message.encoded_keys
return create_message(
envelope_and_message.envelope.pack(envelope_and_message.message),
**kwargs
)
except:
logger.exception('Prepare failed')
raise
class KafkaProducer(object):
"""The KafkaProducer deals with buffering messages that need to be published
into Kafka, preparing them for publication, and ultimately publishing them.
Args:
producer_position_callback (function): The producer position callback
is called when the KafkaProducer is instantiated, and every time
messages are published to notify the producer of current position
information of successfully published messages.
dry_run (Optional[bool]): When dry_run mode is on, the producer won't
talk to a real Kafka topic or a real Schematizer. Defaults to False.
"""
@cached_property
def envelope(self):
return Envelope()
def __init__(self, producer_position_callback, dry_run=False):
self.producer_position_callback = producer_position_callback
self.dry_run = dry_run
self.kafka_client = KafkaClient(get_config().cluster_config.broker_list)
self.position_data_tracker = PositionDataTracker()
self._reset_message_buffer()
self.skip_messages_with_pii = get_config().skip_messages_with_pii
self._publish_retry_policy = RetryPolicy(
ExpBackoffPolicy(with_jitter=True),
max_retry_count=get_config().producer_max_publish_retry_count
)
self._automatic_flush_enabled = True
@contextmanager
def disable_automatic_flushing(self):
"""Prevents the producer from flushing automatically (e.g. for timeouts
or batch size) while the context manager is open.
"""
try:
self._automatic_flush_enabled = False
yield
finally:
self._automatic_flush_enabled = True
def wake(self):
"""Should be called periodically if we're not otherwise waking up by
publishing, to ensure that messages are actually published.
"""
# if we haven't woken up in a while, we may need to flush messages
self._flush_if_necessary()
def publish(self, message):
if message.contains_pii and self.skip_messages_with_pii:
logger.info(
"Skipping a PII message - "
"uuid hex: {0}, "
"schema_id: {1}, "
"timestamp: {2}, "
"type: {3}".format(
message.uuid_hex,
message.schema_id,
message.timestamp,
message.message_type.name
)
)
return
self._add_message_to_buffer(message)
self.position_data_tracker.record_message_buffered(message)
self._flush_if_necessary()
def flush_buffered_messages(self):
produce_method = (self._publish_produce_requests_dry_run
if self.dry_run else self._publish_produce_requests)
produce_method(self._generate_produce_requests())
self._reset_message_buffer()
def close(self):
self.flush_buffered_messages()
self.kafka_client.close()
def _publish_produce_requests(self, requests):
"""It will try to publish all the produce requests for topics, and
retry a number of times until either all the requests are successfully
published or it can no longer retry, in which case, the exception will
be thrown.
Each time the requests that are successfully published in the previous
round will be removed from the requests and won't be published again.
"""
unpublished_requests = list(requests)
retry_handler = RetryHandler(self.kafka_client, unpublished_requests)
def has_requests_to_be_sent():
return bool(retry_handler.requests_to_be_sent)
retry_handler = retry_on_condition(
retry_policy=self._publish_retry_policy,
retry_conditions=[Predicate(has_requests_to_be_sent)],
func_to_retry=self._publish_requests,
use_previous_result_as_param=True,
retry_handler=retry_handler
)
if retry_handler.has_unpublished_request:
raise MaxRetryError(last_result=retry_handler)
def _publish_requests(self, retry_handler):
"""Main function to publish message requests. This function is wrapped
with the retry function and will be retried based on the specified retry policy.
Args:
retry_handler: :class:`data_pipeline._producer_retry.RetryHandler`
that determines which messages should be retried next time.
"""
if not retry_handler.requests_to_be_sent:
return retry_handler
responses = self._try_send_produce_requests(
retry_handler.requests_to_be_sent
)
retry_handler.update_requests_to_be_sent(
responses,
self.position_data_tracker.topic_to_kafka_offset_map
)
self._record_success_requests(retry_handler.success_topic_stats_map)
return retry_handler
def _try_send_produce_requests(self, requests):
# Either it throws exceptions and none of them succeeds, or it returns
# responses of all the requests (success or fail response).
try:
return self.kafka_client.send_produce_request(
payloads=requests,
acks=get_config().kafka_client_ack_count,
fail_on_error=False
)
except Exception:
# Exceptions like KafkaUnavailableError, LeaderNotAvailableError,
# UnknownTopicOrPartitionError, etc., are not controlled by
# `fail_on_error` flag and could be thrown from the kafka client,
# and fail all the requests. We will retry all the requests until
# either all of them are successfully published or it exceeds the
# maximum retry criteria.
return []
def _record_success_requests(self, success_topic_stats_map):
for topic_partition, stats in success_topic_stats_map.iteritems():
topic = topic_partition.topic_name
assert stats.message_count == len(self.message_buffer[topic])
self.position_data_tracker.record_messages_published(
topic=topic,
offset=stats.original_offset,
message_count=stats.message_count
)
self.message_buffer.pop(topic)
def _publish_produce_requests_dry_run(self, requests):
for request in requests:
self._publish_single_request_dry_run(request)
def _publish_single_request_dry_run(self, request):
topic = request.topic
message_count = len(request.messages)
self.position_data_tracker.record_messages_published(
topic,
-1,
message_count
)
def _is_ready_to_flush(self):
time_limit = get_config().kafka_producer_flush_time_limit_seconds
return (self._automatic_flush_enabled and (
(time.time() - self.start_time) >= time_limit or
self.message_buffer_size >= get_config().kafka_producer_buffer_size
))
def _flush_if_necessary(self):
if self._is_ready_to_flush():
self.flush_buffered_messages()
def _add_message_to_buffer(self, message):
topic = message.topic
message = self._prepare_message(message)
self.message_buffer[topic].append(message)
self.message_buffer_size += 1
def _generate_produce_requests(self):
return [
ProduceRequest(topic=topic, partition=0, messages=messages)
for topic, messages in self._generate_prepared_topic_and_messages()
]
def _generate_prepared_topic_and_messages(self):
return self.message_buffer.iteritems()
def _prepare_message(self, message):
return _prepare(_EnvelopeAndMessage(envelope=self.envelope, message=message))
def _reset_message_buffer(self):
if not hasattr(self, 'message_buffer_size') or self.message_buffer_size > 0:
self.producer_position_callback(self.position_data_tracker.get_position_data())
self.start_time = time.time()
self.message_buffer = defaultdict(list)
self.message_buffer_size = 0
class LoggingKafkaProducer(KafkaProducer):
def _publish_produce_requests(self, requests):
logger.info(
"Flushing buffered messages - requests={0}, messages={1}".format(
len(requests), self.message_buffer_size
)
)
try:
super(LoggingKafkaProducer, self)._publish_produce_requests(requests)
logger.info("All messages published successfully")
except MaxRetryError as e:
logger.exception(
"Failed to publish all produce requests. {0}".format(repr(e))
)
raise
def _reset_message_buffer(self):
logger.info("Resetting message buffer for success requests.")
super(LoggingKafkaProducer, self)._reset_message_buffer()
def _publish_single_request_dry_run(self, request):
super(LoggingKafkaProducer, self)._publish_single_request_dry_run(request)
logger.debug("dry_run mode: Would have published {0} messages to {1}".format(
len(request.messages),
request.topic
))
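# ---------------------------------------------------------------------------
# Illustrative usage sketch (comment only): the message object and cluster
# configuration come from the surrounding data_pipeline package; the names
# below are placeholders.
#
#   def save_position(position_data):
#       # persist position_data so publishing can resume after a restart
#       ...
#
#   producer = LoggingKafkaProducer(save_position, dry_run=True)
#   producer.publish(message)    # buffers the message, flushing when needed
#   producer.wake()              # call periodically while otherwise idle
#   producer.close()             # flushes remaining messages and disconnects
# ---------------------------------------------------------------------------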
|
stackimpact/utils.py | timgates42/stackimpact-python | 742 | 39544 |
import time
import uuid
import base64
import hashlib
def millis():
return int(round(time.time() * 1000))
def timestamp():
return int(time.time())
def base64_encode(s):
return base64.b64encode(s.encode('utf-8')).decode('utf-8')
def base64_decode(b):
return base64.b64decode(b).decode('utf-8')
def generate_uuid():
return str(uuid.uuid4())
def generate_sha1(text):
sha1_hash = hashlib.sha1()
sha1_hash.update(text.encode('utf-8'))
return sha1_hash.hexdigest()
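# ---------------------------------------------------------------------------
# Minimal self-check (illustrative only) for the helpers above, using values
# that can be verified by hand.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    assert base64_encode('hello') == 'aGVsbG8='
    assert base64_decode('aGVsbG8=') == 'hello'
    assert generate_sha1('hello') == 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
    assert len(generate_uuid()) == 36
    assert isinstance(millis(), int) and isinstance(timestamp(), int)
    print('utils self-check passed')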
|
example/app/views.py | aolkin/django-bootstrap-form | 324 | 39545 | <gh_stars>100-1000
from django.shortcuts import render
from app.forms import ExampleForm
def index(request):
form = ExampleForm()
return render(request, 'index.html', {'form': form})
|
myia/operations/op_array_getitem_wrap.py | strint/myia | 222 | 39617 | """Implementation of the 'array_getitem_wrap' operation."""
from ..lib import Slice, core, myia_static
from ..operations import array_getitem, reshape
def _dim_explicit(dim, dim_size):
if dim < 0:
dim = dim_size + dim
assert dim >= 0
return dim
@myia_static
def _build_slices(a_shp, item):
begin = ()
end = ()
stride = ()
remove_dims = ()
for adx, a in enumerate(a_shp):
if adx < len(item):
i = item[adx]
if isinstance(i, (slice, Slice)):
begin = begin + (
0 if i.start is None else _dim_explicit(i.start, a),
)
end = end + (a if i.stop is None else _dim_explicit(i.stop, a),)
stride = stride + (1 if i.step is None else i.step,)
remove_dims = remove_dims + (False,)
else:
begin = begin + (_dim_explicit(i, a),)
end = end + (_dim_explicit(i, a) + 1,)
stride = stride + (1,)
remove_dims = remove_dims + (True,)
else:
begin = begin + (0,)
end = end + (a,)
stride = stride + (1,)
remove_dims = remove_dims + (False,)
return begin, end, stride, remove_dims
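# Worked example (conceptual; _build_slices is evaluated through myia_static):
# for an array of shape (5, 6) indexed with (slice(1, 4), 2) it yields
#   begin=(1, 2), end=(4, 3), stride=(1, 1), remove_dims=(False, True)
# so array_getitem keeps rows 1:4 of column 2, and the resulting singleton
# column axis is dropped by the reshape in array_getitem_wrap below.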
@core
def array_getitem_wrap(array, item):
"""Implementation of `array_getitem`."""
if isinstance(item, tuple):
begin, end, stride, remove_dims = _build_slices(array.shape, item)
else:
begin, end, stride, remove_dims = _build_slices(array.shape, (item,))
ret = array_getitem(array, begin, end, stride)
final_shape = ()
for o, r in zip(ret.shape, remove_dims):
if not r:
final_shape = final_shape + (o,)
ret = reshape(ret, final_shape)
return ret
__operation_defaults__ = {
"name": "array_getitem_wrap",
"registered_name": "array_getitem_wrap",
"mapping": array_getitem_wrap,
"python_implementation": None,
}
|
django_tutorial/views/error_views.py | twtrubiks/django-tutorial | 431 | 39628 | <reponame>twtrubiks/django-tutorial
from django.shortcuts import render
def view_404(request):
return render(request, 'django_tutorial/error_pages/page_404.html', status=404)
def view_500(request):
return render(request, 'django_tutorial/error_pages/page_500.html', status=500)
|
examples/anagrams_demo.py | aathi2002/open-tamil | 218 | 39641 | import codecs
from solthiruthi.dictionary import *
from tamil import wordutils
TVU, TVU_size = DictionaryBuilder.create(TamilVU)
ag, ag2 = wordutils.anagrams_in_dictionary(TVU)
with codecs.open("demo.txt", "w", "utf-8") as fp:
itr = 1
for k, c in ag:
v = ag2[k]
fp.write("%03d) %s\n" % (itr, " | ".join(v)))
itr += 1
|
data/kitti_raw_loader.py | infinityofspace/SfmLearner-Pytorch | 908 | 39647 | from __future__ import division
import numpy as np
from path import Path
from imageio import imread
from skimage.transform import resize as imresize
from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans
from datetime import datetime
class KittiRawLoader(object):
def __init__(self,
dataset_dir,
static_frames_file=None,
img_height=128,
img_width=416,
min_disp=0.2,
get_depth=False,
get_pose=False,
depth_size_ratio=1):
dir_path = Path(__file__).realpath().dirname()
test_scene_file = dir_path/'test_scenes.txt'
self.from_speed = static_frames_file is None
if static_frames_file is not None:
self.collect_static_frames(static_frames_file)
with open(test_scene_file, 'r') as f:
test_scenes = f.readlines()
self.test_scenes = [t[:-1] for t in test_scenes]
self.dataset_dir = dataset_dir
self.img_height = img_height
self.img_width = img_width
self.cam_ids = ['02', '03']
self.date_list = ['2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03']
self.min_disp = min_disp
self.get_depth = get_depth
self.get_pose = get_pose
self.depth_size_ratio = depth_size_ratio
self.collect_train_folders()
def collect_static_frames(self, static_frames_file):
with open(static_frames_file, 'r') as f:
frames = f.readlines()
self.static_frames = {}
for fr in frames:
if fr == '\n':
continue
date, drive, frame_id = fr.split(' ')
curr_fid = '%.10d' % (int(frame_id[:-1]))  # plain int(); np.int is deprecated in newer numpy
if drive not in self.static_frames.keys():
self.static_frames[drive] = []
self.static_frames[drive].append(curr_fid)
def collect_train_folders(self):
self.scenes = []
for date in self.date_list:
drive_set = (self.dataset_dir/date).dirs()
for dr in drive_set:
if dr.name[:-5] not in self.test_scenes:
self.scenes.append(dr)
def collect_scenes(self, drive):
train_scenes = []
for c in self.cam_ids:
oxts = sorted((drive/'oxts'/'data').files('*.txt'))
with open(drive/'oxts'/'timestamps.txt', 'r') as f:
times = [datetime.strptime(time_string[:-4], "%Y-%m-%d %H:%M:%S.%f") for time_string in f.readlines()]
scene_data = {'cid': c,
'dir': drive,
'speed': [],
'time': [t.timestamp() for t in times],
'frame_id': [],
'pose': [],
'rel_path': drive.name + '_' + c}
scale = None
origin = None
imu2velo = read_calib_file(drive.parent/'calib_imu_to_velo.txt')
velo2cam = read_calib_file(drive.parent/'calib_velo_to_cam.txt')
cam2cam = read_calib_file(drive.parent/'calib_cam_to_cam.txt')
velo2cam_mat = transform_from_rot_trans(velo2cam['R'], velo2cam['T'])
imu2velo_mat = transform_from_rot_trans(imu2velo['R'], imu2velo['T'])
cam_2rect_mat = transform_from_rot_trans(cam2cam['R_rect_00'], np.zeros(3))
imu2cam = cam_2rect_mat @ velo2cam_mat @ imu2velo_mat
for n, f in enumerate(oxts):
metadata = np.genfromtxt(f)
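# Per the KITTI oxts data format: index 0 is the latitude (used below to
# derive the Mercator scale) and indices 8:11 are the forward/leftward/
# upward velocities (vf, vl, vu) in m/s, taken here as the frame speed.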
speed = metadata[8:11]
scene_data['speed'].append(speed)
scene_data['frame_id'].append('{:010d}'.format(n))
lat = metadata[0]
if scale is None:
scale = np.cos(lat * np.pi / 180.)
pose_matrix = pose_from_oxts_packet(metadata[:6], scale)
if origin is None:
origin = pose_matrix
odo_pose = imu2cam @ np.linalg.inv(origin) @ pose_matrix @ np.linalg.inv(imu2cam)
scene_data['pose'].append(odo_pose[:3])
sample = self.load_image(scene_data, 0)
if sample is None:
return []
scene_data['P_rect'] = self.get_P_rect(scene_data, sample[1], sample[2])
scene_data['intrinsics'] = scene_data['P_rect'][:, :3]
train_scenes.append(scene_data)
return train_scenes
def get_scene_imgs(self, scene_data):
def construct_sample(scene_data, i, frame_id):
sample = {"img": self.load_image(scene_data, i)[0], "id": frame_id}
if self.get_depth:
sample['depth'] = self.get_depth_map(scene_data, i)
if self.get_pose:
sample['pose'] = scene_data['pose'][i]
return sample
if self.from_speed:
cum_displacement = np.zeros(3)
for i, (speed1, speed2, t1, t2) in enumerate(zip(scene_data['speed'][1:],
scene_data['speed'][:-1],
scene_data['time'][1:],
scene_data['time'][:-1])):
print(speed1, speed2, t1, t2)
cum_displacement += 0.5*(speed1 + speed2) / (t2-t1)
disp_mag = np.linalg.norm(cum_displacement)
if disp_mag > self.min_disp:
frame_id = scene_data['frame_id'][i]
yield construct_sample(scene_data, i, frame_id)
cum_displacement *= 0
else: # from static frame file
drive = str(scene_data['dir'].name)
for (i, frame_id) in enumerate(scene_data['frame_id']):
if (drive not in self.static_frames.keys()) or (frame_id not in self.static_frames[drive]):
yield construct_sample(scene_data, i, frame_id)
def get_P_rect(self, scene_data, zoom_x, zoom_y):
calib_file = scene_data['dir'].parent/'calib_cam_to_cam.txt'
filedata = read_calib_file(calib_file)
P_rect = np.reshape(filedata['P_rect_' + scene_data['cid']], (3, 4))
P_rect[0] *= zoom_x
P_rect[1] *= zoom_y
return P_rect
def load_image(self, scene_data, tgt_idx):
img_file = scene_data['dir']/'image_{}'.format(scene_data['cid'])/'data'/scene_data['frame_id'][tgt_idx]+'.png'
if not img_file.isfile():
return None
img = imread(img_file)
zoom_y = self.img_height/img.shape[0]
zoom_x = self.img_width/img.shape[1]
img = imresize(img, (self.img_height, self.img_width))
# workaround for skimage (float [0 .. 1]) and imageio (uint8 [0 .. 255]) interoperability
img = (img * 255).astype(np.uint8)
return img, zoom_x, zoom_y
def get_depth_map(self, scene_data, tgt_idx):
# compute projection matrix velodyne->image plane
R_cam2rect = np.eye(4)
calib_dir = scene_data['dir'].parent
cam2cam = read_calib_file(calib_dir/'calib_cam_to_cam.txt')
velo2cam = read_calib_file(calib_dir/'calib_velo_to_cam.txt')
velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'][..., np.newaxis]))
velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))
R_cam2rect[:3, :3] = cam2cam['R_rect_00'].reshape(3, 3)
velo2cam = np.dot(R_cam2rect, velo2cam)
velo_file_name = scene_data['dir']/'velodyne_points'/'data'/'{}.bin'.format(scene_data['frame_id'][tgt_idx])
return generate_depth_map(velo_file_name, scene_data['P_rect'], velo2cam,
self.img_width, self.img_height, self.depth_size_ratio)
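# ---------------------------------------------------------------------------
# Illustrative usage sketch (comment only; paths are placeholders and the raw
# KITTI layout <dataset_dir>/<date>/<date>_drive_xxxx_sync/... is assumed):
#
#   loader = KittiRawLoader('/path/to/kitti_raw',
#                           static_frames_file='/path/to/static_frames.txt',
#                           get_depth=True)
#   for drive in loader.scenes:
#       for scene_data in loader.collect_scenes(drive):
#           for sample in loader.get_scene_imgs(scene_data):
#               img, frame_id = sample['img'], sample['id']
#               depth = sample['depth']  # present because get_depth=True
# ---------------------------------------------------------------------------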
|
cloudtunes-server/cloudtunes/async.py | skymemoryGit/cloudtunes | 529 | 39676 | """Asynchronous MongoDB and Redis connections."""
from functools import partial
import motor
import tornadoredis
from cloudtunes import settings
RedisClient = partial(tornadoredis.Client, **settings.REDIS)
mongo = motor.MotorClient(**settings.MONGODB).cloudtunes
redis = RedisClient()
|
code/applications/qs_predict_probablistic.py | ninamiolane/quicksilver | 126 | 39682 | # add LDDMM shooting code into path
import sys
sys.path.append('../vectormomentum/Code/Python');
sys.path.append('../library')
from subprocess import call
import argparse
import os.path
#Add deep learning related libraries
from collections import Counter
import torch
import prediction_network
import util
import numpy as np
from skimage import exposure
#Add LDDMM registration related libraries
# pyca modules
import PyCA.Core as ca
import PyCA.Common as common
#import PyCA.Display as display
# vector momentum modules
# others
import logging
import copy
import math
import registration_methods
#parse command line input
parser = argparse.ArgumentParser(description='Deformation prediction given set of moving and target images.')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('--moving-image', nargs='+', required=True, metavar=('m1', 'm2, m3...'),
help='List of moving images, seperated by space.')
requiredNamed.add_argument('--target-image', nargs='+', required=True, metavar=('t1', 't2, t3...'),
help='List of target images, seperated by space.')
requiredNamed.add_argument('--output-prefix', nargs='+', required=True, metavar=('o1', 'o2, o3...'),
help='List of registration output prefixes for every moving/target image pair, seperated by space. Preferred to be a directory (e.g. /some_path/output_dir/)')
parser.add_argument('--samples', type=int, default=50, metavar='N',
help='number of times to sample the network (default: 50)')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for prediction network (default: 64)')
parser.add_argument('--n-GPU', type=int, default=1, metavar='N',
help='number of GPUs used for prediction (default: 1). For maximum efficiency please set the batch size divisible by the number of GPUs.')
parser.add_argument('--use-CPU-for-shooting', action='store_true', default=False,
help='Use CPU for geodesic shooting. Slow, but saves GPU memory.')
parser.add_argument('--shoot-steps', type=int, default=0, metavar='N',
help='time steps for geodesic shooting. Ignore this option to use the default step size used by the registration model.')
parser.add_argument('--affine-align', action='store_true', default=False,
help='Perform affine registration to align moving and target images to ICBM152 atlas space. Require niftireg.')
parser.add_argument('--histeq', action='store_true', default=False,
help='Perform histogram equalization to the moving and target images.')
parser.add_argument('--atlas', default="../data/atlas/icbm152.nii",
help="Atlas to use for (affine) pre-registration")
parser.add_argument('--prediction-parameter', default='../../network_configs/OASIS_predict_probabilistic.pth.tar',
help="network parameters for the prediction network")
args = parser.parse_args()
# check validity of input arguments from command line
def check_args(args):
# number of input images/output prefix consistency check
n_moving_images = len(args.moving_image)
n_target_images = len(args.target_image)
n_output_prefix = len(args.output_prefix)
if (n_moving_images != n_target_images):
print('The number of moving images is not consistent with the number of target images!')
sys.exit(1)
elif (n_moving_images != n_output_prefix ):
print('The number of output prefix is not consistent with the number of input images!')
sys.exit(1)
# number of GPU check (positive integers)
if (args.n_GPU <= 0):
print('Number of GPUs must be positive!')
sys.exit(1)
# geodesic shooting step check (positive integers)
if (args.shoot_steps < 0):
print('Shooting steps (--shoot-steps) is negative. Using model default step.')
# geodesic shooting step check (positive integers)
if (args.samples < 1):
print('Number of samples (--samples) is smaller than 1. Using model default step.')
#enddef
def create_net(args, network_config):
net_single = prediction_network.net(network_config['network_feature']).cuda();
net_single.load_state_dict(network_config['state_dict'])
if (args.n_GPU > 1) :
device_ids=range(0, args.n_GPU)
net = torch.nn.DataParallel(net_single, device_ids=device_ids).cuda()
else:
net = net_single
net.train()
return net;
#enddef
def preprocess_image(image_pyca, histeq):
image_np = common.AsNPCopy(image_pyca)
nan_mask = np.isnan(image_np)
image_np[nan_mask] = 0
image_np /= np.amax(image_np)
# perform histogram equalization if needed
if histeq:
image_np[image_np != 0] = exposure.equalize_hist(image_np[image_np != 0])
return image_np
#perform deformation prediction
def predict_image(args):
if (args.use_CPU_for_shooting):
mType = ca.MEM_HOST
else:
mType = ca.MEM_DEVICE
# load the prediction network
predict_network_config = torch.load(args.prediction_parameter)
prediction_net = create_net(args, predict_network_config);
batch_size = args.batch_size
patch_size = predict_network_config['patch_size']
input_batch = torch.zeros(batch_size, 2, patch_size, patch_size, patch_size).cuda()
# start prediction
for i in range(0, len(args.moving_image)):
common.Mkdir_p(os.path.dirname(args.output_prefix[i]))
if (args.affine_align):
# Perform affine registration to both moving and target image to the ICBM152 atlas space.
# Registration is done using Niftireg.
call(["reg_aladin",
"-noSym", "-speeeeed", "-ref", args.atlas ,
"-flo", args.moving_image[i],
"-res", args.output_prefix[i]+"moving_affine.nii",
"-aff", args.output_prefix[i]+'moving_affine_transform.txt'])
call(["reg_aladin",
"-noSym", "-speeeeed" ,"-ref", args.atlas ,
"-flo", args.target_image[i],
"-res", args.output_prefix[i]+"target_affine.nii",
"-aff", args.output_prefix[i]+'target_affine_transform.txt'])
moving_image = common.LoadITKImage(args.output_prefix[i]+"moving_affine.nii", mType)
target_image = common.LoadITKImage(args.output_prefix[i]+"target_affine.nii", mType)
else:
moving_image = common.LoadITKImage(args.moving_image[i], mType)
target_image = common.LoadITKImage(args.target_image[i], mType)
#preprocessing of the image
moving_image_np = preprocess_image(moving_image, args.histeq);
target_image_np = preprocess_image(target_image, args.histeq);
grid = moving_image.grid()
moving_image_processed = common.ImFromNPArr(moving_image_np, mType)
target_image_processed = common.ImFromNPArr(target_image_np, mType)
moving_image.setGrid(grid)
target_image.setGrid(grid)
predict_transform_space = False
if 'matlab_t7' in predict_network_config:
predict_transform_space = True
# run actual prediction
prediction_result = util.predict_momentum(moving_image_np, target_image_np, input_batch, batch_size, patch_size, prediction_net, predict_transform_space);
m0 = prediction_result['image_space']
m0_reg = common.FieldFromNPArr(prediction_result['image_space'], mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi = common.AsNPCopy(registration_result['phiinv'])
phi_square = np.power(phi,2)
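# phi and phi_square accumulate the sampled inverse deformation fields and
# their element-wise squares; after the sampling loop they yield the Monte
# Carlo mean and variance (E[phi^2] - E[phi]^2) that are saved below.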
for sample_iter in range(1, args.samples):
print(sample_iter)
prediction_result = util.predict_momentum(moving_image_np, target_image_np, input_batch, batch_size, patch_size, prediction_net, predict_transform_space);
m0 += prediction_result['image_space']
m0_reg = common.FieldFromNPArr(prediction_result['image_space'], mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi += common.AsNPCopy(registration_result['phiinv'])
phi_square += np.power(common.AsNPCopy(registration_result['phiinv']),2)
m0_mean = np.divide(m0, args.samples);
m0_reg = common.FieldFromNPArr(m0_mean, mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi_mean = registration_result['phiinv']
phi_var = np.divide(phi_square, args.samples) - np.power(np.divide(phi, args.samples), 2)
#save result
common.SaveITKImage(registration_result['I1'], args.output_prefix[i]+"I1.mhd")
common.SaveITKField(phi_mean, args.output_prefix[i]+"phiinv_mean.mhd")
common.SaveITKField(common.FieldFromNPArr(phi_var, mType), args.output_prefix[i]+"phiinv_var.mhd")
#enddef
if __name__ == '__main__':
check_args(args);
predict_image(args)
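# ---------------------------------------------------------------------------
# Illustrative command line (paths are placeholders); each moving/target pair
# gets its own output prefix:
#
#   python qs_predict_probablistic.py \
#       --moving-image m1.nii m2.nii \
#       --target-image t1.nii t2.nii \
#       --output-prefix out/pair1/ out/pair2/ \
#       --samples 50 --batch-size 64
#
# Each prefix receives I1.mhd (the warped moving image) plus phiinv_mean.mhd
# and phiinv_var.mhd, the mean and voxel-wise variance of the sampled inverse
# deformation fields.
# ---------------------------------------------------------------------------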
|
tods/sk_interface/detection_algorithm/SOD_skinterface.py | ZhuangweiKang/tods | 544 | 39739 | import numpy as np
from ..base import BaseSKI
from tods.detection_algorithm.PyodSOD import SODPrimitive
class SODSKI(BaseSKI):
def __init__(self, **hyperparams):
super().__init__(primitive=SODPrimitive, **hyperparams)
self.fit_available = True
self.predict_available = True
self.produce_available = False
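# ---------------------------------------------------------------------------
# Illustrative usage sketch (comment only): X is a placeholder 2-D numpy array
# of shape (n_samples, n_features); keyword hyperparameters are forwarded to
# the underlying SODPrimitive, so the accepted names depend on that primitive.
#
#   transformer = SODSKI(contamination=0.1)
#   transformer.fit(X)
#   labels = transformer.predict(X)   # per-sample outlier labels
# ---------------------------------------------------------------------------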
|
munjong/remove_sejong_period_error.py | cjdans5545/khaiii | 1,235 | 39758 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Remove wrong sentence-breaking marks that follow eojeols with known period errors.
__author__ = 'Jamie (<EMAIL>)'
__copyright__ = 'Copyright (C) 2017-, Kakao Corp. All rights reserved.'
"""
###########
# imports #
###########
from argparse import ArgumentParser
import logging
import os
import re
import sys
from typing import TextIO, Tuple
from khaiii.munjong.sejong_corpus import Morph, WORD_ID_PTN
#############
# functions #
#############
def _get_three_lines(fin: TextIO) -> Tuple[str, str, str]:
"""
get three lines tuple from file (generator)
Args:
fin: input file
Yields:
prev. prev. line
prev. line
curr. line
"""
prev_prev_line = fin.readline().rstrip('\r\n')
prev_line = fin.readline().rstrip('\r\n')
# print first two lines
print(prev_prev_line)
print(prev_line)
for curr_line in fin:
curr_line = curr_line.rstrip('\r\n')
yield prev_prev_line, prev_line, curr_line
prev_prev_line = prev_line
prev_line = curr_line
def _is_known_period_error_eojeol(line: str) -> bool:
"""
Whether the eojeol contains a known sentence-breaking error after a period.
Args:
line: line (eojeol)
Returns:
whether the eojeol has the error or not
"""
cols = line.split('\t')
if len(cols) != 3 or not WORD_ID_PTN.match(cols[0]):
return False
if '/SF + ' not in cols[2] or re.match(r'.+/EF \+ ./SF$', cols[2]):
return False
if re.match(r'.+/SF \+ [\'"’”]/SS$', cols[2]):
return False
morphs = [Morph.parse(_) for _ in cols[2].split(' + ')]
tags_str = '+'.join([_.tag for _ in morphs])
if 'SN+SF+SN' in tags_str and not tags_str.endswith('+SF'):
# 4.6판: 4/SN + ./SF + 6/SN + 판/NNB
if 'XSN+SF+SN' not in tags_str:
return True
elif 'SL+SF+SL' in tags_str and not tags_str.endswith('+SF'):
# S.M.오너: S/SL + ./SF + M/SL + ./SF + 오너/NNG
return True
return False
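# Example (illustrative): an eojeol line has three tab-separated columns --
# word id, surface form, and morphological analysis. For an analysis such as
# "4/SN + ./SF + 6/SN + 판/NNB" (the token "4.6판" above) this function
# returns True, so the spurious sentence-break marks (</p> followed by <p>)
# right after that eojeol are dropped by run() below.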
def run():
"""
run function which is the start point of program
"""
file_name = os.path.basename(sys.stdin.name)
for line_num, (prev_prev_line, prev_line, curr_line) in enumerate(_get_three_lines(sys.stdin),
start=1):
if curr_line == '</p>' and _is_known_period_error_eojeol(prev_line):
continue
elif prev_line == '</p>' and curr_line == '<p>' and \
_is_known_period_error_eojeol(prev_prev_line):
logging.info('%s:%d\t%s', file_name, line_num, prev_prev_line)
continue
print(curr_line)
########
# main #
########
def main():
"""
main function processes only argument parsing
"""
parser = ArgumentParser(description='remove wrong sentence-breaking marks after'
' eojeols with known period errors')
parser.add_argument('--input', help='input file <default: stdin>', metavar='FILE')
parser.add_argument('--output', help='output file <default: stdout>', metavar='FILE')
parser.add_argument('--debug', help='enable debug', action='store_true')
args = parser.parse_args()
if args.input:
sys.stdin = open(args.input, 'rt')
if args.output:
sys.stdout = open(args.output, 'wt')
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
run()
if __name__ == '__main__':
main()
|
src/pretalx/event/migrations/0023_update_featured_visibility.py | lili668668/pretalx | 418 | 39779 | <filename>src/pretalx/event/migrations/0023_update_featured_visibility.py
# Generated by Django 3.0.5 on 2020-07-26 15:45
from django.db import migrations
def update_show_featured(apps, schema_editor):
Event = apps.get_model("event", "Event")
EventSettings = apps.get_model("event", "Event_SettingsStore")
for event in Event.objects.all():
old_value = EventSettings.objects.filter(
object=event, key="show_sneak_peek"
).first()
if old_value and old_value.value == "False":
EventSettings.objects.create(
object=event,
key="show_featured",
value="never",
)
class Migration(migrations.Migration):
dependencies = [
("event", "0022_auto_20200124_1213"),
]
operations = [
migrations.RunPython(update_show_featured, migrations.RunPython.noop),
]
|
dash_docs/chapters/dash_bio/examples/ideogram.py | joelostblom/dash-docs | 379 | 39829 | import dash
import dash_bio as dashbio
import dash_html_components as html
import dash_core_components as dcc
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
'Select which chromosomes to display on the ideogram below:',
dcc.Dropdown(
id='displayed-chromosomes',
options=[{'label': str(i), 'value': str(i)} for i in range(1, 23)],
multi=True,
value=[str(i) for i in range(1, 23)]
),
dashbio.Ideogram(
id='my-dashbio-ideogram'
),
html.Div(id='ideogram-rotated')
])
@app.callback(
dash.dependencies.Output('my-dashbio-ideogram', 'chromosomes'),
[dash.dependencies.Input('displayed-chromosomes', 'value')]
)
def update_ideogram(value):
return value
@app.callback(
dash.dependencies.Output('ideogram-rotated', 'children'),
[dash.dependencies.Input('my-dashbio-ideogram', 'rotated')]
)
def update_ideogram_rotated(rot):
return 'You have {}selected a chromosome.'.format(
'' if rot else 'not ')
if __name__ == '__main__':
app.run_server(debug=True)
|
objectModel/Python/tests/storage/test_github.py | rt112000/CDM | 884 | 39869 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import json
import unittest
import unittest.mock as mock
import random
from tests.common import async_test
from cdm.storage.github import GithubAdapter
class GithubStorageAdapterTestCase(unittest.TestCase):
def test_make_corpus_path(self):
adapter = GithubAdapter()
adapter.timeout = 2000
adapter.maximum_timeout = 5000
adapter.number_of_retries = 0
# Valid path.
self.assertEqual(adapter.create_corpus_path(
'https://raw.githubusercontent.com/Microsoft/CDM/master/schemaDocuments/dir1/dir2/file.json'), '/dir1/dir2/file.json')
# Invalid path.
self.assertIsNone(adapter.create_corpus_path('https://raw.githubusercontent.com/Microsoft/CDM/master/schemaDocument/dir1/dir2/file.json'))
@mock.patch('cdm.utilities.network.cdm_http_client.urllib.request.urlopen', new_callable=mock.mock_open, read_data=json.dumps({'Ḽơᶉëᶆ': 'ȋṕšᶙṁ'}).encode())
@async_test
async def test_read(self, mock_urlopen):
adapter = GithubAdapter()
adapter.timeout = 2000
adapter.maximum_timeout = 5000
raw_data = await adapter.read_async('/dir1/dir2/file.json')
data = json.loads(raw_data)
# Verify URL.
self.assertEqual(mock_urlopen.call_args[0][0].full_url, 'https://raw.githubusercontent.com/Microsoft/CDM/master/schemaDocuments/dir1/dir2/file.json')
self.assertEqual(data, {'Ḽơᶉëᶆ': 'ȋṕšᶙṁ'}) # Verify data.
if __name__ == '__main__':
unittest.main()
|
speedTester/logs/average.py | saurabhcommand/Hello-world | 1,428 | 39870 | <reponame>saurabhcommand/Hello-world
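# Reads the per-language timing logs produced by the speed tester, prints each
# language's average runtime, and optionally wipes the logs afterwards.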
file1 = open("./logs/pythonlog.txt", 'r+')
avg1 = 0.0
lines1 = 0.0
for line in file1:
lines1 = lines1 + 1.0
avg1 = (avg1 + float(line))
avg1 = avg1/lines1
print(avg1, "for Python with", lines1, "lines")
file2 = open("./logs/clog.txt", 'r+')
avg2 = 0.0
lines2 = 0.0
for line in file2:
lines2 = lines2 + 1.0
avg2 = (avg2 + float(line))
avg2 = avg2/lines2
print(avg2, "for C with", lines2, "lines")
file3 = open("./logs/cpplog.txt", 'r+')
avg3 = 0.0
lines3 = 0.0
for line in file3:
lines3 = lines3 + 1.0
avg3 = (avg3 + float(line))
avg3 = avg3/lines3
print(avg3, "for C++ with", lines3, "lines")
file4 = open("./logs/javalog.txt", 'r+')
avg4 = 0.0
lines4 = 0.0
for line in file4:
lines4 = lines4 + 1.0
avg4 = (avg4 + float(line))
avg4 = avg4/lines4
print(avg4, "for Java with", lines4, "lines")
word = ""
while(word.lower() != "y" and word.lower() != "n"):
word = input("Do you want to wipe the previous log? [Y/N]")
if(word.lower() == "y"):
file1.truncate(0)
file3.truncate(0)
file2.truncate(0)
file4.truncate(0)
print("Done.")
file4.close()
file3.close()
file2.close()
file1.close()
|
tensorflow_ranking/python/keras/estimator_test.py | renyi533/ranking | 2,482 | 39878 | <filename>tensorflow_ranking/python/keras/estimator_test.py
# Copyright 2021 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for Keras Estimator."""
import os
from absl.testing import parameterized
import tensorflow as tf
from google.protobuf import text_format
from tensorflow_ranking.python import data
from tensorflow_ranking.python.keras import estimator as estimator_lib
from tensorflow_ranking.python.keras import losses
from tensorflow_ranking.python.keras import metrics
from tensorflow_ranking.python.keras import model
from tensorflow_ranking.python.keras import network
from tensorflow_serving.apis import input_pb2
_SIZE = 'example_list_size'
_ELWC_PROTO = text_format.Parse(
"""
context {
features {
feature {
key: "query_length"
value { int64_list { value: 3 } }
}
}
}
examples {
features {
feature {
key: "unigrams"
value { bytes_list { value: "tensorflow" } }
}
feature {
key: "utility"
value { float_list { value: 0.0 } }
}
feature {
key: "dense_feature"
value { float_list { value: -0.5 value: 0.5 } }
}
feature {
key: "doc_weight"
value { float_list { value: 0.0 } }
}
}
}
examples {
features {
feature {
key: "unigrams"
value { bytes_list { value: ["learning", "to", "rank"] } }
}
feature {
key: "utility"
value { float_list { value: 1.0 } }
}
feature {
key: "dense_feature"
value { float_list { value: 0.5 value: 0.5 } }
}
feature {
key: "doc_weight"
value { float_list { value: 1.0 } }
}
}
}
""", input_pb2.ExampleListWithContext())
_LABEL_FEATURE = 'utility'
_PADDING_LABEL = -1.
_EXAMPLE_WEIGHT_FEATURE = 'doc_weight'
def _get_feature_columns():
def _normalizer_fn(t):
return 2 * t
context_feature_columns = {
'query_length':
tf.feature_column.numeric_column(
'query_length',
shape=(1,),
default_value=0,
dtype=tf.int64,
normalizer_fn=_normalizer_fn)
}
example_feature_columns = {
'utility':
tf.feature_column.numeric_column(
'utility',
shape=(1,),
default_value=_PADDING_LABEL,
dtype=tf.float32),
'unigrams':
tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_vocabulary_list(
'unigrams',
vocabulary_list=[
'ranking', 'regression', 'classification', 'ordinal'
]),
dimension=10),
'dense_feature':
tf.feature_column.numeric_column(
'dense_feature',
shape=(2,),
default_value=0.0,
dtype=tf.float32)
}
custom_objects = {'_normalizer_fn': _normalizer_fn}
return context_feature_columns, example_feature_columns, custom_objects
def _get_example_weight_feature_column():
return tf.feature_column.numeric_column(
_EXAMPLE_WEIGHT_FEATURE, dtype=tf.float32, default_value=1.)
# This network needs actual layers, otherwise the estimator training fails.
class _DummyUnivariateRankingNetwork(network.UnivariateRankingNetwork):
"""Dummy univariate ranking network with a simple scoring function."""
def __init__(self,
context_feature_columns=None,
example_feature_columns=None,
name='dummy_ranking_network',
**kwargs):
super(_DummyUnivariateRankingNetwork, self).__init__(
context_feature_columns=context_feature_columns,
example_feature_columns=example_feature_columns,
name=name,
**kwargs)
self._score_layer = tf.keras.layers.Dense(units=1)
def score(self, context_features=None, example_features=None, training=True):
example_input = [
tf.keras.layers.Flatten()(example_features[name])
for name in sorted(self.example_feature_columns)
]
return self._score_layer(tf.concat(example_input, axis=1))
class KerasModelToEstimatorTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(KerasModelToEstimatorTest, self).setUp()
(context_feature_columns, example_feature_columns,
custom_objects) = _get_feature_columns()
self._context_feature_columns = context_feature_columns
self._example_feature_columns = example_feature_columns
# Remove label feature from example feature column.
del self._example_feature_columns[_LABEL_FEATURE]
self._custom_objects = custom_objects
self._network = _DummyUnivariateRankingNetwork(
context_feature_columns=self._context_feature_columns,
example_feature_columns=self._example_feature_columns)
self._loss = losses.get(
losses.RankingLossKey.SOFTMAX_LOSS,
reduction=tf.compat.v2.losses.Reduction.SUM_OVER_BATCH_SIZE)
self._eval_metrics = metrics.default_keras_metrics()
self._optimizer = tf.keras.optimizers.Adagrad(learning_rate=0.1)
self._config = tf.estimator.RunConfig(
keep_checkpoint_max=2, save_checkpoints_secs=2)
self._data_file = os.path.join(tf.compat.v1.test.get_temp_dir(),
'test_elwc.tfrecord')
serialized_elwc_list = [
_ELWC_PROTO.SerializeToString(),
] * 20
if tf.io.gfile.exists(self._data_file):
tf.io.gfile.remove(self._data_file)
with tf.io.TFRecordWriter(self._data_file) as writer:
for serialized_elwc in serialized_elwc_list:
writer.write(serialized_elwc)
def tearDown(self):
super(KerasModelToEstimatorTest, self).tearDown()
if tf.io.gfile.exists(self._data_file):
tf.io.gfile.remove(self._data_file)
self._data_file = None
def _make_input_fn(self, weights_feature_name=None):
"""Return an input function, serves weights defined in weights_feature_name.
Args:
weights_feature_name: (str) A string defines the weights feature in
dataset. None if no weights is used.
Returns:
A function serves features and labels. Weights will be served in features.
"""
def _input_fn():
context_feature_columns, example_feature_columns, _ = (
_get_feature_columns())
context_feature_spec = tf.feature_column.make_parse_example_spec(
list(context_feature_columns.values()))
label_column = tf.feature_column.numeric_column(
_LABEL_FEATURE, dtype=tf.float32, default_value=_PADDING_LABEL)
weight_column = (
_get_example_weight_feature_column()
if weights_feature_name == _EXAMPLE_WEIGHT_FEATURE else None)
example_fc_list = (
list(example_feature_columns.values()) + [label_column] +
([weight_column] if weight_column else []))
example_feature_spec = tf.feature_column.make_parse_example_spec(
example_fc_list)
dataset = data.build_ranking_dataset(
file_pattern=self._data_file,
data_format=data.ELWC,
batch_size=10,
context_feature_spec=context_feature_spec,
example_feature_spec=example_feature_spec,
list_size=2,
reader=tf.data.TFRecordDataset,
size_feature_name=_SIZE)
features = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
label = tf.squeeze(features.pop(_LABEL_FEATURE), axis=2)
return features, label
return _input_fn
def test_model_to_estimator_missing_custom_objects(self):
keras_model = model.create_keras_model(
network=self._network,
loss=self._loss,
metrics=self._eval_metrics,
optimizer=self._optimizer,
size_feature_name=_SIZE)
estimator = estimator_lib.model_to_estimator(
model=keras_model, config=self._config, custom_objects=None)
self.assertIsInstance(estimator, tf.compat.v1.estimator.Estimator)
# Train and export model.
train_spec = tf.estimator.TrainSpec(
input_fn=self._make_input_fn(), max_steps=1)
eval_spec = tf.estimator.EvalSpec(
name='eval', input_fn=self._make_input_fn(), steps=10)
with self.assertRaises(AttributeError):
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
@parameterized.named_parameters(
('without_weights', None, 'predict'),
('with_example_weights', _EXAMPLE_WEIGHT_FEATURE, 'predict'),
('pointwise_inference', None, 'regress'))
def test_model_to_estimator(self, weights_feature_name, serving_default):
keras_model = model.create_keras_model(
network=self._network,
loss=self._loss,
metrics=self._eval_metrics,
optimizer=self._optimizer,
size_feature_name=_SIZE)
estimator = estimator_lib.model_to_estimator(
model=keras_model,
config=self._config,
weights_feature_name=weights_feature_name,
custom_objects=self._custom_objects,
serving_default=serving_default)
self.assertIsInstance(estimator, tf.compat.v1.estimator.Estimator)
# Train and export model.
train_spec = tf.estimator.TrainSpec(
input_fn=self._make_input_fn(weights_feature_name), max_steps=1)
eval_spec = tf.estimator.EvalSpec(
name='eval',
input_fn=self._make_input_fn(weights_feature_name),
steps=10)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
context_feature_spec = tf.feature_column.make_parse_example_spec(
self._context_feature_columns.values())
example_feature_spec = tf.feature_column.make_parse_example_spec(
self._example_feature_columns.values())
def _make_serving_input_fn(serving_default):
if serving_default == 'predict':
return data.build_ranking_serving_input_receiver_fn(
data.ELWC,
context_feature_spec=context_feature_spec,
example_feature_spec=example_feature_spec,
size_feature_name=_SIZE)
else:
def pointwise_serving_fn():
serialized = tf.compat.v1.placeholder(
dtype=tf.string, shape=[None], name='input_ranking_tensor')
receiver_tensors = {'input_ranking_data': serialized}
features = data.parse_from_tf_example(
serialized,
context_feature_spec=context_feature_spec,
example_feature_spec=example_feature_spec,
size_feature_name=_SIZE)
return tf.estimator.export.ServingInputReceiver(features,
receiver_tensors)
return pointwise_serving_fn
serving_input_receiver_fn = _make_serving_input_fn(serving_default)
export_dir = os.path.join(tf.compat.v1.test.get_temp_dir(), 'export')
estimator.export_saved_model(export_dir, serving_input_receiver_fn)
# Confirm model ran and created checkpoints and saved model.
final_ckpt_path = os.path.join(estimator.model_dir, 'model.ckpt-1.meta')
self.assertTrue(tf.io.gfile.exists(final_ckpt_path))
saved_model_pb = os.path.join(export_dir,
tf.io.gfile.listdir(export_dir)[0],
'saved_model.pb')
self.assertTrue(tf.io.gfile.exists(saved_model_pb))
def test_model_to_estimator_wrong_weights_name(self):
keras_model = model.create_keras_model(
network=self._network,
loss=self._loss,
metrics=self._eval_metrics,
optimizer=self._optimizer,
size_feature_name=_SIZE)
estimator = estimator_lib.model_to_estimator(
model=keras_model,
config=self._config,
weights_feature_name='weights',
custom_objects=self._custom_objects)
self.assertIsInstance(estimator, tf.compat.v1.estimator.Estimator)
# Train and export model.
train_spec = tf.estimator.TrainSpec(
input_fn=self._make_input_fn(), max_steps=1)
eval_spec = tf.estimator.EvalSpec(
name='eval', input_fn=self._make_input_fn(), steps=10)
with self.assertRaises(ValueError):
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
if __name__ == '__main__':
tf.test.main()
|
tools/Polygraphy/tests/backend/trt/test_profile.py | KaliberAI/TensorRT | 5,249 | 39915 | <filename>tools/Polygraphy/tests/backend/trt/test_profile.py
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import tensorrt as trt
from polygraphy import mod
from polygraphy.backend.trt import Profile, network_from_onnx_bytes
from tests.models.meta import ONNX_MODELS
@pytest.fixture(scope="session")
def dynamic_identity_network():
builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["dynamic_identity"].loader)
with builder, network, parser:
yield builder, network, parser
class TestProfile(object):
def test_can_add(self):
profile = Profile()
min, opt, max = (1, 1), (2, 2), (4, 4)
assert profile.add("input", min=min, opt=opt, max=max) is profile
shape_tuple = profile["input"]
assert shape_tuple.min == min
assert shape_tuple.opt == opt
assert shape_tuple.max == max
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_fill_defaults_does_not_overwrite(self, dynamic_identity_network):
_, network, _ = dynamic_identity_network
profile = Profile().add("X", (1, 1, 1, 1), (1, 1, 2, 2), (1, 1, 3, 3))
        assert profile.fill_defaults(network) is profile
assert profile["X"].min == (1, 1, 1, 1)
assert profile["X"].opt == (1, 1, 2, 2)
assert profile["X"].max == (1, 1, 3, 3)
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_to_trt(self, dynamic_identity_network):
builder, network, _ = dynamic_identity_network
profile = Profile().add("X", (1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4))
trt_profile = profile.to_trt(builder, network)
        assert trt_profile.get_shape("X") == [(1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4)]
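        # A minimal sketch of how the converted profile is typically consumed (not part
        # of this test; `config` here is a hypothetical tensorrt.IBuilderConfig):
        #
        #   config = builder.create_builder_config()
        #   config.add_optimization_profile(trt_profile)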
|
tests/attention/test_attention_layer.py | SamuelCahyawijaya/fast-transformers | 1,171 | 39922 | #
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>,
# <NAME> <<EMAIL>>
#
import unittest
import torch
from fast_transformers.attention.attention_layer import AttentionLayer
class TestAttentionLayer(unittest.TestCase):
def _assert_sizes_attention(self, qshape, kshape, vshape):
def inner(q, k, v, m1, m2, m3):
self.assertEqual(q.shape, qshape)
self.assertEqual(k.shape, kshape)
self.assertEqual(v.shape, vshape)
N, L, H, E = q.shape
_, S, _, D = v.shape
return v.new_zeros((N, L, H, D))
return inner
def test_forward(self):
att = AttentionLayer(
self._assert_sizes_attention(
(10, 5, 4, 25),
(10, 8, 4, 25),
(10, 8, 4, 25)
),
100,
4
)
v = att(
torch.rand(10, 5, 100),
torch.rand(10, 8, 100),
torch.rand(10, 8, 100),
None, None, None
)
self.assertEqual(v.shape, (10, 5, 100))
att = AttentionLayer(
self._assert_sizes_attention(
(10, 5, 4, 32),
(10, 8, 4, 32),
(10, 8, 4, 64)
),
100,
4,
d_keys=32,
d_values=64
)
v = att(
torch.rand(10, 5, 100),
torch.rand(10, 8, 100),
torch.rand(10, 8, 100),
None, None, None
)
self.assertEqual(v.shape, (10, 5, 100))
if __name__ == "__main__":
unittest.main()
|
pulsar/async/_subprocess.py | PyCN/pulsar | 1,410 | 39940 |
if __name__ == '__main__':
import sys
import pickle
from multiprocessing import current_process
from multiprocessing.spawn import import_main_path
data = pickle.load(sys.stdin.buffer)
current_process().authkey = data['authkey']
sys.path = data['path']
import_main_path(data['main'])
impl = pickle.loads(data['impl'])
from pulsar.async.concurrency import run_actor
run_actor(impl)
|
django_slack/templatetags/django_slack.py | lociii/django-slack | 237 | 39961 | <gh_stars>100-1000
from django import template
from django.utils.encoding import force_str
from django.utils.functional import keep_lazy
from django.utils.safestring import SafeText, mark_safe
from django.template.defaultfilters import stringfilter
register = template.Library()
_slack_escapes = {
    ord('&'): u'&amp;',
    ord('<'): u'&lt;',
    ord('>'): u'&gt;',
}
@keep_lazy(str, SafeText)
@register.filter(is_safe=True)
@stringfilter
def escapeslack(value):
"""
Returns the given text with ampersands and angle brackets encoded for use in
the Slack API, per the Slack API documentation:
<https://api.slack.com/docs/formatting#how_to_escape_characters>
This is based on django.template.defaultfilters.escapejs.
"""
return mark_safe(force_str(value).translate(_slack_escapes))
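
# Illustrative behaviour of the filter above (a sketch, not executed here):
# only the three characters the Slack API treats specially are rewritten, so
# 'fish & <chips>' becomes 'fish &amp; &lt;chips&gt;', and in a template the
# filter would be applied as {{ message|escapeslack }}.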
|
python/tests/utils/test_environment_decorator_test.py | xuyanbo03/lab | 7,407 | 39991 | <reponame>xuyanbo03/lab
# Copyright 2018 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Tests TestEnvironmentDecorator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import unittest
import numpy as np
import six
from PIL import Image
from python.tests.utils import test_environment_decorator
_OBSERVATION_SPEC = [{'name': 'RGB_INTERLEAVED', 'shape': [1, 2, 3]}]
class EnvironmentStub(object):
def __init__(self):
self.test_observation_spec = _OBSERVATION_SPEC
self.test_observations = [
{
'RGB_INTERLEAVED':
np.array(
[[[255, 0, 0], [128, 0, 0], [0, 0, 255]],
[[0, 255, 0], [128, 0, 0], [0, 255, 0]]],
dtype=np.uint8)
},
{
'RGB_INTERLEAVED':
np.array([[[0, 255, 0], [0, 128, 0]]], dtype=np.uint8)
},
{
'RGB_INTERLEAVED':
np.array([[[0, 0, 255], [0, 0, 128]]], dtype=np.uint8)
},
]
self.test_rewards = [0, 1, 2, 3]
self._frame_index = 0
self.last_actions = None
self.last_steps = None
self.events_return = None
self.is_running_return = None
self.action_spec_return = None
self.reset_return = None
def step(self, actions, steps):
self.last_actions = actions
self.last_steps = steps
self._frame_index += 1
return self.test_rewards[self._frame_index - 1]
def is_running(self):
return self.is_running_return
def observations(self):
return self.test_observations[self._frame_index]
def events(self):
return self.events_return
def action_spec(self):
return self.action_spec_return
def observation_spec(self):
return self.test_observation_spec
def reset(self, **_):
self._frame_index = 0
return self.reset_return
def num_steps(self):
return self._frame_index
class TestEnvironmentDecoratorTest(unittest.TestCase):
def setUp(self):
self._env = EnvironmentStub()
self._decorator = test_environment_decorator.TestEnvironmentDecorator(
self._env)
def testStepIsCalled(self):
actions = object()
steps = 3
self.assertEqual(
self._decorator.step(actions, steps), self._env.test_rewards[0])
self.assertEqual(self._env.last_actions, actions)
self.assertEqual(self._env.last_steps, steps)
def testAccumulatedReward(self):
self._decorator.step(None, 1)
self._decorator.step(None, 1)
self.assertEqual(self._decorator.accumulated_reward(),
np.sum(self._env.test_rewards[0:2]))
def testResetAccumulatedReward(self):
self._decorator.step(None, 1)
self._decorator.reset()
self.assertEqual(self._decorator.accumulated_reward(), 0)
def testRewardHistory(self):
self._decorator.step(None, 1)
self._decorator.step(None, 1)
six.assertCountEqual(self,
self._decorator.reward_history(),
self._env.test_rewards[0:2])
def testResetRewardHistory(self):
self._decorator.step(None, 1)
self._decorator.reset()
six.assertCountEqual(self, self._decorator.reward_history(), [])
def testAccumulatedEvents(self):
events = ['event1', 'event2', 'event3']
self._env.events_return = events[0]
self._decorator.reset()
self._env.events_return = events[1]
self._decorator.step(None, 1)
self._env.events_return = events[2]
self._decorator.step(None, 1)
six.assertCountEqual(self, self._decorator.accumulated_events(), events)
def testResetAccumulatedEvents(self):
events = ['event1', 'event2']
self._env.events_return = events[0]
self._decorator.step(None, 1)
self._env.events_return = events[1]
self._decorator.reset()
six.assertCountEqual(self,
self._decorator.accumulated_events(), [events[1]])
def testObservationDelegation(self):
self.assertEqual(self._env.test_observations[0],
self._decorator.observations())
def testObservationSpecDelegation(self):
self.assertEqual(self._env.test_observation_spec,
self._decorator.observation_spec())
def testNumSteps(self):
self._decorator.reset()
self.assertEqual(self._decorator.num_steps(), 0)
self._decorator.step(None, None)
self.assertEqual(self._decorator.num_steps(), 1)
def testMethodDelegation(self):
method_names = ['is_running', 'events', 'action_spec', 'reset']
for name in method_names:
result = object()
setattr(self._env, name + '_return', result)
self.assertEqual(getattr(self._decorator, name)(), result)
def testSavingFrames(self):
self._decorator.reset()
self._decorator.step(None, 1)
self._decorator.step(None, 1)
temp_dir = tempfile.mkdtemp()
self._decorator.save_frames(temp_dir)
for index, observation in enumerate(self._env.test_observations):
expected_image = observation['RGB_INTERLEAVED']
image_file_name = os.path.join(temp_dir, 'frame{0}.png'.format(index))
image = np.asarray(Image.open(image_file_name))
self.assertTrue(np.array_equal(image, expected_image))
if __name__ == '__main__':
unittest.main()
|
roles/openshift_openstack/library/os_service_catalog.py | Roscoe198/Ansible-Openshift | 164 | 40022 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2018 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-wildcard-import,wildcard-import,unused-import,redefined-builtin
''' os_service_catalog_facts '''
from ansible.module_utils.basic import AnsibleModule
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_service_catalog_facts
short_description: Retrieve OpenStack service catalog facts
description:
- Retrieves all the available OpenStack services
notes:
- This module creates a new top-level C(openstack_service_catalog) fact
which contains a dictionary of OpenStack service endpoints like
network and load-balancers.
author:
- "<NAME> <<EMAIL>>"
'''
RETURN = '''
openstack_service_catalog:
description: OpenStack available services.
type: dict
returned: always
sample:
alarming:
- adminURL: http://172.16.0.9:8042
id: 2c40b50da0bb44178db91c8a9a29a46e
internalURL: http://172.16.0.9:8042
publicURL: https://mycloud.org:13042
region: regionOne
cloudformation:
- adminURL: http://172.16.0.9:8000/v1
id: 46648eded04e463281a9cba7ddcc45cb
internalURL: http://172.16.0.9:8000/v1
publicURL: https://mycloud.org:13005/v1
region: regionOne
compute:
- adminURL: http://172.16.0.9:8774/v2.1
id: bff1bc5dd92842c281b2358a6d15c5bc
internalURL: http://172.16.0.9:8774/v2.1
publicURL: https://mycloud.org:13774/v2.1
region: regionOne
event:
- adminURL: http://172.16.0.9:8779
id: 608ac3666ef24f2e8f240785b8612efb
internalURL: http://172.16.0.9:8779
publicURL: https://mycloud.org:13779
region: regionOne
identity:
- adminURL: https://mycloud.org:35357
id: 4d07689ce46b4d51a01cc873bc772c80
internalURL: http://172.16.0.9:5000
publicURL: https://mycloud.org:13000
region: regionOne
image:
- adminURL: http://172.16.0.9:9292
id: 1850105115ea493eb65f3f704d421291
internalURL: http://172.16.0.9:9292
publicURL: https://mycloud.org:13292
region: regionOne
metering:
- adminURL: http://172.16.0.9:8777
id: 4cae4dcabe0a4914a6ec6dabd62490ba
internalURL: http://172.16.0.9:8777
publicURL: https://mycloud.org:13777
region: regionOne
metric:
- adminURL: http://172.16.0.9:8041
id: 29bcecf9a06f40f782f19dd7492af352
internalURL: http://172.16.0.9:8041
publicURL: https://mycloud.org:13041
region: regionOne
network:
- adminURL: http://172.16.0.9:9696
id: 5d5785c9b8174c21bfb19dc3b16c87fa
internalURL: http://172.16.0.9:9696
publicURL: https://mycloud.org:13696
region: regionOne
object-store:
- adminURL: http://172.17.0.9:8080
id: 031f1e342fdf4f25b6099d1f3b0847e3
internalURL: http://172.17.0.9:8080/v1/AUTH_6d2847d6a6414308a67644eefc7b98c7
publicURL: https://mycloud.org:13808/v1/AUTH_6d2847d6a6414308a67644eefc7b98c7
region: regionOne
orchestration:
- adminURL: http://172.16.0.9:8004/v1/6d2847d6a6414308a67644eefc7b98c7
id: 1e6cecbd15b3413d9411052c52b9d433
internalURL: http://172.16.0.9:8004/v1/6d2847d6a6414308a67644eefc7b98c7
publicURL: https://mycloud.org:13004/v1/6d2847d6a6414308a67644eefc7b98c7
region: regionOne
placement:
- adminURL: http://172.16.0.9:8778/placement
id: 1f2551e5450c4bd6a9f716f92e93a154
internalURL: http://172.16.0.9:8778/placement
publicURL: https://mycloud.org:13778/placement
region: regionOne
volume:
- adminURL: http://172.16.0.9:8776/v1/6d2847d6a6414308a67644eefc7b98c7
id: 38e369a0e17346fe8e37a20146e005ef
internalURL: http://172.16.0.9:8776/v1/6d2847d6a6414308a67644eefc7b98c7
publicURL: https://mycloud.org:13776/v1/6d2847d6a6414308a67644eefc7b98c7
region: regionOne
volumev2:
- adminURL: http://172.16.0.9:8776/v2/6d2847d6a6414308a67644eefc7b98c7
id: <KEY>
internalURL: http://172.16.0.9:8776/v2/6d2847d6a6414308a67644eefc7b98c7
publicURL: https://mycloud.org:13776/v2/6d2847d6a6414308a67644eefc7b98c7
region: regionOne
volumev3:
- adminURL: http://172.16.0.9:8776/v3/6d2847d6a6414308a67644eefc7b98c7
id: <KEY>
internalURL: http://172.16.0.9:8776/v3/6d2847d6a6414308a67644eefc7b98c7
publicURL: https://mycloud.org:13776/v3/6d2847d6a6414308a67644eefc7b98c7
region: regionOne
'''
def main():
''' Main module function '''
module = AnsibleModule(argument_spec={}, supports_check_mode=True)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud()
# pylint: disable=broad-except
except Exception:
module.fail_json(msg='Failed to connect to the cloud')
try:
service_catalog = cloud.cloud_config.get_service_catalog()
# pylint: disable=broad-except
except Exception:
module.fail_json(msg='Failed to retrieve the service catalog')
try:
endpoints = service_catalog.get_endpoints()
# pylint: disable=broad-except
except Exception:
module.fail_json(msg='Failed to retrieve the service catalog '
'endpoints')
module.exit_json(
changed=False,
ansible_facts={'openstack_service_catalog': endpoints})
if __name__ == '__main__':
main()
|
trainModel.py | NYUMedML/DeepEHR | 242 | 40023 | <reponame>NYUMedML/DeepEHR<filename>trainModel.py
import torch
# import torch.nn as nn
# import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
# import torchwordemb
import torch.optim as optim
import sys
import time
import gc
import pickle
import os
import models2 as m
import enc_model as m3
# import pandas as pd
# from util import *
import torch.utils.data
# from sklearn.metrics import auc
# from sklearn import metrics
'''
General Training Script for PyTorch Models
-- Modified to accommodate more flexible LSTM structure
'''
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--nClassGender", type = int, default=2) # Number of classes in gender variable
parser.add_argument("--nClassRace", type=int, default=25) # Number of classes in race variable
parser.add_argument("--nClassEthnic", type=int, default=29) # Number of classes in ethnic variable
parser.add_argument("--modelName", default="Enc_CNN_LSTM")
parser.add_argument("--dimLSTM", type=int, default=128) # LSTM dimension
parser.add_argument("--dimLSTM_num", type=int, default=128) # LSTM dimension for numericals
parser.add_argument("--p_dropOut", type=float, default=.5)
parser.add_argument("--batch_norm", action='store_true')
parser.add_argument("--bidir", action='store_true')
parser.add_argument("--train_embed", action='store_true')
parser.add_argument("--rnnType", default="GRU")
parser.add_argument("--enc_len", type=int, default=20)
parser.add_argument("--doc_len", type=int, default=1000)
parser.add_argument("--n_iter", type=int, default=10)
parser.add_argument("--lr", type=float, default=0.001)
parser.add_argument("--lr_decay3", type=int, default=10) # Decay learning rate every lr_decay3 epochs
parser.add_argument("--i", type=int, default=1) # Index of the element in the parameter set to be tuned
parser.add_argument("--batchSizePos", type=int, default=16)
parser.add_argument("--batchSizeNeg", type=int, default=0)
parser.add_argument("--num_workers", type=int, default=4)
parser.add_argument("--flg_cuda", action='store_true')
parser.add_argument("--emb_dim", type=int, default=300) # Embedding dimension
parser.add_argument("--logInterval", type=int, default=1) # Print test accuracy every n epochs
parser.add_argument("--flgSave", action='store_true')
parser.add_argument("--savePath", default='./')
parser.add_argument("--filters", type=int, default=128)
parser.add_argument("--nK", type=int, default=3) # Number of kernels
parser.add_argument("--randSeed", type=int, default=42)
parser.add_argument("--posThres", type=float, default=0.5)
parser.add_argument("--inputPath", default="/ifs/data/razavianlab/encSeq_input/dim50/")
parser.add_argument("--alpha_L1", type=float, default=0.0)
parser.add_argument("--randn_std", type=float, default=None)
parser.add_argument("--flgBias", action='store_true')
parser.add_argument("--flg_gradClip", action='store_true')
parser.add_argument("--flg_AllLSTM", action='store_true')
parser.add_argument("--flg_useNum", action='store_true')
args = parser.parse_args()
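    # Illustrative invocation (a sketch; the path and flag values below are
    # placeholders chosen for the example, not values required by this script):
    #   python trainModel.py --modelName Enc_CNN_LSTM --inputPath /path/to/encSeq_input/dim50/ \
    #       --batchSizePos 16 --batchSizeNeg 16 --n_iter 10 --flg_cuda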
torch.manual_seed(args.randSeed) # For reproducible results
if args.flgSave:
if not os.path.isdir(args.savePath):
os.mkdir(args.savePath)
# args.d = ['chf', 'kf', 'str'][args.i -1]
# lsAlpha = [0.0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
# args.alpha_L1 = lsAlpha[args.i -1]
if args.flg_AllLSTM:
dimLSTM = args.dimLSTM * args.enc_len
else:
dimLSTM = args.dimLSTM
lsDim = [[dimLSTM, 256, 3], [dimLSTM, 512, 256, 3]][args.i-1]
print('General parameters: ', args)
unique = False
print("Loading Data")
# if args.modelName in ['Enc_SumLSTM', 'Enc_CNN_LSTM']:
embedding = pickle.load(open(args.inputPath + 'embedding.p', 'rb'))
embedding = torch.from_numpy(embedding).float()
if args.modelName in ['Enc_SumLSTM', 'Enc_CNN_LSTM', 'DemoLab', 'Enc_CNN_LSTM_DemoLab']:
trainset_pos = m3.encDataset(args.inputPath, 'dfTrainPos.json', args.nClassGender, args.nClassRace, args.nClassEthnic,
transform=m3.padOrTruncateToTensor(args.enc_len, args.doc_len))
trainset_neg = m3.encDataset(args.inputPath, 'dfTrainNeg.json', args.nClassGender, args.nClassRace, args.nClassEthnic,
transform=m3.padOrTruncateToTensor(args.enc_len, args.doc_len))
testset = m3.encDataset(args.inputPath, 'dfDev.json', args.nClassGender, args.nClassRace, args.nClassEthnic,
transform=m3.padOrTruncateToTensor(args.enc_len, args.doc_len))
else:
trainset_pos = m3.staticDataset(args.inputPath, 'dfTrainPos.json', args.nClassGender, args.nClassRace, args.nClassEthnic,
transform=m3.padOrTruncateToTensor(args.enc_len, args.doc_len))
trainset_neg = m3.staticDataset(args.inputPath, 'dfTrainNeg.json', args.nClassGender, args.nClassRace, args.nClassEthnic,
transform=m3.padOrTruncateToTensor(args.enc_len, args.doc_len))
testset = m3.staticDataset(args.inputPath, 'dfDev.json', args.nClassGender, args.nClassRace, args.nClassEthnic,
transform=m3.padOrTruncateToTensor(args.enc_len, args.doc_len))
print('To Loader')
if args.flg_cuda:
train_loader_pos = torch.utils.data.DataLoader(trainset_pos, batch_size=args.batchSizePos, shuffle=True,
pin_memory=True)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batchSizePos + args.batchSizeNeg,
shuffle=False, pin_memory=True)
if trainset_neg is not None:
train_loader_neg = torch.utils.data.DataLoader(trainset_neg, batch_size=args.batchSizeNeg, shuffle=True,
pin_memory=True)
else:
train_loader_pos = torch.utils.data.DataLoader(trainset_pos, batch_size=args.batchSizePos, shuffle=True,
pin_memory=False)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batchSizePos + args.batchSizeNeg,
shuffle=False, pin_memory=False)
if trainset_neg is not None:
train_loader_neg = torch.utils.data.DataLoader(trainset_neg, batch_size=args.batchSizeNeg, shuffle=True,
pin_memory=False)
model_paras = {'enc_len': args.enc_len, 'doc_len': args.doc_len, 'flg_updateEmb': args.train_embed,
'flg_bn': args.batch_norm,
'rnnType': args.rnnType, 'bidir': args.bidir, 'p_dropOut': args.p_dropOut, 'lsDim': lsDim,
'dimLSTM': args.dimLSTM,
'flg_cuda': args.flg_cuda, 'filters': args.filters, 'Ks': [i + 1 for i in range(args.nK)],
'randn_std': args.randn_std, 'lastRelu': True, 'flgBias': args.flgBias,
'flg_AllLSTM': args.flg_AllLSTM,
'flg_useNum': args.flg_useNum, 'dimLSTM_num': args.dimLSTM_num}
print('Model parameters: ', model_paras)
if args.modelName in ['Enc_SumLSTM', 'Enc_CNN_LSTM', 'DemoLab', 'Enc_CNN_LSTM_DemoLab']:
model = getattr(m3, args.modelName)(model_paras, embedding)
else:
from argparse import Namespace
static_model_args = Namespace()
static_model_args.dropout = args.p_dropOut
static_model_args.batch_norm = args.batch_norm
static_model_args.kernels = args.nK
static_model_args.bidir = args.bidir
static_model_args.train_embed = args.train_embed
static_model_args.max_len = 2000
static_model_args.n_out = 3
static_model_args.h = args.filters
static_model_args.n_demo_feat = 208
model = getattr(m, args.modelName)(embedding , static_model_args)
if args.flg_cuda:
model = model.cuda()
print(model)
opt = optim.Adam(model.params, lr=args.lr)
print("Beginning Training")
train_paras = {'n_iter': args.n_iter, 'log_interval': [args.logInterval, 1000], 'flg_cuda': args.flg_cuda,
'lr_decay': [args.lr, 0.9, args.lr_decay3, 1e-5],
'flgSave': args.flgSave, 'savePath': args.savePath, 'posThres': args.posThres,
'alpha_L1': args.alpha_L1, 'flg_gradClip': args.flg_gradClip}
    trainer = m3.trainModel(train_paras, train_loader_pos, test_loader, model, opt, train_loader_neg=train_loader_neg)
    _, lsTrainAccuracy, lsTestAccuracy = trainer.run()
testAuc = [np.mean(x[1]) for x in lsTestAccuracy]
print('Test AUC max: %.3f' % (max(testAuc)))
print('Test AUC final: %.3f' % (testAuc[-1]))
stopIdx = min(testAuc.index(max(testAuc)) * args.logInterval, args.n_iter)
print('Stop at: %d' % (stopIdx))
|
packages/dcos-integration-test/extra/test_applications.py | timgates42/dcos | 2,577 | 40040 | import logging
import uuid
from typing import Any
import pytest
import requests
import test_helpers
from dcos_test_utils import marathon
from dcos_test_utils.dcos_api import DcosApiSession
__maintainer__ = 'kensipe'
__contact__ = '<EMAIL>'
log = logging.getLogger(__name__)
def deploy_test_app_and_check(dcos_api_session: DcosApiSession, app: dict, test_uuid: str) -> None:
"""This method deploys the test server app and then
pings its /operating_environment endpoint to retrieve the container
user running the task.
    In a Mesos container, this will be the Marathon user.
    In a Docker container, this user comes from the USER setting
    in the app's Dockerfile which, for the test application,
    is the default: root.
"""
expanded_config = test_helpers.get_expanded_config()
default_os_user = 'nobody' if expanded_config.get('security') == 'strict' else 'root'
if 'container' in app and app['container']['type'] == 'DOCKER':
marathon_user = 'root'
else:
marathon_user = app.get('user', default_os_user)
with dcos_api_session.marathon.deploy_and_cleanup(app):
service_points = dcos_api_session.marathon.get_app_service_endpoints(app['id'])
r = requests.get('http://{}:{}/test_uuid'.format(service_points[0].host, service_points[0].port))
if r.status_code != 200:
msg = "Test server replied with non-200 reply: '{0} {1}. "
msg += "Detailed explanation of the problem: {2}"
raise Exception(msg.format(r.status_code, r.reason, r.text))
r_data = r.json()
assert r_data['test_uuid'] == test_uuid
r = requests.get('http://{}:{}/operating_environment'.format(
service_points[0].host,
service_points[0].port))
if r.status_code != 200:
msg = "Test server replied with non-200 reply: '{0} {1}. "
msg += "Detailed explanation of the problem: {2}"
raise Exception(msg.format(r.status_code, r.reason, r.text))
json_uid = r.json()['uid']
if marathon_user == 'root':
assert json_uid == 0, "App running as root should have uid 0."
else:
assert json_uid != 0, ("App running as {} should not have uid 0.".format(marathon_user))
@pytest.mark.first
def test_docker_image_availablity() -> None:
assert test_helpers.docker_pull_image("debian:stretch-slim"), "docker pull failed for image used in the test"
def test_if_marathon_app_can_be_deployed(dcos_api_session: DcosApiSession) -> None:
"""Marathon app deployment integration test
    This test verifies that a Marathon app can be deployed, and that the service
    points returned by Marathon indeed point to the app that was deployed.
    The application being deployed is a simple http server written in Python.
    Please see test_server.py for more details.
    This is done by assigning a unique UUID to each app and passing it to the
    docker container as an env variable. After successful deployment, the
    "GET /test_uuid" request is issued to the app. If the returned UUID matches
    the one assigned to the test, the test succeeds.
"""
deploy_test_app_and_check(dcos_api_session, *test_helpers.marathon_test_app())
def test_if_docker_app_can_be_deployed(dcos_api_session: DcosApiSession) -> None:
"""Marathon app inside docker deployment integration test.
Verifies that a marathon app inside of a docker daemon container can be
deployed and accessed as expected.
"""
deploy_test_app_and_check(
dcos_api_session,
*test_helpers.marathon_test_app(
network=marathon.Network.BRIDGE,
container_type=marathon.Container.DOCKER,
container_port=9080))
@pytest.mark.parametrize('healthcheck', [
marathon.Healthcheck.HTTP,
marathon.Healthcheck.MESOS_HTTP,
])
def test_if_ucr_app_can_be_deployed(dcos_api_session: DcosApiSession, healthcheck: Any) -> None:
"""Marathon app inside ucr deployment integration test.
Verifies that a marathon docker app inside of a ucr container can be
deployed and accessed as expected.
"""
deploy_test_app_and_check(
dcos_api_session,
*test_helpers.marathon_test_app(
container_type=marathon.Container.MESOS,
healthcheck_protocol=healthcheck))
def test_if_marathon_app_can_be_deployed_with_mesos_containerizer(dcos_api_session: DcosApiSession) -> None:
"""Marathon app deployment integration test using the Mesos Containerizer
This test verifies that a Marathon app using the Mesos containerizer with
a Docker image can be deployed.
    This is done by assigning a unique UUID to each app and passing it to the
    docker container as an env variable. After successful deployment, the
    "GET /test_uuid" request is issued to the app. If the returned UUID matches
    the one assigned to the test, the test succeeds.
When port mapping is available (MESOS-4777), this test should be updated to
reflect that.
"""
deploy_test_app_and_check(
dcos_api_session,
*test_helpers.marathon_test_app(container_type=marathon.Container.MESOS))
def test_if_marathon_app_can_be_deployed_with_nfs_csi_volume(dcos_api_session: DcosApiSession) -> None:
"""Marathon app deployment integration test using an NFS CSI volume.
This test verifies that a Marathon app can be deployed which attaches to
an NFS volume provided by the NFS CSI plugin. In order to accomplish this,
we must first set up an NFS share on one agent.
"""
# We will run an NFS server on one agent and an app on another agent to
# verify CSI volume functionality.
if len(dcos_api_session.slaves) < 2:
pytest.skip("CSI Volume Tests require a minimum of two agents.")
expanded_config = test_helpers.get_expanded_config()
if expanded_config.get('security') == 'strict':
pytest.skip('Cannot setup NFS server as root user with EE strict mode enabled')
test_uuid = uuid.uuid4().hex
hosts = dcos_api_session.slaves[0], dcos_api_session.slaves[1]
# A helper to run a Metronome job as root to clean up the NFS share on an agent.
# We define this here so that it can be used during error handling.
def cleanup_nfs() -> None:
cleanup_command = """
sudo systemctl stop nfs-server && \
echo '' | sudo tee /etc/exports && \
sudo systemctl restart nfs-utils && \
sudo exportfs -arv && \
sudo rm -rf /var/lib/dcos-nfs-shares/test-volume-001
"""
cleanup_job = {
'description': 'Clean up NFS share',
'id': 'nfs-share-cleanup-{}'.format(test_uuid),
'run': {
'cmd': cleanup_command,
'cpus': 0.5,
'mem': 256,
'disk': 32,
'user': 'root',
'restart': {'policy': 'ON_FAILURE'},
'placement': {
'constraints': [{
'attribute': '@hostname',
'operator': 'LIKE',
'value': hosts[0]
}]
}
}
}
dcos_api_session.metronome_one_off(cleanup_job)
# Run a Metronome job as root to set up the NFS share on an agent.
command = """sudo mkdir -p /var/lib/dcos-nfs-shares/test-volume-001 && \
sudo chown -R nobody: /var/lib/dcos-nfs-shares/test-volume-001 && \
sudo chmod 777 /var/lib/dcos-nfs-shares/test-volume-001 && \
echo '/var/lib/dcos-nfs-shares/test-volume-001 *(rw,sync)' | sudo tee /etc/exports && \
sudo systemctl restart nfs-utils && \
sudo exportfs -arv && \
sudo systemctl start nfs-server && \
sudo systemctl enable nfs-server
"""
setup_job = {
'description': 'Set up NFS share',
'id': 'nfs-share-setup-{}'.format(test_uuid),
'run': {
'cmd': command,
'cpus': 0.5,
'mem': 256,
'disk': 32,
'user': 'root',
'restart': {'policy': 'ON_FAILURE'},
'placement': {
'constraints': [{
'attribute': '@hostname',
'operator': 'LIKE',
'value': hosts[0]
}]
}
}
}
dcos_api_session.metronome_one_off(setup_job)
# Create an app which writes to the NFS volume.
app = {
'id': 'csi-nfs-write-app-{}'.format(test_uuid),
'instances': 1,
'cpus': 0.5,
'mem': 256,
'cmd': 'echo some-stuff > test-volume-dir/output && sleep 999999',
'user': 'root',
'container': {
'type': 'MESOS',
'volumes': [{
'mode': 'rw',
'containerPath': 'test-volume-dir',
'external': {
'provider': 'csi',
'name': 'test-volume-001',
'options': {
'pluginName': 'nfs.csi.k8s.io',
'capability': {
'accessType': 'mount',
'accessMode': 'MULTI_NODE_MULTI_WRITER',
'fsType': 'nfs'
},
'volumeContext': {
'server': hosts[0],
'share': '/var/lib/dcos-nfs-shares/test-volume-001'
}
}
}
}]
},
'constraints': [
[
'hostname',
'LIKE',
hosts[1]
]
],
'healthChecks': [{
'protocol': 'COMMAND',
'command': {'value': 'test `cat test-volume-dir/output` = some-stuff'},
'gracePeriodSeconds': 5,
'intervalSeconds': 10,
'timeoutSeconds': 10,
'maxConsecutiveFailures': 3
}]
}
try:
with dcos_api_session.marathon.deploy_and_cleanup(app):
# Trivial app if it deploys, there is nothing else to check
pass
except Exception as error:
raise(error)
finally:
cleanup_nfs()
def test_if_marathon_pods_can_be_deployed_with_mesos_containerizer(dcos_api_session: DcosApiSession) -> None:
"""Marathon pods deployment integration test using the Mesos Containerizer
This test verifies that a Marathon pods can be deployed.
"""
test_uuid = uuid.uuid4().hex
# create pod with trivial apps that function as long running processes
pod_definition = {
'id': '/integration-test-pods-{}'.format(test_uuid),
'scaling': {'kind': 'fixed', 'instances': 1},
'environment': {'PING': 'PONG'},
'containers': [
{
'name': 'ct1',
'resources': {'cpus': 0.1, 'mem': 32},
'image': {'kind': 'DOCKER', 'id': 'debian:stretch-slim'},
'exec': {'command': {'shell': 'touch foo; while true; do sleep 1; done'}},
'healthcheck': {'command': {'shell': 'test -f foo'}}
},
{
'name': 'ct2',
'resources': {'cpus': 0.1, 'mem': 32},
'exec': {'command': {'shell': 'echo $PING > foo; while true; do sleep 1; done'}},
'healthcheck': {'command': {'shell': 'test $PING = `cat foo`'}}
}
],
'networks': [{'mode': 'host'}]
}
with dcos_api_session.marathon.deploy_pod_and_cleanup(pod_definition):
# Trivial app if it deploys, there is nothing else to check
pass
|
dataloader/stereo_kittilist15.py | ne3x7/VCN | 148 | 40068 | <reponame>ne3x7/VCN
import torch.utils.data as data
import pdb
from PIL import Image
import os
import os.path
import numpy as np
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def dataloader(filepath, typ = 'train'):
left_fold = 'image_2/'
right_fold = 'image_3/'
disp_L = 'disp_occ_0/'
disp_R = 'disp_occ_1/'
image = [img for img in os.listdir(filepath+left_fold) if img.find('_10') > -1]
image = sorted(image)
imglist = [1,3,6,20,26,35,38,41,43,44,49,60,67,70,81,84,89,97,109,119,122,123,129,130,132,134,141,144,152,158,159,165,171,174,179,182, 184,186,187,196]
if typ == 'train':
train = [image[i] for i in range(200) if i not in imglist]
elif typ == 'trainval':
train = [image[i] for i in range(200)]
val = [image[i] for i in imglist]
left_train = [filepath+left_fold+img for img in train]
right_train = [filepath+right_fold+img for img in train]
disp_train_L = [filepath+disp_L+img for img in train]
#disp_train_R = [filepath+disp_R+img for img in train]
left_val = [filepath+left_fold+img for img in val]
right_val = [filepath+right_fold+img for img in val]
disp_val_L = [filepath+disp_L+img for img in val]
#disp_val_R = [filepath+disp_R+img for img in val]
return left_train, right_train, disp_train_L, left_val, right_val, disp_val_L
|
src/textacy/extract/keyterms/__init__.py | austinjp/textacy | 1,929 | 40086 | """
Keyterms
--------
:mod:`textacy.extract.keyterms`: Extract keyterms from documents using a variety of
rule-based algorithms.
"""
from .scake import scake
from .sgrank import sgrank
from .textrank import textrank
from .yake import yake
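
# Illustrative usage of the re-exported extractors (a sketch; assumes a spaCy
# pipeline such as "en_core_web_sm" is installed, which this package does not
# itself guarantee):
#
#   import textacy
#   from textacy.extract.keyterms import textrank
#   doc = textacy.make_spacy_doc("Keyterm extraction finds salient terms.", lang="en_core_web_sm")
#   terms = textrank(doc, topn=5)  # list of (keyterm, score) pairs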
|
tests/settings/imagemagick.py | apahomov/sorl-thumbnail | 630 | 40095 | <filename>tests/settings/imagemagick.py
from .default import *
THUMBNAIL_ENGINE = 'sorl.thumbnail.engines.convert_engine.Engine'
THUMBNAIL_CONVERT = 'convert'
|
app/oauth_office365/tests.py | ricardojba/PwnAuth | 304 | 40110 | <filename>app/oauth_office365/tests.py<gh_stars>100-1000
from django.test import TestCase
# Create your tests here.
# TODO add test to validate application scopes are enforced when creating
# TODO add test to validate that application redirect and refresh URL match the site's base url
# TODO add unicode handling tests
# TODO test large attachments
# TODO basic tests for getting and deleting messages, attachments
|
kolibri/core/content/zip_wsgi.py | MBKayro/kolibri | 545 | 40151 | <filename>kolibri/core/content/zip_wsgi.py
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import logging
import mimetypes
import os
import re
import time
import zipfile
import html5lib
from cheroot import wsgi
from django.core.cache import cache
from django.core.handlers.wsgi import WSGIRequest
from django.http import HttpResponse
from django.http import HttpResponseNotAllowed
from django.http import HttpResponseNotFound
from django.http import HttpResponseNotModified
from django.http.response import FileResponse
from django.http.response import StreamingHttpResponse
from django.utils.cache import patch_response_headers
from django.utils.encoding import force_str
from django.utils.http import http_date
from kolibri.core.content.errors import InvalidStorageFilenameError
from kolibri.core.content.utils.paths import get_content_storage_file_path
from kolibri.core.content.utils.paths import get_zip_content_base_path
logger = logging.getLogger(__name__)
def add_security_headers(request, response):
response["Access-Control-Allow-Origin"] = "*"
response["Access-Control-Allow-Methods"] = "GET, OPTIONS"
requested_headers = request.META.get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS", "")
if requested_headers:
response["Access-Control-Allow-Headers"] = requested_headers
# restrict CSP to only allow resources to be loaded from self, to prevent info leakage
# (e.g. via passing user info out as GET parameters to an attacker's server), or inadvertent data usage
response[
"Content-Security-Policy"
] = "default-src 'self' 'unsafe-inline' 'unsafe-eval' data: blob:"
return response
def django_response_to_wsgi(response, environ, start_response):
status = "%d %s" % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append((str("Set-Cookie"), str(c.output(header=""))))
start_response(force_str(status), response_headers)
if getattr(response, "file_to_stream", None) is not None and environ.get(
"wsgi.file_wrapper"
):
response = environ["wsgi.file_wrapper"](response.file_to_stream)
return response
allowed_methods = set(["GET", "OPTIONS"])
# This is also included in packages/hashi/src/h5p.html
# ideally, we should never ever update this code
# but if we do we should update it there.
INITIALIZE_HASHI_FROM_IFRAME = "if (window.parent && window.parent.hashi) {try {window.parent.hashi.initializeIframe(window);} catch (e) {}}"
def parse_html(content):
try:
document = html5lib.parse(content, namespaceHTMLElements=False)
if not document:
# Could not parse
return content
# Because html5lib parses like a browser, it will
# always create head and body tags if they are missing.
head = document.find("head")
# Use the makeelement method of the head tag here to ensure that we use the same
# Element class for both. Depending on the system and python version we are on,
# we may be using the C implementation or the pure python and a mismatch will cause an error.
script_tag = head.makeelement("script", {"type": "text/javascript"})
script_tag.text = INITIALIZE_HASHI_FROM_IFRAME
head.insert(0, script_tag)
# Currently, html5lib strips the doctype, but it's important for correct rendering, so check the original
# content for the doctype and, if found, prepend it to the content serialized by html5lib
doctype = None
try:
# Now parse the content as a dom tree instead, so that we capture
# any doctype node as a dom node that we can read.
tree_builder_dom = html5lib.treebuilders.getTreeBuilder("dom")
parser_dom = html5lib.HTMLParser(
tree_builder_dom, namespaceHTMLElements=False
)
tree = parser_dom.parse(content)
# By HTML Spec if doctype is included, it must be the first thing
# in the document, so it has to be the first child node of the document
doctype_node = tree.childNodes[0]
# Check that this node is in fact a doctype node
if doctype_node.nodeType == doctype_node.DOCUMENT_TYPE_NODE:
# render to a string by calling the toxml method
# toxml uses single quotes by default, replace with ""
doctype = doctype_node.toxml().replace("'", '"')
except Exception as e:
logger.warn("Error in HTML5 parsing to determine doctype {}".format(e))
html = html5lib.serialize(
document,
quote_attr_values="always",
omit_optional_tags=False,
minimize_boolean_attributes=False,
use_trailing_solidus=True,
space_before_trailing_solidus=False,
)
if doctype:
html = doctype + html
return html
except html5lib.html5parser.ParseError:
return content
def get_embedded_file(zipped_path, zipped_filename, embedded_filepath):
with zipfile.ZipFile(zipped_path) as zf:
# if no path, or a directory, is being referenced, look for an index.html file
if not embedded_filepath or embedded_filepath.endswith("/"):
embedded_filepath += "index.html"
# get the details about the embedded file, and ensure it exists
try:
info = zf.getinfo(embedded_filepath)
except KeyError:
return HttpResponseNotFound(
'"{}" does not exist inside "{}"'.format(
embedded_filepath, zipped_filename
)
)
# file size
file_size = 0
# try to guess the MIME type of the embedded file being referenced
content_type = (
mimetypes.guess_type(embedded_filepath)[0] or "application/octet-stream"
)
if embedded_filepath.endswith("htm") or embedded_filepath.endswith("html"):
content = zf.open(info).read()
html = parse_html(content)
response = HttpResponse(html, content_type=content_type)
file_size = len(response.content)
else:
# generate a streaming response object, pulling data from within the zip file
response = FileResponse(zf.open(info), content_type=content_type)
file_size = info.file_size
# set the content-length header to the size of the embedded file
if file_size:
response["Content-Length"] = file_size
return response
path_regex = re.compile("/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)")
YEAR_IN_SECONDS = 60 * 60 * 24 * 365
def _zip_content_from_request(request): # noqa: C901
if request.method not in allowed_methods:
return HttpResponseNotAllowed(allowed_methods)
match = path_regex.match(request.path_info)
if match is None:
return HttpResponseNotFound("Path not found")
if request.method == "OPTIONS":
return HttpResponse()
zipped_filename, embedded_filepath = match.groups()
try:
# calculate the local file path to the zip file
zipped_path = get_content_storage_file_path(zipped_filename)
except InvalidStorageFilenameError:
return HttpResponseNotFound(
'"%(filename)s" is not a valid file name' % {"filename": zipped_filename}
)
# if the zipfile does not exist on disk, return a 404
if not os.path.exists(zipped_path):
return HttpResponseNotFound(
'"%(filename)s" is not a valid zip file' % {"filename": zipped_filename}
)
# Sometimes due to URL concatenation, we get URLs with double-slashes in them, like //path/to/file.html.
# the zipped_filename and embedded_filepath are defined by the regex capturing groups in the URL defined
# in urls.py in the same folder as this file:
# r"^zipcontent/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)"
# If the embedded_filepath contains a leading slash because of an input URL like:
# /zipcontent/filename.zip//file.html
# then the embedded_filepath will have a value of "/file.html"
# we detect this leading slash in embedded_filepath and remove it.
if embedded_filepath.startswith("/"):
embedded_filepath = embedded_filepath[1:]
# Any double-slashes later in the URL will be present as double-slashes, such as:
# /zipcontent/filename.zip/path//file.html
# giving an embedded_filepath value of "path//file.html"
# Normalize the path by converting double-slashes occurring later in the path to a single slash.
# This would change our example embedded_filepath to "path/file.html" which will resolve properly.
embedded_filepath = embedded_filepath.replace("//", "/")
# if client has a cached version, use that (we can safely assume nothing has changed, due to MD5)
if request.META.get("HTTP_IF_MODIFIED_SINCE"):
return HttpResponseNotModified()
CACHE_KEY = "ZIPCONTENT_VIEW_RESPONSE_{}/{}".format(
zipped_filename, embedded_filepath
)
cached_response = cache.get(CACHE_KEY)
if cached_response is not None:
return cached_response
response = get_embedded_file(zipped_path, zipped_filename, embedded_filepath)
# ensure the browser knows not to try byte-range requests, as we don't support them here
response["Accept-Ranges"] = "none"
response["Last-Modified"] = http_date(time.time())
patch_response_headers(response, cache_timeout=YEAR_IN_SECONDS)
if not isinstance(response, StreamingHttpResponse):
cache.set(CACHE_KEY, response, YEAR_IN_SECONDS)
return response
def generate_zip_content_response(environ):
request = WSGIRequest(environ)
response = _zip_content_from_request(request)
add_security_headers(request, response)
return response
def zip_content_view(environ, start_response):
"""
Handles GET requests and serves a static file from within the zip file.
"""
response = generate_zip_content_response(environ)
return django_response_to_wsgi(response, environ, start_response)
def get_application():
path_map = {
get_zip_content_base_path(): zip_content_view,
}
return wsgi.PathInfoDispatcher(path_map)
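
# A minimal sketch of serving the dispatcher above with cheroot (the address and
# port are placeholders; Kolibri's real server wiring lives elsewhere):
#
#   server = wsgi.Server(("127.0.0.1", 8080), get_application())
#   server.start()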
|
mamonsu/tools/report/format.py | sgrinko/mamonsu | 188 | 40176 | <filename>mamonsu/tools/report/format.py
# -*- coding: utf-8 -*-
import re
import sys
import mamonsu.lib.platform as platform
class color(object):
mapping = {
'BOLD': '\033[0;0m\033[1;1m',
'RED': '\033[1;31m',
'GRAY': '\033[1;30m',
'PURPLE': '\033[1;35m',
'BLUE': '\033[1;34m',
'END': '\033[1;m'
}
def __init__(self):
self.color = sys.stdout.isatty()
def disable(self):
self.color = False
def __getattr__(self, name):
if self.color:
return self.mapping[name]
else:
return ''
TermColor = color()
# int (bytes) => str (human readable)
def humansize_bytes(nbytes):
fmt = '{0:>6} {1}'
if not isinstance(nbytes, platform.INTEGER_TYPES):
return 'ERROR'
if nbytes == 0:
return fmt.format(0, 'B')
i, suffixes, = 0, ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
while nbytes >= 1024 and i < len(suffixes) - 1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return fmt.format(f, suffixes[i])
# str (some formates) => str (human readable)
def humansize(value):
    m = re.search(r'(\d+) (\S+)', value)
if m is None:
return value
val, suff = m.group(1), m.group(2)
val, suff = int(val), suff.upper()
if suff == 'S':
return value
if suff == 'MS':
return value
if suff == 'B':
return humansize_bytes(val)
if suff == 'KB':
return humansize_bytes(val * 1024)
if suff == '4KB':
return humansize_bytes(val * 1024 * 4)
if suff == '8KB':
return humansize_bytes(val * 1024 * 8)
if suff == '16KB':
return humansize_bytes(val * 1024 * 16)
if suff == 'MB':
return humansize_bytes(val * 1024 * 1024)
if suff == 'GB':
return humansize_bytes(val * 1024 * 1024 * 1024)
if suff == 'TB':
return humansize_bytes(val * 1024 * 1024 * 1024 * 1024)
return value
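
# Illustrative conversions (a sketch of the expected output; values are
# right-aligned in a 6-character field by the format string above):
#   humansize_bytes(0)         -> '     0 B'
#   humansize_bytes(123456789) -> '117.74 MB'
#   humansize('16 8KB')        -> '   128 KB'   # 16 pages of 8KB each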
def header_h1(info):
return "\n{0}{1}{2}{3}\n".format(
TermColor.BOLD, TermColor.RED, info.upper(), TermColor.END)
def key_val_h1(key, val, spaces=12):
fmt = " {0}{1}{2:" + str(spaces) + "}{3}: {4}\n"
return fmt.format(
TermColor.BOLD, TermColor.PURPLE, key, TermColor.END, val)
def header_h2(info):
return " {0}{1}{2}{3}\n".format(
TermColor.BOLD, TermColor.PURPLE, info, TermColor.END)
def key_val_h2(key, val, delim=': '):
return " {0}{1}{2:4}{3}{4}{5}\n".format(
TermColor.BOLD, TermColor.BLUE, key, TermColor.END, delim, val)
def topline_h1(arr=None, delim=" \t"):
if arr is None:
arr = []
result = "{0}{1}".format(TermColor.BOLD, TermColor.BLUE)
for x in arr:
result = "{0}{1}{2}".format(result, delim, x)
return "{0}{1}\n".format(result, TermColor.END)
def format_raw_h1(raw=""):
result = []
for i, line in enumerate(raw.split("\n")):
if i == 0:
result.append(" {0}{1}{2}{3}".format(
TermColor.BOLD, TermColor.BLUE, line, TermColor.END))
else:
result.append(" {0}".format(line))
return "\n".join(result) + "\n"
|
contrib/report_builders/__init__.py | berndonline/flan | 3,711 | 40182 | <gh_stars>1000+
from .report_builder import ReportBuilder
from .latex_report_builder import LatexReportBuilder
from .markdown_report_builder import MarkdownReportBuilder
from .json_report_builder import JsonReportBuilder
from .html_report_builder import JinjaHtmlReportBuilder
|
hnn/src/apps/training_utils.py | anlewy/mt-dnn | 2,075 | 40267 | #
# Author: <EMAIL>
# Date: 01/25/2019
#
""" Utils for training and optimization
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import utils
logger=utils.get_logger()
import numpy as np
import torch
from bert.optimization import BertAdam
def zero_grad(model, optimizer_param):
model.zero_grad()
for n, p in optimizer_param:
p.grad = None
def dump_parameter_names(model, path):
with open(path, 'w', encoding='utf8') as fs:
fs.write('{}\n'.format('\n'.join([n for n,p in model.named_parameters()])))
def copy_optimizer_params_to_model(named_params_model, named_params_optimizer):
""" Utility function for optimize_on_cpu and 16-bits training.
Copy the parameters optimized on CPU/RAM back to the model on GPU
"""
for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer, named_params_model):
if name_opti != name_model:
logger.error("name_opti != name_model: {} {}".format(name_opti, name_model))
raise ValueError
param_model.data.copy_(param_opti.data)
def set_optimizer_params_grad(named_params_optimizer, named_params_model, test_nan=False):
""" Utility function for optimize_on_cpu and 16-bits training.
Copy the gradient of the GPU parameters to the CPU/RAMM copy of the model
"""
is_nan = False
for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer, named_params_model):
if name_opti != name_model:
logger.error("name_opti != name_model: {} {}".format(name_opti, name_model))
raise ValueError
if param_model.grad is not None:
norm = param_model.grad.norm()
if test_nan and (torch.isnan(norm) or torch.isinf(norm)):
is_nan = True
if param_opti.grad is None:
param_opti.grad = torch.nn.Parameter(param_opti.data.new().resize_(*param_opti.data.size()))
param_opti.grad.data.copy_(param_model.grad.data)
else:
param_opti.grad = None
return is_nan
def create_optimizer(model, args, num_train_steps=None, init_spec=None, no_decay=['bias', 'LayerNorm.weight']):
# Prepare optimizer
if args.fp16:
dcnt = torch.cuda.device_count()
if args.no_even_grad:
param_optimizer = [(n, param.detach().clone().type(torch.cuda.FloatTensor).\
requires_grad_()) for i,(n,param) in enumerate(model.named_parameters())]
else:
total_size = sum(np.prod(p.size()) for p in model.parameters())
quota={i:0 for i in range(dcnt)}
quota[0]=total_size//(dcnt*2)
param_optimizer = []
for i,(n, param) in enumerate(model.named_parameters()):
ps = np.prod(param.size())
index = list(sorted(quota.items(), key=lambda x: x[1]))[0][0]
quota[index]+=ps
cp = param.clone().type(torch.cuda.FloatTensor).detach().to('cuda:{}'.format(index)).requires_grad_()
param_optimizer += [(n, cp)]
elif args.optimize_on_cpu:
param_optimizer = [(n, param.clone().detach().to('cpu').requires_grad_()) \
for n, param in model.named_parameters()]
else:
param_optimizer = [(n,p) for n,p in model.named_parameters()]
group0=dict(params=[],
weight_decay_rate=args.weight_decay,
names=[])
group1=dict(params=[],
weight_decay_rate=0.00,
names=[])
for (n,p) in param_optimizer:
if not any(nd in n for nd in no_decay):
group0['params'].append(p)
group0['names'].append(n)
else:
group1['params'].append(p)
group1['names'].append(n)
optimizer_grouped_parameters = [group0, group1]
t_total = num_train_steps
optimizer=None
if t_total:
if args.local_rank != -1:
t_total = t_total // torch.distributed.get_world_size()
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
b1=args.adam_beta1,
b2=args.adam_beta2,
v1=args.qhadam_v1,
v2=args.qhadam_v2,
lr_ends=args.lr_schedule_ends,
e=args.epsilon,
warmup=args.warmup_proportion if args.warmup_proportion<1 else args.warmup_proportion/t_total,
t_total=t_total,
schedule=args.lr_schedule,
max_grad_norm=args.max_grad_norm,
global_grad_norm=args.global_grad_norm,
init_spec = init_spec,
weight_decay_rate = args.weight_decay)
return optimizer, param_optimizer, t_total
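# Illustrative call (a sketch; `model` and `args` are built by the surrounding
# training scripts and are not constructed here):
#   optimizer, optimizer_param, t_total = create_optimizer(model, args, num_train_steps=10000)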
|
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/hardware/mifare/mifare_classic.py | SabheeR/hobbits | 304 | 40282 | <gh_stars>100-1000
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
import collections
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class MifareClassic(KaitaiStruct):
"""You can get a dump for testing by the link: https://github.com/zhovner/mfdread/raw/master/dump.mfd
.. seealso::
Source - https://github.com/nfc-tools/libnfc
https://www.nxp.com/docs/en/data-sheet/MF1S70YYX_V1.pdf
"""
SEQ_FIELDS = ["sectors"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['sectors']['start'] = self._io.pos()
self._raw_sectors = []
self.sectors = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['sectors']:
self._debug['sectors']['arr'] = []
self._debug['sectors']['arr'].append({'start': self._io.pos()})
self._raw_sectors.append(self._io.read_bytes((((4 if i >= 32 else 1) * 4) * 16)))
_io__raw_sectors = KaitaiStream(BytesIO(self._raw_sectors[-1]))
_t_sectors = MifareClassic.Sector(i == 0, _io__raw_sectors, self, self._root)
_t_sectors._read()
self.sectors.append(_t_sectors)
self._debug['sectors']['arr'][len(self.sectors) - 1]['end'] = self._io.pos()
i += 1
self._debug['sectors']['end'] = self._io.pos()
class Key(KaitaiStruct):
SEQ_FIELDS = ["key"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['key']['start'] = self._io.pos()
self.key = self._io.read_bytes(6)
self._debug['key']['end'] = self._io.pos()
class Sector(KaitaiStruct):
SEQ_FIELDS = ["manufacturer", "data_filler", "trailer"]
def __init__(self, has_manufacturer, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.has_manufacturer = has_manufacturer
self._debug = collections.defaultdict(dict)
def _read(self):
if self.has_manufacturer:
self._debug['manufacturer']['start'] = self._io.pos()
self.manufacturer = MifareClassic.Manufacturer(self._io, self, self._root)
self.manufacturer._read()
self._debug['manufacturer']['end'] = self._io.pos()
self._debug['data_filler']['start'] = self._io.pos()
self._raw_data_filler = self._io.read_bytes(((self._io.size() - self._io.pos()) - 16))
_io__raw_data_filler = KaitaiStream(BytesIO(self._raw_data_filler))
self.data_filler = MifareClassic.Sector.Filler(_io__raw_data_filler, self, self._root)
self.data_filler._read()
self._debug['data_filler']['end'] = self._io.pos()
self._debug['trailer']['start'] = self._io.pos()
self.trailer = MifareClassic.Trailer(self._io, self, self._root)
self.trailer._read()
self._debug['trailer']['end'] = self._io.pos()
class Values(KaitaiStruct):
SEQ_FIELDS = ["values"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['values']['start'] = self._io.pos()
self.values = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['values']:
self._debug['values']['arr'] = []
self._debug['values']['arr'].append({'start': self._io.pos()})
_t_values = MifareClassic.Sector.Values.ValueBlock(self._io, self, self._root)
_t_values._read()
self.values.append(_t_values)
self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
i += 1
self._debug['values']['end'] = self._io.pos()
class ValueBlock(KaitaiStruct):
SEQ_FIELDS = ["valuez", "addrz"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['valuez']['start'] = self._io.pos()
self.valuez = [None] * (3)
for i in range(3):
if not 'arr' in self._debug['valuez']:
self._debug['valuez']['arr'] = []
self._debug['valuez']['arr'].append({'start': self._io.pos()})
self.valuez[i] = self._io.read_u4le()
self._debug['valuez']['arr'][i]['end'] = self._io.pos()
self._debug['valuez']['end'] = self._io.pos()
self._debug['addrz']['start'] = self._io.pos()
self.addrz = [None] * (4)
for i in range(4):
if not 'arr' in self._debug['addrz']:
self._debug['addrz']['arr'] = []
self._debug['addrz']['arr'].append({'start': self._io.pos()})
self.addrz[i] = self._io.read_u1()
self._debug['addrz']['arr'][i]['end'] = self._io.pos()
self._debug['addrz']['end'] = self._io.pos()
@property
def addr(self):
if hasattr(self, '_m_addr'):
return self._m_addr if hasattr(self, '_m_addr') else None
if self.valid:
self._m_addr = self.addrz[0]
return self._m_addr if hasattr(self, '_m_addr') else None
@property
def addr_valid(self):
if hasattr(self, '_m_addr_valid'):
return self._m_addr_valid if hasattr(self, '_m_addr_valid') else None
self._m_addr_valid = ((self.addrz[0] == ~(self.addrz[1])) and (self.addrz[0] == self.addrz[2]) and (self.addrz[1] == self.addrz[3]))
return self._m_addr_valid if hasattr(self, '_m_addr_valid') else None
@property
def valid(self):
if hasattr(self, '_m_valid'):
return self._m_valid if hasattr(self, '_m_valid') else None
self._m_valid = ((self.value_valid) and (self.addr_valid))
return self._m_valid if hasattr(self, '_m_valid') else None
@property
def value_valid(self):
if hasattr(self, '_m_value_valid'):
return self._m_value_valid if hasattr(self, '_m_value_valid') else None
self._m_value_valid = ((self.valuez[0] == ~(self.valuez[1])) and (self.valuez[0] == self.valuez[2]))
return self._m_value_valid if hasattr(self, '_m_value_valid') else None
@property
def value(self):
if hasattr(self, '_m_value'):
return self._m_value if hasattr(self, '_m_value') else None
if self.valid:
self._m_value = self.valuez[0]
return self._m_value if hasattr(self, '_m_value') else None
class Filler(KaitaiStruct):
"""only to create _io."""
SEQ_FIELDS = ["data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['data']['start'] = self._io.pos()
self.data = self._io.read_bytes(self._io.size())
self._debug['data']['end'] = self._io.pos()
@property
def block_size(self):
if hasattr(self, '_m_block_size'):
return self._m_block_size if hasattr(self, '_m_block_size') else None
self._m_block_size = 16
return self._m_block_size if hasattr(self, '_m_block_size') else None
@property
def data(self):
if hasattr(self, '_m_data'):
return self._m_data if hasattr(self, '_m_data') else None
self._m_data = self.data_filler.data
return self._m_data if hasattr(self, '_m_data') else None
@property
def blocks(self):
if hasattr(self, '_m_blocks'):
return self._m_blocks if hasattr(self, '_m_blocks') else None
io = self.data_filler._io
_pos = io.pos()
io.seek(0)
self._debug['_m_blocks']['start'] = io.pos()
self._m_blocks = []
i = 0
while not io.is_eof():
if not 'arr' in self._debug['_m_blocks']:
self._debug['_m_blocks']['arr'] = []
self._debug['_m_blocks']['arr'].append({'start': io.pos()})
self._m_blocks.append(io.read_bytes(self.block_size))
self._debug['_m_blocks']['arr'][len(self._m_blocks) - 1]['end'] = io.pos()
i += 1
self._debug['_m_blocks']['end'] = io.pos()
io.seek(_pos)
return self._m_blocks if hasattr(self, '_m_blocks') else None
@property
def values(self):
if hasattr(self, '_m_values'):
return self._m_values if hasattr(self, '_m_values') else None
io = self.data_filler._io
_pos = io.pos()
io.seek(0)
self._debug['_m_values']['start'] = io.pos()
self._m_values = MifareClassic.Sector.Values(io, self, self._root)
self._m_values._read()
self._debug['_m_values']['end'] = io.pos()
io.seek(_pos)
return self._m_values if hasattr(self, '_m_values') else None
class Manufacturer(KaitaiStruct):
SEQ_FIELDS = ["nuid", "bcc", "sak", "atqa", "manufacturer"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['nuid']['start'] = self._io.pos()
self.nuid = self._io.read_u4le()
self._debug['nuid']['end'] = self._io.pos()
self._debug['bcc']['start'] = self._io.pos()
self.bcc = self._io.read_u1()
self._debug['bcc']['end'] = self._io.pos()
self._debug['sak']['start'] = self._io.pos()
self.sak = self._io.read_u1()
self._debug['sak']['end'] = self._io.pos()
self._debug['atqa']['start'] = self._io.pos()
self.atqa = self._io.read_u2le()
self._debug['atqa']['end'] = self._io.pos()
self._debug['manufacturer']['start'] = self._io.pos()
self.manufacturer = self._io.read_bytes(8)
self._debug['manufacturer']['end'] = self._io.pos()
class Trailer(KaitaiStruct):
SEQ_FIELDS = ["key_a", "access_bits", "user_byte", "key_b"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['key_a']['start'] = self._io.pos()
self.key_a = MifareClassic.Key(self._io, self, self._root)
self.key_a._read()
self._debug['key_a']['end'] = self._io.pos()
self._debug['access_bits']['start'] = self._io.pos()
self._raw_access_bits = self._io.read_bytes(3)
_io__raw_access_bits = KaitaiStream(BytesIO(self._raw_access_bits))
self.access_bits = MifareClassic.Trailer.AccessConditions(_io__raw_access_bits, self, self._root)
self.access_bits._read()
self._debug['access_bits']['end'] = self._io.pos()
self._debug['user_byte']['start'] = self._io.pos()
self.user_byte = self._io.read_u1()
self._debug['user_byte']['end'] = self._io.pos()
self._debug['key_b']['start'] = self._io.pos()
self.key_b = MifareClassic.Key(self._io, self, self._root)
self.key_b._read()
self._debug['key_b']['end'] = self._io.pos()
class AccessConditions(KaitaiStruct):
SEQ_FIELDS = ["raw_chunks"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['raw_chunks']['start'] = self._io.pos()
self.raw_chunks = [None] * (self._parent.ac_count_of_chunks)
for i in range(self._parent.ac_count_of_chunks):
if not 'arr' in self._debug['raw_chunks']:
self._debug['raw_chunks']['arr'] = []
self._debug['raw_chunks']['arr'].append({'start': self._io.pos()})
self.raw_chunks[i] = self._io.read_bits_int_be(4)
self._debug['raw_chunks']['arr'][i]['end'] = self._io.pos()
self._debug['raw_chunks']['end'] = self._io.pos()
class TrailerAc(KaitaiStruct):
SEQ_FIELDS = []
def __init__(self, ac, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.ac = ac
self._debug = collections.defaultdict(dict)
def _read(self):
pass
@property
def can_read_key_b(self):
"""key A is required."""
if hasattr(self, '_m_can_read_key_b'):
return self._m_can_read_key_b if hasattr(self, '_m_can_read_key_b') else None
self._m_can_read_key_b = self.ac.inv_shift_val <= 2
return self._m_can_read_key_b if hasattr(self, '_m_can_read_key_b') else None
@property
def can_write_keys(self):
if hasattr(self, '_m_can_write_keys'):
return self._m_can_write_keys if hasattr(self, '_m_can_write_keys') else None
self._m_can_write_keys = ((((self.ac.inv_shift_val + 1) % 3) != 0) and (self.ac.inv_shift_val < 6))
return self._m_can_write_keys if hasattr(self, '_m_can_write_keys') else None
@property
def can_write_access_bits(self):
if hasattr(self, '_m_can_write_access_bits'):
return self._m_can_write_access_bits if hasattr(self, '_m_can_write_access_bits') else None
self._m_can_write_access_bits = self.ac.bits[2].b
return self._m_can_write_access_bits if hasattr(self, '_m_can_write_access_bits') else None
@property
def key_b_controls_write(self):
if hasattr(self, '_m_key_b_controls_write'):
return self._m_key_b_controls_write if hasattr(self, '_m_key_b_controls_write') else None
self._m_key_b_controls_write = not (self.can_read_key_b)
return self._m_key_b_controls_write if hasattr(self, '_m_key_b_controls_write') else None
class ChunkBitRemap(KaitaiStruct):
SEQ_FIELDS = []
def __init__(self, bit_no, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.bit_no = bit_no
self._debug = collections.defaultdict(dict)
def _read(self):
pass
@property
def shift_value(self):
if hasattr(self, '_m_shift_value'):
return self._m_shift_value if hasattr(self, '_m_shift_value') else None
self._m_shift_value = (-1 if self.bit_no == 1 else 1)
return self._m_shift_value if hasattr(self, '_m_shift_value') else None
@property
def chunk_no(self):
if hasattr(self, '_m_chunk_no'):
return self._m_chunk_no if hasattr(self, '_m_chunk_no') else None
self._m_chunk_no = (((self.inv_chunk_no + self.shift_value) + self._parent._parent.ac_count_of_chunks) % self._parent._parent.ac_count_of_chunks)
return self._m_chunk_no if hasattr(self, '_m_chunk_no') else None
@property
def inv_chunk_no(self):
if hasattr(self, '_m_inv_chunk_no'):
return self._m_inv_chunk_no if hasattr(self, '_m_inv_chunk_no') else None
self._m_inv_chunk_no = (self.bit_no + self.shift_value)
return self._m_inv_chunk_no if hasattr(self, '_m_inv_chunk_no') else None
class DataAc(KaitaiStruct):
SEQ_FIELDS = []
def __init__(self, ac, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.ac = ac
self._debug = collections.defaultdict(dict)
def _read(self):
pass
@property
def read_key_a_required(self):
if hasattr(self, '_m_read_key_a_required'):
return self._m_read_key_a_required if hasattr(self, '_m_read_key_a_required') else None
self._m_read_key_a_required = self.ac.val <= 4
return self._m_read_key_a_required if hasattr(self, '_m_read_key_a_required') else None
@property
def write_key_b_required(self):
if hasattr(self, '_m_write_key_b_required'):
return self._m_write_key_b_required if hasattr(self, '_m_write_key_b_required') else None
self._m_write_key_b_required = (( ((not (self.read_key_a_required)) or (self.read_key_b_required)) ) and (not (self.ac.bits[0].b)))
return self._m_write_key_b_required if hasattr(self, '_m_write_key_b_required') else None
@property
def write_key_a_required(self):
if hasattr(self, '_m_write_key_a_required'):
return self._m_write_key_a_required if hasattr(self, '_m_write_key_a_required') else None
self._m_write_key_a_required = self.ac.val == 0
return self._m_write_key_a_required if hasattr(self, '_m_write_key_a_required') else None
@property
def read_key_b_required(self):
if hasattr(self, '_m_read_key_b_required'):
return self._m_read_key_b_required if hasattr(self, '_m_read_key_b_required') else None
self._m_read_key_b_required = self.ac.val <= 6
return self._m_read_key_b_required if hasattr(self, '_m_read_key_b_required') else None
@property
def decrement_available(self):
if hasattr(self, '_m_decrement_available'):
return self._m_decrement_available if hasattr(self, '_m_decrement_available') else None
self._m_decrement_available = (( ((self.ac.bits[1].b) or (not (self.ac.bits[0].b))) ) and (not (self.ac.bits[2].b)))
return self._m_decrement_available if hasattr(self, '_m_decrement_available') else None
@property
def increment_available(self):
if hasattr(self, '_m_increment_available'):
return self._m_increment_available if hasattr(self, '_m_increment_available') else None
self._m_increment_available = (( ((not (self.ac.bits[0].b)) and (not (self.read_key_a_required)) and (not (self.read_key_b_required))) ) or ( ((not (self.ac.bits[0].b)) and (self.read_key_a_required) and (self.read_key_b_required)) ))
return self._m_increment_available if hasattr(self, '_m_increment_available') else None
class Ac(KaitaiStruct):
SEQ_FIELDS = []
def __init__(self, index, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.index = index
self._debug = collections.defaultdict(dict)
def _read(self):
pass
class AcBit(KaitaiStruct):
SEQ_FIELDS = []
def __init__(self, i, chunk, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.i = i
self.chunk = chunk
self._debug = collections.defaultdict(dict)
def _read(self):
pass
@property
def n(self):
if hasattr(self, '_m_n'):
return self._m_n if hasattr(self, '_m_n') else None
self._m_n = ((self.chunk >> self.i) & 1)
return self._m_n if hasattr(self, '_m_n') else None
@property
def b(self):
if hasattr(self, '_m_b'):
return self._m_b if hasattr(self, '_m_b') else None
self._m_b = self.n == 1
return self._m_b if hasattr(self, '_m_b') else None
@property
def bits(self):
if hasattr(self, '_m_bits'):
return self._m_bits if hasattr(self, '_m_bits') else None
_pos = self._io.pos()
self._io.seek(0)
self._debug['_m_bits']['start'] = self._io.pos()
self._m_bits = [None] * (self._parent._parent.ac_bits)
for i in range(self._parent._parent.ac_bits):
if not 'arr' in self._debug['_m_bits']:
self._debug['_m_bits']['arr'] = []
self._debug['_m_bits']['arr'].append({'start': self._io.pos()})
_t__m_bits = MifareClassic.Trailer.AccessConditions.Ac.AcBit(self.index, self._parent.chunks[i].chunk, self._io, self, self._root)
_t__m_bits._read()
self._m_bits[i] = _t__m_bits
self._debug['_m_bits']['arr'][i]['end'] = self._io.pos()
self._debug['_m_bits']['end'] = self._io.pos()
self._io.seek(_pos)
return self._m_bits if hasattr(self, '_m_bits') else None
@property
def val(self):
"""c3 c2 c1."""
if hasattr(self, '_m_val'):
return self._m_val if hasattr(self, '_m_val') else None
self._m_val = (((self.bits[2].n << 2) | (self.bits[1].n << 1)) | self.bits[0].n)
return self._m_val if hasattr(self, '_m_val') else None
@property
def inv_shift_val(self):
if hasattr(self, '_m_inv_shift_val'):
return self._m_inv_shift_val if hasattr(self, '_m_inv_shift_val') else None
self._m_inv_shift_val = (((self.bits[0].n << 2) | (self.bits[1].n << 1)) | self.bits[2].n)
return self._m_inv_shift_val if hasattr(self, '_m_inv_shift_val') else None
class ValidChunk(KaitaiStruct):
SEQ_FIELDS = []
def __init__(self, inv_chunk, chunk, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.inv_chunk = inv_chunk
self.chunk = chunk
self._debug = collections.defaultdict(dict)
def _read(self):
pass
@property
def valid(self):
if hasattr(self, '_m_valid'):
return self._m_valid if hasattr(self, '_m_valid') else None
self._m_valid = (self.inv_chunk ^ self.chunk) == 15
return self._m_valid if hasattr(self, '_m_valid') else None
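            # Note added for clarity: every 4-bit access-condition chunk is stored
            # in the trailer together with its bitwise inverse, and the pair is
            # considered valid only when chunk XOR inv_chunk == 0b1111.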
@property
def data_acs(self):
if hasattr(self, '_m_data_acs'):
return self._m_data_acs if hasattr(self, '_m_data_acs') else None
_pos = self._io.pos()
self._io.seek(0)
self._debug['_m_data_acs']['start'] = self._io.pos()
self._m_data_acs = [None] * ((self._parent.acs_in_sector - 1))
for i in range((self._parent.acs_in_sector - 1)):
if not 'arr' in self._debug['_m_data_acs']:
self._debug['_m_data_acs']['arr'] = []
self._debug['_m_data_acs']['arr'].append({'start': self._io.pos()})
_t__m_data_acs = MifareClassic.Trailer.AccessConditions.DataAc(self.acs_raw[i], self._io, self, self._root)
_t__m_data_acs._read()
self._m_data_acs[i] = _t__m_data_acs
self._debug['_m_data_acs']['arr'][i]['end'] = self._io.pos()
self._debug['_m_data_acs']['end'] = self._io.pos()
self._io.seek(_pos)
return self._m_data_acs if hasattr(self, '_m_data_acs') else None
@property
def remaps(self):
if hasattr(self, '_m_remaps'):
return self._m_remaps if hasattr(self, '_m_remaps') else None
_pos = self._io.pos()
self._io.seek(0)
self._debug['_m_remaps']['start'] = self._io.pos()
self._m_remaps = [None] * (self._parent.ac_bits)
for i in range(self._parent.ac_bits):
if not 'arr' in self._debug['_m_remaps']:
self._debug['_m_remaps']['arr'] = []
self._debug['_m_remaps']['arr'].append({'start': self._io.pos()})
_t__m_remaps = MifareClassic.Trailer.AccessConditions.ChunkBitRemap(i, self._io, self, self._root)
_t__m_remaps._read()
self._m_remaps[i] = _t__m_remaps
self._debug['_m_remaps']['arr'][i]['end'] = self._io.pos()
self._debug['_m_remaps']['end'] = self._io.pos()
self._io.seek(_pos)
return self._m_remaps if hasattr(self, '_m_remaps') else None
@property
def acs_raw(self):
if hasattr(self, '_m_acs_raw'):
return self._m_acs_raw if hasattr(self, '_m_acs_raw') else None
_pos = self._io.pos()
self._io.seek(0)
self._debug['_m_acs_raw']['start'] = self._io.pos()
self._m_acs_raw = [None] * (self._parent.acs_in_sector)
for i in range(self._parent.acs_in_sector):
if not 'arr' in self._debug['_m_acs_raw']:
self._debug['_m_acs_raw']['arr'] = []
self._debug['_m_acs_raw']['arr'].append({'start': self._io.pos()})
_t__m_acs_raw = MifareClassic.Trailer.AccessConditions.Ac(i, self._io, self, self._root)
_t__m_acs_raw._read()
self._m_acs_raw[i] = _t__m_acs_raw
self._debug['_m_acs_raw']['arr'][i]['end'] = self._io.pos()
self._debug['_m_acs_raw']['end'] = self._io.pos()
self._io.seek(_pos)
return self._m_acs_raw if hasattr(self, '_m_acs_raw') else None
@property
def trailer_ac(self):
if hasattr(self, '_m_trailer_ac'):
return self._m_trailer_ac if hasattr(self, '_m_trailer_ac') else None
_pos = self._io.pos()
self._io.seek(0)
self._debug['_m_trailer_ac']['start'] = self._io.pos()
self._m_trailer_ac = MifareClassic.Trailer.AccessConditions.TrailerAc(self.acs_raw[(self._parent.acs_in_sector - 1)], self._io, self, self._root)
self._m_trailer_ac._read()
self._debug['_m_trailer_ac']['end'] = self._io.pos()
self._io.seek(_pos)
return self._m_trailer_ac if hasattr(self, '_m_trailer_ac') else None
@property
def chunks(self):
if hasattr(self, '_m_chunks'):
return self._m_chunks if hasattr(self, '_m_chunks') else None
_pos = self._io.pos()
self._io.seek(0)
self._debug['_m_chunks']['start'] = self._io.pos()
self._m_chunks = [None] * (self._parent.ac_bits)
for i in range(self._parent.ac_bits):
if not 'arr' in self._debug['_m_chunks']:
self._debug['_m_chunks']['arr'] = []
self._debug['_m_chunks']['arr'].append({'start': self._io.pos()})
_t__m_chunks = MifareClassic.Trailer.AccessConditions.ValidChunk(self.raw_chunks[self.remaps[i].inv_chunk_no], self.raw_chunks[self.remaps[i].chunk_no], self._io, self, self._root)
_t__m_chunks._read()
self._m_chunks[i] = _t__m_chunks
self._debug['_m_chunks']['arr'][i]['end'] = self._io.pos()
self._debug['_m_chunks']['end'] = self._io.pos()
self._io.seek(_pos)
return self._m_chunks if hasattr(self, '_m_chunks') else None
@property
def ac_bits(self):
if hasattr(self, '_m_ac_bits'):
return self._m_ac_bits if hasattr(self, '_m_ac_bits') else None
self._m_ac_bits = 3
return self._m_ac_bits if hasattr(self, '_m_ac_bits') else None
@property
def acs_in_sector(self):
if hasattr(self, '_m_acs_in_sector'):
return self._m_acs_in_sector if hasattr(self, '_m_acs_in_sector') else None
self._m_acs_in_sector = 4
return self._m_acs_in_sector if hasattr(self, '_m_acs_in_sector') else None
@property
def ac_count_of_chunks(self):
if hasattr(self, '_m_ac_count_of_chunks'):
return self._m_ac_count_of_chunks if hasattr(self, '_m_ac_count_of_chunks') else None
self._m_ac_count_of_chunks = (self.ac_bits * 2)
return self._m_ac_count_of_chunks if hasattr(self, '_m_ac_count_of_chunks') else None
|
leetcode/coding/editor.py | chishui/terminal-leetcode | 254 | 40389 | import os
import subprocess
from pathlib import Path
from ..views.viewhelper import delay_refresh_detail
from ..helper.config import config
def edit(filepath: Path, loop):
if isinstance(filepath, str):
filepath = Path(filepath)
editor = os.environ.get('EDITOR', 'vi').lower()
# vim
if editor == 'vi' or editor == 'vim':
cmd = editor + ' ' + str(filepath)
current_directory = Path.cwd()
os.chdir(filepath.parent)
if config.tmux_support and is_inside_tmux():
open_in_new_tmux_window(cmd)
else:
subprocess.call(cmd, shell=True)
delay_refresh_detail(loop)
os.chdir(current_directory)
# sublime text
elif editor == 'sublime':
cmd = 'subl ' + str(filepath)
subprocess.call(cmd, shell=True)
def is_inside_tmux():
return 'TMUX' in os.environ
def open_in_new_tmux_window(edit_cmd):
    # close other panes if any exist, so that the detail pane is the only pane
try:
output = subprocess.check_output("tmux list-panes | wc -l", shell=True)
num_pane = int(output)
if num_pane > 1:
subprocess.check_call("tmux kill-pane -a", shell=True)
except Exception:
pass
cmd = "tmux split-window -h"
os.system(cmd)
cmd = "tmux send-keys -t right '%s' C-m" % edit_cmd
os.system(cmd)
|
setup.py | nad2000/swagger_to_uml | 190 | 40393 | <gh_stars>100-1000
from setuptools import setup, find_packages
requires = [
'PyYAML==5.1'
]
setup(
name='swagger_to_uml',
version='0.1',
description='swagger_to_uml',
classifiers=[
"Programming Language :: Python"
],
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
url='http://nlohmann.me',
keywords='swagger uml plantuml',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
scripts=['bin/swagger_to_uml']
)
|
multimedia/gui/lvgl/lvgl_multiple_screens.py | 708yamaguchi/MaixPy_scripts | 485 | 40402 | <reponame>708yamaguchi/MaixPy_scripts<filename>multimedia/gui/lvgl/lvgl_multiple_screens.py
# This demo shows how to create multiple screens and load/unload them properly without causing a memory leak
import lvgl as lv
import lvgl_helper as lv_h
import lcd
import time
from machine import Timer
from machine import I2C
from touch import Touch, TouchLow
import KPU as kpu
import gc
config_touchscreen_support = True
board_m1n = False
lcd.init()
TOUCH = None
def read_cb(drv, ptr):
data = lv.indev_data_t.cast(ptr)
TOUCH.event()
data.point = lv.point_t({'x': TOUCH.points[1][0], 'y': TOUCH.points[1][1]})
data.state = lv.INDEV_STATE.PR if TOUCH.state == 1 else lv.INDEV_STATE.REL
return False
if config_touchscreen_support:
i2c = I2C(I2C.I2C0, freq=1000*1000, scl=24, sda=27) # 24 27)
devices = i2c.scan()
print("devs", devices) # devs 0 [16, 38, 52, 56]
TouchLow.config(i2c)
TOUCH = Touch(480, 320, 200)
lv.init()
disp_buf1 = lv.disp_buf_t()
buf1_1 = bytearray(320*10)
lv.disp_buf_init(disp_buf1,buf1_1, None, len(buf1_1)//4)
disp_drv = lv.disp_drv_t()
lv.disp_drv_init(disp_drv)
disp_drv.buffer = disp_buf1
disp_drv.flush_cb = lv_h.flush
if board_m1n:
disp_drv.hor_res = 240
disp_drv.ver_res = 240
else:
disp_drv.hor_res = 480
disp_drv.ver_res = 320
lv.disp_drv_register(disp_drv)
if config_touchscreen_support:
indev_drv = lv.indev_drv_t()
lv.indev_drv_init(indev_drv)
indev_drv.type = lv.INDEV_TYPE.POINTER
indev_drv.read_cb = read_cb
lv.indev_drv_register(indev_drv)
lv.log_register_print_cb(lambda level,path,line,msg: print('%s(%d): %s' % (path, line, msg)))
class UI:
def __init__(self):
self.scr1 = self.create_scr1()
self.scr2 = self.create_scr2()
def create_scr1(self):
scr1 = lv.obj()
btn1 = lv.btn(scr1)
btn1.align(scr1, lv.ALIGN.CENTER, 0, 0)
label1 = lv.label(btn1)
label1.set_text("Button 1")
label1.set_size(20,20)
return scr1
def create_scr2(self):
scr2 = lv.obj()
btn2 = lv.btn(scr2)
btn2.align(scr2, lv.ALIGN.CENTER, 0, 0)
label2 = lv.label(btn2)
label2.set_text("Button 2")
label2.set_size(20,20)
return scr2
ui = UI()
kpu.memtest()
def on_timer(timer):
lv.tick_inc(5)
lv.task_handler()
gc.collect()
timer = Timer(Timer.TIMER0, Timer.CHANNEL0, mode=Timer.MODE_PERIODIC, period=5, unit=Timer.UNIT_MS, callback=on_timer, arg=None)
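# Note added for clarity: the loop below alternates between the two pre-built
# screens roughly every 500 ms and calls kpu.memtest() after each switch, so a
# memory leak would show up as steadily shrinking free memory.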
while True:
tim = time.ticks_ms()
while time.ticks_ms()-tim < 500:
pass
lv.scr_load(ui.scr1)
kpu.memtest()
tim = time.ticks_ms()
while time.ticks_ms()-tim < 500:
pass
lv.scr_load(ui.scr2)
kpu.memtest()
|
quantecon/markov/utilities.py | Smit-create/QuantEcon.py | 1,462 | 40415 | <reponame>Smit-create/QuantEcon.py<gh_stars>1000+
"""
Utility routines for the markov submodule
"""
import numpy as np
from numba import jit
@jit(nopython=True, cache=True)
def sa_indices(num_states, num_actions):
"""
Generate `s_indices` and `a_indices` for `DiscreteDP`, for the case
where all the actions are feasible at every state.
Parameters
----------
num_states : scalar(int)
Number of states.
num_actions : scalar(int)
Number of actions.
Returns
-------
s_indices : ndarray(int, ndim=1)
Array containing the state indices.
a_indices : ndarray(int, ndim=1)
Array containing the action indices.
Examples
--------
>>> s_indices, a_indices = qe.markov.sa_indices(4, 3)
>>> s_indices
array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3])
>>> a_indices
array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2])
"""
L = num_states * num_actions
dtype = np.int_
s_indices = np.empty(L, dtype=dtype)
a_indices = np.empty(L, dtype=dtype)
i = 0
for s in range(num_states):
for a in range(num_actions):
s_indices[i] = s
a_indices[i] = a
i += 1
return s_indices, a_indices
@jit(nopython=True, cache=True)
def _fill_dense_Q(s_indices, a_indices, Q_in, Q_out):
L = Q_in.shape[0]
for i in range(L):
Q_out[s_indices[i], a_indices[i], :] = Q_in[i, :]
return Q_out
@jit(nopython=True, cache=True)
def _s_wise_max_argmax(a_indices, a_indptr, vals, out_max, out_argmax):
n = len(out_max)
for i in range(n):
if a_indptr[i] != a_indptr[i+1]:
m = a_indptr[i]
for j in range(a_indptr[i]+1, a_indptr[i+1]):
if vals[j] > vals[m]:
m = j
out_max[i] = vals[m]
out_argmax[i] = a_indices[m]
@jit(nopython=True, cache=True)
def _s_wise_max(a_indices, a_indptr, vals, out_max):
n = len(out_max)
for i in range(n):
if a_indptr[i] != a_indptr[i+1]:
m = a_indptr[i]
for j in range(a_indptr[i]+1, a_indptr[i+1]):
if vals[j] > vals[m]:
m = j
out_max[i] = vals[m]
@jit(nopython=True, cache=True)
def _find_indices(a_indices, a_indptr, sigma, out):
n = len(sigma)
for i in range(n):
for j in range(a_indptr[i], a_indptr[i+1]):
if sigma[i] == a_indices[j]:
out[i] = j
@jit(nopython=True, cache=True)
def _has_sorted_sa_indices(s_indices, a_indices):
"""
Check whether `s_indices` and `a_indices` are sorted in
lexicographic order.
Parameters
----------
s_indices, a_indices : ndarray(ndim=1)
Returns
-------
bool
Whether `s_indices` and `a_indices` are sorted.
"""
L = len(s_indices)
for i in range(L-1):
if s_indices[i] > s_indices[i+1]:
return False
if s_indices[i] == s_indices[i+1]:
if a_indices[i] >= a_indices[i+1]:
return False
return True
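# Illustrative example (added; not part of the original module): the index pairs
# (0, 0), (0, 1), (1, 0) are in lexicographic order, so
#     _has_sorted_sa_indices(np.array([0, 0, 1]), np.array([0, 1, 0]))  # -> True
# whereas swapping the last two state indices breaks the ordering:
#     _has_sorted_sa_indices(np.array([0, 1, 0]), np.array([0, 0, 1]))  # -> False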
@jit(nopython=True, cache=True)
def _generate_a_indptr(num_states, s_indices, out):
"""
Generate `a_indptr`; stored in `out`. `s_indices` is assumed to be
in sorted order.
Parameters
----------
num_states : scalar(int)
s_indices : ndarray(int, ndim=1)
out : ndarray(int, ndim=1)
Length must be num_states+1.
"""
idx = 0
out[0] = 0
for s in range(num_states-1):
while(s_indices[idx] == s):
idx += 1
out[s+1] = idx
out[num_states] = len(s_indices)
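# Illustrative example (added; not part of the original module): with
# num_states=3 and s_indices=[0, 0, 1, 1, 1, 2] (already sorted), the filled
# `out` array becomes [0, 2, 5, 6], so the actions available at state s occupy
# positions out[s]:out[s+1] of a_indices.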
|
examples/run_bidaf/cmrc_bidaf.py | ishine/SMRCToolkit | 1,238 | 40437 | <reponame>ishine/SMRCToolkit<gh_stars>1000+
# coding: utf-8
from sogou_mrc.data.vocabulary import Vocabulary
from sogou_mrc.dataset.squad import SquadReader, SquadEvaluator
from sogou_mrc.dataset.cmrc import CMRCReader,CMRCEvaluator
from sogou_mrc.model.bidaf import BiDAF
import tensorflow as tf
import logging
from sogou_mrc.data.batch_generator import BatchGenerator
tf.logging.set_verbosity(tf.logging.ERROR)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
data_folder = ''
embedding_folder = ''
train_file = data_folder+"cmrc2018_train.json"
dev_file = data_folder+"cmrc2018_dev.json"
reader = CMRCReader()
train_data = reader.read(train_file)
eval_data = reader.read(dev_file)
evaluator = CMRCEvaluator(dev_file)
vocab = Vocabulary(do_lowercase=False)
vocab.build_vocab(train_data + eval_data, min_word_count=3, min_char_count=10)
word_embedding = vocab.make_word_embedding(embedding_folder)
train_batch_generator = BatchGenerator(vocab, train_data, batch_size=32, training=True)
eval_batch_generator = BatchGenerator(vocab, eval_data, batch_size=60)
model = BiDAF(vocab, pretrained_word_embedding=word_embedding,word_embedding_size=300)
model.compile(tf.train.AdamOptimizer, 0.001)
model.train_and_evaluate(train_batch_generator, eval_batch_generator, evaluator, epochs=50, eposides=2)
|
pynes/examples/mario.py | timgates42/pyNES | 1,046 | 40438 | <filename>pynes/examples/mario.py
import pynes
from pynes.bitbag import *
if __name__ == "__main__":
pynes.press_start()
exit()
palette = [
0x22,0x29, 0x1A,0x0F, 0x22,0x36,0x17,0x0F, 0x22,0x30,0x21,0x0F, 0x22,0x27,0x17,0x0F,
0x22,0x16,0x27,0x18, 0x22,0x1A,0x30,0x27, 0x22,0x16,0x30,0x27, 0x22,0x0F,0x36,0x17]
chr_asset = import_chr('mario.chr')
tinymario = define_sprite(108,144, [50,51,52,53], 0)
mario = define_sprite(128, 128, [0, 1, 2, 3, 4, 5, 6, 7], 0)
firemario = define_sprite(164,128, [0, 1, 2, 3, 4, 5, 6, 7], 0)
def reset():
wait_vblank()
clearmem()
wait_vblank()
load_palette(palette)
load_sprite(tinymario, 0)
load_sprite(mario, 4)
load_sprite(firemario, 12)
def joypad1_up():
get_sprite(mario).y -= 1
def joypad1_down():
get_sprite(mario).y += 1
def joypad1_left():
get_sprite(mario).x -= 1
def joypad1_right():
get_sprite(mario).x += 1
|
tests/__init__.py | Kua-Fu/rally | 1,577 | 40461 | <filename>tests/__init__.py
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import asyncio
def run_async(t):
"""
A wrapper that ensures that a test is run in an asyncio context.
:param t: The test case to wrap.
"""
def async_wrapper(*args, **kwargs):
asyncio.run(t(*args, **kwargs), debug=True)
return async_wrapper
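# Minimal usage sketch (added for illustration; the demo test below is
# hypothetical and not part of the original test suite): run_async lets a
# synchronous test runner call an async test function directly.
if __name__ == "__main__":
    @run_async
    async def _demo_test():
        await asyncio.sleep(0)
    _demo_test()  # executes the coroutine to completion via asyncio.run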
|
alipay/aop/api/domain/ItapDeviceInfo.py | antopen/alipay-sdk-python-all | 213 | 40463 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ItapDeviceInfo(object):
def __init__(self):
self._fw_version = None
self._hw_version = None
self._manufacturer = None
self._model = None
self._product_name = None
@property
def fw_version(self):
return self._fw_version
@fw_version.setter
def fw_version(self, value):
self._fw_version = value
@property
def hw_version(self):
return self._hw_version
@hw_version.setter
def hw_version(self, value):
self._hw_version = value
@property
def manufacturer(self):
return self._manufacturer
@manufacturer.setter
def manufacturer(self, value):
self._manufacturer = value
@property
def model(self):
return self._model
@model.setter
def model(self, value):
self._model = value
@property
def product_name(self):
return self._product_name
@product_name.setter
def product_name(self, value):
self._product_name = value
def to_alipay_dict(self):
params = dict()
if self.fw_version:
if hasattr(self.fw_version, 'to_alipay_dict'):
params['fw_version'] = self.fw_version.to_alipay_dict()
else:
params['fw_version'] = self.fw_version
if self.hw_version:
if hasattr(self.hw_version, 'to_alipay_dict'):
params['hw_version'] = self.hw_version.to_alipay_dict()
else:
params['hw_version'] = self.hw_version
if self.manufacturer:
if hasattr(self.manufacturer, 'to_alipay_dict'):
params['manufacturer'] = self.manufacturer.to_alipay_dict()
else:
params['manufacturer'] = self.manufacturer
if self.model:
if hasattr(self.model, 'to_alipay_dict'):
params['model'] = self.model.to_alipay_dict()
else:
params['model'] = self.model
if self.product_name:
if hasattr(self.product_name, 'to_alipay_dict'):
params['product_name'] = self.product_name.to_alipay_dict()
else:
params['product_name'] = self.product_name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ItapDeviceInfo()
if 'fw_version' in d:
o.fw_version = d['fw_version']
if 'hw_version' in d:
o.hw_version = d['hw_version']
if 'manufacturer' in d:
o.manufacturer = d['manufacturer']
if 'model' in d:
o.model = d['model']
if 'product_name' in d:
o.product_name = d['product_name']
return o
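# Minimal usage sketch (added for illustration; field values are hypothetical):
#   info = ItapDeviceInfo()
#   info.fw_version = "1.0.3"
#   info.model = "NFC-Reader-01"
#   d = info.to_alipay_dict()   # -> {'fw_version': '1.0.3', 'model': 'NFC-Reader-01'}
#   restored = ItapDeviceInfo.from_alipay_dict(d)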
|
synapse/servers/aha.py | ackroute/synapse | 216 | 40464 | <gh_stars>100-1000
# pragma: no cover
import sys
import asyncio
import synapse.lib.aha as s_aha
if __name__ == '__main__': # pragma: no cover
asyncio.run(s_aha.AhaCell.execmain(sys.argv[1:]))
|
tests/python/contrib/test_ethosu/cascader/test_integration.py | shengxinhu/tvm | 4,640 | 40466 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wrong-import-position,invalid-name
"""
Test the cascader in the compilation flow.
"""
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu.codegen import _create_cascader
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from tvm.contrib.ethosu.cascader import MemoryRegion, EthosuDeviceConfig
from .. import infra as test_infra
from . import infra as cascader_test_infra
def _ethos_u55_cascader():
sram = MemoryRegion(
name="SRAM",
size=10**6,
read_bandwidth=16,
write_bandwidth=16,
read_latency=0,
write_latency=0,
burst_length=1,
)
flash = MemoryRegion(name="FLASH", size=10**7, read_bandwidth=4, write_bandwidth=4)
device_config = EthosuDeviceConfig("ethos-u55-256")
cascader_options = cascader_test_infra.make_options(
cascade_region=sram,
max_proposals=64,
stripe_factors=4,
max_plan_size=10,
max_open_plans=8,
max_closed_plans=32,
always_copy_size=1024,
disable_pareto_plans=False,
disable_pareto_proposals=False,
enable_striping=False,
)
return _create_cascader(
options=cascader_options,
io_region=sram,
constant_region=flash,
working_regions=[sram],
device_config=device_config,
)
def _compile_model(relay_function):
mod = tvm.IRModule()
mod["main"] = relay_function
mod = relay.transform.InferType()(mod)
tir_mod = _lower_to_tir(mod["main"], _ethos_u55_cascader())[0]
return tir_mod["main"]
def _create_single_conv2d():
ifm = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
conv1 = test_infra.make_ethosu_conv2d(ifm, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1))
func = relay.Function(relay.analysis.free_vars(conv1), conv1)
return func
def _create_double_conv2d():
ifm = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
conv1 = test_infra.make_ethosu_conv2d(ifm, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1))
conv2 = test_infra.make_ethosu_conv2d(conv1, 4, 4, (1, 3), (1, 1), (1, 1), (1, 1))
func = relay.Function(relay.analysis.free_vars(conv2), conv2)
return func
def _create_scalar_add():
ifm = relay.var("x", shape=(1, 5, 4, 3), dtype="int8")
ifm2 = relay.const(np.ones((1, 1, 1, 1)), dtype="int8")
add = test_infra.make_ethosu_binary_elementwise(
ifm, ifm2, ifm_channels=3, ifm2_channels=1, operator_type="ADD", ofm_dtype="int8"
)
func = relay.Function(relay.analysis.free_vars(add), add)
return func
def test_single_conv_compute_cycles_hint():
"""
Check the "compute_cycles_hint" annotation remains in the lowering flow
for single convolution.
"""
primfunc = _compile_model(_create_single_conv2d())
ops = primfunc.body.body.body.seq
compute_cycles_hints = [2304, 640, 320]
for op, compute_cycle_hint in zip(ops, compute_cycles_hints):
assert op.attr_key == "pragma_compute_cycles_hint"
assert op.value == compute_cycle_hint
def test_double_conv_compute_cycles_hint():
"""
Check the "compute_cycles_hint" annotation remains in the lowering flow
for double convolution.
"""
primfunc = _compile_model(_create_double_conv2d())
ops = primfunc.body.body.body.body.body.body.seq
compute_cycles_hints = [2304, 640, 768, 640, 320, 240]
for op, compute_cycle_hint in zip(ops, compute_cycles_hints):
assert op.attr_key == "pragma_compute_cycles_hint"
assert op.value == compute_cycle_hint
def test_scalar_add_compute_cycles_hint():
"""
Check the "compute_cycles_hint" annotation remains in the lowering flow
for add with scalar values.
"""
primfunc = _compile_model(_create_scalar_add())
ops = primfunc.body.body.seq
compute_cycles_hints = [16, 24]
for op, compute_cycle_hint in zip(ops, compute_cycles_hints):
assert op.attr_key == "pragma_compute_cycles_hint"
assert op.value == compute_cycle_hint
|
pymterm/term_pylibui/key_translate.py | stonewell/pymterm | 102 | 40467 | <gh_stars>100-1000
__key_mapping = {
'return' : 'enter',
'up_arrow' : 'up',
'down_arrow' : 'down',
'left_arrow' : 'left',
'right_arrow' : 'right',
'page_up' : 'pageup',
'page_down' : 'pagedown',
}
def translate_key(e):
if len(e.key) > 0:
return __key_mapping[e.key] if e.key in __key_mapping else e.key
else:
if e.char == '\x08':
return 'backspace'
elif e.char == '\t':
return 'tab'
else:
return e.key
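# Illustrative examples (added for clarity): an event whose key name is
# 'page_up' translates to 'pageup', and an event with an empty key name whose
# raw character is '\x08' translates to 'backspace'.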
|
tests/test_core.py | srdjanrosic/supervisor | 597 | 40475 | """Testing handling with CoreState."""
from supervisor.const import CoreState
from supervisor.coresys import CoreSys
def test_write_state(run_dir, coresys: CoreSys):
"""Test write corestate to /run/supervisor."""
coresys.core.state = CoreState.RUNNING
assert run_dir.read_text() == CoreState.RUNNING.value
coresys.core.state = CoreState.SHUTDOWN
assert run_dir.read_text() == CoreState.SHUTDOWN.value
|
faceQuality/get_quality.py | awesome-archive/MaskInsightface | 269 | 40490 | # -*- coding: utf-8 -*-
from keras.models import load_model
import numpy as np
import os
import cv2
from FaceQNet import load_Qnet_model, face_quality
# Loading the pretrained model
model = load_Qnet_model()
IMG_PATH = '/home/sai/YANG/image/video/nanning/haha'
dir = os.listdir(IMG_PATH)
count = len(dir)
print('count:', count)
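# Note added for clarity: the loop below scores every image under IMG_PATH with
# FaceQNet and renames it to "<score>@<original name>", so files can later be
# sorted or filtered by estimated face quality.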
for i in dir:
count -= 1
if count%1000==0:
print('count:', count)
dir_path = os.path.join(IMG_PATH, i)
imgs_dir = os.listdir(dir_path)
for j in imgs_dir:
img_path = os.path.join(dir_path, j)
img = cv2.imread(img_path)
score = face_quality(model, img)
# img = [cv2.resize(cv2.imread(img_path, cv2.IMREAD_COLOR), (224, 224))]
# test_data = np.array(img, copy=False, dtype=np.float32)
# score = model.predict(test_data, batch_size=1, verbose=1)
path1 = str(score[0][0]) + '@'
rename = path1 + j
os.rename(img_path, os.path.join(dir_path, rename))
|
imagetagger/imagetagger/administration/forms.py | jbargu/imagetagger | 212 | 40500 | <reponame>jbargu/imagetagger
from django import forms
from imagetagger.annotations.models import AnnotationType
class AnnotationTypeCreationForm(forms.ModelForm):
class Meta:
model = AnnotationType
fields = [
'name',
'active',
'node_count',
'vector_type',
'enable_concealed',
'enable_blurred',
]
class AnnotationTypeEditForm(forms.ModelForm):
class Meta:
model = AnnotationType
fields = [
'name',
'active',
'enable_concealed',
'enable_blurred',
]
|
tenkit/lib/python/striped_smith_waterman/pyssw.py | qiangli/cellranger | 239 | 40523 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@package pyssw
@brief Python standalone program for ssw alignment using the C library
Complete-Striped-Smith-Waterman-Library
The Biopython module is required for fasta/fastq parsing
@copyright [The MIT licence](http://opensource.org/licenses/MIT)
@author <NAME> - 2014
* <<EMAIL>>
* <<EMAIL>>
* <<EMAIL>>
* [Github](https://github.com/a-slide)
* [Atlantic Gene Therapies - INSERM 1089] (http://www.atlantic-gene-therapies.fr/)
"""
#~~~~~~~GLOBAL IMPORTS~~~~~~~#
# Standard library packages
import optparse
import sys
from time import time
import gzip
#~~~~~~~MAIN FUNCTION~~~~~~~#
def main (opt):
print ("Inport subject sequence")
# Import fasta subject
if opt.subject.rpartition(".")[2].lower() == "gz":
subject_handle = gzip.open(opt.subject, "r")
else:
subject_handle = open(opt.subject, "r")
subject = SeqIO.read(subject_handle, "fasta")
print ("Inport query sequences and count the number of sequences")
# Import fasta subject
if opt.query.rpartition(".")[2].lower() == "gz":
nseq = count_seq(opt.query, opt.qtype, True)
query_handle = gzip.open(opt.query, "r")
else:
nseq = count_seq(opt.query, opt.qtype, False)
query_handle = open(opt.query, "r")
query_gen = SeqIO.parse(query_handle, opt.qtype)
print("{} contains {} sequences to align".format(opt.query, nseq))
# Calculate a step list for the progress bar
nseq_list = [int(nseq*i/100.0) for i in range(5,101,5)]
print ("Initialize ssw aligner with the subject sequence")
    # Init an Aligner object with the reference value
ssw = Aligner(
str(subject.seq),
match=int(opt.match),
mismatch=int(opt.mismatch),
gap_open=int(opt.gap_open),
gap_extend= int(opt.gap_extend),
report_secondary=False,
report_cigar=True)
# Write the header of the SAM file
with open("result.sam", "w") as f:
f.write("@HD\tVN:1.0\tSO:unsorted\n")
f.write("@SQ\tSN:{}\tLN:{}\n".format(subject.id, len(subject.seq)))
f.write("@PG\tID:Striped-Smith-Waterman\tPN:pyssw\tVN:0.1\n")
f.write("@CO\tScore_values = match {}, mismatch {}, gap_open {}, gap_extend {}\n".format(
opt.match,
opt.mismatch,
opt.gap_open,
opt.gap_extend))
f.write("@CO\tFilter Options = min_score {}, min_len {}\n".format(
opt.min_score,
opt.min_len))
print ("Starting alignment of queries against the subject sequence")
start = time()
    # Align each query along the subject and write the result in a SAM file
i = 0
for query in query_gen:
# Find the best alignment
if opt.reverse:
al, orient = find_best_align (ssw, query, float(opt.min_score), int(opt.min_len))
else:
al, orient = ssw.align(str(query.seq), float(opt.min_score), int(opt.min_len)), True
# If valid match found
if al:
f.write(sam_line(
qname=query.id,
flag=0 if orient else 16,
rname=subject.id,
pos=al.ref_begin+1,
cigar=al.cigar_string,
seq=str(query.seq),
qual=SeqIO.QualityIO._get_sanger_quality_str(query) if opt.qtype == "fastq" else "*",
tags=["AS:i:{}".format(al.score)]))
# If no valid match found and -u flag activated (report unaligned)
elif opt.unaligned:
f.write(sam_line(
qname=query.id,
flag=4,
seq=str(query.seq),
qual=SeqIO.QualityIO._get_sanger_quality_str(query) if opt.qtype == "fastq" else "*"))
# Else = match unreported
# Progress bar
i+=1
if i in nseq_list:
frac = i/float(nseq)
t = time()-start
print ("{} sequences \t{}% \tRemaining time = {}s".format(i, int(frac*100), round(t/frac-t, 2)))
print ("\n{} Sequences processed in {}s".format(i, round(time()-start, 2)))
#~~~~~~~HELPER FUNCTIONS~~~~~~~#
def sam_line (qname='*', flag=4, rname='*', pos=0, mapq=0, cigar='*', rnext='*', pnext=0, tlen=0, seq='*', qual='*', tags=None):
"""
Return a minimal sam line = by default return an undetermined sam line. Check the document
[SAM Format Specification](http://samtools.sourceforge.net/SAM1.pdf) for a full description.
@param qname Query template NAME
@param flag bitwise FLAG
@param rname Reference sequence NAME of the alignment
@param pos 1-based leftmost mapping POSition of the first matching base
@param mapq MAPping Quality
@param cigar CIGAR string
@param rnext Reference sequence name of the primary alignment of the mate
@param pnext 1-based leftmost position of the primary alignment of the mate
@param tlen signed observed Template LENgth
@param seq segment SEQuence
@param qual ASCII of base QUALity plus 33
@param tags list of optional tags
@return A Sam alignment line
"""
if tags:
return "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
qname, flag, rname, pos, mapq, cigar, rnext, pnext, tlen, seq, qual, " ".join(tags))
else:
return "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
qname, flag, rname, pos, mapq, cigar, rnext, pnext, tlen, seq, qual)
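# Illustrative example (added; the read values are hypothetical): an aligned
# read could be written as
#   sam_line(qname="read1", flag=0, rname="chrM", pos=100, cigar="36M",
#            seq="ACGTACGT", qual="IIIIIIII", tags=["AS:i:72"])
# which returns
#   "read1\t0\tchrM\t100\t0\t36M\t*\t0\t0\tACGTACGT\tIIIIIIII\tAS:i:72\n"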
def find_best_align (ssw, query, min_score, min_len):
# Align reverse and forward query
forward_al = ssw.align(str(query.seq), min_score, min_len)
reverse_al = ssw.align(str(query.seq.reverse_complement()), min_score, min_len)
# Decision tree to return the best aligned sequence taking into acount the absence of result
# by ssw_wrap in case of score filtering
if not forward_al:
if not reverse_al:
return (None, None)
else:
return (reverse_al, False)
else:
if not reverse_al:
return (forward_al, True)
else:
if forward_al.score >= reverse_al.score:
return (forward_al, True)
else:
return (reverse_al, False)
def count_seq (filename, seq_type="fasta", gziped=False):
"""
Count the number of sequences in a fastq or a fastq file
@param filename Path to a valid readeable file
@param file_type Should be either fastq or fastq. Default fasta
@param gziped Boolean indicating if the file is gziped or not. Default False
"""
#Standard library import
import gzip
from mmap import mmap
# Verify if the file is fasta or fastq type
assert seq_type in ["fasta", "fastq"], "The file has to be either fastq or fasta format"
# Open the file
if gziped:
f = gzip.open(filename, "r")
else:
f = open(filename, "r")
    # FASTA: find a start-of-sequence character ">" and increment the counter each time
if seq_type == "fasta":
nline = 0
for line in f:
if line[0] == ">":
nline+=1
f.close()
return nline
# FASTQ No motif to find, but 4 lines correspond to 1 sequence
else:
nline = 0
for line in f:
nline+=1
f.close()
return nline/4
def optparser():
print("Parse command line options")
# Usage and version strings
program_name = "pyssw"
program_version = 0.1
version_string = "{}\t{}".format(program_name, program_version)
usage_string = "{}.py -s subject.fasta -q fastq (or fasta) [Facultative options]".format(program_name)
optparser = optparse.OptionParser(usage = usage_string, version = version_string)
# Define optparser options
hstr = "Path of the fasta file containing the subject genome sequence. Can be gziped. [REQUIRED] "
optparser.add_option( '-s', '--subject', dest="subject", help=hstr)
hstr = "Path of the fastq or fasta file containing the short read to be aligned. Can be gziped. [REQUIRED]"
optparser.add_option( '-q', '--query', dest="query", help=hstr)
hstr = "Type of the query file = fastq or fasta. [default: fastq]"
optparser.add_option( '-t', '--qtype', dest="qtype", default="fastq", help=hstr)
hstr = "Positive integer for weight match in genome sequence alignment. [default: 2]"
optparser.add_option( '-m', '--match', dest="match",default=2, help=hstr)
hstr = "Positive integer. The negative value will be used as weight mismatch in genome sequence alignment. [default: 2]"
optparser.add_option( '-x', '--mismatch', dest="mismatch", default=2, help=hstr)
hstr = "Positive integer. The negative value will be used as weight for the gap opening. [default: 3]"
optparser.add_option( '-o', '--gap_open', dest="gap_open", default=3, help=hstr)
hstr = "Positive integer. The negative value will be used as weight for the gap opening. [default: 1]"
optparser.add_option( '-e', '--gap_extend', dest="gap_extend", default=1, help=hstr)
hstr = "Integer. Consider alignments having a score <= as not aligned. [default: 0]"
optparser.add_option( '-f', '--min_score', dest="min_score", default=0, help=hstr)
hstr = "Integer. Consider alignments having a length <= as not aligned. [default: 0]"
optparser.add_option( '-l', '--min_len', dest="min_len", default=0, help=hstr)
hstr = "Flag. Align query in forward and reverse orientation and choose the best alignment. [Set by default]"
optparser.add_option( '-r', '--reverse', dest="reverse", action="store_true", default=True, help=hstr)
hstr = "Flag. Write unaligned reads in sam output [Unset by default]"
optparser.add_option( '-u', '--unaligned', dest="unaligned", action="store_true", default=False, help=hstr)
# Parse arg and return a dictionnary_like object of options
opt, args = optparser.parse_args()
if not opt.subject:
print ("\nERROR: a subject fasta file has to be provided (-s option)\n")
optparser.print_help()
sys.exit()
if not opt.query:
print ("\nERROR: a query fasta or fastq file has to be provided (-q option)\n")
optparser.print_help()
sys.exit()
return opt
#~~~~~~~TOP LEVEL INSTRUCTIONS~~~~~~~#
if __name__ == '__main__':
# try to import Third party and local packages
try:
from Bio import SeqIO
except ImportError:
print ("ERROR: Please install Biopython package")
sys.exit()
try:
from ssw_wrap import Aligner
except ImportError:
print ("ERROR: Please place ssw_wrap in the current directory or add its dir to python path")
sys.exit()
# Parse command line arguments
opt = optparser()
# Run the main function
main(opt)
|
pyclustering/cluster/tests/rock_templates.py | JosephChataignon/pyclustering | 1,013 | 40547 | """!
@brief Test templates for ROCK clustering module.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
from pyclustering.cluster.rock import rock;
from pyclustering.utils import read_sample;
from random import random;
class RockTestTemplates:
@staticmethod
def templateLengthProcessData(path_to_file, radius, cluster_numbers, threshold, expected_cluster_length, ccore):
sample = read_sample(path_to_file);
rock_instance = rock(sample, radius, cluster_numbers, threshold, ccore);
rock_instance.process();
clusters = rock_instance.get_clusters();
length = sum([len(cluster) for cluster in clusters]);
assert len(sample) == length;
obtained_cluster_sizes = [len(cluster) for cluster in clusters];
obtained_cluster_sizes.sort();
expected_cluster_length.sort();
assert obtained_cluster_sizes == expected_cluster_length;
@staticmethod
def templateClusterAllocationOneDimensionData(ccore_flag):
input_data = [ [random()] for i in range(10) ] + [ [random() + 3] for i in range(10) ] + [ [random() + 5] for i in range(10) ] + [ [random() + 8] for i in range(10) ];
rock_instance = rock(input_data, 1, 4, 0.5, ccore_flag);
rock_instance.process();
clusters = rock_instance.get_clusters();
assert len(clusters) == 4;
for cluster in clusters:
assert len(cluster) == 10;
|