content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses 1 value) |
---|---|---|---|---|---|---|---|---|
from .action_handler import *
from .event_handler import *
from .headline_post_action import *
from .incident_command import *
from .keyword_handler import *
from .incident_notification import *
from .dialog_handler import *
| 28.125 | 36 | 0.813333 | ["MIT"] | qubitdigital/response | response/slack/decorators/__init__.py | 225 | Python |
from browser import document, alert
import sys
from pprint import pprint
# minimal stdout/stderr replacement: appends printed text to the page's "output" element
class redirect:
def write(self, text):
document["output"].innerHTML += text
sys.stdout = redirect()
sys.stderr = redirect()
d = document["output"]
d.clear()
d.innerHTML = "Hello"
print("Hello again")
def hello(ev):
alert("Hello !")
document["button1"].bind("click", hello)
| 17.318182 | 43 | 0.664042 | ["Apache-2.0"] | citizendatascience/ErysNotes | data/tests/redirect.py | 381 | Python |
# Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import pytest
import cirq
from cirq.transformers.transformer_primitives import MAPPED_CIRCUIT_OP_TAG
def test_map_operations_can_write_new_gates_inline():
x = cirq.NamedQubit('x')
y = cirq.NamedQubit('y')
z = cirq.NamedQubit('z')
c = cirq.Circuit(
cirq.CZ(x, y),
cirq.Y(x),
cirq.Z(x),
cirq.X(y),
cirq.CNOT(y, z),
cirq.Z(y),
cirq.Z(x),
cirq.CNOT(y, z),
cirq.CNOT(z, y),
)
cirq.testing.assert_has_diagram(
c,
'''
x: ───@───Y───Z───Z───────────
│
y: ───@───X───@───Z───@───X───
│ │ │
z: ───────────X───────X───@───
''',
)
expected_diagram = '''
x: ───X───X───X───X───────────
y: ───X───X───X───X───X───X───
z: ───────────X───────X───X───
'''
cirq.testing.assert_has_diagram(
cirq.map_operations(c, lambda op, _: cirq.X.on_each(*op.qubits)), expected_diagram
)
cirq.testing.assert_has_diagram(
cirq.map_operations_and_unroll(c, lambda op, _: cirq.X.on_each(*op.qubits)),
expected_diagram,
)
def test_map_operations_does_not_insert_too_many_moments():
q = cirq.LineQubit.range(5)
c_orig = cirq.Circuit(
cirq.CX(q[0], q[1]),
cirq.CX(q[3], q[2]),
cirq.CX(q[3], q[4]),
)
def map_func(op: cirq.Operation, _: int) -> cirq.OP_TREE:
if op.gate == cirq.CX:
yield cirq.Z.on_each(*op.qubits)
yield cirq.CX(*op.qubits)
yield cirq.Z.on_each(*op.qubits)
return op
cirq.testing.assert_has_diagram(
c_orig,
'''
0: ───@───────
│
1: ───X───────
2: ───X───────
│
3: ───@───@───
│
4: ───────X───
''',
)
c_mapped = cirq.map_operations(c_orig, map_func)
circuit_op = cirq.CircuitOperation(
cirq.FrozenCircuit(
cirq.Z.on_each(q[0], q[1]), cirq.CNOT(q[0], q[1]), cirq.Z.on_each(q[0], q[1])
)
)
c_expected = cirq.Circuit(
circuit_op.with_qubits(q[0], q[1]).mapped_op().with_tags('<mapped_circuit_op>'),
circuit_op.with_qubits(q[3], q[2]).mapped_op().with_tags('<mapped_circuit_op>'),
circuit_op.with_qubits(q[3], q[4]).mapped_op().with_tags('<mapped_circuit_op>'),
)
cirq.testing.assert_same_circuits(c_mapped, c_expected)
cirq.testing.assert_has_diagram(
cirq.map_operations_and_unroll(c_orig, map_func),
'''
0: ───Z───@───Z───────────────
│
1: ───Z───X───Z───────────────
2: ───Z───X───Z───────────────
│
3: ───Z───@───Z───Z───@───Z───
│
4: ───────────────Z───X───Z───
''',
)
def test_unroll_circuit_op_and_variants():
q = cirq.LineQubit.range(2)
c = cirq.Circuit(cirq.X(q[0]), cirq.CNOT(q[0], q[1]), cirq.X(q[0]))
cirq.testing.assert_has_diagram(
c,
'''
0: ───X───@───X───
│
1: ───────X───────
''',
)
mapped_circuit = cirq.map_operations(
c, lambda op, i: [cirq.Z(q[1])] * 2 if op.gate == cirq.CNOT else op
)
cirq.testing.assert_has_diagram(
cirq.unroll_circuit_op(mapped_circuit),
'''
0: ───X───────────X───
1: ───────Z───Z───────
''',
)
cirq.testing.assert_has_diagram(
cirq.unroll_circuit_op_greedy_earliest(mapped_circuit),
'''
0: ───X───────X───
1: ───Z───Z───────
''',
)
cirq.testing.assert_has_diagram(
cirq.unroll_circuit_op_greedy_frontier(mapped_circuit),
'''
0: ───X───────X───
1: ───────Z───Z───
''',
)
def test_unroll_circuit_op_no_tags():
q = cirq.LineQubit.range(2)
op_list = [cirq.X(q[0]), cirq.Y(q[1])]
op1 = cirq.CircuitOperation(cirq.FrozenCircuit(op_list))
op2 = op1.with_tags("custom tag")
op3 = op1.with_tags(MAPPED_CIRCUIT_OP_TAG)
c = cirq.Circuit(op1, op2, op3)
for unroller in [
cirq.unroll_circuit_op,
cirq.unroll_circuit_op_greedy_earliest,
cirq.unroll_circuit_op_greedy_frontier,
]:
cirq.testing.assert_same_circuits(
unroller(c, tags_to_check=None), cirq.Circuit([op_list] * 3)
)
cirq.testing.assert_same_circuits(unroller(c), cirq.Circuit([op1, op2, op_list]))
cirq.testing.assert_same_circuits(
unroller(c, tags_to_check=("custom tag",)), cirq.Circuit([op1, op_list, op3])
)
cirq.testing.assert_same_circuits(
unroller(
c,
tags_to_check=("custom tag", MAPPED_CIRCUIT_OP_TAG),
),
cirq.Circuit([op1, op_list, op_list]),
)
def test_map_operations_raises_qubits_not_subset():
q = cirq.LineQubit.range(3)
with pytest.raises(ValueError, match='should act on a subset'):
_ = cirq.map_operations(
cirq.Circuit(cirq.CNOT(q[0], q[1])), lambda op, i: cirq.CNOT(q[1], q[2])
)
def test_map_moments_drop_empty_moments():
op = cirq.X(cirq.NamedQubit("x"))
c = cirq.Circuit(cirq.Moment(op), cirq.Moment(), cirq.Moment(op))
c_mapped = cirq.map_moments(c, lambda m, i: [] if len(m) == 0 else [m])
cirq.testing.assert_same_circuits(c_mapped, cirq.Circuit(c[0], c[0]))
def test_merge_moments():
q = cirq.LineQubit.range(3)
c_orig = cirq.Circuit(
cirq.Z.on_each(q[0], q[1]),
cirq.Z.on_each(q[1], q[2]),
cirq.Z.on_each(q[1], q[0]),
strategy=cirq.InsertStrategy.NEW_THEN_INLINE,
)
c_orig = cirq.Circuit(c_orig, cirq.CCX(*q), c_orig)
cirq.testing.assert_has_diagram(
c_orig,
'''
0: ───Z───────Z───@───Z───────Z───
│
1: ───Z───Z───Z───@───Z───Z───Z───
│
2: ───────Z───────X───────Z───────
''',
)
def merge_func(m1: cirq.Moment, m2: cirq.Moment) -> Optional[cirq.Moment]:
def is_z_moment(m):
return all(op.gate == cirq.Z for op in m)
if not (is_z_moment(m1) and is_z_moment(m2)):
return None
qubits = m1.qubits | m2.qubits
def mul(op1, op2):
return (op1 or op2) if not (op1 and op2) else cirq.decompose_once(op1 * op2)
return cirq.Moment(mul(m1.operation_at(q), m2.operation_at(q)) for q in qubits)
cirq.testing.assert_has_diagram(
cirq.merge_moments(c_orig, merge_func),
'''
0: ───────@───────
│
1: ───Z───@───Z───
│
2: ───Z───X───Z───
''',
)
def test_merge_moments_empty_circuit():
def fail_if_called_func(*_):
assert False
c = cirq.Circuit()
assert cirq.merge_moments(c, fail_if_called_func) is c
def test_merge_operations_raises():
q = cirq.LineQubit.range(3)
c = cirq.Circuit(cirq.CZ(*q[:2]), cirq.X(q[0]))
with pytest.raises(ValueError, match='must act on a subset of qubits'):
cirq.merge_operations(c, lambda *_: cirq.X(q[2]))
def test_merge_operations_nothing_to_merge():
def fail_if_called_func(*_):
assert False
# Empty Circuit.
c = cirq.Circuit()
assert cirq.merge_operations(c, fail_if_called_func) == c
# Single moment
q = cirq.LineQubit.range(3)
c += cirq.Moment(cirq.CZ(*q[:2]))
assert cirq.merge_operations(c, fail_if_called_func) == c
# Multi moment with disjoint operations + global phase operation.
c += cirq.Moment(cirq.X(q[2]), cirq.global_phase_operation(1j))
assert cirq.merge_operations(c, fail_if_called_func) == c
def test_merge_operations_merges_connected_component():
q = cirq.LineQubit.range(3)
c_orig = cirq.Circuit(
cirq.Moment(cirq.H.on_each(*q)),
cirq.CNOT(q[0], q[2]),
cirq.CNOT(*q[0:2]),
cirq.H(q[0]),
cirq.CZ(*q[:2]),
cirq.X(q[0]),
cirq.Y(q[1]),
cirq.CNOT(*q[0:2]),
cirq.CNOT(*q[1:3]),
cirq.X(q[0]),
cirq.Y(q[1]),
cirq.CNOT(*q[:2]),
strategy=cirq.InsertStrategy.NEW,
)
cirq.testing.assert_has_diagram(
c_orig,
'''
0: ───H───@───@───H───@───X───────@───────X───────@───
│ │ │ │ │
1: ───H───┼───X───────@───────Y───X───@───────Y───X───
│ │
2: ───H───X───────────────────────────X───────────────
''',
)
def merge_func(op1, op2):
"""Artificial example where a CZ will absorb any merge-able operation."""
for op in [op1, op2]:
if op.gate == cirq.CZ:
return op
return None
c_new = cirq.merge_operations(c_orig, merge_func)
cirq.testing.assert_has_diagram(
c_new,
'''
0: ───H───@───────────@───────────────────────────@───
│ │ │
1: ───────┼───────────@───────────────@───────Y───X───
│ │
2: ───H───X───────────────────────────X───────────────''',
)
@pytest.mark.parametrize('qubit_order', ([0, 1], [1, 0]))
def test_merge_operations_deterministic_order(qubit_order):
q = cirq.LineQubit.range(2)
c_orig = cirq.Circuit(cirq.identity_each(*q), cirq.H.on_each(q[i] for i in qubit_order))
cirq.testing.assert_has_diagram(
c_orig,
'''
0: ───I───H───
│
1: ───I───H───''',
)
c_new = cirq.merge_operations(
c_orig, lambda op1, op2: op2 if isinstance(op1.gate, cirq.IdentityGate) else None
)
cirq.testing.assert_has_diagram(
c_new,
'''
0: ───H───────
1: ───────H───''',
)
@pytest.mark.parametrize("op_density", [0.1, 0.5, 0.9])
def test_merge_operations_complexity(op_density):
prng = cirq.value.parse_random_state(11011)
circuit = cirq.testing.random_circuit(20, 500, op_density, random_state=prng)
for merge_func in [
lambda _, __: None,
lambda op1, _: op1,
lambda _, op2: op2,
lambda op1, op2: (op1, op2, None)[prng.choice(3)],
]:
def wrapped_merge_func(op1, op2):
wrapped_merge_func.num_function_calls += 1
return merge_func(op1, op2)
wrapped_merge_func.num_function_calls = 0
_ = cirq.merge_operations(circuit, wrapped_merge_func)
total_operations = len([*circuit.all_operations()])
assert wrapped_merge_func.num_function_calls <= 2 * total_operations
| 29.108108 | 92 | 0.548839 | ["Apache-2.0"] | TripleRD/Cirq | cirq-core/cirq/transformers/transformer_primitives_test.py | 12,512 | Python |
import collections
import operator
import bytewax
FIRST_ITERATION = 0
def read_edges(filename):
with open(filename) as lines:
for line in lines:
line = line.strip()
if line:
parent, child = tuple(x.strip() for x in line.split(","))
yield FIRST_ITERATION, (parent, {child})
INITIAL_WEIGHT = 1.0
def with_initial_weight(parent_children):
parent, children = parent_children
return parent, INITIAL_WEIGHT, children
def parent_contribs(parent_weight_children):
parent, weight, children = parent_weight_children
contrib_from_parent = weight / len(children)
for child in children:
yield child, contrib_from_parent
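# Classic PageRank damping with d = 0.85: each node keeps a baseline weight of
# (1 - d) = 0.15 plus d times the sum of the contributions it receives.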
def sum_to_weight(node_sum):
node, sum_contrib = node_sum
updated_weight = 0.15 + 0.85 * sum_contrib
return node, updated_weight
ec = bytewax.Executor()
flow = ec.Dataflow(read_edges("examples/sample_data/graph.txt"))
# (parent, {child}) per edge
flow.reduce_epoch(operator.or_)
# (parent, children) per parent
# TODO: Some sort of state capture here. This will be tricky because
# we don't have a way of building state per-worker generically
# yet. Timely uses Rust closures, but we're outside that context here.
flow.map(with_initial_weight)
# (parent, weight, children) per parent
flow.flat_map(parent_contribs)
# (child, contrib) per child * parent
# This is a network-bound performance optimization to pre-aggregate
# contribution sums in the worker before sending them to other
# workers. See
# https://github.com/frankmcsherry/blog/blob/master/posts/2015-07-08.md#implementation-2-worker-level-aggregation
flow.reduce_epoch_local(operator.add)
# (node, sum_contrib) per node per worker
flow.reduce_epoch(operator.add)
# (node, sum_contrib) per node
flow.map(sum_to_weight)
# (node, updated_weight) per node
# TODO: Figure out worker-persistent state? Then we don't need to
# re-join with the graph to get connectivity. Also figure out how to
# iterate.
flow.inspect_epoch(print)
if __name__ == "__main__":
ec.build_and_run()
| 27.289474 | 113 | 0.733365 | ["ECL-2.0", "Apache-2.0"] | mttcnnff/bytewax | examples/pagerank.py | 2,074 | Python |
from abc import ABCMeta
from abc import abstractproperty
# The base class for all BMI Exceptions
# Made abstract since it is recommended to raise the specific subclass
class BMIException(Exception):
__metaclass__ = ABCMeta
@abstractproperty
def status_code(self):
pass
# The base class for all exceptions related to the file system like ceph
class FileSystemException(BMIException):
__metaclass__ = ABCMeta
# The base class for all exceptions related to HIL
class HILException(BMIException):
__metaclass__ = ABCMeta
# The base class for all exceptions related to Database
class DBException(BMIException):
__metaclass__ = ABCMeta
# The base class for all exceptions related to ISCSI
class ISCSIException(BMIException):
__metaclass__ = ABCMeta
# The base class for all exceptions related to the BMI Config Parser
class ConfigException(BMIException):
__metaclass__ = ABCMeta
# The base class for all exceptions related to DHCP
class DHCPException(BMIException):
__metaclass__ = ABCMeta
class ShellException(BMIException):
""" The Base Class for all exceptions related to Shell """
__metaclass__ = ABCMeta
# this exception should be raised when a user who is not a bmi admin tries
# admin level functions
class AuthorizationFailedException(BMIException):
@property
def status_code(self):
return 403
def __str__(self):
return "User Does Not Have Admin Role"
class RegistrationFailedException(BMIException):
@property
def status_code(self):
return 500
def __init__(self, node, error):
self.node = node
self.error = error
def __str__(self):
return "Failed to register " + self.node + " due to " + self.error
| 24.347222 | 74 | 0.735881 | ["Apache-2.0"] | CCI-MOC/ABMI | m2-modified/ims/exception/exception.py | 1,753 | Python |
import requests
import json
import os
requests.packages.urllib3.disable_warnings()
from cmlApiCalls import CML as cml
#edit the following variables
server = "cml.server.com"
username = "admin"
password = "CMLpassword123"
lab = "53b3fe"
user = os.getlogin()
auth = cml.auth(server, username, password)
N = True
n_id = 0
port = 9000
try:
os.mkdir(rf"C:/Users/{user}/AppData/Roaming/VanDyke/Config/Sessions/CML-{lab}")
except:
print("directory already exists... continue...")
while N:
node_id = f"n{n_id}"
response = cml.getNodesByID(auth, server, lab, node_id)
if response == "end of list":
#exit if end of list
N = False
elif response.get("node_definition") == "external_connector":
# don't count external_connector as usable
n_id = n_id + 1
else:
# get label
node_label = response.get("label")
# turn port number into hex
# strip the leading "0x" (e.g. "0x2233" -> "2233"), leaving the 4 hex characters
hexport = hex(port).split('x')[-1]
with open("config.ini", "r") as config:
temp = config.read()
temp = temp.replace("REPLACE", "0000" + hexport)
location = rf"C:/Users/{user}/AppData/Roaming/VanDyke/Config/Sessions/CML-{lab}/{port}-{node_label}.ini"
with open( location, "w") as config2write:
config2write.write(temp)
if response.get("node_definition") == "wan_emulator":
# add by 1 if wan_emulator
port = port + 1
else:
port = port + 2
n_id = n_id + 1
| 25.952381 | 116 | 0.585933 | ["BSD-3-Clause"] | alexanderdeca/cml-community | scripts/breakout-to-secureCRT-session/main.py | 1,635 | Python |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file created by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.form
import typing
from abc import abstractmethod
from ..lang.x_event_listener import XEventListener as XEventListener_c7230c4a
if typing.TYPE_CHECKING:
from ..lang.event_object import EventObject as EventObject_a3d70b03
class XPositioningListener(XEventListener_c7230c4a):
"""
allows to receive notifications about cursor movements into a database form.
Please do not use anymore, this interface is deprecated, and superseded by functionality from the com.sun.star.form.component.DataForm service, as well as the com.sun.star.sdbc.XRowSetListener.
.. deprecated::
Class is deprecated.
See Also:
`API XPositioningListener <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1form_1_1XPositioningListener.html>`_
"""
__ooo_ns__: str = 'com.sun.star.form'
__ooo_full_ns__: str = 'com.sun.star.form.XPositioningListener'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.form.XPositioningListener'
@abstractmethod
def positioned(self, aEvent: 'EventObject_a3d70b03') -> None:
"""
is invoked when the database form has been positioned on a data record.
"""
__all__ = ['XPositioningListener']
| 37.377358 | 197 | 0.746593 | ["Apache-2.0"] | Amourspirit/ooo_uno_tmpl | ooobuild/lo/form/x_positioning_listener.py | 1,981 | Python |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 16:39:59 2020
@author: nicholls
"""
import os
import numpy as np
import matplotlib.pyplot as plt
#%%
class AngularSpreadCalc():
""" class for calculating how angular spread changes with iterations:
Inputs:
iterations: maximum number of iterations to calculate for (e.g. 500)
acceptance angle: acceptance angle of analyser
energy: initial energy of scattered electrons (eV)
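width: full width at half maximum of the Lorentzian cross section
(only required when using gen_lorentzian_cross_section)
Example (illustrative parameter values, not taken from the original code):
calc = AngularSpreadCalc(iterations=500, acceptance_angle=30, energy=1000, width=10)
calc.gen_lorentzian_cross_section()
calc.run_convolution()
calc.limit_by_acceptance_angle()
intensity_per_event = calc.calc_area_under_curve()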
"""
def __init__(self, iterations, acceptance_angle, energy, width=None):
self.iterations = iterations
self.acceptance_angle = acceptance_angle
self.energy = energy
self.width = width
def gen_lorentzian_cross_section(self):
self.cross_section_x = np.arange(-90, 90, 1)
y = [self._lorentzian_cross_section(x, self.width) for x in self.cross_section_x]
self.cross_section_y = y
return self.cross_section_y
def _lorentzian_cross_section(self, x, width):
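# Lorentzian (Cauchy) lineshape centred at `position`, where `width` is the full
# width at half maximum: L(x) = intensity / (1 + ((position - x) / (width / 2))**2)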
position = 0
intensity = 1
l = intensity * 1 / (1 + ((position-x)/(width/2))**2)
return l
def plot_cross_section(self):
""" Plot the raw imported nist data """
plt.plot(self.cross_section_x, self.cross_section_y)
plt.title('Cross Section')
plt.xlabel('Angle')
plt.show()
def load_nist_cross_section(self, filename):
""" Load nist data file of differential elastic scattering profile.
Input:
filename: filename of csv data from nist database
Returns:
cross_section_y: given cross section in range -90 to 90 deg """
filepath = (os.path.dirname(os.path.abspath(__file__)).partition('controller')[0]
+ '\\data\\NIST cross sections\\' + filename)
data = np.genfromtxt(filepath, skip_header=10, delimiter=',')
self.cross_section_y = self._convert_nist_data(data)
self.cross_section_x = np.arange(-90, 90, 1)
return self.cross_section_y
def plot_nist(self):
""" Plot the raw imported nist data """
plt.plot(self.cross_section_x, self.cross_section_y)
plt.title('NIST Data')
plt.xlabel('Angle')
plt.show()
def run_convolution(self):
""" Run convolution between the nist cross section and a sine curve
representing initial scattering distribution.
Returns:
centered_data: angular distribution spread after each scattering
event
"""
# normalise cross section by area under curve
self.cross_section_y_norm = self.cross_section_y / np.sum(self.cross_section_y)
# generate initial distribution of electron scatter:
self.emitted_elctn_y = self._gen_electron_dist()
self.emitted_elctn_x = np.arange(-90, 90, 1)
# run convolution
convolved_data = self._convolution(self.cross_section_y_norm,
self.emitted_elctn_y,
self.iterations)
# center data and remove excess data (i.e. outside -90 to 90 range)
self.centered_data = self._centre_data(convolved_data)
return self.centered_data
def plot_convolution_results(self):
""" Plot convolution result to show angular distribution spread after
each scattering event."""
# plotting selected iterations:
for n in [0, 1, 2, 5, 10, 20, 50]:
plt.plot(self.emitted_elctn_x, self.centered_data[n], label=str(n))
plt.xticks([-90, -60, -30, 0, 30, 60, 90])
plt.xlabel('theta (degrees)')
plt.ylabel('Intensity (a.u.)')
plt.title('Angular distribution per scattering event')
plt.legend(title='No. of iterations', loc='center left',
bbox_to_anchor=(1, 0.5))
#plt.savefig('Convolution.png', dpi=600, bbox_inches='tight')
plt.show()
def limit_by_acceptance_angle(self):
""" Limit the data to the acceptance angle of the analyser """
# to set acceptance angle
self.angle_limited = self._limit_by_constant_angle(self.centered_data,
self.acceptance_angle)
#return self.angle_limited
def plot_angle_limited(self):
""" Plot the convolution results only in the accepted angle range"""
# to plot angle limited data
for n in [0, 1, 2, 5, 10, 20, 50]:
plt.plot(self.emitted_elctn_x, self.angle_limited[n], label=str(n))
plt.xticks([-90, -60, -30, 0, 30, 60, 90])
plt.xlabel('theta (degrees)')
plt.ylabel('Intensity (a.u.)')
plt.title('Intensity distribution after scattering event')
plt.legend(title='No. of iterations', loc='center left', bbox_to_anchor=(1, 0.5))
#plt.savefig('angle convolution.png', dpi=600, bbox_inches='tight')
plt.show()
def calc_area_under_curve(self):
""" Calculate area under each curve within acceptance angle,
represents intensity that the detector sees"""
sin = np.absolute(np.sin(np.arange(-90, 90, 1) * np.pi / 180))
angle_integrated = self.angle_limited * sin * np.pi
self.area_sum = np.sum(angle_integrated, axis=1)
self.area_sum = self.area_sum / self.area_sum[0]
return self.area_sum
def plot_area_under_curve(self):
""" Plot area under curve per scattering event / iteration """
plt.plot(self.area_sum)
plt.title('area under curve \n '
'(Energy: ' + str(self.energy) + ', Acceptance Angle: ' +
str(self.acceptance_angle) + ')')
plt.xlabel('No. of iterations')
plt.ylabel('Intensity a.u.')
plt.show()
def calc_area_ratio(self):
""" Calculate the change in area ratio between iteration n and n-1"""
# Change in ratio
self.area_ratio_list = self._area_ratio_change(self.area_sum)
return self.area_ratio_list
def plot_area_ratio(self):
""" Plot the change in area ratio per iteration """
# to plot
plt.plot(self.area_ratio_list)
plt.title('Intensity ratio change per iteration \n '
'(Energy: ' + str(self.energy) + ' eV, Acceptance Angle: '
+ str(self.acceptance_angle) + ')')
plt.xlabel('Iterations')
plt.ylabel('Area Ratio between iterations')
#plt.savefig('Ratio change per iteration.png', dpi=600)
plt.show()
def _convert_nist_data(self, dataset):
data = [n for n in dataset[:, 1]]
data.reverse()
data.extend([n for n in dataset[:, 1]][1:])
data = data[90:270]
return data
def _gen_electron_dist(self):
# x values
self.emitted_elctn_x = np.arange(-90, 90, 1)
# calculate y by cosine distribution
self.emitted_elctn_y = np.array([(np.cos(np.pi * i / 180))
for i in self.emitted_elctn_x])
# normalise by area under the curve
self.emitted_elctn_y = self.emitted_elctn_y / np.sum(self.emitted_elctn_y)
return self.emitted_elctn_y
def _convolution(self, cross_section, scatter, n):
# empty list to contain arrays of the scattered electrons
scattered_events = []
# add the first entry for unscattered:
scattered_events.append(scatter)
# convolution n number of times:
for i in range(n):
# convolve cross section with last scattered
z = np.convolve(cross_section, scattered_events[i])
# add scattered to list
scattered_events.append(z)
return scattered_events
def _centre_data(self, scattered_data_list):
data_cropped = []
for indx, scattering_event in enumerate(scattered_data_list):
centre = (indx+1) * 90
x_range_min = centre-90
x_range_max = centre+90
data = scattering_event[x_range_min : x_range_max]
data_cropped.append(data)
return data_cropped
def _limit_by_constant_angle(self, scattered_data_list, acceptance_angle):
angle = acceptance_angle/2
min_acceptance = 0 - angle
max_acceptance = 0 + angle
x_range = np.arange(-90, 90, 1)
min_index_list = np.where(x_range < min_acceptance)
max_index_list = np.where(x_range > max_acceptance)
for indx, scatter in enumerate(scattered_data_list):
scatter[min_index_list] = 0
scatter[max_index_list] = 0
return scattered_data_list
def _area_ratio_change(self, area_sum_list):
ratio_list = []
for n in range(len(area_sum_list)):
if n != 0:
ratio = area_sum_list[n]/area_sum_list[n-1]
ratio_list.append(ratio)
return ratio_list
| 36.726531 | 93 | 0.610358 | ["MIT"] | surfaceanalytics/inelasticscattering | model/algorithms/legacy/angular_spread_lorentzian.py | 8,998 | Python |
# =======================================================================================
# \ | | __ __| _ \ | / __| \ \ / __|
# _ \ | | | ( | . < _| \ / \__ \
# @autor: Luis Monteiro _/ _\ \__/ _| \___/ _|\_\ ___| _| ____/
# =======================================================================================
from autokeys.engine import Keyboard, HotKeys, SeqKeys, Clipboard
# =======================================================================================
# build credentials config
# =======================================================================================
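# NOTE (shape inferred from the loop below, not documented upstream): `data` is
# expected to map a short key-sequence string to a credentials dict, e.g.
# {"gh": {"user": "octocat", "pass": "s3cret"}}, so that CTRL+ALT+U or CTRL+ALT+P
# followed by typing the key sequence writes the matching user name or password.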
def config_credentials(data):
# actions
def write_user(user):
def process(x):
Keyboard.Type(user, len(x))
return process
def write_pass(password):
def process(x):
Keyboard.Type(password, len(x))
Clipboard.Stage(password)
return process
# build config
hotkeys_user = HotKeys(Keyboard.CTRL, Keyboard.ALT, Keyboard.KEY('u'))
hotkeys_pass = HotKeys(Keyboard.CTRL, Keyboard.ALT, Keyboard.KEY('p'))
hotkeys_conf = {
hotkeys_user:{},
hotkeys_pass:{}}
for key, entry in data.items():
# user
hotkeys_conf[hotkeys_user][SeqKeys(*[Keyboard.KEY(x) for x in key])] = write_user(entry['user'])
# pass
hotkeys_conf[hotkeys_pass][SeqKeys(*[Keyboard.KEY(x) for x in key])] = write_pass(entry['pass'])
return hotkeys_conf
| 41.378378 | 105 | 0.427172 | ["MIT"] | lcmonteiro/service-autokeys | autokeys/credentials.py | 1,531 | Python |
"""Install funsies."""
import setuptools
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setuptools.setup(
name="funsies",
version="0.8.1",
author="Cyrille Lavigne",
author_email="[email protected]",
description="Funsies is a library to build and execution engine for"
+ " reproducible, composable and data-persistent computational workflows.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/aspuru-guzik-group/funsies",
package_dir={"": "src"},
package_data={"funsies": ["py.typed"]}, # mypy exports
packages=setuptools.find_namespace_packages(where="src"),
# Dependencies
python_requires=">=3.7",
install_requires=[
"mypy_extensions",
"redis",
"cloudpickle",
"rq>=1.7",
"loguru",
'importlib-metadata ~= 1.0 ; python_version < "3.8"',
'typing_extensions ; python_version < "3.8"',
"chevron",
],
entry_points="""
[console_scripts]
funsies=funsies._cli:main
start-funsies=funsies._start_funsies:main
""",
classifiers=[
"Development Status :: 4 - Beta",
#
"Typing :: Typed",
#
"License :: OSI Approved :: MIT License",
#
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
#
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Scientific/Engineering :: Physics",
],
keywords="workflows hashtree redis compchem chemistry parallel hpc",
)
| 32.821429 | 79 | 0.624048 | ["MIT"] | aspuru-guzik-group/funsies | setup.py | 1,838 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.CloudbusTransitResultItem import CloudbusTransitResultItem
class AlipayDataAiserviceCloudbusTransitorridorQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayDataAiserviceCloudbusTransitorridorQueryResponse, self).__init__()
self._result = None
@property
def result(self):
return self._result
@result.setter
def result(self, value):
if isinstance(value, CloudbusTransitResultItem):
self._result = value
else:
self._result = CloudbusTransitResultItem.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(AlipayDataAiserviceCloudbusTransitorridorQueryResponse, self).parse_response_content(response_content)
if 'result' in response:
self.result = response['result']
| 32.966667 | 127 | 0.736097 | ["Apache-2.0"] | Anning01/alipay-sdk-python-all | alipay/aop/api/response/AlipayDataAiserviceCloudbusTransitorridorQueryResponse.py | 989 | Python |
class Domain(Enum,IComparable,IFormattable,IConvertible):
"""
Enumeration of connector domain types
enum Domain,values: DomainCableTrayConduit (4),DomainElectrical (2),DomainHvac (1),DomainPiping (3),DomainUndefined (0)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
DomainCableTrayConduit=None
DomainElectrical=None
DomainHvac=None
DomainPiping=None
DomainUndefined=None
value__=None
| 27.820513 | 215 | 0.684793 | ["MIT"] | BCSharp/ironpython-stubs | release/stubs.min/Autodesk/Revit/DB/__init___parts/Domain.py | 1,085 | Python |
"""
This is the custom function interface.
You should not implement it, or speculate about its implementation
class CustomFunction:
# Returns f(x, y) for any given positive integers x and y.
# Note that f(x, y) is increasing with respect to both x and y.
# i.e. f(x, y) < f(x + 1, y), f(x, y) < f(x, y + 1)
def f(self, x, y):
"""
from typing import List
class Solution:
def findSolution(self, customfunction: 'CustomFunction', z: int) -> List[List[int]]:
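# Two-pointer sweep: because f is strictly increasing in both arguments, x only
# moves up while y only moves down, so y is never reset between iterations and
# f is evaluated at most O(X + Y) times.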
#print(customfunction.f(1,4))
ans = []
y = 1000
for x in range(1,1001):
while y > 0:
t = customfunction.f(x,y)
if t == z:
ans.append([x,y])
break
elif t > z:
y -= 1
else: # t < z
break
return ans
| 34.346154 | 89 | 0.463606 | ["MPL-2.0", "MPL-2.0-no-copyleft-exception"] | lwy0ever/leetcode | 1237-Find Positive Integer Solution for a Given Equation.py | 893 | Python |
# This code implements a variational autoencoder using importance weighted
# sampling as described in Burda et al. 2015 "Importance Weighted Autoencoders"
# and the planar normalizing flow described in Rezende et al. 2015
# "Variational Inference with Normalizing Flows"
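# A planar flow (Rezende & Mohamed 2015) passes a base sample z0 through K invertible
# maps f_k(z) = z + u_k * h(w_k^T z + b_k); the density correction accumulated below is
# sum_k log|det(dz_k / dz_{k-1})| = sum_k log|1 + u_k^T h'(w_k^T z_{k-1} + b_k) w_k|.
# NormalizingPlanarFlowLayer returns both the transformed sample and this log-det term.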
import theano
theano.config.floatX = 'float32'
import matplotlib
matplotlib.use('Agg')
import theano.tensor as T
import numpy as np
import lasagne
from parmesan.distributions import log_stdnormal, log_normal2, log_bernoulli
from parmesan.layers import SampleLayer, NormalizingPlanarFlowLayer, ListIndexLayer, NormalizeLayer, ScaleAndShiftLayer
from parmesan.datasets import load_mnist_realval, load_mnist_binarized
from parmesan.utils import log_mean_exp
import matplotlib.pyplot as plt
import shutil, gzip, os, cPickle, time, math, operator, argparse
filename_script = os.path.basename(os.path.realpath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument("-dataset", type=str,
help="sampled or fixed binarized MNIST, sample|fixed", default="sample")
parser.add_argument("-eq_samples", type=int,
help="number of samples for the expectation over q(z|x)", default=1)
parser.add_argument("-iw_samples", type=int,
help="number of importance weighted samples", default=1)
parser.add_argument("-lr", type=float,
help="learning rate", default=0.001)
parser.add_argument("-anneal_lr_factor", type=float,
help="learning rate annealing factor", default=0.9995)
parser.add_argument("-anneal_lr_epoch", type=float,
help="learning rate annealing start epoch", default=1000)
parser.add_argument("-batch_norm", type=str,
help="batch normalization", default='true')
parser.add_argument("-outfolder", type=str,
help="output folder", default=os.path.join("results", os.path.splitext(filename_script)[0]))
parser.add_argument("-nonlin_enc", type=str,
help="encoder non-linearity", default="rectify")
parser.add_argument("-nonlin_dec", type=str,
help="decoder non-linearity", default="rectify")
parser.add_argument("-nhidden", type=int,
help="number of hidden units in deterministic layers", default=500)
parser.add_argument("-nlatent", type=int,
help="number of stochastic latent units", default=100)
parser.add_argument("-nflows", type=int,
help="length of normalizing flow", default=5)
parser.add_argument("-batch_size", type=int,
help="batch size", default=100)
parser.add_argument("-nepochs", type=int,
help="number of epochs to train", default=10000)
parser.add_argument("-eval_epoch", type=int,
help="epochs between evaluation of test performance", default=10)
args = parser.parse_args()
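# Example invocation (illustrative flag values, all flags are optional):
#   python iw_vae_normflow.py -dataset sample -eq_samples 1 -iw_samples 5 -nflows 10 -batch_size 100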
def get_nonlin(nonlin):
if nonlin == 'rectify':
return lasagne.nonlinearities.rectify
elif nonlin == 'very_leaky_rectify':
return lasagne.nonlinearities.very_leaky_rectify
elif nonlin == 'tanh':
return lasagne.nonlinearities.tanh
else:
raise ValueError('invalid non-linearity \'' + nonlin + '\'')
iw_samples = args.iw_samples #number of importance weighted samples
eq_samples = args.eq_samples #number of samples for the expectation over E_q(z|x)
lr = args.lr
anneal_lr_factor = args.anneal_lr_factor
anneal_lr_epoch = args.anneal_lr_epoch
batch_norm = args.batch_norm == 'true' or args.batch_norm == 'True'
res_out = args.outfolder
nonlin_enc = get_nonlin(args.nonlin_enc)
nonlin_dec = get_nonlin(args.nonlin_dec)
nhidden = args.nhidden
latent_size = args.nlatent
dataset = args.dataset
nflows = args.nflows
batch_size = args.batch_size
num_epochs = args.nepochs
eval_epoch = args.eval_epoch
assert dataset in ['sample','fixed'], "dataset must be sample|fixed"
np.random.seed(1234) # reproducibility
### SET UP LOGFILE AND OUTPUT FOLDER
if not os.path.exists(res_out):
os.makedirs(res_out)
# write commandline parameters to header of logfile
args_dict = vars(args)
sorted_args = sorted(args_dict.items(), key=operator.itemgetter(0))
description = []
description.append('######################################################')
description.append('# --Commandline Params--')
for name, val in sorted_args:
description.append("# " + name + ":\t" + str(val))
description.append('######################################################')
shutil.copy(os.path.realpath(__file__), os.path.join(res_out, filename_script))
logfile = os.path.join(res_out, 'logfile.log')
model_out = os.path.join(res_out, 'model')
with open(logfile,'w') as f:
for l in description:
f.write(l + '\n')
sym_iw_samples = T.iscalar('iw_samples')
sym_eq_samples = T.iscalar('eq_samples')
sym_lr = T.scalar('lr')
sym_x = T.matrix('x')
def bernoullisample(x):
return np.random.binomial(1,x,size=x.shape).astype(theano.config.floatX)
### LOAD DATA AND SET UP SHARED VARIABLES
if dataset == 'sample':
print "Using real valued MNIST data, re-binarized by Bernoulli sampling after every epoch"
train_x, train_t, valid_x, valid_t, test_x, test_t = load_mnist_realval()
del train_t, valid_t, test_t
preprocesses_dataset = bernoullisample
else:
print "Using fixed binarized MNIST data"
train_x, valid_x, test_x = load_mnist_binarized()
preprocesses_dataset = lambda dataset: dataset #just a dummy function
train_x = np.concatenate([train_x,valid_x])
train_x = train_x.astype(theano.config.floatX)
test_x = test_x.astype(theano.config.floatX)
num_features=train_x.shape[-1]
sh_x_train = theano.shared(preprocesses_dataset(train_x), borrow=True)
sh_x_test = theano.shared(preprocesses_dataset(test_x), borrow=True)
def batchnormlayer(l,num_units, nonlinearity, name, W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.)):
l = lasagne.layers.DenseLayer(l, num_units=num_units, name="Dense-" + name, W=W, b=b, nonlinearity=None)
l = NormalizeLayer(l,name="BN-" + name)
l = ScaleAndShiftLayer(l,name="SaS-" + name)
l = lasagne.layers.NonlinearityLayer(l,nonlinearity=nonlinearity,name="Nonlin-" + name)
return l
def normaldenselayer(l,num_units, nonlinearity, name, W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.)):
l = lasagne.layers.DenseLayer(l, num_units=num_units, name="Dense-" + name, W=W, b=b, nonlinearity=nonlinearity)
return l
if batch_norm:
print "Using batch Normalization - The current implementation calculates " \
"the BN constants on the complete dataset in one batch. This might " \
"cause memory problems on some GFX's"
denselayer = batchnormlayer
else:
denselayer = normaldenselayer
### MODEL SETUP
# Recognition model q(z|x)
l_in = lasagne.layers.InputLayer((None, num_features))
l_enc_h1 = denselayer(l_in, num_units=nhidden, name='ENC_DENSE1', nonlinearity=nonlin_enc)
l_enc_h1 = denselayer(l_enc_h1, num_units=nhidden, name='ENC_DENSE2', nonlinearity=nonlin_enc)
l_mu = lasagne.layers.DenseLayer(l_enc_h1, num_units=latent_size, nonlinearity=lasagne.nonlinearities.identity, name='ENC_MU')
l_log_var = lasagne.layers.DenseLayer(l_enc_h1, num_units=latent_size, nonlinearity=lasagne.nonlinearities.identity, name='ENC_LOG_VAR')
#sample layer
l_z = SampleLayer(mean=l_mu, log_var=l_log_var, eq_samples=sym_eq_samples, iw_samples=sym_iw_samples)
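# SampleLayer presumably draws eq_samples * iw_samples reparameterised samples per input,
# i.e. z = mu + exp(0.5 * log_var) * eps with eps ~ N(0, I); the exact behaviour is
# defined by parmesan's SampleLayer and is not shown in this file.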
#Normalizing Flow
l_logdet_J = []
l_zk = l_z
for i in range(nflows):
l_nf = NormalizingPlanarFlowLayer(l_zk)
l_zk = ListIndexLayer(l_nf,index=0)
l_logdet_J += [ListIndexLayer(l_nf,index=1)] #we need this for the cost function
# Generative model q(x|z)
l_dec_h1 = denselayer(l_zk, num_units=nhidden, name='DEC_DENSE2', nonlinearity=nonlin_dec)
l_dec_h1 = denselayer(l_dec_h1, num_units=nhidden, name='DEC_DENSE1', nonlinearity=nonlin_dec)
l_dec_x_mu = lasagne.layers.DenseLayer(l_dec_h1, num_units=num_features, nonlinearity=lasagne.nonlinearities.sigmoid, name='X_MU')
# get output needed for evaluating of training i.e with noise if any
train_out = lasagne.layers.get_output(
[l_z, l_zk, l_mu, l_log_var, l_dec_x_mu]+l_logdet_J, sym_x, deterministic=False
)
z_train = train_out[0]
zk_train = train_out[1]
z_mu_train = train_out[2]
z_log_var_train = train_out[3]
x_mu_train = train_out[4]
logdet_J_train = train_out[5:]
# get output needed for evaluating of testing i.e without noise
eval_out = lasagne.layers.get_output(
[l_z, l_zk, l_mu, l_log_var, l_dec_x_mu]+l_logdet_J, sym_x, deterministic=True
)
z_eval = eval_out[0]
zk_eval = eval_out[1]
z_mu_eval = eval_out[2]
z_log_var_eval = eval_out[3]
x_mu_eval = eval_out[4]
logdet_J_eval = eval_out[5:]
def latent_gaussian_x_bernoulli(z0, zk, z0_mu, z0_log_var, logdet_J_list, x_mu, x, eq_samples, iw_samples, epsilon=1e-6):
"""
Latent z : gaussian with standard normal prior
decoder output : bernoulli
When the output is bernoulli then the output from the decoder
should be sigmoid. The sizes of the inputs are
z0: (batch_size*eq_samples*iw_samples, num_latent)
zk: (batch_size*eq_samples*iw_samples, num_latent)
z0_mu: (batch_size, num_latent)
z0_log_var: (batch_size, num_latent)
logdet_J_list: list of `nflows` elements, each with shape (batch_size*eq_samples*iw_samples)
x_mu: (batch_size*eq_samples*iw_samples, num_features)
x: (batch_size, num_features)
Reference: Burda et al. 2015 "Importance Weighted Autoencoders"
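The quantity estimated is the importance weighted bound
L_K = E_{z_1..z_K ~ q(z|x)} [ log (1/K) sum_k p(x, z_k) / q(z_k | x) ],
computed in log-space with log_mean_exp over the iw_samples axis; the summed
planar-flow log-det terms turn log q0(z0|x) into the required log qK(zK|x).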
"""
# reshape the variables so batch_size, eq_samples and iw_samples are separate dimensions
z0 = z0.reshape((-1, eq_samples, iw_samples, latent_size))
zk = zk.reshape((-1, eq_samples, iw_samples, latent_size))
x_mu = x_mu.reshape((-1, eq_samples, iw_samples, num_features))
for i in range(len(logdet_J_list)):
logdet_J_list[i] = logdet_J_list[i].reshape((-1, eq_samples, iw_samples))
# dimshuffle x, z_mu and z_log_var since we need to broadcast them when calculating the pdfs
x = x.dimshuffle(0, 'x', 'x', 1) # size: (batch_size, eq_samples, iw_samples, num_features)
z0_mu = z0_mu.dimshuffle(0, 'x', 'x', 1) # size: (batch_size, eq_samples, iw_samples, num_latent)
z0_log_var = z0_log_var.dimshuffle(0, 'x', 'x', 1) # size: (batch_size, eq_samples, iw_samples, num_latent)
# calculate LL components, note that the log_xyz() functions return log prob. for independent components separately
# so we sum over feature/latent dimensions for multivariate pdfs
log_q0z0_given_x = log_normal2(z0, z0_mu, z0_log_var).sum(axis=3)
log_pzk = log_stdnormal(zk).sum(axis=3)
log_px_given_zk = log_bernoulli(x, x_mu, epsilon).sum(axis=3)
#normalizing flow loss
sum_logdet_J = 0
for logdet_J_k in logdet_J_list:
sum_logdet_J += logdet_J_k
# Calculate the LL using log-sum-exp to avoid underflow all log_*** -> shape: (batch_size, eq_samples, iw_samples)
LL = log_mean_exp(log_pzk + log_px_given_zk - log_q0z0_given_x + sum_logdet_J, axis=2) # log-mean-exp over iw_samples dimension -> shape: (batch_size, eq_samples)
LL = T.mean(LL) # average over eq_samples, batch_size dimensions -> shape: ()
return LL, T.mean(log_q0z0_given_x), T.mean(sum_logdet_J), T.mean(log_pzk), T.mean(log_px_given_zk)
# LOWER BOUNDS
LL_train, log_qz_given_x_train, sum_logdet_J_train, log_pz_train, log_px_given_z_train = latent_gaussian_x_bernoulli(
z_train, zk_train, z_mu_train, z_log_var_train, logdet_J_train, x_mu_train, sym_x, eq_samples=sym_eq_samples, iw_samples=sym_iw_samples)
LL_eval, log_qz_given_x_eval, sum_logdet_J_eval, log_pz_eval, log_px_given_z_eval = latent_gaussian_x_bernoulli(
z_eval, zk_eval, z_mu_eval, z_log_var_eval, logdet_J_eval, x_mu_eval, sym_x, eq_samples=sym_eq_samples, iw_samples=sym_iw_samples)
#some sanity checks that we can forward data through the model
X = np.ones((batch_size, 784), dtype=theano.config.floatX) # dummy data for testing the implementation
print "OUTPUT SIZE OF l_z using BS=%d, latent_size=%d, sym_iw_samples=%d, sym_eq_samples=%d --"\
%(batch_size, latent_size, iw_samples, eq_samples), \
lasagne.layers.get_output(l_z,sym_x).eval(
{sym_x: X, sym_iw_samples: np.int32(iw_samples),
sym_eq_samples: np.int32(eq_samples)}).shape
#print "log_pz_train", log_pz_train.eval({sym_x:X, sym_iw_samples: np.int32(iw_samples),sym_eq_samples:np.int32(eq_samples)}).shape
#print "log_px_given_z_train", log_px_given_z_train.eval({sym_x:X, sym_iw_samples: np.int32(iw_samples), sym_eq_samples:np.int32(eq_samples)}).shape
#print "log_qz_given_x_train", log_qz_given_x_train.eval({sym_x:X, sym_iw_samples: np.int32(iw_samples), sym_eq_samples:np.int32(eq_samples)}).shape
#print "lower_bound_train", LL_train.eval({sym_x:X, sym_iw_samples: np.int32(iw_samples), sym_eq_samples:np.int32(eq_samples)}).shape
# get all parameters
params = lasagne.layers.get_all_params([l_dec_x_mu], trainable=True)
for p in params:
print p, p.get_value().shape
# note the minus because we want to push up the lowerbound
grads = T.grad(-LL_train, params)
clip_grad = 1
max_norm = 5
mgrads = lasagne.updates.total_norm_constraint(grads,max_norm=max_norm)
cgrads = [T.clip(g, -clip_grad, clip_grad) for g in mgrads]
updates = lasagne.updates.adam(cgrads, params, beta1=0.9, beta2=0.999, epsilon=1e-4, learning_rate=sym_lr)
# Helper symbolic variables to index into the shared train and test data
sym_index = T.iscalar('index')
sym_batch_size = T.iscalar('batch_size')
batch_slice = slice(sym_index * sym_batch_size, (sym_index + 1) * sym_batch_size)
train_model = theano.function([sym_index, sym_batch_size, sym_lr, sym_eq_samples, sym_iw_samples], [LL_train, log_qz_given_x_train, sum_logdet_J_train, log_pz_train, log_px_given_z_train, z_mu_train, z_log_var_train],
givens={sym_x: sh_x_train[batch_slice]},
updates=updates)
test_model = theano.function([sym_index, sym_batch_size, sym_eq_samples, sym_iw_samples], [LL_eval, log_qz_given_x_eval, sum_logdet_J_eval, log_pz_eval, log_px_given_z_eval],
givens={sym_x: sh_x_test[batch_slice]})
if batch_norm:
collect_out = lasagne.layers.get_output(l_dec_x_mu,sym_x, deterministic=True, collect=True)
f_collect = theano.function([sym_eq_samples, sym_iw_samples],
[collect_out],
givens={sym_x: sh_x_train})
# Training and Testing functions
def train_epoch(lr, eq_samples, iw_samples, batch_size):
n_train_batches = train_x.shape[0] / batch_size
costs, log_qz_given_x, sum_logdet_J, log_pz, log_px_given_z, z_mu_train, z_log_var_train = [],[],[],[],[],[],[]
for i in range(n_train_batches):
cost_batch, log_qz_given_x_batch, sum_logdet_J_batch, log_pz_batch, log_px_given_z_batch, z_mu_batch, z_log_var_batch = train_model(i, batch_size, lr, eq_samples, iw_samples)
costs += [cost_batch]
log_qz_given_x += [log_qz_given_x_batch]
sum_logdet_J += [sum_logdet_J_batch]
log_pz += [log_pz_batch]
log_px_given_z += [log_px_given_z_batch]
z_mu_train += [z_mu_batch]
z_log_var_train += [z_log_var_batch]
return np.mean(costs), np.mean(log_qz_given_x), np.mean(sum_logdet_J), np.mean(log_pz), np.mean(log_px_given_z), np.concatenate(z_mu_train), np.concatenate(z_log_var_train)
def test_epoch(eq_samples, iw_samples, batch_size):
if batch_norm:
_ = f_collect(1,1) #collect BN stats on train
n_test_batches = test_x.shape[0] / batch_size
costs, log_qz_given_x, sum_logdet_J, log_pz, log_px_given_z = [],[],[],[],[]
for i in range(n_test_batches):
cost_batch, log_qz_given_x_batch, sum_logdet_J_batch, log_pz_batch, log_px_given_z_batch = test_model(i, batch_size, eq_samples, iw_samples)
costs += [cost_batch]
log_qz_given_x += [log_qz_given_x_batch]
sum_logdet_J += [sum_logdet_J_batch]
log_pz += [log_pz_batch]
log_px_given_z += [log_px_given_z_batch]
return np.mean(costs), np.mean(log_qz_given_x), np.mean(sum_logdet_J), np.mean(log_pz), np.mean(log_px_given_z)
print "Training"
# TRAIN LOOP
# We have made some of the code very verbose to make it easier to understand.
total_time_start = time.time()
costs_train, log_qz_given_x_train, sum_logdet_J_train, log_pz_train, log_px_given_z_train = [],[],[],[],[]
LL_test1, log_qz_given_x_test1, sum_logdet_J_test1, log_pz_test1, log_px_given_z_test1 = [],[],[],[],[]
LL_test5000, log_qz_given_x_test5000, sum_logdet_J_test5000, log_pz_test5000, log_px_given_z_test5000 = [],[],[],[],[]
xepochs = []
logvar_z_mu_train, logvar_z_var_train, meanvar_z_var_train = None,None,None
for epoch in range(1, 1+num_epochs):
start = time.time()
#shuffle train data and train model
np.random.shuffle(train_x)
sh_x_train.set_value(preprocesses_dataset(train_x))
train_out = train_epoch(lr, eq_samples, iw_samples, batch_size)
if np.isnan(train_out[0]):
raise ValueError("NaN in train LL!")
if epoch >= anneal_lr_epoch:
#annealing learning rate
lr = lr*anneal_lr_factor
if epoch % eval_epoch == 0:
t = time.time() - start
costs_train += [train_out[0]]
log_qz_given_x_train += [train_out[1]]
sum_logdet_J_train += [train_out[2]]
log_pz_train += [train_out[3]]
log_px_given_z_train += [train_out[4]]
z_mu_train = train_out[5]
z_log_var_train = train_out[6]
print "calculating LL eq=1, iw=5000"
test_out5000 = test_epoch(1, 5000, batch_size=5) # smaller batch size to reduce memory requirements
LL_test5000 += [test_out5000[0]]
log_qz_given_x_test5000 += [test_out5000[1]]
sum_logdet_J_test5000 += [test_out5000[2]]
log_pz_test5000 += [test_out5000[3]]
log_px_given_z_test5000 += [test_out5000[4]]
print "calculating LL eq=1, iw=1"
test_out1 = test_epoch(1, 1, batch_size=50)
LL_test1 += [test_out1[0]]
log_qz_given_x_test1 += [test_out1[1]]
sum_logdet_J_test1 += [test_out1[2]]
log_pz_test1 += [test_out1[3]]
log_px_given_z_test1 += [test_out1[4]]
xepochs += [epoch]
line = "*Epoch=%d\tTime=%.2f\tLR=%.5f\teq_samples=%d\tiw_samples=%d\tnflows=%d\n" %(epoch, t, lr, eq_samples, iw_samples, nflows) + \
" TRAIN:\tCost=%.5f\tlogqK(zK|x)=%.5f\t= [logq0(z0|x)=%.5f - sum logdet J=%.5f]\tlogp(zK)=%.5f\tlogp(x|zK)=%.5f\n" %(costs_train[-1], log_qz_given_x_train[-1] - sum_logdet_J_train[-1], log_qz_given_x_train[-1], sum_logdet_J_train[-1], log_pz_train[-1], log_px_given_z_train[-1]) + \
" EVAL-L1:\tCost=%.5f\tlogqK(zK|x)=%.5f\t= [logq0(z0|x)=%.5f - sum logdet J=%.5f]\tlogp(zK)=%.5f\tlogp(x|zK)=%.5f\n" %(LL_test1[-1], log_qz_given_x_test1[-1] - sum_logdet_J_test1[-1], log_qz_given_x_test1[-1], sum_logdet_J_test1[-1], log_pz_test1[-1], log_px_given_z_test1[-1]) + \
" EVAL-L5000:\tCost=%.5f\tlogqK(zK|x)=%.5f\t= [logq0(z0|x)=%.5f - sum logdet J=%.5f]\tlogp(zK)=%.5f\tlogp(x|zK)=%.5f" %(LL_test5000[-1], log_qz_given_x_test5000[-1] - sum_logdet_J_test5000[-1], log_qz_given_x_test5000[-1], sum_logdet_J_test5000[-1], log_pz_test5000[-1], log_px_given_z_test5000[-1])
print line
with open(logfile,'a') as f:
f.write(line + "\n")
#save model every 100'th epochs
if epoch % 100 == 0:
all_params=lasagne.layers.get_all_params([l_dec_x_mu])
f = gzip.open(model_out + 'epoch%i'%(epoch), 'wb')
cPickle.dump(all_params, f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
# BELOW THIS LINE IS A LOT OF BOOK KEEPING AND PLOTTING OF RESULTS
_logvar_z_mu_train = np.log(np.var(z_mu_train,axis=0))
_logvar_z_var_train = np.log(np.var(np.exp(z_log_var_train),axis=0))
_meanvar_z_var_train = np.log(np.mean(np.exp(z_log_var_train),axis=0))
if logvar_z_mu_train is None:
logvar_z_mu_train = _logvar_z_mu_train[:,None]
logvar_z_var_train = _logvar_z_var_train[:,None]
meanvar_z_var_train = _meanvar_z_var_train[:,None]
else:
logvar_z_mu_train = np.concatenate([logvar_z_mu_train,_logvar_z_mu_train[:,None]],axis=1)
logvar_z_var_train = np.concatenate([logvar_z_var_train, _logvar_z_var_train[:,None]],axis=1)
meanvar_z_var_train = np.concatenate([meanvar_z_var_train, _meanvar_z_var_train[:,None]],axis=1)
#plot results
plt.figure(figsize=[12,12])
plt.plot(xepochs,costs_train, label="LL")
plt.plot(xepochs,log_qz_given_x_train, label="logq(z|x)")
plt.plot(xepochs,log_pz_train, label="logp(z)")
plt.plot(xepochs,log_px_given_z_train, label="logp(x|z)")
plt.xlabel('Epochs'), plt.ylabel('log()'), plt.grid('on')
plt.title('Train'), plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(res_out+'/train.png'), plt.close()
plt.figure(figsize=[12,12])
plt.plot(xepochs,LL_test1, label="LL_k1")
plt.plot(xepochs,log_qz_given_x_test1, label="logq(z|x)")
plt.plot(xepochs,log_pz_test1, label="logp(z)")
plt.plot(xepochs,log_px_given_z_test1, label="logp(x|z)")
plt.title('Eval L1'), plt.xlabel('Epochs'), plt.ylabel('log()'), plt.grid('on')
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(res_out+'/eval_L1.png'), plt.close()
plt.figure(figsize=[12,12])
plt.plot(xepochs,LL_test5000, label="LL_k5000")
plt.plot(xepochs,log_qz_given_x_test5000, label="logq(z|x)")
plt.plot(xepochs,log_pz_test5000, label="logp(z)")
plt.plot(xepochs,log_px_given_z_test5000, label="logp(x|z)")
plt.title('Eval L5000'), plt.xlabel('Epochs'), plt.ylabel('log()'), plt.grid('on')
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(res_out+'/eval_L5000.png'), plt.close()
fig, ax = plt.subplots()
data = logvar_z_mu_train
heatmap = ax.pcolor(data, cmap=plt.cm.Greys)
ax.set_xticks(np.arange(data.shape[1])+0.5, minor=False)
ax.set_xticklabels(xepochs, minor=False)
plt.xlabel('Epochs'), plt.ylabel('#Latent Unit'), plt.title('train log(var(mu))'), plt.colorbar(heatmap)
plt.savefig(res_out+'/train_logvar_z_mu_train.png'), plt.close()
fig, ax = plt.subplots()
data = logvar_z_var_train
heatmap = ax.pcolor(data, cmap=plt.cm.Greys)
ax.set_xticks(np.arange(data.shape[1])+0.5, minor=False)
ax.set_xticklabels(xepochs, minor=False)
plt.xlabel('Epochs'), plt.ylabel('#Latent Unit'), plt.title('train log(var(var))'), plt.colorbar(heatmap)
plt.savefig(res_out+'/train_logvar_z_var_train.png'), plt.close()
fig, ax = plt.subplots()
data = meanvar_z_var_train
heatmap = ax.pcolor(data, cmap=plt.cm.Greys)
ax.set_xticks(np.arange(data.shape[1])+0.5, minor=False)
ax.set_xticklabels(xepochs, minor=False)
plt.xlabel('Epochs'), plt.ylabel('#Latent Unit'), plt.title('train log(mean(var))'), plt.colorbar(heatmap)
plt.savefig(res_out+'/train_meanvar_z_var_train.png'), plt.close()
| 48.863158 | 315 | 0.705472 | ["MIT"] | Jungiebumper/parmesan | examples/iw_vae_normflow.py | 23,210 | Python |
from . import base
from grow.common import utils as common_utils
from boto.s3 import connection
from boto.s3 import key
from grow.pods import env
from protorpc import messages
import boto
import cStringIO
import logging
import os
import mimetypes
class Config(messages.Message):
bucket = messages.StringField(1)
access_key = messages.StringField(2)
access_secret = messages.StringField(3)
env = messages.MessageField(env.EnvConfig, 4)
keep_control_dir = messages.BooleanField(5, default=False)
redirect_trailing_slashes = messages.BooleanField(6, default=True)
index_document = messages.StringField(7, default='index.html')
error_document = messages.StringField(8, default='404.html')
class AmazonS3Destination(base.BaseDestination):
KIND = 's3'
Config = Config
def __str__(self):
return 's3://{}'.format(self.config.bucket)
@common_utils.cached_property
def bucket(self):
boto_connection = boto.connect_s3(
self.config.access_key, self.config.access_secret,
calling_format=connection.OrdinaryCallingFormat())
try:
return boto_connection.get_bucket(self.config.bucket)
except boto.exception.S3ResponseError as e:
if e.status == 404:
logging.info('Creating bucket: {}'.format(self.config.bucket))
return boto_connection.create_bucket(self.config.bucket)
raise
def dump(self, pod):
pod.set_env(self.get_env())
return pod.dump(
suffix=self.config.index_document,
append_slashes=self.config.redirect_trailing_slashes)
def prelaunch(self, dry_run=False):
if dry_run:
return
logging.info('Configuring S3 bucket: {}'.format(self.config.bucket))
self.bucket.configure_website(
self.config.index_document,
self.config.error_document)
def write_control_file(self, path, content):
path = os.path.join(self.control_dir, path.lstrip('/'))
return self.write_file(path, content, policy='private')
def read_file(self, path):
file_key = key.Key(self.bucket)
file_key.key = path
try:
return file_key.get_contents_as_string()
except boto.exception.S3ResponseError, e:
if e.status != 404:
raise
raise IOError('File not found: {}'.format(path))
def delete_file(self, path):
bucket_key = key.Key(self.bucket)
bucket_key.key = path.lstrip('/')
self.bucket.delete_key(bucket_key)
def write_file(self, path, content, policy='public-read'):
path = path.lstrip('/')
path = path if path != '' else self.config.index_document
if isinstance(content, unicode):
content = content.encode('utf-8')
bucket_key = key.Key(self.bucket)
bucket_key.key = path
fp = cStringIO.StringIO()
fp.write(content)
mimetype = mimetypes.guess_type(path)[0]
# TODO: Allow configurable headers.
headers = {
'Cache-Control': 'no-cache',
'Content-Type': mimetype if mimetype else 'text/html',
}
fp.seek(0)
bucket_key.set_contents_from_file(fp, headers=headers, replace=True, policy=policy)
fp.close()
| 34.612245 | 91 | 0.651533 | ["MIT"] | davidwtbuxton/grow | grow/deployments/destinations/amazon_s3.py | 3,392 | Python |
# Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from functools import partial, update_wrapper, wraps
from typing import Callable
import pytest
from kedro.pipeline import node
# Different dummy func based on the number of arguments
def constant_output():
return "output" # pragma: no cover
def identity(input1: str):
return input1 # pragma: no cover
def biconcat(input1: str, input2: str):
return input1 + input2 # pragma: no cover
def triconcat(input1: str, input2: str, input3: str):
return input1 + input2 + input3 # pragma: no cover
@pytest.fixture
def simple_tuple_node_list():
return [
(identity, "A", "B"),
(biconcat, ["A", "B"], "C"),
(identity, "C", ["D", "E"]),
(biconcat, ["H", "I"], ["J", "K"]),
(identity, "J", dict(result="K")),
(biconcat, ["J", "K"], dict(result="L")),
(identity, dict(input1="J"), "L"),
(identity, dict(input1="J"), ["L", "M"]),
(identity, dict(input1="J"), dict(result="K")),
(constant_output, None, "M"),
(biconcat, ["N", "O"], None),
(lambda x: None, "F", "G"),
(lambda x: ("a", "b"), "G", ["X", "Y"]),
]
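# Each tuple above is (func, inputs, outputs); inputs and outputs may be a single
# dataset name, a list of names, a dict mapping parameter names to dataset names,
# or None.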
class TestValidNode:
def test_valid(self, simple_tuple_node_list):
nodes = [node(*tup) for tup in simple_tuple_node_list]
assert len(nodes) == len(simple_tuple_node_list)
def test_get_node_func(self):
test_node = node(identity, "A", "B")
assert test_node.func is identity
def test_set_node_func(self):
test_node = node(identity, "A", "B")
test_node.func = decorated_identity
assert test_node.func is decorated_identity
def test_labelled(self):
assert "labeled_node: <lambda>([input1]) -> [output1]" in str(
node(lambda x: None, "input1", "output1", name="labeled_node")
)
def test_call(self):
dummy_node = node(
biconcat, inputs=["input1", "input2"], outputs="output", name="myname"
)
actual = dummy_node(input1="in1", input2="in2")
expected = dummy_node.run(dict(input1="in1", input2="in2"))
assert actual == expected
def test_call_with_non_keyword_arguments(self):
dummy_node = node(
biconcat, inputs=["input1", "input2"], outputs="output", name="myname"
)
pattern = r"__call__\(\) takes 1 positional argument but 2 were given"
with pytest.raises(TypeError, match=pattern):
dummy_node("in1", input2="in2")
def test_run_with_duplicate_inputs_list(self):
dummy_node = node(func=biconcat, inputs=["input1", "input1"], outputs="output")
actual = dummy_node.run(dict(input1="in1"))
assert actual == {"output": "in1in1"}
def test_run_with_duplicate_inputs_dict(self):
dummy_node = node(
func=biconcat, inputs={"input1": "in1", "input2": "in1"}, outputs="output"
)
actual = dummy_node.run(dict(in1="hello"))
assert actual == {"output": "hellohello"}
def test_no_input(self):
assert "constant_output(None) -> [output1]" in str(
node(constant_output, None, "output1")
)
def test_no_output(self):
assert "<lambda>([input1]) -> None" in str(node(lambda x: None, "input1", None))
def test_inputs_none(self):
dummy_node = node(constant_output, None, "output")
assert dummy_node.inputs == []
def test_inputs_str(self):
dummy_node = node(identity, "input1", "output1")
assert dummy_node.inputs == ["input1"]
def test_inputs_dict(self):
dummy_node = node(
biconcat,
{"input1": "in1", "input2": "in2"},
["output2", "output1", "last node"],
)
inputs = dummy_node.inputs
assert isinstance(inputs, list)
assert len(inputs) == 2
assert set(inputs) == {"in1", "in2"}
def test_inputs_list(self):
dummy_node = node(
triconcat,
["input1", "input2", "another node"],
["output1", "output2", "last node"],
)
assert dummy_node.inputs == ["input1", "input2", "another node"]
def test_outputs_none(self):
dummy_node = node(identity, "input", None)
assert dummy_node.outputs == []
def test_outputs_str(self):
dummy_node = node(identity, "input1", "output1")
assert dummy_node.outputs == ["output1"]
def test_outputs_dict(self):
dummy_node = node(
biconcat, ["input1", "input2"], {"output1": "out1", "output2": "out2"}
)
outputs = dummy_node.outputs
assert isinstance(outputs, list)
assert len(outputs) == 2
assert set(outputs) == {"out1", "out2"}
def test_outputs_list(self):
dummy_node = node(
triconcat,
["input2", "input1", "another node"],
["output2", "output1", "last node"],
)
assert dummy_node.outputs == ["output2", "output1", "last node"]
@pytest.mark.parametrize(
"confirms_arg,expected",
[
(None, []),
([], []),
("foo", ["foo"]),
(["foo"], ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
],
)
def test_confirms(self, confirms_arg, expected):
dummy_node = node(identity, "input", None, confirms=confirms_arg)
assert dummy_node.confirms == expected
class TestNodeComparisons:
def test_node_equals(self):
first = node(identity, "input1", "output1", name="a_node")
second = node(identity, "input1", "output1", name="a_node")
assert first == second
assert first is not second
def test_node_less_than(self):
first = node(identity, "input1", "output1", name="A")
second = node(identity, "input1", "output1", name="B")
assert first < second
assert first is not second
def test_node_invalid_equals(self):
n = node(identity, "input1", "output1", name="a_node")
assert n != "hello"
def test_node_invalid_less_than(self):
n = node(identity, "input1", "output1", name="a_node")
pattern = "'<' not supported between instances of 'Node' and 'str'"
with pytest.raises(TypeError, match=pattern):
n < "hello" # pylint: disable=pointless-statement
def test_different_input_list_order_not_equal(self):
first = node(biconcat, ["input1", "input2"], "output1", name="A")
second = node(biconcat, ["input2", "input1"], "output1", name="A")
assert first != second
def test_different_output_list_order_not_equal(self):
first = node(identity, "input1", ["output1", "output2"], name="A")
second = node(identity, "input1", ["output2", "output1"], name="A")
assert first != second
def test_different_input_dict_order_equal(self):
first = node(biconcat, {"input1": "a", "input2": "b"}, "output1", name="A")
second = node(biconcat, {"input2": "b", "input1": "a"}, "output1", name="A")
assert first == second
def test_different_output_dict_order_equal(self):
first = node(identity, "input1", {"output1": "a", "output2": "b"}, name="A")
second = node(identity, "input1", {"output2": "b", "output1": "a"}, name="A")
assert first == second
def test_input_dict_list_not_equal(self):
first = node(biconcat, ["input1", "input2"], "output1", name="A")
second = node(
biconcat, {"input1": "input1", "input2": "input2"}, "output1", name="A"
)
assert first != second
def test_output_dict_list_not_equal(self):
first = node(identity, "input1", ["output1", "output2"], name="A")
second = node(
identity, "input1", {"output1": "output1", "output2": "output2"}, name="A"
)
assert first != second
def bad_input_type_node():
return lambda x: None, ("A", "D"), "B"
def bad_output_type_node():
return lambda x: None, "A", {"B", "C"}
def bad_function_type_node():
return "A", "B", "C"
def no_input_or_output_node():
return constant_output, None, None
def input_same_as_output_node():
return biconcat, ["A", "B"], dict(a="A")
def duplicate_output_dict_node():
return identity, "A", dict(a="A", b="A")
def duplicate_output_list_node():
return identity, "A", ["A", "A"]
@pytest.mark.parametrize(
"func, expected",
[
(bad_input_type_node, r"`inputs` type must be one of "),
(bad_output_type_node, r"`outputs` type must be one of "),
(bad_function_type_node, r"first argument must be a function"),
(no_input_or_output_node, r"it must have some `inputs` or `outputs`"),
(
input_same_as_output_node,
r"A node cannot have the same inputs and outputs: {\'A\'}",
),
(
duplicate_output_dict_node,
r"Failed to create node identity"
r"\(\[A\]\) -> \[A,A\] due to "
r"duplicate output\(s\) {\'A\'}.",
),
(
duplicate_output_list_node,
r"Failed to create node identity"
r"\(\[A\]\) -> \[A,A\] due to "
r"duplicate output\(s\) {\'A\'}.",
),
],
)
def test_bad_node(func, expected):
with pytest.raises(ValueError, match=expected):
node(*func())
def inconsistent_input_size():
return identity, ["A", "B"], "C"
def inconsistent_input_args():
def dummy_func_args(*args):
return "".join([*args]) # pragma: no cover
return dummy_func_args, {"a": "A"}, "B"
def inconsistent_input_kwargs():
def dummy_func_args(**kwargs):
return list(kwargs.values()) # pragma: no cover
return dummy_func_args, "A", "B"
lambda_identity = lambda input1: input1 # noqa: disable=E731
def lambda_inconsistent_input_size():
return lambda_identity, ["A", "B"], "C"
partial_identity = partial(identity)
def partial_inconsistent_input_size():
return partial_identity, ["A", "B"], "C"
@pytest.mark.parametrize(
"func, expected",
[
(
inconsistent_input_size,
r"Inputs of 'identity' function expected \[\'input1\'\], but got \[\'A\', \'B\'\]",
),
(
inconsistent_input_args,
r"Inputs of 'dummy_func_args' function expected \[\'args\'\], but got {\'a\': \'A\'}",
),
(
inconsistent_input_kwargs,
r"Inputs of 'dummy_func_args' function expected \[\'kwargs\'\], but got A",
),
(
lambda_inconsistent_input_size,
r"Inputs of '<lambda>' function expected \[\'input1\'\], but got \[\'A\', \'B\'\]",
),
(
partial_inconsistent_input_size,
r"Inputs of '<partial>' function expected \[\'input1\'\], but got \[\'A\', \'B\'\]",
),
],
)
def test_bad_input(func, expected):
with pytest.raises(TypeError, match=expected):
node(*func())
def apply_f(func: Callable) -> Callable:
@wraps(func)
def with_f(*args, **kwargs):
return func(*[f"f({a})" for a in args], **kwargs)
return with_f
def apply_g(func: Callable) -> Callable:
@wraps(func)
def with_g(*args, **kwargs):
return func(*[f"g({a})" for a in args], **kwargs)
return with_g
def apply_h(func: Callable) -> Callable:
@wraps(func)
def with_h(*args, **kwargs):
return func(*[f"h({a})" for a in args], **kwargs)
return with_h
def apply_ij(func: Callable) -> Callable:
@wraps(func)
def with_ij(*args, **kwargs):
return func(*[f"ij({a})" for a in args], **kwargs)
return with_ij
@apply_f
def decorated_identity(value):
return value
class TestTagDecorator:
def test_apply_decorators(self):
old_node = node(apply_g(decorated_identity), "input", "output", name="node")
pattern = (
"The node's `decorate` API will be deprecated in Kedro 0.18.0."
"Please use a node's Hooks to extend the node's behaviour in a pipeline."
"For more information, please visit"
"https://kedro.readthedocs.io/en/stable/07_extend_kedro/04_hooks.html"
)
with pytest.warns(DeprecationWarning, match=re.escape(pattern)):
new_node = old_node.decorate(apply_h, apply_ij)
result = new_node.run(dict(input=1))
assert old_node.name == new_node.name
assert "output" in result
assert result["output"] == "f(g(ij(h(1))))"
def test_tag_nodes(self):
tagged_node = node(identity, "input", "output", tags=["hello"]).tag(["world"])
assert "hello" in tagged_node.tags
assert "world" in tagged_node.tags
assert len(tagged_node.tags) == 2
def test_tag_nodes_single_tag(self):
tagged_node = node(identity, "input", "output", tags="hello").tag("world")
assert "hello" in tagged_node.tags
assert "world" in tagged_node.tags
assert len(tagged_node.tags) == 2
def test_tag_and_decorate(self):
tagged_node = node(identity, "input", "output", tags=["hello"])
tagged_node = tagged_node.decorate(apply_f)
tagged_node = tagged_node.tag(["world"])
assert "hello" in tagged_node.tags
assert "world" in tagged_node.tags
assert tagged_node.run(dict(input=1))["output"] == "f(1)"
class TestNames:
def test_named(self):
n = node(identity, ["in"], ["out"], name="name")
assert str(n) == "name: identity([in]) -> [out]"
assert n.name == "name"
assert n.short_name == "name"
@pytest.mark.parametrize("bad_name", ["name,with,comma", "name with space"])
def test_invalid_name(self, bad_name):
pattern = (
f"'{bad_name}' is not a valid node name. "
f"It must contain only letters, digits, hyphens, "
f"underscores and/or fullstops."
)
with pytest.raises(ValueError, match=re.escape(pattern)):
node(identity, ["in"], ["out"], name=bad_name)
def test_namespaced(self):
n = node(identity, ["in"], ["out"], namespace="namespace")
assert str(n) == "identity([in]) -> [out]"
assert n.name == "namespace.identity([in]) -> [out]"
assert n.short_name == "Identity"
def test_named_and_namespaced(self):
n = node(identity, ["in"], ["out"], name="name", namespace="namespace")
assert str(n) == "name: identity([in]) -> [out]"
assert n.name == "namespace.name"
assert n.short_name == "name"
def test_function(self):
n = node(identity, ["in"], ["out"])
assert str(n) == "identity([in]) -> [out]"
assert n.name == "identity([in]) -> [out]"
assert n.short_name == "Identity"
def test_lambda(self):
n = node(lambda a: a, ["in"], ["out"])
assert str(n) == "<lambda>([in]) -> [out]"
assert n.name == "<lambda>([in]) -> [out]"
assert n.short_name == "<Lambda>"
def test_partial(self):
n = node(partial(identity), ["in"], ["out"])
assert str(n) == "<partial>([in]) -> [out]"
assert n.name == "<partial>([in]) -> [out]"
assert n.short_name == "<Partial>"
def test_updated_partial(self):
n = node(update_wrapper(partial(identity), identity), ["in"], ["out"])
assert str(n) == "identity([in]) -> [out]"
assert n.name == "identity([in]) -> [out]"
assert n.short_name == "Identity"
def test_updated_partial_dict_inputs(self):
n = node(
update_wrapper(partial(biconcat, input1=["in1"]), biconcat),
dict(input2="in2"),
["out"],
)
assert str(n) == "biconcat([in2]) -> [out]"
assert n.name == "biconcat([in2]) -> [out]"
assert n.short_name == "Biconcat"
| 33.747554 | 98 | 0.595071 | [
"Apache-2.0"
] | getindata/kedro | tests/pipeline/test_node.py | 17,245 | Python |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Top level functions that can be used to launch a Process."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from . import manager
from . import processes
from . import utils
__all__ = 'run', 'run_get_pid', 'run_get_node', 'submit'
def run(process, *args, **inputs):
"""
Run the process with the supplied inputs in a local runner that will block until the process is completed.
The return value will be the results of the completed process
:param process: the process class or workfunction to run
:param inputs: the inputs to be passed to the process
:return: the outputs of the process
"""
if isinstance(process, processes.Process):
runner = process.runner
else:
runner = manager.AiiDAManager.get_runner()
return runner.run(process, *args, **inputs)
def run_get_node(process, *args, **inputs):
"""
Run the process with the supplied inputs in a local runner that will block until the process is completed.
The return value will be the results of the completed process
:param process: the process class or workfunction to run
:param inputs: the inputs to be passed to the process
:return: tuple of the outputs of the process and the calculation node
"""
if isinstance(process, processes.Process):
runner = process.runner
else:
runner = manager.AiiDAManager.get_runner()
return runner.run_get_node(process, *args, **inputs)
def run_get_pid(process, *args, **inputs):
"""
Run the process with the supplied inputs in a local runner that will block until the process is completed.
The return value will be the results of the completed process
:param process: the process class or workfunction to run
:param inputs: the inputs to be passed to the process
:return: tuple of the outputs of the process and process pid
"""
if isinstance(process, processes.Process):
runner = process.runner
else:
runner = manager.AiiDAManager.get_runner()
return runner.run_get_pid(process, *args, **inputs)
def submit(process, **inputs):
"""
Submit the process with the supplied inputs to the daemon runners immediately returning control to
the interpreter. The return value will be the calculation node of the submitted process.
:param process: the process class to submit
:param inputs: the inputs to be passed to the process
:return: the calculation node of the process
"""
assert not utils.is_workfunction(process), 'Cannot submit a workfunction'
runner = manager.AiiDAManager.get_runner()
controller = manager.AiiDAManager.get_process_controller()
process = processes.instantiate_process(runner, process, **inputs)
runner.persister.save_checkpoint(process)
process.close()
    # Do not wait for the future's result, because in the case of a single worker this would deadlock itself
controller.continue_process(process.pid, nowait=False, no_reply=True)
return process.calc
# Allow one to also use run.get_node and run.get_pid as a shortcut, without having to import the functions themselves
run.get_node = run_get_node
run.get_pid = run_get_pid
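# Illustrative usage (``SomeProcess`` and ``inputs`` are placeholders, not part of this module):
#
#     results = run(SomeProcess, **inputs)                # block until the process finishes
#     results, node = run.get_node(SomeProcess, **inputs)
#     calc_node = submit(SomeProcess, **inputs)           # hand the process to the daemon runners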
| 38.73 | 117 | 0.670281 | [
"BSD-2-Clause"
] | JuDFTteam/aiida_core | aiida/work/launch.py | 3,873 | Python |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: See license.txt
import frappe
from frappe.model.document import Document
class Note(Document):
def autoname(self):
# replace forbidden characters
import re
self.name = re.sub("[%'\"#*?`]", "", self.title.strip())
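		# e.g. a title of  My "Note" #1?  becomes the name  My Note 1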
def validate(self):
if self.notify_on_login and not self.expire_notification_on:
# expire this notification in a week (default)
self.expire_notification_on = frappe.utils.add_days(self.creation, 7)
def before_print(self, settings=None):
self.print_heading = self.name
self.sub_heading = ""
@frappe.whitelist()
def mark_as_seen(note):
note = frappe.get_doc('Note', note)
if frappe.session.user not in [d.user for d in note.seen_by]:
note.append('seen_by', {'user': frappe.session.user})
note.save(ignore_version=True)
def get_permission_query_conditions(user):
if not user: user = frappe.session.user
if user == "Administrator":
return ""
return """(`tabNote`.public=1 or `tabNote`.owner="{user}")""".format(user=user)
def has_permission(doc, ptype, user):
if doc.public == 1 or user == "Administrator":
return True
if user == doc.owner:
return True
return False
| 26.217391 | 80 | 0.718905 | [
"MIT"
] | 18alantom/frappe | frappe/desk/doctype/note/note.py | 1,206 | Python |
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""Setup for YDK
"""
from __future__ import print_function
import os
import subprocess
import sysconfig
from setuptools.command.build_ext import build_ext
from setuptools import setup, Extension, find_packages
NMSP_PKG_NAME = "$PACKAGE$"
NMSP_PKG_VERSION = "$VERSION$"
NMSP_PKG_DEPENDENCIES = ["$DEPENDENCY$"]
# Define and modify version number and package name here,
# Namespace packages are share same prefix: "ydk-models"
NAME = 'ydk'
VERSION = '0.7.1'
INSTALL_REQUIREMENTS = ['pybind11>=2.1.1']
LONG_DESCRIPTION = '''
The YANG Development Kit (YDK) is a Software Development Kit
that provides API's that are modeled in YANG. The main goal
of YDK is to reduce the learning curve of YANG data models by
expressing the model semantics in an API and abstracting
protocol/encoding details. YDK is composed of a core package
that defines services and providers, plus one or more module
bundles that are based on YANG models.
'''
YDK_PACKAGES = find_packages(exclude=['contrib', 'docs*', 'tests*',
'ncclient', 'samples'])
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class YdkBuildExtension(build_ext):
def run(self):
try:
cmake3_installed = (
0 == subprocess.call(['which', 'cmake3'], stdout=subprocess.PIPE, stderr=subprocess.PIPE))
if not cmake3_installed:
subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
try:
import pybind11
except ImportError:
import pip
pip.main(['install', 'pybind11>=2.1.1'])
import pybind11
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
coverage_compiler_flag = '-DCOVERAGE=False'
if 'YDK_COVERAGE' in os.environ:
coverage_compiler_flag = '-DCOVERAGE=True'
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={0}'.format(extdir),
'-DPYBIND11_INCLUDE={0};{1}'.format(
pybind11.get_include(),
pybind11.get_include(user=True)),
'-DPYTHON_VERSION={0}'.format(
get_python_version()),
'-DCMAKE_BUILD_TYPE=Release',
coverage_compiler_flag]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
cmake3_installed = (0 == subprocess.call(['which', 'cmake3'], stdout=subprocess.PIPE, stderr=subprocess.PIPE))
if(cmake3_installed):
cmake_executable = 'cmake3'
else:
cmake_executable = 'cmake'
subprocess.check_call([cmake_executable, ext.sourcedir] + cmake_args, cwd=self.build_temp)
subprocess.check_call([cmake_executable, '--build', '.'], cwd=self.build_temp)
def get_python_version():
python_version = sysconfig.get_config_var('LDVERSION')
if python_version is None or len(python_version) == 0:
python_version = sysconfig.get_config_var('VERSION')
return python_version
setup(
name=NAME,
version=VERSION,
description='YDK Python SDK',
long_description=LONG_DESCRIPTION,
url='https://github.com/CiscoDevNet/ydk-py',
author='Cisco Systems',
author_email='[email protected]',
license='Apache 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: C++'
],
keywords='yang, C++11, python bindings ',
packages=YDK_PACKAGES,
install_requires=INSTALL_REQUIREMENTS,
ext_modules=[CMakeExtension('ydk_')],
cmdclass={
'build_ext' :YdkBuildExtension
},
zip_safe=False,
)
| 37.741722 | 118 | 0.610633 | [
"ECL-2.0",
"Apache-2.0"
] | bopopescu/ACI | core/setup.py | 5,699 | Python |
import ipaddress
from socket import inet_ntoa
from async_service import background_trio_service
from eth_utils import encode_hex
import pytest
import trio
from web3 import IPCProvider, Web3
from ddht.rpc import RPCServer
from ddht.tools.web3 import DiscoveryV5Module
from ddht.v5_1.rpc_handlers import get_v51_rpc_handlers
@pytest.fixture
async def rpc_server(ipc_path, alice):
async with alice.network() as network:
server = RPCServer(ipc_path, get_v51_rpc_handlers(network))
async with background_trio_service(server):
await server.wait_serving()
yield server
@pytest.fixture
def w3(rpc_server, ipc_path):
return Web3(IPCProvider(ipc_path), modules={"discv5": (DiscoveryV5Module,)})
@pytest.fixture
async def bob_network(bob):
async with bob.network() as network:
yield network
@pytest.fixture(params=("nodeid", "enode", "enr"))
def ping_params(request, alice, bob, bob_network):
alice.enr_db.set_enr(bob.enr)
if request.param == "nodeid":
return [bob.node_id.hex()]
elif request.param == "enode":
return [f"enode://{bob.node_id.hex()}@{bob.endpoint}"]
elif request.param == "enr":
return [repr(bob.enr)]
else:
raise Exception(f"Unhandled param: {request.param}")
@pytest.mark.trio
async def test_rpc_ping(make_request, ping_params, alice, bob):
pong = await make_request("discv5_ping", ping_params)
assert pong["enr_seq"] == bob.enr.sequence_number
assert pong["packet_ip"] == inet_ntoa(alice.endpoint.ip_address)
assert pong["packet_port"] == alice.endpoint.port
@pytest.fixture(params=("nodeid", "nodeid-hex", "enode", "enr", "enr-repr"))
def ping_param_w3(request, alice, bob, bob_network):
alice.enr_db.set_enr(bob.enr)
if request.param == "nodeid":
return bob.node_id
elif request.param == "nodeid-hex":
return encode_hex(bob.node_id)
elif request.param == "enode":
return f"enode://{bob.node_id.hex()}@{bob.endpoint}"
elif request.param == "enr":
return bob.enr
elif request.param == "enr-repr":
return repr(bob.enr)
else:
raise Exception(f"Unhandled param: {request.param}")
@pytest.mark.trio
async def test_rpc_ping_web3(make_request, ping_param_w3, alice, bob, w3):
pong = await trio.to_thread.run_sync(w3.discv5.ping, ping_param_w3)
assert pong.enr_seq == bob.enr.sequence_number
assert pong.packet_ip == ipaddress.ip_address(alice.endpoint.ip_address)
assert pong.packet_port == alice.endpoint.port
| 31.493827 | 80 | 0.704822 | [
"MIT"
] | vaporydev/ddht | tests/core/v5_1/test_v51_rpc_handlers.py | 2,551 | Python |
from ..utils import Object
class PageBlockRelatedArticle(Object):
"""
Contains information about a related article
Attributes:
ID (:obj:`str`): ``PageBlockRelatedArticle``
Args:
url (:obj:`str`):
Related article URL
title (:obj:`str`):
Article title; may be empty
description (:obj:`str`):
Article description; may be empty
photo (:class:`telegram.api.types.photo`):
Article photo; may be null
author (:obj:`str`):
Article author; may be empty
publish_date (:obj:`int`):
Point in time (Unix timestamp) when the article was published; 0 if unknown
Returns:
PageBlockRelatedArticle
Raises:
:class:`telegram.Error`
"""
ID = "pageBlockRelatedArticle"
def __init__(self, url, title, description, photo, author, publish_date, **kwargs):
self.url = url # str
self.title = title # str
self.description = description # str
self.photo = photo # Photo
self.author = author # str
self.publish_date = publish_date # int
@staticmethod
def read(q: dict, *args) -> "PageBlockRelatedArticle":
url = q.get('url')
title = q.get('title')
description = q.get('description')
photo = Object.read(q.get('photo'))
author = q.get('author')
publish_date = q.get('publish_date')
return PageBlockRelatedArticle(url, title, description, photo, author, publish_date)
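# Illustrative construction from a raw update dict (all field values are placeholders):
#
#     article = PageBlockRelatedArticle.read({
#         "url": "https://example.com/article",
#         "title": "Example title",
#         "description": "",
#         "photo": None,
#         "author": "",
#         "publish_date": 0,
#     })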
| 29.396226 | 92 | 0.589217 | [
"MIT"
] | iTeam-co/pytglib | pytglib/api/types/page_block_related_article.py | 1,558 | Python |
from django.db import models
from django.core.validators import URLValidator
from django.contrib.auth.models import User
from tinymce.models import HTMLField
# Create your models here.
class Project(models.Model):
title = models.CharField(max_length = 50)
image = models.ImageField(upload_to = 'projects/')
description = models.TextField(max_length = 500)
link = models.TextField(validators=[URLValidator()],null=True)
profile = models.ForeignKey(User,on_delete=models.CASCADE, null=True)
design=models.PositiveIntegerField(choices=list(zip(range(1, 11), range(1, 11))), default=1)
usability=models.PositiveIntegerField(choices=list(zip(range(1, 11), range(1, 11))), default=1)
content=models.PositiveIntegerField(choices=list(zip(range(1, 11), range(1, 11))), default=1)
comment =models.TextField(validators=[URLValidator()],null=True)
def save_project(self):
self.save()
def delete_project(self):
self.delete()
@classmethod
def get_all(cls):
projects = cls.objects.all()
return projects
@classmethod
def get_project(cls, project_id):
project = cls.objects.get(id=project_id)
return project
@classmethod
def search_by_title(cls,search_term):
projects_title = cls.objects.filter(title__icontains=search_term)
return projects_title
class Profile(models.Model):
photo = models.ImageField(upload_to = 'profile/')
profile = models.ForeignKey(User,on_delete=models.CASCADE,null = True)
bio = models.TextField(max_length = 100)
contact = models.IntegerField()
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
class Comment(models.Model):
comment = models.CharField(max_length = 400)
def save_comment(self):
        self.save()
 | 34.018519 | 99 | 0.704954 | [
"Unlicense"
] | drewheathens/Awwwards | upload/models.py | 1,837 | Python |
n = int(input())
x = int(input())
# n = 5 : 101 => x ** 4 * x ** 1
ans = 1
while n > 0:
    if n & 1:
        ans *= x
    n >>= 1
    x *= x
print(ans)
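# Worked trace (illustrative) for n = 5, x = 2:
#   n=5 (odd)  -> ans = 2,  then n=2, x=4
#   n=2 (even) -> ans = 2,  then n=1, x=16
#   n=1 (odd)  -> ans = 32, then n=0, x=256
# prints 32, i.e. 2 ** 5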
| 12 | 32 | 0.343137 | [
"MIT"
] | freepvps/hsesamples | src/2sem/pow.py | 204 | Python |
##
# Copyright (c) 2007-2016 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from caldavclientlibrary.protocol.http.session import Session
from caldavclientlibrary.protocol.webdav.head import Head
import unittest
class TestRequest(unittest.TestCase):
def test_Method(self):
server = Session("www.example.com")
request = Head(server, "/")
self.assertEqual(request.getMethod(), "HEAD")
class TestRequestHeaders(unittest.TestCase):
def test_NoSpecialHeaders(self):
server = Session("www.example.com")
request = Head(server, "/")
hdrs = request.generateRequestHeader()
self.assertFalse("If-None-Match:" in hdrs)
self.assertFalse("If-Match:" in hdrs)
def test_IfMatchHeader(self):
server = Session("www.example.com")
request = Head(server, "/")
request.setData(None, etag="\"12345\"")
hdrs = request.generateRequestHeader()
self.assertFalse("If-None-Match:" in hdrs)
self.assertTrue("If-Match: \"12345\"" in hdrs)
class TestRequestBody(unittest.TestCase):
pass
class TestResponse(unittest.TestCase):
pass
class TestResponseHeaders(unittest.TestCase):
pass
class TestResponseBody(unittest.TestCase):
pass
| 27.090909 | 74 | 0.705817 | [
"Apache-2.0"
] | DalavanCloud/ccs-caldavclientlibrary | caldavclientlibrary/protocol/webdav/tests/test_head.py | 1,788 | Python |
import random
from typing import List, Optional, Tuple
import numpy as np
import gym
import wandb
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import Sequential, Input, Model
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Concatenate
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import clone_model
from utils import ReplayBuffer, Experience
class DQN:
def __init__(
self,
env:gym.Env,
combined_observation_space:Tuple[Tuple[int,int,int], int],
lr:float,
gamma:float,
epsilon:float,
epsilon_decay:float,
target_update_interval: int = 100,
log_wandb: bool=False,
replay_buffer:Optional[ReplayBuffer]=None,
fc_layers:Optional[List[int]]=None,
conv_layers:Optional[List[int]]=None
):
"""
Construct a new 'Deep Q-Network' object.
        :param env: The game environment
        :param combined_observation_space: Shapes of the image observation and of the auxiliary feature vector
        :param lr: The learning rate of the agent
        :param gamma: The discount factor weighting future rewards in the value function
        :param epsilon: The probability of choosing a random action instead of the "greedy" action with the highest Q-value
        :param epsilon_decay: The factor by which epsilon is multiplied after each episode
        :param target_update_interval: The number of steps between updates of the target network
        :param log_wandb: Whether to log training metrics to Weights & Biases
        :param replay_buffer: Replay memory object to store and sample observations from for training.
            Defaults to a double-ended queue with a maximum length of 2,500 experiences.
        :param fc_layers: Sizes of the fully connected hidden layers (defaults to [128, 128, 128])
        :param conv_layers: Numbers of filters in the convolutional layers (defaults to [32, 64, 128])
"""
self.log_wandb = log_wandb
self.env = env
self.action_space = env.action_space
self.combined_observation_space = combined_observation_space
self.lr = lr
self.gamma = gamma
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.target_update_interval = target_update_interval
self.rewards_list = []
# store trajectories of experience when executing a policy in an environment
self.buffer = replay_buffer if replay_buffer else ReplayBuffer(maxlen=2_500)
self.batch_size = 64
self.epsilon_min = 0.01
        # agents have either a discrete or a continuous action space
self.num_action_space = 4
self.fc_layers = [128,128,128] if not fc_layers else fc_layers
assert len(self.fc_layers) >= 1, "You need at least one hidden layer"
self.conv_layers = [32, 64, 128] if not conv_layers else conv_layers
assert len(self.conv_layers) >= 1, "You need at least one hidden layer"
self.model = self.initialize_model()
self.model_target = clone_model(self.model)
# Track the hyperparameters
if self.log_wandb:
wandb.config.update({
"lr": self.lr,
"gamma": self.gamma,
"epsilon": self.epsilon,
"epsilon_decay": self.epsilon_decay,
"target_update_interval": self.target_update_interval,
"batch_size": self.batch_size,
"fc_layers": self.fc_layers
})
def initialize_model(self):
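        # The network has two inputs: a small CNN over the image observation whose
        # flattened output is concatenated with the auxiliary feature vector, followed
        # by fully connected layers and a linear head with one Q-value per action.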
conv_layers = self.conv_layers[:] # Make a copy
first_conv_layer = conv_layers.pop(0)
i1 = Input(shape=self.combined_observation_space[0])
i2 = Input(shape=self.combined_observation_space[1])
x = Conv2D(first_conv_layer,8,4, padding="same", activation="relu")(i1)
for conv_layer in conv_layers:
x = Conv2D(conv_layer,3,4,padding="same", activation="relu")(x)
x = Flatten()(x)
x = Concatenate(axis=1)([x,i2])
layer = self.fc_layers[:] # Make a copy
first_layer = layer.pop(0)
x = Dense(first_layer, activation="relu")(x)
# Hidden fc_layers
for layer in layer:
x = Dense(layer, activation="relu")(x)
# the number of ending neurons is equal to the number of action space
out = Dense(self.num_action_space, activation="linear")(x)
model = Model(inputs = [i1, i2], outputs = out)
# Compile the model with MSE of TD-Error with Adam
model.compile(loss="mean_squared_error", optimizer=Adam(learning_rate=self.lr))
return model
def get_action(self, state):
        # a random action is chosen when a randomly drawn number is lower than epsilon
if np.random.rand() < self.epsilon:
return random.randint(0,3)
# if not, the model will predict the action with its current state
predicted_actions = self.model.predict([tf.expand_dims(state[0], axis=0),tf.expand_dims(state[1], axis=0)])
# returns the index of the actions with the highest score
return np.argmax(predicted_actions[0])
def update_weights(self):
# buffer size check
if len(self.buffer) < self.batch_size:
return
# randomly sample a replay memory with the size of the batch
# getting the states, actions, rewards, next_state and done_list from the random sample
states, actions, rewards, next_states, done_list = self.buffer.sample(self.batch_size, dqn=True)
# calculate the loss to create a target vector for the model to fit with the states
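        # Bellman target: y = r + gamma * max_a' Q_target(s', a') * (1 - done),
        # so terminal transitions keep only the immediate reward.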
targets = rewards + self.gamma * (np.max(self.model_target.predict_on_batch([
np.concatenate(next_states[0]).reshape(-1, *self.combined_observation_space[0]),
np.concatenate(next_states[1]).reshape(-1, self.combined_observation_space[1])
]), axis=1)) * (1 - done_list)
target_vec = self.model.predict_on_batch([
np.concatenate(states[0]).reshape(-1, *self.combined_observation_space[0]),
np.concatenate(states[1]).reshape(-1, self.combined_observation_space[1])
])
indexes = np.array([i for i in range(self.batch_size)])
target_vec[[indexes], [actions]] = targets
# fit the model with the states and the target vector for one iteration
self.model.fit([
np.concatenate(states[0]).reshape(-1, *self.combined_observation_space[0]),
np.concatenate(states[1]).reshape(-1, self.combined_observation_space[1])
], target_vec, epochs=1, verbose=0)
def _update_target(self, target_weights, weights):
for target_weight, weight in zip(target_weights, weights):
target_weight.assign(weight)
def train(self, num_episodes=1000, mean_stopping=True):
# iterate over the number of episodes
for episode in range(num_episodes):
state = self.env.reset()
reward_for_episode = 0
max_num_steps = 1000
for step in range(max_num_steps):
# get the action for the current state
action = self.get_action(state)
if isinstance(action, tf.Tensor):
action = action.numpy()
# get the next_state, reward, done and info after running the action
next_state, reward, done, info = self.env.step(int(action))
# store the experience in replay memory
self.buffer.append(Experience(state, action, reward, next_state, done))
# add up rewards
reward_for_episode += reward
state = next_state
# train dqn
self.update_weights()
# Every k steps, copy actual network weights to the target network weights
if (step + 1) % self.target_update_interval == 0:
self._update_target(self.model_target.variables, self.model.variables)
if done: break
self.rewards_list.append(reward_for_episode)
# decay the epsilon after each episode
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
# check for terminal condition
last_rewards_mean = np.mean(self.rewards_list[-100:])
if last_rewards_mean > 250 and mean_stopping:
print("DQN Training Complete...")
break
print("[{:0>3}] Reward: {: >8.3f} | Avg Reward: {: >8.3f} | e: {:.3f} | Episode Length: {:}"
.format(episode, reward_for_episode, last_rewards_mean, self.epsilon, step))
if self.log_wandb:
wandb.log({
"Episode": episode,
"Reward": reward_for_episode,
"Avg-Reward-100e": last_rewards_mean,
"Epsilon": self.epsilon,
"Episode Length": step
})
def save(self, path:str):
        self.model.save(path)
 | 41.574766 | 142 | 0.619085 | [
"MIT"
] | kiritowu/Deep-Learning | RL/Snake-DQN/model/dqn_engineered.py | 8,901 | Python |
#!/usr/bin/env python
# coding: utf-8
from saenopy import Solver
# initialize the object
M = Solver()
from saenopy.materials import SemiAffineFiberMaterial
# provide a material model
material = SemiAffineFiberMaterial(1645, 0.0008, 1.0075, 0.033)
M.setMaterialModel(material)
import numpy as np
# define the coordinates of the nodes of the mesh
# the array has to have the shape N_v x 3
R = np.array([[0., 0., 0.], # 0
[0., 1., 0.], # 1
[1., 1., 0.], # 2
[1., 0., 0.], # 3
[0., 0., 1.], # 4
[1., 0., 1.], # 5
[1., 1., 1.], # 6
[0., 1., 1.]]) # 7
# define the tetrahedra of the mesh
# the array has to have the shape N_t x 4
# every entry is an index referencing a verces in R (indices start with 0)
T = np.array([[0, 1, 7, 2],
[0, 2, 5, 3],
[0, 4, 5, 7],
[2, 5, 6, 7],
[0, 7, 5, 2]])
# provide the node data
M.setNodes(R)
# and the tetrahedron data
M.setTetrahedra(T)
# the displacements of the nodes which shall be fitted
# during the solving
U = np.array([[0 , 0, 0], # 0
[0 , 0, 0], # 1
[0.01, 0, 0], # 2
[0.01, 0, 0], # 3
[0 , 0, 0], # 4
[0.01, 0, 0], # 5
[0.01, 0, 0], # 6
[0 , 0, 0]]) # 7
# hand the displacements over to the class instance
M.setTargetDisplacements(U)
# call the regularisation
M.solve_regularized(stepper=0.1, alpha=0.001);
M.viewMesh(50, 1)
| 25.730159 | 75 | 0.488587 | [
"MIT"
] | rgerum/saenopy | docs/regularization.py | 1,621 | Python |
import unittest
import example_module
class MainTest(unittest.TestCase):
def test_add(self):
self.assertEqual(example_module.add(1, 1), 2)
def test_subtract(self):
self.assertEqual(example_module.subtract(1, 1), 0)
if __name__ == '__main__':
    unittest.main()
 | 24 | 58 | 0.704861 | [
"MIT"
] | hokiedsp/test_python | tests/math_test.py | 288 | Python |
"""
This module stores global variables that must be shared between all modules of
envprobe.
Please do not introduce too much global state in this module.
Please do not add dependencies on other modules to this module, because almost
all parts of envprobe refer to this module.
"""
# This list contains the valid subcommands that exist. These are not mapped
# as "get VARIABLE" when used in the short format `envprobe VARIABLE`.
REGISTERED_COMMANDS = []
| 35.076923 | 78 | 0.785088 | [
"MIT"
] | steakhal/envprobe | configuration/global_config.py | 456 | Python |
import configparser
import psycopg2
from sql_queries import copy_table_queries, insert_table_queries
def load_staging_tables(cur, conn):
for query in copy_table_queries:
print('Loading data by: '+query)
cur.execute(query)
conn.commit()
def insert_tables(cur, conn):
for query in insert_table_queries:
print('Transform data by: '+query)
cur.execute(query)
conn.commit()
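# The connection string in main() unpacks config['CLUSTER'].values() positionally,
# so dwh.cfg must list the keys in host/dbname/user/password/port order.
# Illustrative layout (key names and values are placeholders):
#
#   [CLUSTER]
#   HOST=example-cluster.abc123.us-west-2.redshift.amazonaws.com
#   DB_NAME=dwh
#   DB_USER=dwhuser
#   DB_PASSWORD=********
#   DB_PORT=5439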
def main():
config = configparser.ConfigParser()
config.read('/home/gulbulut/repos/udacity-data-engineering-nanodegree-projects/Project_3_Data_Warehouse/dwh.cfg')
conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
cur = conn.cursor()
load_staging_tables(cur, conn)
insert_tables(cur, conn)
conn.close()
if __name__ == "__main__":
    main()
 | 25.470588 | 117 | 0.684758 | [
"MIT"
] | gulbulut/udacity-data-engineering-nanodegree-projects | Project_3_Data_Warehouse/etl.py | 866 | Python |
# -*- coding: utf-8 -*-
# __ __ __ ___ __ __ __ __ ____
# | ' \ \/ / | | \ \ \ / \
# | _ \ /__| | , , |/ /\__|
# | (_) ) / _ | | | | ( __
# | ___/ ( (_) | | | |\ \/ |
# |__| \___/ \___,__;__/__/__/ \____/
"""
Digital Messaging Center API Client
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PyDmc is an API client library, written in Python, for Teradata's Digital
Messaging Center.
"""
__title__ = 'pydmc'
__version__ = '0.1'
__author__ = 'Nick Silva'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Teradata Interactive'
from pydmc import *
| 22.769231 | 73 | 0.488176 | [
"MIT"
] | MappCTSAmericas/pydmc | pydmc/__init__.py | 592 | Python |
import string
import zmq
host = '127.0.0.1'
port = 6789
ctx = zmq.Context()
pub = ctx.socket(zmq.PUB)
pub.bind('tcp://%s:%s' % (host, port))
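# A matching subscriber (illustrative sketch) would connect to the same endpoint and
# subscribe to one or both of the topics published below:
#
#   sub = ctx.socket(zmq.SUB)
#   sub.connect('tcp://%s:%s' % (host, port))
#   sub.setsockopt(zmq.SUBSCRIBE, b'samogloski')  # words starting with a vowel
#   sub.setsockopt(zmq.SUBSCRIBE, b'piec')        # five-letter words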
with open('lokomotywa.txt', 'rt') as poem:
words = poem.read()
for word in words.split():
word = word.strip(string.punctuation)
data = word.encode('utf-8')
if word.startswith(('a','e','i','o','u','y','A','E','I','O','U','Y')):
pub.send_multipart([b'samogloski', data])
if len(word) == 5:
pub.send_multipart([b'piec', data])
| 26.473684 | 74 | 0.586481 | [
"MIT"
] | Mikma03/Python_Bill_Lubanovic_BookCodes | 17. Chapter_/poem_pub.py | 503 | Python |
# -*- coding: utf-8 -*-
import keras.engine.training
from typing import Callable
from typing import Tuple
from typing import List
from typing import Union
from util_types import types_of_loco
from network_model.distillation.distillation_model_builder import DistllationModelIncubator
from keras.optimizers import Optimizer, SGD
from network_model.build_model import builder, builder_with_merge
from model_merger.keras.merge_model import ModelMerger
from keras.layers import Concatenate
from keras.callbacks import Callback
from model_merger.keras.siamese import SiameseBuilder
from model_merger.keras.proc.calculator import calc_l1_norm
from model_merger.pytorch.proc.distance.calculator import L1Norm
import torch.nn
from network_model.builder import keras_builder, pytorch_builder
DLModel = Union[keras.engine.training.Model, torch.nn.Module]
ModelBuilderResult = Union[DLModel, Tuple[DLModel, List[Callback]], Tuple[DLModel, Callable[[str], Callback]]]
ModelBuilder = Union[Callable[[int], ModelBuilderResult],
Callable[[Union[str, Tuple[str, str]]], ModelBuilderResult],
DistllationModelIncubator]
def init_input_image(size: types_of_loco.input_img_size):
def builder_of_generator(class_num: int, channels: int = 1, optimizer: Optimizer = SGD()):
"""
        Build the generator part of a GAN.
        :param class_num: number of output classes
        :param channels: number of output colour channels (1 for a grayscale image)
        :param optimizer: optimizer used to compile the model
        :return: the model for the discriminator part
"""
return builder(class_num, size, channels, optimizer)
return builder_of_generator
def build_wrapper(img_size: types_of_loco.input_img_size = 28,
channels: int = 3,
model_name: str = "model1",
optimizer: Optimizer = SGD()) -> Union[ModelBuilder, pytorch_builder.PytorchModelBuilder]:
"""
    Return a function that builds a model.
    Used as a wrapper when running cross-validation.
    :param img_size: input image size
    :param channels: number of colour channels
    :param model_name: name of the model architecture to build
    :param optimizer: Keras optimizer, or a callable that builds a PyTorch optimizer
    :return: a model-builder callable
"""
if callable(optimizer):
return pytorch_builder.PytorchModelBuilder(img_size=img_size,
channels=channels,
model_name=model_name,
opt_builder=optimizer)
return keras_builder.build_wrapper(img_size, channels, model_name, optimizer)
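# Illustrative cross-validation use of build_wrapper (dataset handling omitted;
# the Keras path is assumed here, i.e. a plain Optimizer rather than a callable):
#
#     build_model = build_wrapper(img_size=64, channels=3, model_name="model1")
#     for fold_train, fold_valid in folds:
#         model = build_model(class_num)   # a freshly initialised model per fold
#         ...train on fold_train, evaluate on fold_valid...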
def build_with_merge_wrapper(base_model_num: int,
img_size: types_of_loco.input_img_size = 28,
channels: int = 3,
model_name: str = "model1",
optimizer: Optimizer = SGD(),
model_merger: ModelMerger = ModelMerger(Concatenate(),
metrics=['accuracy'])) -> ModelBuilder:
return lambda class_num: builder_with_merge(base_model_num,
model_merger,
class_num,
img_size,
channels,
optimizer,
model_name)
def build_siamese(q: float,
img_size: types_of_loco.input_img_size = 28,
channels: int = 3,
model_name: str = "model1",
optimizer=SGD(),
loss_func=None,
calc_distance=None,
save_best_only=True,
save_weights_only=False,
save_base_filepath: str = None,
is_inceptionv3: bool = False,
decide_dataset_generator=None,
nearest_data_ave_num=1,
will_calc_real_data_train=False):
use_distance = calc_distance
if use_distance is None:
use_distance = L1Norm() if callable(optimizer) else calc_l1_norm
def build(class_num: int):
base_model = builder(class_num, img_size, channels, optimizer, model_name)
shame_builder = SiameseBuilder(base_model)
return shame_builder.build_shame_trainer_for_classifivation(q,
optimizer,
loss_func,
use_distance,
save_base_filepath,
save_best_only,
save_weights_only
)
print(type(decide_dataset_generator))
return pytorch_builder.PytorchSiameseModelBuilder(q,
img_size,
channels,
model_name,
optimizer,
loss_func,
use_distance,
is_inceptionv3,
decide_dataset_generator,
nearest_data_ave_num,
will_calc_real_data_train) if callable(optimizer) else build
| 47.87395 | 114 | 0.509215 | [
"MIT"
] | Tetuwo181/ModelLearner | network_model/model_builder.py | 5,867 | Python |
# @ayushk780
# Big Thanks To Spechide and @TechnoAyanBoT
"""Counth: Avaible commands: .bstats
"""
import asyncio
from telethon import events
from uniborg.util import admin_cmd, humanbytes,get_readable_time
import shutil
import time
from userbot import botStartTime
@borg.on(admin_cmd(pattern=r"bstats"))
async def _(event):
if event.fwd_from:
return
currentTime = get_readable_time((time.time() - botStartTime))
total, used, free = shutil.disk_usage('.')
total = humanbytes(total)
used = humanbytes(used)
free = humanbytes(free)
stats = f'Bot Uptime: {currentTime}\n' \
f'Total disk space: {total}\n' \
f'Used: {used}\n' \
f'Free: {free}'
await event.edit(str(stats))
| 25.9 | 65 | 0.646075 | [
"MIT"
] | felapr1804/TechnoAyanBOT | userbot/plugins/bot_stats.py | 777 | Python |
import six
from django.shortcuts import render, resolve_url
from django.utils.functional import Promise
from rest_framework.renderers import BaseRenderer, JSONRenderer, TemplateHTMLRenderer
from rest_framework.utils import json
from .app_settings import redoc_settings, swagger_settings
from .codecs import VALIDATORS, OpenAPICodecJson, OpenAPICodecYaml
from .openapi import Swagger
from .utils import filter_none
class _SpecRenderer(BaseRenderer):
"""Base class for text renderers. Handles encoding and validation."""
charset = 'utf-8'
validators = []
codec_class = None
@classmethod
def with_validators(cls, validators):
assert all(vld in VALIDATORS for vld in validators), "allowed validators are " + ", ".join(VALIDATORS)
return type(cls.__name__, (cls,), {'validators': validators})
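    # Illustrative use (assumes 'ssv' is one of the keys registered in VALIDATORS):
    #
    #     ValidatedJSONRenderer = SwaggerJSONRenderer.with_validators(['ssv'])
    #
    # The generated subclass keeps the parent's media type and format, but runs the
    # listed validators when encoding the schema.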
def render(self, data, media_type=None, renderer_context=None):
assert self.codec_class, "must override codec_class"
codec = self.codec_class(self.validators)
if not isinstance(data, Swagger): # pragma: no cover
# if `swagger` is not a ``Swagger`` object, it means we somehow got a non-success ``Response``
# in that case, it's probably better to let the default ``JSONRenderer`` render it
# see https://github.com/axnsan12/drf-yasg/issues/58
return JSONRenderer().render(data, media_type, renderer_context)
return codec.encode(data)
class OpenAPIRenderer(_SpecRenderer):
"""Renders the schema as a JSON document with the ``application/openapi+json`` specific mime type."""
media_type = 'application/openapi+json'
format = 'openapi'
codec_class = OpenAPICodecJson
class SwaggerJSONRenderer(_SpecRenderer):
"""Renders the schema as a JSON document with the generic ``application/json`` mime type."""
media_type = 'application/json'
format = '.json'
codec_class = OpenAPICodecJson
class SwaggerYAMLRenderer(_SpecRenderer):
"""Renders the schema as a YAML document."""
media_type = 'application/yaml'
format = '.yaml'
codec_class = OpenAPICodecYaml
class _UIRenderer(BaseRenderer):
"""Base class for web UI renderers. Handles loading and passing settings to the appropriate template."""
media_type = 'text/html'
charset = 'utf-8'
template = ''
def render(self, swagger, accepted_media_type=None, renderer_context=None):
if not isinstance(swagger, Swagger): # pragma: no cover
# if `swagger` is not a ``Swagger`` object, it means we somehow got a non-success ``Response``
# in that case, it's probably better to let the default ``TemplateHTMLRenderer`` render it
# see https://github.com/axnsan12/drf-yasg/issues/58
return TemplateHTMLRenderer().render(swagger, accepted_media_type, renderer_context)
self.set_context(renderer_context, swagger)
return render(renderer_context['request'], self.template, renderer_context)
def set_context(self, renderer_context, swagger=None):
renderer_context['title'] = swagger.info.title or '' if swagger else ''
renderer_context['version'] = swagger.info.version or '' if swagger else ''
renderer_context['oauth2_config'] = json.dumps(self.get_oauth2_config())
renderer_context['USE_SESSION_AUTH'] = swagger_settings.USE_SESSION_AUTH
renderer_context.update(self.get_auth_urls())
def resolve_url(self, to):
if isinstance(to, Promise):
to = str(to)
if to is None:
return None
args, kwargs = None, None
if not isinstance(to, six.string_types):
if len(to) > 2:
to, args, kwargs = to
elif len(to) == 2:
to, kwargs = to
args = args or ()
kwargs = kwargs or {}
return resolve_url(to, *args, **kwargs)
def get_auth_urls(self):
urls = {
'LOGIN_URL': self.resolve_url(swagger_settings.LOGIN_URL),
'LOGOUT_URL': self.resolve_url(swagger_settings.LOGOUT_URL),
}
return filter_none(urls)
def get_oauth2_config(self):
data = swagger_settings.OAUTH2_CONFIG
assert isinstance(data, dict), "OAUTH2_CONFIG must be a dict"
return data
class SwaggerUIRenderer(_UIRenderer):
"""Renders a swagger-ui web interface for schema browisng."""
template = 'drf-yasg/swagger-ui.html'
format = 'swagger'
def set_context(self, renderer_context, swagger=None):
super(SwaggerUIRenderer, self).set_context(renderer_context, swagger)
renderer_context['swagger_settings'] = json.dumps(self.get_swagger_ui_settings())
def get_swagger_ui_settings(self):
data = {
'url': self.resolve_url(swagger_settings.SPEC_URL),
'operationsSorter': swagger_settings.OPERATIONS_SORTER,
'tagsSorter': swagger_settings.TAGS_SORTER,
'docExpansion': swagger_settings.DOC_EXPANSION,
'deepLinking': swagger_settings.DEEP_LINKING,
'showExtensions': swagger_settings.SHOW_EXTENSIONS,
'defaultModelRendering': swagger_settings.DEFAULT_MODEL_RENDERING,
'defaultModelExpandDepth': swagger_settings.DEFAULT_MODEL_DEPTH,
'defaultModelsExpandDepth': swagger_settings.DEFAULT_MODEL_DEPTH,
'showCommonExtensions': swagger_settings.SHOW_COMMON_EXTENSIONS,
'oauth2RedirectUrl': swagger_settings.OAUTH2_REDIRECT_URL,
'supportedSubmitMethods': swagger_settings.SUPPORTED_SUBMIT_METHODS,
'displayOperationId': swagger_settings.DISPLAY_OPERATION_ID,
'persistAuth': swagger_settings.PERSIST_AUTH,
'refetchWithAuth': swagger_settings.REFETCH_SCHEMA_WITH_AUTH,
'refetchOnLogout': swagger_settings.REFETCH_SCHEMA_ON_LOGOUT,
'fetchSchemaWithQuery': swagger_settings.FETCH_SCHEMA_WITH_QUERY,
}
data = filter_none(data)
if swagger_settings.VALIDATOR_URL != '':
data['validatorUrl'] = self.resolve_url(swagger_settings.VALIDATOR_URL)
return data
class ReDocRenderer(_UIRenderer):
"""Renders a ReDoc web interface for schema browisng."""
template = 'drf-yasg/redoc.html'
format = 'redoc'
def set_context(self, renderer_context, swagger=None):
super(ReDocRenderer, self).set_context(renderer_context, swagger)
renderer_context['redoc_settings'] = json.dumps(self.get_redoc_settings())
def get_redoc_settings(self):
data = {
'url': self.resolve_url(redoc_settings.SPEC_URL),
'lazyRendering': redoc_settings.LAZY_RENDERING,
'hideHostname': redoc_settings.HIDE_HOSTNAME,
'expandResponses': redoc_settings.EXPAND_RESPONSES,
'pathInMiddlePanel': redoc_settings.PATH_IN_MIDDLE,
'nativeScrollbars': redoc_settings.NATIVE_SCROLLBARS,
'requiredPropsFirst': redoc_settings.REQUIRED_PROPS_FIRST,
'fetchSchemaWithQuery': redoc_settings.FETCH_SCHEMA_WITH_QUERY,
}
return filter_none(data)
class ReDocOldRenderer(ReDocRenderer):
"""Renders a ReDoc 1.x.x web interface for schema browisng."""
template = 'drf-yasg/redoc-old.html'
| 41.096045 | 110 | 0.690267 | [
"BSD-3-Clause"
] | varnion/drf-yasg | src/drf_yasg/renderers.py | 7,274 | Python |
"""
Grabs data from the "FAQ Content" CSV and turns it into nice JSON: a main faq object containing an array of themed Section objects, each Section Object in turn holding an array of Question objects consisting of question, answer and related link/s, as follows:
[
{
"Q": "What are the symptoms of COVID-19?",
"A": "Symptoms of COVID-19 include coughing and shortness of breath. Additionally, a person showing two or more of the following symptoms may have the virus: fever, repeated shaking with chills, muscle pain, headache, sore throat, new loss of taste or smell.",
"link": "https://www.cdc.gov/coronavirus/2019-ncov/symptoms-testing/symptoms.html"
},
.
.
.
]
etc
In addition to its array of Question objects, each Section displays a title and "last updated" date value. For the Python stage of the transition from CSV to JSON we create a hierarchy of nested dictionaries and lists equivalent to the JSON objects and arrays.
"""
import requests
import json
import csv
from io import StringIO
import datetime
# Link to "FAQ Content" CSV
link = "https://docs.google.com/spreadsheets/d/1_wBXS62S5oBQrwetGc8_-dFvDjEmNqzqHwUeP-DzkYs/export?format=csv&id=1_wBXS62S5oBQrwetGc8_-dFvDjEmNqzqHwUeP-DzkYs&gid=1318925039"
# Get the data
response = requests.get(link)
response.raise_for_status()
# Turn text from CSV stream into a file object csv.reader can read
csv_stream = StringIO(response.text)
# Create a reader to iterate over the CSV stream file object
reader = csv.reader(csv_stream)
# ISO8601 format date, to update each time the program runs
date = datetime.datetime.now().strftime("%Y-%m-%d")
# Create the main FAQ dictionary
# Create a list for our Sections
# Place the Sections list inside the main dictionary
faq_dict = {}
sections_list = []
faq_dict["faqItems"] = sections_list
# Get data from the reader object
# Arrange Sections and Questions in a JSON-friendly way
for row in reader:
rowtype = row[0] # CSV column delineating Section, Question, Answer or Link
rowval = row[1] # Values for the above
if rowtype == "Section Head":
section = {} # Create a dictionary for a Section
sections_list.append(section) # Add new Section to the Sections list
section["title"] = rowval # Give the Section its title
section["lastUpdateAt"] = date # a "last updated" value
questions_list = [] # Create a list for Questions
section["qa"] = questions_list # add our Questions list to the Section
if rowtype == "Q":
question = {} # Create new Question object
question["Q"] = rowval # the Question's title
questions_list.append(question) #append the Question to its given Section
elif rowtype == "A": # Now the same for Answers & Links
question["A"] = rowval
elif rowtype == "link":
question["link"] = rowval
# create JSON output file
# pass FAQ Content to json and store output in output file
with open ("faq.json", "w") as f:
    json.dump(faq_dict, f, indent=2)
 | 44.257143 | 270 | 0.700775 | [
"MIT"
] | zappascout/stop-covid19-sfbayarea | data/jsonic1_commit.py | 3,098 | Python |
#!/usr/bin/env python
import mdtraj as md
import numpy as np
from LLC_Membranes.llclib import physical, topology
r = 1
t = md.load('initial.gro')
keep = [a.index for a in t.topology.atoms if a.residue.name == 'HOH']
res_start = keep[0]
com = physical.center_of_mass(t.xyz[:, keep, :], [18., 1., 1.])
membrane = topology.LC('HII') # object w/ attributes of LC making up membrane
hg = [a.index for a in t.topology.atoms if a.name in membrane.pore_defining_atoms and a.residue.name
== membrane.name]
pore_centers = physical.avg_pore_loc(4, t.xyz[:, hg, :], t.unitcell_vectors, buffer=0, spline=False)
partition = physical.partition(com, pore_centers, r, unitcell=t.unitcell_vectors,
spline=False)
pore_indices = [res_start + 3 * i for i in np.where(partition[0, :])[0]]
tail_indices = [res_start + 3 * i for i in np.where(partition[0, :] == False)[0]] # have to use double equals sign. Using is doesn't work with np.where
with open('partition.tcl', 'w') as f:
f.write('color Display Background white\n')
f.write('mol addrep 0\n')
f.write('mol modselect 0 0 index')
for i in pore_indices:
end = i + 3
f.write(' %s to %s' % (i, end - 1))
f.write('\n')
f.write('mol modcolor 0 0 ColorID 0\n')
f.write('mol modstyle 0 0 CPK 2.0 0.3 12.0 12.0\n')
f.write('mol addrep 0\n')
f.write('mol modselect 1 0 index')
for i in tail_indices:
end = i + 3
f.write(' %s to %s' % (i, end - 1))
f.write('\n')
f.write('mol modstyle 1 0 CPK 2.0 0.3 12.0 12.0\n')
f.write('mol modcolor 1 0 ColorID 1\n')
| 35.326087 | 152 | 0.624615 | ["MIT"] | shirtsgroup/LLC_Membranes | Ben_Manuscripts/stochastic_transport/figures/pore_water_tcl.py | 1,625 | Python
import logging
logger = logging.getLogger('artifice')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(levelname)s:artifice:%(message)s'))
logger.addHandler(handler)
def set_verbosity(verbose):
if verbose == 0:
logger.setLevel(logging.WARNING)
elif verbose == 1:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.DEBUG)
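# Illustrative use (the count would typically come from a CLI -v/--verbose flag):
#   set_verbosity(0)  # warnings and above only
#   set_verbosity(2)  # full debug output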
| 24.058824 | 77 | 0.753056 | ["Apache-2.0"] | bendkill/artifice | artifice/log/logger.py | 409 | Python
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""BitcoinSN P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
and can respond correctly to getdata and getheaders messages"""
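# Illustrative sketch of how a test typically uses these classes (the helper
# that actually opens the TCP connection to the node lives in the test
# framework's TestNode, not in this module):
#
#   class MyPeer(P2PInterface):
#       def on_inv(self, message):
#           # custom handling, e.g. record announced inventory hashes
#           super().on_inv(message)
#
# A MyPeer instance is then connected via peer_connect()/the NetworkThread
# event loop and driven with the send_and_ping()/wait_for_* helpers below.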
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import CBlockHeader, MIN_VERSION_SUPPORTED, msg_addr, msg_block, MSG_BLOCK, msg_blocktxn, msg_cmpctblock, msg_feefilter, msg_getaddr, msg_getblocks, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_mempool, msg_ping, msg_pong, msg_reject, msg_sendcmpct, msg_sendheaders, msg_tx, MSG_TX, MSG_TYPE_MASK, msg_verack, msg_version, NODE_NETWORK, NODE_WITNESS, sha256
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
MESSAGEMAP = {
b"addr": msg_addr,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"ping": msg_ping,
b"pong": msg_pong,
b"reject": msg_reject,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
"testnet3": b"\x0b\x11\x09\x07", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
class P2PConnection(asyncio.Protocol):
"""A low-level connection object to a node's P2P interface.
This class is responsible for:
- opening and closing the TCP connection to the node
- reading bytes from and writing bytes to the socket
- deserializing and serializing the P2P message header
- logging messages as they are sent and received
This class contains no logic for handing the P2P message payloads. It must be
sub-classed and the on_message() callback overridden."""
def __init__(self):
# The underlying transport of the connection.
# Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
self._transport = None
@property
def is_connected(self):
return self._transport is not None
def peer_connect(self, dstaddr, dstport, net="regtest"):
assert not self.is_connected
self.dstaddr = dstaddr
self.dstport = dstport
# The initial message to send after the connection was made:
self.on_connection_send_msg = None
self.recvbuf = b""
self.network = net
logger.debug('Connecting to BitcoinSN Node: %s:%d' % (self.dstaddr, self.dstport))
loop = NetworkThread.network_event_loop
conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe)
return conn_gen
def peer_disconnect(self):
# Connection could have already been closed by other end.
NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
# Connection and disconnection methods
def connection_made(self, transport):
"""asyncio callback when a connection is opened."""
assert not self._transport
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self._transport = transport
if self.on_connection_send_msg:
self.send_message(self.on_connection_send_msg)
self.on_connection_send_msg = None # Never used again
self.on_open()
def connection_lost(self, exc):
"""asyncio callback when a connection is closed."""
if exc:
logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
else:
logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
self._transport = None
self.recvbuf = b""
self.on_close()
# Socket read methods
def data_received(self, t):
"""asyncio callback when data is read from the socket."""
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
"""Try to read P2P messages from the recv buffer.
This method reads data from the buffer in a loop. It deserializes,
parses and verifies the P2P header, then passes the P2P payload to
the on_message callback for processing."""
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command not in MESSAGEMAP:
raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
            logger.exception('Error reading message: %s', repr(e))
raise
def on_message(self, message):
"""Callback for processing a P2P payload. Must be overridden by derived class."""
raise NotImplementedError
# Socket write methods
def send_message(self, message):
"""Send a P2P message over the socket.
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
if not self.is_connected:
raise IOError('Not connected')
self._log_message("send", message)
tmsg = self._build_message(message)
def maybe_write():
if not self._transport:
return
# Python <3.4.4 does not have is_closing, so we have to check for
# its existence explicitly as long as BitcoinSN Core supports all
# Python 3.4 versions.
if hasattr(self._transport, 'is_closing') and self._transport.is_closing():
return
self._transport.write(tmsg)
NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
# Class utility methods
def _build_message(self, message):
"""Build a serialized P2P message"""
command = message.command
data = message.serialize()
tmsg = MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
return tmsg
def _log_message(self, direction, msg):
"""Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
class P2PInterface(P2PConnection):
"""A high-level P2P interface class for communicating with a BitcoinSN node.
This class provides high-level callbacks for processing P2P message
payloads, as well as convenience methods for interacting with the
node over P2P.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour."""
def __init__(self):
super().__init__()
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
create_conn = super().peer_connect(*args, **kwargs)
if send_version:
# Send a version msg
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.on_connection_send_msg = vt # Will be sent soon after connection_made
return create_conn
# Message receiving methods
def on_message(self, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_pong(self, message): pass
def on_reject(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
self.verack_received = True
def on_version(self, message):
        assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than or equal to {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_header(self, blockhash, timeout=60):
def test_function():
last_headers = self.last_message.get('headers')
if not last_headers:
return False
return last_headers.headers[0].rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
"""Waits for a getdata message.
        Receiving any getdata message will satisfy the predicate. The last_message["getdata"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block/tx has been requested."""
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
"""Waits for a getheaders message.
        Receiving any getheaders message will satisfy the predicate. The last_message["getheaders"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block header has been requested."""
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
network_event_loop = None
def __init__(self):
super().__init__(name="NetworkThread")
# There is only one event loop and no more than one thread must be created
assert not self.network_event_loop
NetworkThread.network_event_loop = asyncio.new_event_loop()
def run(self):
"""Start the network thread."""
self.network_event_loop.run_forever()
def close(self, timeout=10):
"""Close the connections and network event loop."""
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
self.network_event_loop.close()
self.join(timeout)
class P2PDataStore(P2PInterface):
"""A P2P data store class.
Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
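    # Illustrative flow (see send_blocks_and_test/send_txs_and_test below): a
    # test builds CBlock/CTransaction objects and calls, e.g.,
    #   peer.send_blocks_and_test([block1, block2], rpc)
    # which stores the blocks, announces the last header, serves the node's
    # getdata requests from block_store, and finally checks the node's tip via
    # the supplied RPC handle.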
def __init__(self):
super().__init__()
self.reject_code_received = None
self.reject_reason_received = None
# store of blocks. key is block hash, value is a CBlock object
self.block_store = {}
self.last_block_hash = ''
# store of txs. key is txid, value is a CTransaction object
self.tx_store = {}
self.getdata_requests = []
def on_getdata(self, message):
"""Check for the tx/block in our stores and if found, reply with an inv message."""
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
def on_getheaders(self, message):
"""Search back through our block store for the locator, and reply with a headers message if found."""
locator, hash_stop = message.locator, message.hashstop
# Assume that the most recent block added is the tip
if not self.block_store:
return
headers_list = [self.block_store[self.last_block_hash]]
maxheaders = 2000
while headers_list[-1].sha256 not in locator.vHave:
# Walk back through the block store, adding headers to headers_list
# as we go.
prev_block_hash = headers_list[-1].hashPrevBlock
if prev_block_hash in self.block_store:
prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
headers_list.append(prev_block_header)
if prev_block_header.sha256 == hash_stop:
# if this is the hashstop header, stop here
break
else:
logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
break
# Truncate the list if there are too many headers
headers_list = headers_list[:-maxheaders - 1:-1]
response = msg_headers(headers_list)
if response is not None:
self.send_message(response)
def on_reject(self, message):
"""Store reject reason and code for testing."""
self.reject_code_received = message.code
self.reject_reason_received = message.reason
def send_blocks_and_test(self, blocks, rpc, success=True, request_block=True, reject_code=None, reject_reason=None, timeout=60):
"""Send blocks to test node and test whether the tip advances.
- add all blocks to our block_store
- send a headers message for the final block
- the on_getheaders handler will ensure that any getheaders are responded to
- if request_block is True: wait for getdata for each of the blocks. The on_getdata handler will
ensure that any getdata messages are responded to
- if success is True: assert that the node's tip advances to the most recent block
- if success is False: assert that the node's tip doesn't advance
- if reject_code and reject_reason are set: assert that the correct reject message is received"""
with mininode_lock:
self.reject_code_received = None
self.reject_reason_received = None
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
self.send_message(msg_headers([CBlockHeader(blocks[-1])]))
if request_block:
wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)
if success:
wait_until(lambda: rpc.getbestblockhash() == blocks[-1].hash, timeout=timeout)
else:
assert rpc.getbestblockhash() != blocks[-1].hash
if reject_code is not None:
wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
if reject_reason is not None:
wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)
def send_txs_and_test(self, txs, rpc, success=True, expect_disconnect=False, reject_code=None, reject_reason=None):
"""Send txs to test node and test whether they're accepted to the mempool.
- add all txs to our tx_store
- send tx messages for all txs
- if success is True/False: assert that the txs are/are not accepted to the mempool
- if expect_disconnect is True: Skip the sync with ping
- if reject_code and reject_reason are set: assert that the correct reject message is received."""
with mininode_lock:
self.reject_code_received = None
self.reject_reason_received = None
for tx in txs:
self.tx_store[tx.sha256] = tx
for tx in txs:
self.send_message(msg_tx(tx))
if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
raw_mempool = rpc.getrawmempool()
if success:
# Check that all txs are now in the mempool
for tx in txs:
assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
else:
# Check that none of the txs are now in the mempool
for tx in txs:
assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
if reject_code is not None:
wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
if reject_reason is not None:
wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)
| 41.188172 | 413 | 0.653135 | ["MIT"] | BitcoinSN/BitcoinSN | test/functional/test_framework/mininode.py | 22,983 | Python
from tool.runners.python import SubmissionPy
class CocoSubmission(SubmissionPy):
def run(self, s):
"""
:param s: input in string format
:return: solution flag
"""
# Your code goes here
        # Assumption: all bus periods are pairwise co-prime (this appears to hold for the puzzle inputs), so the Chinese Remainder Theorem applies.
_, buses = s.split("\n")
buses = [(k % int(n), int(n)) for k, n in enumerate(buses.split(",")) if n != "x"]
_, base = buses[0]
multiplier = base
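        # Sieve form of the CRT: once every bus processed so far is satisfied,
        # stepping by the product of their periods ("multiplier") preserves
        # those constraints, so each new bus only needs a scan over such steps.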
for rest, b in buses[1:]:
            k = 0
            while (base + multiplier * k) % b != (b - rest) % b:
k += 1
base = base + multiplier * k
multiplier = multiplier * b
return base
def test_sub():
input = """N\n17,x,13,19"""
assert CocoSubmission().run(input) == 3417
assert CocoSubmission().run("N\n67,7,59,61") == 754018
assert CocoSubmission().run("N\n67,x,7,59,61") == 779210
assert CocoSubmission().run("N\n67,7,x,59,61") == 1261476
assert CocoSubmission().run("N\n1789,37,47,1889") == 1202161486
| 31.852941 | 90 | 0.553093 | ["MIT"] | david-ds/adventofcode-2020 | day-13/part-2/coco.py | 1,083 | Python
import json
import os
from . import utils
from .browsers.chrome_native import is_native
def dump_kwargs(**kwargs):
utils.alert(
title='%s Kwargs' % kwargs.get('entity_type', 'Unknown'),
message='<pre>%s</pre>' % json.dumps(kwargs, sort_keys=True, indent=4)
)
def dump_environ(**kwargs):
utils.alert(
title='%s Environ' % kwargs.get('entity_type', 'Unknown'),
message='<pre>%s</pre>' % '\n'.join('%s=%s' % x for x in sorted(os.environ.iteritems())),
)
def raise_error(**kwargs):
raise ValueError('This is a test')
def disconnect(**kwargs):
if is_native():
os._exit(0)
else:
utils.alert(message='Not the native messenger.')
def prompt_confirm(**kwargs):
res = utils.confirm(
title='Testing Shotgun Actions',
message='Do you want to do the thing?',
)
utils.notify('You pressed "%s"' % ('OK' if res else 'Cancel'))
def prompt_select(**kwargs):
res = utils.select(
title='Testing Shotgun Actions',
prologue="What is your favourite colour?",
        options=[(x, x) for x in ('Red', 'Orange', 'Yellow', 'Other')],
epilogue="Just imagine that they are all listed here...",
)
utils.notify('You picked %s' % ('"%s"' % res if res else 'nothing'))
| 26.285714 | 97 | 0.609472 | ["BSD-3-Clause"] | westernx/sgactions | sgactions/examples.py | 1,288 | Python
import sys
import os
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('./demo/'))
from autorch_sphinx_theme import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinxcontrib.httpdomain',
]
# Do not warn about external images (status badges in README.rst)
suppress_warnings = ['image.nonlocal_uri']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyTorch Sphinx Theme'
copyright = u'PyTorch'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
intersphinx_mapping = {'rtd': ('https://docs.readthedocs.io/en/latest/', None)}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'autorch_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo_only': True
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["../"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "demo/static/pytorch-logo-dark.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyTorchSphinxthemedemodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PyTorchthemedemo.tex', u'PyTorch theme demo Documentation',
u'PyTorch, PyTorch', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pytorchthemedemo', u'PyTorch theme demo Documentation',
[u'PyTorch'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PyTorchthemedemo', u'PyTorch theme demo Documentation',
u'PyTorch', 'PyTorchthemedemo',
'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 31.854839 | 80 | 0.715823 | ["MIT"] | zhanghang1989/autorch_sphinx_theme | docs/conf.py | 7,900 | Python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from typing import Any, Callable, Tuple
import tensorflow as tf # type: ignore[import]
from jax.config import config
from jax import dtypes
from jax.experimental import jax2tf
from jax import test_util as jtu
class JaxToTfTestCase(jtu.JaxTestCase):
def assertDtypesMatch(self, x, y, *, canonicalize_dtypes=True):
"""Compares dtypes across JAX and TF dtypes. Overrides super method."""
def to_numpy_dtype(dt):
return dt if isinstance(dt, np.dtype) else dt.as_numpy_dtype
if not config.FLAGS.jax_enable_x64 and canonicalize_dtypes:
self.assertEqual(dtypes.canonicalize_dtype(to_numpy_dtype(jtu._dtype(x))),
dtypes.canonicalize_dtype(to_numpy_dtype(jtu._dtype(y))))
else:
self.assertEqual(to_numpy_dtype(jtu._dtype(x)),
to_numpy_dtype(jtu._dtype(y)))
def ConvertAndCompare(self, func_jax: Callable, *args,
with_function: bool = False,
atol=None,
rtol=None) -> Tuple[Any, Any]:
"""Compares jax_func(*args) with convert(jax_func)(*args)."""
func_tf = jax2tf.convert(func_jax)
if with_function:
func_tf = tf.function(func_tf)
res_jax = func_jax(*args)
res_tf = func_tf(*args)
self.assertAllClose(res_jax, res_tf, atol=atol, rtol=rtol)
return (res_jax, res_tf)
| 38.9 | 80 | 0.701285 | ["ECL-2.0", "Apache-2.0"] | BuddenD/jax | jax/experimental/jax2tf/tests/tf_test_util.py | 1,945 | Python
import time
import thriftpy2
from thriftpy2.utils import serialize, deserialize
from thriftpy2.protocol import TBinaryProtocolFactory, TCyBinaryProtocolFactory
addressbook = thriftpy2.load("addressbook.thrift")
def make_addressbook():
phone1 = addressbook.PhoneNumber()
phone1.type = addressbook.PhoneType.MOBILE
phone1.number = b'555-1212'
phone2 = addressbook.PhoneNumber()
phone2.type = addressbook.PhoneType.HOME
phone2.number = b'555-1234'
person = addressbook.Person()
person.name = b"Alice"
person.phones = [phone1, phone2]
person.created_at = 1400000000
ab = addressbook.AddressBook()
ab.people = {person.name: person}
return ab
ab_encoded = serialize(make_addressbook())
def encode(n, proto_factory=TBinaryProtocolFactory()):
ab = make_addressbook()
start = time.time()
for i in range(n):
serialize(ab, proto_factory)
end = time.time()
print("encode\t-> {}".format(end - start))
def decode(n, proto_factory=TBinaryProtocolFactory()):
ab = addressbook.AddressBook()
start = time.time()
for i in range(n):
deserialize(ab, ab_encoded, proto_factory)
end = time.time()
print("decode\t-> {}".format(end - start))
def main():
n = 100000
print("binary protocol struct benchmark for {} times:".format(n))
encode(n)
decode(n)
print("\ncybin protocol struct benchmark for {} times:".format(n))
encode(n, TCyBinaryProtocolFactory())
decode(n, TCyBinaryProtocolFactory())
if __name__ == "__main__":
main()
| 25.966667 | 79 | 0.689345 | ["MIT"] | 4masaka/thriftpy2 | benchmark/benchmark_struct.py | 1,558 | Python
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.activity_jobs_one_off."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
from constants import constants
from core.domain import activity_jobs_one_off
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import search_services
from core.domain import state_domain
from core.domain import taskqueue_services
from core.domain import topic_domain
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
datastore_services = models.Registry.import_datastore_services()
gae_search_services = models.Registry.import_search_services()
(
base_models, collection_models,
exp_models, question_models, skill_models,
story_models, topic_models, subtopic_models
) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.collection,
models.NAMES.exploration, models.NAMES.question, models.NAMES.skill,
models.NAMES.story, models.NAMES.topic, models.NAMES.subtopic
])
class ActivityContributorsSummaryOneOffJobTests(test_utils.GenericTestBase):
ONE_OFF_JOB_MANAGERS_FOR_TESTS = [
activity_jobs_one_off.ActivityContributorsSummaryOneOffJob]
EXP_ID = 'exp_id'
COL_ID = 'col_id'
USERNAME_A = 'usernamea'
USERNAME_B = 'usernameb'
EMAIL_A = '[email protected]'
EMAIL_B = '[email protected]'
def setUp(self):
super(ActivityContributorsSummaryOneOffJobTests, self).setUp()
self.signup(self.EMAIL_A, self.USERNAME_A)
self.signup(self.EMAIL_B, self.USERNAME_B)
self.user_a_id = self.get_user_id_from_email(self.EMAIL_A)
self.user_b_id = self.get_user_id_from_email(self.EMAIL_B)
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
activity_jobs_one_off.ActivityContributorsSummaryOneOffJob
.create_new())
activity_jobs_one_off.ActivityContributorsSummaryOneOffJob.enqueue(
job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
activity_jobs_one_off.ActivityContributorsSummaryOneOffJob
.get_output(job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
return eval_output
def test_contributors_for_valid_nonrevert_contribution(self):
# Let USER A make three commits.
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.user_a_id)
collection = self.save_new_valid_collection(self.COL_ID, self.user_a_id)
exp_services.update_exploration(
self.user_a_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'New Exploration Title'
})], 'Changed title.')
exp_services.update_exploration(
self.user_a_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'New Objective'
})], 'Changed Objective.')
collection_services.update_collection(
self.user_a_id, self.COL_ID, [{
'cmd': 'edit_collection_property',
'property_name': 'title',
'new_value': 'New Exploration Title'
}], 'Changed title.')
collection_services.update_collection(
self.user_a_id, self.COL_ID, [{
'cmd': 'edit_collection_property',
'property_name': 'objective',
'new_value': 'New Objective'
}], 'Changed Objective.')
output = self._run_one_off_job()
self.assertEqual([['SUCCESS', 3]], output)
exploration_summary = exp_fetchers.get_exploration_summary_by_id(
exploration.id)
self.assertEqual([self.user_a_id], exploration_summary.contributor_ids)
self.assertEqual(
{self.user_a_id: 3}, exploration_summary.contributors_summary)
collection_summary = collection_services.get_collection_summary_by_id(
collection.id)
self.assertEqual([self.user_a_id], collection_summary.contributor_ids)
self.assertEqual(
{self.user_a_id: 3}, collection_summary.contributors_summary)
def test_contributors_with_only_reverts_not_included(self):
# Let USER A make three commits.
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.user_a_id, title='Exploration Title 1')
exp_services.update_exploration(
self.user_a_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'New Exploration Title'
})], 'Changed title.')
exp_services.update_exploration(
self.user_a_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'New Objective'
})], 'Changed Objective.')
# Let the second user revert version 3 to version 2.
exp_services.revert_exploration(self.user_b_id, self.EXP_ID, 3, 2)
output = self._run_one_off_job()
self.assertEqual([['SUCCESS', 1]], output)
exploration_summary = exp_fetchers.get_exploration_summary_by_id(
exploration.id)
self.assertEqual([self.user_a_id], exploration_summary.contributor_ids)
self.assertEqual(
{self.user_a_id: 2}, exploration_summary.contributors_summary)
def test_reverts_not_counted(self):
# Let USER A make 3 non-revert commits.
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.user_a_id, title='Exploration Title')
exp_services.update_exploration(
self.user_a_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'New Exploration Title'
})], 'Changed title.')
exp_services.update_exploration(
self.user_a_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'New Objective'
})], 'Changed Objective.')
# Let USER A revert version 3 to version 2.
exp_services.revert_exploration(self.user_a_id, self.EXP_ID, 3, 2)
output = self._run_one_off_job()
self.assertEqual([['SUCCESS', 1]], output)
# Check that USER A's number of contributions is equal to 2.
exploration_summary = exp_fetchers.get_exploration_summary_by_id(
exploration.id)
self.assertEqual([self.user_a_id], exploration_summary.contributor_ids)
self.assertEqual(
{self.user_a_id: 2}, exploration_summary.contributors_summary)
def test_nonhuman_committers_not_counted(self):
# Create a commit with the system user id.
exploration = self.save_new_valid_exploration(
self.EXP_ID, feconf.SYSTEM_COMMITTER_ID, title='Original Title')
collection = self.save_new_valid_collection(self.COL_ID, self.user_a_id)
# Create commits with all the system user ids.
for system_id in constants.SYSTEM_USER_IDS:
exp_services.update_exploration(
system_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Title changed by %s' % system_id
})], 'Changed title.')
collection_services.update_collection(
system_id, self.COL_ID, [{
'cmd': 'edit_collection_property',
'property_name': 'title',
'new_value': 'New Exploration Title'
}], 'Changed title.')
output = self._run_one_off_job()
self.assertEqual([['SUCCESS', 3]], output)
# Check that no system id was added to the exploration's
# contributor's summary.
exploration_summary = exp_fetchers.get_exploration_summary_by_id(
exploration.id)
collection_summary = collection_services.get_collection_summary_by_id(
collection.id)
for system_id in constants.SYSTEM_USER_IDS:
self.assertNotIn(
system_id,
exploration_summary.contributors_summary)
self.assertNotIn(
system_id,
exploration_summary.contributor_ids)
self.assertNotIn(
system_id,
collection_summary.contributors_summary)
self.assertNotIn(
system_id,
collection_summary.contributor_ids)
def test_deleted_exploration(self):
self.save_new_valid_exploration(
self.EXP_ID, self.user_a_id)
exp_services.delete_exploration(feconf.SYSTEM_COMMITTER_ID, self.EXP_ID)
self.process_and_flush_pending_mapreduce_tasks()
output = self._run_one_off_job()
self.assertEqual([], output)
class AuditContributorsOneOffJobTests(test_utils.GenericTestBase):
USER_1_ID = 'user_1_id'
USER_2_ID = 'user_2_id'
USER_3_ID = 'user_3_id'
USER_4_ID = 'user_4_id'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = activity_jobs_one_off.AuditContributorsOneOffJob.create_new()
activity_jobs_one_off.AuditContributorsOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
activity_jobs_one_off.AuditContributorsOneOffJob.get_output(job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
for item in eval_output:
if isinstance(item[1], list):
item[1] = [ast.literal_eval(triple) for triple in item[1]]
return eval_output
def test_correct_models(self):
exp_models.ExpSummaryModel(
id='id_1',
title='title',
category='category',
objective='objective',
language_code='language_code',
community_owned=False,
contributor_ids=[self.USER_1_ID],
contributors_summary={self.USER_1_ID: 4},
).put()
collection_models.CollectionSummaryModel(
id='id_1',
title='title',
category='category',
objective='objective',
language_code='language_code',
community_owned=False,
contributor_ids=[self.USER_2_ID],
contributors_summary={self.USER_2_ID: 4},
).put()
output = self._run_one_off_job()
self.assertEqual(len(output), 1)
self.assertEqual([['SUCCESS', 2]], output)
def test_duplicate_ids_models(self):
exp_models.ExpSummaryModel(
id='id_1',
title='title',
category='category',
objective='objective',
language_code='language_code',
community_owned=False,
contributor_ids=[self.USER_1_ID, self.USER_1_ID],
contributors_summary={self.USER_1_ID: 4},
).put()
collection_models.CollectionSummaryModel(
id='id_2',
title='title',
category='category',
objective='objective',
language_code='language_code',
community_owned=False,
contributor_ids=[self.USER_2_ID, self.USER_2_ID],
contributors_summary={self.USER_2_ID: 4},
).put()
output = self._run_one_off_job()
self.assertEqual(len(output), 2)
self.assertIn(['SUCCESS', 2], output)
self.assertIn([
'DUPLICATE_IDS', [
('id_1', [self.USER_1_ID, self.USER_1_ID], {self.USER_1_ID: 4}),
('id_2', [self.USER_2_ID, self.USER_2_ID], {self.USER_2_ID: 4})
]], output)
def test_missing_in_summary_models(self):
exp_models.ExpSummaryModel(
id='id_1',
title='title',
category='category',
objective='objective',
language_code='language_code',
community_owned=False,
contributor_ids=[self.USER_1_ID, self.USER_2_ID],
contributors_summary={self.USER_1_ID: 4},
).put()
collection_models.CollectionSummaryModel(
id='id_2',
title='title',
category='category',
objective='objective',
language_code='language_code',
community_owned=False,
contributor_ids=[self.USER_1_ID, self.USER_2_ID],
contributors_summary={self.USER_2_ID: 4},
).put()
output = self._run_one_off_job()
self.assertEqual(len(output), 2)
self.assertIn(['SUCCESS', 2], output)
self.assertIn([
'MISSING_IN_SUMMARY', [
('id_1', [self.USER_1_ID, self.USER_2_ID], {self.USER_1_ID: 4}),
('id_2', [self.USER_1_ID, self.USER_2_ID], {self.USER_2_ID: 4})
]], output)
def test_missing_in_ids_models(self):
exp_models.ExpSummaryModel(
id='id_1',
title='title',
category='category',
objective='objective',
language_code='language_code',
community_owned=False,
contributor_ids=[self.USER_1_ID],
contributors_summary={self.USER_1_ID: 2, self.USER_2_ID: 4},
).put()
collection_models.CollectionSummaryModel(
id='id_2',
title='title',
category='category',
objective='objective',
language_code='language_code',
community_owned=False,
contributor_ids=[self.USER_2_ID],
contributors_summary={self.USER_1_ID: 1, self.USER_2_ID: 3},
).put()
output = self._run_one_off_job()
self.assertEqual(len(output), 2)
self.assertIn(['SUCCESS', 2], output)
self.assertIn([
'MISSING_IN_IDS', [
(
'id_1',
[self.USER_1_ID],
{self.USER_1_ID: 2, self.USER_2_ID: 4}
),
(
'id_2',
[self.USER_2_ID],
{self.USER_1_ID: 1, self.USER_2_ID: 3}
)
]], output)
def test_combined_models(self):
exp_models.ExpSummaryModel(
id='id_1',
title='title',
category='category',
objective='objective',
language_code='language_code',
community_owned=False,
contributor_ids=[self.USER_1_ID, self.USER_1_ID, self.USER_2_ID],
contributors_summary={self.USER_2_ID: 4},
).put()
collection_models.CollectionSummaryModel(
id='id_2',
title='title',
category='category',
objective='objective',
language_code='language_code',
community_owned=False,
contributor_ids=[self.USER_2_ID, self.USER_3_ID],
contributors_summary={self.USER_1_ID: 4, self.USER_2_ID: 4},
).put()
output = self._run_one_off_job()
self.assertEqual(len(output), 4)
self.assertIn(['SUCCESS', 2], output)
self.assertIn([
'DUPLICATE_IDS', [(
'id_1',
[self.USER_1_ID, self.USER_1_ID, self.USER_2_ID],
{self.USER_2_ID: 4}
)]], output)
self.assertIn([
'MISSING_IN_SUMMARY', [
(
'id_1',
[self.USER_1_ID, self.USER_1_ID, self.USER_2_ID],
{self.USER_2_ID: 4}
),
(
'id_2',
[self.USER_2_ID, self.USER_3_ID],
{self.USER_1_ID: 4, self.USER_2_ID: 4}
)
]], output)
self.assertIn([
'MISSING_IN_IDS', [(
'id_2',
[self.USER_2_ID, self.USER_3_ID],
{self.USER_1_ID: 4, self.USER_2_ID: 4}
)]], output)
class OneOffReindexActivitiesJobTests(test_utils.GenericTestBase):
def setUp(self):
super(OneOffReindexActivitiesJobTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.owner = user_services.UserActionsInfo(self.owner_id)
explorations = [exp_domain.Exploration.create_default_exploration(
'%s' % i,
title='title %d' % i,
category='category%d' % i
) for i in python_utils.RANGE(3)]
for exp in explorations:
exp_services.save_new_exploration(self.owner_id, exp)
rights_manager.publish_exploration(self.owner, exp.id)
collections = [collection_domain.Collection.create_default_collection(
'%s' % i,
title='title %d' % i,
category='category%d' % i
) for i in python_utils.RANGE(3, 6)]
for collection in collections:
collection_services.save_new_collection(self.owner_id, collection)
rights_manager.publish_collection(self.owner, collection.id)
self.process_and_flush_pending_mapreduce_tasks()
def test_standard_operation(self):
job_id = (
activity_jobs_one_off.IndexAllActivitiesJobManager.create_new())
activity_jobs_one_off.IndexAllActivitiesJobManager.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
indexed_docs = []
def mock_add_documents_to_index(docs, index):
indexed_docs.extend(docs)
self.assertIn(index, (
search_services.SEARCH_INDEX_EXPLORATIONS,
search_services.SEARCH_INDEX_COLLECTIONS))
add_docs_swap = self.swap(
gae_search_services, 'add_documents_to_index',
mock_add_documents_to_index)
with add_docs_swap:
self.process_and_flush_pending_mapreduce_tasks()
ids = [doc['id'] for doc in indexed_docs]
titles = [doc['title'] for doc in indexed_docs]
categories = [doc['category'] for doc in indexed_docs]
for index in python_utils.RANGE(5):
self.assertIn('%s' % index, ids)
self.assertIn('title %d' % index, titles)
self.assertIn('category%d' % index, categories)
self.assertIsNone(
activity_jobs_one_off.IndexAllActivitiesJobManager.reduce(
'key', 'value'))
class MockCollectionCommitLogEntryModel(
collection_models.CollectionCommitLogEntryModel):
"""Mock CollectionCommitLogEntryModel so that it allows to set username."""
username = datastore_services.StringProperty(indexed=True, required=False)
class MockCollectionRightsModel(
collection_models.CollectionRightsModel):
"""Mock CollectionRightsModel so that it uses old version of
_trusted_commit.
"""
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this overrides the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
base_models.VersionedModel._trusted_commit( # pylint: disable=protected-access
self, committer_id, commit_type, commit_message, commit_cmds)
# Create and delete events will already be recorded in the
# CollectionModel.
if commit_type not in ['create', 'delete']:
# TODO(msl): Test if put_async() leads to any problems (make
# sure summary dicts get updated correctly when collections
# are changed).
collection_models.CollectionCommitLogEntryModel(
id=('rights-%s-%s' % (self.id, self.version)),
user_id=committer_id,
collection_id=self.id,
commit_type=commit_type,
commit_message=commit_message,
commit_cmds=commit_cmds,
version=None,
post_commit_status=self.status,
post_commit_community_owned=self.community_owned,
post_commit_is_private=(
self.status == constants.ACTIVITY_STATUS_PRIVATE)
).put()
class MockExplorationRightsModel(exp_models.ExplorationRightsModel):
"""Mock ExplorationRightsModel so that it uses old version of
_trusted_commit.
"""
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
base_models.VersionedModel._trusted_commit( # pylint: disable=protected-access
self, committer_id, commit_type, commit_message, commit_cmds)
# Create and delete events will already be recorded in the
# ExplorationModel.
if commit_type not in ['create', 'delete']:
# TODO(msl): Test if put_async() leads to any problems (make
# sure summary dicts get updated correctly when explorations
# are changed).
exp_models.ExplorationCommitLogEntryModel(
id=('rights-%s-%s' % (self.id, self.version)),
user_id=committer_id,
exploration_id=self.id,
commit_type=commit_type,
commit_message=commit_message,
commit_cmds=commit_cmds,
version=None,
post_commit_status=self.status,
post_commit_community_owned=self.community_owned,
post_commit_is_private=(
self.status == constants.ACTIVITY_STATUS_PRIVATE)
).put()
class MockTopicRightsModel(topic_models.TopicRightsModel):
"""Mock TopicRightsModel so that it uses old version of _trusted_commit."""
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
base_models.VersionedModel._trusted_commit( # pylint: disable=protected-access
self, committer_id, commit_type, commit_message, commit_cmds)
topic_rights = MockTopicRightsModel.get_by_id(self.id)
if topic_rights.topic_is_published:
status = constants.ACTIVITY_STATUS_PUBLIC
else:
status = constants.ACTIVITY_STATUS_PRIVATE
topic_models.TopicCommitLogEntryModel(
id=('rights-%s-%s' % (self.id, self.version)),
user_id=committer_id,
topic_id=self.id,
commit_type=commit_type,
commit_message=commit_message,
commit_cmds=commit_cmds,
version=None,
post_commit_status=status,
post_commit_community_owned=False,
post_commit_is_private=not topic_rights.topic_is_published
).put()
class AddContentUserIdsContentJobTests(test_utils.GenericTestBase):
COL_1_ID = 'col_1_id'
EXP_1_ID = 'exp_1_id'
TOP_1_ID = 'top_1_id'
TOP_2_ID = 'top_2_id'
USER_1_ID = 'user_1_id'
USER_2_ID = 'user_2_id'
USER_3_ID = 'user_3_id'
USER_4_ID = 'user_4_id'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
activity_jobs_one_off.AddContentUserIdsContentJob.create_new())
activity_jobs_one_off.AddContentUserIdsContentJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
activity_jobs_one_off.AddContentUserIdsContentJob.get_output(
job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
return [
[key, sorted(values) if isinstance(values, list) else values]
for key, values in eval_output]
def setUp(self):
super(AddContentUserIdsContentJobTests, self).setUp()
self.collection_rights_model_swap = self.swap(
collection_models,
'CollectionRightsModel',
MockCollectionRightsModel)
self.exploration_rights_model_swap = self.swap(
exp_models, 'ExplorationRightsModel', MockExplorationRightsModel)
self.topic_rights_model_swap = self.swap(
topic_models, 'TopicRightsModel', MockTopicRightsModel)
def test_add_content_user_ids_to_collection_rights_snapshot(self):
with self.collection_rights_model_swap:
collection_model = collection_models.CollectionRightsModel(
id=self.COL_1_ID,
owner_ids=[self.USER_1_ID],
editor_ids=[self.USER_2_ID],
voice_artist_ids=[],
viewer_ids=[],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0
)
collection_model.save(
'cid', 'Created new collection rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
collection_model.owner_ids = [self.USER_1_ID, self.USER_3_ID]
collection_model.save(
'cid', 'Change owner',
[{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_3_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
}])
output = self._run_one_off_job()
self.assertEqual(
output, [['SUCCESS-CollectionRightsSnapshotContentModel', 2]])
self.assertItemsEqual(
[self.USER_1_ID, self.USER_2_ID],
collection_models.CollectionRightsSnapshotMetadataModel
.get_by_id('%s-1' % self.COL_1_ID).content_user_ids)
self.assertItemsEqual(
[self.USER_1_ID, self.USER_2_ID, self.USER_3_ID],
collection_models.CollectionRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.COL_1_ID).content_user_ids)
def test_add_content_user_ids_to_exploration_rights_snapshot(self):
with self.exploration_rights_model_swap:
exp_model = exp_models.ExplorationRightsModel(
id=self.EXP_1_ID,
owner_ids=[self.USER_1_ID],
editor_ids=[self.USER_2_ID],
voice_artist_ids=[],
viewer_ids=[self.USER_4_ID],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0)
exp_model.save(
'cid', 'Created new exploration rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
exp_model.owner_ids = [self.USER_1_ID, self.USER_3_ID]
exp_model.save(
'cid', 'Change owner',
[{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_3_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
}])
output = self._run_one_off_job()
self.assertEqual(
output, [['SUCCESS-ExplorationRightsSnapshotContentModel', 2]])
self.assertItemsEqual(
[self.USER_1_ID, self.USER_2_ID, self.USER_4_ID],
exp_models.ExplorationRightsSnapshotMetadataModel
.get_by_id('%s-1' % self.EXP_1_ID).content_user_ids)
self.assertItemsEqual(
[self.USER_1_ID, self.USER_2_ID, self.USER_3_ID, self.USER_4_ID],
exp_models.ExplorationRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.EXP_1_ID).content_user_ids)
def test_add_content_user_ids_to_topic_rights_snapshot(self):
with self.topic_rights_model_swap:
topic_model = topic_models.TopicRightsModel(
id=self.TOP_1_ID,
manager_ids=[self.USER_1_ID, self.USER_2_ID])
topic_model.commit(
'cid', 'Created new topic rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
topic_model.manager_ids = [self.USER_2_ID, self.USER_3_ID]
topic_model.commit(
'cid', 'Change manager',
[{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_3_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
}])
output = self._run_one_off_job()
self.assertEqual(
output, [['SUCCESS-TopicRightsSnapshotContentModel', 2]])
self.assertItemsEqual(
[self.USER_1_ID, self.USER_2_ID],
topic_models.TopicRightsSnapshotMetadataModel
.get_by_id('%s-1' % self.TOP_1_ID).content_user_ids)
self.assertItemsEqual(
[self.USER_2_ID, self.USER_3_ID],
topic_models.TopicRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.TOP_1_ID).content_user_ids)
def test_add_content_user_ids_to_multiple_rights_snapshots(self):
with self.collection_rights_model_swap:
collection_model = collection_models.CollectionRightsModel(
id=self.COL_1_ID,
owner_ids=[self.USER_1_ID],
editor_ids=[],
voice_artist_ids=[],
viewer_ids=[],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0
)
collection_model.save(
'cid', 'Created new collection rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
collection_model.editor_ids = [self.USER_1_ID, self.USER_4_ID]
collection_model.save(
'cid', 'Add editors',
[{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_4_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
}])
with self.exploration_rights_model_swap:
exp_model = exp_models.ExplorationRightsModel(
id=self.EXP_1_ID,
owner_ids=[self.USER_1_ID, self.USER_2_ID],
editor_ids=[self.USER_2_ID],
voice_artist_ids=[],
viewer_ids=[self.USER_4_ID],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0)
exp_model.save(
'cid', 'Created new exploration rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
exp_model.owner_ids = [self.USER_1_ID, self.USER_3_ID]
exp_model.save(
'cid', 'Change owner',
[{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_4_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
}])
with self.topic_rights_model_swap:
topic_model_1 = topic_models.TopicRightsModel(
id=self.TOP_1_ID,
manager_ids=[self.USER_1_ID, self.USER_2_ID])
topic_model_1.commit(
'cid', 'Created new topic rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
topic_model_1.manager_ids = [self.USER_2_ID, self.USER_3_ID]
topic_model_1.commit(
'cid', 'Change manager',
[{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_3_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
}])
topic_model_2 = topic_models.TopicRightsModel(
id=self.TOP_2_ID,
manager_ids=[self.USER_1_ID])
topic_model_2.commit(
'cid', 'Created new topic rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
topic_model_2.manager_ids = [self.USER_1_ID, self.USER_4_ID]
topic_model_2.commit(
'cid', 'Change manager',
[{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_3_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
}])
output = self._run_one_off_job()
self.assertIn(
['SUCCESS-CollectionRightsSnapshotContentModel', 2], output)
self.assertIn(
['SUCCESS-ExplorationRightsSnapshotContentModel', 2], output)
self.assertIn(['SUCCESS-TopicRightsSnapshotContentModel', 4], output)
self.assertItemsEqual(
[self.USER_1_ID],
collection_models.CollectionRightsSnapshotMetadataModel
.get_by_id('%s-1' % self.COL_1_ID).content_user_ids)
self.assertItemsEqual(
[self.USER_1_ID, self.USER_4_ID],
collection_models.CollectionRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.COL_1_ID).content_user_ids)
self.assertItemsEqual(
[self.USER_1_ID, self.USER_2_ID, self.USER_4_ID],
exp_models.ExplorationRightsSnapshotMetadataModel
.get_by_id('%s-1' % self.EXP_1_ID).content_user_ids)
self.assertItemsEqual(
[self.USER_1_ID, self.USER_2_ID, self.USER_3_ID, self.USER_4_ID],
exp_models.ExplorationRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.EXP_1_ID).content_user_ids)
self.assertItemsEqual(
[self.USER_1_ID, self.USER_2_ID],
topic_models.TopicRightsSnapshotMetadataModel
.get_by_id('%s-1' % self.TOP_1_ID).content_user_ids)
self.assertItemsEqual(
[self.USER_2_ID, self.USER_3_ID],
topic_models.TopicRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.TOP_1_ID).content_user_ids)
self.assertItemsEqual(
[self.USER_1_ID],
topic_models.TopicRightsSnapshotMetadataModel
.get_by_id('%s-1' % self.TOP_2_ID).content_user_ids)
self.assertItemsEqual(
[self.USER_1_ID, self.USER_4_ID],
topic_models.TopicRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.TOP_2_ID).content_user_ids)
class AddCommitCmdsUserIdsMetadataJobTests(test_utils.GenericTestBase):
COL_1_ID = 'col_1_id'
EXP_1_ID = 'exp_1_id'
TOP_1_ID = 'top_1_id'
TOP_2_ID = 'top_2_id'
USER_3_ID = 'user_3_id'
USER_4_ID = 'user_4_id'
USER_GAE_3_ID = 'user_gae_3_id'
USERNAME_1 = 'usernamea'
USERNAME_2 = 'usernameb'
EMAIL_1 = '[email protected]'
EMAIL_2 = '[email protected]'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
activity_jobs_one_off.AddCommitCmdsUserIdsMetadataJob.create_new())
activity_jobs_one_off.AddCommitCmdsUserIdsMetadataJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
activity_jobs_one_off.AddCommitCmdsUserIdsMetadataJob.get_output(
job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
return [
[key, sorted(values) if isinstance(values, list) else values]
for key, values in eval_output]
def setUp(self):
super(AddCommitCmdsUserIdsMetadataJobTests, self).setUp()
self.collection_rights_model_swap = self.swap(
collection_models,
'CollectionRightsModel',
MockCollectionRightsModel)
self.exploration_rights_model_swap = self.swap(
exp_models, 'ExplorationRightsModel', MockExplorationRightsModel)
self.topic_rights_model_swap = self.swap(
topic_models, 'TopicRightsModel', MockTopicRightsModel)
self.signup(self.EMAIL_1, self.USERNAME_1)
self.signup(self.EMAIL_2, self.USERNAME_2)
self.USER_1_ID = self.get_user_id_from_email(self.EMAIL_1)
self.USER_2_ID = self.get_user_id_from_email(self.EMAIL_2)
self.USER_GAE_1_ID = self.get_gae_id_from_email(self.EMAIL_1)
self.USER_GAE_2_ID = self.get_gae_id_from_email(self.EMAIL_2)
def test_add_commit_cmds_user_ids_to_collection_rights_snapshot(self):
with self.collection_rights_model_swap:
collection_model = collection_models.CollectionRightsModel(
id=self.COL_1_ID,
owner_ids=[self.USER_1_ID],
editor_ids=[self.USER_2_ID],
voice_artist_ids=[],
viewer_ids=[],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0
)
collection_model.save(
'cid',
'Created new collection rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
collection_model.owner_ids = [self.USER_3_ID]
collection_model.save(
'cid',
'Change owner',
[{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_3_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
}])
output = self._run_one_off_job()
self.assertEqual(
output, [['SUCCESS-CollectionRightsSnapshotMetadataModel', 2]])
self.assertItemsEqual(
[],
collection_models.CollectionRightsSnapshotMetadataModel
.get_by_id('%s-1' % self.COL_1_ID).commit_cmds_user_ids)
self.assertItemsEqual(
[self.USER_3_ID],
collection_models.CollectionRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.COL_1_ID).commit_cmds_user_ids)
def test_add_commit_cmds_user_ids_to_exploration_rights_snapshot(self):
with self.exploration_rights_model_swap:
exp_model = exp_models.ExplorationRightsModel(
id=self.EXP_1_ID,
owner_ids=[self.USER_1_ID, self.USER_2_ID],
editor_ids=[self.USER_2_ID],
voice_artist_ids=[],
viewer_ids=[],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0)
exp_model.save(
'cid', 'Created new exploration rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
exp_model.owner_ids = [self.USER_3_ID]
exp_model.save(
'cid',
'Change owner',
[{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_3_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
}])
output = self._run_one_off_job()
self.assertEqual(
output, [['SUCCESS-ExplorationRightsSnapshotMetadataModel', 2]])
self.assertItemsEqual(
[],
exp_models.ExplorationRightsSnapshotMetadataModel
.get_by_id('%s-1' % self.EXP_1_ID).commit_cmds_user_ids)
self.assertItemsEqual(
[self.USER_3_ID],
exp_models.ExplorationRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.EXP_1_ID).commit_cmds_user_ids)
def test_fix_user_ids_in_exploration_rights_snapshot(self):
with self.exploration_rights_model_swap:
exp_model = exp_models.ExplorationRightsModel(
id=self.EXP_1_ID,
owner_ids=[self.USER_3_ID],
editor_ids=[self.USER_2_ID],
voice_artist_ids=[],
viewer_ids=[],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0)
exp_model.save(
'cid', 'Created new exploration rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
exp_model.owner_ids = [
self.USER_1_ID, self.USER_2_ID, self.USER_3_ID]
exp_model.save(
'cid',
'Change owner',
[
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_GAE_1_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
},
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_GAE_2_ID,
'old_role': rights_domain.ROLE_EDITOR,
'new_role': rights_domain.ROLE_OWNER
}
])
output = self._run_one_off_job()
self.assertItemsEqual(
output,
[
['SUCCESS-ExplorationRightsSnapshotMetadataModel', 2],
['MIGRATION_SUCCESS', 1]
]
)
self.assertItemsEqual(
[
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_1_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
},
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_2_ID,
'old_role': rights_domain.ROLE_EDITOR,
'new_role': rights_domain.ROLE_OWNER
}
],
exp_models.ExplorationRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.EXP_1_ID).commit_cmds
)
self.assertItemsEqual(
[
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_1_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
},
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_2_ID,
'old_role': rights_domain.ROLE_EDITOR,
'new_role': rights_domain.ROLE_OWNER
}
],
exp_models.ExplorationCommitLogEntryModel
.get_by_id('rights-%s-2' % self.EXP_1_ID).commit_cmds
)
def test_fix_user_ids_in_exploration_rights_snapshot_with_missing_commit(
self):
with self.exploration_rights_model_swap:
exp_model = exp_models.ExplorationRightsModel(
id=self.EXP_1_ID,
owner_ids=[self.USER_3_ID],
editor_ids=[self.USER_2_ID],
voice_artist_ids=[],
viewer_ids=[],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0)
exp_model.save(
'cid', 'Created new exploration rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
exp_model.owner_ids = [
self.USER_1_ID, self.USER_2_ID, self.USER_3_ID]
exp_model.save(
'cid',
'Change owner',
[
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_GAE_1_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
},
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_GAE_2_ID,
'old_role': rights_domain.ROLE_EDITOR,
'new_role': rights_domain.ROLE_OWNER
}
])
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'rights-%s-2' % self.EXP_1_ID
).delete()
output = self._run_one_off_job()
self.assertItemsEqual(
output,
[
['SUCCESS-ExplorationRightsSnapshotMetadataModel', 2],
[
'MIGRATION_SUCCESS_MISSING_COMMIT_LOG',
['%s-2' % self.EXP_1_ID]
]
]
)
self.assertItemsEqual(
[
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_1_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
},
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_2_ID,
'old_role': rights_domain.ROLE_EDITOR,
'new_role': rights_domain.ROLE_OWNER
}
],
exp_models.ExplorationRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.EXP_1_ID).commit_cmds
)
def test_fix_user_ids_in_exploration_rights_snapshot_with_missing_user(
self):
with self.exploration_rights_model_swap:
exp_model = exp_models.ExplorationRightsModel(
id=self.EXP_1_ID,
owner_ids=[self.USER_3_ID],
editor_ids=[self.USER_2_ID],
voice_artist_ids=[],
viewer_ids=[],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0)
exp_model.save(
'cid', 'Created new exploration rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
exp_model.owner_ids = [
self.USER_1_ID, self.USER_2_ID, self.USER_3_ID]
exp_model.save(
'cid',
'Change owner',
[
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_GAE_1_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
},
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_GAE_3_ID,
'old_role': rights_domain.ROLE_EDITOR,
'new_role': rights_domain.ROLE_OWNER
}
])
output = self._run_one_off_job()
self.assertItemsEqual(
output,
[
['SUCCESS-ExplorationRightsSnapshotMetadataModel', 2],
['MIGRATION_FAILURE', ['(\'exp_1_id-2\', u\'user_gae_3_id\')']],
]
)
self.assertItemsEqual(
[
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_GAE_1_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
},
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_GAE_3_ID,
'old_role': rights_domain.ROLE_EDITOR,
'new_role': rights_domain.ROLE_OWNER
}
],
exp_models.ExplorationRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.EXP_1_ID).commit_cmds
)
self.assertItemsEqual(
[
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_GAE_1_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
},
{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_GAE_3_ID,
'old_role': rights_domain.ROLE_EDITOR,
'new_role': rights_domain.ROLE_OWNER
}
],
exp_models.ExplorationCommitLogEntryModel
.get_by_id('rights-%s-2' % self.EXP_1_ID).commit_cmds
)
def test_add_commit_cmds_user_ids_to_topic_rights_snapshot(self):
with self.topic_rights_model_swap:
topic_model = topic_models.TopicRightsModel(
id=self.TOP_1_ID,
manager_ids=[self.USER_1_ID])
topic_model.commit(
'cid',
'Created new topic rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
topic_model.manager_ids = [self.USER_1_ID, self.USER_3_ID]
topic_model.commit(
'cid',
'Add manager',
[{
'cmd': topic_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_3_ID,
'old_role': topic_domain.ROLE_NONE,
'new_role': topic_domain.ROLE_MANAGER
}])
topic_model.manager_ids = [self.USER_3_ID]
topic_model.commit(
'cid',
'Remove manager',
[{
'cmd': topic_domain.CMD_REMOVE_MANAGER_ROLE,
'removed_user_id': self.USER_1_ID,
}])
output = self._run_one_off_job()
self.assertEqual(
output, [['SUCCESS-TopicRightsSnapshotMetadataModel', 3]])
self.assertItemsEqual(
[],
topic_models.TopicRightsSnapshotMetadataModel
.get_by_id('%s-1' % self.TOP_1_ID).commit_cmds_user_ids)
self.assertItemsEqual(
[self.USER_3_ID],
topic_models.TopicRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.TOP_1_ID).commit_cmds_user_ids)
self.assertItemsEqual(
[self.USER_1_ID],
topic_models.TopicRightsSnapshotMetadataModel
.get_by_id('%s-3' % self.TOP_1_ID).commit_cmds_user_ids)
def test_add_commit_cmds_user_ids_to_multiple_rights_snapshots(self):
with self.collection_rights_model_swap:
collection_model = collection_models.CollectionRightsModel(
id=self.COL_1_ID,
owner_ids=[],
editor_ids=[self.USER_1_ID],
voice_artist_ids=[],
viewer_ids=[],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0
)
collection_model.save(
'cid',
'Created new collection rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
collection_model.editor_ids = [self.USER_1_ID, self.USER_4_ID]
collection_model.save(
'cid',
'Add editor',
[{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_4_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_EDITOR
}])
with self.exploration_rights_model_swap:
exp_model = exp_models.ExplorationRightsModel(
id=self.EXP_1_ID,
owner_ids=[self.USER_1_ID, self.USER_2_ID],
editor_ids=[],
voice_artist_ids=[],
viewer_ids=[self.USER_4_ID],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0)
exp_model.save(
'cid', 'Created new exploration rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
exp_model.owner_ids = [
self.USER_1_ID, self.USER_2_ID, self.USER_3_ID]
exp_model.save(
'cid',
'Add owner',
[{
'cmd': rights_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_3_ID,
'old_role': rights_domain.ROLE_NONE,
'new_role': rights_domain.ROLE_OWNER
}])
with self.topic_rights_model_swap:
topic_model_1 = topic_models.TopicRightsModel(
id=self.TOP_1_ID,
manager_ids=[self.USER_1_ID, self.USER_2_ID])
topic_model_1.commit(
'cid',
'Created new topic rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
topic_model_1.manager_ids = [
self.USER_1_ID, self.USER_2_ID, self.USER_3_ID]
topic_model_1.commit(
'cid',
'Add manager',
[{
'cmd': topic_domain.CMD_CHANGE_ROLE,
'assignee_id': self.USER_3_ID,
'old_role': topic_domain.ROLE_NONE,
'new_role': topic_domain.ROLE_MANAGER
}])
topic_model_2 = topic_models.TopicRightsModel(
id=self.TOP_2_ID,
manager_ids=[self.USER_1_ID, self.USER_4_ID])
topic_model_2.commit(
'cid', 'Created new topic rights',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
topic_model_2.manager_ids = [self.USER_4_ID]
topic_model_2.commit(
'cid', 'Remove manager',
[{
'cmd': topic_domain.CMD_REMOVE_MANAGER_ROLE,
'removed_user_id': self.USER_1_ID,
}])
output = self._run_one_off_job()
self.assertItemsEqual(
output,
[
['SUCCESS-CollectionRightsSnapshotMetadataModel', 2],
['SUCCESS-ExplorationRightsSnapshotMetadataModel', 2],
['SUCCESS-TopicRightsSnapshotMetadataModel', 4]
]
)
self.assertItemsEqual(
[],
collection_models.CollectionRightsSnapshotMetadataModel
.get_by_id('%s-1' % self.COL_1_ID).commit_cmds_user_ids)
self.assertItemsEqual(
[self.USER_4_ID],
collection_models.CollectionRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.COL_1_ID).commit_cmds_user_ids)
self.assertItemsEqual(
[],
exp_models.ExplorationRightsSnapshotMetadataModel
.get_by_id('%s-1' % self.EXP_1_ID).commit_cmds_user_ids)
self.assertItemsEqual(
[self.USER_3_ID],
exp_models.ExplorationRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.EXP_1_ID).commit_cmds_user_ids)
self.assertItemsEqual(
[],
topic_models.TopicRightsSnapshotMetadataModel
.get_by_id('%s-1' % self.TOP_1_ID).commit_cmds_user_ids)
self.assertItemsEqual(
[self.USER_3_ID],
topic_models.TopicRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.TOP_1_ID).commit_cmds_user_ids)
self.assertItemsEqual(
[],
topic_models.TopicRightsSnapshotMetadataModel
.get_by_id('%s-1' % self.TOP_2_ID).commit_cmds_user_ids)
self.assertItemsEqual(
[self.USER_1_ID],
topic_models.TopicRightsSnapshotMetadataModel
.get_by_id('%s-2' % self.TOP_2_ID).commit_cmds_user_ids)
class AuditSnapshotMetadataModelsJobTests(test_utils.GenericTestBase):
COL_1_ID = 'col_1_id'
EXP_1_ID = 'exp_1_id'
TOP_1_ID = 'top_1_id'
USER_1_ID = 'user_1_id'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
activity_jobs_one_off.AuditSnapshotMetadataModelsJob.create_new())
activity_jobs_one_off.AuditSnapshotMetadataModelsJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
activity_jobs_one_off.AuditSnapshotMetadataModelsJob.get_output(
job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
return [
[key, sorted(values) if isinstance(values, list) else values]
for key, values in eval_output]
def test_audit_collection_rights_snapshot(self):
collection_models.CollectionRightsSnapshotMetadataModel(
id='%s-1' % self.COL_1_ID,
committer_id=self.USER_1_ID,
commit_type='edit',
commit_cmds=[
{
'cmd': 'some_command',
'other_field': 'test'
}, {
'cmd': 'some_other_command',
'other_field': 'test',
'different_field': 'test'
}
]
).put()
output = self._run_one_off_job()
self.assertItemsEqual(
output,
[
['collection-some_command-length-2', 1],
['collection-cmd-some_command', 1],
['collection-cmd-some_other_command', 1],
['collection-some_command-field-other_field', 1],
['collection-some_other_command-field-other_field', 1],
['collection-some_other_command-field-different_field', 1],
]
)
def test_audit_deleted_collection_rights_snapshot(self):
collection_models.CollectionRightsSnapshotMetadataModel(
id='%s-1' % self.COL_1_ID,
committer_id=self.USER_1_ID,
commit_type='edit',
commit_cmds=[
{
'cmd': 'some_command',
'other_field': 'test'
}, {
'cmd': 'some_other_command',
'other_field': 'test',
'different_field': 'test'
}
],
deleted=True
).put()
output = self._run_one_off_job()
self.assertItemsEqual(output, [['collection-deleted', 1]])
def test_audit_collection_rights_snapshot_with_missing_cmd(self):
collection_models.CollectionRightsSnapshotMetadataModel(
id='%s-1' % self.COL_1_ID,
committer_id=self.USER_1_ID,
commit_type='edit',
commit_cmds=[
{
'other_field': 'test',
'different_field': 'test'
}
]
).put()
output = self._run_one_off_job()
self.assertItemsEqual(
output,
[
['collection-missing_cmd-length-1', 1],
['collection-missing-cmd', 1],
['collection-missing_cmd-field-other_field', 1],
['collection-missing_cmd-field-different_field', 1],
]
)
def test_audit_exploration_rights_snapshot_with_empty_commit_cmds(self):
exp_models.ExplorationRightsSnapshotMetadataModel(
id='%s-1' % self.EXP_1_ID,
committer_id=self.USER_1_ID,
commit_type='edit',
commit_cmds=[]
).put()
output = self._run_one_off_job()
self.assertItemsEqual(output, [['exploration-length-0', 1]])
def test_audit_topic_rights_snapshot(self):
topic_models.TopicRightsSnapshotMetadataModel(
id='%s-1' % self.TOP_1_ID,
committer_id=self.USER_1_ID,
commit_type='edit',
commit_cmds=[
{
'cmd': 'some_command',
'other_field': 'test'
}, {
'cmd': 'some_other_command',
'other_field': 'test',
'different_field': 'test'
}
]
).put()
output = self._run_one_off_job()
self.assertItemsEqual(
output,
[
['topic-some_command-length-2', 1],
['topic-cmd-some_command', 1],
['topic-cmd-some_other_command', 1],
['topic-some_command-field-other_field', 1],
['topic-some_other_command-field-other_field', 1],
['topic-some_other_command-field-different_field', 1],
]
)
def test_audit_multiple_rights_snapshots(self):
collection_models.CollectionRightsSnapshotMetadataModel(
id='%s-1' % self.COL_1_ID,
committer_id=self.USER_1_ID,
commit_type='edit',
commit_cmds=[
{
'cmd': 'some_command',
'other_field': 'test'
}, {
'cmd': 'some_other_command',
'other_field': 'test',
'different_field': 'test'
}
]
).put()
exp_models.ExplorationRightsSnapshotMetadataModel(
id='%s-1' % self.EXP_1_ID,
committer_id=self.USER_1_ID,
commit_type='edit',
commit_cmds=[
{
'cmd': 'some_command',
'other_field': 'test'
}
]
).put()
exp_models.ExplorationRightsSnapshotMetadataModel(
id='%s-2' % self.EXP_1_ID,
committer_id=self.USER_1_ID,
commit_type='edit',
commit_cmds=[
{
'cmd': 'some_command',
'other_field': 'test'
}
]
).put()
topic_models.TopicRightsSnapshotMetadataModel(
id='%s-1' % self.TOP_1_ID,
committer_id=self.USER_1_ID,
commit_type='edit',
commit_cmds=[]
).put()
output = self._run_one_off_job()
self.assertItemsEqual(
output,
[
['collection-some_command-length-2', 1],
['collection-cmd-some_command', 1],
['collection-cmd-some_other_command', 1],
['collection-some_command-field-other_field', 1],
['collection-some_other_command-field-other_field', 1],
['collection-some_other_command-field-different_field', 1],
['exploration-some_command-length-1', 2],
['exploration-cmd-some_command', 2],
['exploration-some_command-field-other_field', 2],
['topic-length-0', 1]
]
)
class ValidateSnapshotMetadataModelsJobTests(test_utils.GenericTestBase):
ALBERT_EMAIL = '[email protected]'
ALBERT_NAME = 'albert'
EXP_ID = 'exp_id0'
COLLECTION_ID = 'collection_id0'
QUESTION_ID = 'question_id0'
SKILL_ID = 'skill_id0'
STORY_ID = 'story_id0'
TOPIC_ID = 'topic_id0'
# The subtopic snapshot ID is in the format
# '<topicId>-<subtopicNum>-<version>'.
SUBTOPIC_ID = 'topic_id0-1'
TOPIC_RIGHTS_ID = 'topic_rights_id0'
DUMMY_COMMIT_CMDS = [
{
'cmd': 'some_command',
'other_field': 'test'
}, {
'cmd': 'some_other_command',
'other_field': 'test',
'different_field': 'test'
}
]
    # A commit log entry model is not created by the commit or create
    # functions of ConfigPropertyModel and PlatformParameterModel, so these
    # models are excluded.
EXCLUDED_CLASS_NAMES = [
'ConfigPropertySnapshotMetadataModel',
'PlatformParameterSnapshotMetadataModel'
]
def setUp(self):
super(ValidateSnapshotMetadataModelsJobTests, self).setUp()
        # Set up the user who will own the test explorations.
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.process_and_flush_pending_tasks()
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_class = activity_jobs_one_off.ValidateSnapshotMetadataModelsJob
job_id = job_class.create_new()
activity_jobs_one_off.ValidateSnapshotMetadataModelsJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
activity_jobs_one_off.ValidateSnapshotMetadataModelsJob
.get_output(job_id))
eval_output = [
ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
return eval_output
def test_validate_snapshot_model_list(self):
job_class = activity_jobs_one_off.ValidateSnapshotMetadataModelsJob
actual_class_names = [
cls.__name__ for cls in job_class.SNAPSHOT_METADATA_MODELS]
class_names = [
cls.__name__ for
cls in base_models.BaseSnapshotMetadataModel.__subclasses__()]
expected_class_names = [
i for i in class_names if i not in self.EXCLUDED_CLASS_NAMES]
self.assertItemsEqual(expected_class_names, actual_class_names)
def test_correct_collection_models(self):
rights_manager.create_new_collection_rights(
self.COLLECTION_ID, self.albert_id)
collection_model = collection_models.CollectionModel(
id=self.COLLECTION_ID,
category='category',
title='title',
objective='objective',
collection_contents={
'nodes': {}
},
)
collection_model.commit(
self.albert_id, 'collection model created', self.DUMMY_COMMIT_CMDS)
actual_output = self._run_one_off_job()
expected_output = [
['FOUND PARENT MODEL - CollectionRightsSnapshotMetadataModel', 1],
['FOUND COMMIT LOGS - CollectionSnapshotMetadataModel', 1],
['FOUND PARENT MODEL - CollectionSnapshotMetadataModel', 1],
[
'COMMIT LOGS SHOULD NOT EXIST AND DOES NOT EXIST - ' +
'CollectionRightsSnapshotMetadataModel',
1
]
]
self.assertItemsEqual(expected_output, actual_output)
def test_correct_exp_models(self):
rights_manager.create_new_exploration_rights(
self.EXP_ID, self.albert_id)
exp_model = exp_models.ExplorationModel(
id=self.EXP_ID,
title='title',
category='category',
states_schema_version=1,
init_state_name='init_state_name'
)
exp_model.commit(
self.albert_id, 'exp model created', self.DUMMY_COMMIT_CMDS)
actual_output = self._run_one_off_job()
expected_output = [
[
'COMMIT LOGS SHOULD NOT EXIST AND DOES NOT EXIST - ' +
'ExplorationRightsSnapshotMetadataModel',
1
],
['FOUND COMMIT LOGS - ExplorationSnapshotMetadataModel', 1],
['FOUND PARENT MODEL - ExplorationSnapshotMetadataModel', 1],
['FOUND PARENT MODEL - ExplorationRightsSnapshotMetadataModel', 1]
]
self.assertItemsEqual(expected_output, actual_output)
def test_correct_question_models(self):
state = state_domain.State.create_default_state('ABC')
question_state_data = state.to_dict()
question_model = question_models.QuestionModel(
id=self.QUESTION_ID,
question_state_data=question_state_data,
question_state_data_schema_version=1,
language_code='en'
)
question_model.commit(
self.albert_id, 'question model created', self.DUMMY_COMMIT_CMDS)
actual_output = self._run_one_off_job()
expected_output = [
['FOUND PARENT MODEL - QuestionSnapshotMetadataModel', 1],
['FOUND COMMIT LOGS - QuestionSnapshotMetadataModel', 1]
]
self.assertItemsEqual(expected_output, actual_output)
def test_correct_skill_models(self):
skill_model = skill_models.SkillModel(
id=self.SKILL_ID,
description='description',
language_code='en',
misconceptions=[],
rubrics=[],
next_misconception_id=0,
misconceptions_schema_version=1,
rubric_schema_version=1,
skill_contents_schema_version=1,
all_questions_merged=False
)
skill_model.commit(
self.albert_id, 'skill model created', self.DUMMY_COMMIT_CMDS)
actual_output = self._run_one_off_job()
expected_output = [
['FOUND PARENT MODEL - SkillSnapshotMetadataModel', 1],
['FOUND COMMIT LOGS - SkillSnapshotMetadataModel', 1]
]
self.assertItemsEqual(expected_output, actual_output)
def test_correct_story_models(self):
story_model = story_models.StoryModel(
id=self.STORY_ID,
title='title',
description='Story description',
language_code='en',
story_contents_schema_version=1,
corresponding_topic_id=self.TOPIC_ID,
url_fragment='story'
)
story_model.commit(
self.albert_id, 'story model created', self.DUMMY_COMMIT_CMDS)
actual_output = self._run_one_off_job()
expected_output = [
['FOUND PARENT MODEL - StorySnapshotMetadataModel', 1],
['FOUND COMMIT LOGS - StorySnapshotMetadataModel', 1]
]
self.assertItemsEqual(expected_output, actual_output)
def test_correct_topic_models(self):
topic_rights = topic_models.TopicRightsModel(
id=self.TOPIC_ID,
manager_ids=[],
topic_is_published=True
)
topic_rights.commit(
self.albert_id, 'topic rights model created',
[{'cmd': 'create_new'}])
topic_model = topic_models.TopicModel(
id=self.TOPIC_ID,
name='name',
url_fragment='name-two',
canonical_name='canonical_name',
next_subtopic_id=1,
language_code='en',
subtopic_schema_version=0,
story_reference_schema_version=0
)
topic_model.commit(
self.albert_id, 'topic model created', self.DUMMY_COMMIT_CMDS)
actual_output = self._run_one_off_job()
expected_output = [
['FOUND PARENT MODEL - TopicRightsSnapshotMetadataModel', 1],
['FOUND COMMIT LOGS - TopicSnapshotMetadataModel', 1],
['FOUND PARENT MODEL - TopicSnapshotMetadataModel', 1],
['FOUND COMMIT LOGS - TopicRightsSnapshotMetadataModel', 1]
]
self.assertItemsEqual(expected_output, actual_output)
def test_correct_subtopic_models(self):
subtopic_page = subtopic_models.SubtopicPageModel(
id=self.SUBTOPIC_ID,
topic_id=self.TOPIC_ID,
page_contents={},
page_contents_schema_version=1,
language_code='en'
)
subtopic_page.commit(
self.albert_id, 'subtopic model created', self.DUMMY_COMMIT_CMDS)
actual_output = self._run_one_off_job()
expected_output = [
['FOUND COMMIT LOGS - SubtopicPageSnapshotMetadataModel', 1],
['FOUND PARENT MODEL - SubtopicPageSnapshotMetadataModel', 1]
]
self.assertItemsEqual(expected_output, actual_output)
def test_missing_collection_commit_logs(self):
collection_models.CollectionSnapshotMetadataModel(
id='%s-1' % self.COLLECTION_ID,
committer_id=self.albert_id,
commit_type='edit',
commit_cmds=self.DUMMY_COMMIT_CMDS
).put()
actual_output = self._run_one_off_job()
expected_output = [
[
'VALIDATION FAILURE - MISSING PARENT MODEL' +
' - CollectionSnapshotMetadataModel',
['collection_id0-1']
],
[
'VALIDATION FAILURE - MISSING COMMIT LOGS' +
' - CollectionSnapshotMetadataModel',
['collection_id0-1']
]
]
self.assertItemsEqual(expected_output, actual_output)
def test_missing_exp_commit_logs(self):
exp_models.ExplorationRightsSnapshotMetadataModel(
id='%s-1' % self.EXP_ID,
committer_id=self.albert_id,
commit_type='edit',
commit_cmds=self.DUMMY_COMMIT_CMDS
).put()
actual_output = self._run_one_off_job()
expected_output = [
[
'VALIDATION FAILURE - MISSING PARENT MODEL' +
' - ExplorationRightsSnapshotMetadataModel',
['exp_id0-1']
],
[
'VALIDATION FAILURE - MISSING COMMIT LOGS' +
' - ExplorationRightsSnapshotMetadataModel',
['exp_id0-1']
]
]
self.assertItemsEqual(expected_output, actual_output)
def test_missing_question_commit_logs(self):
question_models.QuestionSnapshotMetadataModel(
id='%s-1' % self.QUESTION_ID,
committer_id=self.albert_id,
commit_type='edit',
commit_cmds=self.DUMMY_COMMIT_CMDS
).put()
actual_output = self._run_one_off_job()
expected_output = [
[
'VALIDATION FAILURE - MISSING PARENT MODEL' +
' - QuestionSnapshotMetadataModel',
['question_id0-1']
],
[
'VALIDATION FAILURE - MISSING COMMIT LOGS' +
' - QuestionSnapshotMetadataModel',
['question_id0-1']
]
]
self.assertItemsEqual(expected_output, actual_output)
def test_missing_skill_commit_logs(self):
skill_models.SkillSnapshotMetadataModel(
id='%s-1' % self.SKILL_ID,
committer_id=self.albert_id,
commit_type='edit',
commit_cmds=self.DUMMY_COMMIT_CMDS
).put()
actual_output = self._run_one_off_job()
expected_output = [
[
'VALIDATION FAILURE - MISSING PARENT MODEL' +
' - SkillSnapshotMetadataModel',
['skill_id0-1']
],
[
'VALIDATION FAILURE - MISSING COMMIT LOGS' +
' - SkillSnapshotMetadataModel',
['skill_id0-1']
]
]
self.assertItemsEqual(expected_output, actual_output)
def test_missing_story_commit_logs(self):
story_models.StorySnapshotMetadataModel(
id='%s-1' % self.STORY_ID,
committer_id=self.albert_id,
commit_type='edit',
commit_cmds=self.DUMMY_COMMIT_CMDS
).put()
actual_output = self._run_one_off_job()
expected_output = [
[
'VALIDATION FAILURE - MISSING PARENT MODEL' +
' - StorySnapshotMetadataModel',
['story_id0-1']
],
[
'VALIDATION FAILURE - MISSING COMMIT LOGS' +
' - StorySnapshotMetadataModel',
['story_id0-1']
]
]
self.assertItemsEqual(expected_output, actual_output)
def test_missing_topic_commit_logs(self):
topic_models.TopicSnapshotMetadataModel(
id='%s-1' % self.TOPIC_ID,
committer_id=self.albert_id,
commit_type='edit',
commit_cmds=self.DUMMY_COMMIT_CMDS
).put()
actual_output = self._run_one_off_job()
expected_output = [
[
'VALIDATION FAILURE - MISSING PARENT MODEL' +
' - TopicSnapshotMetadataModel',
['topic_id0-1']
],
[
'VALIDATION FAILURE - MISSING COMMIT LOGS' +
' - TopicSnapshotMetadataModel',
['topic_id0-1']
]
]
self.assertItemsEqual(expected_output, actual_output)
def test_missing_subtopic_commit_logs(self):
subtopic_models.SubtopicPageSnapshotMetadataModel(
id='%s-1' % self.SUBTOPIC_ID,
committer_id=self.albert_id,
commit_type='edit',
commit_cmds=self.DUMMY_COMMIT_CMDS
).put()
actual_output = self._run_one_off_job()
expected_output = [
[
'VALIDATION FAILURE - MISSING PARENT MODEL' +
' - SubtopicPageSnapshotMetadataModel',
['topic_id0-1-1']
],
[
'VALIDATION FAILURE - MISSING COMMIT LOGS' +
' - SubtopicPageSnapshotMetadataModel',
['topic_id0-1-1']
]
]
self.assertItemsEqual(expected_output, actual_output)
def test_missing_topic_rights_commit_logs(self):
topic_models.TopicRightsSnapshotMetadataModel(
id='%s-1' % self.TOPIC_RIGHTS_ID,
committer_id=self.albert_id,
commit_type='edit',
commit_cmds=self.DUMMY_COMMIT_CMDS
).put()
actual_output = self._run_one_off_job()
expected_output = [
[
'VALIDATION FAILURE - MISSING PARENT MODEL' +
' - TopicRightsSnapshotMetadataModel',
['topic_rights_id0-1']
],
[
'VALIDATION FAILURE - MISSING COMMIT LOGS' +
' - TopicRightsSnapshotMetadataModel',
['topic_rights_id0-1']
]
]
self.assertItemsEqual(expected_output, actual_output)
| 38.890654 | 87 | 0.582054 | ["Apache-2.0"] | AnanyaNegi/oppia | core/domain/activity_jobs_one_off_test.py | 83,226 | Python
import tensorflow as tf
import numpy as np
import hyperchamber as hc
import inspect
from hypergan.trainers.base_trainer import BaseTrainer
TINY = 1e-12
class EvolutionTrainer(BaseTrainer):
def _create(self):
gan = self.gan
generator = self.gan.generator
config = self.config
d_vars = self.d_vars or gan.discriminator.variables()
loss = self.loss or gan.loss
d_loss, g_loss = loss.sample
self.d_log = -tf.log(tf.abs(d_loss+TINY))
d_optimizer = self.build_optimizer(config, 'd_', config.d_trainer, self.d_lr, d_vars, d_loss)
#TODO more than one g_loss
g_optimizer = [self.build_optimizer(config, 'g_', config.g_trainer, self.g_lr, g.variables(), g_loss) for g, l in zip(generator.children, loss.children_losses)]
assign_children = []
for p, o in generator.parent_child_tuples:
for ov, pv in zip(o.variables(), p.variables()):
op=tf.assign(ov, pv)
if config.mutation_percent:
op += tf.random_normal(self.gan.ops.shape(pv), mean=0, stddev=0.01) * tf.cast(tf.greater(config.mutation_percent, tf.random_uniform(shape=self.gan.ops.shape(pv), minval=0, maxval=1)), tf.float32)
assign_children.append(op)
self.clone_parent = tf.group(*assign_children)
update_parent=[]
for p, o in generator.parent_child_tuples:
c_to_p = []
for ov, pv in zip(o.variables(), p.variables()):
op=tf.assign(pv, ov)
c_to_p.append(op)
update_parent.append(tf.group(*c_to_p))
self.update_parent = update_parent
f_lambda = config.f_lambda or 1
def _squash(grads):
return tf.add_n([tf.reshape(gan.ops.squash(g), [1]) for g in grads])
children_grads = [_squash(tf.gradients(l, d_vars)) for l in loss.children_losses]
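        # Fitness per child generator, used in _step to pick winners: either the
        # negated child loss alone (config.fitness == "g") or, by default, the
        # negated loss plus an f_lambda-weighted log term that combines the
        # squashed gradient of the child loss w.r.t. the discriminator variables
        # with sigmoid-transformed discriminator and child losses.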
if config.fitness == "g":
self.measure_g = [-l for l in loss.children_losses]
else:
self.measure_g = [-l+f_lambda*(-tf.log(TINY+grad_d - tf.log(TINY+tf.nn.sigmoid(loss.d_loss)) - tf.log(TINY+1-tf.nn.sigmoid(l)))) for l, grad_d in zip(loss.children_losses, children_grads)]
loss.metrics['measure_g'] = tf.reduce_mean(self.measure_g)
loss.metrics['g_loss'] = loss.g_loss
loss.metrics['d_loss'] = loss.d_loss
self.g_loss = g_loss
self.d_loss = d_loss
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.hist = [0 for i in range(len(self.gan.generator.children))]
return g_optimizer, d_optimizer
def _step(self, feed_dict):
gan = self.gan
sess = gan.session
config = self.config
loss = self.loss or gan.loss
metrics = loss.metrics
generator = gan.generator
d_loss, g_loss = loss.sample
#winner = np.random.choice(range(len(gan.generator.children)))
winners = []
for i in range(len(generator.parents)):
child_count = generator.config.child_count
choices = self.measure_g[i*child_count:(i+1)*child_count]
choice = np.argmax(sess.run(choices))
winner = i*child_count + choice
self.hist[winner]+=1
winners.append(winner)
sess.run([self.update_parent[winner] for winner in winners])
for i in range(config.d_update_steps or 1):
sess.run(self.d_optimizer)
sess.run(self.clone_parent)
for i in range(config.g_update_steps or 1):
sess.run(self.g_optimizer)
measure_g = sess.run(self.measure_g)
if self.current_step % 100 == 0:
hist_output = " " + "".join(["G"+str(i)+":"+str(v)+" "for i, v in enumerate(self.hist)])
metric_values = sess.run(self.output_variables(metrics), feed_dict)
print(str(self.output_string(metrics) % tuple([self.current_step] + metric_values)+hist_output))
self.hist = [0 for i in range(len(self.gan.generator.children))]
| 39.833333 | 215 | 0.62097 | ["MIT"] | Darkar25/HyperGAN | hypergan/trainers/experimental/evolution_trainer.py | 4,063 | Python
import csv
import numpy as np
import re
import itertools
from collections import Counter
from collections import namedtuple
DataPoint = namedtuple('DataPoint', ['PhraseId', 'SentenceId', 'Phrase', 'Sentiment'])
def load_datapoints(data_file):
datapoints = []
with open(data_file) as f:
reader = csv.DictReader(f, delimiter='\t')
for row in reader:
if 'Sentiment' not in row:
row['Sentiment'] = None
dp = DataPoint(**row)
datapoints.append(dp)
return datapoints
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
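# Illustrative example of clean_str on a short phrase (output derived from the
# regexes above): clean_str("It's great!") -> "it 's great !"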
def extract_phrases_in_datapoints(datapoints):
x_text = [dp.Phrase for dp in datapoints]
return [clean_str(sent) for sent in x_text]
def extract_phraseids_in_datapoints(datapoints):
return [dp.PhraseId for dp in datapoints]
def load_data_and_labels(data_file):
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
datapoints = load_datapoints(data_file)
x_text = extract_phrases_in_datapoints(datapoints)
y = [int(dp.Sentiment) for dp in datapoints]
def one_hot(i):
return [0] * i + [1] + [0] * (4-i)
y_vector = []
for sentiment in y:
y_vector.append(one_hot(sentiment))
return [x_text, np.array(y_vector)]
def batch_iter(data, batch_size, num_epochs, shuffle=True):
"""
Generates a batch iterator for a dataset.
"""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int(len(data)/batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
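if __name__ == '__main__':
    # Minimal illustration of batch_iter (added example; the toy data below is
    # arbitrary and not from any real dataset).
    toy_data = [('phrase %d' % i, i % 5) for i in range(10)]
    for batch in batch_iter(toy_data, batch_size=4, num_epochs=1, shuffle=False):
        print([phrase for phrase, _ in batch])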
| 30.702128 | 91 | 0.620582 | ["Apache-2.0"] | pychuang/ist557-data-mining-cnn | data_helpers.py | 2,886 | Python
import numpy as np
import pytest
import random
from mujoco_py import (MjSim, load_model_from_xml, cymj)
MODEL_XML = """
<mujoco model="inverted pendulum">
<size nuserdata="100"/>
<compiler inertiafromgeom="true"/>
<default>
<joint armature="0" damping="1" limited="true"/>
<geom contype="0" friction="1 0.1 0.1" rgba="0.7 0.7 0 1"/>
<tendon/>
<motor ctrlrange="-3 3"/>
</default>
<option gravity="0 0 -9.81" integrator="RK4" timestep="0.001"/>
<size nstack="3000"/>
<worldbody>
<geom name="rail" pos="0 0 0" quat="0.707 0 0.707 0" rgba="0.3 0.3 0.7 1" size="0.02 1" type="capsule"/>
<body name="cart" pos="0 0 0">
<geom name="cart" pos="0 0 0" quat="0.707 0 0.707 0" size="0.1 0.1" type="capsule"/>
<body name="pole" pos="0 0 0">
<joint axis="0 1 0" name="hinge" pos="0 0 0" range="-90 90" type="hinge"/>
<geom fromto="0 0 0 0.001 0 0.6" name="cpole" rgba="0 0.7 0.7 1" size="0.049 0.3" type="capsule"/>
</body>
</body>
</worldbody>
<actuator>
{actuator}
</actuator>
</mujoco>
"""
PID_ACTUATOR = """
<general ctrlrange='-1 1' gaintype="user" biastype="user" forcerange="-100 100" gainprm="200 10 10.0 0.1 0.1 0" joint="hinge" name="a-hinge"/>
"""
P_ONLY_ACTUATOR = """
<general ctrlrange='-1 1' gaintype="user" biastype="user" gainprm="200" joint="hinge" name="a-hinge"/>
"""
POSITION_ACTUATOR = """
<position ctrlrange='-1 1' kp=200 joint="hinge" name="a-hinge"/>
"""
"""
To enable PID control in MuJoCo, refer to the settings in PID_ACTUATOR above.
Here we set Kp = 200, Ti = 10, Td = 0.1 (and also iClamp = 10.0, dSmooth = 0.1).
"""
def test_mj_pid():
xml = MODEL_XML.format(actuator=PID_ACTUATOR)
model = load_model_from_xml(xml)
sim = MjSim(model)
cymj.set_pid_control(sim.model, sim.data)
    # perturbation of the pole so that it starts unbalanced
init_pos = 0.1 * (random.random() - 0.5)
print('init pos', init_pos)
sim.data.qpos[0] = init_pos
pos = 0.0
sim.data.ctrl[0] = pos
    print('desired position:', pos)
for _ in range(100):
sim.step()
print('final pos', sim.data.qpos[0])
assert abs(sim.data.qpos[0] - pos) < 0.01
"""
Check that the new PID control is backward compatible with position control
when it only has a Kp term.
"""
def test_mj_proptional_only():
model = load_model_from_xml(MODEL_XML.format(actuator=P_ONLY_ACTUATOR))
sim = MjSim(model)
cymj.set_pid_control(sim.model, sim.data)
model2 = load_model_from_xml(MODEL_XML.format(actuator=POSITION_ACTUATOR))
sim2 = MjSim(model2)
init_pos = 0.1 * (random.random() - 0.5)
sim.data.qpos[0] = sim2.data.qpos[0] = init_pos
sim.data.ctrl[0] = sim2.data.ctrl[0] = 0
for i in range(2000):
print(i, sim.data.qpos[0], sim2.data.qpos[0])
sim.step()
sim2.step()
assert abs(sim.data.qpos[0] - sim2.data.qpos[0]) <= 1e-7, "%d step violates" % i
| 29.189474 | 143 | 0.659574 | ["MIT"] | aaronhan223/CS394R_Final_Project | mujoco-py/mujoco_py/tests/test_pid.py | 2,773 | Python
from torchtext import data
import spacy
import dill
BOS_WORD = '<s>'
EOS_WORD = '</s>'
BLANK_WORD = "<blank>"
spacy_en = spacy.load('en')
spacy_de = spacy.load('de')
def tokenizer_en(text):
return [tok.text for tok in spacy_en.tokenizer(text)]
def tokenizer_de(text):
return [tok.text for tok in spacy_de.tokenizer(text)]
SRC = data.Field(tokenize=tokenizer_de, pad_token=BLANK_WORD)
TGT = data.Field(tokenize=tokenizer_en, init_token = BOS_WORD, eos_token = EOS_WORD, pad_token=BLANK_WORD)
data_fields = [('German', SRC), ('English', TGT)]
train, val, test = data.TabularDataset.splits(path='./data', train='train.csv', validation='val.csv', test='test.csv', format='csv', fields=data_fields, skip_header=True)
SRC.build_vocab(train.German)
TGT.build_vocab(train.English)
with open("./data/src_vocab.pt", "wb")as f:
dill.dump(SRC, f)
with open("./data/tgt_vocab.pt", "wb")as f:
dill.dump(TGT, f)
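# Example of restoring the pickled fields later (illustrative helper; the
# function name and default paths below simply mirror the dump calls above).
def load_vocab_fields(src_path="./data/src_vocab.pt", tgt_path="./data/tgt_vocab.pt"):
    with open(src_path, "rb") as f:
        src_field = dill.load(f)
    with open(tgt_path, "rb") as f:
        tgt_field = dill.load(f)
    return src_field, tgt_field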
| 28.8125 | 170 | 0.715835 | ["MIT"] | abhishek1907/transformer | data_loader.py | 922 | Python
import nanome
from nanome.util import Logs
from nanome._internal._network import PluginNetwork, _Packet
from nanome._internal._process import ProcessManagerInstance
from nanome._internal._network._commands._callbacks import _Messages
from nanome._internal._network._commands._callbacks._commands_enums import _Hashes
import traceback
import time
from timeit import default_timer as timer
try:
import asyncio
from ._plugin_instance_async import async_update_loop
except ImportError:
asyncio = False
UPDATE_RATE = 1.0 / 60.0
MINIMUM_SLEEP = 0.001
__metaclass__ = type
class _PluginInstance(object):
__callbacks = dict()
__futures = dict()
__complex_updated_callbacks = dict()
__selection_changed_callbacks = dict()
def _setup(
self, session_id, plugin_network, proc_pipe, log_pipe_conn,
original_version_table, custom_data, permissions):
self._menus = {}
self._run_text = "Run"
self._run_usable = True
self._advanced_settings_text = "Advanced Settings"
self._advanced_settings_usable = True
self._custom_data = custom_data
self._permissions = permissions
self._network = plugin_network
self._process_manager = ProcessManagerInstance(proc_pipe)
self._log_pipe_conn = log_pipe_conn
self._network._send_connect(_Messages.connect, [_Packet._compression_type(), original_version_table])
Logs.debug("Plugin constructed for session", session_id)
@classmethod
def _save_callback(cls, id, callback):
if callback is None:
if asyncio and nanome.PluginInstance._instance.is_async:
loop = asyncio.get_event_loop()
future = loop.create_future()
cls.__futures[id] = future
return future
else:
cls.__callbacks[id] = lambda *_: None
else:
cls.__callbacks[id] = callback
def _call(self, id, *args):
callbacks = self.__callbacks
futures = self.__futures
if asyncio and self.is_async and futures.get(id):
futures[id].set_result(args[0] if len(args) == 1 else args)
del futures[id]
return
if id not in callbacks:
Logs.warning('Received an unknown callback id:', id)
return
callbacks[id](*args)
del callbacks[id]
@classmethod
def _hook_complex_updated(cls, index, callback):
cls.__complex_updated_callbacks[index] = callback
@classmethod
def _hook_selection_changed(cls, index, callback):
cls.__selection_changed_callbacks[index] = callback
@classmethod
def _on_complex_updated(cls, index, new_complex):
callbacks = cls.__complex_updated_callbacks
try:
callbacks[index](new_complex)
except KeyError:
Logs.warning('Received an unknown updated complex index:', index)
@classmethod
def _on_selection_changed(cls, index, new_complex):
callbacks = cls.__selection_changed_callbacks
try:
callbacks[index](new_complex)
except KeyError:
Logs.warning('Received an unknown updated complex index:', index)
def _on_stop(self):
try:
self.on_stop()
except:
Logs.error("Error in on_stop function:", traceback.format_exc())
def _update_loop(self):
try:
self.start()
last_update = timer()
while self._network._receive() and self._process_manager.update():
self.update()
dt = last_update - timer()
sleep_time = max(UPDATE_RATE - dt, MINIMUM_SLEEP)
time.sleep(sleep_time)
last_update = timer()
except KeyboardInterrupt:
self._on_stop()
return
except Exception as e:
text = ' '.join(map(str, e.args))
msg = "Uncaught " + type(e).__name__ + ": " + text
Logs.error(msg)
# Give log a little time to reach destination before closing pipe
time.sleep(0.1)
self._on_stop()
self._process_manager._close()
self._network._close()
return
def _run(self):
if asyncio and self.is_async:
coro = async_update_loop(self, UPDATE_RATE, MINIMUM_SLEEP)
asyncio.run(coro)
else:
self._update_loop()
def _has_permission(self, permission):
return _Hashes.PermissionRequestHashes[permission] in self._permissions
| 32.609929 | 109 | 0.637669 | ["MIT"] | nanome-ai/Nanome | nanome/_internal/_plugin_instance.py | 4,598 | Python
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
import os
from io import BytesIO
from subprocess import PIPE, Popen
from tempfile import mkstemp
from PIL import Image, ImageDraw, ImageFile, ImageSequence, JpegImagePlugin
from PIL import features as pillow_features
from thumbor.engines import BaseEngine
from thumbor.engines.extensions.pil import GifWriter
from thumbor.utils import deprecated, logger
try:
from thumbor.ext.filters import _composite
FILTERS_AVAILABLE = True
except ImportError:
FILTERS_AVAILABLE = False
FORMATS = {
".tif": "PNG", # serve tif as png
".jpg": "JPEG",
".jpeg": "JPEG",
".gif": "GIF",
".png": "PNG",
".webp": "WEBP",
}
ImageFile.MAXBLOCK = 2 ** 25
ImageFile.LOAD_TRUNCATED_IMAGES = True
DECOMPRESSION_BOMB_EXCEPTIONS = (Image.DecompressionBombWarning,)
if hasattr(Image, "DecompressionBombError"):
DECOMPRESSION_BOMB_EXCEPTIONS += (Image.DecompressionBombError,)
class Engine(BaseEngine):
def __init__(self, context):
super().__init__(context)
self.subsampling = None
self.qtables = None
self.original_mode = None
self.exif = None
try:
if self.context.config.MAX_PIXELS is None or int(
self.context.config.MAX_PIXELS
):
Image.MAX_IMAGE_PIXELS = self.context.config.MAX_PIXELS
except (AttributeError, TypeError, ValueError): # invalid type
logger.info(
"MAX_PIXELS config variable set to invalid type. Has to be int on None"
)
def gen_image(self, size, color):
if color == "transparent":
color = None
img = Image.new("RGBA", size, color)
return img
def create_image(self, buffer):
try:
img = Image.open(BytesIO(buffer))
except DECOMPRESSION_BOMB_EXCEPTIONS as error:
logger.warning("[PILEngine] create_image failed: %s", error)
return None
self.icc_profile = img.info.get("icc_profile")
self.exif = img.info.get("exif")
self.original_mode = img.mode
self.subsampling = JpegImagePlugin.get_sampling(img)
if self.subsampling == -1: # n/a for this file
self.subsampling = None
self.qtables = getattr(img, "quantization", None)
if self.context.config.ALLOW_ANIMATED_GIFS and self.extension == ".gif":
frames = []
for frame in ImageSequence.Iterator(img):
frames.append(frame.convert("P"))
img.seek(0)
self.frame_count = len(frames)
return frames
return img
def get_resize_filter(self):
config = self.context.config
resample = (
config.PILLOW_RESAMPLING_FILTER
if config.PILLOW_RESAMPLING_FILTER is not None
else "LANCZOS"
)
available = {
"LANCZOS": Image.LANCZOS,
"NEAREST": Image.NEAREST,
"BILINEAR": Image.BILINEAR,
"BICUBIC": Image.BICUBIC,
"HAMMING": Image.HAMMING,
}
return available.get(resample.upper(), Image.LANCZOS)
def draw_rectangle(self, x, y, width, height):
# Nasty retry if the image is loaded for the first time and it's truncated
try:
draw_image = ImageDraw.Draw(self.image)
except IOError:
draw_image = ImageDraw.Draw(self.image)
draw_image.rectangle([x, y, x + width, y + height])
del draw_image
def resize(self, width, height):
# Indexed color modes (such as 1 and P) will be forced to use a
# nearest neighbor resampling algorithm. So we convert them to
# RGB(A) mode before resizing to avoid nasty scaling artifacts.
if self.image.mode in ["1", "P"]:
logger.debug(
"converting image from 8-bit/1-bit palette to 32-bit RGB(A) for resize"
)
if self.image.mode == "1":
target_mode = "RGB"
else:
# convert() figures out RGB or RGBA based on palette used
target_mode = None
self.image = self.image.convert(mode=target_mode)
size = (int(width), int(height))
# Tell image loader what target size we want (only JPG for a moment)
self.image.draft(None, size)
resample = self.get_resize_filter()
self.image = self.image.resize(size, resample)
def crop(self, left, top, right, bottom):
self.image = self.image.crop((int(left), int(top), int(right), int(bottom)))
def rotate(self, degrees):
# PIL rotates counter clockwise
if degrees == 90:
self.image = self.image.transpose(Image.ROTATE_90)
elif degrees == 180:
self.image = self.image.transpose(Image.ROTATE_180)
elif degrees == 270:
self.image = self.image.transpose(Image.ROTATE_270)
else:
self.image = self.image.rotate(degrees, expand=1)
def flip_vertically(self):
self.image = self.image.transpose(Image.FLIP_TOP_BOTTOM)
def flip_horizontally(self):
self.image = self.image.transpose(Image.FLIP_LEFT_RIGHT)
def get_default_extension(self):
# extension is not present => force JPEG or PNG
if self.image.mode in ["P", "RGBA", "LA"]:
return ".png"
return ".jpeg"
# TODO: Refactor this - pylint: disable=too-many-statements,too-many-branches
def read(self, extension=None, quality=None): # NOQA
# returns image buffer in byte format.
img_buffer = BytesIO()
requested_extension = extension or self.extension
# 1 and P mode images will be much smaller if converted back to
# their original mode. So let's do that after resizing. Get $$.
if (
self.context.config.PILLOW_PRESERVE_INDEXED_MODE
and requested_extension in [None, ".png", ".gif"]
and self.original_mode in ["P", "1"]
and self.original_mode != self.image.mode
):
if self.original_mode == "1":
self.image = self.image.convert("1")
else:
# libimagequant might not be enabled on compile time
# but it's better than default octree for RGBA images
quantize_method = (
Image.LIBIMAGEQUANT
if pillow_features.check("libimagequant")
else None
)
self.image = self.image.quantize(method=quantize_method)
ext = requested_extension or self.get_default_extension()
options = {"quality": quality}
if ext in (".jpg", ".jpeg"):
options["optimize"] = True
if self.context.config.PROGRESSIVE_JPEG:
# Can't simply set options['progressive'] to the value
# of self.context.config.PROGRESSIVE_JPEG because save
# operates on the presence of the key in **options, not
# the value of that setting.
options["progressive"] = True
if self.image.mode != "RGB":
self.image = self.image.convert("RGB")
else:
subsampling_config = self.context.config.PILLOW_JPEG_SUBSAMPLING
qtables_config = self.context.config.PILLOW_JPEG_QTABLES
if subsampling_config is not None or qtables_config is not None:
# can't use 'keep' here as Pillow would try to extract
# qtables/subsampling and fail
options["quality"] = 0
orig_subsampling = self.subsampling
orig_qtables = self.qtables
if (
subsampling_config == "keep" or subsampling_config is None
) and (orig_subsampling is not None):
options["subsampling"] = orig_subsampling
else:
options["subsampling"] = subsampling_config
if (qtables_config == "keep" or qtables_config is None) and (
orig_qtables and 2 <= len(orig_qtables) <= 4
):
options["qtables"] = orig_qtables
else:
options["qtables"] = qtables_config
if ext == ".png" and self.context.config.PNG_COMPRESSION_LEVEL is not None:
options["compress_level"] = self.context.config.PNG_COMPRESSION_LEVEL
if options["quality"] is None:
options["quality"] = self.context.config.QUALITY
if self.icc_profile is not None:
options["icc_profile"] = self.icc_profile
if self.context.config.PRESERVE_EXIF_INFO:
if self.exif is not None:
options["exif"] = self.exif
try:
if ext == ".webp":
if options["quality"] == 100:
logger.debug("webp quality is 100, using lossless instead")
options["lossless"] = True
options.pop("quality")
if self.image.mode not in ["RGB", "RGBA"]:
if self.image.mode == "P":
mode = "RGBA"
else:
mode = "RGBA" if self.image.mode[-1] == "A" else "RGB"
self.image = self.image.convert(mode)
if ext in [".png", ".gif"] and self.image.mode == "CMYK":
self.image = self.image.convert("RGBA")
self.image.format = FORMATS.get(ext, FORMATS[self.get_default_extension()])
self.image.save(img_buffer, self.image.format, **options)
except IOError:
logger.exception(
"Could not save as improved image, consider to increase ImageFile.MAXBLOCK"
)
self.image.save(img_buffer, FORMATS[ext])
results = img_buffer.getvalue()
img_buffer.close()
self.extension = ext
return results
def read_multiple(self, images, extension=None):
gif_writer = GifWriter()
img_buffer = BytesIO()
duration = []
converted_images = []
coordinates = []
dispose = []
for image in images:
duration.append(image.info.get("duration", 80) / 1000)
converted_images.append(image.convert("RGB"))
coordinates.append((0, 0))
dispose.append(1)
loop = int(self.image.info.get("loop", 1))
images = gif_writer.convertImagesToPIL(converted_images, False, None)
gif_writer.writeGifToFile(
img_buffer, images, duration, loop, coordinates, dispose
)
results = img_buffer.getvalue()
img_buffer.close()
tmp_fd, tmp_file_path = mkstemp()
temp_file = os.fdopen(tmp_fd, "wb")
temp_file.write(results)
temp_file.close()
command = ["gifsicle", "--colors", "256", tmp_file_path]
popen = Popen(command, stdout=PIPE) # pylint: disable=consider-using-with
pipe = popen.stdout
pipe_output = pipe.read()
pipe.close()
if popen.wait() == 0:
results = pipe_output
os.remove(tmp_file_path)
return results
@deprecated("Use image_data_as_rgb instead.")
def get_image_data(self):
return self.image.tobytes()
def set_image_data(self, data):
self.image.frombytes(data)
@deprecated("Use image_data_as_rgb instead.")
def get_image_mode(self):
return self.image.mode
def image_data_as_rgb(self, update_image=True):
converted_image = self.image
if converted_image.mode not in ["RGB", "RGBA"]:
if "A" in converted_image.mode:
converted_image = converted_image.convert("RGBA")
elif converted_image.mode == "P":
# convert() figures out RGB or RGBA based on palette used
converted_image = converted_image.convert(None)
else:
converted_image = converted_image.convert("RGB")
if update_image:
self.image = converted_image
return converted_image.mode, converted_image.tobytes()
def convert_to_grayscale(self, update_image=True, alpha=True):
if "A" in self.image.mode and alpha:
image = self.image.convert("LA")
else:
image = self.image.convert("L")
if update_image:
self.image = image
return image
def has_transparency(self):
has_transparency = "A" in self.image.mode or "transparency" in self.image.info
if has_transparency:
# If the image has alpha channel,
# we check for any pixels that are not opaque (255)
has_transparency = (
min(self.image.convert("RGBA").getchannel("A").getextrema()) < 255
)
return has_transparency
def paste(self, other_engine, pos, merge=True):
if merge and not FILTERS_AVAILABLE:
raise RuntimeError(
"You need filters enabled to use paste with merge. Please reinstall "
+ "thumbor with proper compilation of its filters."
)
self.enable_alpha()
other_engine.enable_alpha()
image = self.image
other_image = other_engine.image
if merge:
image_size = self.size
other_size = other_engine.size
mode, data = self.image_data_as_rgb()
_, other_data = other_engine.image_data_as_rgb()
imgdata = _composite.apply(
mode,
data,
image_size[0],
image_size[1],
other_data,
other_size[0],
other_size[1],
int(pos[0]),
int(pos[1]),
)
self.set_image_data(imgdata)
else:
image.paste(other_image, pos)
def enable_alpha(self):
if self.image.mode != "RGBA":
self.image = self.image.convert("RGBA")
def strip_icc(self):
self.icc_profile = None
def strip_exif(self):
self.exif = None
| 35.265207 | 91 | 0.579619 | [
"MIT"
] | TrueGameover/thumbor | thumbor/engines/pil.py | 14,494 | Python |
# Subplots Function
subplots_doc = """It creates a matrix of subplots. It requires two integers (different from 0) where the first sets the number of rows and the second the number of columns of the subplots matrix."""
subplot_doc = """It sets the subplot to use to plot data: further commands will refer to the subplot chosen. It requires two integers, where the first sets the row (from above) and the second the column (from left) which define the coordinates of the subplot addressed. Those values have to be lower then the correspondent ones set using the function subplots()."""
# Clear Functions
clear_terminal_doc = """It clears the terminal screen and it is generally useful before plotting or when plotting a continuous stream of data.
The functions clt() and clear_terminal() are equivalent."""
clear_figure_doc = """It clear all internal definitions of the figure, including its subplots.
The functions clf() and clear_figure() are equivalent."""
clear_plot_doc = """It clear all internal definitions of the active subplot.
The functions clp() and clear_plot() are equivalent."""
clear_data_doc = """It clear only the data relative to the active subplot, without clearing the plot style.
The functions cld() and clear_data() are equivalent."""
# Set Functions
plotsize_doc = """It sets the plot size of the active subplot. It requires two parameters: the desired width and height of the plot.
Note that plotsize(width, height) is equivalent to plotsize([width, height]) and that plotsize(integer) is equivalent to plotsize(integer, integer).
The functions plotsize() and plot_size() are equivalent."""
title_doc = """It set the title of the active subplot."""
xlabel_doc = """It set the label of the x axis relative to the active subplot."""
ylabel_doc = """It set the label of the y axis relative to the active subplot."""
xaxes_doc = """It sets whatever or not to show the x axes. It requires two Boolean parameters, one for each axis (lower and upper x axis).
Note that xaxes(bool1, bool2) is equivalent to xaxes([bool1, bool2]) and that xaxes(bool) is equivalent to xaxes(bool, bool)."""
yaxes_doc = """It sets whatever or not to show the y axes. It requires two Boolean parameters, one for each axis (left and right y axis).
Note that yaxes(bool1, bool2) is equivalent to yaxes([bool1, bool2]) and that yaxes(bool) is equivalent to yaxes(bool, bool)."""
grid_doc = """It sets whatever or not to show the x and y grid lines. It requires two Boolean parameters, one for each axis.
Note that grid(bool_x, bool_y) is equivalent to grid([bool_x, bool_y]) and that grid(bool) is equivalent to grid(bool, bool)."""
axes_color_doc = """It sets the color of the axes background.
Access the function plt.colors() to check the available color codes."""
ticks_color_doc = """It sets the color relative to any writing in the plot (title, legend, axes labels and ticks).
Access the function plt.colors() to check the available color codes."""
canvas_color_doc = """It sets the canvas color. The canvas is the area where data is plotted.
Access the function plt.colors() to check the available color codes."""
colorless_doc = """It removes all colors from the active subplot.
The function cls() and colorless() are equivalent."""
xlim_doc = """It sets the minimum and maximum values that could be plotted on the x axis. It requires a list of two numbers, where the first sets the left (minimum) limit and the second the right (maximum) limit.
Note that xlim(width, height) is equivalent to xlim([width, height])."""
ylim_doc = """It sets the minimum and maximum values that could be plotted on the y axis. It requires a list of two numbers, where the first sets the lower (minimum) limit and the second the upper (maximum) limit.
Note that ylim(width, height) is equivalent to ylim([width, height])."""
ticks_doc = """It sets the number of numerical ticks to show on the x axis and y axis respectively. It requires two integers, one for each axis.
Note that ticks(width, height) is equivalent to ticks([width, height]) and that ticks(integer) is equivalent to ticks(integer, integer)."""
xticks_doc = """It sets the data ticks on the x axis. The ticks should be provided as a list of values. If two lists are provided, the second is intended as the list of labels to be printed at the coordinates given by the first list. If no list is provided, the ticks are calculated automatically."""
yticks_doc = """It sets the data ticks on the y axis. The ticks should be provided as a list of values. If two lists are provided, the second is intended as the list of labels to be printed at the coordinates given by the first list. If no list is provided, the ticks are calculated automatically."""
xscale_doc = """It sets the scale relative to the x axis, which could be either 'linear' (as by default) or 'log' (for logarithmic plots)."""
yscale_doc = """It sets the scale relative to the y axis, which could be either 'linear' (as by default) or 'log' (for logarithmic plots). Setting the parameter 'yscale' to either 'left' (by default) or 'right' the yscale of the two y axes could be set independently."""
# Plotting Functions
scatter_doc = """It creates a scatter plot of coordinates given by the x and y lists. Optionally, a single y list could be provided. Here is a basic example:
\x1b[32mimport plotext as plt
plt.scatter(x, y)
plt.show()\x1b[0m
Multiple data sets could be plotted using consecutive scatter functions:
\x1b[32mplt.scatter(x1, y1)
plt.scatter(y2)
plt.show()\x1b[0m
Here are all the parameters of the scatter function:
\x1b[33myaxis\x1b[0m sets whether to plot the data relative to the left or right y axis. It accepts 'left' and 'right' as inputs.
\x1b[33mlabel\x1b[0m sets the label of the current data set, which will appear in the legend at the top left of the plot. The default value is an empty string. If all labels are an empty string no legend will be printed.
\x1b[33mmarker\x1b[0m sets the marker used to identify each data point, relative to the current data set. A single character could be provided, or one of the available marker codes. Access the function markers() for the available extra marker codes. The default value is "small". If 'None' is provided, the marker is set automatically.
\x1b[33mcolor\x1b[0m
It sets the color of the data points. Access the function plt.colors() to find the available full-ground color codes. If 'None' is provided (as by default) the colors are set automatically.
\x1b[33mfillx\x1b[0m
if True, extra data points will be plotted from the current plot to the x axis. The default value is False.
\x1b[33mfilly\x1b[0m
if True, extra data points will be plotted from the current plot to the y axis. The default value is False.
"""
plot_doc = """It plots lines between the data points provided. It is very similar to the scatter function, except that no data point is plotted.
Access the scatter function docstring for further documentation on its internal parameters."""
bar_doc = """It creates a bar plot using to the x and y values provided. The x values could be a list of numbers or strings, or optionally not provided.
It accepts the same parameters as the scatter and plot functions (except for 'fillx' and 'filly', which are not allowed). Access the scatter function docstring for further documentation on its internal parameters.
Here are its extra parameters:
\x1b[33mfill\x1b[0m if set to True (as by default), the plot fills the bars with the chosen color; if False only the bars borders are plotted.
\x1b[33mwidth\x1b[0m is the relative width of the bars and could be a float ranging from 0 to 1. The default value is 4 / 5.
\x1b[33morientation\x1b[0m sets the orientation of the bar plot and could be either 'vertical' (in short 'v', as by default) or 'horizontal' (in short 'h')."""
hist_doc = """It builds the histogram plot relative to the data provided. It accepts the same parameters as the bar plot (access its docstring for further documentation) with the following extra parameter:
\x1b[33mbins\x1b[0m defines the number of equal-width bins in the range (default 10)."""
# Show
show_doc = """It builds and prints the final figure on terminal. The parameter 'hide', if set to True, allows to build the figure without actually printing it (the default value is False)."""
# Other Functions
string_to_time_doc = """It takes a date/time as a string and returns the correspondent number of seconds. The string format should be: 'DD/MM/YYYY hh:mm:ss'. Other accepted formats are:
'DD/MM/YYYY': in this case the time is set to 00:00:00.
'hh:mm:ss' in this case the date is set to today.
'DD/MM/YYYY hh:mm' in this case the seconds are set to 0.
'hh:mm' in this case the date is set to today and the seconds to 0."""
get_canvas_doc = """It returns the figure canvas as a string and it can be used only after the show() function."""
sleep_doc = """It adds a sleeping time to the computation and it is generally useful when continuously plotting a stream of data, in order to decrease a possible screen flickering effect.
An input of, for example, 0.01 would add (depending on your machine) approximately 0.01 secs to the computation. Manually tweak this value to reduce the possible flickering."""
savefig_doc = """It saves the plot canvas (without colors) as a text file, at the path provided as input. It can be used only after the show() function.
The functions savefig() and save_fig() are equivalent."""
terminal_size_doc = """It returns the terminal size as width x height."""
version_doc = """It returns the version of the current installed plotext package."""
docstrings_doc = """It prints all the available docstrings"""
colors_doc = """It shows the available full-ground and background color codes."""
markers_doc = """It shows the available marker codes."""
sin_doc = """It creates a sinusoidal signal useful, for example, to test the plotext package. Here are its parameters:
length: the length of the signal.
peaks: the number of periods in the signal.
decay: the decay rate of the signal (normalized to length). If positive the signal exponentially increases.
phase: if 0.5 the cosine is returned; if 1, -sine is returned.
"""
| 61.281437 | 365 | 0.75386 | [
"MIT"
] | Dev-iL/plotext | plotext/docstrings.py | 10,234 | Python |
from __future__ import absolute_import
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from ..w_transform import HaarTransform, InvHaarTransform
def _zeros_like(obj):
zeros = [np.zeros_like(lev, dtype=float) for lev in obj]
return zeros
__all__ = ['_findmin', '_findmax', '_BinData', '_NewColorMap',
'_NSigmaFilter']
delta = 0.0 # Small number
def _findmin(array):
minn = delta
for i in array:
if np.min(i) < minn:
minn = np.min(i)
return minn
def _findmax(array):
maxx = delta
for i in array:
if np.max(i) > maxx:
maxx = np.max(i)
return maxx
def _BinData(data, bins):
hist, edges = np.histogram(a=range(bins), bins=bins, weights=data)
center = (edges[:-1]+edges[1:])/2.0
width = edges[1:]-edges[:-1]
return hist, edges, center, width
def _NewColorMap():
R=float(0+172+242)
G=(41.+181.+104.)
B=(242.+81.+59.)
#colors = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] #RGB
#colors = [(0.172, 0.521, 0.729), (0.870, 0.325, 0.129)]
colors = [(0.152, 0.552, 0.607),
(0.666, 0.882, 0.035),
(0.945, 0.337, 0.074)]
nbins=2**15
cmap_name='New'
cm = LinearSegmentedColormap.from_list(cmap_name, colors, N=nbins)
return cm
def _NSigmaFilter(data, hypothesis, nsigma,
nsigma_min=None, nsigma_percent=None):
WaveDec_data = HaarTransform(data)
DataCoeffs = WaveDec_data[:-1]
DataFirstTrend = WaveDec_data[-1]
WaveDec_hypo = HaarTransform(hypothesis)
HypoCoeffs = WaveDec_hypo[:-1]
HypoFirstTrend = WaveDec_hypo[-1]
Level = len(DataCoeffs)
flatNsigma = []
flatAbsNsigma = []
flatDataCoeffs = []
flatHypoCoeffs = []
flatLoc = []
count = 0
for l in range(Level):
J = 2**(Level-l-1)
for j in range(J):
flatNsigma.append(nsigma[l][j])
flatAbsNsigma.append(abs(nsigma[l][j]))
flatDataCoeffs.append(DataCoeffs[l][j])
flatHypoCoeffs.append(HypoCoeffs[l][j])
flatLoc.append([l, j])
count += 1
ixsort = np.argsort(flatAbsNsigma)[::-1]
sortNsigma = [flatNsigma[ix] for ix in ixsort]
sortDataCoeffs = [flatDataCoeffs[ix] for ix in ixsort]
sortHypoCoeffs = [flatHypoCoeffs[ix] for ix in ixsort]
sortLoc = [flatLoc[ix] for ix in ixsort]
keepNsigma = []
keepDeltaCoeff = []
keepLoc = []
if nsigma_min is not None:
for i in range(len(sortNsigma)):
if abs(sortNsigma[i]) > nsigma_min:
keepNsigma.append(sortNsigma[i])
keepDeltaCoeff.append(sortDataCoeffs[i]-sortHypoCoeffs[i])
keepLoc.append(sortLoc[i])
elif nsigma_percent is not None:
net = len(sortNsigma)
netkeep = int(np.ceil(net*nsigma_percent))
keepNsigma = sortNsigma[:netkeep]
keepDeltaCoeff = np.subtract(sortDataCoeffs[:netkeep],
sortHypoCoeffs[:netkeep])
keepLoc = sortLoc[:netkeep]
else:
keepNsigma = sortNsigma
keepDeltaCoeff = np.subtract(sortDataCoeffs,
sortHypoCoeffs)
keepLoc = sortLoc
keep = _zeros_like(WaveDec_data)
for i in range(len(keepDeltaCoeff)):
l = keepLoc[i][0]
j = keepLoc[i][1]
keep[l][j] = keepDeltaCoeff[i]
keep[-1][0] = DataFirstTrend-HypoFirstTrend
return keep
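# Rough usage sketch (illustrative only; the inputs below are made up). _BinData
# re-bins a length-`bins` weight vector, and _NSigmaFilter keeps the wavelet
# coefficients whose significance passes the chosen threshold:
#
#   hist, edges, center, width = _BinData(data=np.ones(16), bins=16)
#   kept = _NSigmaFilter(data, hypothesis, nsigma, nsigma_min=3.0)
#
# Here `data` and `hypothesis` are equal-length count vectors and `nsigma` follows
# the per-level layout produced by HaarTransform(data).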
| 28.427419 | 74 | 0.593759 | [
"MIT"
] | alexxromero/WAKY-private | kwakpriv/plotting/plottingtools.py | 3,525 | Python |
##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import sys
import GafferUI
import Qt
from Qt import QtCore
from Qt import QtGui
from Qt import QtWidgets
def joinEdges( listContainer ) :
if listContainer.orientation() == listContainer.Orientation.Horizontal :
lowProperty = "gafferFlatLeft"
highProperty = "gafferFlatRight"
else :
lowProperty = "gafferFlatTop"
highProperty = "gafferFlatBottom"
visibleWidgets = [ w for w in listContainer if w.getVisible() ]
l = len( visibleWidgets )
for i in range( 0, l ) :
visibleWidgets[i]._qtWidget().setProperty( lowProperty, i > 0 )
visibleWidgets[i]._qtWidget().setProperty( highProperty, i < l - 1 )
def grab( widget, imagePath ) :
GafferUI.EventLoop.waitForIdle()
imageDir = os.path.dirname( imagePath )
if imageDir and not os.path.isdir( imageDir ) :
os.makedirs( imageDir )
if Qt.__binding__ in ( "PySide2", "PyQt5" ) :
# Qt 5
screen = QtWidgets.QApplication.primaryScreen()
windowHandle = widget._qtWidget().windowHandle()
if windowHandle :
screen = windowHandle.screen()
pixmap = screen.grabWindow( long( widget._qtWidget().winId() ) )
if sys.platform == "darwin" and pixmap.size() == screen.size() * screen.devicePixelRatio() :
# A bug means that the entire screen will have been captured,
# not just the widget we requested. Copy out just the widget.
topLeft = widget._qtWidget().mapToGlobal( QtCore.QPoint( 0, 0 ) )
bottomRight = widget._qtWidget().mapToGlobal( QtCore.QPoint( widget._qtWidget().width(), widget._qtWidget().height() ) )
size = bottomRight - topLeft
pixmap = pixmap.copy(
QtCore.QRect(
topLeft * screen.devicePixelRatio(),
QtCore.QSize( size.x(), size.y() ) * screen.devicePixelRatio()
)
)
else :
# Qt 4
pixmap = QtGui.QPixmap.grabWindow( long( widget._qtWidget().winId() ) )
pixmap.save( imagePath )
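# Usage sketch (illustrative only; it assumes a running GafferUI application and an
# existing widget to capture):
#
#   window = GafferUI.Window( "Example" )
#   window.setVisible( True )
#   grab( window, "/tmp/window.png" )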
| 37.402062 | 123 | 0.693219 | [
"BSD-3-Clause"
] | mattigruener/gaffer | python/GafferUI/WidgetAlgo.py | 3,628 | Python |
from typing import List
async def FixEnvData(data: List[str]):
cache_data = data
if str(cache_data[1]).startswith(" "):
cache_data[1] = cache_data[1].split(" ", 1)[-1]
if str(cache_data[2]).startswith(" "):
cache_data[2] = (
"" if cache_data[2].isspace() else cache_data[2].split(" ", 1)[-1]
)
if (str(cache_data[3]).lower().strip() != "true") and (
str(cache_data[3]).lower().strip() != "false"
):
cache_data[3] = True
elif str(cache_data[3]).lower().strip() == "true":
cache_data[3] = True
elif str(cache_data[3]).lower().strip() == "false":
cache_data[3] = False
if str(cache_data[4]) != "":
cache_data[4] = "secret"
return cache_data
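if __name__ == "__main__":
    # Illustrative sketch only (not part of the original module); the sample values
    # are made up. FixEnvData is a coroutine, so it is driven here with asyncio.
    import asyncio

    sample = ["API_ID", " 12345", "   ", "yes", "token"]
    print(asyncio.run(FixEnvData(sample)))  # -> ['API_ID', '12345', '', True, 'secret']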
| 32.695652 | 78 | 0.557181 | [
"MIT"
] | BGHRFF/AppJsonMaker | core/fix.py | 752 | Python |
from dyc.utils import (
get_leading_whitespace,
read_yaml,
get_indent_forward,
get_indent_backward,
get_extension,
is_comment,
)
class TestGetLeadingWhitespace:
def test_tabs(self):
"""Test tabs functionality"""
text = '\t\tHello'
expected = '\t\t'
got = get_leading_whitespace(text)
assert expected == got
def test_whitespace(self):
"""Test whitespace functionality"""
space = ' '
text = '{space}Such a long whitespace'.format(space=space)
expected = space
got = get_leading_whitespace(text)
assert expected == got
class TestReadYaml:
def test_should_return_none_if_not_found(self):
random_path = '/path/to/non/existing/file.yaml'
expected = None
got = read_yaml(random_path)
assert expected == got
class TestGetIndentForward:
def test_forward(self):
lines = []
lines.append( '\n')
lines.append('This is a Test')
assert get_indent_forward(lines, 0) == '\n'
class TestGetIndentBackward:
def test_backward(self):
lines = []
lines.append( '\n')
lines.append('This is a Test')
assert get_indent_backward(lines, 1) == 'This is a Test'
class TestGetExtension:
def test_existing_extension_valid(self):
ext = 'file.puk'
expected = 'puk'
got = get_extension(ext)
assert expected == got
def test_non_existing_extension(self):
ext = 'file'
expected = ''
got = get_extension(ext)
assert expected == got
def test_wrong_extension_type(self):
exts = [dict(), False, True, [], 123]
expected = ''
for ext in exts:
got = get_extension(ext)
assert expected == got
class TestIsComment:
def test_valid_comments(self):
"""Testing valid comments"""
text = '# Hello World'
assert is_comment(text, ['#']) == True
def test_invalid_comments(self):
"""Testing invalid comments"""
text = '# Hello World'
assert is_comment(text, ['//']) == False
class UtilsTest():
def __init__(self, whitespace, read_yaml, extension, comment,
indent_forward, indent_backward):
self.test_get_leading_white_space = whitespace
self.test_read_yaml = read_yaml
self.test_get_extension = extension
self.test_is_comment = comment
self.test_get_indent_forward = indent_forward
self.test_get_indent_backward = indent_backward
def test_whitespace(self):
self.test_get_leading_white_space.test_tabs()
self.test_get_leading_white_space.test_whitespace()
def test_readYaml(self):
self.test_read_yaml.test_should_return_none_if_not_found()
def test_extension(self):
self.test_get_extension.test_existing_extension_valid()
self.test_get_extension.test_non_existing_extension()
self.test_get_extension.test_wrong_extension_type()
def test_comment(self):
self.test_is_comment.test_valid_comments()
self.test_is_comment.test_invalid_comments()
def test_indent_forward(self):
self.test_get_indent_forward.test_forward()
def test_indent_backward(self):
self.test_get_indent_backward.test_backward()
utils_test = UtilsTest(TestGetLeadingWhitespace(),
TestReadYaml(),
TestGetExtension(),
TestIsComment(),
TestGetIndentForward(),
TestGetIndentBackward())
utils_test.test_whitespace()
utils_test.test_readYaml()
utils_test.test_extension()
utils_test.test_comment()
utils_test.test_indent_forward()
utils_test.test_indent_backward()
| 29.44186 | 66 | 0.644813 | [
"MIT"
] | lukerm48/dyc | tests/test_utils.py | 3,798 | Python |
"""Install Dplython."""
from setuptools import setup, find_packages
setup(
name="dplython",
version="0.0.4",
description="Dplyr-style operations on top of pandas DataFrame.",
url="https://github.com/dodger487/dplython",
download_url="https://github.com/dodger487/dplython/tarball/0.0.4",
packages=find_packages(),
license="MIT",
keywords="pandas data dplyr",
package_data={"dplython": ["data/diamonds.csv"]},
package_dir={"dplython": "dplython"},
install_requires=["numpy", "pandas", "six"],
author="Chris Riederer",
author_email="[email protected]",
maintainer="Chris Riederer",
maintainer_email="[email protected]",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Environment :: Web Environment",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Scientific/Engineering",
]
) | 34.472222 | 71 | 0.639807 | [
"MIT"
] | dgrtwo/dplython | setup.py | 1,241 | Python |
import pickle
from kitti_functions import *
class DataLoader:
def __init__(self, args):
self.dataset_path = args.dataset_path
self.batch_size = args.batch_size
self.batch_size_valid = 1
self.seq_length = args.seq_length
self.scale_factor = args.data_scale
self.social_range = args.social_range
self.social_grid_size = args.grid_size
self.map_size = args.map_size
self.is_apply_social = 0
self.data_augmentation = args.data_augmentation
self.load_preprocessed_data()
self.reset_batch_pointer()
        print('>> Dataset loading and analysis are done...')
def reset_batch_pointer(self, ):
self.pointer = 0
self.frame_pointer = 0
def tick_batch_pointer(self):
self.pointer += 1
if (self.pointer >= len(self.train_data)):
self.pointer = 0
def load_preprocessed_data(self):
'''
raw_data is a list that has three components
component1) trajectory data for training
component2) trajectory data for validation and visualization
'''
f = open(self.dataset_path, 'rb')
raw_data = pickle.load(f)
f.close()
# for training data --------------------
counter = 0
self.train_data = []
for data in raw_data[0]:
scaled_data = np.copy(data)
self.train_data.append(scaled_data)
counter += int(len(scaled_data) - self.seq_length)
# assume we visit every frame as a start point of a short trajectory
# in one training epoch
self.num_batches = int(counter / self.batch_size)
# for validation data --------------------
self.valid_data = []
for data in raw_data[1]:
scaled_data = np.copy(data)
self.valid_data.append(scaled_data)
# for map data ----------------------------
self.map = []
for data in raw_data[2]:
self.map.append(data)
# for map info ----------------------------
self.map_info = []
for data in raw_data[3]:
self.map_info.append(data)
def preprocess_sequence(self, seq, isValid, isDiff):
'''
dataset id (0)
object id (1)
target pose (2~3)
neighbor pose (4~63)
'''
seq_len = seq.shape[0]
seq_tpose = np.copy(seq[:, 2:4])
seq_npose = np.copy(seq[:, 4:64]).reshape(seq_len, 30, 2)
# # load map
dataset_index = int(seq[0, 0])
map = self.map[dataset_index]
x_max, y_max, scale = self.map_info[dataset_index]
# # map roi extraction ------------------------------------------
seq_map = []
for i in range(seq_tpose.shape[0]):
x = seq_tpose[i, 0]
y = seq_tpose[i, 1]
corr_map = map_roi_extract(map, x, y, x_max, y_max, scale, int(self.map_size/2))
seq_map.append(corr_map)
# # TEST code ------------------------------------
'''
map_ = np.copy(np.copy(map_roi[i]))
map_row_cnt = map_.shape[0] / 2
map_col_cnt = map_.shape[1] / 2
pose_start_x = seq_tpose[i, 0]
pose_start_y = seq_tpose[i, 1]
for kappa in range(0, seq_tpose.shape[0]-i):
pose_x = int(3 * (seq_tpose[i+kappa, 0] - pose_start_x) + map_row_cnt)
pose_y = int(3 * (seq_tpose[i+kappa, 1] - pose_start_y) + map_col_cnt)
pose_x = _min(_max(pose_x, 0), map_.shape[0] - 1)
pose_y = _min(_max(pose_y, 0), map_.shape[1] - 1)
map_[pose_x, pose_y, 0] = 0
map_[pose_x, pose_y, 1] = int(255.0 * float(i+kappa) / float(seq_tpose.shape[0]-1))
map_[pose_x, pose_y, 2] = 255
cv2.imshow('test', map_)
cv2.waitKey(0)
'''
# # apply augmentation -------------------------------------------
# 0: none, 1: random flip, 2: random rotation, 3: random flip+scaling, 4: random rotation+scaling
if (isValid):
donothing = 0
else:
if (self.data_augmentation == 1):
seq_tpose, seq_npose, seq_map = random_flip(seq_tpose, seq_npose, seq_map)
elif (self.data_augmentation == 2):
# TODO : random rotation of map needs to be implemented
seq_tpose, seq_npose = random_rotate(seq_tpose, seq_npose)
# # TEST code ------------------------------------
'''
for i in range(seq_tpose.shape[0]):
map_ = np.copy(np.copy(map_roi[i]))
map_row_cnt = map_.shape[0] / 2
map_col_cnt = map_.shape[1] / 2
pose_start_x = seq_tpose[i, 0]
pose_start_y = seq_tpose[i, 1]
for kappa in range(0, seq_tpose.shape[0]-i):
pose_x = int(3 * (seq_tpose[i+kappa, 0] - pose_start_x) + map_row_cnt)
pose_y = int(3 * (seq_tpose[i+kappa, 1] - pose_start_y) + map_col_cnt)
pose_x = _min(_max(pose_x, 0), map_.shape[0] - 1)
pose_y = _min(_max(pose_y, 0), map_.shape[1] - 1)
map_[pose_x, pose_y, 0] = 0
map_[pose_x, pose_y, 1] = int(255.0 * float(i+kappa) / float(seq_tpose.shape[0]-1))
map_[pose_x, pose_y, 2] = 255
cv2.imshow('test', map_)
cv2.waitKey(0)
'''
# # TEST CODE-------------------------------------
        '''
ego = np.copy(seq_tpose)
plt.plot(ego[:, 0], ego[:, 1], 'o')
for i in range(30):
ngh = np.squeeze(seq_npose[:, i, :]) # seq_len x 2
ngh_ = ngh[ngh[:, 0]>-1000, :]
if (len(ngh_) > 1):
plt.plot(ngh_[:, 0], ngh[:, 1], '+')
plt.show()
'''
# # create social vectors (ok) -----------------------------------------
num_grid = int(self.social_range / self.social_grid_size)
# seq_sgrid = np.zeros(shape=(seq_len, num_grid, num_grid))
seq_sgrid = np.zeros(shape=(seq_len, num_grid*num_grid))
for i in range(seq_len):
social_grid = np.zeros(shape=(num_grid, num_grid))
target_pose = seq_tpose[i, :].reshape(1, 2)
neighbors_pose = seq_npose[i, :]
for j in range(30):
if (neighbors_pose[j, 0] == -1000 or neighbors_pose[j, 0] == 1000):
continue
else:
neighbor_pose = neighbors_pose[j, :].reshape(1, 2)
social_grid = getSocialMatrix(social_grid, target_pose, neighbor_pose, self.social_range, self.social_grid_size)
seq_sgrid[i, :] = social_grid.reshape(1, num_grid*num_grid)
# # pose difference -----------------------------------------------
seq_tpose_cur = np.copy(seq_tpose[1:, :]) # set_tpose[1:seq_len-1]
seq_tpose_pre = np.copy(seq_tpose[:-1, :]) # set_tpose[0:seq_len-2]
seq_tpose_diff = seq_tpose_cur - seq_tpose_pre
if (isDiff):
return (seq_tpose_diff/self.scale_factor), np.copy(seq_sgrid[1:, :]), np.array(seq_map[1:])
else:
return seq_tpose_cur, np.copy(seq_sgrid[1:, :]), np.array(seq_map[1:])
def next_batch(self):
'''
Read a batch randomly
:x_batch: <batch size x seq_length x input_dim>
:y_batch: <batch size x seq_length x input_dim>
:d_batch: <batch size x seq_length>
'''
x_batch = []
y_batch = []
sg_batch = []
map_batch = []
d_batch = []
for i in range(self.batch_size):
data = self.train_data[self.pointer]
idx = random.randint(0, len(data) - self.seq_length - 2)
seq_all = np.copy(data[idx:idx + self.seq_length + 2])
# TODO : non-preprocessed data needs to be augmented and processed HERE
seq_all_proc, seq_sgrid, seq_map = self.preprocess_sequence(seq_all, isValid=False, isDiff=True)
seq_x = np.copy(seq_all_proc[0:self.seq_length])
seq_y = np.copy(seq_all_proc[1:self.seq_length + 1])
seq_sgrid_x = np.copy(seq_sgrid[0:self.seq_length, :])
y_batch.append(seq_y)
x_batch.append(seq_x)
sg_batch.append(seq_sgrid_x)
map_batch.append(seq_map[0:self.seq_length])
d_batch.append([self.pointer, idx])
'''
if len(data) is smaller than 50, self.seq_length is 24
n_batch is 1, therefore, (1.0 / n_batch) is 1
then the following is the same as
if random.random() < 1, then go next with prob. 1
if len(data) is greater than 50, self.seq_length is 24
n_batch is 2, therefore, (1.0 / n_batch) is 0.5
then the following is the same as
if random.random() < 0.5, then go next with prob. 0.5
'''
n_batch = int(len(data) / (self.seq_length + 2))
if random.random() < (1.0 / float(n_batch)):
self.tick_batch_pointer()
return x_batch, y_batch, sg_batch, map_batch, d_batch
def next_batch_valid(self):
'''
Read a batch randomly for validation during training
:x_batch: <batch size x seq_length x input_dim>
:y_batch: <batch size x seq_length x input_dim>
:d_batch: <batch size x seq_length>
'''
x_batch = []
y_batch = []
sg_batch = []
map_batch = []
d_batch = []
counter = 0
while (len(x_batch) < self.batch_size):
data = self.valid_data[self.pointer]
if (self.frame_pointer < len(data) - self.seq_length - 1):
idx = self.frame_pointer
seq_all = np.copy(data[idx:idx + self.seq_length + 2])
# TODO : non-preprocessed data needs to be augmented and processed HERE
seq_all_proc, seq_sgrid, seq_map = self.preprocess_sequence(seq_all, isValid=True, isDiff=True)
seq_x = np.copy(seq_all_proc[0:self.seq_length])
seq_y = np.copy(seq_all_proc[1:self.seq_length + 1])
seq_sgrid_x = np.copy(seq_sgrid[0:self.seq_length, :])
y_batch.append(seq_y)
x_batch.append(seq_x)
sg_batch.append(seq_sgrid_x)
map_batch.append(seq_map[0:self.seq_length])
d_batch.append([self.pointer, idx])
# move a quarter of seq. length steps
self.frame_pointer += int(self.seq_length/4)
else:
if (self.pointer >= len(self.valid_data)-1):
x_batch = []
y_batch = []
sg_batch = []
d_batch = []
return x_batch, y_batch, sg_batch, map_batch, d_batch
else:
self.pointer += 1
self.frame_pointer = 0
counter += 1
return x_batch, y_batch, sg_batch, map_batch, d_batch
def next_sequence_valid(self):
'''
dataset id (0)
object id (1)
target pose (2~3)
neighbor pose (4~63)
Read a batch randomly for validation and visualization
:x_batch: <batch size x seq_length x input_dim>
:y_batch: <batch size x seq_length x input_dim>
:d_batch: <batch size x seq_length>
'''
NotEndOfData = True
while(NotEndOfData):
if (self.pointer >= len(self.valid_data)):
x = []
grid = []
map = []
x_max = []
y_max = []
scale = []
dataset_index = []
NotEndOfData = False
break
else:
if (self.frame_pointer >= len(self.valid_data[self.pointer]) - self.seq_length - 2):
self.frame_pointer = 0
self.pointer += 1
else:
data = self.valid_data[self.pointer]
idx = self.frame_pointer
seq_all = np.copy(data[idx:idx + self.seq_length + 1])
# # load map
dataset_index = int(seq_all[0, 0])
map = self.map[dataset_index]
x_max, y_max, scale = self.map_info[dataset_index]
# TODO : non-preprocessed data needs to be augmented and processed HERE
seq_all_proc, seq_sgrid, seq_map = self.preprocess_sequence(seq_all, isValid=True, isDiff=False)
x = np.copy(seq_all_proc[0:self.seq_length + 1])
grid = np.copy(seq_sgrid[0:self.seq_length + 1])
print('seq_pointer %d, frame_pointer %d' % (self.pointer, self.frame_pointer))
self.frame_pointer += int(self.seq_length + 1)
break
return x, grid, map, x_max, y_max, scale, dataset_index, NotEndOfData | 35.893443 | 132 | 0.52394 | [
"MIT"
] | d1024choi/trajpred_irl | kitti_utils.py | 13,137 | Python |
import argparse, json
import simpleamt
import MySQLdb
if __name__ == '__main__':
parser = argparse.ArgumentParser(parents=[simpleamt.get_parent_parser()])
parser.add_argument('-f', action='store_true', default=False)
args = parser.parse_args()
mtc = simpleamt.get_mturk_connection_from_args(args)
approve_ids = []
reject_ids = []
if args.hit_ids_file is None:
parser.error('Must specify --hit_ids_file.')
with open(args.hit_ids_file, 'r') as f:
hit_ids = [line.strip() for line in f]
conn = MySQLdb.connect(host='localhost', user='root', passwd='password', db='ccr_db')
cursor = conn.cursor()
for hit_id in hit_ids:
try:
assignments = mtc.get_assignments(hit_id)
except:
continue
for a in assignments:
if a.AssignmentStatus == 'Submitted':
try:
# Try to parse the output from the assignment. If it isn't
# valid JSON then we reject the assignment.
output = json.loads(a.answers[0][0].fields[0])
# Check if HIT assignment properly completed!
print("output = ", output)
cursor.execute('SELECT successful, paid FROM hashes WHERE hash=%s;', (output['hash'],))
row = cursor.fetchone();
if row is None:
reject_ids.append(a.AssignmentId)
print('none reject')
continue
successful, paid = row
if paid == 1 or successful == 0:
reject_ids.append(a.AssignmentId)
print('other reject, paid=', paid, 'successful=', successful)
else:
cursor.execute('UPDATE hashes SET paid = 1 WHERE hash=%s;', (output['hash'],))
approve_ids.append(a.AssignmentId)
print('accept')
except ValueError as e:
reject_ids.append(a.AssignmentId)
else:
print "hit %s has already been %s" % (str(hit_id), a.AssignmentStatus)
print ('This will approve %d assignments and reject %d assignments with '
'sandbox=%s' % (len(approve_ids), len(reject_ids), str(args.sandbox)))
print 'Continue?'
if not args.f:
s = raw_input('(Y/N): ')
else:
s = 'Y'
if s == 'Y' or s == 'y':
print 'Approving assignments'
for idx, assignment_id in enumerate(approve_ids):
print 'Approving assignment %d / %d' % (idx + 1, len(approve_ids))
mtc.approve_assignment(assignment_id)
for idx, assignment_id in enumerate(reject_ids):
print 'Rejecting assignment %d / %d' % (idx + 1, len(reject_ids))
mtc.reject_assignment(assignment_id, feedback='Invalid results')
else:
print 'Aborting'
| 36.069444 | 97 | 0.630343 | [
"MIT"
] | maxspero/ccr-amt | check_and_approve_hits.py | 2,597 | Python |
import logging
import pickle
import os
import sys
import json
import cv2
import numpy as np
import glob
import tqdm
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import src
from src.__init__ import *
def image_reader(image_path_list):
image = cv2.imread(image_path_list[0], 0)
image = cv2.resize(image, (48, 48))
image = np.expand_dims(image, axis=0)
for img_path in image_path_list[1:]:
image = np.concatenate(
(
image,
np.expand_dims(
cv2.resize(cv2.imread(img_path, 0), (48, 48)),
axis=0
)
),
axis=0
)
return image
def image_label_generator(emotion_map):
labels = []
_i = 0
image_lists = []
for k, v in tqdm.tqdm(emotion_map.items()):
path = os.path.join(FACE_IMAGES_PATH, k)
logger.debug('reading images at path: {}'.format(path))
image_list = glob.glob(path+'/*.png')
logger.debug('length images list: {}'.format(len(image_list)))
image_lists.append(image_list)
labels.extend([v]*len(image_list))
images = np.vstack((image_reader(image_list) for image_list in image_lists))
return images, labels
def train_test_splitter(images, labels):
dataset = [(image, label) for image, label in zip(images, labels)]
dataset_size = len(dataset)
trainset_size = int(.8 * dataset_size)
testset_size = dataset_size - trainset_size
logger.debug('Dataset size: {}'.format(dataset_size))
np.random.shuffle(dataset)
# PAY ATTENTION HERE: YOU CAN ALSO ADD DEV-SET :)
trainset, testset = dataset[:trainset_size], dataset[trainset_size:]
logger.debug('Trainset size: {}, Testset size: {}'.format(
len(trainset), len(testset)
))
logger.debug('concatinating the train images on axis 0')
train_image = np.vstack((tr[0] for tr in tqdm.tqdm(trainset[:])))
logger.debug('concatinating the train labels on axis 0')
train_label = [tr[1] for tr in tqdm.tqdm(trainset[:])]
logger.info('concatinating the test images on axis 0')
test_image = np.vstack((te[0] for te in tqdm.tqdm(testset[:])))
logger.debug('concatinating the test labels on axis 0')
test_label = [te[1] for te in tqdm.tqdm(testset[:])]
logger.debug('train-images-shape: {}, test-images-shape: {}'.format(
train_image.shape, test_image.shape
))
return (train_image, train_label), (test_image, test_label)
def create_dataset(images, labels):
images = np.reshape(images, (-1, 48*48))
logger.debug('images-shape: {}, length-labels: {}'.format(
images.shape, len(labels)
))
train, test = train_test_splitter(images, labels)
train_dict = {
'data': train[0],
'labels': train[1]
}
test_dict = {
'data': test[0],
'labels': test[1]
}
with open(os.path.join(DATASET_SAVE_PATH, 'train_batch_0'), 'wb') as file:
pickle.dump(train_dict, file)
logger.info('dataset: trainset-dict pickled and saved at {}'.format(DATASET_SAVE_PATH))
with open(os.path.join(DATASET_SAVE_PATH, 'test_batch_0'), 'wb') as file:
pickle.dump(test_dict, file)
logger.info('dataset: testset-dict pickled and saved at {}'.format(DATASET_SAVE_PATH))
logger.info('dataset created :)')
def condition_satisfied(emotion_map):
for emotion_class in emotion_map.keys():
path = os.path.join(FACE_IMAGES_PATH, emotion_class)
if not os.path.exists(path):
logger.error('Please capture images for "{}" emotion-class as well'.format(
emotion_class
))
logger.error('FAIL.')
return False
return True
if __name__ == '__main__':
logger = logging.getLogger('emojifier.dataset_creator')
FACE_IMAGES_PATH = os.path.join(os.path.dirname(__file__), os.pardir, 'images')
DATASET_SAVE_PATH = os.path.join(os.path.dirname(__file__), os.pardir, 'dataset')
if not os.path.exists(DATASET_SAVE_PATH):
os.makedirs(DATASET_SAVE_PATH)
if condition_satisfied(EMOTION_MAP):
_images, _labels = image_label_generator(EMOTION_MAP)
create_dataset(_images, _labels)
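# Reading the saved batches back (illustrative sketch; the paths and dict keys
# follow the code above):
#
#   with open(os.path.join(DATASET_SAVE_PATH, 'train_batch_0'), 'rb') as f:
#       train = pickle.load(f)
#   images = train['data'].reshape(-1, 48, 48)  # rows are flattened 48x48 grayscale crops
#   labels = train['labels']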
| 29.653061 | 95 | 0.628585 | [
"MIT"
] | WZX1998/facial-recognition | src/dataset_creator.py | 4,359 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import write_int, UTF8Deserializer
class TaskContext(object):
"""
.. note:: Experimental
Contextual information about a task which can be read or mutated during
execution. To access the TaskContext for a running task, use:
:meth:`TaskContext.get`.
"""
_taskContext = None
_attemptNumber = None
_partitionId = None
_stageId = None
_taskAttemptId = None
_localProperties = None
_resources = None
def __new__(cls):
"""Even if users construct TaskContext instead of using get, give them the singleton."""
taskContext = cls._taskContext
if taskContext is not None:
return taskContext
cls._taskContext = taskContext = object.__new__(cls)
return taskContext
@classmethod
def _getOrCreate(cls):
"""Internal function to get or create global TaskContext."""
if cls._taskContext is None:
cls._taskContext = TaskContext()
return cls._taskContext
@classmethod
def get(cls):
"""
Return the currently active TaskContext. This can be called inside of
user functions to access contextual information about running tasks.
.. note:: Must be called on the worker, not the driver. Returns None if not initialized.
"""
return cls._taskContext
def stageId(self):
"""The ID of the stage that this task belong to."""
return self._stageId
def partitionId(self):
"""
The ID of the RDD partition that is computed by this task.
"""
return self._partitionId
def attemptNumber(self):
""""
How many times this task has been attempted. The first task attempt will be assigned
attemptNumber = 0, and subsequent attempts will have increasing attempt numbers.
"""
return self._attemptNumber
def taskAttemptId(self):
"""
An ID that is unique to this task attempt (within the same SparkContext, no two task
attempts will share the same attempt ID). This is roughly equivalent to Hadoop's
TaskAttemptID.
"""
return self._taskAttemptId
def getLocalProperty(self, key):
"""
Get a local property set upstream in the driver, or None if it is missing.
"""
return self._localProperties.get(key, None)
def resources(self):
"""
Resources allocated to the task. The key is the resource name and the value is information
about the resource.
"""
return self._resources
BARRIER_FUNCTION = 1
def _load_from_socket(port, auth_secret):
"""
Load data from a given socket, this is a blocking method thus only return when the socket
connection has been closed.
"""
(sockfile, sock) = local_connect_and_auth(port, auth_secret)
# The barrier() call may block forever, so no timeout
sock.settimeout(None)
# Make a barrier() function call.
write_int(BARRIER_FUNCTION, sockfile)
sockfile.flush()
# Collect result.
res = UTF8Deserializer().loads(sockfile)
# Release resources.
sockfile.close()
sock.close()
return res
class BarrierTaskContext(TaskContext):
"""
.. note:: Experimental
A :class:`TaskContext` with extra contextual info and tooling for tasks in a barrier stage.
Use :func:`BarrierTaskContext.get` to obtain the barrier context for a running barrier task.
.. versionadded:: 2.4.0
"""
_port = None
_secret = None
@classmethod
def _getOrCreate(cls):
"""
Internal function to get or create global BarrierTaskContext. We need to make sure
BarrierTaskContext is returned from here because it is needed in python worker reuse
scenario, see SPARK-25921 for more details.
"""
if not isinstance(cls._taskContext, BarrierTaskContext):
cls._taskContext = object.__new__(cls)
return cls._taskContext
@classmethod
def get(cls):
"""
.. note:: Experimental
Return the currently active :class:`BarrierTaskContext`.
This can be called inside of user functions to access contextual information about
running tasks.
.. note:: Must be called on the worker, not the driver. Returns None if not initialized.
"""
return cls._taskContext
@classmethod
def _initialize(cls, port, secret):
"""
Initialize BarrierTaskContext, other methods within BarrierTaskContext can only be called
after BarrierTaskContext is initialized.
"""
cls._port = port
cls._secret = secret
def barrier(self):
"""
.. note:: Experimental
Sets a global barrier and waits until all tasks in this stage hit this barrier.
Similar to `MPI_Barrier` function in MPI, this function blocks until all tasks
in the same stage have reached this routine.
        .. warning:: In a barrier stage, each task must have the same number of `barrier()`
calls, in all possible code branches.
Otherwise, you may get the job hanging or a SparkException after timeout.
.. versionadded:: 2.4.0
"""
if self._port is None or self._secret is None:
raise Exception("Not supported to call barrier() before initialize " +
"BarrierTaskContext.")
else:
_load_from_socket(self._port, self._secret)
def getTaskInfos(self):
"""
.. note:: Experimental
Returns :class:`BarrierTaskInfo` for all tasks in this barrier stage,
ordered by partition ID.
.. versionadded:: 2.4.0
"""
if self._port is None or self._secret is None:
raise Exception("Not supported to call getTaskInfos() before initialize " +
"BarrierTaskContext.")
else:
addresses = self._localProperties.get("addresses", "")
return [BarrierTaskInfo(h.strip()) for h in addresses.split(",")]
class BarrierTaskInfo(object):
"""
.. note:: Experimental
Carries all task infos of a barrier task.
:var address: The IPv4 address (host:port) of the executor that the barrier task is running on
.. versionadded:: 2.4.0
"""
def __init__(self, address):
self.address = address
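# Usage sketch (illustrative only; this runs inside user functions on the
# executors, with `rdd` standing in for an existing RDD):
#
#   def report(iterator):
#       ctx = TaskContext.get()
#       yield (ctx.stageId(), ctx.partitionId(), ctx.attemptNumber())
#
#   rdd.mapPartitions(report).collect()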
| 31.873362 | 98 | 0.65735 | [
"Apache-2.0"
] | 2RedSquares/spark | python/pyspark/taskcontext.py | 7,299 | Python |
from unittest import TestCase
import pandas as pd
from pytz import UTC
from trading_calendars.exchange_calendar_xshg import XSHGExchangeCalendar
from .test_trading_calendar import ExchangeCalendarTestBase
from .test_utils import T
class XSHGCalendarTestCase(ExchangeCalendarTestBase, TestCase):
answer_key_filename = "xshg"
calendar_class = XSHGExchangeCalendar
# Shanghai stock exchange is open from 9:30 am to 3pm
# (for now, ignoring lunch break)
MAX_SESSION_HOURS = 5.5
HAVE_EARLY_CLOSES = False
MINUTE_INDEX_TO_SESSION_LABELS_END = pd.Timestamp("2011-04-07", tz=UTC)
def test_normal_year(self):
expected_holidays_2017 = [
T("2017-01-02"),
T("2017-01-27"),
T("2017-01-30"),
T("2017-01-31"),
T("2017-02-01"),
T("2017-02-02"),
T("2017-04-03"),
T("2017-04-04"),
T("2017-05-01"),
T("2017-05-29"),
T("2017-05-30"),
T("2017-10-02"),
T("2017-10-03"),
T("2017-10-04"),
T("2017-10-05"),
T("2017-10-06"),
]
for session_label in expected_holidays_2017:
self.assertNotIn(session_label, self.calendar.all_sessions)
def test_constrain_construction_dates(self):
# the XSHG calendar currently goes from 1999 to 2025, inclusive.
with self.assertRaises(ValueError) as e:
self.calendar_class(T("1998-12-31"), T("2005-01-01"))
self.assertEqual(
str(e.exception),
(
"The XSHG holidays are only recorded back to 1999,"
" cannot instantiate the XSHG calendar back to 1998."
),
)
with self.assertRaises(ValueError) as e:
self.calendar_class(T("2005-01-01"), T("2026-01-01"))
self.assertEqual(
str(e.exception),
(
"The XSHG holidays are only recorded to 2025,"
" cannot instantiate the XSHG calendar for 2026."
),
)
| 29.43662 | 75 | 0.576555 | [
"Apache-2.0"
] | quantrocket-llc/trading-calendars | trading_calendars/tests/test_xshg_calendar.py | 2,090 | Python |
import os
from sb3_contrib.ppo_mask import MaskablePPO
from sb3_contrib.qrdqn import QRDQN
from sb3_contrib.tqc import TQC
from sb3_contrib.trpo import TRPO
# Read version from file
version_file = os.path.join(os.path.dirname(__file__), "version.txt")
with open(version_file, "r") as file_handler:
__version__ = file_handler.read().strip()
| 28.833333 | 69 | 0.791908 | [
"MIT"
] | cyprienc/stable-baselines3-contrib | sb3_contrib/__init__.py | 346 | Python |
import logging
import os
import pickle
import sys
from pathlib import Path
import click
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import zsampler
from dotenv import load_dotenv, find_dotenv
from scipy.special import logsumexp, softmax
from src.inference.context_geo import GridContextGeo, gp_inflate_duplicate, gp_deflate_sum
from src.inference.hmc import HMCSampler
from src.inference.priors import BetaPriorWithIntercept, GaussianPrior, GPNonGridPriorSqExpFixed
from src.experiment.visualize import plot_traceplots
class BlockMixtureGpSoftmaxAllocation:
def __init__(self, *, uid=None,
grid_context=None,
K=1,
block_type="msoa",
hmc_all_iterations=100_000,
hmc_burn_in=25_000,
hmc_calibration=50_000,
hmc_info_interval=20_000,
hmc_thinning=5,
verbose=False,
lengthscale=1):
self.uid = uid
self.context = grid_context
self.K = K
self.NN = self.context.mask.shape[0]
self.hmc_thinning = hmc_thinning
self.hmc_info_interval = hmc_info_interval
self.N = grid_context.counts.shape[0]
self.J = self.context.J
# do a random assignment to mixtures
initial_Z = np.zeros((self.N, self.K), dtype=int)
initial_Z[np.arange(self.N), np.random.choice(self.K, self.N)] = 1
self.Z_samples = []
# Create an (N x 1) vector which gives the corresponding block for each cell.
if block_type == "lad":
block_assignment = np.asarray(grid_context.lads)
elif block_type == "msoa":
block_assignment = np.asarray(grid_context.msoas)
elif block_type == "ward":
block_assignment = np.asarray(grid_context.wards)
else:
block_assignment = np.repeat(1, self.N) # a single block
# read in block centroid coordinates
block_centroid_file_path = Path(os.getcwd()) / "data" / "processed" / f"{block_type}-centroids-map.csv"
block_centroids = pd.read_csv(block_centroid_file_path)
self.coord_x = block_centroids["x"].values
self.coord_x = self.coord_x - np.min(self.coord_x)
self.coord_y = block_centroids["y"].values
self.coord_y = self.coord_y - np.min(self.coord_y)
self.block_labels = block_centroids.iloc[:, 1].values
# Create the cell <-> block mapping (mind the ordering of the blocks)
unique_block_labels = np.unique(self.block_labels)
self.block_assignment_numeric = np.zeros(block_assignment.shape[0], dtype=np.int)
for idx_cell, block_label in enumerate(block_assignment):
self.block_assignment_numeric[idx_cell] = np.where(unique_block_labels == block_label)[0]
self.block_assignment = block_assignment
B = np.max(self.block_assignment_numeric) + 1
self.B = B
self.lengthscale = lengthscale
# Priors
self.beta_prior = BetaPriorWithIntercept(a=1, b=0.01)
self.f_prior = GPNonGridPriorSqExpFixed(coord_x=self.coord_x, coord_y=self.coord_y,
variance=100, lengthscale=self.lengthscale)
self.log_theta_prior = GaussianPrior(mean=np.asarray([0]), variance=np.asarray([1e2]))
init_beta_estimand = np.random.normal(0, 1, self.context.J * self.K)
init_beta_mass_matrix = 1e3 * np.ones(self.context.J * self.K)
self.beta_sampler = HMCSampler(func_lpdf=self.beta_loglik,
func_nabla_lpdf=self.nabla_beta_loglik,
func_plot=self.plot_beta if verbose else None,
init_estimand=init_beta_estimand,
init_M_diag=init_beta_mass_matrix,
init_L=20,
init_epsilon=5.0e-2,
n_burnin=hmc_burn_in,
n_calib=hmc_calibration,
S=hmc_all_iterations,
n_info_interval=hmc_info_interval,
thinning=hmc_thinning,
unique_estimation_id=uid,
adaptive=True)
init_f_estimand = np.random.normal(0, 1, B * self.K)
init_f_mass_matrix = 1e4 * np.ones(B * self.K)
self.f_sampler = HMCSampler(func_lpdf=self.f_loglik,
func_nabla_lpdf=self.nabla_f_loglik,
func_plot=self.plot_f if verbose else None,
init_estimand=init_f_estimand,
init_M_diag=init_f_mass_matrix,
init_L=100,
init_epsilon=5.0e-2,
n_burnin=hmc_burn_in,
n_calib=hmc_calibration,
S=hmc_all_iterations,
n_info_interval=hmc_info_interval,
thinning=hmc_thinning,
unique_estimation_id=uid,
adaptive=False)
self.current_beta = self.beta_sampler.estimand
self.current_f = self.f_sampler.estimand
self.current_Z = initial_Z
self.logger = logging.getLogger(__name__)
def beta_loglik(self, beta_estimand):
beta_matrix = np.reshape(beta_estimand, (self.J, self.K), order='F') # build a J x K matrix
Z = self.current_Z
counts = self.context.counts
covariates = self.context.covariates
fixed_effects = np.sum(np.multiply(Z, np.dot(covariates, beta_matrix)), axis=1)
poisson_part = np.sum(np.multiply(counts, fixed_effects) - np.exp(fixed_effects))
beta_part = self.beta_prior.log_pdf(beta_estimand, self.J)
output = poisson_part + beta_part
return output
def nabla_beta_loglik(self, beta_estimand):
beta_matrix = np.reshape(beta_estimand, (self.J, self.K), order='F') # build a J x K matrix
counts = self.context.counts
covariates = self.context.covariates
Z = self.current_Z
fixed_effects = np.sum(np.multiply(Z, np.dot(covariates, beta_matrix)), axis=1)
nabla_beta_matrix = np.zeros(beta_matrix.shape)
nabla_beta_matrix += np.dot(covariates.T, Z * counts[:, np.newaxis])
temp = np.exp(fixed_effects)
nabla_beta_matrix += (- np.dot(covariates.T, Z * temp[:, np.newaxis]))
nabla_beta = nabla_beta_matrix.flatten('F')
nabla_beta += self.beta_prior.nabla_beta_log_pdf(beta_estimand, self.J)
output = nabla_beta
return output
def plot_beta(self, beta_samples):
beta_samples_array = np.asarray(beta_samples)
for k in range(self.K):
beta_k_samples = beta_samples_array[:, (k * self.J):((k + 1) * self.J)]
plot_traceplots(beta_k_samples, self.context.covariates_names)
plt.show()
def sample_Z(self):
beta_matrix = np.reshape(self.current_beta, (self.J, self.K), order='F') # build a J x K matrix
f_matrix = np.reshape(self.current_f, (self.B, self.K), order='F')
Z = self.current_Z
f_full_matrix = gp_inflate_duplicate(f_matrix,
self.block_assignment_numeric,
self.N, self.K)
counts = self.context.counts
covariates = self.context.covariates
fixed_effects_all = np.dot(covariates, beta_matrix)
counts_matrix = np.repeat(counts.reshape((-1, 1)), self.K, axis=1)
poi_lik = counts_matrix * fixed_effects_all - np.exp(fixed_effects_all)
gp_log_softmax = f_full_matrix - logsumexp(f_full_matrix, axis=1)[:, np.newaxis]
prob = softmax(poi_lik + gp_log_softmax, axis=1)
new_Z = zsampler.sample_bulk_categorical(Z.astype(np.int64), prob.astype(np.float64))
return new_Z
def f_loglik(self, F_estimand):
f_matrix = np.reshape(F_estimand, (self.B, self.K), order='F')
Z = self.current_Z
f_full_matrix = gp_inflate_duplicate(f_matrix,
self.block_assignment_numeric,
self.N, self.K)
output = 0
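        # allocation term: sum over observations of the log softmax weight of the allocated component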
temp = f_full_matrix - logsumexp(f_full_matrix, axis=1)[:, np.newaxis]
output += np.sum(np.multiply(Z, temp))
for k in range(self.K):
# GP contribution
output += self.f_prior.get_logpdf(f=f_matrix[:, k])
return output
def nabla_f_loglik(self, F_estimand):
f_matrix = np.reshape(F_estimand, (self.B, self.K), order='F')
f_full_matrix = gp_inflate_duplicate(f_matrix,
self.block_assignment_numeric,
self.N, self.K)
Z = self.current_Z
f_gradient = np.zeros(f_matrix.shape)
# nabla f poisson mixture
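        # per-observation contribution Z * (1 - softmax(f)), aggregated back to block level by gp_deflate_sum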
temp_matrix = 1 - np.exp(f_full_matrix - logsumexp(f_full_matrix, axis=1)[:, np.newaxis])
inflated_output_matrix = np.multiply(Z, temp_matrix)
f_gradient += gp_deflate_sum(inflated_output_matrix, self.block_assignment_numeric, self.N, self.B, self.K)
for k in range(self.K):
f_gradient[:, k] += self.f_prior.get_nabla_f(f=f_matrix[:, k])
return f_gradient.flatten(order='F')
def plot_f(self, F_samples):
f_array = np.asarray(F_samples).reshape((-1, self.B, self.K), order='F')
S = f_array.shape[0]
# discard irrelevant samples
self.Z_samples = self.Z_samples[(-S):]
Z_samples_array = np.asarray(self.Z_samples)
mixture_allocation = np.zeros((S, self.N, self.K))
mixture_allocation[np.repeat(range(S), self.N), np.tile(range(self.N), S), Z_samples_array.flatten(order='C')] = 1
average_alloc = np.mean(mixture_allocation, axis=0)
for k in range(self.K):
plt.figure()
self.context.plot_realisations(average_alloc[:, k], 111)
plt.show()
# plot a random traceplot
idx1 = np.random.choice(self.B)
plot_traceplots(f_array[:, idx1, :], [f"IDX: {idx1}: K={k}" for k in range(self.K)])
plt.show()
latent_weight_samples = softmax(np.mean(f_array, axis=0), axis=1)
latent_weight_samples_full = gp_inflate_duplicate(latent_weight_samples,
self.block_assignment_numeric,
self.N, self.K)
plt.figure()
for k in range(self.K):
self.context.plot_realisations(latent_weight_samples_full[:, k], 111)
plt.show()
def load_samples_snapshot(self, iteration_no):
beta_filepath = Path(os.getcwd()) / "models" / "snapshots" / f"beta-samples--{self.uid}--{iteration_no}.npy"
F_filepath = Path(os.getcwd()) / "models" / "snapshots" / f"F-samples--{self.uid}--{iteration_no}.npy"
Z_filepath = Path(os.getcwd()) / "models" / "snapshots" / f"Z-samples--{self.uid}--{iteration_no}.npy"
beta_samples = np.load(beta_filepath)
F_samples = np.load(F_filepath)
Z_samples = np.load(Z_filepath)
return beta_samples, Z_samples, F_samples
def __save_output(self, iteration):
folder_name = Path(os.getcwd()) / "models" / "snapshots"
if not os.path.exists(folder_name):
os.makedirs(folder_name)
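        # stored chains are thinned by hmc_thinning before being written to disk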
F_full_path = folder_name / f"F-samples--{self.uid}--{iteration}"
F_samples_array = np.asarray(self.f_sampler.samples)
if F_samples_array.shape[0] > 0:
np.save(F_full_path, F_samples_array[::self.hmc_thinning, :])
beta_full_path = folder_name / f"beta-samples--{self.uid}--{iteration}"
beta_array = np.asarray(self.beta_sampler.samples)
if beta_array.shape[0] > 0:
np.save(beta_full_path, beta_array[::self.hmc_thinning, :])
Z_full_path = folder_name / f"Z-samples--{self.uid}--{iteration}"
Z_array = np.asarray(self.Z_samples)
if Z_array.shape[0] > 0:
np.save(Z_full_path, Z_array[::self.hmc_thinning, :])
def run_sampling(self, number_of_iterations):
iteration = 0
while iteration < number_of_iterations:
##########################################################################################
# BOOKKEEPING
##########################################################################################
# The HMC samplers are independently adaptive and therefore will discard samples during the adaptive phase.
num_current_samples = min(len(self.beta_sampler.samples),
len(self.f_sampler.samples))
self.beta_sampler.samples = self.beta_sampler.samples[(-num_current_samples):]
self.f_sampler.samples = self.f_sampler.samples[(-num_current_samples):]
self.Z_samples = self.Z_samples[(-num_current_samples):]
if (iteration + 1) % self.hmc_info_interval == 0:
self.__save_output(iteration)
##########################################################################################
# SAMPLE BETA
##########################################################################################
self.beta_sampler.sample_one()
self.current_beta = self.beta_sampler.estimand
##########################################################################################
# SAMPLE Z
##########################################################################################
new_Z = self.sample_Z()
self.Z_samples.append(np.where(new_Z > 0)[1])
self.current_Z = new_Z
##########################################################################################
# SAMPLE F
##########################################################################################
self.f_sampler.sample_one()
self.current_f = self.f_sampler.estimand
iteration += 1
self.logger.info("Sampling completed - saving model.")
self.__save_output(iteration)
@click.command()
@click.option('--year', '-y', type=str, default='12013-122015')
@click.option('--type', '-t', default='burglary')
@click.option('--resolution', '-r', type=int, default=400)
@click.option('--model_name', '-m', type=str, default='burglary_raw_4')
@click.option('--interpolation', '-i', type=str, default='weighted')
@click.option('--num_mixtures', '-K', type=int, default=3)
@click.option('--uid', type=str, default=None)
@click.option('--verbose', is_flag=True)
@click.option('--block_type', type=str, default="lad")
@click.option('--collection_unit', type=str, default="lsoa")
@click.option('--lengthscale', type=float, default=1500.0)
def main(year, type, resolution, model_name, interpolation, num_mixtures, uid, verbose,
block_type, collection_unit, lengthscale):
if uid is None:
uid = f"blockmixgp--{block_type}--{type}--{model_name}--{interpolation}--{num_mixtures}--{resolution}-{year}"
log_fmt = '[%(levelname)s] [%(asctime)s] [%(name)s] %(message)s'
datefmt = '%H:%M:%S'
if verbose:
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=log_fmt)
else:
logging.basicConfig(filename=Path('models') / f"log-{uid}.log",
filemode='a',
format=log_fmt,
datefmt=datefmt,
level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.info("Building the context.")
grid_context = GridContextGeo(interpolation=interpolation,
year=year,
resolution=resolution,
crime_type=type,
model_name=model_name,
cov_collection_unit=collection_unit,
covariates_type='raw')
logger.info("Writing sampling context into a file.")
context_filename = Path(os.getcwd()) / "models" / f"context--{uid}.pickle"
with open(context_filename, 'wb') as context_file:
context_info = {
'context': grid_context,
'K': num_mixtures
}
pickle.dump(context_info, context_file)
logger.info("Initialising the model with estimand and mass matrix diagonal")
hmc_all_iterations = 250_000
hmc_info_interval = 50_000
hmc_thinning = 10
hmc_burn_in = 90_000
hmc_calibration = 150_000
model = BlockMixtureGpSoftmaxAllocation(uid=uid,
grid_context=grid_context,
K=num_mixtures,
hmc_info_interval=hmc_info_interval,
hmc_all_iterations=hmc_all_iterations,
hmc_thinning=hmc_thinning,
hmc_burn_in=hmc_burn_in,
hmc_calibration=hmc_calibration,
block_type=block_type,
verbose=verbose,
lengthscale=lengthscale)
model.run_sampling(number_of_iterations=hmc_all_iterations)
logger.info("Procedure finished.")
if __name__ == "__main__":
load_dotenv(find_dotenv())
main()
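# Example invocation (sketch; exact data paths and .env contents depend on the project setup):
#   python block_mixture_gp_softmax.py --type burglary --num_mixtures 3 --lengthscale 1500 --verbose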
| 43.801956 | 122 | 0.561708 | ["MIT"] | jp2011/spatial-poisson-mixtures | src/models/block_mixture_gp_softmax.py | 17,915 | Python |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deepcoin-cli"""
from test_framework.test_framework import DeepcoinTestFramework
from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie
class TestDeepcoinCli(DeepcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
"""Main test logic"""
cli_response = self.nodes[0].cli("-version").send_cli()
assert("Deepcoin Core RPC client version" in cli_response)
self.log.info("Compare responses from gewalletinfo RPC and `deepcoin-cli getwalletinfo`")
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info("Compare responses from getblockchaininfo RPC and `deepcoin-cli getblockchaininfo`")
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
user, password = get_auth_cookie(self.nodes[0].datadir)
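        # credentials from the node's auth cookie are fed to deepcoin-cli via stdin in the tests below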
self.log.info("Test -stdinrpcpass option")
assert_equal(0, self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input=password).getblockcount())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input="foo").echo)
self.log.info("Test -stdin and -stdinrpcpass")
assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input="foo").echo)
self.log.info("Test connecting to a non-existing server")
assert_raises_process_error(1, "Could not connect to the server", self.nodes[0].cli('-rpcport=1').echo)
self.log.info("Test connecting with non-existing RPC cookie file")
assert_raises_process_error(1, "Could not locate RPC credentials", self.nodes[0].cli('-rpccookiefile=does-not-exist', '-rpcpassword=').echo)
self.log.info("Make sure that -getinfo with arguments fails")
assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help)
self.log.info("Compare responses from `deepcoin-cli -getinfo` and the RPCs data is retrieved from.")
cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test")
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
# unlocked_until is not tested because the wallet is not encrypted
if __name__ == '__main__':
TestDeepcoinCli().main()
| 56.133333 | 160 | 0.706176 | ["MIT"] | deepcoindev2/Deepcoin | test/functional/interface_deepcoin_cli.py | 4,210 | Python |
# coding: utf-8
"""
Flat API
    The Flat API allows you to easily extend the abilities of the [Flat Platform](https://flat.io), with a wide range of use cases including the following: * Creating and importing new music scores using MusicXML, MIDI, Guitar Pro (GP3, GP4, GP5, GPX, GP), PowerTab, TuxGuitar and MuseScore files * Browsing, updating, copying, exporting the user's scores (for example in MP3, WAV or MIDI) * Managing educational resources with Flat for Education: creating & updating the organization accounts, the classes, rosters and assignments. The Flat API is built on HTTP. Our API is RESTful. It has predictable resource URLs. It returns HTTP response codes to indicate errors. It also accepts and returns JSON in the HTTP body. The [schema](/swagger.yaml) of this API follows the [OpenAPI Initiative (OAI) specification](https://www.openapis.org/), you can use and work with [compatible Swagger tools](http://swagger.io/open-source-integrations/). This API features Cross-Origin Resource Sharing (CORS) implemented in compliance with [W3C spec](https://www.w3.org/TR/cors/). You can use your favorite HTTP/REST library for your programming language to use Flat's API. This specification and reference is [available on Github](https://github.com/FlatIO/api-reference). Getting Started and learn more: * [API Overview and introduction](https://flat.io/developers/docs/api/) * [Authentication (Personal Access Tokens or OAuth2)](https://flat.io/developers/docs/api/authentication.html) * [SDKs](https://flat.io/developers/docs/api/sdks.html) * [Rate Limits](https://flat.io/developers/docs/api/rate-limits.html) * [Changelog](https://flat.io/developers/docs/api/changelog.html) # noqa: E501
OpenAPI spec version: 2.7.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class FlatLocales(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
EN = "en"
ES = "es"
FR = "fr"
DE = "de"
IT = "it"
JA = "ja"
KO = "ko"
NL = "nl"
PL = "pl"
PT = "pt"
RO = "ro"
RU = "ru"
ZH_HANS = "zh-Hans"
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""FlatLocales - a model defined in OpenAPI""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FlatLocales):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 40.242718 | 1,686 | 0.624367 | ["Apache-2.0"] | FlatIO/api-client-python | flat_api/models/flat_locales.py | 4,145 | Python |
from io import BytesIO
from unittest import TestCase
from ecc import G, N, PrivateKey, S256Point
from helper import (
big_endian_to_int,
byte_to_int,
encode_base58_checksum,
hmac_sha512,
hmac_sha512_kdf,
int_to_big_endian,
int_to_byte,
raw_decode_base58,
sha256,
)
from mnemonic import secure_mnemonic, WORD_LOOKUP, WORD_LIST
MAINNET_XPRV = bytes.fromhex('0488ade4')
MAINNET_XPUB = bytes.fromhex('0488b21e')
MAINNET_YPRV = bytes.fromhex('049d7878')
MAINNET_YPUB = bytes.fromhex('049d7cb2')
MAINNET_ZPRV = bytes.fromhex('04b2430c')
MAINNET_ZPUB = bytes.fromhex('04b24746')
TESTNET_XPRV = bytes.fromhex('04358394')
TESTNET_XPUB = bytes.fromhex('043587cf')
TESTNET_YPRV = bytes.fromhex('044a4e28')
TESTNET_YPUB = bytes.fromhex('044a5262')
TESTNET_ZPRV = bytes.fromhex('045f18bc')
TESTNET_ZPUB = bytes.fromhex('045f1cf6')
class HDPrivateKey:
def __init__(self, private_key, chain_code,
depth=0, parent_fingerprint=b'\x00\x00\x00\x00',
child_number=0, testnet=False):
# the main secret, should be a PrivateKey object
self.private_key = private_key
self.private_key.testnet = testnet
# the code to make derivation deterministic
self.chain_code = chain_code
        # level the current key is at in the hierarchy
self.depth = depth
# fingerprint of the parent key
self.parent_fingerprint = parent_fingerprint
# what order child this is
self.child_number = child_number
self.testnet = testnet
# keep a copy of the corresponding public key
self.pub = HDPublicKey(
point=private_key.point,
chain_code=chain_code,
depth=depth,
parent_fingerprint=parent_fingerprint,
child_number=child_number,
testnet=testnet,
)
def wif(self):
return self.private_key.wif()
def sec(self):
return self.pub.sec()
def hash160(self):
return self.pub.hash160()
def p2pkh_script(self):
return self.pub.p2pkh_script()
def p2wpkh_script(self):
return self.pub.p2wpkh_script()
def p2sh_p2wpkh_script(self):
return self.pub.p2sh_p2wpkh_script()
def address(self):
return self.pub.address()
def bech32_address(self):
return self.pub.bech32_address()
def p2sh_p2wpkh_address(self):
return self.pub.p2sh_p2wpkh_address()
def __repr__(self):
return self.xprv()
@classmethod
def from_seed(cls, seed, testnet=False):
# get hmac_sha512 with b'Bitcoin seed' and seed
h = hmac_sha512(b'Bitcoin seed', seed)
# create the private key using the first 32 bytes in big endian
private_key = PrivateKey(secret=big_endian_to_int(h[:32]))
# chaincode is the last 32 bytes
chain_code = h[32:]
# return an instance of the class
return cls(
private_key=private_key,
chain_code=chain_code,
testnet=testnet,
)
def child(self, index):
'''Returns the child HDPrivateKey at a particular index.
        Hardened children are returned for indices >= 0x80000000.
'''
# if index >= 0x80000000
if index >= 0x80000000:
# the message data is the private key secret in 33 bytes in
# big-endian and the index in 4 bytes big-endian.
data = int_to_big_endian(self.private_key.secret, 33) + int_to_big_endian(index, 4)
else:
# the message data is the public key compressed SEC
# and the index in 4 bytes big-endian.
data = self.private_key.point.sec() + int_to_big_endian(index, 4)
# get the hmac_sha512 with chain code and data
h = hmac_sha512(self.chain_code, data)
# the new secret is the first 32 bytes as a big-endian integer
# plus the secret mod N
secret = (big_endian_to_int(h[:32]) + self.private_key.secret) % N
# create the PrivateKey object
private_key = PrivateKey(secret=secret)
# the chain code is the last 32 bytes
chain_code = h[32:]
# depth is whatever the current depth + 1
depth = self.depth + 1
# parent_fingerprint is the fingerprint of this node
parent_fingerprint = self.fingerprint()
# child number is the index
child_number = index
# return a new HDPrivateKey instance
return HDPrivateKey(
private_key=private_key,
chain_code=chain_code,
depth=depth,
parent_fingerprint=parent_fingerprint,
child_number=child_number,
testnet=self.testnet,
)
def traverse(self, path):
'''Returns the HDPrivateKey at the path indicated.
Path should be in the form of m/x/y/z where x' means
hardened'''
# keep track of the current node starting with self
current = self
# split up the path by the '/' splitter, ignore the first
components = path.split('/')[1:]
# iterate through the path components
for child in components:
# if the child ends with a ', we have a hardened child
if child.endswith("'"):
# index is the integer representation + 0x80000000
index = int(child[:-1]) + 0x80000000
# else the index is the integer representation
else:
index = int(child)
# grab the child at the index calculated
current = current.child(index)
# return the current child
return current
def raw_serialize(self, version):
# version + depth + parent_fingerprint + child number + chain code + private key
# start with version, which should be a constant depending on testnet
raw = version
# add depth, which is 1 byte using int_to_byte
raw += int_to_byte(self.depth)
# add the parent_fingerprint
raw += self.parent_fingerprint
# add the child number 4 bytes using int_to_big_endian
raw += int_to_big_endian(self.child_number, 4)
# add the chain code
raw += self.chain_code
# add the 0 byte and the private key's secret in big endian, 33 bytes
raw += int_to_big_endian(self.private_key.secret, 33)
return raw
def _prv(self, version):
'''Returns the base58-encoded x/y/z prv.
Expects a 4-byte version.'''
raw = self.raw_serialize(version)
# return the whole thing base58-encoded
return encode_base58_checksum(raw)
def xprv(self):
# from BIP0032:
if self.testnet:
version = TESTNET_XPRV
else:
version = MAINNET_XPRV
return self._prv(version)
def yprv(self):
# from BIP0049:
if self.testnet:
version = TESTNET_YPRV
else:
version = MAINNET_YPRV
return self._prv(version)
def zprv(self):
# from BIP0084:
if self.testnet:
version = TESTNET_ZPRV
else:
version = MAINNET_ZPRV
return self._prv(version)
# passthrough methods
def fingerprint(self):
return self.pub.fingerprint()
def xpub(self):
return self.pub.xpub()
def ypub(self):
return self.pub.ypub()
def zpub(self):
return self.pub.zpub()
@classmethod
def parse(cls, s):
'''Returns a HDPrivateKey from an extended key string'''
# get the bytes from the base58 using raw_decode_base58
raw = raw_decode_base58(s)
# check that the length of the raw is 78 bytes, otherwise raise ValueError
if len(raw) != 78:
raise ValueError('Not a proper extended key')
# create a stream
stream = BytesIO(raw)
# return the raw parsing of the stream
return cls.raw_parse(stream)
@classmethod
def raw_parse(cls, s):
'''Returns a HDPrivateKey from a stream'''
# first 4 bytes are the version
version = s.read(4)
# check that the version is one of the TESTNET or MAINNET
# private keys, if not raise a ValueError
if version in (TESTNET_XPRV, TESTNET_YPRV, TESTNET_ZPRV):
testnet = True
elif version in (MAINNET_XPRV, MAINNET_YPRV, MAINNET_ZPRV):
testnet = False
else:
raise ValueError('not an xprv, yprv or zprv: {}'.format(version))
# the next byte is depth
depth = byte_to_int(s.read(1))
# next 4 bytes are the parent_fingerprint
parent_fingerprint = s.read(4)
# next 4 bytes is the child number in big-endian
child_number = big_endian_to_int(s.read(4))
# next 32 bytes are the chain code
chain_code = s.read(32)
# the next byte should be b'\x00'
if byte_to_int(s.read(1)) != 0:
raise ValueError('private key should be preceded by a zero byte')
# last 32 bytes should be the private key in big endian
private_key = PrivateKey(secret=big_endian_to_int(s.read(32)))
# return an instance of the class
return cls(
private_key=private_key,
chain_code=chain_code,
depth=depth,
parent_fingerprint=parent_fingerprint,
child_number=child_number,
testnet=testnet,
)
def _get_address(self, purpose, account=0, external=True, address=0):
'''Returns the proper address among purposes 44', 49' and 84'.
p2pkh for 44', p2sh-p2wpkh for 49' and p2wpkh for 84'.'''
# if purpose is not one of 44', 49' or 84', raise ValueError
if purpose not in ("44'", "49'", "84'"):
raise ValueError('Cannot create an address without a proper purpose: {}'.format(purpose))
# if testnet, coin is 1', otherwise 0'
if self.testnet:
coin = "1'"
else:
coin = "0'"
# if external, chain is 0, otherwise 1
if external:
chain = '0'
else:
chain = '1'
# create the path m/purpose'/coin'/account'/chain/address
path = "m/{}/{}/{}'/{}/{}".format(purpose, coin, account, chain, address)
# get the HDPrivateKey at that location
hd_priv = self.traverse(path)
# if 44', return the address
if purpose == "44'":
return hd_priv.address()
# if 49', return the p2sh_p2wpkh_address
elif purpose == "49'":
return hd_priv.p2sh_p2wpkh_address()
# if 84', return the bech32_address
elif purpose == "84'":
return hd_priv.bech32_address()
def get_p2pkh_receiving_address(self, account=0, address=0):
return self._get_address("44'", account, True, address)
def get_p2pkh_change_address(self, account=0, address=0):
return self._get_address("44'", account, False, address)
def get_p2sh_p2wpkh_receiving_address(self, account=0, address=0):
return self._get_address("49'", account, True, address)
def get_p2sh_p2wpkh_change_address(self, account=0, address=0):
return self._get_address("49'", account, False, address)
def get_p2wpkh_receiving_address(self, account=0, address=0):
return self._get_address("84'", account, True, address)
def get_p2wpkh_change_address(self, account=0, address=0):
return self._get_address("84'", account, False, address)
@classmethod
def generate(cls, password=b'', entropy=0, testnet=False):
mnemonic = secure_mnemonic(entropy=entropy)
return mnemonic, cls.from_mnemonic(mnemonic, password=password, testnet=testnet)
@classmethod
def from_mnemonic(cls, mnemonic, password=b'', path='m', testnet=False):
'''Returns a HDPrivateKey object from the mnemonic.'''
# split the mnemonic into words with .split()
words = mnemonic.split()
# check that there are 12, 15, 18, 21 or 24 words
# if not, raise a ValueError
if len(words) not in (12, 15, 18, 21, 24):
raise ValueError('you need 12, 15, 18, 21, or 24 words')
# calculate the number
number = 0
# each word is 11 bits
for word in words:
# get the number that the word represents using WORD_LOOKUP
index = WORD_LOOKUP[word]
# left-shift the number by 11 bits and bitwise-or the index
number = (number << 11) | index
# checksum is the last n bits where n = (# of words / 3)
checksum_bits_length = len(words) // 3
# grab the checksum bits
checksum = number & ((1 << checksum_bits_length) - 1)
# get the actual number by right-shifting by the checksum bits length
data_num = number >> checksum_bits_length
# convert the number to big-endian
data = int_to_big_endian(data_num, checksum_bits_length * 4)
# the one byte we get is from sha256 of the data, shifted by
# 8 - the number of bits we need for the checksum
computed_checksum = sha256(data)[0] >> (8 - checksum_bits_length)
# check that the checksum is correct or raise ValueError
if checksum != computed_checksum:
raise ValueError('words fail checksum: {}'.format(words))
# normalize in case we got a mnemonic that's just the first 4 letters
normalized_words = []
for word in words:
normalized_words.append(WORD_LIST[WORD_LOOKUP[word]])
normalized_mnemonic = ' '.join(normalized_words)
# salt is b'mnemonic' + password
salt = b'mnemonic' + password
# the seed is the hmac_sha512_kdf with normalized mnemonic and salt
seed = hmac_sha512_kdf(normalized_mnemonic, salt)
# return the HDPrivateKey at the path specified
return cls.from_seed(seed, testnet=testnet).traverse(path)
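# Illustrative sketch (not part of the class above): deriving a BIP84 receiving address
# from the standard BIP39 test mnemonic; the expected address matches the vector checked
# in test_bech32_address further down.
#   priv = HDPrivateKey.from_mnemonic(
#       'abandon abandon abandon abandon abandon abandon '
#       'abandon abandon abandon abandon abandon about')
#   print(priv.get_p2wpkh_receiving_address(0, 0))
#   # -> bc1qcr8te4kr609gcawutmrza0j4xv80jy8z306fyu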
class HDPublicKey:
def __init__(self, point, chain_code, depth, parent_fingerprint,
child_number, testnet=False):
self.point = point
self.chain_code = chain_code
self.depth = depth
self.parent_fingerprint = parent_fingerprint
self.child_number = child_number
self.testnet = testnet
self._raw = None
def __repr__(self):
return self.xpub()
def sec(self):
return self.point.sec()
def hash160(self):
return self.point.hash160()
def p2pkh_script(self):
return self.point.p2pkh_script()
def p2wpkh_script(self):
return self.point.p2wpkh_script()
def p2sh_p2wpkh_script(self):
return self.point.p2sh_p2wpkh_script()
def address(self):
return self.point.address(testnet=self.testnet)
def bech32_address(self):
return self.point.bech32_address(testnet=self.testnet)
def p2sh_p2wpkh_address(self):
return self.point.p2sh_p2wpkh_address(testnet=self.testnet)
def fingerprint(self):
'''Fingerprint is the hash160's first 4 bytes'''
return self.hash160()[:4]
def child(self, index):
        '''Returns the child HDPublicKey at a particular index.
        Raises ValueError for indices >= 0x80000000.
'''
# if index >= 0x80000000, raise a ValueError
if index >= 0x80000000:
raise ValueError('child number should always be less than 2^31')
# data is the SEC compressed and the index in 4 bytes big-endian
data = self.point.sec() + int_to_big_endian(index, 4)
# get hmac_sha512 with chain code, data
h = hmac_sha512(self.chain_code, data)
# the new public point is the current point +
# the first 32 bytes in big endian * G
point = self.point + big_endian_to_int(h[:32]) * G
# chain code is the last 32 bytes
chain_code = h[32:]
# depth is current depth + 1
depth = self.depth + 1
# parent_fingerprint is the fingerprint of this node
parent_fingerprint = self.fingerprint()
# child number is the index
child_number = index
# return the HDPublicKey instance
return HDPublicKey(
point=point,
chain_code=chain_code,
depth=depth,
parent_fingerprint=parent_fingerprint,
child_number=child_number,
testnet=self.testnet,
)
def traverse(self, path):
'''Returns the HDPublicKey at the path indicated.
Path should be in the form of m/x/y/z.'''
# start current node at self
current = self
# get components of the path split at '/', ignoring the first
components = path.split('/')[1:]
# iterate through the components
for child in components:
# raise a ValueError if the path ends with a '
if child[-1:] == "'":
raise ValueError('HDPublicKey cannot get hardened child')
# traverse the next child at the index
current = current.child(int(child))
# return the current node
return current
def raw_serialize(self):
if self._raw is None:
if self.testnet:
version = TESTNET_XPUB
else:
version = MAINNET_XPUB
self._raw = self._serialize(version)
return self._raw
def _serialize(self, version):
# start with the version
raw = version
# add the depth using int_to_byte
raw += int_to_byte(self.depth)
# add the parent_fingerprint
raw += self.parent_fingerprint
# add the child number in 4 bytes using int_to_big_endian
raw += int_to_big_endian(self.child_number, 4)
# add the chain code
raw += self.chain_code
# add the SEC pubkey
raw += self.point.sec()
return raw
def _pub(self, version):
'''Returns the base58-encoded x/y/z pub.
Expects a 4-byte version.'''
# get the serialization
raw = self._serialize(version)
# base58-encode the whole thing
return encode_base58_checksum(raw)
def xpub(self):
if self.testnet:
version = TESTNET_XPUB
else:
version = MAINNET_XPUB
return self._pub(version)
def ypub(self):
if self.testnet:
version = TESTNET_YPUB
else:
version = MAINNET_YPUB
return self._pub(version)
def zpub(self):
if self.testnet:
version = TESTNET_ZPUB
else:
version = MAINNET_ZPUB
return self._pub(version)
@classmethod
def parse(cls, s):
'''Returns a HDPublicKey from an extended key string'''
# get the bytes from the base58 using raw_decode_base58
raw = raw_decode_base58(s)
# check that the length of the raw is 78 bytes, otherwise raise ValueError
if len(raw) != 78:
raise ValueError('Not a proper extended key')
# create a stream
stream = BytesIO(raw)
# return the raw parsing of the stream
return cls.raw_parse(stream)
@classmethod
def raw_parse(cls, s):
'''Returns a HDPublicKey from a stream'''
# first 4 bytes are the version
version = s.read(4)
# check that the version is one of the TESTNET or MAINNET
# public keys, if not raise a ValueError
if version in (TESTNET_XPUB, TESTNET_YPUB, TESTNET_ZPUB):
testnet = True
elif version in (MAINNET_XPUB, MAINNET_YPUB, MAINNET_ZPUB):
testnet = False
else:
raise ValueError('not an xpub, ypub or zpub: {} {}'.format(s, version))
# the next byte is depth
depth = byte_to_int(s.read(1))
# next 4 bytes are the parent_fingerprint
parent_fingerprint = s.read(4)
# next 4 bytes is the child number in big-endian
child_number = big_endian_to_int(s.read(4))
# next 32 bytes are the chain code
chain_code = s.read(32)
# last 33 bytes should be the SEC
point = S256Point.parse(s.read(33))
# return an instance of the class
return cls(
point=point,
chain_code=chain_code,
depth=depth,
parent_fingerprint=parent_fingerprint,
child_number=child_number,
testnet=testnet,
)
class HDTest(TestCase):
def test_from_seed(self):
seed = b'[email protected] Jimmy Song'
priv = HDPrivateKey.from_seed(seed, testnet=True)
addr = priv.bech32_address()
self.assertEqual(addr, 'tb1q7kn55vf3mmd40gyj46r245lw87dc6us5n50lrg')
def test_child(self):
seed = b'[email protected] Jimmy Song'
priv = HDPrivateKey.from_seed(seed, testnet=True)
pub = priv.pub
want = 'tb1qu6mnnk54hxfhy4aj58v0w6e7q8hghtv8wcdl7g'
addr = priv.child(0).bech32_address()
self.assertEqual(addr, want)
addr = pub.child(0).bech32_address()
self.assertEqual(addr, want)
addr = priv.child(0x80000002).bech32_address()
self.assertEqual(addr, 'tb1qscu8evdlqsucj7p84xwnrf63h4jsdr5yqga8zq')
with self.assertRaises(ValueError):
pub.child(0x80000002)
def test_traverse(self):
seed = b'[email protected] Jimmy Song'
priv = HDPrivateKey.from_seed(seed, testnet=True)
pub = priv.pub
path = "m/1/2/3/4"
self.assertEqual(priv.traverse(path).bech32_address(), pub.traverse(path).bech32_address())
path = "m/0/1'/2/3'"
self.assertEqual(priv.traverse(path).bech32_address(), 'tb1q423gz8cenqt6vfw987vlyxql0rh2jgh4sy0tue')
def test_prv_pub(self):
tests = [
{
'seed': bytes.fromhex('000102030405060708090a0b0c0d0e0f'),
'paths': [
[
'm',
'xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8',
'xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi',
], [
'm/0\'',
'xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw',
'xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7',
], [
'm/0\'/1',
'xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ',
'xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs',
], [
'm/0\'/1/2\'',
'xpub6D4BDPcP2GT577Vvch3R8wDkScZWzQzMMUm3PWbmWvVJrZwQY4VUNgqFJPMM3No2dFDFGTsxxpG5uJh7n7epu4trkrX7x7DogT5Uv6fcLW5',
'xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM',
], [
'm/0\'/1/2\'/2',
'xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV',
'xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334',
], [
'm/0\'/1/2\'/2/1000000000',
'xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy',
'xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76',
]
],
}, {
'seed': bytes.fromhex('fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542'),
'paths': [
[
'm',
'xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB',
'xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U',
], [
'm/0',
'xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH',
'xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt',
], [
'm/0/2147483647\'',
'xpub6ASAVgeehLbnwdqV6UKMHVzgqAG8Gr6riv3Fxxpj8ksbH9ebxaEyBLZ85ySDhKiLDBrQSARLq1uNRts8RuJiHjaDMBU4Zn9h8LZNnBC5y4a',
'xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9',
], [
'm/0/2147483647\'/1',
'xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon',
'xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef',
], [
'm/0/2147483647\'/1/2147483646\'',
'xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL',
'xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc',
], [
'm/0/2147483647\'/1/2147483646\'/2',
'xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt',
'xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j',
],
],
}, {
'seed': bytes.fromhex('4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be'),
'paths': [
[
'm',
'xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13',
'xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6',
], [
'm/0\'',
'xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y',
'xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L',
],
],
},
]
for test in tests:
seed = test['seed']
for path, xpub, xprv in test['paths']:
# test from seed
private_key = HDPrivateKey.from_seed(seed).traverse(path)
public_key = HDPublicKey.parse(xpub)
self.assertEqual(private_key.xprv(), xprv)
self.assertEqual(private_key.xpub(), public_key.xpub())
self.assertEqual(private_key.address(), public_key.address())
def test_parse(self):
xpub = 'xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13'
hd_pub = HDPublicKey.parse(xpub)
self.assertEqual(hd_pub.xpub(), xpub)
xprv = 'xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6'
hd_priv = HDPrivateKey.parse(xprv)
self.assertEqual(hd_priv.xprv(), xprv)
def test_get_address(self):
seedphrase = b'[email protected] Jimmy Song'
mainnet_priv = HDPrivateKey.from_seed(seedphrase)
testnet_priv = HDPrivateKey.from_seed(seedphrase, testnet=True)
tests = [
[mainnet_priv.get_p2pkh_receiving_address, 0, 1, '13pS51XfGTVhxbtrGKVSvwf36r96tLUu1K'],
[testnet_priv.get_p2pkh_change_address, 1, 0, 'n4EiCRsEEPaJ73HWA6zYEaHwo45BrP5MHb'],
[testnet_priv.get_p2sh_p2wpkh_receiving_address, 0, 2, '2NGKoo11UopXBWLC7qqj9BjgH9F3gvLdapz'],
[mainnet_priv.get_p2sh_p2wpkh_change_address, 0, 0, '38hYFPLMTykhURpCQTxkdDcpQKyieiYiU7'],
[mainnet_priv.get_p2wpkh_receiving_address, 2, 0, 'bc1qzeln78k9sghatd3uwnks8jek46qe23dw99zu9j'],
[testnet_priv.get_p2wpkh_change_address, 1, 1, 'tb1qecjwdw5uwwdfezzntec7m4kc8zkyjcamlz7dv9'],
]
for function, account, address, want in tests:
got = function(account, address)
self.assertEqual(got, want)
def test_from_mnemonic(self):
tests = [
[
"00000000000000000000000000000000",
"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about",
"c55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04",
"xprv9s21ZrQH143K3h3fDYiay8mocZ3afhfULfb5GX8kCBdno77K4HiA15Tg23wpbeF1pLfs1c5SPmYHrEpTuuRhxMwvKDwqdKiGJS9XFKzUsAF"
], [
"7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f",
"legal winner thank year wave sausage worth useful legal winner thank yellow",
"2e8905819b8723fe2c1d161860e5ee1830318dbf49a83bd451cfb8440c28bd6fa457fe1296106559a3c80937a1c1069be3a3a5bd381ee6260e8d9739fce1f607",
"xprv9s21ZrQH143K2gA81bYFHqU68xz1cX2APaSq5tt6MFSLeXnCKV1RVUJt9FWNTbrrryem4ZckN8k4Ls1H6nwdvDTvnV7zEXs2HgPezuVccsq"
], [
"80808080808080808080808080808080",
"letter advice cage absurd amount doctor acoustic avoid letter advice cage above",
"d71de856f81a8acc65e6fc851a38d4d7ec216fd0796d0a6827a3ad6ed5511a30fa280f12eb2e47ed2ac03b5c462a0358d18d69fe4f985ec81778c1b370b652a8",
"xprv9s21ZrQH143K2shfP28KM3nr5Ap1SXjz8gc2rAqqMEynmjt6o1qboCDpxckqXavCwdnYds6yBHZGKHv7ef2eTXy461PXUjBFQg6PrwY4Gzq"
], [
"ffffffffffffffffffffffffffffffff",
"zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo wrong",
"ac27495480225222079d7be181583751e86f571027b0497b5b5d11218e0a8a13332572917f0f8e5a589620c6f15b11c61dee327651a14c34e18231052e48c069",
"xprv9s21ZrQH143K2V4oox4M8Zmhi2Fjx5XK4Lf7GKRvPSgydU3mjZuKGCTg7UPiBUD7ydVPvSLtg9hjp7MQTYsW67rZHAXeccqYqrsx8LcXnyd"
], [
"000000000000000000000000000000000000000000000000",
"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon agent",
"035895f2f481b1b0f01fcf8c289c794660b289981a78f8106447707fdd9666ca06da5a9a565181599b79f53b844d8a71dd9f439c52a3d7b3e8a79c906ac845fa",
"xprv9s21ZrQH143K3mEDrypcZ2usWqFgzKB6jBBx9B6GfC7fu26X6hPRzVjzkqkPvDqp6g5eypdk6cyhGnBngbjeHTe4LsuLG1cCmKJka5SMkmU"
], [
"7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f",
"legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal will",
"f2b94508732bcbacbcc020faefecfc89feafa6649a5491b8c952cede496c214a0c7b3c392d168748f2d4a612bada0753b52a1c7ac53c1e93abd5c6320b9e95dd",
"xprv9s21ZrQH143K3Lv9MZLj16np5GzLe7tDKQfVusBni7toqJGcnKRtHSxUwbKUyUWiwpK55g1DUSsw76TF1T93VT4gz4wt5RM23pkaQLnvBh7"
], [
"808080808080808080808080808080808080808080808080",
"letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter always",
"107d7c02a5aa6f38c58083ff74f04c607c2d2c0ecc55501dadd72d025b751bc27fe913ffb796f841c49b1d33b610cf0e91d3aa239027f5e99fe4ce9e5088cd65",
"xprv9s21ZrQH143K3VPCbxbUtpkh9pRG371UCLDz3BjceqP1jz7XZsQ5EnNkYAEkfeZp62cDNj13ZTEVG1TEro9sZ9grfRmcYWLBhCocViKEJae"
], [
"ffffffffffffffffffffffffffffffffffffffffffffffff",
"zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo when",
"0cd6e5d827bb62eb8fc1e262254223817fd068a74b5b449cc2f667c3f1f985a76379b43348d952e2265b4cd129090758b3e3c2c49103b5051aac2eaeb890a528",
"xprv9s21ZrQH143K36Ao5jHRVhFGDbLP6FCx8BEEmpru77ef3bmA928BxsqvVM27WnvvyfWywiFN8K6yToqMaGYfzS6Db1EHAXT5TuyCLBXUfdm"
], [
"0000000000000000000000000000000000000000000000000000000000000000",
"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art",
"bda85446c68413707090a52022edd26a1c9462295029f2e60cd7c4f2bbd3097170af7a4d73245cafa9c3cca8d561a7c3de6f5d4a10be8ed2a5e608d68f92fcc8",
"xprv9s21ZrQH143K32qBagUJAMU2LsHg3ka7jqMcV98Y7gVeVyNStwYS3U7yVVoDZ4btbRNf4h6ibWpY22iRmXq35qgLs79f312g2kj5539ebPM"
], [
"7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f",
"legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth title",
"bc09fca1804f7e69da93c2f2028eb238c227f2e9dda30cd63699232578480a4021b146ad717fbb7e451ce9eb835f43620bf5c514db0f8add49f5d121449d3e87",
"xprv9s21ZrQH143K3Y1sd2XVu9wtqxJRvybCfAetjUrMMco6r3v9qZTBeXiBZkS8JxWbcGJZyio8TrZtm6pkbzG8SYt1sxwNLh3Wx7to5pgiVFU"
], [
"8080808080808080808080808080808080808080808080808080808080808080",
"letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic bless",
"c0c519bd0e91a2ed54357d9d1ebef6f5af218a153624cf4f2da911a0ed8f7a09e2ef61af0aca007096df430022f7a2b6fb91661a9589097069720d015e4e982f",
"xprv9s21ZrQH143K3CSnQNYC3MqAAqHwxeTLhDbhF43A4ss4ciWNmCY9zQGvAKUSqVUf2vPHBTSE1rB2pg4avopqSiLVzXEU8KziNnVPauTqLRo"
], [
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
"zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo vote",
"dd48c104698c30cfe2b6142103248622fb7bb0ff692eebb00089b32d22484e1613912f0a5b694407be899ffd31ed3992c456cdf60f5d4564b8ba3f05a69890ad",
"xprv9s21ZrQH143K2WFF16X85T2QCpndrGwx6GueB72Zf3AHwHJaknRXNF37ZmDrtHrrLSHvbuRejXcnYxoZKvRquTPyp2JiNG3XcjQyzSEgqCB"
], [
"9e885d952ad362caeb4efe34a8e91bd2",
"ozone drill grab fiber curtain grace pudding thank cruise elder eight picnic",
"274ddc525802f7c828d8ef7ddbcdc5304e87ac3535913611fbbfa986d0c9e5476c91689f9c8a54fd55bd38606aa6a8595ad213d4c9c9f9aca3fb217069a41028",
"xprv9s21ZrQH143K2oZ9stBYpoaZ2ktHj7jLz7iMqpgg1En8kKFTXJHsjxry1JbKH19YrDTicVwKPehFKTbmaxgVEc5TpHdS1aYhB2s9aFJBeJH"
], [
"6610b25967cdcca9d59875f5cb50b0ea75433311869e930b",
"gravity machine north sort system female filter attitude volume fold club stay feature office ecology stable narrow fog",
"628c3827a8823298ee685db84f55caa34b5cc195a778e52d45f59bcf75aba68e4d7590e101dc414bc1bbd5737666fbbef35d1f1903953b66624f910feef245ac",
"xprv9s21ZrQH143K3uT8eQowUjsxrmsA9YUuQQK1RLqFufzybxD6DH6gPY7NjJ5G3EPHjsWDrs9iivSbmvjc9DQJbJGatfa9pv4MZ3wjr8qWPAK"
], [
"68a79eaca2324873eacc50cb9c6eca8cc68ea5d936f98787c60c7ebc74e6ce7c",
"hamster diagram private dutch cause delay private meat slide toddler razor book happy fancy gospel tennis maple dilemma loan word shrug inflict delay length",
"64c87cde7e12ecf6704ab95bb1408bef047c22db4cc7491c4271d170a1b213d20b385bc1588d9c7b38f1b39d415665b8a9030c9ec653d75e65f847d8fc1fc440",
"xprv9s21ZrQH143K2XTAhys3pMNcGn261Fi5Ta2Pw8PwaVPhg3D8DWkzWQwjTJfskj8ofb81i9NP2cUNKxwjueJHHMQAnxtivTA75uUFqPFeWzk"
], [
"c0ba5a8e914111210f2bd131f3d5e08d",
"scheme spot photo card baby mountain device kick cradle pact join borrow",
"ea725895aaae8d4c1cf682c1bfd2d358d52ed9f0f0591131b559e2724bb234fca05aa9c02c57407e04ee9dc3b454aa63fbff483a8b11de949624b9f1831a9612",
"xprv9s21ZrQH143K3FperxDp8vFsFycKCRcJGAFmcV7umQmcnMZaLtZRt13QJDsoS5F6oYT6BB4sS6zmTmyQAEkJKxJ7yByDNtRe5asP2jFGhT6"
], [
"6d9be1ee6ebd27a258115aad99b7317b9c8d28b6d76431c3",
"horn tenant knee talent sponsor spell gate clip pulse soap slush warm silver nephew swap uncle crack brave",
"fd579828af3da1d32544ce4db5c73d53fc8acc4ddb1e3b251a31179cdb71e853c56d2fcb11aed39898ce6c34b10b5382772db8796e52837b54468aeb312cfc3d",
"xprv9s21ZrQH143K3R1SfVZZLtVbXEB9ryVxmVtVMsMwmEyEvgXN6Q84LKkLRmf4ST6QrLeBm3jQsb9gx1uo23TS7vo3vAkZGZz71uuLCcywUkt"
], [
"9f6a2878b2520799a44ef18bc7df394e7061a224d2c33cd015b157d746869863",
"panda eyebrow bullet gorilla call smoke muffin taste mesh discover soft ostrich alcohol speed nation flash devote level hobby quick inner drive ghost inside",
"72be8e052fc4919d2adf28d5306b5474b0069df35b02303de8c1729c9538dbb6fc2d731d5f832193cd9fb6aeecbc469594a70e3dd50811b5067f3b88b28c3e8d",
"xprv9s21ZrQH143K2WNnKmssvZYM96VAr47iHUQUTUyUXH3sAGNjhJANddnhw3i3y3pBbRAVk5M5qUGFr4rHbEWwXgX4qrvrceifCYQJbbFDems"
], [
"23db8160a31d3e0dca3688ed941adbf3",
"cat swing flag economy stadium alone churn speed unique patch report train",
"deb5f45449e615feff5640f2e49f933ff51895de3b4381832b3139941c57b59205a42480c52175b6efcffaa58a2503887c1e8b363a707256bdd2b587b46541f5",
"xprv9s21ZrQH143K4G28omGMogEoYgDQuigBo8AFHAGDaJdqQ99QKMQ5J6fYTMfANTJy6xBmhvsNZ1CJzRZ64PWbnTFUn6CDV2FxoMDLXdk95DQ"
], [
"8197a4a47f0425faeaa69deebc05ca29c0a5b5cc76ceacc0",
"light rule cinnamon wrap drastic word pride squirrel upgrade then income fatal apart sustain crack supply proud access",
"4cbdff1ca2db800fd61cae72a57475fdc6bab03e441fd63f96dabd1f183ef5b782925f00105f318309a7e9c3ea6967c7801e46c8a58082674c860a37b93eda02",
"xprv9s21ZrQH143K3wtsvY8L2aZyxkiWULZH4vyQE5XkHTXkmx8gHo6RUEfH3Jyr6NwkJhvano7Xb2o6UqFKWHVo5scE31SGDCAUsgVhiUuUDyh"
], [
"066dca1a2bb7e8a1db2832148ce9933eea0f3ac9548d793112d9a95c9407efad",
"all hour make first leader extend hole alien behind guard gospel lava path output census museum junior mass reopen famous sing advance salt reform",
"26e975ec644423f4a4c4f4215ef09b4bd7ef924e85d1d17c4cf3f136c2863cf6df0a475045652c57eb5fb41513ca2a2d67722b77e954b4b3fc11f7590449191d",
"xprv9s21ZrQH143K3rEfqSM4QZRVmiMuSWY9wugscmaCjYja3SbUD3KPEB1a7QXJoajyR2T1SiXU7rFVRXMV9XdYVSZe7JoUXdP4SRHTxsT1nzm"
], [
"f30f8c1da665478f49b001d94c5fc452",
"vessel ladder alter error federal sibling chat ability sun glass valve picture",
"2aaa9242daafcee6aa9d7269f17d4efe271e1b9a529178d7dc139cd18747090bf9d60295d0ce74309a78852a9caadf0af48aae1c6253839624076224374bc63f",
"xprv9s21ZrQH143K2QWV9Wn8Vvs6jbqfF1YbTCdURQW9dLFKDovpKaKrqS3SEWsXCu6ZNky9PSAENg6c9AQYHcg4PjopRGGKmdD313ZHszymnps"
], [
"c10ec20dc3cd9f652c7fac2f1230f7a3c828389a14392f05",
"scissors invite lock maple supreme raw rapid void congress muscle digital elegant little brisk hair mango congress clump",
"7b4a10be9d98e6cba265566db7f136718e1398c71cb581e1b2f464cac1ceedf4f3e274dc270003c670ad8d02c4558b2f8e39edea2775c9e232c7cb798b069e88",
"xprv9s21ZrQH143K4aERa2bq7559eMCCEs2QmmqVjUuzfy5eAeDX4mqZffkYwpzGQRE2YEEeLVRoH4CSHxianrFaVnMN2RYaPUZJhJx8S5j6puX"
], [
"f585c11aec520db57dd353c69554b21a89b20fb0650966fa0a9d6f74fd989d8f",
"void come effort suffer camp survey warrior heavy shoot primary clutch crush open amazing screen patrol group space point ten exist slush involve unfold",
"01f5bced59dec48e362f2c45b5de68b9fd6c92c6634f44d6d40aab69056506f0e35524a518034ddc1192e1dacd32c1ed3eaa3c3b131c88ed8e7e54c49a5d0998",
"xprv9s21ZrQH143K39rnQJknpH1WEPFJrzmAqqasiDcVrNuk926oizzJDDQkdiTvNPr2FYDYzWgiMiC63YmfPAa2oPyNB23r2g7d1yiK6WpqaQS"
]
]
for entropy, mnemonic, seed, xprv in tests:
private_key = HDPrivateKey.from_mnemonic(mnemonic, b'TREZOR')
self.assertEqual(private_key.xprv(), xprv)
def test_bip49(self):
mnemonic = 'abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about'
password = b''
path = 'm'
hd_private_key = HDPrivateKey.from_mnemonic(mnemonic, password, path=path, testnet=True)
want = 'tprv8ZgxMBicQKsPe5YMU9gHen4Ez3ApihUfykaqUorj9t6FDqy3nP6eoXiAo2ssvpAjoLroQxHqr3R5nE3a5dU3DHTjTgJDd7zrbniJr6nrCzd'
self.assertEqual(hd_private_key.xprv(), want)
account0 = hd_private_key.child((1 << 31) + 49).child((1 << 31) + 1).child(1 << 31)
want = 'tprv8gRrNu65W2Msef2BdBSUgFdRTGzC8EwVXnV7UGS3faeXtuMVtGfEdidVeGbThs4ELEoayCAzZQ4uUji9DUiAs7erdVskqju7hrBcDvDsdbY'
self.assertEqual(account0.xprv(), want)
account0_pub = account0.pub
account0_first_key = account0.child(0).child(0)
pub_first_key = account0_pub.traverse('/0/0')
want = 'cULrpoZGXiuC19Uhvykx7NugygA3k86b3hmdCeyvHYQZSxojGyXJ'
self.assertEqual(account0_first_key.wif(), want)
want = 0xc9bdb49cfbaedca21c4b1f3a7803c34636b1d7dc55a717132443fc3f4c5867e8
self.assertEqual(account0_first_key.private_key.secret, want)
want = bytes.fromhex('03a1af804ac108a8a51782198c2d034b28bf90c8803f5a53f76276fa69a4eae77f')
self.assertEqual(account0_first_key.private_key.point.sec(), want)
self.assertEqual(pub_first_key.address(), account0_first_key.address())
def test_bech32_address(self):
mnemonic = 'abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about'
password = b''
path = 'm/84\'/0\'/0\''
account = HDPrivateKey.from_mnemonic(mnemonic, password, path=path, testnet=False)
want = 'zprvAdG4iTXWBoARxkkzNpNh8r6Qag3irQB8PzEMkAFeTRXxHpbF9z4QgEvBRmfvqWvGp42t42nvgGpNgYSJA9iefm1yYNZKEm7z6qUWCroSQnE'
self.assertEqual(account.zprv(), want)
want = 'zpub6rFR7y4Q2AijBEqTUquhVz398htDFrtymD9xYYfG1m4wAcvPhXNfE3EfH1r1ADqtfSdVCToUG868RvUUkgDKf31mGDtKsAYz2oz2AGutZYs'
self.assertEqual(account.zpub(), want)
first_key = account.child(0).child(0)
want = 'bc1qcr8te4kr609gcawutmrza0j4xv80jy8z306fyu'
self.assertEqual(first_key.bech32_address(), want)
def test_zprv(self):
mnemonic, priv = HDPrivateKey.generate(entropy=1 << 128)
for word in mnemonic.split():
self.assertTrue(word in WORD_LIST)
zprv = priv.zprv()
self.assertTrue(zprv.startswith('zprv'))
zpub = priv.pub.zpub()
self.assertTrue(zpub.startswith('zpub'))
derived = HDPrivateKey.parse(zprv)
self.assertEqual(zprv, derived.zprv())
mnemonic, priv = HDPrivateKey.generate(testnet=True)
zprv = priv.zprv()
self.assertTrue(zprv.startswith('vprv'))
zpub = priv.pub.zpub()
self.assertTrue(zpub.startswith('vpub'))
xpub = priv.pub.xpub()
self.assertTrue(xpub.startswith('tpub'))
derived = HDPrivateKey.parse(zprv)
self.assertEqual(zprv, derived.zprv())
derived_pub = HDPublicKey.parse(zpub)
self.assertEqual(zpub, derived_pub.zpub())
with self.assertRaises(ValueError):
bad_zprv = encode_base58_checksum(b'\x00' * 78)
HDPrivateKey.parse(bad_zprv)
with self.assertRaises(ValueError):
bad_zpub = encode_base58_checksum(b'\x00' * 78)
HDPublicKey.parse(bad_zpub)
with self.assertRaises(ValueError):
derived_pub.child(1 << 31)
def test_errors(self):
with self.assertRaises(ValueError):
HDPrivateKey.from_mnemonic('hello')
with self.assertRaises(ValueError):
mnemonic = 'abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon'
HDPrivateKey.from_mnemonic(mnemonic)
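# Sketch: the vectors above can be exercised with `python -m unittest hd.HDTest`
# (module name assumed from the file path; requires the ecc/helper/mnemonic modules).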
| 50.261931 | 206 | 0.673541 | ["MIT"] | jimmysong/pw-exercises | session6/hd.py | 45,286 | Python |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of11']
class instruction(loxi.OFObject):
subtypes = {}
def __init__(self, type=None):
if type != None:
self.type = type
else:
self.type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 0)
subclass = instruction.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = instruction()
obj.type = reader.read("!H")[0]
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
return True
def pretty_print(self, q):
q.text("instruction {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
class apply_actions(instruction):
type = 4
def __init__(self, actions=None):
if actions != None:
self.actions = actions
else:
self.actions = []
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
packed.append(loxi.generic_util.pack_list(self.actions))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = apply_actions()
_type = reader.read("!H")[0]
assert(_type == 4)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
obj.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.actions != other.actions: return False
return True
def pretty_print(self, q):
q.text("apply_actions {")
with q.group():
with q.indent(2):
q.breakable()
q.text("actions = ");
q.pp(self.actions)
q.breakable()
q.text('}')
instruction.subtypes[4] = apply_actions
class clear_actions(instruction):
type = 5
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = clear_actions()
_type = reader.read("!H")[0]
assert(_type == 5)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("clear_actions {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction.subtypes[5] = clear_actions
class experimenter(instruction):
subtypes = {}
type = 65535
def __init__(self, experimenter=None, data=None):
if experimenter != None:
self.experimenter = experimenter
else:
self.experimenter = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 4)
subclass = experimenter.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = experimenter()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.experimenter = reader.read("!L")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.experimenter != other.experimenter: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("experimenter {")
with q.group():
with q.indent(2):
q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
instruction.subtypes[65535] = experimenter
class goto_table(instruction):
type = 1
def __init__(self, table_id=None):
if table_id != None:
self.table_id = table_id
else:
self.table_id = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!B", self.table_id))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = goto_table()
_type = reader.read("!H")[0]
assert(_type == 1)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.table_id = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.table_id != other.table_id: return False
return True
def pretty_print(self, q):
q.text("goto_table {")
with q.group():
with q.indent(2):
q.breakable()
q.text("table_id = ");
q.text("%#x" % self.table_id)
q.breakable()
q.text('}')
instruction.subtypes[1] = goto_table
class write_actions(instruction):
type = 3
def __init__(self, actions=None):
if actions != None:
self.actions = actions
else:
self.actions = []
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
packed.append(loxi.generic_util.pack_list(self.actions))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = write_actions()
_type = reader.read("!H")[0]
assert(_type == 3)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
obj.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.actions != other.actions: return False
return True
def pretty_print(self, q):
q.text("write_actions {")
with q.group():
with q.indent(2):
q.breakable()
q.text("actions = ");
q.pp(self.actions)
q.breakable()
q.text('}')
instruction.subtypes[3] = write_actions
class write_metadata(instruction):
type = 2
def __init__(self, metadata=None, metadata_mask=None):
if metadata != None:
self.metadata = metadata
else:
self.metadata = 0
if metadata_mask != None:
self.metadata_mask = metadata_mask
else:
self.metadata_mask = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
packed.append(struct.pack("!Q", self.metadata))
packed.append(struct.pack("!Q", self.metadata_mask))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = write_metadata()
_type = reader.read("!H")[0]
assert(_type == 2)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
obj.metadata = reader.read("!Q")[0]
obj.metadata_mask = reader.read("!Q")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.metadata != other.metadata: return False
if self.metadata_mask != other.metadata_mask: return False
return True
def pretty_print(self, q):
q.text("write_metadata {")
with q.group():
with q.indent(2):
q.breakable()
q.text("metadata = ");
q.text("%#x" % self.metadata)
q.text(","); q.breakable()
q.text("metadata_mask = ");
q.text("%#x" % self.metadata_mask)
q.breakable()
q.text('}')
instruction.subtypes[2] = write_metadata
| 29.294574 | 88 | 0.562494 | [
"Apache-2.0"
] | Rajeshrc99/docker-voltha | ofagent/loxi/of11/instruction.py | 11,337 | Python |
import os
import numpy as np
import copy
import colorsys
from timeit import default_timer as timer
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
from nets.yolo4 import yolo_body,yolo_eval
from utils.utils import letterbox_image
#--------------------------------------------#
#   To use your own trained model for prediction, two parameters
#   must be modified: model_path and classes_path.
#--------------------------------------------#
class YOLO(object):
_defaults = {
"model_path": 'model_data/yolo4_weight.h5',
"anchors_path": 'model_data/yolo_anchors.txt',
"classes_path": 'model_data/coco_classes.txt',
"score" : 0.5,
"iou" : 0.3,
        # Use 416x416 if GPU memory is limited
        # Use 608x608 if GPU memory is plentiful
"model_image_size" : (416, 416)
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
#---------------------------------------------------#
    #   Initialize YOLO
#---------------------------------------------------#
def __init__(self, **kwargs):
self.__dict__.update(self._defaults)
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.boxes, self.scores, self.classes = self.generate()
#---------------------------------------------------#
    #   Get all the class names
#---------------------------------------------------#
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
#---------------------------------------------------#
    #   Get all the anchor boxes
#---------------------------------------------------#
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
#---------------------------------------------------#
    #   Load the model and set up the detection outputs
#---------------------------------------------------#
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
        # Compute the number of anchors
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
        # Load the model; if the saved file already contains the model architecture, load it directly.
        # Otherwise build the model first and then load the weights.
try:
self.yolo_model = load_model(model_path, compile=False)
except:
self.yolo_model = yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path)
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
        # Assign a different color to each class for drawing boxes
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
        # Shuffle the colors
np.random.seed(10101)
np.random.shuffle(self.colors)
np.random.seed(None)
self.input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
num_classes, self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
#---------------------------------------------------#
    #   Detect objects in an image
#---------------------------------------------------#
def detect_image(self, image):
start = timer()
        # Resize the image to match the model's required input size
new_image_size = self.model_image_size
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
        # Run the prediction
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
        # Set up the font for drawing labels
font = ImageFont.truetype(font='font/simhei.ttf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
small_pic=[]
for i, c in list(enumerate(out_classes)):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
top, left, bottom, right = box
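            # Expand the box by a few pixels and clamp it to the image bounds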
top = top - 5
left = left - 5
bottom = bottom + 5
right = right + 5
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            # Draw the bounding box
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
label = label.encode('utf-8')
print(label)
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[c])
draw.text(text_origin, str(label,'UTF-8'), fill=(0, 0, 0), font=font)
del draw
end = timer()
print(end - start)
return image
def close_session(self):
self.sess.close()
| 36.48913 | 96 | 0.515788 | [
"MIT"
] | yanjingke/yolov4-keras | yolo.py | 7,016 | Python |
import holoviews as hv
import geoviews as gv
import cartopy.crs as ccrs
import cartopy.feature as cf
from holoviews.operation.datashader import regrid
from holoviews.streams import FreehandDraw
import panel as pn
pn.extension()
hv.extension('bokeh', logo=False)
import sys
# Suppress warnings
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
def interactive_plot(cube, cmap='viridis', kdims=['longitude', 'latitude'], coastlines=False , coastline_color='pink', projection=ccrs.PlateCarree, tools=['hover'], min_height=600, **opts):
# Generate an interactive Bokeh image of a cube with various plotting options
# Convert cube to GeoViews dataset
dataset = gv.Dataset(cube, [coord.name() for coord in cube.dim_coords], label=cube.name())
# Generate an image object which will dynamically render as the interactive view changes
image = regrid(dataset.to(gv.Image, kdims, dynamic=True))
# Options for plotting
options = {
'cmap': cmap,
'responsive': True,
'projection': projection(),
'colorbar': True,
'min_height': min_height,
'aspect': 2,
'tools': tools
}
# Include coastlines if needed
if coastlines:
return gv.feature.ocean * gv.feature.land * image.opts(**options, **opts) * gv.feature.coastline.opts(line_color=coastline_color)
else:
return image.opts(**options, **opts)
def dashboard_column(plots, shared_slider=False):
# Generate a Panel dashboard from a list of interactive plots
# Create a Panel object to host our plots
app = pn.GridSpec(sizing_mode='stretch_both')
# Arrange plots in a column
column = pn.Column(*plots)
# Add plots and sliders to Panel app
if shared_slider:
# Link all the sliders to one slider
# TODO: Add check for whether sliders can be linked
slider1 = column[0][1][0]
for plot in column[1:]:
slider = plot[1][0]
slider1.link(slider, value='value')
# Append all the plots to the app (using 3/4 of the horizontal space)
for i, plot in enumerate(column):
app[i, 0:4] = plot[0]
# Add the linked slider (using the last 1/4 of the horizontal space)
app[0, 4] = slider1
else:
# Append whole column (with individual sliders) to the app
app[0, 0] = column
return app
def warning_tool(color="orange"):
warning = gv.Polygons([]).opts(line_color=color, line_width=3, fill_color=color, fill_alpha=0.2)
pen = FreehandDraw(source=warning)
return pen, warning
| 35.16 | 189 | 0.662874 | [
"BSD-3-Clause"
] | informatics-lab/example-notebooks | 2.1 Weather/opscentretools/plotting.py | 2,637 | Python |
from typing import Dict
import pytest
from {{cookiecutter.project_slug}}.tests.assertions import assert_field_error
from {{cookiecutter.project_slug}}.users.api.serializers import RegisterSerializer
pytestmark = pytest.mark.django_db
@pytest.fixture
def user_json(user_json: Dict) -> Dict:
user_json["password1"] = user_json["password"]
user_json["password2"] = user_json["password"]
for field in list(user_json):
if field not in ["name", "email", "password1", "password2"]:
user_json.pop(field)
return user_json
class TestRegisterSerializer:
def test_get_cleaned_data_returns_dict_with_correct_fields(
self, user_json: Dict
) -> None:
serializer = RegisterSerializer(data=user_json)
assert serializer.is_valid()
cleaned_data = serializer.get_cleaned_data()
assert len(cleaned_data) == 3
for field in ["name", "password1", "email"]:
assert cleaned_data[field] == user_json[field]
def test_get_cleaned_data_returns_empty_string_for_name_when_name_not_provided(
self, user_json: Dict
) -> None:
user_json.pop("name")
serializer = RegisterSerializer(data=user_json)
assert serializer.is_valid()
cleaned_data = serializer.get_cleaned_data()
assert cleaned_data["name"] == ""
@pytest.mark.parametrize(
"field",
["email", "password1", "password2"],
ids=["email", "password1", "password2"],
)
def test_fields_are_required(self, user_json: Dict, field: str) -> None:
user_json.pop(field)
serializer = RegisterSerializer(data=user_json)
assert_field_error(serializer, field)
| 30.909091 | 83 | 0.682941 | [
"BSD-3-Clause"
] | e-dang/cookiecutter-django | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/tests/integration/users/test_serializers.py | 1,700 | Python |
import re
### parse_text_regex(text, regex)
# takes a string and a regex, returns a tuple of strings with the matching groups
def parse_text_regex(text, regex):
try:
        compiled_regex = re.compile(regex)
    except TypeError as te:
        raise Exception(te)
    except re.error as e:
        raise Exception(e)
    match = compiled_regex.match(text)
    if match is None:
        raise Exception(f"String {text} doesn't match {regex}")
    return match.groups()
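### clean_string_with_regex(text, regex)
# removes every substring matching regex from text and strips surrounding whitespace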
def clean_string_with_regex(text, regex):
cleaned_string = re.sub(regex, '', text)
cleaned_string = cleaned_string.strip()
return cleaned_string
| 24.64 | 67 | 0.676948 | [
"MIT"
] | PumaConcolor/TPS-dice-roller-bot | TPS_dice_roller_bot/core/parse.py | 616 | Python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from django.contrib import messages
from django.db.models import Count
from django.views.generic import (
DetailView,
TemplateView,
)
from .models import (
AggregateHourlySongChart,
HourlySongChart,
HourlySongChartEntry,
Song,
)
from .utils import KR_TZ
class HourlySongChartView(DetailView):
template_name = 'charts/hourlysongchart_detail.html'
def _get_hour(self, msg=False):
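        # Parse the 'date'/'hour' query parameters as KST; fall back to the hour of the latest aggregated chart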
chart_date = self.request.GET.get('date', None)
if chart_date:
try:
hour = self.request.GET.get('hour', '00')
return KR_TZ.localize(datetime.strptime('{}{}'.format(chart_date, hour), '%Y%m%d%H'))
except ValueError:
if msg:
messages.error(self.request, 'Invalid date/hour parameters.')
return AggregateHourlySongChart.objects.latest('hour').hour.astimezone(KR_TZ)
def get_context_data(self, **kwargs):
context = super(HourlySongChartView, self).get_context_data(**kwargs)
context['hour'] = self._get_hour()
return context
def get_object(self):
hour = self._get_hour(msg=True)
return AggregateHourlySongChart.get_cached_chart(hour)
class StatsView(TemplateView):
template_name = 'charts/stats.html'
def get_context_data(self, **kwargs):
context = super(StatsView, self).get_context_data(**kwargs)
for slug in ['melon', 'genie', 'bugs', 'mnet']:
context['{}_earliest'.format(slug)] = HourlySongChart.objects.filter(
chart__service__slug=slug).earliest('hour').hour
context['song_count'] = HourlySongChartEntry.objects.aggregate(
song_count=Count('song', distinct=True))['song_count']
context['artist_count'] = HourlySongChartEntry.objects.aggregate(
artist_count=Count('song__artists', distinct=True))['artist_count']
context['album_count'] = HourlySongChartEntry.objects.aggregate(
album_count=Count('song__album', distinct=True))['album_count']
return context
| 34.555556 | 101 | 0.66881 | [
"MIT"
] | pmrowla/kchart | kchart/charts/views.py | 2,177 | Python |
#
# LeetCode
#
# Problem - 581
# URL - https://leetcode.com/problems/shortest-unsorted-continuous-subarray/
#
from typing import List
class Solution:
def findUnsortedSubarray(self, arr: List[int]) -> int:
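    # Find the first index from the left and the last index from the right that
    # break the sorted order, then widen that window until every element outside
    # it is correctly placed relative to the window's min and max.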
    if (not arr):
      return 0
index1 = -1
index2 = -1
for i in range(1, len(arr)):
if (arr[i] < arr[i-1]):
index1 = i-1
break
for i in range(len(arr)-2, -1, -1):
if (arr[i] > arr[i+1]):
index2 = i+1
break
if (index1 == -1):
return 0
else:
maxSubArr = max(arr[index1:index2+1])
minSubArr = min(arr[index1:index2+1])
for i in range(0, index1):
if (arr[i] > minSubArr):
index1 = i
break
for i in range(len(arr)-1, index2, -1):
if (arr[i] < maxSubArr):
index2 = i
break
return index2 - index1 + 1
| 19.534884 | 76 | 0.515476 | [
"MIT"
] | KevinTMtz/CompetitiveProgramming | LeetCode/581.py | 840 | Python |
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.banned_ip_address_details import BannedIpAddressDetails
from cryptoapis.model.invalid_pagination import InvalidPagination
from cryptoapis.model.limit_greater_than_allowed import LimitGreaterThanAllowed
from cryptoapis.model.uri_not_found import UriNotFound
globals()['BannedIpAddressDetails'] = BannedIpAddressDetails
globals()['InvalidPagination'] = InvalidPagination
globals()['LimitGreaterThanAllowed'] = LimitGreaterThanAllowed
globals()['UriNotFound'] = UriNotFound
class ListAssetsDetailsE400(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'details': ([BannedIpAddressDetails],), # noqa: E501
'code': (str,), # noqa: E501
'message': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'details': 'details', # noqa: E501
'code': 'code', # noqa: E501
'message': 'message', # noqa: E501
}
read_only_vars = {
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""ListAssetsDetailsE400 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
details ([BannedIpAddressDetails]): [optional] # noqa: E501
code (str): Specifies an error code, e.g. error 404.. [optional] # noqa: E501
message (str): Specifies the message of the error, i.e. why the error was returned, e.g. error 404 stands for “not found”.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ListAssetsDetailsE400 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
details ([BannedIpAddressDetails]): [optional] # noqa: E501
code (str): Specifies an error code, e.g. error 404.. [optional] # noqa: E501
message (str): Specifies the message of the error, i.e. why the error was returned, e.g. error 404 stands for “not found”.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
],
'oneOf': [
InvalidPagination,
LimitGreaterThanAllowed,
UriNotFound,
],
}
| 45.145897 | 484 | 0.596916 | [
"MIT"
] | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | cryptoapis/model/list_assets_details_e400.py | 14,861 | Python |
#!/usr/bin/env python
# mypy: ignore-errors
# depdive documentation build configuration file
#
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Add 'sphinx_automodapi.automodapi' if you want to build modules
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx_click",
"sphinx_rtd_dark_mode",
]
default_dark_mode = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "depdive"
copyright = "2021, Nasif Imtiaz"
author = "Nasif Imtiaz"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = "0.0.41"
# The full version, including alpha/beta/rc tags.
release = "0.0.41"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "depdivedoc"
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ("letterpaper" or "a4paper").
#
# "papersize": "letterpaper",
# The font size ("10pt", "11pt" or "12pt").
#
# "pointsize": "10pt",
# Additional stuff for the LaTeX preamble.
#
# "preamble": "",
# Latex figure (float) alignment
#
# "figure_align": "htbp",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"depdive.tex",
"depdive Documentation",
"Nasif Imtiaz",
"manual",
),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"depdive",
"depdive Documentation",
[author],
1,
)
]
autodoc_typehints = "description"
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"depdive",
"depdive Documentation",
author,
"depdive",
"One line description of project.",
"Miscellaneous",
),
]
html_css_files = [
"custom_cookietemple.css",
]
| 27.672515 | 77 | 0.661877 | [
"MIT"
] | nasifimtiazohi/depdive | docs/conf.py | 4,732 | Python |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM10_then1_IsolatedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMM10_then1_IsolatedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMM10_then1_IsolatedLHS, self).__init__(name='HMM10_then1_IsolatedLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MM10_then1')
# Set the node attributes
# Add the attribute equations
self["equations"] = []
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
| 43.068966 | 125 | 0.5004 | [
"MIT"
] | levilucio/SyVOLT | UMLRT2Kiltera_MM/Properties/from_thesis/HMM10_then1_IsolatedLHS.py | 2,498 | Python |
#!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function, unicode_literals
import subprocess
import re
import sys
import os
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used'
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
b'libgcc_s.so.1', # GCC base support
b'libc.so.6', # C library
b'libpthread.so.0', # threading
b'libanl.so.1', # DNS resolve
b'libm.so.6', # math library
b'librt.so.1', # real-time (clock)
b'ld-linux-x86-64.so.2', # 64-bit dynamic linker
b'ld-linux.so.2', # 32-bit dynamic linker
# bitcoin-qt only
b'libX11-xcb.so.1', # part of X11
b'libX11.so.6', # part of X11
b'libxcb.so.1', # part of X11
b'libfontconfig.so.1', # font support
b'libfreetype.so.6', # font parsing
b'libdl.so.2' # programming interface to dynamic linker
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def __call__(self, mangled):
self.proc.stdin.write(mangled + b'\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def read_symbols(executable, imports=True):
'''
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>7 and re.match(b'[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition(b'@')
is_import = line[6] == b'UND'
if version.startswith(b'@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version))
return syms
def check_version(max_versions, version):
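    # Return True only when the versioned symbol's library is known and its version does not exceed the allowed maximum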
if b'_' in version:
(lib, _, ver) = version.rpartition(b'_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split(b'.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib]
def read_libraries(filename):
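    # Return the list of NEEDED shared libraries from the ELF dynamic section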
p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>2 and tokens[1] == b'(NEEDED)':
match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:]))
if match:
libraries.append(match.group(1))
else:
raise ValueError('Unparseable (NEEDED) specification')
return libraries
if __name__ == '__main__':
cppfilt = CPPFilt()
retval = 0
for filename in sys.argv[1:]:
# Check imported symbols
for sym,version in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version):
print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8')))
retval = 1
# Check exported symbols
for sym,version in read_symbols(filename, False):
if sym in IGNORE_EXPORTS:
continue
print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8')))
retval = 1
# Check dependency libraries
for library_name in read_libraries(filename):
if library_name not in ALLOWED_LIBRARIES:
print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8')))
retval = 1
exit(retval)
| 38.006135 | 142 | 0.653269 | [
"MIT"
] | bitcoinemxmx/GCX | contrib/devtools/symbol-check.py | 6,195 | Python |
#!/usr/bin/env python3
import cgi, cgitb, os, storage, shutil, time, sys, atexit
def deltemp():
os.remove("_/3dsthemes/tmp.zip")
from libs import zip
cgitb.enable()
from libs.session import Session
from libs import smdh
session=Session()
if not session.isLoggedIn():
raise ValueError("Must be logged in to upload a file")
form = cgi.FieldStorage()
if not "title" in form:
raise ValueError("Title is missing")
if not "desc" in form:
raise ValueError("Description is missing")
if not "file" in form:
raise ValueError("File is missing")
fileitem = form["file"]
if not fileitem.file:
raise ValueError("No file uploaded?")
#Check if an upload is in progress
if os.path.isfile("_/3dsthemes/tmp.zip"):
raise ValueError("An upload is in progress. Please wait a little before reuploading.")
atexit.register(deltemp)
#OK, we're onto something
outpath = "_/3dsthemes/tmp.zip"
fout = open(outpath, "wb")
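#Write the upload to disk in 1MB chunks, rejecting anything larger than 20MB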
for f in range(21):
if f == 20:
fout.close()
raise ValueError("File too big.")
chunk = fileitem.file.read(1000000)
if not chunk: break
fout.write(chunk)
fout.close()
tid=storage.count("themes")
dirname = "_/3dsthemes/%i/"%(storage.count("themes"))
try:
os.mkdir(dirname)
except:
shutil.rmtree(dirname)
os.mkdir(dirname)
zip.unzip("_/3dsthemes/tmp.zip", dirname)
try:
os.rename(dirname+"preview.png", dirname+"Preview.png")
except:
pass
testfile = smdh.SMDH(dirname+"info.smdh")
#Will throw an exception if the file doesn't exist or isn't valid.
#Put theme into database. This is done last to prevent 'ghost themes'
title=cgi.escape(form["title"].value)
markdown=cgi.escape(form["desc"].value)
author=session.getUserName()
date=int(time.time())
aid=tid
storage.append("themes",{"title":title, "markdown":markdown, "author":author, "date":date, "aid":aid}) #Write
sys.stdout.buffer.write(("Content-type: text/html\r\n\r\n<html><head><script>window.location.replace(\"index.py\");</script></head></html>").encode('utf8'))
print("Test?")
| 32.901639 | 156 | 0.707025 | [
"BSD-2-Clause"
] | Mtgxyz2/homepage | 3dsthemes/do_upload.py | 2,007 | Python |
# Generated by Django 4.0.2 on 2022-02-19 19:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='HierarchicalModelMixin',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='common.hierarchicalmodelmixin')),
],
),
]
| 28.434783 | 175 | 0.643731 | [
"MIT"
] | kayzan73/cenith4 | common/migrations/0001_initial.py | 654 | Python |
import pickle
from time import time
from sklearn.cross_validation import train_test_split as tts
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import classification_report as clsr
from sklearn.neural_network._base import identity
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from analyzer.Transformer import NLTKPreprocessor
# @timeit
def build_and_evaluate(X, y, classifier=SGDClassifier, outpath=None, verbose=True):
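    """Train an NLTK-preprocessed TF-IDF + classifier pipeline, report its
    performance on a held-out 20% split, then refit it on the full dataset and
    optionally pickle the final model to outpath."""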
# @timeit
def build(classifier, X, y=None):
"""
Inner build function that builds a single model.
"""
if isinstance(classifier, type):
classifier = classifier()
model = Pipeline([
('preprocessor', NLTKPreprocessor()),
('vectorizer', TfidfVectorizer(
tokenizer=identity, preprocessor=None, lowercase=False)),
('classifier', classifier),
])
model.fit(X, y)
return model
# Label encode the targets
labels = LabelEncoder()
y = labels.fit_transform(y)
secs = time()
# Begin evaluation
if verbose: print("Building for evaluation")
X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2)
model = build(classifier, X_train, y_train)
if verbose:
print("Evaluation model fit in {:0.3f} seconds".format(time() - secs))
print("Classification Report:\n")
y_pred = model.predict(X_test)
print(clsr(y_test, y_pred, target_names=labels.classes_))
secs = time()
if verbose:
print("Building complete model and saving ...")
model = build(classifier, X, y)
model.labels_ = labels
if verbose:
print("Complete model fit in {:0.3f} seconds".format(time() - secs))
if outpath:
with open(outpath, 'wb') as f:
pickle.dump(model, f)
print("Model written out to {}".format(outpath))
return model
| 28.927536 | 83 | 0.66483 | [
"MIT"
] | shobhitagarwal1612/Emotion-Analysis | analyzer/build.py | 1,996 | Python |
class FlumineException(Exception):
"""Base class for Flumine Errors"""
pass
class RunError(FlumineException):
"""Exception raised if error
in `Flumine.run()``
"""
def __init__(self, message):
super(RunError, self).__init__(message)
class ListenerError(FlumineException):
"""Error raised if error in Listener"""
    def __init__(self, message):
super(ListenerError, self).__init__(message)
class OrderError(FlumineException):
"""Exception raised if incorrect
order/order_type requested.
"""
def __init__(self, message):
super(OrderError, self).__init__(message)
class OrderUpdateError(FlumineException):
"""Exception raised if order update
incorrect.
"""
def __init__(self, message):
super(OrderUpdateError, self).__init__(message)
class OrderExecutionError(FlumineException):
"""Exception raised error in package during
execution.
"""
pass
class ControlError(FlumineException):
"""Exception raised if order voilates
a control.
"""
def __init__(self, message):
super(ControlError, self).__init__(message)
class ClientError(FlumineException):
"""Exception raised on client
error.
"""
def __init__(self, message):
super(ClientError, self).__init__(message)
| 20.476923 | 55 | 0.678437 | [
"MIT"
] | betcode-org/flumine | flumine/exceptions.py | 1,331 | Python |
from . import AWSProperty, AWSAttribute, validate_pausetime
from .validators import positive_integer, integer, boolean
class AutoScalingRollingUpdate(AWSProperty):
props = {
'MaxBatchSize': (positive_integer, False),
'MinInstancesInService': (integer, False),
'MinSuccessfulInstancesPercent': (integer, False),
'PauseTime': (validate_pausetime, False),
'SuspendProcesses': ([str], False),
'WaitOnResourceSignals': (boolean, False),
}
class AutoScalingScheduledAction(AWSProperty):
props = {
'IgnoreUnmodifiedGroupSizeProperties': (boolean, False),
}
class AutoScalingReplacingUpdate(AWSProperty):
props = {
'WillReplace': (boolean, False),
}
class CodeDeployLambdaAliasUpdate(AWSProperty):
props = {
'AfterAllowTrafficHook': (str, False),
'ApplicationName': (boolean, True),
'BeforeAllowTrafficHook': (str, False),
'DeploymentGroupName': (boolean, True),
}
class UpdatePolicy(AWSAttribute):
props = {
'AutoScalingRollingUpdate': (AutoScalingRollingUpdate, False),
'AutoScalingScheduledAction': (AutoScalingScheduledAction, False),
'AutoScalingReplacingUpdate': (AutoScalingReplacingUpdate, False),
'CodeDeployLambdaAliasUpdate': (CodeDeployLambdaAliasUpdate, False),
'UseOnlineResharding': (boolean, False),
'EnableVersionUpgrade': (boolean, False),
}
class ResourceSignal(AWSProperty):
props = {
'Count': (positive_integer, False),
'Timeout': (validate_pausetime, False),
}
class AutoScalingCreationPolicy(AWSProperty):
props = {
'MinSuccessfulInstancesPercent': (integer, False),
}
class CreationPolicy(AWSAttribute):
props = {
'AutoScalingCreationPolicy': (AutoScalingCreationPolicy, False),
'ResourceSignal': (ResourceSignal, True),
}
| 28.833333 | 76 | 0.679453 | [
"MIT"
] | MrUltimate/good-vibes-api | env/lib/python3.7/site-packages/troposphere/policies.py | 1,903 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import sys
import os
sys.path.append(os.getcwd() + "/..")
import unittest
import fpgaperf
import re
import random
def def_devpack(toolchain):
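    # Pick a default ice40 device/package for the toolchain (Radiant targets up5k/uwg30, everything else lp8k/cm81)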
if 'radiant' in toolchain:
device = 'up5k'
package = 'uwg30'
else:
# tinyfpga b2
# XXX: change to hx8k, ct256?
device = 'lp8k'
package = 'cm81'
return device, package
class TestCase(unittest.TestCase):
def setUp(self):
self.verbose = False
def test_env_ready(self):
assert fpgaperf.env_ready()
def test_icetime_parse(self):
with open('icetime.txt', 'r') as f:
m = fpgaperf.icetime_parse(f)
assert 'max_freq' in m
assert abs(m['max_freq'] - 132.94e6) < 1.0
def test_yosys_ver(self):
v = fpgaperf.yosys_ver()
assert re.match(r'Yosys .* .*git sha1 .*', v)
def test_get_toolchains(self):
ts = fpgaperf.get_toolchains()
assert 'vpr' in ts
assert 'arachne' in ts
assert 'radiant-synpro' in ts
def test_get_projects(self):
ps = fpgaperf.get_projects()
assert 'oneblink' in ps
assert 'picosoc-hx8kdemo' in ps
assert 'picorv32-wrap' in ps
def test_get_seedable(self):
ts = fpgaperf.get_seedable()
assert 'vpr' in ts
assert 'arachne' in ts
assert 'nextpnr' in ts
def test_toolchains(self):
'''Try each toolchain'''
for toolchain in fpgaperf.toolchains.keys():
device, package = def_devpack(toolchain)
fpgaperf.run(
family='ice40',
device=device,
package=package,
toolchain=toolchain,
project=fpgaperf.get_project('oneblink'),
verbose=self.verbose
)
def test_pcf(self):
'''Try each toolchain with a pcf'''
for toolchain in fpgaperf.toolchains.keys():
device, package = def_devpack(toolchain)
if 'radiant' in toolchain:
pcf = fpgaperf.root_dir + '/project/FIXME.pcf'
else:
pcf = fpgaperf.root_dir + '/project/oneblink_lp8k-cm81.pcf'
fpgaperf.run(
family='ice40',
device=device,
package=package,
toolchain=toolchain,
project=fpgaperf.get_project('oneblink'),
pcf=pcf,
verbose=self.verbose
)
def test_seed(self):
'''Try seeding, where possible'''
random.seed(1234)
for toolchain in fpgaperf.get_seedable():
seed = random.randint(1, 0x7FFFFFFF)
device, package = def_devpack(toolchain)
fpgaperf.run(
family='ice40',
device=device,
package=package,
toolchain=toolchain,
project=fpgaperf.get_project('oneblink'),
seed=seed,
verbose=self.verbose
)
if __name__ == '__main__':
unittest.main()
| 27.94958 | 75 | 0.562538 | [
"ISC"
] | arn4ud/fpga-tool-perf | test/test_all.py | 3,326 | Python |
import os
import freesasa
from .extract_residues import extract_residue
# Defaults
_DEFAULT_OPTIONS = {
'hetatm': True,
'hydrogen': True,
# 'halt-at-unknown': True,
# 'separate-chains' : False,
'separate-models': True
}
_DEFAULT_PARAMETERS = {
'algorithm': freesasa.LeeRichards,
'probe-radius': freesasa.Parameters.defaultParameters['probe-radius'],
'n-points': freesasa.Parameters.defaultParameters['n-points'],
'n-slices': freesasa.Parameters.defaultParameters['n-slices'],
'n-threads': freesasa.Parameters.defaultParameters['n-threads']
}
class FreesasaRunner:
"""Wrapper to help run freesasa on a single PDB file
Freesasa has a nice Python interface but some things don't work quite as
needed for BAC, at least in Python 3. This wrapper is intended to handle
these issues:
1. File names need conversion to bytes when passed to freesasa
2. By default should include HETATMS and hydrogens in analysis
Parameters
----------
config: str, optional
Path to configuration file containing residue composition
and atomic parameters - freesasa format.
options: dict, optional
Options to change how PDBs are parsed by freesasa.
parameters: dict, optional
Parameters to alter how freesasa computes surface area.
Methods
-------
run(pdb)
Run freesasa on input PDB file & return surface area results.
"""
def __init__(self, config, wsas_params, tmp_dir, nonstandard_residue_files, nonstandard_residue, ligand_topology, options=None, parameters=None):
"""Wrapper for freesasa
config: str
Path to configuration file containing residue composition
and atomic parameters - freesasa format.
options: dict, optional
Options to change how PDBs are parsed by freesasa.
parameters: dict, optional
Parameters to alter how freesasa computes surface area.
"""
# Hide warnings (as the load of multiple structures is two step and
# extended config is not read in first step).
freesasa.setVerbosity(1)
config = self._update_sasa_config(config, wsas_params, tmp_dir, nonstandard_residue_files, nonstandard_residue, ligand_topology)
self.classifier = freesasa.Classifier(bytes(str(config), 'utf-8'))
self.options = options or _DEFAULT_OPTIONS
self.parameters = parameters or _DEFAULT_PARAMETERS
def run(self, pdb):
"""Run freesasa on provided PDB file
Parameters
----------
pdb: str
Path to input PDB file
Returns
-------
list
SASA values for each atom of every model in the input PDB.
"""
structure_array = freesasa.structureArray(bytes(pdb, 'utf-8'), options=self.options, classifier=self.classifier)
results = []
for s in structure_array:
print('Computing SASA for each model/frame')
result = freesasa.calc(s)
atom_areas = [result.atomArea(ndx) for ndx in range(s.nAtoms())]
results.append(atom_areas)
return results
def _update_sasa_config(self, config, parameters, tmp_dir, nonstandard_residue_files, nonstandard_residue, ligand_topology):
"""
Add non-standard residues (including the ligand if a topology is
provided for it) to the freesasa config file.
        Notes
        -----
        The edited config file is written to tmp_dir as 'system_sasa.config'
        and is used in place of the original config.
        Returns
        -------
        str
            Path to the freesasa config file to use.
"""
files_to_add = nonstandard_residue_files
if ligand_topology:
files_to_add.append(ligand_topology)
residues_to_add = {}
for filename in files_to_add:
residues, gentop = extract_residue(filename)
residues_to_add.update(residues)
if nonstandard_residue:
residues_to_add.update(nonstandard_residue)
if residues_to_add:
sasa_config = os.path.join(tmp_dir, 'system_sasa.config')
self._add_residues_freesasa_config_file(residues_to_add, sasa_config, parameters, orig_filename=config)
return sasa_config
return config
@staticmethod
def _create_freesasa_section_text(new_residues, sasa_atom_params):
"""
Create text to add to freesasa configuration file to incorporate new residue.
Parameters
----------
new_residues : dict
Non-standard residues to add to the freesasa config file.
keys = residue names, values = atom name to type mapping (dict).
sasa_atom_params: dict
Maps atom type to properties needed by freesasa (radius and polarity).
Returns
-------
atom_type_section : str
Text to be added to freesasa config file atom type section.
residue_section : str
Text to be added to freesasa config file residue section.
"""
atom_types = []
# Create lines for residue section of format:
# residue_name atom_name atom_type
residue_section = ''
for res_name, atom_to_type in new_residues.items():
residue_section += '\n'
for atom_name, atom_type in atom_to_type.items():
residue_line = '{:s} {:s} {:s}\n'.format(res_name,
atom_name,
atom_type)
atom_types.append(atom_type)
residue_section += residue_line
# Create lines for atom type section of format:
# atom_type residue polarity
atom_type_section = ''
for atom_type in set(atom_types):
# use the predefined values if they are present
if atom_type in sasa_atom_params:
atom_line = '{:s} {:.2f} {:s}\n'.format(atom_type,
sasa_atom_params[atom_type]['radius'],
sasa_atom_params[atom_type]['polarity'])
else:
raise Exception('This atom type was not found to have preset radius and polarity')
atom_type_section += atom_line
return atom_type_section, residue_section
def _add_residues_freesasa_config_file(self, new_residues, new_filename, atom_params, orig_filename):
"""
Create a new freesasa config file that adds specified residue to the
content of an existing copy.
Parameters
----------
new_residues : dict
Non-standard residues to add to the freesasa config file.
keys = residue names, values = atom name to type mapping (dict).
new_filename: str
Filename to be used for the updated freesasa config file.
atom_params: dict
Radius and polarity information for each atom type.
orig_filename: str
Filename for the original freesasa config file.
"""
# Get text to add atom type and residue sections for the
# residues being added to the config file
(new_atom_types, new_residues) = self._create_freesasa_section_text(new_residues, atom_params)
with open(new_filename, 'w') as out_file, open(orig_filename) as input_config:
[out_file.write(l+new_atom_types if l.startswith('# extra') else l) for l in input_config]
out_file.write(new_residues)
| 33.252174 | 149 | 0.626831 | [
"Apache-2.0"
] | UCL-CCS/BAC2 | bac/analyse/wsas/freesasa_utils.py | 7,648 | Python |
import sys
from telethon import events, functions, __version__
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="helpme", allow_sudo=True)) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
help_string = """@UniBorg
Python {}
Telethon {}
UserBot Forked from https://github.com/expectocode/uniborg""".format(
sys.version,
__version__
)
tgbotusername = Config.TG_BOT_USER_NAME_BF_HER # pylint:disable=E0602
if tgbotusername is not None:
results = await borg.inline_query( # pylint:disable=E0602
tgbotusername,
help_string
)
await results[0].click(
event.chat_id,
reply_to=event.reply_to_msg_id,
hide_via=True
)
await event.delete()
else:
await event.reply(help_string)
await event.delete()
@borg.on(admin_cmd(pattern="dc")) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
result = await borg(functions.help.GetNearestDcRequest()) # pylint:disable=E0602
await event.edit(result.stringify())
@borg.on(admin_cmd(pattern="config")) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
result = await borg(functions.help.GetConfigRequest()) # pylint:disable=E0602
result = result.stringify()
logger.info(result) # pylint:disable=E0602
await event.edit("""Telethon UserBot powered by @UniBorg""")
| 29.68 | 85 | 0.66442 | [
"MPL-2.0"
] | anandvfc/UniBorg | stdplugins/_help.py | 1,484 | Python |
import pytest
from briefcase.integrations.subprocess import CommandOutputParseError, ParseError
def splitlines_parser(data):
"""A test parser that returns the input data, split by line."""
return data.splitlines()
def second_line_parser(data):
"""A test parser that returns the second line of input."""
try:
return data.splitlines()[1]
except IndexError:
raise ParseError("Input does not contain 2 lines")
def third_line_parser(data):
"""A test parser that returns the third line of input."""
try:
return data.splitlines()[2]
except IndexError:
raise ParseError("Input does not contain 3 lines")
def test_call(mock_sub, capsys):
"""A simple call to check_output will be invoked."""
output = mock_sub.parse_output(splitlines_parser, ["hello", "world"])
mock_sub._subprocess.check_output.assert_called_with(["hello", "world"], text=True)
assert capsys.readouterr().out == ""
assert output == ["some output line 1", "more output line 2"]
def test_call_with_arg(mock_sub, capsys):
"""Any extra keyword arguments are passed through as-is to check_output."""
output = mock_sub.parse_output(
splitlines_parser, ["hello", "world"], extra_arg="asdf"
)
mock_sub._subprocess.check_output.assert_called_with(
["hello", "world"],
extra_arg="asdf",
text=True,
)
assert capsys.readouterr().out == ""
assert output == ["some output line 1", "more output line 2"]
def test_call_with_parser_success(mock_sub, capsys):
"""Parser returns expected portion of check_output's output."""
output = mock_sub.parse_output(second_line_parser, ["hello", "world"])
mock_sub._subprocess.check_output.assert_called_with(["hello", "world"], text=True)
assert output == "more output line 2"
def test_call_with_parser_error(mock_sub, capsys):
"""Parser errors on output from check_output."""
with pytest.raises(
CommandOutputParseError,
match="Unable to parse command output: Input does not contain 3 lines",
):
mock_sub.parse_output(third_line_parser, ["hello", "world"])
mock_sub._subprocess.check_output.assert_called_with(["hello", "world"], text=True)
expected_output = (
"\n"
"Command Output Parsing Error:\n"
" Input does not contain 3 lines\n"
"Command:\n"
" hello world\n"
"Command Output:\n"
" some output line 1\n"
" more output line 2\n"
)
assert capsys.readouterr().out == expected_output
@pytest.mark.parametrize(
"in_kwargs, kwargs",
[
({}, {"text": True}),
({"text": True}, {"text": True}),
({"text": False}, {"text": False}),
({"universal_newlines": False}, {"universal_newlines": False}),
({"universal_newlines": True}, {"universal_newlines": True}),
],
)
def test_text_eq_true_default_overriding(mock_sub, in_kwargs, kwargs):
"""if text or universal_newlines is explicitly provided, those should
override text=true default."""
mock_sub.parse_output(splitlines_parser, ["hello", "world"], **in_kwargs)
mock_sub._subprocess.check_output.assert_called_with(["hello", "world"], **kwargs)
| 31.572816 | 87 | 0.664207 | [
"BSD-3-Clause"
] | pybee/briefcase | tests/integrations/subprocess/test_Subprocess__parse_output.py | 3,252 | Python |
"""This module contains the detection code for predictable variable
dependence."""
import logging
from copy import copy
from mythril.analysis.module.base import DetectionModule, EntryPoint
from mythril.analysis.report import Issue
from mythril.exceptions import UnsatError
from mythril.analysis import solver
from mythril.laser.smt import ULT, symbol_factory
from mythril.analysis.swc_data import TIMESTAMP_DEPENDENCE, WEAK_RANDOMNESS
from mythril.analysis.module.module_helpers import is_prehook
from mythril.laser.ethereum.state.global_state import GlobalState
from mythril.laser.ethereum.state.annotation import StateAnnotation
from typing import cast, List
log = logging.getLogger(__name__)
predictable_ops = ["COINBASE", "GASLIMIT", "TIMESTAMP", "NUMBER"]
final_ops = ["CALL", "SUICIDE", "STOP", "RETURN"]
class PredictableValueAnnotation:
"""Symbol annotation used if a variable is initialized from a predictable environment variable."""
def __init__(self, operation: str, add_constraints=None) -> None:
self.operation = operation
self.add_constraints = add_constraints
class PredictablePathAnnotation(StateAnnotation):
"""State annotation used when a path is chosen based on a predictable variable."""
def __init__(self, operation: str, location: int, add_constraints=None) -> None:
self.operation = operation
self.location = location
self.add_constraints = add_constraints
class OldBlockNumberUsedAnnotation(StateAnnotation):
"""State annotation set in blockhash prehook if the input value is lower than the current block number."""
def __init__(self, constraints) -> None:
self.block_constraints = constraints
class PredictableVariables(DetectionModule):
"""This module detects whether control flow decisions are made using predictable
parameters."""
name = "Control flow depends on a predictable environment variable"
swc_id = "{} {}".format(TIMESTAMP_DEPENDENCE, WEAK_RANDOMNESS)
description = (
"Check whether important control flow decisions are influenced by block.coinbase,"
"block.gaslimit, block.timestamp or block.number."
)
entry_point = EntryPoint.CALLBACK
pre_hooks = ["BLOCKHASH", "JUMPI"] + final_ops
post_hooks = ["BLOCKHASH"] + predictable_ops
def _execute(self, state: GlobalState) -> None:
"""
:param state:
:return:
"""
if state.get_current_instruction()["address"] in self.cache:
return
issues = self._analyze_state(state)
for issue in issues:
self.cache.add(issue.address)
self.issues.extend(issues)
@staticmethod
def _analyze_state(state: GlobalState) -> list:
"""
:param state:
:return:
"""
issues = []
if is_prehook():
opcode = state.get_current_instruction()["opcode"]
if opcode in final_ops:
for annotation in state.annotations:
if isinstance(annotation, PredictablePathAnnotation):
if annotation.add_constraints:
constraints = (
state.world_state.constraints
+ annotation.add_constraints
)
else:
constraints = copy(state.world_state.constraints)
try:
transaction_sequence = solver.get_transaction_sequence(
state, constraints
)
except UnsatError:
continue
description = (
"The "
+ annotation.operation
+ " is used in to determine a control flow decision. "
)
description += (
"Note that the values of variables like coinbase, gaslimit, block number and timestamp "
"are predictable and can be manipulated by a malicious miner. Also keep in mind that attackers "
"know hashes of earlier blocks. Don't use any of those environment variables for random number "
"generation or to make critical control flow decisions."
)
"""
Usually report low severity except in cases where the hash of a previous block is used to
determine control flow.
"""
severity = "Medium" if "hash" in annotation.operation else "Low"
"""
Note: We report the location of the JUMPI that lead to this path. Usually this maps to an if or
require statement.
"""
swc_id = (
TIMESTAMP_DEPENDENCE
if "timestamp" in annotation.operation
else WEAK_RANDOMNESS
)
issue = Issue(
contract=state.environment.active_account.contract_name,
function_name=state.environment.active_function_name,
address=annotation.location,
swc_id=swc_id,
bytecode=state.environment.code.bytecode,
title="Dependence on predictable environment variable",
severity=severity,
description_head="A control flow decision is made based on a predictable variable.",
description_tail=description,
gas_used=(
state.mstate.min_gas_used,
state.mstate.max_gas_used,
),
transaction_sequence=transaction_sequence,
)
issues.append(issue)
elif opcode == "JUMPI":
# Look for predictable state variables in jump condition
for annotation in state.mstate.stack[-2].annotations:
if isinstance(annotation, PredictableValueAnnotation):
state.annotate(
PredictablePathAnnotation(
annotation.operation,
state.get_current_instruction()["address"],
add_constraints=annotation.add_constraints,
)
)
break
elif opcode == "BLOCKHASH":
param = state.mstate.stack[-1]
try:
constraint = [
ULT(param, state.environment.block_number),
ULT(
state.environment.block_number,
symbol_factory.BitVecVal(2 ** 255, 256),
),
]
# Why the second constraint? Because without it Z3 returns a solution where param overflows.
solver.get_model(
state.world_state.constraints + constraint # type: ignore
)
state.annotate(OldBlockNumberUsedAnnotation(constraint))
except UnsatError:
pass
else:
# we're in post hook
opcode = state.environment.code.instruction_list[state.mstate.pc - 1][
"opcode"
]
if opcode == "BLOCKHASH":
# if we're in the post hook of a BLOCKHASH op, check if an old block number was used to create it.
annotations = cast(
List[OldBlockNumberUsedAnnotation],
list(state.get_annotations(OldBlockNumberUsedAnnotation)),
)
if len(annotations):
# We can append any block constraint here
state.mstate.stack[-1].annotate(
PredictableValueAnnotation(
"block hash of a previous block",
add_constraints=annotations[0].block_constraints,
)
)
else:
# Always create an annotation when COINBASE, GASLIMIT, TIMESTAMP or NUMBER is executed.
state.mstate.stack[-1].annotate(
PredictableValueAnnotation(
"block.{} environment variable".format(opcode.lower())
)
)
return issues
detector = PredictableVariables()
| 39.008658 | 124 | 0.534569 | [
"MIT"
] | marcuswin/mythril | mythril/analysis/module/modules/dependence_on_predictable_vars.py | 9,011 | Python |
# -*- coding: utf-8 -*-
from helper import unittest, PillowTestCase
from PIL import Image, ImageDraw, ImageFont, features
FONT_SIZE = 20
FONT_PATH = "Tests/fonts/DejaVuSans.ttf"
@unittest.skipUnless(features.check('raqm'), "Raqm Library is not installed.")
class TestImagecomplextext(PillowTestCase):
def test_english(self):
        # Smoke test; this should not fail.
ttf = ImageFont.truetype(FONT_PATH, FONT_SIZE)
im = Image.new(mode='RGB', size=(300, 100))
draw = ImageDraw.Draw(im)
draw.text((0, 0), 'TEST', font=ttf, fill=500, direction='ltr')
def test_complex_text(self):
ttf = ImageFont.truetype(FONT_PATH, FONT_SIZE)
im = Image.new(mode='RGB', size=(300, 100))
draw = ImageDraw.Draw(im)
draw.text((0, 0), 'اهلا عمان', font=ttf, fill=500)
target = 'Tests/images/test_text.png'
target_img = Image.open(target)
self.assert_image_similar(im, target_img, .5)
def test_y_offset(self):
ttf = ImageFont.truetype("Tests/fonts/NotoNastaliqUrdu-Regular.ttf", FONT_SIZE)
im = Image.new(mode='RGB', size=(300, 100))
draw = ImageDraw.Draw(im)
draw.text((0, 0), 'العالم العربي', font=ttf, fill=500)
target = 'Tests/images/test_y_offset.png'
target_img = Image.open(target)
self.assert_image_similar(im, target_img, 1.7)
def test_complex_unicode_text(self):
ttf = ImageFont.truetype(FONT_PATH, FONT_SIZE)
im = Image.new(mode='RGB', size=(300, 100))
draw = ImageDraw.Draw(im)
draw.text((0, 0), 'السلام عليكم', font=ttf, fill=500)
target = 'Tests/images/test_complex_unicode_text.png'
target_img = Image.open(target)
self.assert_image_similar(im, target_img, .5)
def test_text_direction_rtl(self):
ttf = ImageFont.truetype(FONT_PATH, FONT_SIZE)
im = Image.new(mode='RGB', size=(300, 100))
draw = ImageDraw.Draw(im)
draw.text((0, 0), 'English عربي', font=ttf, fill=500, direction='rtl')
target = 'Tests/images/test_direction_rtl.png'
target_img = Image.open(target)
self.assert_image_similar(im, target_img, .5)
def test_text_direction_ltr(self):
ttf = ImageFont.truetype(FONT_PATH, FONT_SIZE)
im = Image.new(mode='RGB', size=(300, 100))
draw = ImageDraw.Draw(im)
draw.text((0, 0), 'سلطنة عمان Oman', font=ttf, fill=500, direction='ltr')
target = 'Tests/images/test_direction_ltr.png'
target_img = Image.open(target)
self.assert_image_similar(im, target_img, .5)
def test_text_direction_rtl2(self):
ttf = ImageFont.truetype(FONT_PATH, FONT_SIZE)
im = Image.new(mode='RGB', size=(300, 100))
draw = ImageDraw.Draw(im)
draw.text((0, 0), 'Oman سلطنة عمان', font=ttf, fill=500, direction='rtl')
target = 'Tests/images/test_direction_ltr.png'
target_img = Image.open(target)
self.assert_image_similar(im, target_img, .5)
def test_ligature_features(self):
ttf = ImageFont.truetype(FONT_PATH, FONT_SIZE)
im = Image.new(mode='RGB', size=(300, 100))
draw = ImageDraw.Draw(im)
draw.text((0, 0), 'filling', font=ttf, fill=500, features=['-liga'])
target = 'Tests/images/test_ligature_features.png'
target_img = Image.open(target)
self.assert_image_similar(im, target_img, .5)
liga_size = ttf.getsize('fi', features=['-liga'])
self.assertEqual(liga_size,(13,19))
def test_kerning_features(self):
ttf = ImageFont.truetype(FONT_PATH, FONT_SIZE)
im = Image.new(mode='RGB', size=(300, 100))
draw = ImageDraw.Draw(im)
draw.text((0, 0), 'TeToAV', font=ttf, fill=500, features=['-kern'])
target = 'Tests/images/test_kerning_features.png'
target_img = Image.open(target)
self.assert_image_similar(im, target_img, .5)
def test_arabictext_features(self):
ttf = ImageFont.truetype(FONT_PATH, FONT_SIZE)
im = Image.new(mode='RGB', size=(300, 100))
draw = ImageDraw.Draw(im)
draw.text((0, 0), 'اللغة العربية', font=ttf, fill=500, features=['-fina','-init','-medi'])
target = 'Tests/images/test_arabictext_features.png'
target_img = Image.open(target)
self.assert_image_similar(im, target_img, .5)
if __name__ == '__main__':
unittest.main()
# End of file
| 33.395522 | 98 | 0.633966 | [
"MIT"
] | leorzz/simplemooc | Pillow-4.3.0/Tests/test_imagefontctl.py | 4,540 | Python |
import random as rd
import time as t
def Seed():
    # Seed the RNG with the current time in whole seconds.
    rd.seed(int(t.time()))
Seed()
def Random():
    # Gaussian sample with mean 0 and standard deviation 0.01.
    return rd.gauss(0, 0.01)
def RandomZeroMask(Prob=0.1):
    # Return 0.0 with probability Prob, otherwise 1.0.
    r = rd.random()
    if r < Prob:
        return 0.0
    else:
        return 1.0
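# Example usage (illustrative only): a dropout-style perturbation could be
#     delta = Random() * RandomZeroMask(Prob=0.1)
# which zeroes roughly 10% of the sampled values.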
"MIT"
] | ssuurrffaaccee/ToyNeuralNetworkImplementation | DYNAMIC/Rand.py | 255 | Python |
# Standard Library
import asyncio
import logging
import math
# Third Party
import numpy as np
import pandas as pd
from fastapi import FastAPI, HTTPException, Request
from nats.aio.client import Client as NATS
from nats_wrapper import NatsWrapper
app = FastAPI()
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(message)s")
nw = None
@app.on_event("startup")
async def startup_event():
global nw
nw = NatsWrapper()
loop = asyncio.get_event_loop()
await nw.connect(loop)
async def get_nats() -> NATS:
if not nw.nc.is_connected:
loop = asyncio.get_event_loop()
await nw.connect(loop)
return nw.nc
async def push_to_nats(nats: NATS, payload):
try:
df = pd.json_normalize(payload)
if "time" in df.columns:
df["dt"] = pd.to_datetime(df.time, errors="coerce")
df = df[df["dt"].notnull()]
df["time_nanoseconds"] = df["dt"].astype(np.int64)
# compute window
df["window_dt"] = df["dt"].dt.floor("30s")
df["window_start_time_ns"] = df["window_dt"].astype(np.int64)
df.drop(columns=["dt"], inplace=True)
df["_id"] = df["time_nanoseconds"].map(str) + df.groupby(
"time_nanoseconds"
).cumcount().map("{:016b}".format)
df = df.fillna("")
for window_start_time_ns, data_df in df.groupby(["window_start_time_ns"]):
window_payload_size_bytes = data_df.memory_usage(deep=True).sum()
num_chunked_dfs = max(
1, math.ceil(window_payload_size_bytes / nats.max_payload)
)
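                # Illustrative sizing (hypothetical numbers): a 3 MiB window with a
                # 1 MiB NATS max_payload gives ceil(3 / 1) = 3 chunked DataFrames.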
if num_chunked_dfs > 1:
logging.info(
"payload_df size = {} bytes. NATS max payload = {} bytes. Chunking into {} DataFrames".format(
window_payload_size_bytes, nats.max_payload, num_chunked_dfs
)
)
# process every chunk
for chunked_payload_df in np.array_split(data_df, num_chunked_dfs):
await nats.publish(
"raw_logs", chunked_payload_df.to_json().encode()
)
else:
# TODO logs without timestamp (e.g. control plane logs)
logging.info("Ignoring payload without time field")
except Exception as e:
logging.error("Error: {}".format(str(e)))
@app.post("/")
async def index(request: Request):
logging.info("Received request: {}".format(str(request)))
try:
logs_payload = await request.json()
asyncio.create_task(push_to_nats(await get_nats(), logs_payload))
    except Exception:
        # Bad Request
        raise HTTPException(
            status_code=400, detail=f"Something wrong with request {request}"
)
| 34.878049 | 118 | 0.588112 | [
"Apache-2.0"
] | cjellick/opni | src/payload-receiver-service/app/main.py | 2,860 | Python |
# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import inspect
from time import time, perf_counter
from eggroll.utils.log_utils import get_logger
L = get_logger(filename='profile')
def _method_profile_logger(func):
def wrapper(*args, **kwargs):
start_wall_time = time()
start_cpu_time = perf_counter()
result = func(*args, **kwargs)
end_wall_time = time()
end_cpu_time = perf_counter()
code = func.__code__
try:
outerframes = inspect.getouterframes(inspect.currentframe(), 2)
real_caller = outerframes[1]
L.trace(f'{{"metric_type": "func_profile", '
f'"qualname": "{func.__qualname__}", '
f'"caller": "{real_caller.filename.rsplit("/", 1)[-1]}:{real_caller.lineno}", '
f'"cpu_time": {end_cpu_time - start_cpu_time}, '
f'"wall_time": {end_wall_time - start_wall_time}}}')
return result
except Exception as e:
L.trace(f'{{"metric_type": "func_profile", '
f'"qualname": "{func.__qualname__}", '
f'"caller": "unknown", '
f'"cpu_time": {end_cpu_time - start_cpu_time}, '
f'"wall_time": {end_wall_time - start_wall_time}}}')
return wrapper
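# Example usage (illustrative, not part of this module): decorating a function
# makes every call emit a {"metric_type": "func_profile", ...} trace line with
# its cpu/wall time, e.g.
#
#     @_method_profile_logger
#     def _do_work(n):
#         return sum(range(n))
#
#     _do_work(1000000)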
| 35.943396 | 98 | 0.623097 | [
"Apache-2.0"
] | Jakob-98/eggroll | python/eggroll/core/aspects.py | 1,905 | Python |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Lbann(CMakePackage, CudaPackage, ROCmPackage):
"""LBANN: Livermore Big Artificial Neural Network Toolkit. A distributed
memory, HPC-optimized, model and data parallel training toolkit for deep
neural networks.
"""
homepage = "http://software.llnl.gov/lbann/"
url = "https://github.com/LLNL/lbann/archive/v0.91.tar.gz"
git = "https://github.com/LLNL/lbann.git"
maintainers = ['bvanessen']
version('develop', branch='develop')
version('0.101', sha256='69d3fe000a88a448dc4f7e263bcb342c34a177bd9744153654528cd86335a1f7')
version('0.100', sha256='d1bab4fb6f1b80ae83a7286cc536a32830890f6e5b0c3107a17c2600d0796912')
version('0.99', sha256='3358d44f1bc894321ce07d733afdf6cb7de39c33e3852d73c9f31f530175b7cd')
version('0.98.1', sha256='9a2da8f41cd8bf17d1845edf9de6d60f781204ebd37bffba96d8872036c10c66')
version('0.98', sha256='8d64b9ac0f1d60db553efa4e657f5ea87e790afe65336117267e9c7ae6f68239')
version('0.97.1', sha256='2f2756126ac8bb993202cf532d72c4d4044e877f4d52de9fdf70d0babd500ce4')
version('0.97', sha256='9794a706fc7ac151926231efdf74564c39fbaa99edca4acb745ee7d20c32dae7')
version('0.96', sha256='97af78e9d3c405e963361d0db96ee5425ee0766fa52b43c75b8a5670d48e4b4a')
version('0.95', sha256='d310b986948b5ee2bedec36383a7fe79403721c8dc2663a280676b4e431f83c2')
version('0.94', sha256='567e99b488ebe6294933c98a212281bffd5220fc13a0a5cd8441f9a3761ceccf')
version('0.93', sha256='77bfd7fe52ee7495050f49bcdd0e353ba1730e3ad15042c678faa5eeed55fb8c')
version('0.92', sha256='9187c5bcbc562c2828fe619d53884ab80afb1bcd627a817edb935b80affe7b84')
version('0.91', sha256='b69f470829f434f266119a33695592f74802cff4b76b37022db00ab32de322f5')
variant('al', default=True, description='Builds with support for Aluminum Library')
variant('build_type', default='Release',
description='The build type to build',
values=('Debug', 'Release'))
variant('conduit', default=True,
description='Builds with support for Conduit Library '
'(note that for v0.99 conduit is required)')
variant('deterministic', default=False,
description='Builds with support for deterministic execution')
variant('dihydrogen', default=True,
description='Builds with support for DiHydrogen Tensor Library')
variant('distconv', default=False,
description='Builds with support for spatial, filter, or channel '
'distributed convolutions')
variant('docs', default=False, description='Builds with support for building documentation')
variant('dtype', default='float',
description='Type for floating point representation of weights',
values=('float', 'double'))
variant('extras', default=False, description='Add python modules for LBANN related tools')
variant('fft', default=False, description='Support for FFT operations')
variant('half', default=False,
description='Builds with support for FP16 precision data types')
variant('hwloc', default=True, description='Add support for topology aware algorithms')
variant('nvprof', default=False, description='Build with region annotations for NVPROF')
variant('numpy', default=False,
description='Builds with support for processing NumPy data files')
variant('vision', default=False,
description='Builds with support for image processing data with OpenCV')
variant('vtune', default=False, description='Builds with support for Intel VTune')
variant('onednn', default=False, description='Support for OneDNN')
variant('nvshmem', default=False, description='Support for NVSHMEM')
variant('python', default=True, description='Support for Python extensions (e.g. Data Reader)')
variant('pfe', default=True, description='Python Frontend for generating and launching models')
variant('boost', default=False, description='Enable callbacks that use Boost libraries')
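    # Example spec (illustrative): `spack install lbann +cuda cuda_arch=70 +al`
    # exercises the CUDA-specific paths handled in cmake_args() below.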
# Variant Conflicts
conflicts('@:0.90,0.99:', when='~conduit')
conflicts('@0.90:0.101.99', when='+fft')
conflicts('@:0.90,0.101.99:', when='~dihydrogen')
conflicts('~cuda', when='+nvprof')
conflicts('~hwloc', when='+al')
conflicts('~cuda', when='+nvshmem')
conflicts('+cuda', when='+rocm', msg='CUDA and ROCm support are mutually exclusive')
conflicts('+extras', when='~pfe', msg='Python extras require the Python front end support')
conflicts('~vision', when='@0.91:0.101')
conflicts('~numpy', when='@0.91:0.101')
conflicts('~python', when='@0.91:0.101')
conflicts('~pfe', when='@0.91:0.101')
depends_on('[email protected]:', type='build')
# Specify the correct versions of Hydrogen
depends_on('hydrogen@:1.3.4', when='@0.95:0.100')
depends_on('[email protected]:1.4.99', when='@0.101:0.101.99')
depends_on('[email protected]:', when='@:0.90,0.102:')
# Add Hydrogen variants
depends_on('hydrogen +openmp +openmp_blas +shared +int64')
depends_on('hydrogen ~al', when='~al')
depends_on('hydrogen +al', when='+al')
depends_on('hydrogen ~cuda', when='~cuda')
depends_on('hydrogen +cuda', when='+cuda')
depends_on('hydrogen ~half', when='~half')
depends_on('hydrogen +half', when='+half')
depends_on('hydrogen ~rocm', when='~rocm')
depends_on('hydrogen +rocm', when='+rocm')
depends_on('hydrogen build_type=Debug', when='build_type=Debug')
# Older versions depended on Elemental not Hydrogen
depends_on('elemental +openmp_blas +shared +int64', when='@0.91:0.94')
depends_on('elemental +openmp_blas +shared +int64 build_type=Debug',
when='build_type=Debug @0.91:0.94')
# Specify the correct version of Aluminum
depends_on('aluminum@:0.3.99', when='@0.95:0.100 +al')
depends_on('[email protected]:0.4.99', when='@0.101:0.101.99 +al')
depends_on('[email protected]:', when='@:0.90,0.102: +al')
# Add Aluminum variants
depends_on('aluminum +cuda +nccl +ht +cuda_rma', when='+al +cuda')
depends_on('aluminum +rocm +rccl +ht', when='+al +rocm')
depends_on('[email protected]:', when='@:0.90,0.102:')
depends_on('dihydrogen +openmp', when='+dihydrogen')
depends_on('dihydrogen ~cuda', when='+dihydrogen ~cuda')
depends_on('dihydrogen +cuda', when='+dihydrogen +cuda')
depends_on('dihydrogen ~al', when='+dihydrogen ~al')
depends_on('dihydrogen +al', when='+dihydrogen +al')
depends_on('dihydrogen +distconv +cuda', when='+distconv')
depends_on('dihydrogen ~half', when='+dihydrogen ~half')
depends_on('dihydrogen +half', when='+dihydrogen +half')
depends_on('dihydrogen ~nvshmem', when='+dihydrogen ~nvshmem')
depends_on('dihydrogen +nvshmem', when='+dihydrogen +nvshmem')
depends_on('dihydrogen ~rocm', when='+dihydrogen ~rocm')
depends_on('dihydrogen +rocm', when='+dihydrogen +rocm')
depends_on('[email protected]', when='@0.101:0.101.99 +dihydrogen')
depends_on('dihydrogen@:0.0,0.2:', when='@:0.90,0.102: +dihydrogen')
conflicts('~dihydrogen', when='+distconv')
for arch in CudaPackage.cuda_arch_values:
depends_on('hydrogen cuda_arch=%s' % arch, when='+cuda cuda_arch=%s' % arch)
depends_on('aluminum cuda_arch=%s' % arch, when='+al +cuda cuda_arch=%s' % arch)
depends_on('dihydrogen cuda_arch=%s' % arch, when='+dihydrogen +cuda cuda_arch=%s' % arch)
depends_on('nccl cuda_arch=%s' % arch, when='+cuda cuda_arch=%s' % arch)
# variants +rocm and amdgpu_targets are not automatically passed to
# dependencies, so do it manually.
for val in ROCmPackage.amdgpu_targets:
depends_on('hydrogen amdgpu_target=%s' % val, when='amdgpu_target=%s' % val)
depends_on('aluminum amdgpu_target=%s' % val, when='+al amdgpu_target=%s' % val)
depends_on('dihydrogen amdgpu_target=%s' % val, when='+dihydrogen amdgpu_target=%s' % val)
depends_on('cudnn', when='@0.90:0.100.99 +cuda')
depends_on('[email protected]:', when='@:0.90,0.101: +cuda')
depends_on('cub', when='@0.94:0.98.2 +cuda ^cuda@:10.99')
depends_on('hipcub', when='+rocm')
depends_on('mpi')
depends_on('[email protected]:', when='@:0.90,0.102: +hwloc')
depends_on('[email protected]:1.11.99', when='@0.95:0.101.99 +hwloc')
depends_on('hwloc +cuda +nvml', when='+cuda')
depends_on('[email protected]:', when='+rocm')
depends_on('half', when='+half')
depends_on('[email protected]: +openmp', when='+fft')
# LBANN wraps OpenCV calls in OpenMP parallel loops, build without OpenMP
# Additionally disable video related options, they incorrectly link in a
# bad OpenMP library when building with clang or Intel compilers
depends_on('[email protected]: build_type=RelWithDebInfo +core +highgui '
'+imgcodecs +imgproc +jpeg +png +tiff +fast-math ~cuda',
when='+vision')
# Note that for Power systems we want the environment to add +powerpc
depends_on('[email protected]: +powerpc', when='+vision arch=ppc64le:')
depends_on('cnpy', when='+numpy')
depends_on('nccl', when='@0.94:0.98.2 +cuda')
depends_on('[email protected]: +hdf5~hdf5_compat', when='@0.94:0.99 +conduit')
depends_on('[email protected]:0.6.99 +hdf5~hdf5_compat', when='@0.100:0.101 +conduit')
depends_on('[email protected]: +hdf5~hdf5_compat', when='@:0.90,0.99:')
# LBANN can use Python in two modes 1) as part of an extensible framework
# and 2) to drive the front end model creation and launch
# Core library support for Python Data Reader and extensible interface
depends_on('python@3: +shared', type=('run'), when='@:0.90,0.99: +python')
extends("python", when='+python')
# Python front end and possible extra packages
depends_on('python@3: +shared', type=('build', 'run'), when='@:0.90,0.99: +pfe')
extends("python", when='+pfe')
depends_on('py-setuptools', type='build', when='+pfe')
depends_on('py-argparse', type='run', when='@:0.90,0.99: +pfe ^python@:2.6')
depends_on('py-configparser', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('[email protected]:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('[email protected]:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('[email protected]:', type=('build', 'run'), when='@:0.90,0.99: +pfe +extras')
depends_on('[email protected]:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('[email protected]:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('[email protected]:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('py-pytest', type='test', when='@:0.90,0.99: +pfe')
depends_on('[email protected]', type=('build', 'run'), when='@:0.90,0.99: +pfe')
depends_on('[email protected]', when='@:0.90,0.99:')
depends_on('py-breathe', type='build', when='+docs')
depends_on('doxygen', type='build', when='+docs')
depends_on('py-m2r', type='build', when='+docs')
depends_on('cereal')
depends_on('catch2', type=('build', 'test'))
depends_on('clara')
depends_on('llvm-openmp', when='%apple-clang')
depends_on('onednn cpu_runtime=omp gpu_runtime=none', when='+onednn')
depends_on('nvshmem', when='+nvshmem')
depends_on('zstr')
generator = 'Ninja'
depends_on('ninja', type='build')
@property
def common_config_args(self):
spec = self.spec
# Environment variables
cppflags = []
cppflags.append('-DLBANN_SET_EL_RNG')
args = []
args.extend([
'-DCMAKE_CXX_FLAGS=%s' % ' '.join(cppflags),
'-DLBANN_VERSION=spack',
])
if '+numpy' in spec:
args.append(
'-DCNPY_DIR={0}'.format(spec['cnpy'].prefix),
)
return args
def setup_build_environment(self, env):
if self.spec.satisfies('%apple-clang'):
env.append_flags(
'CPPFLAGS', self.compiler.openmp_flag)
env.append_flags(
'CFLAGS', self.spec['llvm-openmp'].headers.include_flags)
env.append_flags(
'CXXFLAGS', self.spec['llvm-openmp'].headers.include_flags)
env.append_flags(
'LDFLAGS', self.spec['llvm-openmp'].libs.ld_flags)
# Get any recent versions or non-numeric version
# Note that develop > numeric and non-develop < numeric
@when('@:0.90,0.94:')
def cmake_args(self):
spec = self.spec
args = self.common_config_args
args.extend([
'-DCMAKE_CXX_STANDARD=17',
'-DLBANN_WITH_CNPY=%s' % ('+numpy' in spec),
'-DLBANN_DETERMINISTIC:BOOL=%s' % ('+deterministic' in spec),
'-DLBANN_WITH_HWLOC=%s' % ('+hwloc' in spec),
'-DLBANN_WITH_ALUMINUM:BOOL=%s' % ('+al' in spec),
'-DLBANN_WITH_BOOST:BOOL=%s' % ('+boost' in spec),
'-DLBANN_WITH_CONDUIT:BOOL=%s' % ('+conduit' in spec),
'-DLBANN_WITH_NVSHMEM:BOOL=%s' % ('+nvshmem' in spec),
'-DLBANN_WITH_FFT:BOOL=%s' % ('+fft' in spec),
'-DLBANN_WITH_ONEDNN:BOOL=%s' % ('+onednn' in spec),
'-DLBANN_WITH_EMBEDDED_PYTHON:BOOL=%s' % ('+python' in spec),
'-DLBANN_WITH_PYTHON_FRONTEND:BOOL=%s' % ('+pfe' in spec),
'-DLBANN_WITH_TBINF=OFF',
'-DLBANN_WITH_UNIT_TESTING:BOOL=%s' % (self.run_tests),
'-DLBANN_WITH_VISION:BOOL=%s' % ('+vision' in spec),
'-DLBANN_WITH_VTUNE:BOOL=%s' % ('+vtune' in spec),
'-DLBANN_DATATYPE={0}'.format(spec.variants['dtype'].value),
'-DCEREAL_DIR={0}'.format(spec['cereal'].prefix),
# protobuf is included by py-protobuf+cpp
'-DProtobuf_DIR={0}'.format(spec['protobuf'].prefix),
'-Dprotobuf_MODULE_COMPATIBLE=ON'])
if '+cuda' in spec:
if spec.satisfies('^[email protected]:'):
args.append('-DCMAKE_CUDA_STANDARD=17')
else:
args.append('-DCMAKE_CUDA_STANDARD=14')
if spec.satisfies('@:0.90') or spec.satisfies('@0.95:'):
args.append(
'-DHydrogen_DIR={0}/CMake/hydrogen'.format(
spec['hydrogen'].prefix))
elif spec.satisfies('@0.94'):
args.append(
'-DElemental_DIR={0}/CMake/elemental'.format(
spec['elemental'].prefix))
if spec.satisfies('@0.94:0.98.2'):
args.append('-DLBANN_WITH_NCCL:BOOL=%s' %
('+cuda +nccl' in spec))
if '+vtune' in spec:
args.append('-DVTUNE_DIR={0}'.format(spec['vtune'].prefix))
if '+al' in spec:
args.append('-DAluminum_DIR={0}'.format(spec['aluminum'].prefix))
if '+conduit' in spec:
args.append('-DConduit_DIR={0}'.format(spec['conduit'].prefix))
# Add support for OpenMP with external (Brew) clang
if spec.satisfies('%clang platform=darwin'):
clang = self.compiler.cc
clang_bin = os.path.dirname(clang)
clang_root = os.path.dirname(clang_bin)
args.extend([
'-DOpenMP_CXX_FLAGS=-fopenmp=libomp',
'-DOpenMP_CXX_LIB_NAMES=libomp',
'-DOpenMP_libomp_LIBRARY={0}/lib/libomp.dylib'.format(
clang_root)])
if '+vision' in spec:
args.append('-DOpenCV_DIR:STRING={0}'.format(
spec['opencv'].prefix))
if '+cuda' in spec:
args.append(
'-DCUDA_TOOLKIT_ROOT_DIR={0}'.format(
spec['cuda'].prefix))
args.append(
'-DcuDNN_DIR={0}'.format(
spec['cudnn'].prefix))
if spec.satisfies('@0.94:0.98.2'):
if spec.satisfies('^cuda@:10.99'):
args.append('-DCUB_DIR={0}'.format(
spec['cub'].prefix))
if '+nccl' in spec:
args.append(
'-DNCCL_DIR={0}'.format(
spec['nccl'].prefix))
args.append(
'-DLBANN_WITH_NVPROF:BOOL=%s' % ('+nvprof' in spec))
if spec.satisfies('@:0.90') or spec.satisfies('@0.100:'):
args.append(
'-DLBANN_WITH_DIHYDROGEN:BOOL=%s' % ('+dihydrogen' in spec))
if spec.satisfies('@:0.90') or spec.satisfies('@0.101:'):
args.append(
'-DLBANN_WITH_DISTCONV:BOOL=%s' % ('+distconv' in spec))
if '+rocm' in spec:
args.extend([
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix),
'-DHIP_CXX_COMPILER={0}'.format(self.spec['hip'].hipcc)])
archs = self.spec.variants['amdgpu_target'].value
if archs != 'none':
arch_str = ",".join(archs)
cxxflags_str = " ".join(self.spec.compiler_flags['cxxflags'])
args.append(
'-DHIP_HIPCC_FLAGS=--amdgpu-target={0}'
' -g -fsized-deallocation -fPIC -std=c++17 {1}'.format(
arch_str, cxxflags_str)
)
return args
@when('@0.91:0.93')
def cmake_args(self):
spec = self.spec
args = self.common_config_args
args.extend([
'-DWITH_CUDA:BOOL=%s' % ('+cuda' in spec),
'-DWITH_CUDNN:BOOL=%s' % ('+cuda' in spec),
'-DELEMENTAL_USE_CUBLAS:BOOL=%s' % (
'+cublas' in spec['elemental']),
'-DWITH_TBINF=OFF',
'-DWITH_VTUNE=OFF',
'-DElemental_DIR={0}'.format(spec['elemental'].prefix),
'-DELEMENTAL_MATH_LIBS={0}'.format(
spec['elemental'].libs),
'-DVERBOSE=0',
'-DLBANN_HOME=.'])
if spec.variants['dtype'].value == 'float':
args.append('-DDATATYPE=4')
elif spec.variants['dtype'].value == 'double':
args.append('-DDATATYPE=8')
if '+vision' in spec:
args.append('-DOpenCV_DIR:STRING={0}'.format(
spec['opencv'].prefix))
if '+cudnn' in spec:
args.append('-DcuDNN_DIR={0}'.format(
spec['cudnn'].prefix))
if '+cub' in spec and spec.satisfies('^cuda@:10.99'):
args.append('-DCUB_DIR={0}'.format(
spec['cub'].prefix))
return args
| 46.198511 | 99 | 0.615104 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | JeffersonLab/spack | var/spack/repos/builtin/packages/lbann/package.py | 18,618 | Python |
# coding=utf-8
"""sksurgerytextoverlay tests"""
from sksurgeryutils.ui.sksurgerytextoverlay_demo import TextOverlayDemo
import pytest
import sys
def test_sksurgerytextoverlay():
""" Basic test to run the widget and make sure everything loads OK."""
if sys.platform == "darwin":
pytest.skip("Test not working on Mac runner")
# Use input video rather than camera to test
input_file = 'tests/data/test_video.avi'
gui = TextOverlayDemo(input_file)
gui.start()
| 25.1 | 74 | 0.713147 | [
"BSD-3-Clause"
] | SciKit-Surgery/scikit-surgeryutils | tests/test_sksurgerytextoverlay.py | 502 | Python |
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Bill)
| 14.125 | 32 | 0.769912 | [
"MIT"
] | rozenmd/housemates | bills/admin.py | 113 | Python |
# stdlib
import time
from unittest import skipIf
# 3p
import psycopg2
from psycopg2 import extensions
from psycopg2 import extras
from ddtrace import Pin
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.psycopg.patch import PSYCOPG2_VERSION
from ddtrace.contrib.psycopg.patch import patch
from ddtrace.contrib.psycopg.patch import unpatch
from tests.contrib.config import POSTGRES_CONFIG
from tests.opentracer.utils import init_tracer
from tests.utils import TracerTestCase
from tests.utils import assert_is_measured
from tests.utils import snapshot
if PSYCOPG2_VERSION >= (2, 7):
from psycopg2.sql import Identifier
from psycopg2.sql import Literal
from psycopg2.sql import SQL
TEST_PORT = POSTGRES_CONFIG["port"]
class PsycopgCore(TracerTestCase):
# default service
TEST_SERVICE = "postgres"
def setUp(self):
super(PsycopgCore, self).setUp()
patch()
def tearDown(self):
super(PsycopgCore, self).tearDown()
unpatch()
def _get_conn(self, service=None):
conn = psycopg2.connect(**POSTGRES_CONFIG)
pin = Pin.get_from(conn)
if pin:
pin.clone(service=service, tracer=self.tracer).onto(conn)
return conn
def test_patch_unpatch(self):
# Test patch idempotence
patch()
patch()
service = "fo"
conn = self._get_conn(service=service)
conn.cursor().execute("""select 'blah'""")
self.assert_structure(dict(name="postgres.query", service=service))
self.reset()
# Test unpatch
unpatch()
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
self.assert_has_no_spans()
# Test patch again
patch()
conn = self._get_conn(service=service)
conn.cursor().execute("""select 'blah'""")
self.assert_structure(dict(name="postgres.query", service=service))
def assert_conn_is_traced(self, db, service):
        # ensure the traced psycopg client doesn't add non-standard
        # methods
try:
db.execute("""select 'foobar'""")
except AttributeError:
pass
# Ensure we can run a query and it's correctly traced
q = """select 'foobarblah'"""
start = time.time()
cursor = db.cursor()
res = cursor.execute(q)
self.assertIsNone(res)
rows = cursor.fetchall()
end = time.time()
self.assertEquals(rows, [("foobarblah",)])
self.assert_structure(
dict(name="postgres.query", resource=q, service=service, error=0, span_type="sql"),
)
root = self.get_root_span()
self.assertIsNone(root.get_tag("sql.query"))
assert start <= root.start <= end
assert root.duration <= end - start
# confirm analytics disabled by default
self.reset()
# run a query with an error and ensure all is well
q = """select * from some_non_existant_table"""
cur = db.cursor()
try:
cur.execute(q)
except Exception:
pass
else:
assert 0, "should have an error"
self.assert_structure(
dict(
name="postgres.query",
resource=q,
service=service,
error=1,
span_type="sql",
meta={
"out.host": "127.0.0.1",
},
metrics={
"out.port": TEST_PORT,
},
),
)
root = self.get_root_span()
assert_is_measured(root)
self.assertIsNone(root.get_tag("sql.query"))
self.reset()
def test_opentracing_propagation(self):
# ensure OpenTracing plays well with our integration
query = """SELECT 'tracing'"""
db = self._get_conn()
ot_tracer = init_tracer("psycopg-svc", self.tracer)
with ot_tracer.start_active_span("db.access"):
cursor = db.cursor()
cursor.execute(query)
rows = cursor.fetchall()
self.assertEquals(rows, [("tracing",)])
self.assert_structure(
dict(name="db.access", service="psycopg-svc"),
(dict(name="postgres.query", resource=query, service="postgres", error=0, span_type="sql"),),
)
assert_is_measured(self.get_spans()[1])
self.reset()
with self.override_config("psycopg", dict(trace_fetch_methods=True)):
db = self._get_conn()
ot_tracer = init_tracer("psycopg-svc", self.tracer)
with ot_tracer.start_active_span("db.access"):
cursor = db.cursor()
cursor.execute(query)
rows = cursor.fetchall()
self.assertEquals(rows, [("tracing",)])
self.assert_structure(
dict(name="db.access", service="psycopg-svc"),
(
dict(name="postgres.query", resource=query, service="postgres", error=0, span_type="sql"),
dict(name="postgres.query.fetchall", resource=query, service="postgres", error=0, span_type="sql"),
),
)
assert_is_measured(self.get_spans()[1])
@skipIf(PSYCOPG2_VERSION < (2, 5), "context manager not available in psycopg2==2.4")
def test_cursor_ctx_manager(self):
# ensure cursors work with context managers
# https://github.com/DataDog/dd-trace-py/issues/228
conn = self._get_conn()
t = type(conn.cursor())
with conn.cursor() as cur:
assert t == type(cur), "{} != {}".format(t, type(cur))
cur.execute(query="""select 'blah'""")
rows = cur.fetchall()
assert len(rows) == 1, rows
assert rows[0][0] == "blah"
assert_is_measured(self.get_root_span())
self.assert_structure(
dict(name="postgres.query"),
)
def test_disabled_execute(self):
conn = self._get_conn()
self.tracer.enabled = False
# these calls were crashing with a previous version of the code.
conn.cursor().execute(query="""select 'blah'""")
conn.cursor().execute("""select 'blah'""")
self.assert_has_no_spans()
@skipIf(PSYCOPG2_VERSION < (2, 5), "_json is not available in psycopg2==2.4")
def test_manual_wrap_extension_types(self):
conn = self._get_conn()
# NOTE: this will crash if it doesn't work.
# _ext.register_type(_ext.UUID, conn_or_curs)
# TypeError: argument 2 must be a connection, cursor or None
extras.register_uuid(conn_or_curs=conn)
# NOTE: this will crash if it doesn't work.
# _ext.register_default_json(conn)
# TypeError: argument 2 must be a connection, cursor or None
extras.register_default_json(conn)
def test_manual_wrap_extension_adapt(self):
conn = self._get_conn()
# NOTE: this will crash if it doesn't work.
# items = _ext.adapt([1, 2, 3])
# items.prepare(conn)
# TypeError: argument 2 must be a connection, cursor or None
items = extensions.adapt([1, 2, 3])
items.prepare(conn)
# NOTE: this will crash if it doesn't work.
# binary = _ext.adapt(b'12345)
# binary.prepare(conn)
# TypeError: argument 2 must be a connection, cursor or None
binary = extensions.adapt(b"12345")
binary.prepare(conn)
@skipIf(PSYCOPG2_VERSION < (2, 7), "quote_ident not available in psycopg2<2.7")
def test_manual_wrap_extension_quote_ident(self):
from ddtrace import patch_all
patch_all()
from psycopg2.extensions import quote_ident
# NOTE: this will crash if it doesn't work.
# TypeError: argument 2 must be a connection or a cursor
conn = psycopg2.connect(**POSTGRES_CONFIG)
quote_ident("foo", conn)
def test_connect_factory(self):
services = ["db", "another"]
for service in services:
conn = self._get_conn(service=service)
self.assert_conn_is_traced(conn, service)
def test_commit(self):
conn = self._get_conn()
conn.commit()
self.assert_structure(dict(name="postgres.connection.commit", service=self.TEST_SERVICE))
def test_rollback(self):
conn = self._get_conn()
conn.rollback()
self.assert_structure(dict(name="postgres.connection.rollback", service=self.TEST_SERVICE))
@skipIf(PSYCOPG2_VERSION < (2, 7), "SQL string composition not available in psycopg2<2.7")
def test_composed_query(self):
"""Checks whether execution of composed SQL string is traced"""
query = SQL(" union all ").join(
[SQL("""select {} as x""").format(Literal("one")), SQL("""select {} as x""").format(Literal("two"))]
)
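        # For reference, query.as_string(db) renders roughly as:
        #   select 'one' as x union all select 'two' as x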
db = self._get_conn()
with db.cursor() as cur:
cur.execute(query=query)
rows = cur.fetchall()
assert len(rows) == 2, rows
assert rows[0][0] == "one"
assert rows[1][0] == "two"
assert_is_measured(self.get_root_span())
self.assert_structure(
dict(name="postgres.query", resource=query.as_string(db)),
)
@skipIf(PSYCOPG2_VERSION < (2, 7), "SQL string composition not available in psycopg2<2.7")
def test_composed_query_identifier(self):
"""Checks whether execution of composed SQL string is traced"""
db = self._get_conn()
with db.cursor() as cur:
# DEV: Use a temp table so it is removed after this session
cur.execute("CREATE TEMP TABLE test (id serial PRIMARY KEY, name varchar(12) NOT NULL UNIQUE);")
cur.execute("INSERT INTO test (name) VALUES (%s);", ("test_case",))
spans = self.get_spans()
assert len(spans) == 2
self.reset()
query = SQL("""select {}, {} from {}""").format(Identifier("id"), Identifier("name"), Identifier("test"))
cur.execute(query=query)
rows = cur.fetchall()
assert rows == [(1, "test_case")]
assert_is_measured(self.get_root_span())
self.assert_structure(
dict(name="postgres.query", resource=query.as_string(db)),
)
@snapshot()
@skipIf(PSYCOPG2_VERSION < (2, 7), "SQL string composition not available in psycopg2<2.7")
def test_composed_query_encoding(self):
"""Checks whether execution of composed SQL string is traced"""
import logging
logger = logging.getLogger()
logger.level = logging.DEBUG
query = SQL(" union all ").join([SQL("""select 'one' as x"""), SQL("""select 'two' as x""")])
conn = psycopg2.connect(**POSTGRES_CONFIG)
with conn.cursor() as cur:
cur.execute(query=query)
rows = cur.fetchall()
assert len(rows) == 2, rows
assert rows[0][0] == "one"
assert rows[1][0] == "two"
def test_analytics_default(self):
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_analytics_with_rate(self):
with self.override_config("psycopg", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5)
def test_analytics_without_rate(self):
with self.override_config("psycopg", dict(analytics_enabled=True)):
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0)
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_user_specified_app_service(self):
"""
When a user specifies a service for the app
The psycopg integration should not use it.
"""
# Ensure that the service name was configured
from ddtrace import config
assert config.service == "mysvc"
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
assert spans[0].service != "mysvc"
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_PSYCOPG_SERVICE="mysvc"))
def test_user_specified_service(self):
conn = self._get_conn()
conn.cursor().execute("""select 'blah'""")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
assert spans[0].service == "mysvc"
@skipIf(PSYCOPG2_VERSION < (2, 5), "Connection context managers not defined in <2.5.")
def test_contextmanager_connection(self):
service = "fo"
with self._get_conn(service=service) as conn:
conn.cursor().execute("""select 'blah'""")
self.assert_structure(dict(name="postgres.query", service=service))
@skipIf(PSYCOPG2_VERSION < (2, 7), "quote_ident not available in psycopg2<2.7")
def test_manual_wrap_extension_quote_ident_standalone():
from ddtrace import patch_all
patch_all()
from psycopg2.extensions import quote_ident
# NOTE: this will crash if it doesn't work.
# TypeError: argument 2 must be a connection or a cursor
conn = psycopg2.connect(**POSTGRES_CONFIG)
quote_ident("foo", conn)
| 34.733167 | 119 | 0.603389 | [
"Apache-2.0",
"BSD-3-Clause"
] | discord/dd-trace-py | tests/contrib/psycopg/test_psycopg.py | 13,928 | Python |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os,json,glob,re
import numpy as np
import pandas as pd
import nibabel as nb
from nilearn.input_data import NiftiMasker
def dcan2fmriprep(dcandir,outdir,sub_id=None):
dcandir = os.path.abspath(dcandir)
outdir = os.path.abspath(outdir)
if sub_id is None:
sub_idir = glob.glob(dcandir +'/sub*')
sub_id = [ os.path.basename(j) for j in sub_idir]
for j in sub_id:
dcan2fmriprepx(dcan_dir=dcandir,out_dir=outdir,sub_id=j)
return sub_id
def dcan2fmriprepx(dcan_dir,out_dir,sub_id):
"""
dcan2fmriprep(dcan_dir,out_dir)
"""
# get session id if available
sess =glob.glob(dcan_dir+'/'+sub_id+'/s*')
ses_id = []
ses_id = [ j.split('ses-')[1] for j in sess]
# anat dirx
for ses in ses_id:
anat_dirx = dcan_dir+'/' + sub_id + '/ses-' +ses + '/files/MNINonLinear/'
anatdir = out_dir +'/' + sub_id + '/ses-'+ses+ '/anat/'
os.makedirs(anatdir,exist_ok=True)
sess='ses-'+ses
tw1 = anat_dirx +'/T1w.nii.gz'
brainmask = anat_dirx + '/brainmask_fs.nii.gz'
ribbon = anat_dirx + '/ribbon.nii.gz'
segm = anat_dirx + '/aparc+aseg.nii.gz'
midR = glob.glob(anat_dirx + '/fsaverage_LR32k/*R.midthickness.32k_fs_LR.surf.gii')[0]
midL = glob.glob(anat_dirx + '/fsaverage_LR32k/*L.midthickness.32k_fs_LR.surf.gii')[0]
infR = glob.glob(anat_dirx + '/fsaverage_LR32k/*R.inflated.32k_fs_LR.surf.gii')[0]
infL = glob.glob(anat_dirx + '/fsaverage_LR32k/*L.inflated.32k_fs_LR.surf.gii')[0]
pialR = glob.glob(anat_dirx + '/fsaverage_LR32k/*R.pial.32k_fs_LR.surf.gii')[0]
pialL = glob.glob(anat_dirx + '/fsaverage_LR32k/*L.pial.32k_fs_LR.surf.gii')[0]
whiteR = glob.glob(anat_dirx + '/fsaverage_LR32k/*R.white.32k_fs_LR.surf.gii')[0]
whiteL = glob.glob(anat_dirx + '/fsaverage_LR32k/*L.white.32k_fs_LR.surf.gii')[0]
dcanimages = [tw1,segm,ribbon, brainmask,tw1,tw1,midL,midR,pialL,pialR,whiteL,whiteR,infL,infR]
t1wim = anatdir + sub_id + '_' + sess + '_desc-preproc_T1w.nii.gz'
t1seg = anatdir + sub_id + '_' + sess + '_dseg.nii.gz'
t1ribbon = anatdir + sub_id + '_' + sess + '_desc-ribbon_T1w.nii.gz'
t1brainm = anatdir + sub_id + '_' + sess + '_desc-brain_mask.nii.gz'
regfile1 = anatdir + sub_id + '_' + sess + '_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5'
regfile2 = anatdir + sub_id + '_' + sess + '_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5'
lMid = anatdir + sub_id + '_' + sess + '_hemi-L_midthickness.surf.gii'
rMid = anatdir + sub_id + '_' + sess + '_hemi-R_midthickness.surf.gii'
lpial = anatdir + sub_id + '_' + sess + '_hemi-L_pial.surf.gii'
rpial = anatdir + sub_id + '_' + sess + '_hemi-R_pial.surf.gii'
lwhite = anatdir + sub_id + '_' + sess + '_hemi-L_smoothwm.surf.gii'
rwhite = anatdir + sub_id + '_' + sess + '_hemi-R_smoothwm.surf.gii'
linf = anatdir + sub_id + '_' + sess + '_hemi-L_inflated.surf.gii'
rinf = anatdir + sub_id + '_' + sess + '_hemi-R_inflated.surf.gii'
newanatfiles =[t1wim,t1seg,t1ribbon,t1brainm,regfile1,regfile2,lMid,rMid,lpial,rpial,
lwhite,rwhite,linf,rinf]
for i,j in zip(dcanimages,newanatfiles):
symlinkfiles(i,j)
# get masks and transforms
wmmask =glob.glob(anat_dirx + '/wm_2mm_*_mask_eroded.nii.gz')[0]
csfmask =glob.glob(anat_dirx + '/vent_2mm_*_mask_eroded.nii.gz')[0]
tw1tonative = anat_dirx +'xfms/T1w_to_MNI_0GenericAffine.mat'
# get task and idx run 01
func_dirx = dcan_dir +'/' + sub_id + '/ses-' +ses_id[0] + '/files/MNINonLinear/Results/'
taskd = glob.glob(func_dirx + 'task-*')
taskid=[]
for k in taskd:
if not os.path.isfile(k):
taskid.append(os.path.basename(k).split('-')[1])
func_dir = out_dir +'/' + sub_id + '/ses-'+ses+ '/func/'
os.makedirs(func_dir,exist_ok=True)
ses_id = 'ses-'+ses
for ttt in taskid:
taskdir ='task-'+ttt
taskname = re.split(r'(\d+)', ttt)[0]
run_id = '_run-'+ str(re.split(r'(\d+)', ttt)[1])
func_dirxx = func_dirx + taskdir
sbref = func_dirxx + '/'+ taskdir +'_SBRef.nii.gz'
volume = func_dirxx + '/'+ taskdir + '.nii.gz'
brainmask = func_dirxx + '/brainmask_fs.2.0.nii.gz'
dtsereis = func_dirxx +'/'+ taskdir + '_Atlas.dtseries.nii'
motionp = func_dirxx + '/Movement_Regressors.txt'
rmsdx = func_dirxx + '/Movement_AbsoluteRMS.txt'
mvreg = pd.read_csv(motionp,header=None,delimiter=r"\s+")
mvreg = mvreg.iloc[:,0:6]
mvreg.columns=['trans_x','trans_y','trans_z','rot_x','rot_y','rot_z']
# convert rot to rad
mvreg['rot_x']=mvreg['rot_x']*np.pi/180
mvreg['rot_y']=mvreg['rot_y']*np.pi/180
mvreg['rot_z']=mvreg['rot_z']*np.pi/180
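        # e.g. a 90 degree rotation becomes pi/2 ~= 1.5708 rad, matching the
        # radian convention used for fMRIPrep-style rotation confounds.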
csfreg = extractreg(mask=csfmask,nifti=volume)
wmreg = extractreg(mask=wmmask,nifti=volume)
gsreg = extractreg(mask=brainmask,nifti=volume)
rsmd = np.loadtxt(rmsdx)
brainreg = pd.DataFrame({'global_signal':gsreg,'white_matter':wmreg,'csf':csfreg,'rmsd':rsmd })
regressors = pd.concat([mvreg, brainreg], axis=1)
dcanfunfiles=[sbref,dtsereis,tw1tonative,tw1tonative]
tr = nb.load(volume).header.get_zooms()[-1] # repetition time
jsontis={
"RepetitionTime": np.float(tr),
"TaskName": taskname}
json2={
"grayordinates": "91k", "space": "HCP grayordinates",
"surface": "fsLR","surface_density": "32k",
"volume": "MNI152NLin6Asym"}
#boldname = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_space-MNI152NLin6Asym_desc-preproc_bold.nii.gz'
boldjson = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_space-MNI152NLin6Asym_desc-preproc_bold.json'
confreg = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_desc-confounds_timeseries.tsv'
confregj = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_desc-confounds_timeseries.json'
boldref = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+'_space-MNI152NLin6Asym_boldref.nii.gz'
#brainmaskf = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id +'_space-MNI152NLin6Asym_desc-brain_mask.nii.gz'
dttseriesx = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_space-fsLR_den-91k_bold.dtseries.nii'
dttseriesj = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_space-fsLR_den-91k_bold.dtseries.json'
native2t1w = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_from-scanner_to-T1w_mode-image_xfm.txt'
t12native = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_from-T1w_to-scanner_mode-image_xfm.txt'
# maske coreg files here
fmfuncfiles = [boldref,dttseriesx,native2t1w,t12native]
# symlink files
for jj,kk in zip(dcanfunfiles,fmfuncfiles):
symlinkfiles(jj,kk)
figdir = out_dir +'/' + sub_id+ '/figures/'
os.makedirs(figdir,exist_ok=True)
bbreg = figdir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_desc-bbregister_bold.svg'
bbreg = bbregplot(fixed_image=tw1,moving_image=boldref,out_file=bbreg,contour=ribbon)
# write json
writejson(jsontis,boldjson)
writejson(json2,dttseriesj)
writejson(json2,confregj)
#save confounds
regressors.to_csv(confreg,sep='\t',index=False)
dcanjosn = {
"Name": "ABCDDCAN",
"BIDSVersion": "1.4.0",
"DatasetType": "derivative",
"GeneratedBy": [
{
"Name": "DCAN",
"Version": "0.0.4",
"CodeURL": "https://github.com/DCAN-Labs/abcd-hcp-pipeline"
}],}
writejson(dcanjosn,out_dir+'/dataset_description.json')
return dcanjosn
#def symlinkfiles(src, dest):
#if os.path.islink(dest):
#os.remove(dest)
#os.symlink(src,dest)
#else:
#os.symlink(src,dest)
#return dest
def copyfileobj_example(source, dest, buffer_size=1024*1024*1024):
"""
Copy a file from source to dest. source and dest
must be file-like objects, i.e. any object with a read or
write method, like for example StringIO.
"""
while True:
copy_buffer = source.read(buffer_size)
if not copy_buffer:
break
dest.write(copy_buffer)
def symlinkfiles(source, dest):
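    # Note: despite its name, this function copies the file contents
    # byte-for-byte (cf. the commented-out symlink version above); it does
    # not create a symlink.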
# Beware, this example does not handle any edge cases!
with open(source, 'rb') as src, open(dest, 'wb') as dst:
copyfileobj_example(src, dst)
def extractreg(mask,nifti):
masker=NiftiMasker(mask_img=mask)
signals = masker.fit_transform(nifti)
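    # signals has shape (n_timepoints, n_voxels_in_mask); averaging over axis 1
    # gives one summary value per timepoint, i.e. a single confound regressor.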
return np.mean(signals,axis=1)
def writejson(data,outfile):
with open(outfile,'w') as f:
json.dump(data,f)
return outfile
def bbregplot(fixed_image,moving_image, contour, out_file='report.svg'):
from nilearn.image import threshold_img, load_img,resample_img
from niworkflows.viz.utils import plot_registration
from niworkflows.viz.utils import cuts_from_bbox, compose_view
import numpy as np
fixed_image_nii = load_img(fixed_image)
moving_image_nii = load_img(moving_image)
moving_image_nii = resample_img(moving_image_nii, target_affine=np.eye(3), interpolation='nearest')
contour_nii = load_img(contour) if contour is not None else None
mask_nii = threshold_img(fixed_image_nii, 1e-3)
n_cuts = 7
if contour_nii:
cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)
else:
cuts = cuts_from_bbox(mask_nii, cuts=n_cuts)
compose_view(
plot_registration(
fixed_image_nii,
"fixed-image",
estimate_brightness=True,
cuts=cuts,
label='fixed',
contour=contour_nii,
compress='auto'
),
plot_registration(
moving_image_nii,
"moving-image",
estimate_brightness=True,
cuts=cuts,
label='moving',
contour=contour_nii,
compress='auto',
),
out_file=out_file,
)
return out_file | 39.870036 | 134 | 0.594712 | [
"MIT"
] | PennLINC/xcp_abcd | xcp_abcd/utils/dcan2fmriprep.py | 11,044 | Python |
import codecs
import os
from setuptools import setup, find_packages
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
VERSION = (0, 8, 0)
version = '.'.join(map(str, VERSION))
setup(
name='python-quickbooks',
version=version,
author='Edward Emanuel Jr.',
author_email='[email protected]',
description='A Python library for accessing the Quickbooks API.',
url='https://github.com/sidecars/python-quickbooks',
license='MIT',
keywords=['quickbooks', 'qbo', 'accounting'],
long_description=read('README.rst'),
    long_description_content_type='text/x-rst',
test_runner='nosetests',
entry_points={
'console_scripts': ['quickbooks-cli=quickbooks.tools.cli:cli_execute']
},
install_requires=[
'setuptools',
'intuit-oauth==1.2.2',
'rauth>=0.7.1',
'requests>=2.7.0',
'simplejson>=2.2.0',
'six>=1.4.0',
'python-dateutil',
'pycparser==2.18'
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
packages=find_packages(),
)
| 27.982143 | 78 | 0.611359 | [
"MIT"
] | Ethernodes-org/python-quickbooks | setup.py | 1,567 | Python |
from __future__ import absolute_import
import os
from collections import namedtuple
import time
from torch.nn import functional as F
from baseline.fast_rcnn.model.utils.creator_tool import AnchorTargetCreator, ProposalTargetCreator
from torch import nn
import torch as t
from baseline.fast_rcnn.utils import array_tool as at
from baseline.fast_rcnn.utils.vis_tool import Visualizer
from baseline.fast_rcnn.utils.config import opt
from torchnet.meter import ConfusionMeter, AverageValueMeter
LossTuple = namedtuple('LossTuple',
['rpn_loc_loss',
'rpn_cls_loss',
'roi_loc_loss',
'roi_cls_loss',
'total_loss'
])
class FasterRCNNTrainer(nn.Module):
"""wrapper for conveniently training. return losses
The losses include:
* :obj:`rpn_loc_loss`: The localization loss for \
Region Proposal Network (RPN).
* :obj:`rpn_cls_loss`: The classification loss for RPN.
* :obj:`roi_loc_loss`: The localization loss for the head module.
* :obj:`roi_cls_loss`: The classification loss for the head module.
* :obj:`total_loss`: The sum of 4 loss above.
Args:
faster_rcnn (model.FasterRCNN):
A Faster R-CNN model that is going to be trained.
"""
def __init__(self, faster_rcnn):
super(FasterRCNNTrainer, self).__init__()
self.faster_rcnn = faster_rcnn
self.rpn_sigma = opt.rpn_sigma
self.roi_sigma = opt.roi_sigma
# target creator create gt_bbox gt_label etc as training targets.
self.anchor_target_creator = AnchorTargetCreator()
self.proposal_target_creator = ProposalTargetCreator()
self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
self.loc_normalize_std = faster_rcnn.loc_normalize_std
self.optimizer = self.faster_rcnn.get_optimizer()
# visdom wrapper
self.vis = Visualizer(env=opt.env)
# indicators for training status
self.rpn_cm = ConfusionMeter(2)
self.roi_cm = ConfusionMeter(21)
self.meters = {k: AverageValueMeter() for k in LossTuple._fields} # average loss
def forward(self, imgs, bboxes, labels, scale):
"""Forward Faster R-CNN and calculate losses.
Here are notations used.
* :math:`N` is the batch size.
* :math:`R` is the number of bounding boxes per image.
Currently, only :math:`N=1` is supported.
Args:
imgs (~torch.autograd.Variable): A variable with a batch of images.
bboxes (~torch.autograd.Variable): A batch of bounding boxes.
Its shape is :math:`(N, R, 4)`.
        labels (~torch.autograd.Variable): A batch of labels.
Its shape is :math:`(N, R)`. The background is excluded from
the definition, which means that the range of the value
is :math:`[0, L - 1]`. :math:`L` is the number of foreground
classes.
scale (float): Amount of scaling applied to
the raw image during preprocessing.
Returns:
namedtuple of 5 losses
"""
n = bboxes.shape[0]
if n != 1:
raise ValueError('Currently only batch size 1 is supported.')
_, _, H, W = imgs.shape
img_size = (H, W)
features = self.faster_rcnn.extractor(imgs)
rpn_locs, rpn_scores, rois, roi_indices, anchor = \
self.faster_rcnn.rpn(features, img_size, scale)
# Since batch size is one, convert variables to singular form
bbox = bboxes[0]
label = labels[0]
rpn_score = rpn_scores[0]
rpn_loc = rpn_locs[0]
roi = rois
# Sample RoIs and forward
# it's fine to break the computation graph of rois,
# consider them as constant input
sample_roi, gt_roi_loc, gt_roi_label = self.proposal_target_creator(
roi,
at.tonumpy(bbox),
at.tonumpy(label),
self.loc_normalize_mean,
self.loc_normalize_std)
        # NOTE: it's all zeros because only batch size 1 is supported for now
sample_roi_index = t.zeros(len(sample_roi))
roi_cls_loc, roi_score = self.faster_rcnn.head(
features,
sample_roi,
sample_roi_index)
# ------------------ RPN losses -------------------#
gt_rpn_loc, gt_rpn_label = self.anchor_target_creator(
at.tonumpy(bbox),
anchor,
img_size)
gt_rpn_label = at.totensor(gt_rpn_label).long()
gt_rpn_loc = at.totensor(gt_rpn_loc)
rpn_loc_loss = _fast_rcnn_loc_loss(
rpn_loc,
gt_rpn_loc,
gt_rpn_label.data,
self.rpn_sigma)
# NOTE: default value of ignore_index is -100 ...
rpn_cls_loss = F.cross_entropy(rpn_score, gt_rpn_label.cuda(), ignore_index=-1)
_gt_rpn_label = gt_rpn_label[gt_rpn_label > -1]
_rpn_score = at.tonumpy(rpn_score)[at.tonumpy(gt_rpn_label) > -1]
self.rpn_cm.add(at.totensor(_rpn_score, False), _gt_rpn_label.data.long())
# ------------------ ROI losses (fast rcnn loss) -------------------#
n_sample = roi_cls_loc.shape[0]
roi_cls_loc = roi_cls_loc.view(n_sample, -1, 4)
roi_loc = roi_cls_loc[t.arange(0, n_sample).long().cuda(), \
at.totensor(gt_roi_label).long()]
gt_roi_label = at.totensor(gt_roi_label).long()
gt_roi_loc = at.totensor(gt_roi_loc)
roi_loc_loss = _fast_rcnn_loc_loss(
roi_loc.contiguous(),
gt_roi_loc,
gt_roi_label.data,
self.roi_sigma)
roi_cls_loss = nn.CrossEntropyLoss()(roi_score, gt_roi_label.cuda())
self.roi_cm.add(at.totensor(roi_score, False), gt_roi_label.data.long())
losses = [rpn_loc_loss, rpn_cls_loss, roi_loc_loss, roi_cls_loss]
losses = losses + [sum(losses)]
return LossTuple(*losses)
def train_step(self, imgs, bboxes, labels, scale):
self.optimizer.zero_grad()
losses = self.forward(imgs, bboxes, labels, scale)
losses.total_loss.backward()
self.optimizer.step()
self.update_meters(losses)
return losses
def save(self, save_optimizer=False, save_path=None, **kwargs):
"""serialize models include optimizer and other info
return path where the model-file is stored.
Args:
save_optimizer (bool): whether save optimizer.state_dict().
save_path (string): where to save model, if it's None, save_path
            is generated using a time string and info from kwargs.
Returns:
save_path(str): the path to save models.
"""
save_dict = dict()
save_dict['model'] = self.faster_rcnn.state_dict()
save_dict['config'] = opt._state_dict()
save_dict['other_info'] = kwargs
save_dict['vis_info'] = self.vis.state_dict()
if save_optimizer:
save_dict['optimizer'] = self.optimizer.state_dict()
if save_path is None:
timestr = time.strftime('%m%d%H%M')
save_path = 'checkpoints/fasterrcnn_%s' % timestr
for k_, v_ in kwargs.items():
save_path += '_%s' % v_
save_dir = os.path.dirname(save_path)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
t.save(save_dict, save_path)
self.vis.save([self.vis.env])
return save_path
def load(self, path, load_optimizer=True, parse_opt=False, cpu_flag: bool = True):
if cpu_flag:
state_dict = t.load(path,
map_location=t.device('cpu'))
else:
state_dict = t.load(path)
if 'model' in state_dict:
self.faster_rcnn.load_state_dict(state_dict['model'])
else: # legacy way, for backward compatibility
self.faster_rcnn.load_state_dict(state_dict)
            return self
if parse_opt:
opt._parse(state_dict['config'])
if 'optimizer' in state_dict and load_optimizer:
self.optimizer.load_state_dict(state_dict['optimizer'])
return self
def update_meters(self, losses):
loss_d = {k: at.scalar(v) for k, v in losses._asdict().items()}
for key, meter in self.meters.items():
meter.add(loss_d[key])
def reset_meters(self):
for key, meter in self.meters.items():
meter.reset()
self.roi_cm.reset()
self.rpn_cm.reset()
def get_meter_data(self):
return {k: v.value()[0] for k, v in self.meters.items()}
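# A minimal usage sketch (an assumption, not part of this module): the trainer is
# driven one image at a time, since forward() only supports batch size 1.
# `faster_rcnn` and `dataloader` are hypothetical stand-ins for a FasterRCNN model
# and an iterable of (img, bbox, label, scale) samples; a CUDA device and a running
# visdom server (needed by the Visualizer) are assumed.
def _trainer_usage_sketch(faster_rcnn, dataloader):
    trainer = FasterRCNNTrainer(faster_rcnn).cuda()
    trainer.reset_meters()
    for img, bbox, label, scale in dataloader:
        trainer.train_step(img.cuda(), bbox.cuda(), label.cuda(), float(scale))
    return trainer.get_meter_data()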
def _smooth_l1_loss(x, t, in_weight, sigma):
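    # Smooth L1 (Huber-style) loss used by Faster R-CNN, applied element-wise to the
    # in_weight-masked difference x = in_weight * (pred - target):
    #   0.5 * sigma^2 * x^2      if |x| < 1 / sigma^2
    #   |x| - 0.5 / sigma^2      otherwise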
sigma2 = sigma ** 2
diff = in_weight * (x - t)
abs_diff = diff.abs()
flag = (abs_diff.data < (1. / sigma2)).float()
y = (flag * (sigma2 / 2.) * (diff ** 2) +
(1 - flag) * (abs_diff - 0.5 / sigma2))
return y.sum()
def _fast_rcnn_loc_loss(pred_loc, gt_loc, gt_label, sigma):
in_weight = t.zeros(gt_loc.shape).cuda()
# Localization loss is calculated only for positive rois.
    # NOTE: unlike the original implementation,
    # we don't need inside_weight and outside_weight; they can be derived from gt_label
in_weight[(gt_label > 0).view(-1, 1).expand_as(in_weight).cuda()] = 1
loc_loss = _smooth_l1_loss(pred_loc, gt_loc, in_weight.detach(), sigma)
    # Normalize by total number of negative and positive rois.
loc_loss /= ((gt_label >= 0).sum().float()) # ignore gt_label==-1 for rpn_loss
return loc_loss
| 36.859316 | 98 | 0.611925 | [
"BSD-3-Clause"
] | ITMO-NSS-team/LightObjRecEnsembler | baseline/fast_rcnn/trainer.py | 9,694 | Python |
import math
import random
import time
def average_density(rdr):
countZeros = 0
length = 0
for i in rdr:
length = length + 1
if (i == 0):
countZeros = countZeros + 1
return [length - countZeros, length]
def check_rdr(rdr):
for i in range (0, len(rdr)-1):
if rdr[i] != 0 and rdr[i+1] != 0:
return False
return True
def generate_random_D(m, l):
if l > (m+1)/2:
raise ValueError("l should satisfy the condition l <= (m+1)/2")
D = []
for i in range(2, l+1, 1):
odd = False
while not odd:
x = random.randint(3, m)
if(x % 2 != 0 and x not in D):
odd = True
D.append(x)
D.sort()
D.insert(0, 1)
return D
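# Example (illustrative values only): generate_random_D(2**5, 3) could return
# something like [1, 7, 19] -- the digit 1 plus l-1 distinct random odd digits
# drawn from [3, m], sorted in ascending order.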
def add_carry_revised(bin_k):
len_k = len(bin_k)
# convert bin_k to an array to allow change of one bit easily
bin_s = list(bin_k)
carry = '0'
    # If k is empty, the result is just the carry bit.
if (bin_k == ''):
return '1'
# If LSB is 0, we just add carry to make it one. If it's 1, we make it 0 and carry is set to 1
if(bin_k[len_k-1] == '0'):
bin_s[len_k-1] = '1'
else:
bin_s[len_k-1] = '0'
carry = '1'
# index is set to the second LSB
index = len_k-2
while carry == '1':
# if k was only 1 bit, we just append the carry
if index == -1:
carry = '0'
bin_s.insert(0, '1')
# if we reached the MSB and it's 1, then we make it 0 and append 1,
# if it is 0, it is just set to 1.
elif index == 0:
carry = '0'
if (bin_s[index] == '1'):
bin_s[index] = '0'
bin_s.insert(0, '1')
else:
bin_s[index] = '1'
# if the bit is neither of the last two cases, it's set to 1 when it is 0,
# or it is set to 0, and carry is still 1
elif(bin_k[index] == '0'):
bin_s[index] = '1'
carry = '0'
else:
bin_s[index] = '0'
# Update the index
index = index - 1
# bin_s is converted back to a variable
bin_k = "".join(bin_s)
return bin_k
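# Illustrative self-test (a hypothetical helper, not used by the algorithm):
# add_carry_revised performs a binary increment on the bit-string representation.
def _add_carry_selftest():
    assert add_carry_revised('1011') == '1100'
    assert add_carry_revised('111') == '1000'
    assert add_carry_revised('') == '1'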
def get_Wn(D):
return int(math.floor(math.log(max(D), 2)))
def RDR_algorithm(D, k):
rdr = []
bin_k = bin(k)[2:]
# get number of bits
Wn = get_Wn(D)
flag_d = 0
while bin_k != '':
            # If k is even, a zero digit is prepended to rdr and k is shifted right 1 bit
if bin_k[len(bin_k)-1] == '0':
rdr.insert(0, 0)
bin_k = bin_k[:len(bin_k)-1]
continue
        # if the LSB is not 0, we extract a window of w bits
for w in range(Wn + 1, 0, -1):
            # if the window is bigger than the length of k, we need a smaller window
if (w > len(bin_k)):
continue
# we check every d in the digit set D
for d in D:
bin_d = bin(d)[2:] # get the binary representation of d
length_bin_d = len(bin_d)
# extract w bits from bin_k
k_reg = bin_k[len(bin_k) - w:]
                # compute the negative residue of d; if neg_d is negative, it is ignored by setting it to 0.
                neg_d = 2**w - d
                if neg_d < 0:
                    neg_d = 0
neg_bin_d = bin(neg_d)[2:] # get the binary representation of neg_d
length_neg_bin_d = len(neg_bin_d)
                # d can be chosen directly only if it does not exceed the window value;
                # its negative residue may still match even when d itself is too large.
                if d <= int(k_reg, 2) or neg_d == int(k_reg, 2):
if int(bin_d, 2) ^ int(k_reg, 2) == 0:
rdr.insert(0, d)
# inserting w-1 zeros
for j in range(0, w-1):
rdr.insert(0, 0)
# update k by shifting it right w bits
bin_k = bin_k[:len(bin_k) - w]
# set flag_d to 1 to set the window to Wn+1
flag_d = 1
break
elif int(neg_bin_d, 2) ^ int(k_reg, 2) == 0 and neg_d != 1:
rdr.insert(0, -d)
# Inserting zeros
for j in range(0, w-1):
rdr.insert(0, 0)
# update k by shifting it right w bits
bin_k = bin_k[:len(bin_k) - w]
# update k after adding a carry to LSB
bin_k = add_carry_revised(bin_k)
# set flag_d to 1 to set the window to Wn+1
flag_d = 1
break
# break out of the for loop to check if we finished k or not
if flag_d == 1:
flag_d = 0
break
    # In the end, there might be some leading zeros which are not needed;
    # this while loop strips the leading zeros from rdr
while (rdr[0] == 0):
rdr = rdr[1:]
    # return the resulting digit representation
return rdr
# this function returns the integer value of an rdr representation.
def check_num(rdr):
b = 1
sum = 0
for i in range(len(rdr)-1, -1, -1):
sum = sum + b*rdr[i]
b = b*2
return sum
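# Round-trip sketch (a hypothetical helper mirroring the __main__ block below):
# recode k over the digit set D, then map the digits back with check_num and test
# the "no adjacent non-zero digits" property with check_rdr.
def _rdr_roundtrip_example(k=314154655, D=(1, 3, 23, 27)):
    rdr = RDR_algorithm(list(D), k)
    return rdr, check_num(rdr) == k, check_rdr(rdr)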
def run_tests_time():
i = 10
j = 0
averageTime = 0
nist = [651056770906015076056810763456358567190100156695615665659,
2695995667150639794667015087019625940457807714424391721682712368051,
115792089210351248362697456949407573528996955234135760342422159061068512044339,
26959956671506397946670150870196259404578077144243917216827126959956671506397946670150870196259404578077144243917216,
2695995667150639794667015087019625940457807714424391721682712368058238947189273490172349807129834790127349087129834623486127461012630462184628923461201280461]
w = [5, 7, 9 , 11]
index_w = 0
index_nist = 0
while index_w < 1:
while index_nist < 5:
D = generate_random_D(2**w[index_w], 2**(w[index_w]-3)-1)
while j < 1000:
# print j
startTime = time.time()
rdr = RDR_algorithm(D, nist[index_nist])
endTime = time.time()
averageTime = averageTime + (endTime - startTime)
j = j+1
averageTime = averageTime / 1000
print "Average Time for NIST[", index_nist, "] and w = ", w[index_w], " = ", averageTime
averageTime = 0
j = 0
index_nist = index_nist +1
index_nist = 0
index_w = index_w + 1
if __name__ == '__main__':
# print "bin > ", bin(651056770906015076056810763456358567190100156695615665659)
# # run_tests_time()
# nist = [651056770906015076056810763456358567190100156695615665659,
# 2695995667150639794667015087019625940457807714424391721682712368051,
# 115792089210351248362697456949407573528996955234135760342422159061068512044339,
# 26959956671506397946670150870196259404578077144243917216827126959956671506397946670150870196259404578077144243917216,
# 2695995667150639794667015087019625940457807714424391721682712368058238947189273490172349807129834790127349087129834623486127461012630462184628923461201280461]
# D = [1, 7, 23, 25, 33, 37, 39, 43, 49, 53, 63, 65, 67, 71, 75, 77, 85, 89, 97, 99, 103, 107, 113, 115, 117, 119, 127, 131, 133, 135, 145, 151, 153, 157, 163, 165, 171, 181, 183, 185, 189, 191, 197, 199, 201, 203, 207, 211, 213, 219, 221, 225, 227, 229, 233, 235, 237, 243, 247, 255, 257, 259, 269, 283, 287, 295, 307, 311, 321, 329, 333, 335, 339, 341, 345, 349, 351, 371, 373, 381, 385, 393, 403, 405, 411, 419,421, 429, 431, 433, 435, 437, 441, 459, 471, 489, 503, 519, 521, 523, 527, 529, 535, 537, 543, 547, 549, 563, 567, 577, 585, 589, 601, 603, 609, 615, 619, 627, 633, 635, 641, 643, 655, 659, 665, 671, 675, 681, 687, 709, 711, 719, 727, 729, 731, 733, 735, 737, 741, 743, 745, 747, 749, 751, 755, 761, 763, 765, 771, 777, 779, 783, 785, 789, 797, 803, 807, 813, 817, 827, 839, 841, 845, 853, 859, 863, 865, 871, 873, 875, 883, 887, 889, 891, 895, 897, 899, 901, 905, 909, 915, 925, 927, 933, 935, 945, 949, 961, 963, 967, 977, 983, 985, 987, 989, 995]
# k = nist[4]
# rdr = RDR_algorithm(D, k)
# print "IFRA > ", rdr
rdr = RDR_algorithm([1, 3, 23, 27], 314154655)
print "RDR > ", rdr
print "Min_len > ", len(rdr)
print "IsRDR > ", check_rdr(rdr)
print "check > ", check_num(rdr) | 42.679612 | 968 | 0.535942 | [
"MIT"
] | iMohannad/Random_Recording_Algorithm | Python/IFRA.py | 8,792 | Python |
"""
WSGI config for djangoCMS project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoCMS.settings")
application = get_wsgi_application()
| 23.235294 | 78 | 0.787342 | [
"MIT"
] | yeLer/Vue-Somes | step3ComprehensiveProject/django-vue-cms/djangoCMS/djangoCMS/wsgi.py | 395 | Python |