ext | sha | content
---|---|---|
py | 1a35e8205f6a8583288f695dee55a0404f8ae149 | #!/usr/bin/python
#
# Generate test cases for version_test.go
#
# Herein lies my first ever python script...
#
import rpm
versions = [
"",
"0",
"1",
"2",
"10",
"100",
"0.0",
"0.1",
"0.10",
"0.99",
"1.0",
"1.99",
"2.0",
"0.0.0",
"0.0.1",
"0.0.2",
"0.0.10",
"0.0.99",
"0.1.0",
"0.2.0",
"0.10.0",
"0.99.0",
"0.100.0",
"0.0.0.0",
"0.0.0.1",
"0.0.0.10",
"0.0.1.0",
"0.0.01.0",
"1.2.3.4",
"1-2-3-4",
"20150101",
"20151212",
"20151212.0",
"20151212.1",
"2015.1.1",
"2015.02.02",
"2015.12.12",
"1.2.3a",
"1.2.3b",
"R16B",
"R16C",
"1.2.3.2016.1.1",
"0.5a1.dev",
"1.8.B59BrZX",
"0.07b4p1",
"3.99.5final.SP07",
"3.99.5final.SP08",
"0.4.tbb.20100203",
"0.5.20120830CVS.el7",
"1.el7",
"1.el6",
"10.el7",
"01.el7",
"0.17.20140318svn632.el7",
"0.17.20140318svn633.el7",
"1.20140522gitad6fb3e.el7",
"1.20140522hitad6fb3e.el7",
"8.20140605hgacf1c26e3019.el7",
"8.20140605hgacf1c26e3029.el7",
"22.svn457.el7",
"22.svn458.el7",
]
print "\t// tests generated with version_test.py"
print "\ttests := []VerTest{"
for x in versions:
for y in versions:
print "\t\tVerTest{\"" + x + "\", \"" + y + "\",", rpm.labelCompare(("0", "0", x), ("0", "0", y)), "},"
print "\t}"
|
py | 1a35e8452e7d752ba288a3bf9a03b2e7449200de | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import joblib
from drain3.drain import Drain
import numpy as np
collections = joblib.load("results/collections.joblib")
labels = joblib.load("results/labels.joblib")
containers = joblib.load("results/containers.joblib")
cd = joblib.load("results/matrices_dict.joblib")
dd = joblib.load("results/drain_dict.joblib")
def find_max_value(matrix_dict: dict)->int:
max_value = 0
for _, d1 in matrix_dict.items(): #collections
for _, d2 in d1.items(): #labels
for _, d3 in d2.items(): #containers
test_value = np.amax(d3)
if test_value > max_value:
max_value = test_value
return max_value
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
html.Label('Collection'),
dcc.Dropdown(
id='collections',
options=[{"label": idx, "value": idx} for idx in collections],
value='1',
multi=True
),
html.Label('Label'),
dcc.Dropdown(
id='labels',
options=[{"label": l, "value": l} for l in labels],
value='healthy',
multi=True
),
html.Label('Container'),
dcc.Dropdown(
id='containers',
options=[{"label": c, "value": c} for c in containers],
value='core.soaesb',
multi=True
),
dcc.Graph(id='heatmap')
], style={'columnCount': 1})
@app.callback(
Output('heatmap', 'figure'),
Input('collections', 'value'),
Input('labels', 'value'),
Input('containers', 'value'))
def update_heatmap(collections_set, labels_set, containers_set):
# rows will always be containers and columns can either be labels or collections
if not (len(collections_set) > 1 and len(labels_set) > 1):  # logical 'and'; bitwise '&' binds tighter than '>'
n_cols = len(collections_set) if len(collections_set) > 1 else len(labels_set)
mdict = {i: {} for i in range(len(containers_set))}
cdict = {i: {} for i in range(len(containers_set))}
for i in range(len(containers_set)):
for j in range(n_cols):
if len(collections_set) > 1:
mdict[i][j] = cd[j][labels_set][containers_set[i]]  # 'labels_set' (selected label), not the global 'labels' list
else:
if len(labels_set)>1:
mdict[i][j] = cd[int(collections_set)][labels_set[j]][containers_set[i]]
cdict[i] = [cluster.get_template() for cluster in dd[containers_set[i]].clusters]
else:
mdict[i][j] = cd[int(collections_set)][labels_set][containers_set[i]]
n_cols = len(collections_set) if len(collections_set) > 1 else len(labels_set)
fig = make_subplots(
rows = len(containers_set),
cols = n_cols,
start_cell = "top-left"
)
fig.update_yaxes(showticklabels=False)
# fig.update_layout(margin=dict(t=100, r=100, b=100, l=100),
# width=2000, height=1200,
# autosize=False)
fig.update_coloraxes(
cmin = 0,
cmax = find_max_value(cd)
)
for i in range(len(containers_set)):
for j in range(n_cols):
fig.add_trace(
go.Heatmap(z=mdict[i][j].tolist(),
y=cdict[i]),
row=i+1,
col=j+1)
return fig
if __name__ == '__main__':
app.run_server(host='0.0.0.0', debug=True)
|
py | 1a35e8a96abebc9b5b4e82cb7d2a3505b0b8ee02 | # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that specifically target tfq_unitary_op."""
import numpy as np
from absl.testing import parameterized
import tensorflow as tf
import cirq
from tensorflow_quantum.python import util
from tensorflow_quantum.core.ops import tfq_unitary_op
class UnitaryTest(tf.test.TestCase, parameterized.TestCase):
"""Tests tfq_calculate_unitary."""
def test_calculate_unitary_inputs(self):
"""Make sure the unitary op fails gracefully on bad inputs."""
unitary_op = tfq_unitary_op.get_unitary_op()
n_qubits = 5
batch_size = 5
symbol_names = ['alpha']
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
qubits, symbol_names, batch_size)
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'programs must be rank 1'):
# programs tensor has the wrong shape.
unitary_op(util.convert_to_tensor([circuit_batch]), symbol_names,
symbol_values_array)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_names must be rank 1'):
# symbol_names tensor has the wrong shape.
unitary_op(util.convert_to_tensor(circuit_batch),
np.array([symbol_names]), symbol_values_array)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_values must be rank 2'):
# symbol_values tensor has the wrong shape.
unitary_op(util.convert_to_tensor(circuit_batch), symbol_names,
np.array([symbol_values_array]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_values must be rank 2'):
# symbol_values tensor has the wrong shape 2.
unitary_op(util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array[0])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Unparseable proto'):
# programs tensor has the right type, but invalid value.
unitary_op(['junk'] * batch_size, symbol_names, symbol_values_array)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Could not find symbol in parameter map'):
# symbol_names tensor has the right type, but invalid value.
unitary_op(util.convert_to_tensor(circuit_batch), ['junk'],
symbol_values_array)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# programs tensor has the wrong type.
unitary_op([1] * batch_size, symbol_names, symbol_values_array)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# symbol_names tensor has the wrong type.
unitary_op(util.convert_to_tensor(circuit_batch), [1],
symbol_values_array)
with self.assertRaisesRegex(tf.errors.UnimplementedError, ''):
# symbol_values tensor has the wrong type.
unitary_op(util.convert_to_tensor(circuit_batch), symbol_names,
[['junk']] * batch_size)
with self.assertRaisesRegex(TypeError, 'missing'):
# too few tensors.
# pylint: disable=no-value-for-parameter
unitary_op(util.convert_to_tensor(circuit_batch), symbol_names)
# pylint: enable=no-value-for-parameter
# TODO (mbbrough): determine if we should allow extra arguments ?
with self.assertRaisesRegex(TypeError, 'positional arguments'):
# pylint: disable=too-many-function-args
unitary_op(util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, [])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='cirq.Channel'):
# attempting to use noisy circuit.
noisy_circuit = cirq.Circuit(cirq.depolarize(0.3).on_each(*qubits))
unitary_op(
util.convert_to_tensor([noisy_circuit for _ in circuit_batch]),
symbol_names, symbol_values_array)
@parameterized.parameters([
{
'all_n_qubits': [2, 3]
},
{
'all_n_qubits': [1, 5, 8]
},
])
def test_calculate_unitary_output_padding(self, all_n_qubits):
"""If calculate_unitary is asked to calculate matrices given circuits
acting on different numbers of qubits, the op should return a tensor
padded with zeros up to the size of the largest circuit."""
unitary_op = tfq_unitary_op.get_unitary_op()
circuit_batch = []
for n_qubits in all_n_qubits:
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch += util.random_circuit_resolver_batch(qubits, 1)[0]
tfq_results = unitary_op(util.convert_to_tensor(circuit_batch), [],
[[]] * len(circuit_batch))
results = [cirq.unitary(circuit) for circuit in circuit_batch]
self.assertAllClose(tfq_results.to_list(), results, atol=1e-5)
def test_calculate_unitary_empty(self):
"""Ensure calculate_unitary is consistent with empty circuits."""
unitary_op = tfq_unitary_op.get_unitary_op()
empty_u = cirq.unitary(cirq.Circuit())
tfq_empty_u = unitary_op(util.convert_to_tensor([cirq.Circuit()]), [],
[[]])
self.assertAllClose(tfq_empty_u, [empty_u], atol=1e-5) # wrap in batch.
def test_calculate_unitary_no_circuit(self):
"""Ensure calculate_unitary is consistent with no circuits."""
unitary_op = tfq_unitary_op.get_unitary_op()
no_circuit = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
tfq_empty_u = unitary_op(no_circuit, [], empty_values)
expected_shape = tf.TensorShape([0, None, None])
self.assertEqual(tfq_empty_u.shape.as_list(), expected_shape.as_list())
@parameterized.parameters([{
'n_qubits': 6,
'unitary_op': tfq_unitary_op.get_unitary_op(True)
}, {
'n_qubits': 7,
'unitary_op': tfq_unitary_op.get_unitary_op(True)
}, {
'n_qubits': 6,
'unitary_op': tfq_unitary_op.get_unitary_op(False)
}, {
'n_qubits': 7,
'unitary_op': tfq_unitary_op.get_unitary_op(False)
}])
def test_calculate_unitary_consistency_symbol_free(self, n_qubits,
unitary_op):
"""Test calculate_unitary works without symbols."""
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, _ = util.random_circuit_resolver_batch(qubits, 25)
tfq_results = unitary_op(util.convert_to_tensor(circuit_batch), [],
[[]] * len(circuit_batch))
results = [cirq.unitary(circuit) for circuit in circuit_batch]
self.assertAllClose(tfq_results, results, atol=1e-5)
@parameterized.parameters([{
'n_qubits': 3,
'unitary_op': tfq_unitary_op.get_unitary_op(True)
}, {
'n_qubits': 4,
'unitary_op': tfq_unitary_op.get_unitary_op(True)
}, {
'n_qubits': 3,
'unitary_op': tfq_unitary_op.get_unitary_op(False)
}, {
'n_qubits': 4,
'unitary_op': tfq_unitary_op.get_unitary_op(False)
}])
def test_calculate_unitary_consistency(self, n_qubits, unitary_op):
"""Test that calculate_unitary works with symbols."""
qubits = cirq.GridQubit.rect(1, n_qubits)
symbols = ['alpha', 'beta', 'gamma']
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(qubits, symbols, 25)
values = np.empty((len(circuit_batch), len(symbols)))
for i in range(len(circuit_batch)):
for j in range(len(symbols)):
values[i][j] = resolver_batch[i][symbols[j]]
tfq_results = unitary_op(util.convert_to_tensor(circuit_batch), symbols,
values)
results = []
for circuit, resolver in zip(circuit_batch, resolver_batch):
resolved_circuit = cirq.resolve_parameters(circuit, resolver)
results.append(cirq.unitary(resolved_circuit))
self.assertAllClose(tfq_results, results, atol=1e-5)
if __name__ == "__main__":
tf.test.main()
|
py | 1a35e8ae9c7dd037cd36c0967f42d498fff8e0b3 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional
import numpy as np
import pytest
import cirq
def assert_optimizes(
before: cirq.Circuit,
expected: cirq.Circuit,
optimizer: Optional[Callable[[cirq.Circuit], None]] = None):
if optimizer is None:
optimizer = cirq.MergeSingleQubitGates().optimize_circuit
optimizer(before)
# Ignore differences that would be caught by follow-up optimizations.
followup_optimizations = [
cirq.DropNegligible(),
cirq.DropEmptyMoments()
]
for post in followup_optimizations:
post(before) # type: ignore # error: "object" not callable
post(expected) # type: ignore # error: "object" not callable
try:
assert before == expected
except AssertionError: # coverage: ignore
# coverage: ignore
print("BEFORE")
print(before)
print("EXPECTED")
print(expected)
raise
def test_leaves_singleton():
m = cirq.MergeSingleQubitGates()
q = cirq.NamedQubit('q')
c = cirq.Circuit([cirq.Moment([cirq.X(q)])])
m.optimization_at(c, 0, c.operation_at(q, 0))
cirq.testing.assert_same_circuits(
c,
cirq.Circuit([cirq.Moment([cirq.X(q)])]))
def test_not_both():
with pytest.raises(ValueError):
_ = cirq.MergeSingleQubitGates(
synthesizer=lambda *args: None,
rewriter=lambda *args: None)
def test_combines_sequence():
m = cirq.MergeSingleQubitGates()
q = cirq.NamedQubit('q')
c = cirq.Circuit.from_ops(
cirq.X(q)**0.5,
cirq.Z(q)**0.5,
cirq.X(q)**-0.5)
opt_summary = m.optimization_at(c, 0, c.operation_at(q, 0))
assert opt_summary.clear_span == 3
assert list(opt_summary.clear_qubits) == [q]
assert len(opt_summary.new_operations) == 1
assert isinstance(opt_summary.new_operations[0].gate,
cirq.SingleQubitMatrixGate)
cirq.testing.assert_allclose_up_to_global_phase(
cirq.unitary(opt_summary.new_operations[0]),
cirq.unitary(cirq.Y**0.5),
atol=1e-7)
def test_removes_identity_sequence():
q = cirq.NamedQubit('q')
assert_optimizes(
before=cirq.Circuit([
cirq.Moment([cirq.Z(q)]),
cirq.Moment([cirq.H(q)]),
cirq.Moment([cirq.X(q)]),
cirq.Moment([cirq.H(q)]),
]),
expected=cirq.Circuit())
def test_stopped_at_2qubit():
m = cirq.MergeSingleQubitGates()
q = cirq.NamedQubit('q')
q2 = cirq.NamedQubit('q2')
c = cirq.Circuit([
cirq.Moment([cirq.Z(q)]),
cirq.Moment([cirq.H(q)]),
cirq.Moment([cirq.X(q)]),
cirq.Moment([cirq.H(q)]),
cirq.Moment([cirq.CZ(q, q2)]),
cirq.Moment([cirq.H(q)]),
])
opt_summary = m.optimization_at(c, 0, c.operation_at(q, 0))
assert opt_summary.clear_span == 4
assert list(opt_summary.clear_qubits) == [q]
if len(opt_summary.new_operations) != 0:
assert len(opt_summary.new_operations) == 1
assert isinstance(opt_summary.new_operations[0].gate,
cirq.SingleQubitMatrixGate)
cirq.testing.assert_allclose_up_to_global_phase(
cirq.unitary(opt_summary.new_operations[0]),
np.eye(2),
atol=1e-7)
def test_ignores_2qubit_target():
m = cirq.MergeSingleQubitGates()
q = cirq.NamedQubit('q')
q2 = cirq.NamedQubit('q2')
c = cirq.Circuit([
cirq.Moment([cirq.CZ(q, q2)]),
])
m.optimization_at(c, 0, c.operation_at(q, 0))
cirq.testing.assert_same_circuits(
c,
cirq.Circuit([cirq.Moment([cirq.CZ(q, q2)])]))
def test_ignore_unsupported_gate():
class UnsupportedDummy(cirq.Gate):
pass
q0 = cirq.LineQubit(0)
circuit = cirq.Circuit.from_ops(
UnsupportedDummy()(q0),
)
c_orig = cirq.Circuit(circuit)
cirq.MergeSingleQubitGates().optimize_circuit(circuit)
assert circuit == c_orig
def test_rewrite():
q0 = cirq.LineQubit(0)
q1 = cirq.LineQubit(1)
circuit = cirq.Circuit.from_ops(
cirq.X(q0),
cirq.X(q1),
cirq.Y(q0),
cirq.CZ(q0, q1),
cirq.Y(q1),
)
cirq.MergeSingleQubitGates(
rewriter=lambda ops: cirq.H(ops[0].qubits[0])
).optimize_circuit(circuit)
cirq.DropEmptyMoments().optimize_circuit(circuit)
cirq.testing.assert_same_circuits(circuit, cirq.Circuit.from_ops(
cirq.H(q0),
cirq.H(q1),
cirq.CZ(q0, q1),
cirq.H(q1),
))
def test_merge_single_qubit_gates_into_phased_x_z():
a, b = cirq.LineQubit.range(2)
assert_optimizes(
before=cirq.Circuit.from_ops(
cirq.X(a),
cirq.Y(b)**0.5,
cirq.CZ(a, b),
cirq.H(a),
cirq.Z(a),
),
expected=cirq.Circuit.from_ops(
cirq.X(a),
cirq.Y(b)**0.5,
cirq.CZ(a, b),
cirq.Y(a)**-0.5,
),
optimizer=cirq.merge_single_qubit_gates_into_phased_x_z)
|
py | 1a35eb113ce8f06864df6829eb3cc2ef1d8c0c28 | messages = ["work hard and maybe play hard"]
|
py | 1a35eb8ee469459485eec289375f7969e4f72757 | from __future__ import print_function, division
import itertools
try:
import pathlib
except ImportError:
import pathlib2 as pathlib
import json
import os
def composite_channel(target, image, color, range_min, range_max):
''' Render _image_ in pseudocolor and composite into _target_
Args:
target: Numpy float32 array containing composition target image
image: Numpy uint16 array of image to render and composite
color: Color as r, g, b float array, 0-1
range_min: Threshhold range minimum, 0-65535
range_max: Threshhold range maximum, 0-65535
'''
f_image = (image.astype('float32') - range_min) / (range_max - range_min)
f_image = f_image.clip(0, 1, out=f_image)
for i, component in enumerate(color):
target[:, :, i] += f_image * component
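# --- Added illustrative sketch (not part of the original module) -------------
# A minimal, hypothetical example of calling composite_channel: one uint16
# channel is rendered in pure red into a float32 RGB buffer. numpy is assumed
# to be installed; the shapes and threshold values are placeholders.
def _composite_channel_example():
    import numpy as np
    target = np.zeros((256, 256, 3), dtype='float32')   # RGB composition buffer
    image = np.full((256, 256), 20000, dtype='uint16')  # fake intensity channel
    composite_channel(target, image, color=(1.0, 0.0, 0.0),
                      range_min=1000, range_max=30000)
    return target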
def _calculate_total_tiles(opener, tile_size, num_levels):
tiles = 0
for level in range(num_levels):
(nx, ny) = opener.get_level_tiles(level, tile_size)
tiles += nx * ny
return tiles
def _check_duplicate(group_path, settings, old_rows):
old_settings = next((row for row in old_rows if row['Group Path'] == group_path), {})
return settings == old_settings
def render_color_tiles(opener, output_dir, tile_size, config_rows, logger, progress_callback=None, allow_cache=True):
EXT = 'jpg'
for settings in config_rows:
settings['Source'] = opener.path
print('Processing:', str(opener.path))
output_path = pathlib.Path(output_dir)
if not output_path.exists():
output_path.mkdir(parents=True)
config_path = output_path / 'config.json'
old_rows = []
if allow_cache:
if os.path.exists(config_path):
with open(config_path, 'r') as f:
try:
old_rows = json.load(f)
except json.decoder.JSONDecodeError as err:
print(err)
with open(config_path, 'w') as f:
json.dump(config_rows, f)
num_levels = opener.get_shape()[1]
total_tiles = _calculate_total_tiles(opener, tile_size, num_levels)
progress = 0
if num_levels < 2:
logger.warning(f'Number of levels {num_levels} < 2')
group_dirs = {settings['Group Path']: settings for settings in config_rows}
is_up_to_date = {g: False for g, s in group_dirs.items()}
if allow_cache:
is_up_to_date = {g: _check_duplicate(g, s, old_rows) for g, s in group_dirs.items()}
for level in range(num_levels):
(nx, ny) = opener.get_level_tiles(level, tile_size)
print(' level {} ({} x {})'.format(level, ny, nx))
for ty, tx in itertools.product(range(0, ny), range(0, nx)):
filename = '{}_{}_{}.{}'.format(level, tx, ty, EXT)
for settings in config_rows:
group_dir = settings['Group Path']
if not (output_path / group_dir).exists():
(output_path / group_dir).mkdir(parents=True)
output_file = str(output_path / group_dir / filename)
# Only save file if change in config rows
if not (os.path.exists(output_file) and is_up_to_date[group_dir]):
try:
opener.save_tile(output_file, settings, tile_size, level, tx, ty)
except AttributeError as e:
logger.error(f'{level} ty {ty} tx {tx}: {e}')
else:
logger.warning(f'Not saving tile level {level} ty {ty} tx {tx}')
logger.warning(f'Path {output_file} exists with same rendering settings')
progress += 1
if progress_callback is not None:
progress_callback(progress, len(config_rows)*total_tiles)
|
py | 1a35ec6abebde23279c601c42e2977063819f0f9 | import errno
import os
import random
import re
import shutil
import subprocess
import sys
import textwrap
import uuid
from datetime import date
from distutils.core import Command
import boto3
import pkg_resources
import requests
from botocore.handlers import disable_signing
from cookiecutter.main import cookiecutter
from setuptools.command import easy_install
def download_url(url, download_dir):
filename = os.path.join(download_dir, os.path.basename(url))
if not os.path.exists(filename):
with open(filename, 'wb') as f:
response = requests.get(url, stream=True)
total = response.headers.get('content-length')
if total is None:
f.write(response.content)
else:
downloaded = 0
total = int(total)
for data in response.iter_content(chunk_size=max(int(total / 1000), 1024 * 1024)):
downloaded += len(data)
f.write(data)
done = int(50 * downloaded / total)
print('\r{}{} {}%'.format('█' * done, '.' * (50-done), 2*done), end='', flush=True)
print()
else:
print('Already downloaded')
return filename
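# --- Added illustrative sketch (not part of the original module) -------------
# download_url caches the file under download_dir and returns the local path;
# on a second call it prints 'Already downloaded' and skips the request. The
# URL and directory below are placeholders, not real support-package endpoints.
def _download_url_example():
    return download_url('https://example.com/support/Python-3.7-example.tar.gz',
                        download_dir='/tmp')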
class app(Command):
description = "Create a native application to wrap this project"
user_options = [
('dir=', 'd',
"Directory to put the project in"),
('formal-name=', None,
"Formal name for the project"),
('class-name=', None,
"Entry class name for the project"),
('organization-name=', None,
"Name of the organization managing the project"),
('template=', None,
"Template (or template repository URL) to use."),
('bundle', None,
'Bundle identifier for the author organization - usually a reversed domain (e.g., "org.python")'),
('icon=', None,
"Name of the icon file."),
('guid=', None,
"GUID identifying the app."),
('secret-key=', None,
"Secret key for the app."),
('splash=', None,
"Name of the splash screen file."),
('app-requires', None,
'List of platform-specific requirements for this app.'),
('support-pkg=', None,
'URL for the support package to use'),
('download-dir=', None,
"Directory where the project support packages will be cached"),
('build', 'b',
"Build the project after generating"),
('start', 's',
"Start the application after building"),
('os-version=', None,
"Set the device OS version. (e.g., iOS 10.2)"),
('device-name=', None,
"Set the device to run. (e.g., iPhone 7 Plus)"),
('background-image=', None,
"Name of the background image file (macOS .dmg only)"),
('sanitize-version', None,
"Forces installer version to only contain numbers."),
('clean', None,
"Delete any artifacts from previous run"),
]
def initialize_options(self):
self.dir = None
self.formal_name = None
self.class_name = None
self.organization_name = None
self.template = None
self.bundle = None
self.icon = None
self.splash = None
self.app_requires = None
self.support_pkg = None
self.support_dir = None
self.download_dir = None
self.document_types = None
self.version_code = None
self.guid = None
self.secret_key = None
self.build = False
self.start = False
self.os_version = None
self.device_name = None
self.sanitize_version = None
self.clean = None
self.background_image = None
def finalize_options(self):
if self.formal_name is None:
self.formal_name = self.distribution.get_name().title()
if self.class_name is None:
CLASS_NAME_CHARS = re.compile('[^a-zA-Z]')
self.class_name = CLASS_NAME_CHARS.sub('', self.formal_name.title())
if self.organization_name is None:
self.organization_name = self.distribution.get_author().title()
if self.bundle is None:
if self.distribution.get_author_email():
domain = self.distribution.get_author_email().split('@')[-1]
else:
domain = 'org.python'
self.bundle = '.'.join(reversed(domain.split('.')))
if self.download_dir is None:
self.download_dir = os.path.expanduser(os.path.join('~', '.briefcase'))
if self.document_types is None:
self.document_types = {}
# The Version Code is a pure-string, numerically sortable
# version number.
match = re.match(r'(?P<major>\d+)(\.(?P<minor>\d+)(\.(?P<revision>\d+))?)?', self.distribution.get_version())
self._numeric_version_parts = (
int(match.groups()[0]) if match.groups()[0] else 0,
int(match.groups()[2]) if match.groups()[2] else 0,
int(match.groups()[4]) if match.groups()[4] else 0,
)
self.version_code = '%02d%02d%02d' % self._numeric_version_parts
self.version_numeric = '%d.%d.%d' % self._numeric_version_parts
# The app's GUID (if not manually specified) is a namespace UUID
# based on the URL for the app.
if self.guid is None:
self.guid = uuid.uuid3(uuid.NAMESPACE_URL, self.distribution.get_url())
# The secret key is 40 characters of entropy
if self.secret_key is None:
self.secret_key = ''.join(random.choice("abcdefghijklmnopqrstuvwxyz0123456789") for i in range(40))
# Ensure the download directory exists
try:
os.makedirs(self.download_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if self.start:
self.build = True
def find_support_pkg(self):
# Get an S3 client, and disable signing (so we don't need credentials)
S3_BUCKET = 'pybee-briefcase-support'
S3_REGION = 'us-west-2'
S3_URL = 'https://{}.s3-{}.amazonaws.com/'.format(S3_BUCKET, S3_REGION)
s3 = boto3.client('s3', region_name=S3_REGION)
s3.meta.events.register('choose-signer.s3.*', disable_signing)
top_build_number = 0
top_build = None
paginator = s3.get_paginator('list_objects')
for page in paginator.paginate(
Bucket=S3_BUCKET,
Prefix='{}/{}.{}/{}/'.format(
self.support_project,
sys.version_info.major,
sys.version_info.minor,
self.platform
)):
for item in page.get('Contents', []):
build_number = int(
item['Key'].rstrip('.tar.gz').split('.')[-1].lstrip('b'))
if build_number > top_build_number:
top_build_number = build_number
top_build = item['Key']
if top_build:
return S3_URL + top_build
else:
return None
@property
def app_dir(self):
return os.path.join(os.getcwd(), self.resource_dir, 'app')
@property
def app_packages_dir(self):
return os.path.join(os.getcwd(), self.resource_dir, 'app_packages')
@property
def version(self):
return self.distribution.get_version()
@property
def _python_version(self):
return '{}.{}'.format(sys.version_info.major, sys.version_info.minor)
def generate_app_template(self, extra_context=None):
print(" * Writing application template...")
if self.sanitize_version and self.version_numeric != self.version:
print(" ! Version currently contains characters: {}".format(self.version))
print(" ! Installer version sanitized to: {}".format(self.version_numeric))
extra_context = extra_context or {}
extra_context['version'] = self.version_numeric
if self.template is None:
template_path = os.path.expanduser('~/.cookiecutters/Python-{}-template'.format(self.platform))
if os.path.exists(template_path):
self.template = template_path
self._git_fetch(template_path)
self._git_checkout(template_path)
if not self._has_cookiecutter_json(template_path):
print("Directory {} isn't a valid template (no cookiecutter.json found).".format(template_path))
sys.exit(1)
self._git_pull(template_path)
else:
self.template = 'https://github.com/pybee/Python-{}-template.git'.format(self.platform)
print("Project template: {}".format(self.template))
_extra_context = {
'app_name': self.distribution.get_name(),
'formal_name': self.formal_name,
'class_name': self.class_name,
'organization_name': self.organization_name,
'author': self.distribution.get_author(),
'description': self.distribution.get_description(),
'dir_name': self.dir,
'bundle': self.bundle,
'year': date.today().strftime('%Y'),
'month': date.today().strftime('%B'),
'version': self.version,
'version_code': self.version_code,
'guid': self.guid,
'secret_key': self.secret_key,
'document_types': self.document_types,
}
if extra_context:
_extra_context.update(extra_context)
cookiecutter(
self.template,
no_input=True,
checkout=self._python_version,
extra_context=_extra_context
)
def _has_cookiecutter_json(self, template_path):
cookiecutter_json_path = os.path.join(template_path, 'cookiecutter.json')
return os.path.exists(cookiecutter_json_path)
def _get_all_branches(self, path):
branches = subprocess.check_output(["git", "ls-remote", "--heads"], stderr=subprocess.STDOUT, cwd=path)
branches = branches.decode('utf-8').splitlines()
branches = branches[1:]
all_branches = [name.rsplit("/", 1)[1] for name in branches]
return all_branches
def _git_fetch(self, path):
subprocess.Popen(["git", "fetch"], cwd=path).wait()
def _git_checkout(self, path):
try:
subprocess.check_output(["git", "checkout", self._python_version], stderr=subprocess.STDOUT, cwd=path)
except subprocess.CalledProcessError:
print("There is no branch for Python version %r (existing branches: " %
self._python_version, ", ".join(self._get_all_branches(path)) + ").")
def _git_pull(self, path):
template_name = path.split('/')[-1]
try:
subprocess.check_output(["git", "pull"], stderr=subprocess.STDOUT, cwd=path)
print('Template {} successfully updated.'.format(template_name))
except subprocess.CalledProcessError as pull_error:
error_message = pull_error.output.decode('utf-8')
if 'resolve host' in error_message:
print('Unable to update template {}, using unpulled.'.format(template_name))
print(error_message)
def install_app_requirements(self):
print(" * Installing requirements...")
if self.distribution.install_requires:
subprocess.Popen(
[
"pip", "install",
"--upgrade",
"--force-reinstall",
'--target={}'.format(self.app_packages_dir)
] + self.distribution.install_requires,
).wait()
else:
print("No requirements.")
def install_platform_requirements(self):
print(" * Installing platform requirements...")
if self.app_requires:
subprocess.Popen(
[
"pip", "install",
"--upgrade",
"--force-reinstall",
'--target={}'.format(self.app_packages_dir)
] + self.app_requires,
).wait()
else:
print("No platform requirements.")
def install_code(self):
print(" * Installing project code...")
subprocess.Popen(
[
"pip", "install",
"--upgrade",
"--force-reinstall",
"--no-dependencies",
'--target={}'.format(self.app_dir),
'.'
],
).wait()
@property
def launcher_header(self):
"""
Optionally override the shebang line for launcher scripts
This should return a suitable relative path which will find the
bundled python for the relevant platform if the setuptools default
is not suitable.
"""
return None
@property
def launcher_script_location(self):
return self.app_dir
def install_launch_scripts(self):
exe_names = []
if self.distribution.entry_points:
print(" * Creating launchers...")
subprocess.Popen(
[
"pip", "install",
"--upgrade",
"--force-reinstall",
'--target={}'.format(self.app_dir),
'setuptools'
],
).wait()
rel_sesources = os.path.relpath(self.resource_dir, self.launcher_script_location)
rel_sesources_split = ', '.join(["'%s'" % f for f in rel_sesources.split(os.sep)])
easy_install.ScriptWriter.template = textwrap.dedent("""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
__requires__ = %(spec)r
import os
import re
import sys
import site
from os.path import dirname, abspath, join
resources = abspath(join(dirname(__file__), {}))
site.addsitedir(join(resources, 'app'))
site.addsitedir(join(resources, 'app_packages'))
os.environ['PATH'] += os.pathsep + resources
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point(%(spec)r, %(group)r, %(name)r)()
)
""".format(rel_sesources_split)).lstrip()
ei = easy_install.easy_install(self.distribution)
for dist in pkg_resources.find_distributions(self.app_dir):
# Note: this is a different Distribution class to self.distribution
ei.args = True # Needs something to run finalize_options
ei.finalize_options()
ei.script_dir = self.launcher_script_location
for args in easy_install.ScriptWriter.best().get_args(dist, header=self.launcher_header):
ei.write_script(*args)
# Grab names of launchers
for entry_points in dist.get_entry_map().values():
exe_names.extend(entry_points.keys())
if self.formal_name not in exe_names:
print(" ! No entry_point matching formal_name, \n"
" template builtin script will be main launcher.")
return exe_names
def install_resources(self):
if self.icon:
print(" * Adding icons...")
self.install_icon()
else:
print(" * No icons defined - using default...")
if self.splash:
print(" * Adding splash screens...")
self.install_splash()
else:
print(" * No splash screen defined...")
def install_support_package(self):
if self.support_pkg is None:
print(" * Determining best support package...")
self.support_pkg = self.find_support_pkg()
if self.support_dir is None:
self.support_dir = self.resource_dir
if self.support_pkg:
print(" * Installing support package...")
print("Support package:", self.support_pkg)
# Download and unpack the support package.
filename = download_url(url=self.support_pkg, download_dir=self.download_dir)
destination = os.path.join(os.getcwd(), self.support_dir)
shutil.unpack_archive(filename, extract_dir=destination)
else:
print()
print("No pre-built support package could be found for Python %s.%s." %
(sys.version_info.major, sys.version_info.minor))
print("You will need to compile your own. You may want to start with")
print("the code from https://github.com/pybee/%s and" % self.support_project)
print("then specify the compiled tarball with:")
print()
print(" python setup.py {} --support-pkg=<path to tarball>".format(self.platform.lower()))
print()
sys.exit(1)
def install_extras(self):
pass
def build_app(self):
pass
def run_app(self):
pass
def post_install(self):
print()
print("Installation complete.")
def post_build(self):
print()
print("Build complete.")
def start_app(self):
print("Don't know how to start {} applications.".format(self.platform))
def post_start(self):
print()
print("App started.")
def run(self):
full_generation = True
if os.path.exists(self.dir):
print()
if self.clean:
print(" * Deleting existing content...")
if os.path.isdir(self.dir):
shutil.rmtree(self.dir)
else:
os.remove(self.dir)
else:
print(" * Updating user code...")
full_generation = False
if full_generation:
self.generate_app_template()
self.install_support_package()
self.install_app_requirements()
self.install_platform_requirements()
self.install_code()
self.install_launch_scripts()
self.install_resources()
self.install_extras()
self.post_install()
if self.build:
success = self.build_app()
if success is None or success is True:
self.post_build()
if self.start:
self.start_app()
self.post_start()
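# --- Added illustrative sketch (not part of the original module) -------------
# launcher_header above returns None, so setuptools' default shebang is used
# for generated launcher scripts. A platform-specific command could override
# it to point at a bundled interpreter; the subclass name and shebang below
# are hypothetical placeholders, not real code from this project.
class _example_platform(app):
    @property
    def launcher_header(self):
        return '#!/usr/bin/env python3'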
|
py | 1a35ed35bd214d9f7577d1f70cff12faa471617b | import RPi.GPIO as GPIO
import time
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(12, GPIO.OUT)
p = GPIO.PWM(12, 50)
duty = 0
p.start(duty)
for change_duty in range(0,101,10):
p.ChangeDutyCycle(change_duty)
time.sleep(0.1)
for change_duty in range(100, -1, -10):
p.ChangeDutyCycle(change_duty)
time.sleep(0.1)
p.stop()
|
py | 1a35ed7c1d6bc1e6e7cd566aa36bd276ff3ec01d | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdknas.endpoint import endpoint_data
class CreateAccessRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'NAS', '2017-06-26', 'CreateAccessRule','nas')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_RWAccessType(self):
return self.get_query_params().get('RWAccessType')
def set_RWAccessType(self,RWAccessType):
self.add_query_param('RWAccessType',RWAccessType)
def get_SourceCidrIp(self):
return self.get_query_params().get('SourceCidrIp')
def set_SourceCidrIp(self,SourceCidrIp):
self.add_query_param('SourceCidrIp',SourceCidrIp)
def get_UserAccessType(self):
return self.get_query_params().get('UserAccessType')
def set_UserAccessType(self,UserAccessType):
self.add_query_param('UserAccessType',UserAccessType)
def get_Priority(self):
return self.get_query_params().get('Priority')
def set_Priority(self,Priority):
self.add_query_param('Priority',Priority)
def get_AccessGroupName(self):
return self.get_query_params().get('AccessGroupName')
def set_AccessGroupName(self,AccessGroupName):
self.add_query_param('AccessGroupName',AccessGroupName)
def get_FileSystemType(self):
return self.get_query_params().get('FileSystemType')
def set_FileSystemType(self,FileSystemType):
self.add_query_param('FileSystemType',FileSystemType)
|
py | 1a35ede9c4b851009acb256e76d50c5244f2db08 | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 176750.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 328000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
|
py | 1a35ee1aed990ae83e08908ff6c6171cd8023d10 | '''
@author:yk
Style transfer based on histogram transformation (histogram matching).
Edit os.chdir below, then run: python style.py xx.jpg (image to transform) xx.jpg (target-style image)
'''
import cv2 as cv
import numpy as np
import random
import os
import matplotlib.pyplot as plt
import sys
os.chdir("C:\\Users\\m\\Desktop\\第三次作业")
def show(img,name="img"): # display an image
cv.imshow(name,img)
cv.waitKey(0)
cv.destroyAllWindows()
def read(name): # read an image as grayscale
return cv.imread(name+".bmp",0)
def hist_equal(img): # histogram equalization helper: cumulative proportion (CDF) of each gray level
M,N=img.shape
s=np.zeros([256,1])
    for j in range(M): # visit every pixel value
for k in range(N):
            s[img[j][k]]+=1 # increment the bin for this gray level
for i in range(1,256):
        s[i]=s[i-1]+s[i] # cumulative sum
s=s/(M*N)
return s
def hist_match(src,dst): # histogram matching
M1,N1=src.shape
M2,N2=dst.shape
    s=hist_equal(src) # sk: CDF of src
    z=hist_equal(dst) # zk: CDF of dst
    g=np.zeros([256]) # initialize the mapping g
index=0
    for i in range(256): # for each sk, find the closest zk and use that index as the mapped value
mins=1000
for j in range(256):
k=abs(s[i]-z[j])
if k < mins:
mins=k
index=j
g[i]=index
return g
def img_trans(img,g): # apply the mapping g to the source image and return the transformed image
M,N=img.shape
dst=np.zeros(img.shape,dtype=np.uint8)
for i in range(M):
for j in range(N):
dst[i][j]=g[img[i][j]]
return dst
def img_enhance(img1,img2): # return img1 remapped to match img2's histogram (also computes its histogram)
g=hist_match(img1,img2)
dst=img_trans(img1,g)
hist=cv.calcHist([dst],[0],None,[256],[0,256])
plt.plot(hist)
plt.ylim([0,10000])
plt.clf()
return dst
if __name__ =="__main__":
name1=sys.argv[1]
name2=sys.argv[2]
orig1=cv.imread(name1)
orig2=cv.imread(name2)
b1,g1,r1=cv.split(orig1)
b2,g2,r2=cv.split(orig2)
dst1=img_enhance(b1,b2)
dst2=img_enhance(g1,g2)
dst3=img_enhance(r1,r2)
dst=cv.merge([dst1,dst2,dst3])
show(dst)
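# A minimal programmatic sketch of the same matching on single-channel images;
# the file names below are hypothetical placeholders:
#   src = cv.imread("content.jpg", 0)
#   ref = cv.imread("style.jpg", 0)
#   matched = img_enhance(src, ref)  # remap src's gray levels toward ref's histogram
#   show(matched, "matched")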
|
py | 1a35efbe6ddda62e41b094d34c64ab55d4dc6f78 | """
Divide By Mean
==============
"""
import logging
from functools import partial
import numpy as np
from .fitness_normalizer import FitnessNormalizer
logger = logging.getLogger(__name__)
class DivideByMean(FitnessNormalizer):
"""
Divides fitness values by the population mean.
While this function can be used if the fitness value of each
:class:`.Molecule` in the population is a single
number, it is most useful when the fitness value is a
:class:`tuple` of numbers. In this case, it is necessary to somehow
combine the numbers so that a single fitness value is produced.
For example, take a fitness value which is the vector holding the
properties ``[energy, diameter, num_atoms]``. For a given molecule
these numbers may be something like ``[200,000, 12, 140]``. If we
were to sum these numbers, the energy term would dominate the final
fitness value. In order to combine these numbers we can divide them
by the population averages. For example, if the average energy
    of molecules in the population is ``300,000``, the average diameter
    is ``10`` and the average number of atoms is ``70``, then the
    fitness vector would be scaled to roughly ``[0.67, 1.2, 2]``. These
    numbers are now of a similar magnitude and can be summed to give a
    reasonable value. After division, each value represents how
much better than the population average each property value is.
In essence we have removed the units from each parameter.
Examples
--------
*Selectively Normalizing Fitness Values*
Sometimes you do not want to normalize all the values in a
population together. For example, if a failed fitness value
calculation resulted in some records having a fitness value of
``None``, you would want to ignore these records from the
    normalization.
.. testcode:: selectively-normalizing-fitness-values
import stk
import numpy as np
building_block = stk.BuildingBlock(
smiles='BrCCBr',
functional_groups=[stk.BromoFactory()],
)
population = (
stk.MoleculeRecord(
topology_graph=stk.polymer.Linear(
building_blocks=(building_block, ),
repeating_unit='A',
num_repeating_units=2,
),
).with_fitness_value(
fitness_value=(1., 2., 3.),
normalized=False,
),
# This will have a fitness value of None.
stk.MoleculeRecord(
topology_graph=stk.polymer.Linear(
building_blocks=(building_block, ),
repeating_unit='A',
num_repeating_units=2,
),
),
)
mean_scaler = stk.DivideByMean(
# Only normalize values which are not None.
filter=lambda population, record:
record.get_fitness_value() is not None
)
# Calling mean_scaler.normalize() will return a new
# population holding the molecule records with normalized
# fitness values.
normalized_population = tuple(mean_scaler.normalize(
population=population,
))
normalized_record1, normalized_record2 = normalized_population
assert np.all(np.equal(
normalized_record1.get_fitness_value(),
(1, 1, 1),
))
"""
def __init__(self, filter=lambda population, record: True):
"""
Initialize a :class:`.DivideByMean` instance.
Parameters
----------
filter : :class:`callable`, optional
Takes two parameters, first is a :class:`tuple`
of :class:`.MoleculeRecord` instances,
and the second is a :class:`.MoleculeRecord`. The
:class:`callable` returns ``True`` or ``False``. Only
molecules which return ``True`` will have fitness values
normalized. By default, all molecules will have fitness
values normalized.
The instance passed to the `population` argument of
:meth:`.normalize` is passed as the first argument, while
the second argument will be passed every
:class:`.MoleculeRecord` in it, one at a time.
"""
self._filter = filter
def normalize(self, population):
filtered = filter(
partial(self._filter, population),
population,
)
mean = np.mean(
a=[record.get_fitness_value() for record in filtered],
axis=0,
)
logger.debug(f'Means used: {mean}')
for record in population:
if self._filter(population, record):
yield record.with_fitness_value(
fitness_value=np.divide(
record.get_fitness_value(),
mean,
)
)
else:
yield record
|
py | 1a35efedfaa4fea5cdcef6e27c36fecbf5ebdfc6 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow statements: loops, conditionals, etc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
def for_stmt(iter_, extra_test, body, init_state):
"""Functional form of a for statement.
The loop operates on a state, which includes all symbols that are
variant across loop iterations, excluding the iterate as well as the
variables local to the loop.
For example, given the loop below that calculates the geometric and
  arithmetic means of some numbers:
geo_mean = 1
arith_mean = 0
for i in range(n):
a = numbers[i]
geo_mean *= a
arith_mean += a
The state is represented by the variables geo_mean and arith_mean. The
  argument for init_state may contain the tuple (1, 0), the body will
  include the arguments geo_mean and arith_mean and will return a tuple
  representing the new values for geo_mean and arith_mean, respectively.
Args:
iter_: The entity being iterated over.
extra_test: Callable with the state as arguments, and boolean return type.
An additional loop condition.
body: Callable with the iterate and the state as arguments, and
state as return type. The actual loop body.
init_state: Tuple containing the initial state.
Returns:
Tuple containing the final state.
"""
if tensor_util.is_tensor(iter_):
return _known_len_for_stmt(iter_, extra_test, body, init_state)
elif isinstance(iter_, dataset_ops.DatasetV2):
return _dataset_for_stmt(iter_, extra_test, body, init_state)
else:
return _py_for_stmt(iter_, extra_test, body, init_state)
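# A minimal sketch of the docstring's example in functional form, assuming
# `numbers` is a plain Python list (so the _py_for_stmt overload runs):
#
#   numbers = [2.0, 8.0]
#   geo_mean, arith_mean = for_stmt(
#       iter_=numbers,
#       extra_test=lambda geo_mean, arith_mean: True,
#       body=lambda a, geo_mean, arith_mean: (geo_mean * a, arith_mean + a),
#       init_state=(1.0, 0.0))
#   # geo_mean == 16.0, arith_mean == 10.0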
def _py_for_stmt(iter_, extra_test, body, init_state):
"""Overload of for_stmt that executes a Python for loop."""
state = init_state
for target in iter_:
if not extra_test(*state):
break
state = body(target, *state)
# TODO(mdan): Remove this special case.
if len(state) == 1:
return state[0]
return state
def _known_len_for_stmt(iter_, extra_test, body, init_state):
"""Overload of for_stmt that iterates over objects that admit a length."""
n = py_builtins.len_(iter_)
def while_body(iterate_index, *state):
iterate = iter_[iterate_index]
new_state = body(iterate, *state)
return (iterate_index + 1,) + new_state
def while_cond(iterate_index, *state):
return gen_math_ops.logical_and(iterate_index < n, extra_test(*state))
results = while_stmt(
while_cond,
while_body,
init_state=(0,) + init_state,
extra_deps=(iter_,),
opts=dict(maximum_iterations=n))
# Dropping the iteration index because it's not syntactically visible.
results = results[1:]
# TODO(mdan): Remove this special case.
if len(results) == 1:
return results[0]
return results
def _dataset_for_stmt(ds, extra_test, body, init_state):
"""Overload of for_stmt that iterates over TF Datasets."""
  # Because Datasets only expose get_next, in the style of Python iterators,
# we are forced to unpack the loop as:
#
# epoch_number, iterate = ds.get_next()
# while epoch_number < 2:
# <body>
# epoch_number, iterate = ds.get_next()
epoch_numbers = dataset_ops.Dataset.range(2)
def tag_with(ds, tag):
return dataset_ops.Dataset.zip(
(dataset_ops.Dataset.from_tensors(tag).repeat(), ds))
ds_with_epoch = epoch_numbers.flat_map(lambda i: tag_with(ds, i))
iterator = ds_with_epoch.make_initializable_iterator()
with ops.control_dependencies((iterator.initializer,)):
epoch_number, iterate = iterator.get_next()
def while_body(epoch_number, iterate, *state):
new_state = body(iterate, *state)
epoch_number, iterate = iterator.get_next()
return (epoch_number, iterate) + new_state
def while_cond(epoch_number, iterate, *state):
del iterate
return gen_math_ops.logical_and(epoch_number < 1, extra_test(*state))
results = while_stmt(
while_cond,
while_body,
init_state=(epoch_number, iterate) + init_state,
extra_deps=())
# Dropping the epoch number and iterate because they are not syntactically
# visible.
results = results[2:]
# TODO(mdan): Remove this special case.
if len(results) == 1:
return results[0]
return results
def while_stmt(test, body, init_state, extra_deps, opts=None):
"""Functional form of a while statement.
The loop operates on a so-called state, which includes all symbols that are
variant across loop iterations. In what follows we refer to state as either
a tuple of entities that represent an actual state, or a list of arguments
of the corresponding types.
Args:
test: Callable with the state as arguments, and boolean return type.
The loop condition.
body: Callable with the state as arguments, and state as return type.
The actual loop body.
init_state: Tuple containing the initial state.
extra_deps: Tuple containing additional entities on which the loop may
depend, such as loop invariants referenced by test. Used
exclusively for dispatch control.
opts: Optional dict of extra loop parameters.
Returns:
Tuple containing the final state.
"""
# TODO(mdan): Consider adding a generic mechanism for dynamic dispatch.
# That could be something as simple as a collection of dispatch rules, with
# some prioritization.
if any(tensor_util.is_tensor(v) for v in init_state + extra_deps):
return _tf_while_stmt(test, body, init_state, opts)
else:
return _py_while_stmt(test, body, init_state, opts)
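# A minimal sketch with plain Python values in init_state and an empty
# extra_deps, so the _py_while_stmt overload runs:
#
#   final_i, total = while_stmt(
#       test=lambda i, total: i < 5,
#       body=lambda i, total: (i + 1, total + i),
#       init_state=(0, 0),
#       extra_deps=())
#   # final_i == 5, total == 10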
def _tf_while_stmt(test, body, init_state, opts):
"""Overload of while_stmt that stages a TF while_stmt."""
if opts is None:
opts = {}
return control_flow_ops.while_loop(test, body, init_state, **opts)
def _py_while_stmt(test, body, init_state, opts):
"""Overload of while_stmt that executes a Python while loop."""
del opts
state = init_state
while test(*state):
state = body(*state)
return state
def if_stmt(cond, body, orelse):
"""Functional form of an if statement.
Args:
cond: Boolean.
body: Callable with no arguments, and outputs of the positive (if) branch
as return type.
orelse: Callable with no arguments, and outputs of the negative (else)
branch as return type.
Returns:
Tuple containing the statement outputs.
"""
if tensor_util.is_tensor(cond):
return tf_if_stmt(cond, body, orelse)
else:
return _py_if_stmt(cond, body, orelse)
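# A minimal sketch, the functional form of `x = n + 1 if n > 0 else n - 1`,
# assuming `n` is a plain Python number (so _py_if_stmt runs):
#
#   n = 3
#   x = if_stmt(n > 0, lambda: n + 1, lambda: n - 1)
#   # x == 4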
def tf_if_stmt(cond, body, orelse):
"""Overload of if_stmt that stages a TF cond."""
return control_flow_ops.cond(cond, body, orelse)
def _py_if_stmt(cond, body, orelse):
"""Overload of if_stmt that executes a Python if statement."""
return body() if cond else orelse()
|
py | 1a35eff4c8b49f70cff61547f2faf3e35edc6b14 | import numpy as np
import h5py
import pandas as pd
from typing import Any, Callable
from scipy.stats import binned_statistic
from scipy.interpolate import interp1d
from sklearn.utils import resample
from imblearn.over_sampling import SMOTE
def get_data(arg_label:str,
boxsize:int=100,
path_to_file:str="/cosma7/data/dp004/dc-cues1/tng_dataframes/",
):
"""
"""
filename = f"merged_dataframe_{boxsize}.h5"
hdf5_filename = path_to_file + filename
df = pd.read_hdf(hdf5_filename, key="df", mode="r")
df = df.fillna(-9999.)
ids = df.ID_DMO
drop_list=["N_gals", "M_stars_central", "total_M_stars",
"x_hydro", "y_hydro", "z_hydro",
"x_dmo", "y_dmo", "z_dmo",
"M200_HYDRO", "ID_HYDRO", "ID_DMO",
"Group_R_Crit200", #"CentralVmax", #"m2500c",
"vrms_2500c", "vrms_200c", "vrms_std_2500c",
"CentralMassInMaxRad",
"displacement",
'vrms_std_200c', 'beta2500c',
"concentration_nfw"
]
# Chose label
if arg_label == "dark_or_light":
df["labels"] = df.N_gals > 0
df = df.drop(columns=drop_list)
elif arg_label == "nr_of_satellites":
df["labels"] = df.N_gals - 1
df = df[df.N_gals > 1]
df = df.drop(columns=drop_list)
elif arg_label == "stellar_mass":
df["labels"] = np.log10(df.M_stars_central)
df["labels"] = df["labels"].replace([-np.inf, np.inf], 0.)
df = df.drop(columns=drop_list)
elif arg_label == "both":
df["labels"] = df.N_gals > 0
'''
keep_list = [
"Formation Time", "CentralVmax", "CentralHalfmassRad", "concentration_prada", "Spin",
"env_10", "labels",
]
df = df[keep_list]
'''
return df.drop(columns="labels"), df.labels
def load_positions(test_idx = None,
path_to_file:str="/cosma7/data/dp004/dc-cues1/tng_dataframes/",
boxsize:int=100
):
filename = f"merged_dataframe_{int(boxsize)}.h5"
hdf5_filename = path_to_file + filename
df = pd.read_hdf(hdf5_filename, key="df", mode="r")
if test_idx is not None:
df=df.iloc[test_idx]
hydro_pos = np.vstack([df.x_hydro, df.y_hydro, df.z_hydro]).T
dmo_pos = np.vstack([df.x_dmo, df.y_dmo, df.z_dmo]).T
return hydro_pos, dmo_pos
def _find_transition_regions(df_features: pd.DataFrame, n_centrals):
"""
Function to find two masses: where half the haloes are luminous, and where all haloes are luminous
Args:
df: dataframe containing masses and wheather luminous or dark
Returns:
mass_center: mass at which half of the haloes are luminous.
mass_end: mass at which 100% of haloes are luminous.
"""
nbins = 15
m200c = 10**df_features.M200c
bins = np.logspace(np.log10(np.min(m200c)), 12.5, nbins + 1)
nluminous, mass_edges, _ = binned_statistic(
m200c, n_centrals, statistic="mean", bins=bins
)
interpolator = interp1d(nluminous, (mass_edges[1:] + mass_edges[:-1]) / 2.0)
mass_center = interpolator(0.5)
mass_end = ((mass_edges[1:] + mass_edges[:-1]) / 2.0)[nluminous > 0.99][0]
return np.log10(mass_center), np.log10(mass_end)
def balance_dataset(df_features, df_labels, sampler, split='mass'):
if split == 'mass':
df_features_resampled, df_labels_resampled=_balance_mass_split(df_features,
df_labels, sampler)
else:
df_features_resampled, df_labels_resampled=_balance(df_features, df_labels, sampler)
return df_features_resampled, df_labels_resampled
def _balance(df_features, df_labels, sampler):
sampler_ = sampler(random_state=42)
features_resampled, labels_resampled = sampler_.fit_sample(df_features, df_labels)
df_features_resampled = pd.DataFrame(data=features_resampled,
columns=df_features.columns)
df_labels_resampled= pd.Series(data=labels_resampled)
return df_features_resampled, df_labels_resampled
def _balance_mass_split(
df_features, df_labels, sampler
):
center_transition, end_transition = _find_transition_regions(df_features, df_labels)
df_left_transition_feats, df_left_transition_labels = _balance_df_given_mass(
df_features, df_labels, 0.0, center_transition, sampler
)
df_right_transition_feats, df_right_transition_labels = _balance_df_given_mass(
df_features, df_labels, center_transition, 15, sampler
)
df_features = pd.concat([df_left_transition_feats, df_right_transition_feats])
df_labels = pd.concat([df_left_transition_labels, df_right_transition_labels])
return df_features, df_labels
def _balance_df_given_mass(
df_features, df_labels, minimum_mass, maximum_mass, sampler
):
"""
internal function indicated by leading _
"""
mass_threshold = (df_features.M200c > minimum_mass) & (df_features.M200c < maximum_mass)
df_M = df_features[mass_threshold]
df_M_labels = df_labels[mass_threshold]
df_features_resampled, df_labels_resampled = _balance(df_M, df_M_labels, sampler)
return df_features_resampled, df_labels_resampled
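# A minimal end-to-end sketch, assuming the merged dataframe for the 100 Mpc
# box exists at the default path used by get_data:
#   features, labels = get_data("dark_or_light", boxsize=100)
#   features_bal, labels_bal = balance_dataset(features, labels, SMOTE, split='mass')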
|
py | 1a35f1123230943e81e57cb3aedd29e43a92800a | # -*- coding: utf-8 -*-
'''
Execute an unmodified puppet_node_classifier and read the output as YAML. The YAML data is then directly overlaid onto the minion's Pillar data.
'''
# Don't "fix" the above docstring to put it on two lines, as the sphinx
# autosummary pulls only the first line for its description.
# Import python libs
import logging
# Import third party libs
import yaml
# Set up logging
log = logging.getLogger(__name__)
def ext_pillar(minion_id, pillar, command):
'''
Execute an unmodified puppet_node_classifier and read the output as YAML
'''
try:
data = yaml.safe_load(__salt__['cmd.run']('{0} {1}'.format(command, minion_id)))
data = data['parameters']
return data
except Exception:
log.critical(
'YAML data from {0} failed to parse'.format(command)
)
return {}
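# A minimal master-config sketch for wiring this module in; the key must match
# this module's file name, and the classifier path is a hypothetical placeholder:
#
#   ext_pillar:
#     - puppet: /usr/local/bin/puppet_node_classifier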
|
py | 1a35f1be924ad06755d3417d80ea1d8afa46947c | import sys
name=sys.argv[1]
print ("""
## This file contains some of the options that can be changed to customize
## your Ren'Py game. It only contains the most common options... there
## is quite a bit more customization you can do.
##
## Lines beginning with two '#' marks are comments, and you shouldn't
## uncomment them. Lines beginning with a single '#' mark are
## commented-out code, and you may want to uncomment them when
## appropriate.
init -1 python hide:
## Should we enable the use of developer tools? This should be
## set to False before the game is released, so the user can't
## cheat using developer tools.
config.developer = True
## These control the width and height of the screen.
config.screen_width = 800
config.screen_height = 600
## This controls the title of the window, when Ren'Py is
## running in a window.
""" + " config.window_title = u\""+name+"\"" + """
# These control the name and version of the game, that are reported
# with tracebacks and other debugging logs.
""" + " config.name = \""+name+"\""+"""
config.version = "0.0"
#########################################
# Themes
## We then want to call a theme function. theme.roundrect is
## a theme that features the use of rounded rectangles.
##
## The theme function takes a number of parameters that can
## customize the color scheme.
theme.marker(
## Theme: Marker
## Color scheme: Muted Horror
## The color of an idle widget face.
widget = "#777777",
## The color of a focused widget face.
widget_hover = "#73735C",
## The color of the text in a widget.
widget_text = "#404033",
## The color of the text in a selected widget. (For
## example, the current value of a preference.)
widget_selected = "#000000",
## The color of a disabled widget face.
disabled = "#73735C",
## The color of disabled widget text.
disabled_text = "#8C8C70",
## The color of informational labels.
label = "#1A0001",
## The color of a frame containing widgets.
frame = "#555544",
## The background of the main menu. This can be a color
## beginning with '#', or an image filename. The latter
## should take up the full height and width of the screen.
mm_root = "#1A0001",
## The background of the game menu. This can be a color
## beginning with '#', or an image filename. The latter
## should take up the full height and width of the screen.
gm_root = "#1A0001",
## If this is True, the in-game window is rounded. If False,
## the in-game window is square.
rounded_window = False,
## And we're done with the theme. The theme will customize
## various styles, so if we want to change them, we should
## do so below.
)
#########################################
## These settings let you customize the window containing the
## dialogue and narration, by replacing it with an image.
## The background of the window. In a Frame, the two numbers
## are the size of the left/right and top/bottom borders,
## respectively.
# style.window.background = Frame("frame.png", 12, 12)
## Margin is space surrounding the window, where the background
## is not drawn.
# style.window.left_margin = 6
# style.window.right_margin = 6
# style.window.top_margin = 6
# style.window.bottom_margin = 6
## Padding is space inside the window, where the background is
## drawn.
# style.window.left_padding = 6
# style.window.right_padding = 6
# style.window.top_padding = 6
# style.window.bottom_padding = 6
## This is the minimum height of the window, including the margins
## and padding.
# style.window.yminimum = 250
#########################################
## This lets you change the placement of the main menu.
## The way placement works is that we find an anchor point
## inside a displayable, and a position (pos) point on the
## screen. We then place the displayable so the two points are
## at the same place.
## An anchor/pos can be given as an integer or a floating point
## number. If an integer, the number is interpreted as a number
## of pixels from the upper-left corner. If a floating point,
## the number is interpreted as a fraction of the size of the
## displayable or screen.
# style.mm_menu_frame.xpos = 0.5
# style.mm_menu_frame.xanchor = 0.5
# style.mm_menu_frame.ypos = 0.75
# style.mm_menu_frame.yanchor = 0.5
#########################################
## These let you customize the default font used for text in Ren'Py.
## The file containing the default font.
# style.default.font = "DejaVuSans.ttf"
## The default size of text.
# style.default.size = 22
## Note that these only change the size of some of the text. Other
## buttons have their own styles.
#########################################
## These settings let you change some of the sounds that are used by
## Ren'Py.
## Set this to False if the game does not have any sound effects.
config.has_sound = True
## Set this to False if the game does not have any music.
config.has_music = True
## Set this to True if the game has voicing.
config.has_voice = False
## Sounds that are used when button and imagemaps are clicked.
# style.button.activate_sound = "click.wav"
# style.imagemap.activate_sound = "click.wav"
## Sounds that are used when entering and exiting the game menu.
# config.enter_sound = "click.wav"
# config.exit_sound = "click.wav"
## A sample sound that can be played to check the sound volume.
# config.sample_sound = "click.wav"
## Music that is played while the user is at the main menu.
# config.main_menu_music = "main_menu_theme.ogg"
#########################################
## Help.
## This lets you configure the help option on the Ren'Py menus.
## It may be:
## - A label in the script, in which case that label is called to
## show help to the user.
## - A file name relative to the base directory, which is opened in a
## web browser.
## - None, to disable help.
config.help = "README.html"
#########################################
## Transitions.
## Used when entering the game menu from the game.
config.enter_transition = None
## Used when exiting the game menu to the game.
config.exit_transition = None
## Used between screens of the game menu.
config.intra_transition = None
## Used when entering the game menu from the main menu.
config.main_game_transition = None
## Used when returning to the main menu from the game.
config.game_main_transition = None
## Used when entering the main menu from the splashscreen.
config.end_splash_transition = None
## Used when entering the main menu after the game has ended.
config.end_game_transition = None
## Used when a game is loaded.
config.after_load_transition = None
## Used when the window is shown.
config.window_show_transition = None
## Used when the window is hidden.
config.window_hide_transition = None
## Used when showing NVL-mode text directly after ADV-mode text.
config.adv_nvl_transition = dissolve
## Used when showing ADV-mode text directly after NVL-mode text.
config.nvl_adv_transition = dissolve
## Used when yesno is shown.
config.enter_yesno_transition = None
## Used when the yesno is hidden.
config.exit_yesno_transition = None
## Used when entering a replay
config.enter_replay_transition = None
## Used when exiting a replay
config.exit_replay_transition = None
## Used when the image is changed by a say statement with image attributes.
config.say_attribute_transition = None
#########################################
## This is the name of the directory where the game's data is
## stored. (It needs to be set early, before any other init code
## is run, so the persistent information can be found by the init code.)
python early:
config.save_directory = "six-1426033935"
init -1 python hide:
#########################################
## Default values of Preferences.
## Note: These options are only evaluated the first time a
## game is run. To have them run a second time, delete
## game/saves/persistent
## Should we start in fullscreen mode?
config.default_fullscreen = False
## The default text speed in characters per second. 0 is infinite.
config.default_text_cps = 0
## The default auto-forward time setting.
config.default_afm_time = 10
#########################################
## More customizations can go here.
""")
|
py | 1a35f1e34b67c5cbbebdf85f3d3b6ecef62b35c8 | from __future__ import absolute_import
from __future__ import print_function
from amitools.fs.block.Block import *
import amitools.fs.DosType as DosType
class PartitionDosEnv:
valid_keys = ('max_transfer', 'mask', 'num_buffer', 'reserved', 'boot_pri', 'pre_alloc', 'boot_blocks')
def __init__(self, size=16, block_size=128, sec_org=0, surfaces=0, sec_per_blk=1, blk_per_trk=0,
reserved=2, pre_alloc=0, interleave=0, low_cyl=0, high_cyl=0, num_buffer=30,
buf_mem_type=0, max_transfer=0xffffff, mask=0x7ffffffe, boot_pri=0, dos_type=DosType.DOS0,
baud=0, control=0, boot_blocks=0):
self.size = size
self.block_size = block_size
self.sec_org = sec_org
self.surfaces = surfaces
self.sec_per_blk = sec_per_blk
self.blk_per_trk = blk_per_trk
self.reserved = reserved
self.pre_alloc = pre_alloc
self.interleave = interleave
self.low_cyl = low_cyl
self.high_cyl = high_cyl
self.num_buffer = num_buffer
self.buf_mem_type = buf_mem_type
self.max_transfer = max_transfer
self.mask = mask
self.boot_pri = boot_pri
self.dos_type = dos_type
self.baud = baud
self.control = control
self.boot_blocks = boot_blocks
def dump(self):
print("DosEnv")
print(" size: %d" % self.size)
print(" block_size: %d" % self.block_size)
print(" sec_org: %d" % self.sec_org)
print(" surfaces: %d" % self.surfaces)
print(" sec_per_blk: %d" % self.sec_per_blk)
print(" blk_per_trk: %d" % self.blk_per_trk)
print(" reserved: %d" % self.reserved)
print(" pre_alloc: %d" % self.pre_alloc)
print(" interleave: %d" % self.interleave)
print(" low_cyl: %d" % self.low_cyl)
print(" high_cyl: %d" % self.high_cyl)
print(" num_buffer: %d" % self.num_buffer)
print(" buf_mem_type: 0x%08x" % self.buf_mem_type)
print(" max_transfer: 0x%08x" % self.max_transfer)
print(" mask: 0x%08x" % self.mask)
print(" boot_pri: %d" % self.boot_pri)
print(" dos_type: 0x%08x = %s" % (self.dos_type, DosType.num_to_tag_str(self.dos_type)))
print(" baud: %d" % self.baud)
print(" control: %d" % self.control)
print(" boot_blocks: %d" % self.boot_blocks)
def read(self, blk):
self.size = blk._get_long(32)
self.block_size = blk._get_long(33)
self.sec_org = blk._get_long(34)
self.surfaces = blk._get_long(35)
self.sec_per_blk = blk._get_long(36)
self.blk_per_trk = blk._get_long(37)
self.reserved = blk._get_long(38)
self.pre_alloc = blk._get_long(39)
self.interleave = blk._get_long(40)
self.low_cyl = blk._get_long(41)
self.high_cyl = blk._get_long(42)
self.num_buffer = blk._get_long(43)
self.buf_mem_type = blk._get_long(44)
self.max_transfer = blk._get_long(45)
self.mask = blk._get_long(46)
self.boot_pri = blk._get_slong(47)
self.dos_type = blk._get_long(48)
self.baud = blk._get_long(49)
self.control = blk._get_long(50)
self.boot_blocks = blk._get_long(51)
def write(self, blk):
blk._put_long(32, self.size)
blk._put_long(33, self.block_size)
blk._put_long(34, self.sec_org)
blk._put_long(35, self.surfaces)
blk._put_long(36, self.sec_per_blk)
blk._put_long(37, self.blk_per_trk)
blk._put_long(38, self.reserved)
blk._put_long(39, self.pre_alloc)
blk._put_long(40, self.interleave)
blk._put_long(41, self.low_cyl)
blk._put_long(42, self.high_cyl)
blk._put_long(43, self.num_buffer)
blk._put_long(44, self.buf_mem_type)
blk._put_long(45, self.max_transfer)
blk._put_long(46, self.mask)
blk._put_slong(47, self.boot_pri)
blk._put_long(48, self.dos_type)
blk._put_long(49, self.baud)
blk._put_long(50, self.control)
blk._put_long(51, self.boot_blocks)
class PartitionBlock(Block):
FLAG_BOOTABLE = 1
FLAG_NO_AUTOMOUNT = 2
def __init__(self, blkdev, blk_num):
Block.__init__(self, blkdev, blk_num, chk_loc=2, is_type=Block.PART)
def create(self, drv_name, dos_env, host_id=7, next=Block.no_blk, flags=0, dev_flags=0,
size=64):
Block.create(self)
self.size = size
self.host_id = host_id
self.next = next
self.flags = flags
self.dev_flags = dev_flags
self.drv_name = drv_name
if dos_env == None:
dos_env = PartitionDosEnv()
self.dos_env = dos_env
self.valid = True
def write(self):
self._create_data()
self._put_long(1, self.size)
self._put_long(3, self.host_id)
self._put_long(4, self.next)
self._put_long(5, self.flags)
self._put_long(8, self.dev_flags)
self._put_bstr(9, 31, self.drv_name)
self.dos_env.write(self)
Block.write(self)
def read(self):
Block.read(self)
if not self.valid:
return False
self.size = self._get_long(1)
self.host_id = self._get_long(3)
self.next = self._get_long(4)
self.flags = self._get_long(5)
self.dev_flags = self._get_long(8)
self.drv_name = self._get_bstr(9, 31)
self.dos_env = PartitionDosEnv()
self.dos_env.read(self)
return self.valid
def dump(self):
Block.dump(self, "Partition")
print(" size: %d" % self.size)
print(" host_id: %d" % self.host_id)
print(" next: %s" % self._dump_ptr(self.next))
print(" flags: 0x%08x" % self.flags)
print(" dev_flags: 0x%08x" % self.dev_flags)
print(" drv_name: '%s'" % self.drv_name)
self.dos_env.dump()
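# A minimal read/dump sketch, assuming `blkdev` is an amitools block device
# opened on an RDB disk image and `part_blk` is a block number taken from the
# rigid disk block's partition list:
#   pb = PartitionBlock(blkdev, part_blk)
#   if pb.read():
#       pb.dump()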
|
py | 1a35f1e4c6185b86111d58e8cbf7b42bca2ddfe7 | import DSGRN
from DSGRN import *
import networkx as nx
import matplotlib.pyplot as plt
from copy import deepcopy
import os
from all_networks_with_n_nodes_e_edges import *
from save_files import *
from GradientFun import *
from get_FG import *
from get_FP_Poset import *
from networkx_cond import *
def reduce_gradient_graph_to_nodes_of_interest(database, grad_graph, FP_Poset):
c = database.conn.cursor()
FP_keep = [node for node in FP_Poset.keys()]
G = nx.DiGraph() #building networkx graph
for node in grad_graph:
G.add_node(node)
for edge in grad_graph[node]:
G.add_edge(node, edge)
del_list = []
for node in grad_graph:
p = node[-1]
MGI_result = c.execute('select MorseGraphIndex from Signatures where ParameterIndex is ' + str(p))
MGI = MGI_result.fetchone()[0]
FP_result = [row[0] for row in c.execute('select Label from MorseGraphAnnotations where MorseGraphIndex is ' + str(MGI))]
if not set(FP_result).intersection(set(FP_keep)):
del_list.append(node)
for n in del_list: #removes del_list nodes in networkx graph and grad_graph keys
G.remove_node(n)
del grad_graph[n]
return G, grad_graph
def get_product_graph(database, cG, scc, FP_Poset):
'''
cG: condensation graph of gradient graph with only monostable fixed points and nodes in FP_Poset, expects networkx object.
    scc: dictionary of strongly connected components, where keys are node labels in given graph
and values are nodes in original graph the condensation is derived from.
returns: Product Graph, i.e., reduces cG to having only edges that appear in FP_Poset.
Then removes all parts of graph not connected to a node in the start set.
'''
c = database.conn.cursor()
H = nx.DiGraph() #building networkx graph from FP_poset
for node in FP_Poset:
for edge in FP_Poset[node]:
H.add_edge(node, edge)
del_list = [] #currently written with FP repeats
P = deepcopy(cG)
for edge in P.edges():
s = scc[edge[0]][0][-1]
t = scc[edge[1]][0][-1]
sMGI = c.execute('select MorseGraphIndex from Signatures where ParameterIndex is ' + str(s))
MGI = sMGI.fetchone()[0]
sFP = [row[0] for row in c.execute('select Label from MorseGraphAnnotations where MorseGraphIndex is ' + str(MGI))]
tMGI = c.execute('select MorseGraphIndex from Signatures where ParameterIndex is ' + str(t))
MGI = tMGI.fetchone()[0]
tFP = [row[0] for row in c.execute('select Label from MorseGraphAnnotations where MorseGraphIndex is ' + str(MGI))]
keep = False
if (sFP[0],tFP[0]) in H.edges():
keep = True
if sFP[0] == tFP[0]:
keep = True
if keep == False:
del_list.append(edge)
for edge in del_list:
P.remove_edge(edge[0],edge[1])
P.remove_nodes_from(list(nx.isolates(cG)))
start_set = []
for node in P:
p = scc[node][0]
if p[0] == 0 and p[1] == 0:
start_set.append(node)
del_list = []
for node in P.nodes():
for i in start_set:
if i != node:
try:
nx.shortest_path(P, i, node)
break
except:
if i == start_set[-1]:
del_list.append(node)
break
else:
continue
for node in del_list:
P.remove_node(node)
return P
def return_start_stop_set(database, graph, scc, Hb_max, Kni_max, start_FP_list = None, stop_FP_list = None):
'''
    graph: can be in dictionary or networkx form; function expects a condensation graph.
    scc: dictionary of strongly connected components, where keys are node labels in given graph
and values are nodes in original graph the condensation is derived from.
Hb_max, Kni_max: Highest factor graph layers.
    start_FP_list, stop_FP_list: lists of fixed points used to constrain the starting and stopping sets.
    returns: the set of nodes considered starting nodes for a path, and the set of stopping nodes.
'''
c = database.conn.cursor()
start_set = []
stop_set = []
for node in graph:
n = scc[node][0]
#print(node, p)
if n[0] == 0 and n[1] == 0:
if start_FP_list != None:
p = scc[node][0][-1]
MGI_result = c.execute('select MorseGraphIndex from Signatures where ParameterIndex is ' + str(p))
MGI = MGI_result.fetchone()[0]
FP_result = [row[0] for row in c.execute('select Label from MorseGraphAnnotations where MorseGraphIndex is ' + str(MGI))]
if FP_result in start_FP_list:
start_set.append(node)
else:
start_set.append(node)
if n[0] == Hb_max and n[1] == Kni_max:
if stop_FP_list != None:
p = scc[node][0][-1]
MGI_result = c.execute('select MorseGraphIndex from Signatures where ParameterIndex is ' + str(p))
MGI = MGI_result.fetchone()[0]
FP_result = [row[0] for row in c.execute('select Label from MorseGraphAnnotations where MorseGraphIndex is ' + str(MGI))]
if FP_result in stop_FP_list:
stop_set.append(node)
else:
stop_set.append(node)
return start_set, stop_set
def test_any_path_exists_in_product(string, network_filename, database = None, grad_graph = None, reduce = True):
'''
string: network string.
    network_filename: name to save the network text file as (without the .txt extension).
    returns: True if a path exists in the product graph, False otherwise.
'''
# Make DSGRN database
if database == None:
txt_filename = "/home/elizabeth/Desktop/GIT/dsgrn_acdc/networks/" + network_filename + ".txt"
f = open(txt_filename,"w") # Make txt file for network, needed to build DSGRN database
f.write(string)
f.close()
db_filename = "/home/elizabeth/Desktop/GIT/dsgrn_acdc/networks/" + network_filename + ".db"
os.system("mpiexec -n 2 Signatures "+ txt_filename + ' ' + db_filename)
database = Database(db_filename)
out_edges = get_number_out_edges_from_string(string)
Hb_list, Kni_list = get_Hb_Kni_list(database)
Hb_max = len(Hb_list)-1
Kni_max = len(Kni_list)-1
FP_Poset = get_FP_Poset(out_edges)[0]
# If grad_graph has not already been computed for this network, compute it and save.
if grad_graph == None:
gradlist = get_gradlist_strict(database, Hb_list, Kni_list)
grad_graph = get_gradient_graph_parallel(database, gradlist, 7, Hb_list, Kni_list)
grad_graph_filename = "grad_graph_strict_"+network_filename
save_json(grad_graph, grad_graph_filename)
if reduce == True:
G, ngg = reduce_gradient_graph_to_nodes_of_interest(database, grad_graph, FP_Poset)
ngg_filename = "reduced_grad_graph_strict_"+network_filename
save_json(ngg, ngg_filename)
strongcc = strongly_connected_components_by_MGI(G, database)
cG, scc = condensation(G, strongcc)
P = get_product_graph(database, cG, scc, FP_Poset)
start_set, stop_set = return_start_stop_set(database, P, scc, Hb_max, Kni_max)
if start_set == []:
print("Empty start set")
result = False
if stop_set == []:
print("Empty stop set")
result = False
else:
for s in start_set:
for t in stop_set:
try:
nx.shortest_path(cG, s, t)
print('Path exists from ' + str(s) + ' to '+ str(t))
result = True
break
except:
if s == start_set[-1]:
if t == stop_set[-1]:
print('No Path Exists')
result = False
break
else:
continue
else:
continue
break
return result
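# A minimal call sketch; the DSGRN network specification string and the output
# name below are hypothetical placeholders:
#   has_path = test_any_path_exists_in_product(net_spec, "my_network")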
def find_breaks_in_FG_comb(database, P, scc, Hb_max, Kni_max):
breaks = []
for h in range(Hb_max+1):
for k in range(Kni_max+1):
if (h,k) != (0,0) and (h,k) != (Hb_max, Kni_max):
remove = (h,k)
T = deepcopy(P)
for node in P.nodes():
if scc[node][0][0:2] == remove:
T.remove_node(node)
start_set, stop_set = return_start_stop_set(database, T, scc, Hb_max, Kni_max)
if start_set == []:
result = False
if stop_set == []:
result = False
else:
for s in start_set:
for t in stop_set:
try:
nx.shortest_path(T, s, t)
result = True
break
except:
if s == start_set[-1]:
if t == stop_set[-1]:
result = False
break
else:
continue
else:
continue
break
if result == False:
breaks.append((h,k))
x = []
y = []
for s in scc:
x.append(Hb_max-scc[s][0][0])
y.append(scc[s][0][1])
plt.scatter(y, x)
for i in breaks:
plt.scatter([Hb_max-i[0]],[i[1]], color = 'r')
    plt.xlabel('Kni Factor Graph Layer')
    plt.ylabel('Hb Factor Graph Layer')
plt.show()
return breaks
def create_cond_subgraphs_graphml(database, grad_graph, cond, prod_graph_nodes, path_nodes, scc, FP_Region, start_set, stop_set, Filename):
''' graphml filetype '''
c = database.conn.cursor()
N = nx.DiGraph()
for node in grad_graph:
N.add_node(node)
for edge in grad_graph[node]:
N.add_edge(node, edge)
G = nx.DiGraph()
Kni_att = {}
Hb_att = {}
MGI_att = {}
Region_att = {}
scc_size_att = {}
graph = {}
s_t = {}
for node in cond:
G.add_node(node)
count = 0
for edge in cond[node]:
G.add_edge(node, edge)
yes_count = 0
for s in scc[node]:
for t in scc[edge]:
if N.has_edge(s,t) == True:
yes_count += 1
count +=1
G[node][edge]['weight'] = yes_count
for edge in cond[node]:
G[node][edge]['weight'] = G[node][edge]['weight']/count
p = scc[node][0][-1]
MGI_result = c.execute('select MorseGraphIndex from Signatures where ParameterIndex is ' + str(p))
MGI = MGI_result.fetchone()[0]
MGI_att[node] = MGI
FP_result = [row[0] for row in c.execute('select Label from MorseGraphAnnotations where MorseGraphIndex is ' + str(MGI))]
if len(FP_result) == 1:
for r in FP_Region:
if FP_result[0] in FP_Region[r]:
Region_att[node] = r
else:
Region_att[node] = 'not mono-stable'
Hb_att[node] = scc[node][0][0]
Kni_att[node] = scc[node][0][1]
if node in path_nodes:
graph[node] = 'path'
elif node in prod_graph_nodes:
graph[node] = 'product'
else:
graph[node] = 'cond'
scc_size_att[node] = len(scc[node])
for node in start_set:
s_t[node] = 'starting'
for node in stop_set:
s_t[node] = 'stoping'
nx.set_node_attributes(G, 'Hb_FG_layer', Hb_att)
nx.set_node_attributes(G, 'Kni_FG_layer', Kni_att)
nx.set_node_attributes(G, 'MGI', MGI_att)
nx.set_node_attributes(G, 'Region', Region_att)
nx.set_node_attributes(G, 'group', graph)
nx.set_node_attributes(G, 'scc size', scc_size_att)
nx.set_node_attributes(G, 'start_stop', s_t)
group=nx.get_node_attributes(G,'group')
att = {}
for edge in G.edges():
s = edge[0]
t = edge[1]
if group[s] == 'path':
if group[t] != 'path':
att[s] = 'leave path'
nx.set_node_attributes(G, 'leaving', att)
nx.write_graphml(G, Filename)
def get_gephi_graph_for_cond(database, network, grad_graph, graphml_filename, path_nodes = []):
'''
    grad_graph: expects the gradient graph as a dictionary.
    network: the network specification string (used to derive out-edges and the FP poset).
    graphml_filename: name for the graphml file; the output directory is added automatically. Expects .graphml at the end.
'''
out_edges = get_number_out_edges_from_string(network)
FP_Poset, FP_Region = get_FP_Poset(out_edges)
G = reduce_gradient_graph_to_nodes_of_interest(database, grad_graph, FP_Poset)[0]
strongcc = strongly_connected_components_by_MGI(G, database)
cG, scc = condensation(G, strongcc)
P = get_product_graph(database, cG, scc, FP_Poset)
Hb_list, Kni_list = get_Hb_Kni_list(database)
Hb_max = len(Hb_list)-1
Kni_max = len(Kni_list)-1
start_set, stop_set = return_start_stop_set(database, P, scc, Hb_max, Kni_max)
filename = '/home/elizabeth/Desktop/GIT/dsgrn_acdc/Saved_Files/Graphml/' + graphml_filename
### Notice graph only has bagged FP in it, the condensation of the gradient graph only, without removing all nodes not in bag is much larger.
create_cond_subgraphs_graphml(database, grad_graph, cG, P, path_nodes, scc, FP_Region, start_set, stop_set, filename) |
py | 1a35f2a4cc1502c1473a3aba2c04fe78e26b2aa0 | from torch import Tensor
from torch.autograd import Variable
from torch.optim import Adam
from itertools import chain
from utils.misc import hard_update
from utils.policies import DiscretePolicy
import torch.nn.functional as F
class Agent(object):
"""
General class for agents (policy, target policy, etc)
"""
def __init__(self, obs_shape, action_size, hidden_dim=64,
lr=0.01, adam_eps=1e-8, nonlin=F.relu, n_pol_heads=1):
self.policy = DiscretePolicy(obs_shape,
action_size,
hidden_dim=hidden_dim,
nonlin=nonlin,
n_heads=n_pol_heads)
self.target_policy = DiscretePolicy(obs_shape,
action_size,
hidden_dim=hidden_dim,
nonlin=nonlin,
n_heads=n_pol_heads)
hard_update(self.target_policy, self.policy)
self.policy_optimizer = Adam(self.policy.parameters(), lr=lr, eps=adam_eps)
def step(self, obs, explore=False, head=0):
"""
Take a step forward in environment for a minibatch of observations
Inputs:
obs (PyTorch Variable): Observations for this agent
explore (boolean): Whether or not to sample
head (int): Which policy head to use
Outputs:
action (PyTorch Variable): Actions for this agent
"""
return self.policy(obs, sample=explore, head=head)
def get_params(self):
return {'policy': self.policy.state_dict(),
'target_policy': self.target_policy.state_dict(),
'policy_optimizer': self.policy_optimizer.state_dict()}
def load_params(self, params, load_ir=False):
self.policy.load_state_dict(params['policy'])
self.target_policy.load_state_dict(params['target_policy'])
self.policy_optimizer.load_state_dict(params['policy_optimizer'])
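# A minimal construction/rollout sketch; the observation size and action count
# are hypothetical, and `obs` is assumed to be a float tensor of shape
# [batch, obs_shape]:
#   agent = Agent(obs_shape=18, action_size=5, hidden_dim=64)
#   actions = agent.step(obs, explore=True)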
|
py | 1a35f353edbdd93000e8311d795a3b0e1e2cd6ef | import logging.config
import tkinter as tk
from tkinter import ttk
class StudentPage(tk.Frame):
'''
Class creates Student Page frame.
'''
def __init__(self, master, controller):
'''
Initialize Student page
'''
ttk.Frame.__init__(self, master)
self.logger = logging.getLogger(__name__)
self.master = master
self.controller = controller
# Master frame for all widgets
self.master_frame = ttk.Frame(self.master)
# Frame for top window elements
self.top_frame = ttk.Frame(self.master_frame)
self.mid_frame = ttk.Frame(self.master_frame)
self.content_frame = ttk.Frame(self.master_frame)
self.students_frame = ttk.Frame(self.content_frame)
self.assignments_frame = ttk.Frame(self.content_frame, width=350, height=350)
self.master_frame.pack()
self.top_frame.pack(side=tk.TOP)
self.mid_frame.pack(side=tk.TOP)
self.content_frame.pack()
self.students_frame.pack(side=tk.LEFT, padx=10, pady=10)
self.assignments_frame.pack(side=tk.RIGHT, padx=10, pady=10)
self.assignments_frame.pack_propagate(False)
classes_label = ttk.Label(self.top_frame, text='Classes:')
self.class_value = tk.StringVar()
self.class_subject = ttk.Combobox(self.top_frame, textvariable=self.class_value, state='readonly')
def create_treeview(frame):
# Using treeview widget
treev = ttk.Treeview(frame, selectmode ='browse')
# Calling pack method w.r.to treeview
treev.pack(side ='right')
# Constructing vertical scrollbar
# with treeview
verscrlbar = ttk.Scrollbar(frame, orient ="vertical", command = treev.yview)
# Calling pack method w.r.to verical
# scrollbar
verscrlbar.pack(side ='right', fill ='x')
# Configuring treeview
treev.configure(xscrollcommand = verscrlbar.set)
return treev
self.tree_student = create_treeview(self.students_frame)
self.tree_assignments = create_treeview(self.assignments_frame)
classes_label.pack(side=tk.LEFT, padx=25, pady=10)
self.class_subject.pack()
|
gyp | 1a35f3702e530a496c0472cfcf41decf22a99afe | # Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'exe',
'type': 'executable',
'sources': [
'main.c',
],
},
],
}
|
py | 1a35f4457ce30b04ca80e9a1b11804d0b35151dc | import numpy as np
from PIL import Image
import cv2
import matplotlib.pyplot as plt
import pickle
from matplotlib import style
import time
style.use("ggplot")
SIZE = 20
HM_EPISODES = 25000
MOVE_PENALTY = 1
ENEMY_PENALTY = 300
FOOD_REWARD = 25
epsilon = 0.9
EPS_DECAY = 0.9998
SHOW_EVERY = 1000
start_q_table = None #'aa'#'qtable - 1574500480, pickle' #None # or a saved q-table file name
LEARNING_RATE = 0.1
DISCOUNT = 0.95
PLAYER_N = 1
FOOD_N = 2
ENEMY_N = 3
d = {1: (255, 175, 0), 2: (0, 255, 0), 3: (0, 0, 255)}
class Blob:
def __init__(self):
self.x = np.random.randint(0, SIZE)
self.y = np.random.randint(0, SIZE)
def __str__(self):
return f"{self.x}, {self.y}"
def __sub__(self, other):
return (self.x - other.x, self.y - other.y)
def action(self, choice): # var addot vel
if choice == 0:
self.move(x=1, y=1)
elif choice == 1:
self.move(x=-1, y=-1)
elif choice == 2:
self.move(x=-1, y=1)
elif choice == 3:
self.move(x=1, y=-1)
if choice == 4:
self.move(x=0, y=1)
elif choice == 5:
self.move(x=0, y=-1)
elif choice == 6:
self.move(x=-1, y=0)
elif choice == 7:
self.move(x=1, y=0)
def move(self, x=False, y=False):
if not x:
self.x += np.random.randint(-1,2)
else:
self.x += x
if not y:
self.y += np.random.randint(-1,2)
else:
self.y += y
if self.x < 0: self.x = 0
elif self.x > SIZE - 1: self.x = SIZE - 1
if self.y < 0: self.y = 0
elif self.y > SIZE - 1: self.y = SIZE - 1
if start_q_table is None:
q_table = {}
for x1 in range(-SIZE + 1, SIZE):
for y1 in range(-SIZE + 1, SIZE):
for x2 in range(-SIZE + 1, SIZE):
for y2 in range(-SIZE + 1, SIZE):
                    q_table[((x1,y1), (x2,y2))] = [np.random.uniform(-5, 0) for i in range(8)] # one Q-value per action (Blob.action defines 8 choices)
else:
with open(start_q_table, "rb") as f:
q_table = pickle.load(f)
episode_rewards = []
for episode in range(HM_EPISODES):
player = Blob()
food = Blob()
enemy = Blob()
if episode % SHOW_EVERY == 0:
print(f"on # {episode}, epsilon: {epsilon}")
print(f"{SHOW_EVERY} ep mean {np.mean(episode_rewards[-SHOW_EVERY:])}")
show = True
else:
show = False
episode_reward = 0
for i in range(200):
obs = (player-food, player-enemy) # ko redz
if np.random.random() > epsilon:
action = np.argmax(q_table[obs])
else:
            action = np.random.randint(0, 8) # sample uniformly over all 8 actions
player.action(action)
        '''maybe later:
enemy.move()
food.move() '''
if player.x == enemy.x and player.y == enemy.y:
reward = -ENEMY_PENALTY
elif player.x == food.x and player.y == food.y:
reward = FOOD_REWARD
else:
reward = -MOVE_PENALTY
new_obs = (player-food, player-enemy)
max_future_q = np.max(q_table[new_obs])
current_q = q_table[obs][action]
if reward == FOOD_REWARD:
new_q = FOOD_REWARD
elif reward == -ENEMY_PENALTY:
new_q = -ENEMY_PENALTY
else:
new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
q_table[obs][action] = new_q
if show:
env = np.zeros((SIZE, SIZE, 3), dtype=np.uint8)
env[food.y][food.x] = d[FOOD_N]
env[player.y][player.x] = d[PLAYER_N]
env[enemy.y][enemy.x] = d[ENEMY_N]
img = Image.fromarray(env, "RGB")
img = img.resize((300, 300))
cv2.imshow("", np.array(img))
if reward == FOOD_REWARD or reward == -ENEMY_PENALTY:
if cv2.waitKey(500) & 0xFF == ord("q"):
break
else:
if cv2.waitKey(1) & 0xFF == ord("q"):
break
episode_reward += reward
if reward == FOOD_REWARD or reward == -ENEMY_PENALTY:
break
episode_rewards.append(episode_reward)
epsilon *= EPS_DECAY
moving_avg = np.convolve(episode_rewards, np.ones((SHOW_EVERY,)) / SHOW_EVERY, mode="valid")
plt.plot([i for i in range(len(moving_avg))], moving_avg)
plt.ylabel(f"reward {SHOW_EVERY}")
plt.xlabel("episode #")
plt.show()
with open(f"qtablenew - {int(time.time())} pickle", "wb") as f:
pickle.dump(q_table, f)
|
py | 1a35f518133de37b4a3da12d74ca076fe7aba690 | #!/usr/bin/env python2
import shutil, os, argparse, sys, stat, time
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(__file__)), "setUpScripts"))
from genFuncs import genHelper
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(__file__)), "pyUtils"))
from utils import Utils
from color_text import ColorText as CT
def genArgParsePythonCompletes(programNames):
ret = """
_argParsePys()
{
local cur prev opts base
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
if [[ ${cur} == -* ]]; then
opts=$(for x in `${COMP_WORDS[0]} -h | grep " -" | sed "s/^. *-/-/g" | sed "s/ .*//g" | sed "s/, / /g"`; do echo ${x} ; done )
COMPREPLY=($(compgen -W "${opts}" -- ${cur}))
else
_filedir
fi
return 0
}
"""
for name in programNames:
ret += "complete -F _argParsePys {programName}\n".format(programName = name)
return ret
def genSetUpPyCompletes():
setUpPrograms = ["setup.py", "mapSrc.py", "needToRecompile.py", "fileModAffect.py", "configure.py"]
return genArgParsePythonCompletes(setUpPrograms)
def addSetUpPyCompletes(dest, outFilename):
with open(os.path.join(dest,"bash_completion.d",outFilename), "w") as f:
f.write(genSetUpPyCompletes())
def genMultiRingBashCompleteStr(programNames):
ret = """
_bibCppTools()
{
local cur prev opts base
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
if [[ $COMP_CWORD -lt 2 ]] ; then
opts=$(for x in `${COMP_WORDS[0]} | grep ")" | sed "s/.*) //g"`; do echo ${x} ; done )
COMPREPLY=($(compgen -W "${opts}" -- ${cur}))
elif [[ ${cur} == -* ]]; then
if [[ ${COMP_WORDS[1]} == batch* ]]; then
rest="${COMP_WORDS[@]:1:${#COMP_WORDS[@]} }"
if [[ $rest != *"-getFlags"* ]]; then
rest="$rest -getFlags"
fi
newopts=$(${COMP_WORDS[0]} $rest | column -t | cut -f 1 -d " " | cut -f 1 -d ,)
COMPREPLY=( $(compgen -W "${newopts}" -- ${cur}) )
else
newopts=$(${COMP_WORDS[0]} ${COMP_WORDS[1]} -getFlags | column -t | cut -f 1 -d " " | cut -f 1 -d ,)
COMPREPLY=( $(compgen -W "${newopts}" -- ${cur}) )
fi
else
if [[ ${prev} == -run ]]; then
opts=$(for x in `${COMP_WORDS[0]} | grep ")" | sed "s/.*) //g"`; do echo ${x} ; done )
COMPREPLY=($(compgen -W "${opts}" -- ${cur}))
else
_filedir
fi
fi
return 0
}
"""
for name in programNames:
ret += "complete -F _bibCppTools {programName}\n".format(programName = name)
return ret
def addMultiRingComletes(dest,programNames, outFilename):
with open(os.path.join(dest,"bash_completion.d",outFilename), "w") as f:
f.write(genMultiRingBashCompleteStr(programNames))
def genSingleCmdBashCompleteStr(programNames):
ret = """
_singleBibCppTools()
{
local cur prev opts base
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
if [[ ${cur} == -* ]]; then
newopts=$(${COMP_WORDS[0]} -getFlags | column -t | cut -f 1 -d " " | cut -f 1 -d ,)
COMPREPLY=( $(compgen -W "${newopts}" -- ${cur}) )
else
_filedir
fi
return 0
}
"""
for name in programNames:
ret += "complete -F _singleBibCppTools {programName}\n".format(programName = name)
return ret
def addSingleCmdComletes(dest,programNames, outFilename):
with open(os.path.join(dest,"bash_completion.d",outFilename), "w") as f:
f.write(genSingleCmdBashCompleteStr(programNames))
def genBashCompleteFolder(dest):
os.mkdir(os.path.join(dest, "bash_completion.d"))
def fileInfoHeader(headerName, author):
return """
// {headerName}
//
// Created by {author} on {date}.
// Copyright (c) {year} {author}. All rights reserved.
//
""".format(headerName=headerName, author=author,year=time.strftime("%Y"),date=time.strftime("%Y/%m/%d"))
def startHeader(headerName, author):
return """#pragma once
//
""" + fileInfoHeader(headerName, author)
def startCpp(nameStub, author):
return fileInfoHeader(nameStub + ".cpp", author) + """
#include "{name}.hpp"
""".format(name = nameStub)
def genRing(runnerName,ringName, projNamespace, internalIncludes, externalIncludes, parentPath, dest, author, placeHolderFunc):
externalIncludes = ["bibcpp.h"] + externalIncludes
if not parentPath.endswith("/"):
parentPath += "/"
ringDestName = os.path.join(dest, ringName)
if os.path.exists(ringDestName) or os.path.exists(ringDestName + ".h"):
print "Error, " + ringDestName + " already exists"
exit(1)
#create main dir
os.mkdir(ringDestName)
#create main header to include the ring
with open(ringDestName + ".h", "w") as f:
mainHeaderOut = startHeader( ringName + ".h", author) + """
#include "{parentPath}{name}/{name}SetUp.hpp"
#include "{parentPath}{name}/{name}Runner.hpp"
""".format(name=ringName,parentPath = parentPath)
f.write(mainHeaderOut)
#create setUp header
with open(os.path.join(ringDestName,ringName + "SetUp.hpp"), "w") as f:
defaultHeader = startHeader(ringName + "SetUp.hpp", author)
for eInclude in externalIncludes:
defaultHeader += "#include <" + eInclude + ">\n"
for iInclude in internalIncludes:
defaultHeader += "#include \"" + iInclude + "\"\n"
defaultHeader += """
namespace {projNamespace} {{
class {name}SetUp : public bib::progutils::programSetUp {{
public:
using programSetUp::programSetUp; //include programSetUp's constructors
}};
}} // namespace {projNamespace}
""".format(name =ringName, projNamespace = projNamespace)
f.write(defaultHeader)
#create setUp cpp
with open(os.path.join(ringDestName,ringName + "SetUp.cpp"), "w") as f:
infoHeader = startCpp(ringName + "SetUp", author)
infoHeader +="""
namespace {projNamespace} {{
}} // namespace {projNamespace}
""".format(projNamespace = projNamespace)
f.write(infoHeader)
#create runner header
with open(os.path.join(ringDestName,ringName + "Runner.hpp"), "w") as f:
infoHeader = startHeader(ringName + "Runner.hpp", author)
infoHeader +="""
#include "{name}SetUp.hpp"
namespace {projNamespace} {{
class {name}Runner : public bib::progutils::programRunner {{
public:
{name}Runner();
static int {placeHolderFunc}(std::map<std::string, std::string> inputCommands);
}};
}} // namespace {projNamespace}
""".format(name = ringName,projNamespace = projNamespace, placeHolderFunc= placeHolderFunc)
f.write(infoHeader)
#create runner cpp
with open(os.path.join(ringDestName,ringName + "Runner.cpp"), "w") as f:
infoHeader = startCpp(ringName + "Runner", author)
infoHeader +="""
namespace {projNamespace} {{
{name}Runner::{name}Runner()
: bib::progutils::programRunner({{addFunc("{placeHolderFunc}", {placeHolderFunc}, false)}},
"{runnerName}") {{}}
int {name}Runner::{placeHolderFunc}(std::map<std::string, std::string> inputCommands) {{
{name}SetUp setUp(inputCommands);
std::string name = "World";
setUp.setOption(name, "--name", "Someone\'s Name");
setUp.finishSetUp(std::cout);
std::cout << "From {name} {placeHolderFunc}, Hello " << name << "!" << std::endl;
return 0;
}}
}} // namespace {projNamespace}
""".format(name = ringName, projNamespace = projNamespace, placeHolderFunc= placeHolderFunc, runnerName = runnerName)
f.write(infoHeader)
def genOneRing(runnerName, ringName, projNamespace, rings, externalIncludes, parentPath, dest, author, placeHolderFunc):
externalIncludes = ["bibcpp.h"] + externalIncludes
if not parentPath.endswith("/"):
parentPath += "/"
ringDestName = os.path.join(dest, ringName)
if os.path.exists(ringDestName) or os.path.exists(ringDestName + ".h"):
print "Error, " + ringDestName + " already exists"
exit(1)
#create main dir
os.mkdir(ringDestName)
#create main header to include the ring
with open(ringDestName + ".h", "w") as f:
mainHeaderOut = startHeader( ringName + ".h", author) + """
#include "{parentPath}{name}/{name}SetUp.hpp"
#include "{parentPath}{name}/{name}Runner.hpp"
""".format(name=ringName,parentPath = parentPath)
f.write(mainHeaderOut)
#create setUp header
with open(os.path.join(ringDestName,ringName + "SetUp.hpp"), "w") as f:
defaultHeader = startHeader(ringName + "SetUp.hpp", author)
for eInclude in externalIncludes:
defaultHeader += "#include <" + eInclude + ">\n"
defaultHeader += """
namespace {projNamespace} {{
class {name}SetUp : public bib::progutils::programSetUp {{
public:
using programSetUp::programSetUp; //include programSetUp's constructors
}};
}} // namespace {projNamespace}
""".format(name =ringName, projNamespace = projNamespace)
f.write(defaultHeader)
#create setUp cpp
with open(os.path.join(ringDestName,ringName + "SetUp.cpp"), "w") as f:
infoHeader = startCpp(ringName + "SetUp", author)
infoHeader +="""
namespace {projNamespace} {{
}} // namespace {projNamespace}
""".format(projNamespace = projNamespace)
f.write(infoHeader)
#create runner header
with open(os.path.join(ringDestName,ringName + "Runner.hpp"), "w") as f:
infoHeader = startHeader(ringName + "Runner.hpp", author)
infoHeader +="""
#include "{name}SetUp.hpp"
namespace {projNamespace} {{
class {name}Runner : public bib::progutils::oneRing {{
public:
{name}Runner();
static int {placeHolderFunc}(std::map<std::string, std::string> inputCommands);
}};
}} // namespace {projNamespace}
""".format(name = ringName,projNamespace = projNamespace, placeHolderFunc = placeHolderFunc)
f.write(infoHeader)
#create runner cpp
with open(os.path.join(ringDestName,ringName + "Runner.cpp"), "w") as f:
infoHeader = startCpp(ringName + "Runner", author)
infoHeader +="\n"
for iInclude in rings:
infoHeader += "#include \"{parentPath}{addRingPrefix}/{addRing}.hpp\"\n".format(parentPath = parentPath, addRingPrefix = iInclude.replace("Runner", ""), addRing =iInclude )
infoHeader +="""
namespace {projNamespace} {{
{name}Runner::{name}Runner()
: bib::progutils::oneRing({{"""
for ring in rings:
infoHeader += "addRing(std::make_shared<{ring}>()),".format(ring = ring)
infoHeader += """}},{{addFunc("{placeHolderFunc}", {placeHolderFunc}, false)}},
"{runnerName}") {{}}
int {name}Runner::{placeHolderFunc}(std::map<std::string, std::string> inputCommands) {{
{name}SetUp setUp(inputCommands);
std::string name = "World";
setUp.setOption(name, "--name", "Someone\'s Name");
setUp.finishSetUp(std::cout);
std::cout << "From {name} {placeHolderFunc}, Hello " << name << "!" << std::endl;
return 0;
}}
}} // namespace {projNamespace}
"""
infoHeader = infoHeader.format(name = ringName, projNamespace = projNamespace,placeHolderFunc= placeHolderFunc, runnerName = runnerName)
f.write(infoHeader)
def genCommonIncludes(filename, includes):
with open(filename, "w") as f:
f.write("#pragma once\n")
f.write("\n")
f.write("// Created on "+ time.strftime("%Y/%m/%d") +"\n")
f.write("// Add system libraries here\n")
f.write("\n")
for i in includes:
f.write("#include <" + i + ">\n")
def genTypeDefs(filename, projName):
with open(filename, "w") as f:
f.write("#pragma once\n")
f.write("\n")
f.write("// Created on "+ time.strftime("%Y/%m/%d") +"\n")
f.write("// Add project typedefs here\n")
f.write("\n")
for i in ["vector", "map", "string"]:
f.write("#include <" + i + ">\n")
f.write("\n")
f.write("namespace " + projName+" {\n")
f.write("//typedef std::map<std::string, std::string> MapStrStr;\n")
f.write("//typedef std::vector<std::string> VecStr;\n")
f.write("} // namespace " + projName + "\n")
def genCommon(dest, projName, includes):
genCommonIncludes(os.path.join(dest, "src/" + projName + "/common/allSystemIncludes.h"), includes)
genTypeDefs(os.path.join(dest, "src/" + projName + "/common/typedefs.h"), projName)
with open(os.path.join(dest, "src/" + projName + "/common.h"), "w") as f:
f.write("#pragma once\n")
f.write("\n")
f.write("// Created on "+ time.strftime("%Y/%m/%d") +"\n")
f.write("// Including headers in common\n")
f.write("\n")
f.write("#include \""+ projName + "/common/allSystemIncludes.h\"\n")
f.write("#include \""+ projName + "/common/typedefs.h\"\n")
def genProgramHeader(dest, projName):
with open(os.path.join(dest, "src/" + projName + "/programs.h"), "w") as f:
headers = [progFile for progFile in os.listdir(os.path.join(dest, "src/" + projName + "/programs")) if progFile.endswith(".h")]
f.write("#pragma once\n")
f.write("\n")
f.write("// Created on "+ time.strftime("%Y/%m/%d") +"\n")
f.write("// Including headers in common\n")
f.write("\n")
for head in headers:
f.write("#include \""+ projName + "/programs/" + head + "\"\n")
def genWholeProjInclude(dest, projName, addProgramDir):
with open(os.path.join(dest, "src/" + projName + ".h"), "w") as f:
f.write("#pragma once\n")
f.write("\n")
f.write("// Created on "+ time.strftime("%Y/%m/%d") +"\n")
f.write("// Including whole project\n")
f.write("\n")
f.write("#include \""+ projName + "/common.h\"\n")
if(addProgramDir):
f.write("#include \""+ projName + "/programs.h\"\n")
def genMain(dest, projName):
with open(os.path.join(dest, "src/" + "main.cpp"), "w") as f:
f.write("\n")
f.write("// Created on "+ time.strftime("%Y/%m/%d") +"\n")
f.write("// main.cpp\n")
f.write("\n")
f.write("#include \""+ projName + ".h\"")
f.write("\n")
f.write("int main(int argc, char* argv[]){\n")
f.write(" " + projName + "::" + projName +"ProgramRunner runner;\n")
f.write(" return runner.run(argc, argv);\n")
f.write("}\n")
def genSingCmdMain(dest, projName):
with open(os.path.join(dest, "src/" + "main.cpp"), "w") as f:
f.write("\n")
f.write("// Created on "+ time.strftime("%Y/%m/%d") +"\n")
f.write("// main.cpp\n")
f.write("\n")
f.write("#include \""+ projName + ".h\"\n")
f.write("#include <bibcpp.h> \n")
f.write("\n")
f.write("int main(int argc, char* argv[]){\n")
mainContent = """
bib::progutils::programSetUp setUp(argc, argv);
std::string name = "World";
setUp.setOption(name, "--name", "Name to say hello to", false);
setUp.finishSetUp(std::cout);
std::cout << "Hello " << name << "!\\n";
return 0;
"""
f.write(mainContent)
f.write("}\n")
def genSrcSingleRingProgram(dest, projName, includes, externalIncludes, author):
os.mkdir(os.path.join(dest, "src"))
os.mkdir(os.path.join(dest, "src/" + projName))
os.mkdir(os.path.join(dest, "src/" + projName + "/common"))
os.mkdir(os.path.join(dest, "src/" + projName + "/programs"))
genCommon(dest, projName, includes)
genWholeProjInclude(dest, projName, True)
intIncForRing = [projName + "/common.h"]
genRing(projName, projName + "Program", projName, intIncForRing, externalIncludes, projName + "/programs", projName + "/" + "src/" + projName + "/programs", author, "hellowWorld")
genMain(dest, projName)
genProgramHeader(dest, projName)
genBashCompleteFolder(projName)
addMultiRingComletes(projName, [projName], projName)
addSetUpPyCompletes(projName, "pyCompletes")
def genSrcWithOneRingProgram(dest, projName, includes, externalIncludes, author):
os.mkdir(os.path.join(dest, "src"))
os.mkdir(os.path.join(dest, "src/" + projName))
os.mkdir(os.path.join(dest, "src/" + projName + "/common"))
os.mkdir(os.path.join(dest, "src/" + projName + "/programs"))
genCommon(dest, projName, includes)
genWholeProjInclude(dest, projName, True)
intIncForRing = [projName + "/common.h"]
genOneRing(projName, projName + "Program", projName, [projName + "Sub1" + "Runner", projName + "Sub2" + "Runner"], externalIncludes, projName + "/programs", projName + "/" + "src/" + projName + "/programs", author, "hellowWorldMain")
genRing(projName + "Sub1", projName + "Sub1", projName, intIncForRing, externalIncludes, projName + "/programs", projName + "/" + "src/" + projName + "/programs", author, "hellowWorld1")
genRing(projName + "Sub2", projName + "Sub2", projName, intIncForRing, externalIncludes, projName + "/programs", projName + "/" + "src/" + projName + "/programs", author, "hellowWorld2")
genProgramHeader(dest, projName)
genMain(dest, projName)
genBashCompleteFolder(projName)
addMultiRingComletes(projName, [projName], projName)
addSetUpPyCompletes(projName, "pyCompletes")
def genSrcWithOneCmdProgram(dest, projName, includes, externalIncludes, author):
os.mkdir(os.path.join(dest, "src"))
os.mkdir(os.path.join(dest, "src/" + projName))
os.mkdir(os.path.join(dest, "src/" + projName + "/common"))
os.mkdir(os.path.join(dest, "src/" + projName + "/programs"))
genCommon(dest, projName, includes)
genWholeProjInclude(dest, projName, False)
genSingCmdMain(dest, projName)
genBashCompleteFolder(projName)
addSingleCmdComletes(projName, [projName], projName)
addSetUpPyCompletes(projName, "pyCompletes")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-projName', type=str, nargs=1, required=True)
parser.add_argument('-dest', type=str, nargs=1, required=True)
parser.add_argument('-CC', type=str, nargs=1)
parser.add_argument('-CXX', type=str, nargs=1)
parser.add_argument('-externalLoc', type=str, nargs=1)
parser.add_argument('-neededLibs', type=str, nargs=1)
parser.add_argument('-author', type=str, required=True)
parser.add_argument('-programType', type=str, required=True)
return parser.parse_args()
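# Example invocation (script name and values are illustrative, not part of the tool):
#   python genCppProject.py -projName seqTools -dest . -author "A. Developer" -programType oneRing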
def main():
args = parse_args()
externalIncludes = []
stdLibraryInc = ["iostream", "string", "unistd.h", "vector", "cstdint", "cstdio", "cstddef", "utility", "map", "unordered_map", "algorithm"]
projectOut = os.path.join(args.dest[0], args.projName[0])
os.mkdir(projectOut)
if args.programType == "singleRing":
genSrcSingleRingProgram(projectOut, args.projName[0], stdLibraryInc, externalIncludes, args.author)
elif args.programType == "oneRing":
genSrcWithOneRingProgram(projectOut, args.projName[0], stdLibraryInc, externalIncludes, args.author)
elif args.programType == "oneCmd":
genSrcWithOneCmdProgram(projectOut, args.projName[0], stdLibraryInc, externalIncludes, args.author)
else:
raise Exception("Error, only singleRing, oneRing,oneCmd available for options to programType, was given " + args.programType )
CC = genHelper.determineCC(args)
CXX = genHelper.determineCXX(args)
external = "external"
outname = args.projName[0]
prefix = "./"
installName = args.projName[0]
neededLibs = ["bibcppdev"]
if args.externalLoc:
external = os.path.realpath(args.externalLoc[0])
if args.neededLibs:
neededLibs = ["bibcppdev"] + args.neededLibs[0].split(",")
genHelper.generateCompfileFull(os.path.join(projectOut, "compfile.mk"), external, CC, CXX, outname, installName, prefix, neededLibs)
with open(os.path.join(projectOut, "configure.py"), "w") as configFile:
if(args.neededLibs):
configFile.write(genHelper.mkConfigFileStr(outname, ",".join(neededLibs)))
else:
configFile.write(genHelper.mkConfigFileStr(outname, "bibcppdev"))
os.chmod(os.path.join(projectOut, "configure.py"), stat.S_IXGRP | stat.S_IXOTH | stat.S_IXUSR | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR)
exFrom = os.path.abspath(os.path.dirname(__file__))
cpSetUpCmd = exFrom + "/copySetUpFiles.py -from " + exFrom +"/../../ -to " + projectOut
print CT.boldBlack(cpSetUpCmd)
Utils.run(cpSetUpCmd)
cpMakefilesCmd = "cp " + exFrom + "/../cppSetUpFiles/*akefile* " + projectOut
print CT.boldBlack(cpMakefilesCmd)
Utils.run(cpMakefilesCmd)
main()
|
py | 1a35f5bbf42e00683d7a330a27ad349e9ab55bd9 | from typing import Dict, Iterable
import sqlalchemy.sql.expression as sql
from sqlalchemy.orm import selectinload
from transiter.db import dbconnection, models
def list_groups_and_maps_for_stops_in_route(route_pk):
"""
This function is used to get the service maps for a route.
It returns a list of tuples (service map group, service map) for each
service map group having use_for_stops_in_route equal True.
:param route_pk: the route's PK
:return: the list described above
"""
session = dbconnection.get_session()
query = (
session.query(models.ServiceMapGroup, models.ServiceMap)
.join(models.System, models.System.pk == models.ServiceMapGroup.system_pk)
.join(models.Route, models.Route.system_pk == models.System.pk)
.outerjoin(
models.ServiceMap,
sql.and_(
models.ServiceMap.route_pk == models.Route.pk,
models.ServiceMap.group_pk == models.ServiceMapGroup.pk,
),
)
.filter(models.ServiceMapGroup.use_for_stops_in_route)
.filter(models.Route.pk == route_pk)
.options(selectinload(models.ServiceMap.vertices))
.options(selectinload(models.ServiceMap.vertices, models.ServiceMapVertex.stop))
)
return [(group, map_) for (group, map_) in query]
def get_stop_pk_to_group_id_to_routes_map(
stop_pks,
) -> Dict[int, Dict[str, Iterable[models.Route]]]:
"""
This function is used to get service map information for stops; namely,
which routes call at the stop based on the service maps.
    Get a map whose key is a stop's PK and whose value is another map.
This second map has a key for every service map group having
use_for_routes_at_stop equal to True. The value of this map is the list of
routes that contain the stop in the relevant service map.
:param stop_pks: stop PKs to build the map for
:return: the monster map described above
"""
session = dbconnection.get_session()
query = (
session.query(models.Stop.pk, models.ServiceMapGroup.id, models.Route)
.join(models.System, models.System.pk == models.Stop.system_pk)
.join(
models.ServiceMapGroup,
sql.and_(
models.ServiceMapGroup.system_pk == models.System.pk,
models.ServiceMapGroup.use_for_routes_at_stop,
),
)
.outerjoin(
models.ServiceMap,
sql.and_(
models.ServiceMap.group_pk == models.ServiceMapGroup.pk,
models.ServiceMap.pk.in_(
session.query(models.ServiceMapVertex.map_pk).filter(
models.ServiceMapVertex.stop_pk == models.Stop.pk
)
),
),
)
.outerjoin(models.Route, models.Route.pk == models.ServiceMap.route_pk)
.filter(models.Stop.pk.in_(stop_pks))
)
response = {stop_pk: {} for stop_pk in stop_pks}
for stop_pk, group_id, route in query:
if group_id not in response[stop_pk]:
response[stop_pk][group_id] = []
if route is not None:
response[stop_pk][group_id].append(route)
return response
|
py | 1a35f62c2e5e8ddcf8ba97e81fd931cbb7c9a07d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Louis Richard"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"
def hpca_energies():
return [1.35500, 1.57180, 1.84280, 2.22220, 2.60160, 3.08940, 3.63140,
4.28180, 5.04060, 5.96200, 6.99180, 8.23840, 9.75600, 11.4904,
13.5500, 15.9890, 18.8616, 22.2762, 26.2328, 30.9482, 36.5308,
43.0890, 50.7854, 59.9452, 70.6768, 83.4138, 98.3730, 116.042,
136.855, 161.462, 190.459, 224.659, 264.984, 312.571, 368.723,
434.955, 513.057, 605.197, 713.868, 842.051, 993.323, 1171.70,
1382.10, 1630.28, 1923.07, 2268.43, 2675.80, 3156.28, 3723.11,
4391.72, 5180.44, 6110.72, 7208.11, 8502.57, 10029.5, 11830.6,
13955.2, 16461.4, 19417.5, 22904.6, 27017.9, 31869.8, 37593.1] |
py | 1a35f64e2c27c1911d36b7487a272863d78a37c5 | # -*- coding: UTF-8 -*-
# Copyright 2010-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
import logging
logger = logging.getLogger(__name__)
from django.utils.translation import ugettext_lazy as _
from lino.modlib.office.roles import OfficeStaff
from lino.api import dd, rt
class Shortcut(dd.Choice):
"""Represents a shortcut field."""
model_spec = None
target = 'uploads.UploadsByController'
def __init__(self, model_spec, name, verbose_name, target=None):
if target is not None:
self.target = target
self.model_spec = model_spec
value = model_spec + "." + name
super(Shortcut, self).__init__(value, verbose_name, name)
def get_uploads(self, **kw):
"""Return a queryset with the uploads of this shortcut."""
return rt.models.uploads.Upload.objects.filter(
type__shortcut=self, **kw)
class Shortcuts(dd.ChoiceList):
verbose_name = _("Upload shortcut")
verbose_name_plural = _("Upload shortcuts")
item_class = Shortcut
max_length = 50 # fields get created before the values are known
class UploadAreas(dd.ChoiceList):
required_roles = dd.login_required(OfficeStaff)
verbose_name = _("Upload Area")
verbose_name_plural = _("Upload Areas")
add = UploadAreas.add_item
add('90', _("Uploads"), 'general')
def add_shortcut(*args, **kw):
return Shortcuts.add_item(*args, **kw)
|
py | 1a35f660e8c7681aafb5b8025ba0bb0410c8a515 | import cv2
from PIL import Image
import numpy as np
from subprocess import Popen, PIPE
from enum import IntEnum, auto
import sys, math, os, time, argparse
import threading
import queue
from keras.models import load_model
import tensorflow as tf
sys.path.append(os.path.join(os.path.dirname(__file__), 'UGATIT'))
from UGATIT import UGATIT
'''
suppress warnings
'''
import logging, warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=Warning)
tf.get_logger().setLevel('INFO')
tf.autograph.set_verbosity(0)
tf.get_logger().setLevel(logging.ERROR)
'''
Command line arguments
'''
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(description='specify input and output device')
parser.add_argument('--input_video_num', type=int, required=True,
help='input video device number. ex) if input is /dev/video0 then the value is 0')
parser.add_argument('--output_video_dev', type=str, required=True,
                    help='output video device. ex) /dev/video2')
parser.add_argument('--emotion_mode', type=str2bool, required=False, default=False,
help='enable emotion mode')
parser.add_argument('--anime_mode', type=str2bool, required=False, default=False,
help='enable anime mode')
parser.add_argument('--skip_frame', type=int, required=False, default=1,
help='enable skip frame')
parser.add_argument('--crop_face', type=str2bool, required=False, default=True,
help='enable crop face')
parser.add_argument('--show_fps', type=str2bool, required=False, default=False,
                    help='show fps')
parser.add_argument('--show_source', type=str2bool, required=False, default=False,
help='show source')
'''
args for anime mode
'''
parser.add_argument('--phase', type=str, default='test', help='[train / test]')
parser.add_argument('--light', type=str2bool, default=False, help='[U-GAT-IT full version / U-GAT-IT light version]')
parser.add_argument('--dataset', type=str, default='selfie2anime', help='dataset_name')
parser.add_argument('--epoch', type=int, default=100, help='The number of epochs to run')
parser.add_argument('--iteration', type=int, default=10000, help='The number of training iterations')
parser.add_argument('--batch_size', type=int, default=1, help='The size of batch size')
parser.add_argument('--print_freq', type=int, default=1000, help='The number of image_print_freq')
parser.add_argument('--save_freq', type=int, default=1000, help='The number of ckpt_save_freq')
parser.add_argument('--decay_flag', type=str2bool, default=True, help='The decay_flag')
parser.add_argument('--decay_epoch', type=int, default=50, help='decay epoch')
parser.add_argument('--lr', type=float, default=0.0001, help='The learning rate')
parser.add_argument('--GP_ld', type=int, default=10, help='The gradient penalty lambda')
parser.add_argument('--adv_weight', type=int, default=1, help='Weight about GAN')
parser.add_argument('--cycle_weight', type=int, default=10, help='Weight about Cycle')
parser.add_argument('--identity_weight', type=int, default=10, help='Weight about Identity')
parser.add_argument('--cam_weight', type=int, default=1000, help='Weight about CAM')
parser.add_argument('--gan_type', type=str, default='lsgan', help='[gan / lsgan / wgan-gp / wgan-lp / dragan / hinge]')
parser.add_argument('--smoothing', type=str2bool, default=True, help='AdaLIN smoothing effect')
parser.add_argument('--ch', type=int, default=64, help='base channel number per layer')
parser.add_argument('--n_res', type=int, default=4, help='The number of resblock')
parser.add_argument('--n_dis', type=int, default=6, help='The number of discriminator layer')
parser.add_argument('--n_critic', type=int, default=1, help='The number of critic')
parser.add_argument('--sn', type=str2bool, default=True, help='using spectral norm')
parser.add_argument('--img_size', type=int, default=256, help='The size of image')
parser.add_argument('--img_ch', type=int, default=3, help='The size of image channel')
parser.add_argument('--augment_flag', type=str2bool, default=True, help='Image augmentation use or not')
parser.add_argument('--checkpoint_dir', type=str, default='checkpoint',
help='Directory name to save the checkpoints')
parser.add_argument('--result_dir', type=str, default='results',
help='Directory name to save the generated images')
parser.add_argument('--log_dir', type=str, default='logs',
help='Directory name to save training logs')
parser.add_argument('--sample_dir', type=str, default='samples',
help='Directory name to save the samples on training')
args = parser.parse_args()
BATCH_SIZE = args.batch_size
'''
Queue for anime mode
'''
anime_mode_input_queue = queue.Queue()
anime_mode_output_queue = queue.Queue()
anime_buffer_image = None
anime_frame_num = 0
anime_fps_start = time.time()
anime_fps = 0
anime_frame_count = 0
'''
Mode definition
'''
class modes(IntEnum):
SIMPLE_SMILE_MODE = auto()
EMOTION_MODE = auto()
ANIME_MODE = auto()
'''
Classifiers
'''
face_classifier_classifier = None
anime_session = None
anime_model = None
'''
Path for resources
'''
face_cascade_path = './models/haarcascade_frontalface_default.xml'
def anime_mode_worker():
frames = []
while True:
item_num = anime_mode_input_queue.qsize()
#print(item_num)
for i in range(item_num):
frame = anime_mode_input_queue.get()
frame = cv2.resize(frame, dsize=(256, 256))
frames.append(frame)
#print(f'{i}/{item_num}')
if len(frames) < BATCH_SIZE:
if item_num == 0:
pass
#time.sleep(1)
continue
frames = np.array(frames)
#print(sys.stderr, frames.shape)
new_frames = anime_model.predict(frames[-1 * BATCH_SIZE:])
for i, (old_frame, new_frame) in enumerate(zip(frames[-1 * BATCH_SIZE:], new_frames)):
anime_mode_output_queue.put( (old_frame, new_frame))
frames = []
def load_resources(mode):
global face_classifier_classifier
face_classifier_classifier = cv2.CascadeClassifier(face_cascade_path)
if mode == modes.ANIME_MODE:
global anime_session, anime_model
anime_session = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
anime_model = UGATIT(anime_session, args)
anime_model.build_model()
anime_model.load_model(anime_session)
def paste(img, imgback, x, y, angle, scale):
if img.shape [0] > imgback.shape[0] or img.shape[1] > imgback.shape[1]:
h_ratio = imgback.shape[0] / img.shape[0]
w_ratio = imgback.shape[1] / img.shape[1]
if h_ratio < w_ratio:
new_h = int(img.shape[0] * h_ratio)
new_w = int(img.shape[1] * h_ratio)
else:
new_h = int(img.shape[0] * w_ratio)
new_w = int(img.shape[1] * w_ratio)
if new_h % 2 != 0:
new_h += 1
if new_w % 2 != 0:
new_w += 1
img = cv2.resize(img, (new_w, new_h))
#print(sys.stderr, f'pate resize img : {new_h}, {new_w}')
r = img.shape[0]
c = img.shape[1]
rb = imgback.shape[0]
cb = imgback.shape[1]
hrb = round(rb/2)
hcb = round(cb/2)
hr = round(r/2)
hc = round(c/2)
#print(sys.stderr, f'(2) -> {r}, {c}, {rb},{cb}')
# Copy the forward image and move to the center of the background image
imgrot = np.zeros((rb,cb,3),np.uint8)
imgrot[hrb-hr:hrb+hr,hcb-hc:hcb+hc,:] = img[:hr*2,:hc*2,:]
# Rotation and scaling
M = cv2.getRotationMatrix2D((hcb,hrb),angle,scale)
imgrot = cv2.warpAffine(imgrot,M,(cb,rb))
# Translation
M = np.float32([[1,0,x],[0,1,y]])
imgrot = cv2.warpAffine(imgrot,M,(cb,rb))
    # Making mask
imggray = cv2.cvtColor(imgrot,cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(imggray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
# Now black-out the area of the forward image in the background image
img1_bg = cv2.bitwise_and(imgback,imgback,mask = mask_inv)
# Take only region of the forward image.
img2_fg = cv2.bitwise_and(imgrot,imgrot,mask = mask)
# Paste the forward image on the background image
imgpaste = cv2.add(img1_bg,img2_fg)
return imgpaste
def apply_offsets_for_anime_mode(face_location, offsets):
x, y, width, height = face_location
x_off, y_off = offsets # x_off is ignored here.
### At first Top and Bottom are determined.
top = y - y_off
bottom = y + height + y_off
if top < 0:
top = 0
### determin x_off so as to make square.
new_height = bottom - top
x_off = int((new_height - width ) / 2)
### Then Left and Right are determined.
left = x - x_off
right = x + width + x_off
if left < 0 :
left = 0
### return
    return (left, right, top, bottom)
def preprocess_input(x, v2=True):
x = x.astype('float32')
x = x / 255.0
if v2:
x = x - 0.5
x = x * 2.0
return x
def edit_frame(frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_classifier_classifier.detectMultiScale(gray, 1.1, 5)
if mode == modes.ANIME_MODE:
if args.crop_face == True:
for (x,y,w,h) in faces[:1]:
#cv2.rectangle(frame,(x,y),(x+w,y+h),(255, 0, 0),2)
global anime_buffer_image, anime_frame_num, anime_fps_start, anime_fps, anime_frame_count
### new frame entry to process (raw frame)
anime_offsets = (60, 60)
x1, x2, y1, y2 = apply_offsets_for_anime_mode((x,y,w,h), anime_offsets)
anime_rgb = frame[y1:y2, x1:x2]
if len(faces) == 0:
#anime_rgb = np.zeros((256, 256, 3), np.uint8)
anime_rgb = None
else:
anime_rgb = frame
try:
cv2.imwrite('tmp.png',anime_rgb)
img = cv2.imread('tmp.png', flags=cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
anime_rgb = img
anime_mode_input_queue.put(anime_rgb)
except Exception as e:
### if exception occur put original frame
#anime_mode_input_queue.put(frame)
pass
### show edited frame
try:
new_frame = anime_mode_output_queue.get(block=False)
# to be shown frame(animated frame)
(old_frame, new_frame) = new_frame
old_frame = cv2.resize(old_frame, (50, 50))
if args.show_source == True:
new_frame = paste(old_frame, new_frame, +80, -80, 0, 1.0)
anime_fps_now = time.time()
if anime_fps_now - anime_fps_start > 5:
spend_time = anime_fps_now - anime_fps_start
anime_fps = round((anime_frame_num / spend_time),2)
anime_fps_start = anime_fps_now
anime_frame_num = 0
# for fps
font_scale=0.5
color = (200,200,200)
thickness=1
if args.show_fps == True:
cv2.putText(new_frame, f'fps:{anime_fps}',
(10,50),
cv2.FONT_HERSHEY_SIMPLEX,
font_scale, color, thickness, cv2.LINE_AA
)
anime_frame_count += 1
if anime_frame_count % args.skip_frame == 0:
anime_frame_count = 0
anime_buffer_image = new_frame
anime_frame_num += 1
except queue.Empty as e:
if anime_buffer_image is None:
anime_buffer_image = np.zeros((256, 256, 3), np.uint8)
pass
### If face is not detected, show previous frame or blank frame
if mode == modes.ANIME_MODE:
if anime_buffer_image is not None:
frame = anime_buffer_image
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
else:
frame = np.zeros((256, 256, 3), np.uint8)
return frame
if __name__=="__main__":
input = args.input_video_num
output = args.output_video_dev
cap = cv2.VideoCapture(input)
if args.anime_mode == True:
mode = modes.ANIME_MODE
else:
mode = modes.SIMPLE_SMILE_MODE
print(f'start with mode: {mode}')
load_resources(mode)
print('web camera hook start!')
p = Popen(['ffmpeg', '-y', '-i', '-', '-pix_fmt', 'yuyv422', '-f', 'v4l2', output], stdin=PIPE)
try:
if mode == modes.ANIME_MODE:
t = threading.Thread(target=anime_mode_worker)
t.start()
while True:
ret,im = cap.read()
im = edit_frame(im)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
im = Image.fromarray(np.uint8(im))
im.save(p.stdin, 'JPEG')
except KeyboardInterrupt:
pass
    if anime_session is not None:
        anime_session.close()
p.stdin.close()
p.wait()
print('web camera hook fin!')
|
py | 1a35f7f42017cafb50222c3c36e0a5e7a6d6f1ec | """
As Rigid as Possible Interpolation from a pair of Mesh structures
"""
from Escher.Geometry import Mesh
from typing import List
import numpy as np
import Escher.GeometryRoutines as geom
import Escher.AlgebraRoutines as alg
import logging
from scipy.linalg import block_diag
from scipy.spatial.transform import Slerp,Rotation
def interpolate(src_mesh:Mesh, tgt_mesh:Mesh, interval:float, fragment_resolution="quadratic") -> List[Mesh]:
"""
    Interpolate between two meshes that have corresponding vertices and the same topology.
    Arguments:
        src_mesh, tgt_mesh: source and target Mesh objects with matching topology.
        interval: a float between 0 and 1 giving the interpolation step size.
        fragment_resolution: how per-face transformations are resolved back into
            vertex positions; one of "average" or "quadratic".
"""
interpolated_meshes = []
src_face_batch = src_mesh.get_faces_as_matrices() # fx3x3
tgt_face_batch = tgt_mesh.get_faces_as_matrices()
per_face_slerp_instances = []
per_face_scales = []
identity_rotation = np.expand_dims(np.eye(3),0)
per_face_transformations = []
per_face_translations = []
for _index in range(src_face_batch.shape[0]):
src_face_matrix = src_face_batch[_index]
tgt_face_matrix = tgt_face_batch[_index]
src_tet_matrix = geom.tetrahedralize(src_face_matrix)
tgt_tet_matrix = geom.tetrahedralize(tgt_face_matrix)
mat_Q = alg.get_transformation_matrix_for((src_tet_matrix[:3,:]-src_tet_matrix[3,:]).T,
(tgt_tet_matrix[:3,:]-tgt_tet_matrix[3,:]).T)
face_translation = np.expand_dims(tgt_tet_matrix[3,:].T,-1) - (mat_Q @ np.expand_dims(src_tet_matrix[3,:].T,-1))
per_face_translations.append(face_translation.squeeze())
per_face_transformations.append(mat_Q)
R,S = alg.get_rotation_scale_from_transformation(mat_Q)
rotation_endpoints_matrix = np.concatenate([identity_rotation,np.expand_dims(R,0)],axis=0)
_slerp = Slerp(times=[0,1],rotations=Rotation.from_matrix(rotation_endpoints_matrix))
per_face_slerp_instances.append(_slerp)
per_face_scales.append(S)
if fragment_resolution == "average":
vertex_id_to_face_id = src_mesh.get_vertex_id_to_face_id()
number_of_faces_each_vertex = np.expand_dims(np.array([len(face_list) for face_list in vertex_id_to_face_id]),-1)
for t in np.arange(0,1+interval,interval):
new_vertices = np.zeros(src_mesh.vertices.shape)
for _index in range(src_face_batch.shape[0]):
interpolated_rotation_matrix_face = per_face_slerp_instances[_index]([t])[0].as_matrix()
interpolated_scale_matrix_face = (1-t)*np.eye(3) + t*per_face_scales[_index]
interpolated_transformation_matrix = interpolated_rotation_matrix_face @ interpolated_scale_matrix_face
interpolated_translation = t*per_face_translations[_index].T
src_face_matrix = src_face_batch[_index]
new_face_matrix = (interpolated_transformation_matrix @ src_face_matrix.T).T + interpolated_translation
face = src_mesh.faces[_index]
for i,vertex_id in enumerate(face):
new_vertices[vertex_id,:] += new_face_matrix[i,:]
new_vertices /= number_of_faces_each_vertex
interpolated_mesh = Mesh(vertices=new_vertices,faces=src_mesh.faces)
interpolated_meshes.append(interpolated_mesh)
elif fragment_resolution == "quadratic":
src_face_inverse_list = []
#mat_H = np.zeros((src_mesh.num_vertices-1,src_mesh.num_vertices-1))
mat_H = np.zeros((src_mesh.num_vertices,src_mesh.num_vertices))
fixed_vertex_id = 0 # this vertex id is fixed by linear interpolation,
# we don't solve for it. That is why the system has a solution.
vertex_orders = [0,1,2]
for face_index in range(src_face_batch.shape[0]):
src_face_matrix = src_face_batch[face_index,:,:].T
src_face_inverse = np.linalg.inv(src_face_matrix)
src_face_inverse_list.append(src_face_inverse)
face = src_mesh.faces[face_index]
for vertex_order_in_face,v_id in enumerate(face):
#if v_id == fixed_vertex_id:
# continue
other_vertex_orders = [order for order in vertex_orders if order!=vertex_order_in_face]
row_for_vertex = src_face_inverse[vertex_order_in_face,:]
quadratic_term = np.sum(np.square(row_for_vertex))
mat_H[v_id,v_id] += quadratic_term
#mat_H[v_id-1,v_id-1] += quadratic_term
for other_vertex_order_ in other_vertex_orders:
other_vertex_id = face[other_vertex_order_]
other_vertex_row = src_face_inverse[other_vertex_order_,:]
#if other_vertex_id == fixed_vertex_id:
# continue
#else:
mixed_term = np.dot(row_for_vertex,other_vertex_row)
mat_H[v_id,other_vertex_id] += mixed_term
#mat_H[v_id-1,other_vertex_id-1] += mixed_term
mat_H_inverse = np.linalg.inv(mat_H)
x_index = 0
y_index = 1
z_index = 2
src_fixed_vertex = np.expand_dims(src_mesh.vertices[fixed_vertex_id],0)
tgt_fixed_vertex = np.expand_dims(tgt_mesh.vertices[fixed_vertex_id],0)
for t in np.arange(0,1,interval):
#print(t,flush=True)
mat_Gx = np.zeros((src_mesh.num_vertices,1))
#mat_Gx = np.zeros((src_mesh.num_vertices-1,1))
mat_Gy = np.zeros((src_mesh.num_vertices,1))
#mat_Gy = np.zeros((src_mesh.num_vertices-1,1))
mat_Gz = np.zeros((src_mesh.num_vertices,1))
#mat_Gz = np.zeros((src_mesh.num_vertices-1,1))
interpolated_fixed_vertex = ((1-t)*src_fixed_vertex + t*tgt_fixed_vertex)
for face_index in range(src_face_batch.shape[0]):
interpolated_rotation_matrix_face = per_face_slerp_instances[face_index]([t])[0].as_matrix()
interpolated_scale_matrix_face = (1-t)*np.eye(3) + t*per_face_scales[face_index]
interpolated_transformation_matrix = interpolated_rotation_matrix_face @ interpolated_scale_matrix_face
face_inverse_matrix = src_face_inverse_list[face_index]
face = src_mesh.faces[face_index]
for vertex_order_in_face,v_id in enumerate(face):
if v_id == fixed_vertex_id:
continue
linear_term_x = np.dot(face_inverse_matrix[vertex_order_in_face,:],interpolated_transformation_matrix[x_index,:])
mat_Gx[v_id] += -1*linear_term_x
#mat_Gx[v_id-1] += -1*linear_term_x
linear_term_y = np.dot(face_inverse_matrix[vertex_order_in_face,:],interpolated_transformation_matrix[y_index,:])
mat_Gy[v_id] += -1*linear_term_y
#mat_Gy[v_id-1] += -1*linear_term_y
linear_term_z = np.dot(face_inverse_matrix[vertex_order_in_face,:],interpolated_transformation_matrix[z_index,:])
mat_Gz[v_id] += -1*linear_term_z
#mat_Gz[v_id-1] += -1*linear_term_z
'''
other_vertex_orders = [order for order in vertex_orders if order!=vertex_order_in_face]
row_for_vertex = face_inverse_matrix[vertex_order_in_face,:]
for other_vertex_order_ in other_vertex_orders:
other_vertex_id = face[other_vertex_order_]
other_vertex_row = face_inverse_matrix[other_vertex_order_,:]
if other_vertex_id == fixed_vertex_id:
fixed_term_x = 2*interpolated_fixed_vertex[0][0]*row_for_vertex[0]*other_vertex_row[0]
fixed_term_y = 2*interpolated_fixed_vertex[0][1]*row_for_vertex[1]*other_vertex_row[1]
fixed_term_z = 2*interpolated_fixed_vertex[0][2]*row_for_vertex[2]*other_vertex_row[2]
mat_Gx[v_id] += fixed_term_x
#mat_Gx[v_id-1] += fixed_term_x
mat_Gy[v_id] += fixed_term_y
#mat_Gy[v_id-1] += fixed_term_y
mat_Gz[v_id] += fixed_term_z
#mat_Gz[v_id-1] += fixed_term_z
'''
mat_G = np.hstack([mat_Gx,mat_Gy,mat_Gz])
interpolated_vertices = -1* (mat_H_inverse @ mat_G)
interpolated_translation = (1-t)*src_mesh.vertices + t*tgt_mesh.vertices #np.expand_dims(interpolated_fixed_vertex[0] - src_mesh.vertices[fixed_vertex_id],0) #t*tgt_mesh.vertices[fixed_vertex_id] + (1-t)* src_mesh.vertices[fixed_vertex_id]
#interpolated_translation = t*(tgt_mesh.vertices[fixed_vertex_id+1:,:] - src_mesh.vertices[fixed_vertex_id+1:,:])
interpolated_vertices += interpolated_translation
#interpolated_vertices = np.vstack([interpolated_fixed_vertex,other_interpolated_vertices])
interpolated_mesh = Mesh(vertices=interpolated_vertices,faces=src_mesh.faces)
interpolated_meshes.append(interpolated_mesh)
else:
logging.error("Given fragment resolution method unknown")
return interpolated_meshes
|
py | 1a35f882641f9873abe9e82ba4acb3eff01d42be | import abc
import itertools
from dataclasses import dataclass, field
from typing import (
Any, ClassVar, Dict, Tuple, Iterable, Optional, List, Callable,
)
from dbt.exceptions import InternalException
from dbt.utils import translate_aliases
from dbt.logger import GLOBAL_LOGGER as logger
from typing_extensions import Protocol
from dbt.dataclass_schema import (
dbtClassMixin, StrEnum, ExtensibleDbtClassMixin,
ValidatedStringMixin, register_pattern
)
from dbt.contracts.util import Replaceable
class Identifier(ValidatedStringMixin):
ValidationRegex = r'^[A-Za-z_][A-Za-z0-9_]+$'
# we need register_pattern for jsonschema validation
register_pattern(Identifier, r'^[A-Za-z_][A-Za-z0-9_]+$')
@dataclass
class AdapterResponse(dbtClassMixin):
_message: str
code: Optional[str] = None
rows_affected: Optional[int] = None
def __str__(self):
return self._message
class ConnectionState(StrEnum):
INIT = 'init'
OPEN = 'open'
CLOSED = 'closed'
FAIL = 'fail'
@dataclass(init=False)
class Connection(ExtensibleDbtClassMixin, Replaceable):
type: Identifier
name: Optional[str] = None
state: ConnectionState = ConnectionState.INIT
transaction_open: bool = False
_handle: Optional[Any] = None
_credentials: Optional[Any] = None
def __init__(
self,
type: Identifier,
name: Optional[str],
credentials: dbtClassMixin,
state: ConnectionState = ConnectionState.INIT,
transaction_open: bool = False,
handle: Optional[Any] = None,
) -> None:
self.type = type
self.name = name
self.state = state
self.credentials = credentials
self.transaction_open = transaction_open
self.handle = handle
@property
def credentials(self):
return self._credentials
@credentials.setter
def credentials(self, value):
self._credentials = value
@property
def handle(self):
if isinstance(self._handle, LazyHandle):
try:
# this will actually change 'self._handle'.
self._handle.resolve(self)
except RecursionError as exc:
raise InternalException(
"A connection's open() method attempted to read the "
"handle value"
) from exc
return self._handle
@handle.setter
def handle(self, value):
self._handle = value
class LazyHandle:
"""Opener must be a callable that takes a Connection object and opens the
connection, updating the handle on the Connection.
"""
def __init__(self, opener: Callable[[Connection], Connection]):
self.opener = opener
def resolve(self, connection: Connection) -> Connection:
logger.debug(
'Opening a new connection, currently in state {}'
.format(connection.state)
)
return self.opener(connection)
# see https://github.com/python/mypy/issues/4717#issuecomment-373932080
# and https://github.com/python/mypy/issues/5374
# for why we have type: ignore. Maybe someday dataclasses + abstract classes
# will work.
@dataclass # type: ignore
class Credentials(
ExtensibleDbtClassMixin,
Replaceable,
metaclass=abc.ABCMeta
):
database: str
schema: str
_ALIASES: ClassVar[Dict[str, str]] = field(default={}, init=False)
@abc.abstractproperty
def type(self) -> str:
raise NotImplementedError(
'type not implemented for base credentials class'
)
def connection_info(
self, *, with_aliases: bool = False
) -> Iterable[Tuple[str, Any]]:
"""Return an ordered iterator of key/value pairs for pretty-printing.
"""
as_dict = self.to_dict(omit_none=False)
connection_keys = set(self._connection_keys())
aliases: List[str] = []
if with_aliases:
aliases = [
k for k, v in self._ALIASES.items() if v in connection_keys
]
for key in itertools.chain(self._connection_keys(), aliases):
if key in as_dict:
yield key, as_dict[key]
@abc.abstractmethod
def _connection_keys(self) -> Tuple[str, ...]:
raise NotImplementedError
@classmethod
def __pre_deserialize__(cls, data):
data = super().__pre_deserialize__(data)
data = cls.translate_aliases(data)
return data
@classmethod
def translate_aliases(
cls, kwargs: Dict[str, Any], recurse: bool = False
) -> Dict[str, Any]:
return translate_aliases(kwargs, cls._ALIASES, recurse)
def __post_serialize__(self, dct):
# no super() -- do we need it?
if self._ALIASES:
dct.update({
new_name: dct[canonical_name]
for new_name, canonical_name in self._ALIASES.items()
if canonical_name in dct
})
return dct
class UserConfigContract(Protocol):
send_anonymous_usage_stats: bool
use_colors: Optional[bool] = None
partial_parse: Optional[bool] = None
printer_width: Optional[int] = None
def set_values(self, cookie_dir: str) -> None:
...
class HasCredentials(Protocol):
credentials: Credentials
profile_name: str
config: UserConfigContract
target_name: str
threads: int
def to_target_dict(self):
raise NotImplementedError('to_target_dict not implemented')
DEFAULT_QUERY_COMMENT = '''
{%- set comment_dict = {} -%}
{%- do comment_dict.update(
app='dbt',
dbt_version=dbt_version,
profile_name=target.get('profile_name'),
target_name=target.get('target_name'),
) -%}
{%- if node is not none -%}
{%- do comment_dict.update(
node_id=node.unique_id,
) -%}
{% else %}
{# in the node context, the connection name is the node_id #}
{%- do comment_dict.update(connection_name=connection_name) -%}
{%- endif -%}
{{ return(tojson(comment_dict)) }}
'''
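# When rendered, the default template above serializes to a JSON object along the
# lines of (values are illustrative only):
#   {"app": "dbt", "dbt_version": "0.19.0", "profile_name": "jaffle_shop",
#    "target_name": "dev", "node_id": "model.jaffle_shop.orders"}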
@dataclass
class QueryComment(dbtClassMixin):
comment: str = DEFAULT_QUERY_COMMENT
append: bool = False
class AdapterRequiredConfig(HasCredentials, Protocol):
project_name: str
query_comment: QueryComment
cli_vars: Dict[str, Any]
target_path: str
|
py | 1a35f9821768f654928844faa49d050c7597a7cf | #!/usr/bin/env python3
import torch
from enum import Enum
from inspect import signature
from .approximation_methods import SUPPORTED_METHODS
class ExpansionTypes(Enum):
repeat = 1
repeat_interleave = 2
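# Illustration of the two expansion modes for a batch tensor with rows [a, b] and
# n_steps = 2 (used by _expand_additional_forward_args / _expand_target below):
#   repeat             -> [a, b, a, b]
#   repeat_interleave  -> [a, a, b, b]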
def safe_div(denom, quotient, default_value=None):
r"""
    A simple utility that performs `denom / quotient` and returns `default_value`
    when `quotient` is zero and the division is undefined.
"""
return denom / quotient if quotient != 0.0 else default_value
def _validate_target(num_samples, target):
if isinstance(target, list) or (
isinstance(target, torch.Tensor) and torch.numel(target) > 1
):
assert num_samples == len(target), (
"The number of samples provied in the"
"input {} does not match with the number of targets. {}".format(
num_samples, len(target)
)
)
def _validate_input(
inputs,
baselines,
n_steps=50,
method="riemann_trapezoid",
draw_baseline_from_distrib=False,
):
assert len(inputs) == len(baselines), (
"Input and baseline must have the same "
"dimensions, baseline has {} features whereas input has {}.".format(
len(baselines), len(inputs)
)
)
for input, baseline in zip(inputs, baselines):
if draw_baseline_from_distrib:
assert (
isinstance(baseline, (int, float))
or input.shape[1:] == baseline.shape[1:]
), (
"The samples in input and baseline batches must have"
" the same shape or the baseline corresponding to the"
" input tensor must be a scalar."
" Found baseline: {} and input: {} ".format(baseline, input)
)
else:
assert (
isinstance(baseline, (int, float))
or input.shape == baseline.shape
or baseline.shape[0] == 1
), (
"Baseline can be provided as a tensor for just one input and"
" broadcasted to the batch or input and baseline must have the"
" same shape or the baseline corresponding to each input tensor"
" must be a scalar. Found baseline: {} and input: {}".format(
baseline, input
)
)
assert (
n_steps >= 0
), "The number of steps must be a positive integer. " "Given: {}".format(n_steps)
assert method in SUPPORTED_METHODS, (
"Approximation method must be one for the following {}. "
"Given {}".format(SUPPORTED_METHODS, method)
)
def _validate_noise_tunnel_type(nt_type, supported_noise_tunnel_types):
assert nt_type in supported_noise_tunnel_types, (
"Noise types must be either `smoothgrad`, `smoothgrad_sq` or `vargrad`. "
"Given {}".format(nt_type)
)
def _format_tensor_into_tuples(inputs):
if not isinstance(inputs, tuple):
assert isinstance(
inputs, torch.Tensor
), "`inputs` must have type " "torch.Tensor but {} found: ".format(type(inputs))
inputs = (inputs,)
return inputs
def _format_input(inputs):
return _format_tensor_into_tuples(inputs)
def _format_additional_forward_args(additional_forward_args):
if additional_forward_args is not None and not isinstance(
additional_forward_args, tuple
):
additional_forward_args = (additional_forward_args,)
return additional_forward_args
def _format_baseline(baselines, inputs):
if baselines is None:
return _zeros(inputs)
if not isinstance(baselines, tuple):
baselines = (baselines,)
for baseline in baselines:
assert isinstance(
baseline, (torch.Tensor, int, float)
), "baseline input argument must be either a torch.Tensor or a number \
however {} detected".format(
type(baseline)
)
return baselines
def _format_input_baseline(inputs, baselines):
inputs = _format_input(inputs)
baselines = _format_baseline(baselines, inputs)
return inputs, baselines
# This function can potentially be merged with the `format_baseline` function
# however, since currently not all algorithms support baselines of type
# callable this will be kept in a separate function.
def _format_callable_baseline(baselines, inputs):
if callable(baselines):
# Note: this assumes that if baselines is a function and if it takes
# arguments, then the first argument is the `inputs`.
# This can be expanded in the future with better type checks
baseline_parameters = signature(baselines).parameters
if len(baseline_parameters) == 0:
baselines = baselines()
else:
baselines = baselines(inputs)
return _format_baseline(baselines, inputs)
def _format_attributions(is_inputs_tuple, attributions):
r"""
    In case the input is a single tensor and the attributions are returned as a
    tuple, we take the first element of the attributions tuple so that it matches
    the shape signature of the input.
"""
assert isinstance(attributions, tuple), "Attributions must be in shape of a tuple"
assert is_inputs_tuple or len(attributions) == 1, (
"The input is a single tensor however the attributions aren't."
"The number of attributed tensors is: {}".format(len(attributions))
)
return attributions if is_inputs_tuple else attributions[0]
def _zeros(inputs):
r"""
Takes a tuple of tensors as input and returns a tuple that has the same
size as the `inputs` which contains zero tensors of the same
shape as the `inputs`
"""
return tuple(0.0 for input in inputs)
def _tensorize_baseline(inputs, baselines):
def _tensorize_single_baseline(baseline, input):
if isinstance(baseline, (int, float)):
return torch.full_like(input, baseline)
if input.shape[0] > baseline.shape[0] and baseline.shape[0] == 1:
return torch.cat([baseline] * input.shape[0])
return baseline
assert isinstance(inputs, tuple) and isinstance(baselines, tuple), (
"inputs and baselines must"
"have tuple type but found baselines: {} and inputs: {}".format(
type(baselines), type(inputs)
)
)
return tuple(
_tensorize_single_baseline(baseline, input)
for baseline, input in zip(baselines, inputs)
)
def _reshape_and_sum(tensor_input, num_steps, num_examples, layer_size):
# Used for attribution methods which perform integration
# Sums across integration steps by reshaping tensor to
# (num_steps, num_examples, (layer_size)) and summing over
# dimension 0. Returns a tensor of size (num_examples, (layer_size))
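    # e.g. with n_steps=3, num_examples=2 and layer_size=(4,), an input of shape
    # (6, 4) is viewed as (3, 2, 4) and the sum over dim 0 has shape (2, 4).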
return torch.sum(
tensor_input.reshape((num_steps, num_examples) + layer_size), dim=0
)
def _verify_select_column(output, target):
target = (target,) if isinstance(target, int) else target
assert (
len(target) <= len(output.shape) - 1
), "Cannot choose target column with output shape %r." % (output.shape,)
return output[(slice(None), *target)]
def _select_targets(output, target):
num_examples = output.shape[0]
dims = len(output.shape)
if target is None:
return output
elif isinstance(target, int) or isinstance(target, tuple):
return _verify_select_column(output, target)
elif isinstance(target, torch.Tensor):
if torch.numel(target) == 1 and isinstance(target.item(), int):
return _verify_select_column(output, target.item())
elif len(target.shape) == 1 and torch.numel(target) == num_examples:
assert dims == 2, "Output must be 2D to select tensor of targets."
return torch.gather(output, 1, target.reshape(len(output), 1))
else:
raise AssertionError(
"Tensor target dimension %r is not valid." % (target.shape,)
)
elif isinstance(target, list):
assert len(target) == num_examples, "Target list length does not match output!"
if type(target[0]) is int:
assert dims == 2, "Output must be 2D to select tensor of targets."
return torch.gather(output, 1, torch.tensor(target).reshape(len(output), 1))
elif type(target[0]) is tuple:
return torch.stack(
[output[(i,) + targ_elem] for i, targ_elem in enumerate(target)]
)
else:
raise AssertionError("Target element type in list is not valid.")
else:
raise AssertionError("Target type %r is not valid." % target)
def _run_forward(forward_func, inputs, target=None, additional_forward_args=None):
# make everything a tuple so that it is easy to unpack without
# using if-statements
inputs = _format_input(inputs)
additional_forward_args = _format_additional_forward_args(additional_forward_args)
output = forward_func(
*(*inputs, *additional_forward_args)
if additional_forward_args is not None
else inputs
)
return _select_targets(output, target)
def _expand_additional_forward_args(
additional_forward_args, n_steps, expansion_type=ExpansionTypes.repeat
):
def _expand_tensor_forward_arg(
additional_forward_arg, n_steps, expansion_type=ExpansionTypes.repeat
):
if len(additional_forward_arg.size()) == 0:
return additional_forward_arg
if expansion_type == ExpansionTypes.repeat:
return torch.cat([additional_forward_arg] * n_steps, dim=0)
elif expansion_type == ExpansionTypes.repeat_interleave:
return additional_forward_arg.repeat_interleave(n_steps, dim=0)
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
return tuple(
_expand_tensor_forward_arg(additional_forward_arg, n_steps, expansion_type)
if isinstance(additional_forward_arg, torch.Tensor)
else additional_forward_arg
for additional_forward_arg in additional_forward_args
)
def _expand_target(target, n_steps, expansion_type=ExpansionTypes.repeat):
if isinstance(target, list):
if expansion_type == ExpansionTypes.repeat:
return target * n_steps
elif expansion_type == ExpansionTypes.repeat_interleave:
expanded_target = []
for i in target:
expanded_target.extend([i] * n_steps)
return expanded_target
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
elif isinstance(target, torch.Tensor) and torch.numel(target) > 1:
if expansion_type == ExpansionTypes.repeat:
return torch.cat([target] * n_steps, dim=0)
elif expansion_type == ExpansionTypes.repeat_interleave:
return target.repeat_interleave(n_steps, dim=0)
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
return target
def _call_custom_attribution_func(
custom_attribution_func, multipliers, inputs, baselines
):
assert callable(custom_attribution_func), (
"`custom_attribution_func`"
" must be a callable function but {} provided".format(
type(custom_attribution_func)
)
)
custom_attr_func_params = signature(custom_attribution_func).parameters
assert len(custom_attr_func_params) in range(1, 4), (
"`custom_attribution_func`" " must take at least one and at most 3 arguments"
)
if len(custom_attr_func_params) == 1:
return custom_attribution_func(multipliers)
elif len(custom_attr_func_params) == 2:
return custom_attribution_func(multipliers, inputs)
elif len(custom_attr_func_params) == 3:
return custom_attribution_func(multipliers, inputs, baselines)
class MaxList:
"""Keep track of N maximal items
Implementation of MaxList:
for keeping track of the N top values of a large collection of items.
Maintains a sorted list of the top N items that can be fetched with
        get_list().
    Example use:
        ml = MaxList(2, key=lambda x: len(x))
        ml.add("Hello World")
        ml.add("Mermaid Man!!!!")
        ml.add("Why?")
        ml.get_list() -> ["Mermaid Man!!!!", "Hello World"]
    If storing values that are not comparable, please provide a key function
    that maps the values to some numeric value.
"""
def __init__(self, size, key=lambda x: x):
self.size = size
self.key = key
self.list = []
def add(self, item):
"""Add an element to the MaxList
Args:
item: the item that you want to add to the MaxList
"""
value = self.key(item)
if len(self.list) < self.size:
if len(self.list) == 0:
self.list.append((value, item))
elif self.list[-1][0] >= value:
self.list.append((value, item))
else:
self._insert(item, value)
        elif self.list[-1][0] < value:
self._insert(item, value)
def get_list(self):
"""Retrive the list of N maximal items in sorted order
Returns:
list: the sorted list of maximal items
"""
return [item[1] for item in self.list]
def _insert(self, item, value):
if len(self.list) == 0:
self.list.append((value, item))
for i in range(len(self.list)):
if self.list[i][0] < value:
self.list.insert(i, (value, item))
break
self.list = self.list[: self.size]
class Stat:
"""Keep track of statistics for a quantity that is measured live
Implementation of an online statistics tracker, Stat:
For a memory efficient way of keeping track of statistics on a large set of
numbers. Adding numbers to the object will update the values stored in the
object to reflect the statistics of all numbers that the object has seen
so far.
Example usage:
s = Stat()
        s.update([5, 7])
        s.get_mean() -> 6
        s.get_std() -> 1
"""
def __init__(self):
self.count = 0
self.mean = 0
self.mean_squared_error = 0
self.min = float("inf")
self.max = float("-inf")
def _std_size_check(self):
if self.count < 2:
            raise Exception(
                "Std/Variance is not defined for {} datapoints".format(self.count)
            )
def update(self, x):
"""Update the stats given a new number
Adds x to the running statistics being kept track of, and updates internal
        values that reflect that change.
Args:
x: a numeric value, or a list of numeric values
"""
if isinstance(x, list):
for value in x:
self.update(value)
else:
x = float(x)
self.min = min(self.min, x)
self.max = max(self.max, x)
self.count += 1
delta = x - self.mean
self.mean += delta / self.count
delta2 = x - self.mean
self.mean_squared_error += delta * delta2
def get_stats(self):
"""Retrieves a dictionary of statistics for the values seen.
Returns:
a fully populated dictionary for the statistics that have been
maintained. This output is easy to pipe into a table with a loop over
key value pairs.
"""
self._std_size_check()
sampleVariance = self.mean_squared_error / (self.count - 1)
Variance = self.mean_squared_error / self.count
return {
"mean": self.mean,
"sample_variance": sampleVariance,
"variance": Variance,
"std": Variance ** 0.5,
"min": self.min,
"max": self.max,
"count": self.count,
}
def get_std(self):
"""get the std of the statistics kept"""
self._std_size_check()
return (self.mean_squared_error / self.count) ** 0.5
def get_variance(self):
"""get the variance of the statistics kept"""
self._std_size_check()
return self.mean_squared_error / self.count
def get_sample_variance(self):
"""get the sample variance of the statistics kept"""
self._std_size_check()
return self.mean_squared_error / (self.count - 1)
def get_mean(self):
"""get the mean of the statistics kept"""
return self.mean
def get_max(self):
"""get the max of the statistics kept"""
return self.max
def get_min(self):
"""get the min of the statistics kept"""
return self.min
def get_count(self):
"""get the count of the statistics kept"""
return self.count
|
py | 1a35f9e7bff216e478bea66a6d8003613fa41882 | import unittest
import matplotlib
import pkmodel as pk
class SolutionTest(unittest.TestCase):
"""
Tests the :class:`Solution` class.
"""
def test_create(self):
"""
Tests Solution creation.
"""
protocol = pk.Protocol("test", 3, 1.1, 2.2, 3.3, 4.4, 5.5, 6, 7, 8, False)
models = [pk.ThreeCompartmentModel(protocol)]
solution = pk.Solution(models, True)
self.assertEqual(solution.models[0], models[0])
def test_graph(self):
"""
Tests Solution graph method.
"""
models = [
pk.TwoCompartmentModel(pk.Protocol("test", 2, 1.1, 2.2, 3.3, 4.4, 5.5, 0, 7, 8, False)),
pk.TwoCompartmentModel(pk.Protocol("test", 2, 1.1, 2.2, 3.3, 4.4, 5.5, 6, 0, 8, False)),
pk.TwoCompartmentModel(pk.Protocol("test", 2, 1.1, 2.2, 3.3, 4.4, 5.5, 6, 7, 8, False)),
pk.ThreeCompartmentModel(pk.Protocol("test", 3, 1.1, 2.2, 3.3, 4.4, 5.5, 0, 7, 8, False)),
pk.ThreeCompartmentModel(pk.Protocol("test", 3, 1.1, 2.2, 3.3, 4.4, 5.5, 6, 0, 8, False)),
pk.ThreeCompartmentModel(pk.Protocol("test", 3, 1.1, 2.2, 3.3, 4.4, 5.5, 6, 7, 8, False)),
]
matplotlib.use("Agg")
solution = pk.Solution(models, False)
solution.graph()
|
py | 1a35fa2664e9866212a65ce7e571d6f77a6c4e41 | """
war
War card game written for fun while following the 'Complete Python Developer Certification Course' by Imtiaz Ahmad, on Udemy.
"""
import sys
from setuptools import setup, find_packages
import versioneer
short_description = __doc__.split("\n")
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
try:
with open("README.md", "r") as handle:
long_description = handle.read()
except Exception:
long_description = "\n".join(short_description[2:])
setup(
# Self-descriptive entries which should always be present
name='war',
author='Dumitru-Claudiu Sergentu',
author_email='[email protected]',
description=short_description[0],
long_description=long_description,
long_description_content_type="text/markdown",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='BSD-3-Clause',
# Which Python importable modules should be included when your package is installed
# Handled automatically by setuptools. Use 'exclude' to prevent some specific
# subpackage(s) from being added, if needed
packages=find_packages(),
# Optional include package data to ship with your package
# Customize MANIFEST.in if the general case does not suit your needs
# Comment out this line to prevent the files from being packaged with your software
include_package_data=True,
# Allows `setup.py test` to work correctly with pytest
setup_requires=[] + pytest_runner,
    # Additional entries you may want; simply uncomment the lines you want and fill in the data
# url='http://www.my_package.com', # Website
# install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment
# platforms=['Linux',
# 'Mac OS-X',
# 'Unix',
# 'Windows'], # Valid platforms your code works on, adjust to your flavor
# python_requires=">=3.5", # Python version restrictions
# Manual control if final package is compressible or not, set False to prevent the .egg from being made
# zip_safe=False,
)
|
py | 1a35fa60f89f8fa15caaba545cc937d187aebabe | __author__ = 'haukurk'
class DefaultConfig(object):
"""
Default Config (Is used when RESTAPICONFIG environment variable is not set)
"""
APP_NAME = 'rest-api'
DEBUG = False
LOG_LEVEL = 'WARNING'
LOG_DIR = 'logs/'
SQLALCHEMY_DATABASE_URI = "sqlite:///database/api.db"
SECRET_KEY = "Ch4ng3M3!"
class Development(DefaultConfig):
"""
Config class for development.
"""
DEBUG = True
LOG_LEVEL = 'INFO'
SQLALCHEMY_DATABASE_URI = "sqlite:///database/api_test.db"
class UnitTesting(DefaultConfig):
"""
Config class for unittests
"""
DEBUG = True
LOG_LEVEL = 'INFO'
SQLALCHEMY_DATABASE_URI = "sqlite:///database/api_unittest.db" |
py | 1a35fab30e2e5c6c0ac56fb7a375239646d8bded | from dissononce.processing.handshakepatterns.handshakepattern import HandshakePattern
class IK1HandshakePattern(HandshakePattern):
    def __init__(self):
super(IK1HandshakePattern, self).__init__(
'IK1',
responder_pre_message_pattern=('s',),
message_patterns=(
('e', 's'),
('e', 'ee', 'se', 'es')
)
)
|
py | 1a35fae3221c0d900b48fdb814556ab1dc354213 | import unittest
from dockermatrix import *
import semver
class ImageTest(unittest.TestCase):
def setUp(self):
self.image = Image({'1', '1.0', '1.0.0'}, 'images/1.0')
def test_has_tags(self):
self.assertSetEqual({'1', '1.0', '1.0.0'}, self.image.tags)
def test_has_path(self):
self.assertEqual('images/1.0', self.image.path)
class ImageBuildTest(unittest.TestCase):
def setUp(self):
self.build = ImageBuild(semver.VersionInfo(1, 0, 0, None, None), ('option', None, 'other_option'))
def test_has_version(self):
self.assertTupleEqual(semver.VersionInfo(1, 0, 0, None, None), self.build.version)
def test_has_options(self):
self.assertTupleEqual(('option', None, 'other_option'), self.build.options)
def test_returns_formatted_version(self):
self.assertEqual('1.0.0', self.build.get_formatted_version())
def test_filters_options(self):
self.assertListEqual(['option', 'other_option'], self.build.filter_options())
class BuildMatrixTest(unittest.TestCase):
def setUp(self):
builds = {
ImageBuild(semver.VersionInfo(1, 0, 0, None, None), ('option',)),
ImageBuild(semver.VersionInfo(1, 0, 0, None, None), (None,)),
}
self.matrix = BuildMatrix(builds)
def test_has_the_latest_versions(self):
self.assertDictEqual(
{'1.0.0': {'1', '1.0'}},
self.matrix.latest
)
def test_detects_the_latest_versions(self):
builds = {
ImageBuild(semver.VersionInfo(1, 0, 0, None, None), ()),
ImageBuild(semver.VersionInfo(1, 0, 1, None, None), ()),
ImageBuild(semver.VersionInfo(1, 1, 0, None, None), ()),
ImageBuild(semver.VersionInfo(2, 0, 0, None, None), ()),
}
self.matrix = BuildMatrix(builds)
self.assertDictEqual(
{'1.0.1': {'1.0'}, '1.1.0': {'1', '1.1'}, '2.0.0': {'2', '2.0'}},
self.matrix.latest
)
def test_builds_matrix(self):
images = list(self.matrix.build('dist'))
self.assertIsInstance(images[0][0], ImageBuild)
self.assertIsInstance(images[0][1], Image)
self.assertIsInstance(images[1][0], ImageBuild)
self.assertIsInstance(images[1][1], Image)
|
py | 1a35fbbb3343fbf188fb5fb9b34423ea8d54dcff | import matplotlib.pyplot as plt
from .artists import kdeplot_op, kde2plot_op
def kdeplot(data, ax=None):
if ax is None:
_, ax = plt.subplots(1, 1, squeeze=True)
kdeplot_op(ax, data)
return ax
def kde2plot(x, y, grid=200, ax=None, **kwargs):
if ax is None:
_, ax = plt.subplots(1, 1, squeeze=True)
kde2plot_op(ax, x, y, grid, **kwargs)
return ax
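# Illustrative usage sketch (hedged): how the two helpers above might be called once
# this module is importable; numpy and the variable names below are assumptions, not
# part of the original file.
#
#     import numpy as np
#     data = np.random.randn(1000)
#     ax = kdeplot(data)              # 1-D kernel density estimate on a fresh axis
#     x, y = np.random.randn(2, 500)
#     ax2 = kde2plot(x, y, grid=100)  # 2-D KDE evaluated on a 100x100 grid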
|
py | 1a35fbbe15ef360f7669c3b550c7aa8cd377c8df | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for python and fast tokenizers. Fast tokenizers are provided by HuggingFace's tokenizers library."""
import copy
import functools
import itertools
import json
import logging
import operator
import os
import re
import warnings
from collections import UserDict, defaultdict
from contextlib import contextmanager
from enum import Enum
from typing import Any, Dict, List, MutableMapping, NamedTuple, Optional, Sequence, Tuple, Union
import numpy as np
from tokenizers import AddedToken as AddedTokenFast
from tokenizers import Encoding as EncodingFast
from tokenizers.decoders import Decoder as DecoderFast
from tokenizers.implementations import BaseTokenizer as BaseTokenizerFast
from .file_utils import cached_path, hf_bucket_url, is_remote_url, is_tf_available, is_torch_available, torch_required
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.getLogger(__name__)
NO_PAD_TOKEN_FOR_BATCH_MSG = (
"No padding token is set for this model, therefore no batch can be made with uneven "
"sequences. Set a padding token or adjust the lengths of the sequences building the "
"batch so that every sequence is of the same length."
)
UNEVEN_SEQUENCES_FOR_BATCH_MSG = (
"The sequences building the batch are not of the same size, no tensor "
"can be built. Set `pad_to_max_length=True` to pad the smaller sequences"
"up to the larger sequence's length."
)
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
VERY_LARGE_INTEGER = int(1e30) # This is used to set the max input length for a model with infinite size input
LARGE_INTEGER = int(1e20) # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER
# Define type aliases and NamedTuples
TextInput = str
PreTokenizedInput = List[str]
EncodedInput = List[int]
TextInputPair = Tuple[str, str]
PreTokenizedInputPair = Tuple[List[str], List[str]]
EncodedInputPair = Tuple[List[int], List[int]]
class TensorType(Enum):
PYTORCH = "pt"
TENSORFLOW = "tf"
NUMPY = "np"
class CharSpan(NamedTuple):
""" Character span in the original string
Args:
start: index of the first character in the original string
end: index of the character following the last character in the original string
"""
start: int
end: int
class TokenSpan(NamedTuple):
""" Token span in an encoded string (list of tokens)
Args:
start: index of the first token in the span
end: index of the token following the last token in the span
"""
start: int
end: int
def flatten(x: Sequence):
"""
Flatten the provided (potentially nested) sequence
Args:
x (Sequence): Potentially nested sequence to flatten
Returns:
list: Flattened sequence
"""
return functools.reduce(operator.iconcat, x, [])
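# Illustrative example of the helper above (not part of the original module):
#     flatten([[1, 2], [3], (4, 5)]) -> [1, 2, 3, 4, 5]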
@contextmanager
def truncate_and_pad(
tokenizer: BaseTokenizerFast,
max_length: int,
stride: int,
strategy: str,
pad_to_max_length: bool,
padding_side: str,
pad_token_id: int,
pad_token_type_id: int,
pad_token: str,
):
""" This contextmanager is in charge of defining the truncation and the padding strategies for fast tokenizers
(provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards.
    This contextmanager assumes the provided tokenizer has no padding / truncation strategy
    before the managed section. If your tokenizer had a padding / truncation strategy set before,
then it will be reset to no padding/truncation when exiting the managed section.
Args:
tokenizer (BaseTokenizerFast): The tokenizer which will be used
max_length (int): The maximum size of the sequence
stride (int): The stride to use when handling overflow
strategy (str): Overflowing logic to use
pad_to_max_length (bool): Boolean indicating if the output needs to be padded up to max_length
padding_side (str): "left" or "right" indicating the direction the output sequence will be padded
pad_token_id (int): The integer representation of the padding token to use
pad_token_type_id (int): The integer representation of the padding token type to use
pad_token (str): The string representation of the padding token to use
"""
# Handle all the truncation and padding stuff
if max_length is not None:
tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)
if pad_to_max_length and (pad_token and pad_token_id >= 0):
tokenizer.enable_padding(
max_length=max_length,
direction=padding_side,
pad_id=pad_token_id,
pad_type_id=pad_token_type_id,
pad_token=pad_token,
)
elif pad_to_max_length:
logger.warning(
"Disabled padding because no padding token set (pad_token: {}, pad_token_id: {}).\n"
"To remove this error, you can add a new pad token and then resize model embedding:\n"
"\ttokenizer.pad_token = '<PAD>'\n\tmodel.resize_token_embeddings(len(tokenizer))".format(
pad_token, pad_token_id
)
)
yield
# TODO(morgan, anthony): once we have a simple way to serialize tokenizers maybe store and restore the state afterward
# to avoid destructing the padding / truncation strategy as we do now.
if max_length is not None:
tokenizer.no_truncation()
if pad_to_max_length and (pad_token and pad_token_id >= 0):
tokenizer.no_padding()
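# Illustrative usage sketch (hedged) of the context manager above, as it would be
# driven from a fast-tokenizer encoding path. `backend_tokenizer` and `texts` are
# hypothetical names, not defined in this module.
#
#     with truncate_and_pad(
#         tokenizer=backend_tokenizer,   # a HuggingFace `tokenizers` BaseTokenizer
#         max_length=128,
#         stride=0,
#         strategy="longest_first",
#         pad_to_max_length=True,
#         padding_side="right",
#         pad_token_id=0,
#         pad_token_type_id=0,
#         pad_token="[PAD]",
#     ):
#         encodings = backend_tokenizer.encode_batch(texts)
#     # on exit, the truncation/padding settings are reset to no truncation/padding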
def convert_to_tensors(
batch_outputs: MutableMapping, return_tensors: Union[str, TensorType], prepend_batch_axis: bool = False
) -> MutableMapping:
# Convert to TensorType
if not isinstance(return_tensors, TensorType):
return_tensors = TensorType(return_tensors)
# Get a function reference for the correct framework
if return_tensors == TensorType.TENSORFLOW and is_tf_available():
as_tensor = tf.constant
elif return_tensors == TensorType.PYTORCH and is_torch_available():
as_tensor = torch.tensor
elif return_tensors == TensorType.NUMPY:
as_tensor = np.asarray
else:
raise ImportError(
"Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format(
return_tensors
)
)
# Do the tensor conversion in batch
for key, value in batch_outputs.items():
try:
if prepend_batch_axis:
value = [value]
tensor = as_tensor(value)
            # normalize to 2-D: drop a redundant leading axis or add a batch axis
if tensor.ndim > 2:
tensor = tensor.squeeze(0)
elif tensor.ndim < 2:
tensor = tensor[None, :]
batch_outputs[key] = tensor
except ValueError:
if None in [item for sequence in value for item in sequence]:
raise ValueError(NO_PAD_TOKEN_FOR_BATCH_MSG)
else:
raise ValueError(UNEVEN_SEQUENCES_FOR_BATCH_MSG)
return batch_outputs
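# Illustrative sketch (hedged) of the helper above: it converts the python lists
# produced by batch encoding into framework tensors in place. The ids below are
# example values only.
#
#     batch = {"input_ids": [[101, 7592, 102], [101, 2088, 102]]}
#     batch = convert_to_tensors(batch, return_tensors="np")
#     # batch["input_ids"] is now a (2, 3) numpy array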
class BatchEncoding(UserDict):
""" BatchEncoding hold the output of the encode and batch_encode methods (tokens, attention_masks, etc).
This class is derived from a python Dictionary and can be used as a dictionnary.
In addition, this class expose utility methods to map from word/char space to token space.
Args:
data (:obj:`dict`): Dictionary of lists/arrays returned by the encode/batch_encode methods ('input_ids', 'attention_mask'...)
encoding (:obj:`EncodingFast`, :obj:`list(EncodingFast)`, `optional`, defaults to :obj:`None`):
            If the tokenizer is a fast tokenizer which outputs additional information, such as the mapping from word/char space to token space,
            the `EncodingFast` instance or list of instances (for batches) holds this information.
"""
def __init__(
self,
data: Optional[Dict[str, Any]] = None,
encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None,
):
super().__init__(data)
if isinstance(encoding, EncodingFast):
encoding = [encoding]
self._encodings = encoding
def __getitem__(self, item: Union[int, str]) -> EncodingFast:
""" If the key is a string, get the value of the dict associated to `key` ('input_ids', 'attention_mask'...)
If the key is an integer, get the EncodingFast for batch item with index `key`
"""
if isinstance(item, str):
return self.data[item]
elif self._encodings is not None:
return self._encodings[item]
else:
raise KeyError(
"Indexing with integers (to access backend Encoding for a given batch index) "
"is not available when using Python based tokenizers"
)
def __getattr__(self, item: str):
try:
return self.data[item]
except KeyError:
raise AttributeError
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def items(self):
return self.data.items()
# After this point:
# Extended properties and methods only available for fast (Rust-based) tokenizers
# provided by HuggingFace tokenizers library.
@property
def encodings(self) -> Optional[List[EncodingFast]]:
"""
Return the list all encoding from the tokenization process
Returns: List[EncodingFast] or None if input was tokenized through Python (i.e. not fast) tokenizer
"""
return self._encodings
def tokens(self, batch_index: int = 0) -> List[int]:
if not self._encodings:
raise ValueError("tokens() is not available when using Python based tokenizers")
return self._encodings[batch_index].tokens
def words(self, batch_index: int = 0) -> List[Optional[int]]:
if not self._encodings:
raise ValueError("words() is not available when using Python based tokenizers")
return self._encodings[batch_index].words
def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
""" Get the index of the word corresponding (i.e. comprising) to an encoded token
in a sequence of the batch.
Can be called as:
- self.token_to_word(token_index) if batch size is 1
- self.token_to_word(batch_index, token_index) if batch size is greater than 1
This method is particularly suited when the input sequences are provided as
        pre-tokenized sequences (i.e. words are defined by the user). In this case it makes it easy
        to associate encoded tokens with the provided tokenized words.
Args:
batch_or_token_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the token in the sequence
token_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_token_index`, this can be the index
of the token in the sequence.
Returns:
word_index (:obj:`int`):
index of the word in the input sequence.
"""
if not self._encodings:
raise ValueError("token_to_word() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
if batch_index < 0:
batch_index = self._batch_size + batch_index
if token_index < 0:
token_index = self._seq_len + token_index
return self._encodings[batch_index].token_to_word(token_index)
def word_to_tokens(self, batch_or_word_index: int, word_index: Optional[int] = None) -> TokenSpan:
""" Get the encoded token span corresponding to a word in the sequence of the batch.
Token spans are returned as a TokenSpan NamedTuple with:
start: index of the first token
end: index of the token following the last token
Can be called as:
- self.word_to_tokens(word_index) if batch size is 1
- self.word_to_tokens(batch_index, word_index) if batch size is greater or equal to 1
This method is particularly suited when the input sequences are provided as
        pre-tokenized sequences (i.e. words are defined by the user). In this case it makes it easy
        to associate encoded tokens with the provided tokenized words.
Args:
batch_or_word_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the word in the sequence
word_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_token_index`, this can be the index
of the word in the sequence.
Returns:
token_span (:obj:`TokenSpan`):
Span of tokens in the encoded sequence.
TokenSpan are NamedTuple with:
start: index of the first token
end: index of the token following the last token
"""
if not self._encodings:
raise ValueError("word_to_tokens() is not available when using Python based tokenizers")
if word_index is not None:
batch_index = batch_or_word_index
else:
batch_index = 0
word_index = batch_or_word_index
if batch_index < 0:
batch_index = self._batch_size + batch_index
if word_index < 0:
word_index = self._seq_len + word_index
return TokenSpan(*(self._encodings[batch_index].word_to_tokens(word_index)))
def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan:
""" Get the character span corresponding to an encoded token in a sequence of the batch.
Character spans are returned as a CharSpan NamedTuple with:
start: index of the first character in the original string associated to the token
end: index of the character following the last character in the original string associated to the token
Can be called as:
- self.token_to_chars(token_index) if batch size is 1
- self.token_to_chars(batch_index, token_index) if batch size is greater or equal to 1
Args:
batch_or_token_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the token in the sequence
token_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_token_index`, this can be the index
of the token or tokens in the sequence.
Returns:
char_span (:obj:`CharSpan`):
Span of characters in the original string.
CharSpan are NamedTuple with:
start: index of the first character in the original string
end: index of the character following the last character in the original string
"""
if not self._encodings:
raise ValueError("token_to_chars() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
return CharSpan(*(self._encodings[batch_index].token_to_chars(token_index)))
def char_to_token(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int:
""" Get the index of the token in the encoded output comprising a character
in the original string for a sequence of the batch.
Can be called as:
- self.char_to_token(char_index) if batch size is 1
- self.char_to_token(batch_index, char_index) if batch size is greater or equal to 1
This method is particularly suited when the input sequences are provided as
        pre-tokenized sequences (i.e. words are defined by the user). In this case it makes it easy
        to associate encoded tokens with the provided tokenized words.
Args:
batch_or_char_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the word in the sequence
char_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_token_index`, this can be the index
of the word in the sequence.
Returns:
token_index (:obj:`int`):
Index of the token.
"""
if not self._encodings:
raise ValueError("char_to_token() is not available when using Python based tokenizers")
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return self._encodings[batch_index].char_to_token(char_index)
def word_to_chars(self, batch_or_word_index: int, word_index: Optional[int] = None) -> CharSpan:
""" Get the character span in the original string corresponding to given word in a sequence
of the batch.
Character spans are returned as a CharSpan NamedTuple with:
start: index of the first character in the original string
end: index of the character following the last character in the original string
Can be called as:
- self.word_to_chars(word_index) if batch size is 1
- self.word_to_chars(batch_index, word_index) if batch size is greater or equal to 1
Args:
batch_or_word_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the word in the sequence
word_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_token_index`, this can be the index
of the word in the sequence.
Returns:
char_span (:obj:`CharSpan` or :obj:`List[CharSpan]`):
Span(s) of the associated character or characters in the string.
CharSpan are NamedTuple with:
start: index of the first character associated to the token in the original string
end: index of the character following the last character associated to the token in the original string
"""
if not self._encodings:
raise ValueError("word_to_chars() is not available when using Python based tokenizers")
if word_index is not None:
batch_index = batch_or_word_index
else:
batch_index = 0
word_index = batch_or_word_index
return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index)))
def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int:
""" Get the word in the original string corresponding to a character in the original string of
a sequence of the batch.
Can be called as:
- self.char_to_word(char_index) if batch size is 1
- self.char_to_word(batch_index, char_index) if batch size is greater than 1
This method is particularly suited when the input sequences are provided as
        pre-tokenized sequences (i.e. words are defined by the user). In this case it makes it easy
        to associate encoded tokens with the provided tokenized words.
Args:
batch_or_char_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
                this can be the index of the character in the original string.
char_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_token_index`, this can be the index
                of the character in the original string.
Returns:
            word_index (:obj:`int` or :obj:`List[int]`):
                Index or indices of the word(s) associated with the character(s) in the input sequence.
"""
if not self._encodings:
raise ValueError("char_to_word() is not available when using Python based tokenizers")
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return self._encodings[batch_index].char_to_word(char_index)
@torch_required
def to(self, device: str):
"""Send all values to device by calling v.to(device)"""
self.data = {k: v.to(device) for k, v in self.data.items()}
return self
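# Illustrative sketch (hedged) of the offset-mapping helpers defined on BatchEncoding
# above. They require a fast (Rust-backed) tokenizer; `fast_tokenizer` is a
# hypothetical instance of such a tokenizer, not something defined in this module.
#
#     encoding = fast_tokenizer.encode_plus("Hello world")  # returns a BatchEncoding
#     encoding.tokens()            # backend tokens for batch item 0
#     encoding.token_to_word(1)    # index of the word containing token 1
#     encoding.word_to_tokens(0)   # TokenSpan(start, end) covering word 0
#     encoding.token_to_chars(1)   # CharSpan(start, end) in the original string
#     encoding.char_to_token(6)    # index of the token covering character 6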
class SpecialTokensMixin:
""" SpecialTokensMixin is derived by ``PreTrainedTokenizer`` and ``PreTrainedTokenizerFast`` and
handles specific behaviors related to special tokens. In particular, this class hold the
attributes which can be used to directly access to these special tokens in a
model-independant manner and allow to set and update the special tokens.
"""
SPECIAL_TOKENS_ATTRIBUTES = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
"additional_special_tokens",
]
def __init__(self, **kwargs):
self._bos_token = None
self._eos_token = None
self._unk_token = None
self._sep_token = None
self._pad_token = None
self._cls_token = None
self._mask_token = None
self._pad_token_type_id = 0
self._additional_special_tokens = []
for key, value in kwargs.items():
if key in self.SPECIAL_TOKENS_ATTRIBUTES:
if key == "additional_special_tokens":
assert isinstance(value, (list, tuple)) and all(isinstance(t, str) for t in value)
setattr(self, key, value)
elif isinstance(value, AddedTokenFast):
setattr(self, key, str(value))
elif isinstance(value, str):
setattr(self, key, value)
else:
raise TypeError(
"special token {} has to be either str or AddedTokenFast but got: {}".format(key, type(value))
)
@property
def bos_token(self):
""" Beginning of sentence token (string). Log an error if used while not having been set. """
if self._bos_token is None:
logger.error("Using bos_token, but it is not set yet.")
return self._bos_token
@property
def eos_token(self):
""" End of sentence token (string). Log an error if used while not having been set. """
if self._eos_token is None:
logger.error("Using eos_token, but it is not set yet.")
return self._eos_token
@property
def unk_token(self):
""" Unknown token (string). Log an error if used while not having been set. """
if self._unk_token is None:
logger.error("Using unk_token, but it is not set yet.")
return self._unk_token
@property
def sep_token(self):
""" Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
if self._sep_token is None:
logger.error("Using sep_token, but it is not set yet.")
return self._sep_token
@property
def pad_token(self):
""" Padding token (string). Log an error if used while not having been set. """
if self._pad_token is None:
logger.error("Using pad_token, but it is not set yet.")
return self._pad_token
@property
def cls_token(self):
""" Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
if self._cls_token is None:
logger.error("Using cls_token, but it is not set yet.")
return self._cls_token
@property
def mask_token(self):
""" Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
if self._mask_token is None:
logger.error("Using mask_token, but it is not set yet.")
return self._mask_token
@property
def additional_special_tokens(self):
""" All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. """
if self._additional_special_tokens is None:
logger.error("Using additional_special_tokens, but it is not set yet.")
return self._additional_special_tokens
def _maybe_update_backend(self, value):
""" To be overriden by derived class if a backend tokenizer has to be updated. """
pass
@bos_token.setter
def bos_token(self, value):
self._bos_token = value
self._maybe_update_backend([value])
@eos_token.setter
def eos_token(self, value):
self._eos_token = value
self._maybe_update_backend([value])
@unk_token.setter
def unk_token(self, value):
self._unk_token = value
self._maybe_update_backend([value])
@sep_token.setter
def sep_token(self, value):
self._sep_token = value
self._maybe_update_backend([value])
@pad_token.setter
def pad_token(self, value):
self._pad_token = value
self._maybe_update_backend([value])
@cls_token.setter
def cls_token(self, value):
self._cls_token = value
self._maybe_update_backend([value])
@mask_token.setter
def mask_token(self, value):
self._mask_token = value
self._maybe_update_backend([value])
@additional_special_tokens.setter
def additional_special_tokens(self, value):
self._additional_special_tokens = value
self._maybe_update_backend(value)
@property
def bos_token_id(self):
""" Id of the beginning of sentence token in the vocabulary. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.bos_token)
@property
def eos_token_id(self):
""" Id of the end of sentence token in the vocabulary. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.eos_token)
@property
def unk_token_id(self):
""" Id of the unknown token in the vocabulary. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.unk_token)
@property
def sep_token_id(self):
""" Id of the separation token in the vocabulary. E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.sep_token)
@property
def pad_token_id(self):
""" Id of the padding token in the vocabulary. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.pad_token)
@property
def pad_token_type_id(self):
""" Id of the padding token type in the vocabulary."""
return self._pad_token_type_id
@property
def cls_token_id(self):
""" Id of the classification token in the vocabulary. E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.cls_token)
@property
def mask_token_id(self):
""" Id of the mask token in the vocabulary. E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.mask_token)
@property
def additional_special_tokens_ids(self):
""" Ids of all the additional special tokens in the vocabulary (list of integers). Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.additional_special_tokens)
@property
def special_tokens_map(self):
""" A dictionary mapping special token class attribute (cls_token, unk_token...) to their
values ('<unk>', '<cls>'...)
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = attr_value
return set_attr
@property
def all_special_tokens(self):
""" List all the special tokens ('<unk>', '<cls>'...) mapped to class attributes
(cls_token, unk_token...).
"""
all_toks = []
set_attr = self.special_tokens_map
for attr_value in set_attr.values():
all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value])
all_toks = list(set(all_toks))
return all_toks
@property
def all_special_ids(self):
""" List the vocabulary indices of the special tokens ('<unk>', '<cls>'...) mapped to
class attributes (cls_token, unk_token...).
"""
all_toks = self.all_special_tokens
all_ids = self.convert_tokens_to_ids(all_toks)
return all_ids
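# Illustrative sketch (hedged) of the special-token attributes exposed by the mixin
# above; `tokenizer` stands for any concrete tokenizer instance inheriting from it,
# and "[PAD]"/"[CLS]" are example values.
#
#     tokenizer.pad_token            # string form, e.g. "[PAD]" (logs an error if unset)
#     tokenizer.pad_token_id         # integer id looked up in the vocabulary
#     tokenizer.special_tokens_map   # {"pad_token": "[PAD]", "cls_token": "[CLS]", ...}
#     tokenizer.all_special_tokens   # de-duplicated list of all special token strings
#     tokenizer.all_special_ids      # their vocabulary ids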
class PreTrainedTokenizer(SpecialTokensMixin):
""" Base class for all tokenizers.
    Handles all the shared methods for tokenization and special tokens, as well as methods for
    downloading/caching/loading pretrained tokenizers and for adding tokens to the vocabulary.
    This class also contains the added tokens in a unified way on top of all tokenizers so we don't
have to handle the specific vocabulary augmentation methods of the various underlying
dictionary structures (BPE, sentencepiece...).
Class attributes (overridden by derived classes):
- ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file
required by the model, and as associated values, the filename for saving the associated file (string).
- ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys
being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the
`short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the
associated pretrained vocabulary file.
- ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained
models, and as associated values, the maximum length of the sequence inputs of this model, or None if the
model has no maximum input size.
- ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the
          pretrained models, and as associated values, a dictionary of specific arguments to pass to the
``__init__``method of the tokenizer class for this pretrained model when loading the tokenizer with the
``from_pretrained()`` method.
Args:
- ``model_max_length``: (`Optional`) int: the maximum length in number of tokens for the inputs to the transformer model.
When the tokenizer is loaded with `from_pretrained`, this will be set to the value stored for the associated
            model in ``max_model_input_sizes`` (see above). If no value is provided, or if no associated
            max_length can be found in ``max_model_input_sizes``, this will default to VERY_LARGE_INTEGER (`int(1e30)`).
- ``padding_side``: (`Optional`) string: the side on which the model should have padding applied.
Should be selected between ['right', 'left']
- ``model_input_names``: (`Optional`) List[string]: the list of the forward pass inputs accepted by the
model ("token_type_ids", "attention_mask"...).
- ``bos_token``: (`Optional`) string: a beginning of sentence token.
Will be associated to ``self.bos_token`` and ``self.bos_token_id``
- ``eos_token``: (`Optional`) string: an end of sentence token.
Will be associated to ``self.eos_token`` and ``self.eos_token_id``
- ``unk_token``: (`Optional`) string: an unknown token.
Will be associated to ``self.unk_token`` and ``self.unk_token_id``
- ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence).
Will be associated to ``self.sep_token`` and ``self.sep_token_id``
- ``pad_token``: (`Optional`) string: a padding token.
Will be associated to ``self.pad_token`` and ``self.pad_token_id``
- ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence
leveraging self-attention along the full depth of the model).
Will be associated to ``self.cls_token`` and ``self.cls_token_id``
- ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language
modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id``
- ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens.
Adding all special tokens here ensure they won't be split by the tokenization process.
Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids``
"""
vocab_files_names: Dict[str, str] = {}
pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {}
pretrained_init_configuration: Dict[str, Dict[str, Any]] = {}
max_model_input_sizes: Dict[str, int] = {}
model_input_names: List[str] = ["token_type_ids", "attention_mask"]
padding_side: str = "right"
@property
def vocab_size(self) -> int:
""" Size of the base vocabulary (without the added tokens) """
raise NotImplementedError
@property
def is_fast(self) -> bool:
return False
@property
def max_len(self) -> int:
""" Kept here for backward compatibility.
Now renamed to `model_max_length` to avoid ambiguity.
"""
return self.model_max_length
@property
def max_len_single_sentence(self) -> int:
return self.model_max_length - self.num_special_tokens_to_add(pair=False)
@property
def max_len_sentences_pair(self) -> int:
return self.model_max_length - self.num_special_tokens_to_add(pair=True)
@max_len_single_sentence.setter
def max_len_single_sentence(self, value) -> int:
""" For backward compatibility, allow to try to setup 'max_len_single_sentence' """
if value == self.model_max_length - self.num_special_tokens_to_add(pair=False):
logger.warning(
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
)
else:
raise ValueError(
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
)
@max_len_sentences_pair.setter
def max_len_sentences_pair(self, value) -> int:
""" For backward compatibility, allow to try to setup 'max_len_sentences_pair' """
if value == self.model_max_length - self.num_special_tokens_to_add(pair=True):
logger.warning(
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
)
else:
raise ValueError(
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
)
def get_vocab(self):
""" Returns the vocabulary as a dict of {token: index} pairs. `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the vocab. """
raise NotImplementedError()
def __init__(self, model_max_length=None, **kwargs):
super().__init__(**kwargs)
# For backward compatibility we fallback to set model_max_length from max_len if provided
if "max_len" in kwargs:
warnings.warn(
"Parameter max_len is deprecated and will be removed in a future release. "
"Use model_max_length instead.",
category=FutureWarning,
)
model_max_length = kwargs.pop("max_len")
self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
# Padding side is right by default and overridden in subclasses. If specified in the kwargs, it is changed.
self.padding_side = kwargs.pop("padding_side", self.padding_side)
assert self.padding_side in [
"right",
"left",
], f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
self.model_input_names = kwargs.pop("model_input_names", self.model_input_names)
# Added tokens
self.added_tokens_encoder = {}
self.unique_added_tokens_encoder = set()
self.added_tokens_decoder = {}
# inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
self.init_inputs = ()
self.init_kwargs = {}
def __len__(self):
""" Size of the full vocabulary with the added tokens """
return self.vocab_size + len(self.added_tokens_encoder)
@classmethod
def from_pretrained(cls, *inputs, **kwargs):
r"""
Instantiate a :class:`~transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer.
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
- (not applicable to all derived classes, deprecated) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
                Force to (re-)download the vocabulary files and override the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.
kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details.
Examples::
# We can't instantiate directly the base class `PreTrainedTokenizer` so let's show our examples on a derived class: BertTokenizer
# Download vocabulary from S3 and cache.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Download vocabulary from S3 (user-uploaded) and cache.
tokenizer = BertTokenizer.from_pretrained('dbmdz/bert-base-german-cased')
# If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')
# If the tokenizer uses a single vocabulary file, you can point directly to this file
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')
# You can link tokens to special vocabulary when instantiating
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
# You should be sure '<unk>' is in the vocabulary when doing that.
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
assert tokenizer.unk_token == '<unk>'
"""
return cls._from_pretrained(*inputs, **kwargs)
@classmethod
def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
s3_models = list(cls.max_model_input_sizes.keys())
vocab_files = {}
init_configuration = {}
if pretrained_model_name_or_path in s3_models:
# Get the vocabulary from AWS S3 bucket
for file_id, map_list in cls.pretrained_vocab_files_map.items():
vocab_files[file_id] = map_list[pretrained_model_name_or_path]
if (
cls.pretrained_init_configuration
and pretrained_model_name_or_path in cls.pretrained_init_configuration
):
init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path].copy()
else:
# Get the vocabulary from local files
logger.info(
"Model name '{}' not found in model shortcut name list ({}). "
"Assuming '{}' is a path, a model identifier, or url to a directory containing tokenizer files.".format(
pretrained_model_name_or_path, ", ".join(s3_models), pretrained_model_name_or_path
)
)
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
if len(cls.vocab_files_names) > 1:
raise ValueError(
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not supported."
"Use a model identifier or the path to a directory instead."
)
logger.warning(
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated"
)
file_id = list(cls.vocab_files_names.keys())[0]
vocab_files[file_id] = pretrained_model_name_or_path
else:
# At this point pretrained_model_name_or_path is either a directory or a model identifier name
additional_files_names = {
"added_tokens_file": ADDED_TOKENS_FILE,
"special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE,
"tokenizer_config_file": TOKENIZER_CONFIG_FILE,
}
# Look for the tokenizer main vocabulary files + the additional tokens files
for file_id, file_name in {**cls.vocab_files_names, **additional_files_names}.items():
if os.path.isdir(pretrained_model_name_or_path):
full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
if not os.path.exists(full_file_name):
logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
full_file_name = None
else:
full_file_name = hf_bucket_url(
pretrained_model_name_or_path, filename=file_name, use_cdn=False
)
vocab_files[file_id] = full_file_name
# Get files from url, cache, or disk depending on the case
try:
resolved_vocab_files = {}
for file_id, file_path in vocab_files.items():
if file_path is None:
resolved_vocab_files[file_id] = None
else:
resolved_vocab_files[file_id] = cached_path(
file_path,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
except EnvironmentError:
if pretrained_model_name_or_path in s3_models:
msg = "Couldn't reach server at '{}' to download vocabulary files."
else:
msg = (
"Model name '{}' was not found in tokenizers model name list ({}). "
"We assumed '{}' was a path or url to a directory containing vocabulary files "
"named {}, but couldn't find such vocabulary files at this path or url.".format(
pretrained_model_name_or_path,
", ".join(s3_models),
pretrained_model_name_or_path,
list(cls.vocab_files_names.values()),
)
)
raise EnvironmentError(msg)
if all(full_file_name is None for full_file_name in resolved_vocab_files.values()):
raise EnvironmentError(
"Model name '{}' was not found in tokenizers model name list ({}). "
"We assumed '{}' was a path, a model identifier, or url to a directory containing vocabulary files "
"named {} but couldn't find such vocabulary files at this path or url.".format(
pretrained_model_name_or_path,
", ".join(s3_models),
pretrained_model_name_or_path,
list(cls.vocab_files_names.values()),
)
)
for file_id, file_path in vocab_files.items():
if file_path == resolved_vocab_files[file_id]:
logger.info("loading file {}".format(file_path))
else:
logger.info("loading file {} from cache at {}".format(file_path, resolved_vocab_files[file_id]))
# Prepare tokenizer initialization kwargs
        # Did we save some inputs and kwargs to reload?
tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None)
if tokenizer_config_file is not None:
with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
init_kwargs = json.load(tokenizer_config_handle)
saved_init_inputs = init_kwargs.pop("init_inputs", ())
if not init_inputs:
init_inputs = saved_init_inputs
else:
init_kwargs = init_configuration
# Update with newly provided kwargs
init_kwargs.update(kwargs)
# Set max length if needed
if pretrained_model_name_or_path in cls.max_model_input_sizes:
# if we're using a pretrained model, ensure the tokenizer
            # won't index sequences longer than the number of positional embeddings
model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path]
if model_max_length is not None and isinstance(model_max_length, (int, float)):
init_kwargs["model_max_length"] = min(init_kwargs.get("model_max_length", int(1e30)), model_max_length)
# Merge resolved_vocab_files arguments in init_kwargs.
added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None)
for args_name, file_path in resolved_vocab_files.items():
if args_name not in init_kwargs:
init_kwargs[args_name] = file_path
if special_tokens_map_file is not None:
with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
special_tokens_map = json.load(special_tokens_map_handle)
for key, value in special_tokens_map.items():
if key not in init_kwargs:
init_kwargs[key] = value
# Instantiate tokenizer.
try:
tokenizer = cls(*init_inputs, **init_kwargs)
except OSError:
raise OSError(
"Unable to load vocabulary from file. "
"Please check that the provided vocabulary is accessible and not corrupted."
)
# Save inputs and kwargs for saving and re-loading with ``save_pretrained``
tokenizer.init_inputs = init_inputs
tokenizer.init_kwargs = init_kwargs
# update unique_added_tokens_encoder with special tokens for correct tokenization
tokenizer.unique_added_tokens_encoder.update(set(tokenizer.all_special_tokens))
# Add supplementary tokens.
if added_tokens_file is not None:
with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
added_tok_encoder = json.load(added_tokens_handle)
added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
tokenizer.added_tokens_encoder.update(added_tok_encoder)
tokenizer.added_tokens_decoder.update(added_tok_decoder)
tokenizer.unique_added_tokens_encoder.update(set(tokenizer.added_tokens_encoder.keys()))
return tokenizer
def save_pretrained(self, save_directory):
""" Save the tokenizer vocabulary files together with:
- added tokens,
- special-tokens-to-class-attributes-mapping,
- tokenizer instantiation positional and keywords inputs (e.g. do_lower_case for Bert).
Warning: This won't save modifications you may have applied to the tokenizer after the instantiation
(e.g. modifying tokenizer.do_lower_case after creation).
            This method makes sure the full tokenizer can then be re-loaded using the
:func:`~transformers.PreTrainedTokenizer.from_pretrained` class method.
"""
if not os.path.isdir(save_directory):
logger.error("Saving directory ({}) should be a directory".format(save_directory))
return
special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE)
tokenizer_config = copy.deepcopy(self.init_kwargs)
if len(self.init_inputs) > 0:
tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
for file_id in self.vocab_files_names.keys():
tokenizer_config.pop(file_id, None)
with open(tokenizer_config_file, "w", encoding="utf-8") as f:
f.write(json.dumps(tokenizer_config, ensure_ascii=False))
with open(special_tokens_map_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))
if len(self.added_tokens_encoder) > 0:
with open(added_tokens_file, "w", encoding="utf-8") as f:
out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False)
f.write(out_str)
vocab_files = self.save_vocabulary(save_directory)
return vocab_files + (special_tokens_map_file, added_tokens_file)
def save_vocabulary(self, save_directory) -> Tuple[str]:
""" Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
and special token mappings.
            Please use :func:`~transformers.PreTrainedTokenizer.save_pretrained` to save the full
Tokenizer state if you want to reload it using the :func:`~transformers.PreTrainedTokenizer.from_pretrained`
class method.
"""
raise NotImplementedError
def add_tokens(self, new_tokens: Union[str, List[str]]) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the
vocabulary, they are added to it with indices starting from length of the current vocabulary.
Args:
new_tokens: string or list of string. Each string is a token to add. Tokens are only added if they are not
                already in the vocabulary (tested by checking if the tokenizer assigns the index of the ``unk_token`` to them).
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
"""
if not new_tokens:
return 0
if not isinstance(new_tokens, list):
new_tokens = [new_tokens]
tokens_to_add = []
for token in new_tokens:
assert isinstance(token, str)
if self.init_kwargs.get("do_lower_case", False) and token not in self.all_special_tokens:
token = token.lower()
if (
token != self.unk_token
and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)
and token not in tokens_to_add
):
tokens_to_add.append(token)
logger.info("Adding %s to the vocabulary", token)
added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(tokens_to_add))
added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
self.added_tokens_encoder.update(added_tok_encoder)
self.unique_added_tokens_encoder = set(self.added_tokens_encoder.keys()).union(set(self.all_special_tokens))
self.added_tokens_decoder.update(added_tok_decoder)
return len(tokens_to_add)
def num_special_tokens_to_add(self, pair=False):
"""
Returns the number of added tokens when encoding a sequence with special tokens.
Note:
This encodes inputs and checks the number of added tokens, and is therefore not efficient. Do not put this
inside your training loop.
Args:
pair: Returns the number of added tokens in the case of a sequence pair if set to True, returns the
number of added tokens in the case of a single sequence if set to False.
Returns:
Number of tokens added to sequences
"""
token_ids_0 = []
token_ids_1 = []
return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))
def add_special_tokens(self, special_tokens_dict):
"""
Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them
to class attributes. If special tokens are NOT in the vocabulary, they are added
to it (indexed starting from the last index of the current vocabulary).
Using `add_special_tokens` will ensure your special tokens can be used in several ways:
- special tokens are carefully handled by the tokenizer (they are never split)
- you can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts.
When possible, special tokens are already registered for provided pretrained models (ex: BertTokenizer cls_token is already registered to be '[CLS]' and XLM's one is also registered to be '</s>')
Args:
special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes:
[``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
``additional_special_tokens``].
                Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assigns the index of the ``unk_token`` to them).
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to add a new classification token to GPT-2
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
special_tokens_dict = {'cls_token': '<CLS>'}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer.cls_token == '<CLS>'
"""
if not special_tokens_dict:
return 0
added_tokens = 0
for key, value in special_tokens_dict.items():
assert key in self.SPECIAL_TOKENS_ATTRIBUTES
if key == "additional_special_tokens":
assert isinstance(value, (list, tuple)) and all(isinstance(t, str) for t in value)
added_tokens += self.add_tokens(value)
else:
assert isinstance(value, str)
added_tokens += self.add_tokens([value])
logger.info("Assigning %s to the %s key of the tokenizer", value, key)
setattr(self, key, value)
return added_tokens
def tokenize(self, text: TextInput, **kwargs):
""" Converts a string in a sequence of tokens (string), using the tokenizer.
Split in words for word-based vocabulary or sub-words for sub-word-based
vocabularies (BPE/SentencePieces/WordPieces).
Take care of added tokens.
Args:
text (:obj:`string`): The sequence to be encoded.
**kwargs (:obj: `dict`): Arguments passed to the model-specific `prepare_for_tokenization` preprocessing method.
"""
all_special_tokens = self.all_special_tokens
text = self.prepare_for_tokenization(text, **kwargs)
# TODO: should this be in the base class?
def lowercase_text(t):
# convert non-special tokens to lowercase
escaped_special_toks = [re.escape(s_tok) for s_tok in all_special_tokens]
pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
return re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), t)
if self.init_kwargs.get("do_lower_case", False):
text = lowercase_text(text)
def split_on_token(tok, text):
result = []
split_text = text.split(tok)
for i, sub_text in enumerate(split_text):
sub_text = sub_text.rstrip()
if i == 0 and not sub_text:
result += [tok]
elif i == len(split_text) - 1:
if sub_text:
result += [sub_text]
else:
pass
else:
if sub_text:
result += [sub_text]
result += [tok]
return result
def split_on_tokens(tok_list, text):
if not text.strip():
return []
if not tok_list:
return self._tokenize(text)
tokenized_text = []
text_list = [text]
for tok in tok_list:
tokenized_text = []
for sub_text in text_list:
if sub_text not in self.unique_added_tokens_encoder:
tokenized_text += split_on_token(tok, sub_text)
else:
tokenized_text += [sub_text]
text_list = tokenized_text
return list(
itertools.chain.from_iterable(
(
self._tokenize(token) if token not in self.unique_added_tokens_encoder else [token]
for token in tokenized_text
)
)
)
added_tokens = self.unique_added_tokens_encoder
tokenized_text = split_on_tokens(added_tokens, text)
return tokenized_text
def _tokenize(self, text, **kwargs):
""" Converts a string in a sequence of tokens (string), using the tokenizer.
Split in words for word-based vocabulary or sub-words for sub-word-based
vocabularies (BPE/SentencePieces/WordPieces).
Do NOT take care of added tokens.
"""
raise NotImplementedError
def convert_tokens_to_ids(self, tokens):
""" Converts a token string (or a sequence of tokens) in a single integer id
(or a sequence of ids), using the vocabulary.
"""
if tokens is None:
return None
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
return ids
def _convert_token_to_id_with_added_voc(self, token):
if token is None:
return None
if token in self.added_tokens_encoder:
return self.added_tokens_encoder[token]
return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
raise NotImplementedError
def encode(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs
):
"""
Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary. Adds the model-specific
special tokens (such as beginning of sequence, end of sequence, sequence separator).
If specifying ``add_special_tokens=False``, same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.
Args:
text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method)
text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the `tokenize` method) or a list of integers (tokenized string ids using the
`convert_tokens_to_ids` method)
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
If set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary.
You can set it to the maximal input size of the model with `max_length = tokenizer.model_max_length`.
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
String selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the
model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
which can be set to the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
**kwargs: passed to the `self.tokenize()` method
"""
encoded_inputs = self.encode_plus(
text,
text_pair=text_pair,
max_length=max_length,
add_special_tokens=add_special_tokens,
stride=stride,
truncation_strategy=truncation_strategy,
pad_to_max_length=pad_to_max_length,
return_tensors=return_tensors,
**kwargs,
)
return encoded_inputs["input_ids"]
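# Illustrative sketch (not part of the original source), assuming `tokenizer` is a
# concrete pretrained instance:
#   ids = tokenizer.encode("Hello world", add_special_tokens=True)
#   # `ids` is a plain list[int]; pass return_tensors="pt" to get a torch.Tensor instead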
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
is_pretokenized: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
**kwargs
) -> BatchEncoding:
"""
Returns a dictionary containing the encoded sequence or sequence pair and additional information:
the mask for sequence classification and the overflowing elements if a ``max_length`` is specified.
Args:
text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]` (the latter only for non-fast tokenizers)):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method)
text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the `tokenize` method) or a list of integers (tokenized string ids using the
`convert_tokens_to_ids` method)
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
If set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary
You can set it to the maximal input size of the model with `max_length = tokenizer.model_max_length`.
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
String selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the
model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
which can be set to the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Set to True to indicate the input is already tokenized
return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
return_token_type_ids (:obj:`bool`, `optional`, defaults to :obj:`None`):
Whether to return token type IDs. If left to the default, will return the token type IDs according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are token type IDs? <../glossary.html#token-type-ids>`_
return_attention_mask (:obj:`bool`, `optional`, defaults to :obj:`None`):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are attention masks? <../glossary.html#attention-mask>`__
return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return overflowing token information (default False).
return_special_tokens_mask (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return special tokens mask information (default False).
return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return (char_start, char_end) for each token (default False).
If using Python's tokenizer, this method will raise NotImplementedError.
This one is only available on fast tokenizers inheriting from PreTrainedTokenizerFast.
**kwargs: passed to the `self.tokenize()` method
Return:
A Dictionary of shape::
{
input_ids: list[int],
token_type_ids: list[int] if return_token_type_ids is True (default)
attention_mask: list[int] if return_attention_mask is True (default)
overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[int] if ``add_special_tokens`` is set to ``True``
and return_special_tokens_mask is True
}
With the fields:
- ``input_ids``: list of token ids to be fed to a model
- ``token_type_ids``: list of token type ids to be fed to a model
- ``attention_mask``: list of indices specifying which tokens should be attended to by the model
- ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
- ``num_truncated_tokens``: number of overflowing tokens if a ``max_length`` is specified
- ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 1 specifying special added
tokens and 0 specifying sequence tokens.
"""
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text, add_special_tokens=add_special_tokens, **kwargs)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers."
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
# Throw an error if we can pad because there is no padding token
if pad_to_max_length and self.pad_token_id is None:
raise ValueError(
"Unable to set proper padding strategy as the tokenizer does not have a padding token. "
"In this case please set the `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` "
"or add a new pad token via the function add_special_tokens if you want to use a padding strategy"
)
first_ids = get_input_ids(text)
second_ids = get_input_ids(text_pair) if text_pair is not None else None
return self.prepare_for_model(
first_ids,
pair_ids=second_ids,
max_length=max_length,
pad_to_max_length=pad_to_max_length,
add_special_tokens=add_special_tokens,
stride=stride,
truncation_strategy=truncation_strategy,
return_tensors=return_tensors,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
prepend_batch_axis=return_tensors is not None,
)
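# Illustrative sketch (not part of the original source): encode_plus returns a
# dict-like BatchEncoding; with a hypothetical `tokenizer` instance that defines a
# pad token:
#   enc = tokenizer.encode_plus("a question?", "some context.", max_length=32,
#                               pad_to_max_length=True)
#   enc["input_ids"]  # plus token_type_ids / attention_mask, depending on model_input_names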
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
is_pretokenized: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_masks: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_masks: bool = False,
return_offsets_mapping: bool = False,
return_lengths: bool = False,
**kwargs
) -> BatchEncoding:
"""
Returns a dictionary containing the encoded sequence or sequence pair and additional information:
the mask for sequence classification and the overflowing elements if a ``max_length`` is specified.
Args:
batch_text_or_text_pairs (:obj:`List[str]`, :obj:`List[Tuple[str, str]]`,
:obj:`List[List[str]]`, :obj:`List[Tuple[List[str], List[str]]]`,
and for non-fast tokenizers, also:
:obj:`List[List[int]]`, :obj:`List[Tuple[List[int], List[int]]]`):
Batch of sequences or pair of sequences to be encoded.
This can be a list of string/string-sequences/int-sequences or a list of pair of
string/string-sequences/int-sequence (see details in encode_plus)
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
If set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
String selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the
model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
which can be set to the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Set to True to indicate the input is already tokenized
return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
return_token_type_ids (:obj:`bool`, `optional`, defaults to :obj:`None`):
Whether to return token type IDs. If left to the default, will return the token type IDs according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are token type IDs? <../glossary.html#token-type-ids>`_
return_attention_masks (:obj:`bool`, `optional`, defaults to :obj:`None`):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are attention masks? <../glossary.html#attention-mask>`__
return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return overflowing token information (default False).
return_special_tokens_masks (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return special tokens mask information (default False).
return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return (char_start, char_end) for each token (default False).
If using Python's tokenizer, this method will raise NotImplementedError. This one is only available on
Rust-based tokenizers inheriting from PreTrainedTokenizerFast.
return_lengths (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set the resulting dictionary will include the length of each encoded inputs
**kwargs: passed to the `self.tokenize()` method
Return:
A Dictionary of shape::
{
input_ids: list[List[int]],
token_type_ids: list[List[int]] if return_token_type_ids is True (default)
attention_mask: list[List[int]] if return_attention_mask is True (default)
overflowing_tokens: list[List[int]] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: List[int] if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[List[int]] if ``add_special_tokens`` is set to ``True`` and return_special_tokens_mask is True
}
With the fields:
- ``input_ids``: list of token ids to be fed to a model
- ``token_type_ids``: list of token type ids to be fed to a model
- ``attention_mask``: list of indices specifying which tokens should be attended to by the model
- ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
- ``num_truncated_tokens``: number of overflowing tokens if a ``max_length`` is specified
- ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 1 specifying special added
tokens and 0 specifying sequence tokens.
"""
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text, add_special_tokens=add_special_tokens, **kwargs)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
# Throw an error if we can pad because there is no padding token
if pad_to_max_length and self.pad_token_id is None:
raise ValueError(
"Unable to set proper padding strategy as the tokenizer does not have a padding token. In this case please set the `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via the function add_special_tokens if you want to use a padding strategy"
)
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers."
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
input_ids = []
for ids_or_pair_ids in batch_text_or_text_pairs:
if isinstance(ids_or_pair_ids, (list, tuple)) and len(ids_or_pair_ids) == 2 and not is_pretokenized:
ids, pair_ids = ids_or_pair_ids
else:
ids, pair_ids = ids_or_pair_ids, None
first_ids = get_input_ids(ids)
second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
input_ids.append((first_ids, second_ids))
if max_length is None and pad_to_max_length:
def total_sequence_length(input_pairs):
first_ids, second_ids = input_pairs
return len(first_ids) + (
self.num_special_tokens_to_add()
if second_ids is None
else (len(second_ids) + self.num_special_tokens_to_add(pair=True))
)
max_length = max([total_sequence_length(ids) for ids in input_ids])
batch_outputs = {}
for first_ids, second_ids in input_ids:
# Prepares a sequence of input ids, or a pair of sequences of input ids so that it can be used by
# the model. It adds special tokens, truncates sequences if overflowing while taking into account
# the special tokens and manages a window stride for overflowing tokens
outputs = self.prepare_for_model(
first_ids,
pair_ids=second_ids,
max_length=max_length,
pad_to_max_length=pad_to_max_length,
add_special_tokens=add_special_tokens,
stride=stride,
truncation_strategy=truncation_strategy,
return_attention_mask=return_attention_masks,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_masks,
return_lengths=return_lengths,
return_tensors=None, # We will convert the whole batch to tensors at the end
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
if return_tensors is not None:
convert_to_tensors(batch_outputs, return_tensors)
return BatchEncoding(batch_outputs)
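# Illustrative sketch (not part of the original source): the batch variant accepts a
# list of texts or of (text, text_pair) tuples; with a hypothetical `tokenizer` that
# defines a pad token:
#   batch = tokenizer.batch_encode_plus([("q1", "ctx1"), ("q2", "ctx2")],
#                                       pad_to_max_length=True, return_tensors="pt")
#   # batch["input_ids"] has shape (2, longest_sequence_in_the_batch)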
def prepare_for_model(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
max_length: Optional[int] = None,
add_special_tokens: bool = True,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_lengths: bool = False,
prepend_batch_axis: bool = False,
) -> BatchEncoding:
""" Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model.
It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens
Args:
ids: list of tokenized input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
max_length: maximum length of the returned list. Will truncate by taking into account the special tokens.
add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
stride: window stride for overflowing tokens. Can be useful to remove edge effects when using a sequential
list of inputs. The overflowing tokens will contain a part of the previous window of tokens.
truncation_strategy: string selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length: if set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the model's max length.
The tokenizer padding sides are handled by the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
or PyTorch torch.Tensor instead of a list of python integers.
return_token_type_ids: (optional) Set to False to avoid returning token_type_ids (default: set to model specifics).
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics)
return_overflowing_tokens: (optional) Set to True to return overflowing token information (default False).
return_special_tokens_mask: (optional) Set to True to return special tokens mask information (default False).
return_lengths (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set the resulting dictionary will include the length of each encoded inputs
prepend_batch_axis (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set the resulting object will feature an extra dim at position 0.
This can be seen as an unsqueezing operator.
Return:
A Dictionary of shape::
{
input_ids: list[int],
token_type_ids: list[int] if return_token_type_ids is True (default)
overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[int] if ``add_special_tokens`` is set to ``True`` and return_special_tokens_mask is True
length: int if return_lengths is True
}
With the fields:
- ``input_ids``: list of token ids to be fed to a model
- ``token_type_ids``: list of token type ids to be fed to a model
- ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
- ``num_truncated_tokens``: number of overflowing tokens if a ``max_length`` is specified
- ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 1 specifying special added
tokens and 0 specifying sequence tokens.
- ``length``: this is the length of ``input_ids``
"""
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
# Load from model defaults
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
encoded_inputs = {}
# Truncation: Handle max sequence length
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
if max_length and total_len > max_length:
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_length,
truncation_strategy=truncation_strategy,
stride=stride,
)
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_length
# Add special tokens
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else [])
# Build output dictionary
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
# Check lengths
assert max_length is None or len(encoded_inputs["input_ids"]) <= max_length
if max_length is None and len(encoded_inputs["input_ids"]) > self.model_max_length:
logger.warning(
"Token indices sequence length is longer than the specified maximum sequence length "
"for this model ({} > {}). Running this sequence through the model will result in "
"indexing errors".format(len(ids), self.model_max_length)
)
# Padding
needs_to_be_padded = pad_to_max_length and (
max_length
and len(encoded_inputs["input_ids"]) < max_length
or max_length is None
and len(encoded_inputs["input_ids"]) < self.model_max_length
and self.model_max_length <= LARGE_INTEGER
)
if pad_to_max_length and max_length is None and self.model_max_length > LARGE_INTEGER:
logger.warning(
"Sequence can't be padded as no maximum length is specified and the model maximum length is too high."
)
if needs_to_be_padded:
difference = (max_length if max_length is not None else self.model_max_length) - len(
encoded_inputs["input_ids"]
)
if self.padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) + [0] * difference
if return_token_type_ids:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference
elif self.padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + [1] * len(encoded_inputs["input_ids"])
if return_token_type_ids:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
else:
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])
if return_lengths:
encoded_inputs["length"] = len(encoded_inputs["input_ids"])
# Prepare model inputs as tensors if asked
if return_tensors is not None:
convert_to_tensors(encoded_inputs, return_tensors, prepend_batch_axis)
return BatchEncoding(encoded_inputs)
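# Illustrative sketch (not part of the original source): prepare_for_model takes ids
# that were already converted, adds special tokens, truncates and optionally pads;
# with a hypothetical `tokenizer` that defines a pad token:
#   ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("some text"))
#   out = tokenizer.prepare_for_model(ids, max_length=16, pad_to_max_length=True)
#   out["input_ids"], out.get("attention_mask")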
def prepare_for_tokenization(self, text: str, **kwargs) -> str:
""" Performs any necessary transformations before tokenization """
return text
def truncate_sequences(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
num_tokens_to_remove: int = 0,
truncation_strategy: str = "longest_first",
stride: int = 0,
) -> Tuple[List[int], List[int], List[int]]:
""" Truncates a sequence pair in place to the maximum length.
Args:
ids: list of tokenized input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
num_tokens_to_remove (:obj:`int`, `optional`, defaults to ``0``):
number of tokens to remove using the truncation strategy
truncation_strategy: string selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences).
Overflowing tokens only contains overflow from the first sequence.
- 'only_first': Only truncate the first sequence. Raise an error if the first sequence is shorter than or equal to num_tokens_to_remove.
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
"""
if num_tokens_to_remove <= 0:
return ids, pair_ids, []
if truncation_strategy == "longest_first":
overflowing_tokens = []
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
overflowing_tokens = [ids[-1]] + overflowing_tokens
ids = ids[:-1]
else:
pair_ids = pair_ids[:-1]
window_len = min(len(ids), stride)
if window_len > 0:
overflowing_tokens = ids[-window_len:] + overflowing_tokens
elif truncation_strategy == "only_first":
assert len(ids) > num_tokens_to_remove
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]
ids = ids[:-num_tokens_to_remove]
elif truncation_strategy == "only_second":
assert pair_ids is not None and len(pair_ids) > num_tokens_to_remove
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
elif truncation_strategy == "do_not_truncate":
raise ValueError("Input sequence are too long for max_length. Please select a truncation strategy.")
else:
raise ValueError(
"Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']"
)
return (ids, pair_ids, overflowing_tokens)
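# Illustrative worked example (not part of the original source), directly derivable
# from the code above:
#   tokenizer.truncate_sequences([1, 2, 3, 4], pair_ids=[5, 6], num_tokens_to_remove=2)
#   # 'longest_first' removes from the longer sequence at each step and returns
#   # ([1, 2], [5, 6], [3, 4]) -- the third element being the overflowing tokens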
def create_token_type_ids_from_sequences(self, token_ids_0: List, token_ids_1: Optional[List] = None) -> List[int]:
if token_ids_1 is None:
return len(token_ids_0) * [0]
return [0] * len(token_ids_0) + [1] * len(token_ids_1)
def build_inputs_with_special_tokens(self, token_ids_0: List, token_ids_1: Optional[List] = None) -> List:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks
by concatenating and adding special tokens. This implementation does not add special tokens.
"""
if token_ids_1 is None:
return token_ids_0
return token_ids_0 + token_ids_1
def get_special_tokens_mask(
self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0: list of ids (must not contain special tokens)
token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
for sequence pairs
already_has_special_tokens: (default False) Set to True if the token list is already formated with
special tokens for the model
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
def convert_ids_to_tokens(
self, ids: Union[int, List[int]], skip_special_tokens: bool = False
) -> Union[int, List[int]]:
""" Converts a single index or a sequence of indices (integers) in a token "
(resp.) a sequence of tokens (str), using the vocabulary and added tokens.
Args:
skip_special_tokens: Don't decode special tokens (self.all_special_tokens). Default: False
"""
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
index = int(index)
if skip_special_tokens and index in self.all_special_ids:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
def _convert_id_to_token(self, index: int) -> str:
raise NotImplementedError
def convert_tokens_to_string(self, tokens: List[str]) -> str:
""" Converts a sequence of tokens (string) in a single string.
The most simple way to do it is ' '.join(self.convert_ids_to_tokens(token_ids))
but we often want to remove sub-word tokenization artifacts at the same time.
"""
return " ".join(self.convert_ids_to_tokens(tokens))
def decode(
self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True
) -> str:
"""
Converts a sequence of ids (integer) in a string, using the tokenizer and vocabulary
with options to remove special tokens and clean up tokenization spaces.
Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
Args:
token_ids: list of tokenized input ids. Can be obtained using the `encode` or `encode_plus` methods.
skip_special_tokens: if set to True, will remove special tokens from the decoded string.
clean_up_tokenization_spaces: if set to True, will clean up the tokenization spaces.
"""
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
current_sub_text = []
sub_texts.append(token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
text = " ".join(sub_texts)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
def batch_decode(self, sequences: List[List[int]], **kwargs) -> List[str]:
return [self.decode(seq, **kwargs) for seq in sequences]
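# Illustrative sketch (not part of the original source): decode is roughly the inverse
# of encode; with a hypothetical `tokenizer` instance:
#   ids = tokenizer.encode("Hello world")
#   tokenizer.decode(ids, skip_special_tokens=True)  # -> approximately "Hello world"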
@staticmethod
def clean_up_tokenization(out_string: str) -> str:
""" Clean up a list of simple English tokenization artifacts like spaces before punctuations and abreviated forms.
"""
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string
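# Illustrative worked example (not part of the original source), following the
# replacements above:
#   PreTrainedTokenizer.clean_up_tokenization("I 'm happy , are n't you ?")
#   # -> "I'm happy, aren't you?"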
class PreTrainedTokenizerFast(PreTrainedTokenizer):
""" Base class for all fast tokenizers (wrapping HuggingFace tokenizers library).
Inherit from PreTrainedTokenizer.
Handle all the shared methods for tokenization and special tokens as well as methods
downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.
This class also contains the added tokens in a unified way on top of all tokenizers so we don't
have to handle the specific vocabulary augmentation methods of the various underlying
dictionary structures (BPE, sentencepiece...).
Class attributes (overridden by derived classes):
- ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file
required by the model, and as associated values, the filename for saving the associated file (string).
- ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys
being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the
`short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the
associated pretrained vocabulary file.
- ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained
models, and as associated values, the maximum length of the sequence inputs of this model, or None if the
model has no maximum input size.
- ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the
pretrained models, and as associated values, a dictionary of specific arguments to pass to the
``__init__``method of the tokenizer class for this pretrained model when loading the tokenizer with the
``from_pretrained()`` method.
Args:
- ``tokenizer`` (`BaseTokenizerFast`): A Fast tokenizer from the HuggingFace tokenizer library (in low level Rust language)
- ``model_max_length``: (`Optional`) int: the maximum length in number of tokens for the inputs to the transformer model.
When the tokenizer is loaded with `from_pretrained`, this will be set to the value stored for the associated
model in ``max_model_input_sizes`` (see above). If no value is provided and
no associated max_length can be found in ``max_model_input_sizes``, will default to VERY_LARGE_INTEGER (`int(1e30)`).
- ``padding_side``: (`Optional`) string: the side on which the model should have padding applied.
Should be selected between ['right', 'left']
- ``model_input_names``: (`Optional`) List[string]: the list of the forward pass inputs accepted by the
model ("token_type_ids", "attention_mask"...).
- ``bos_token``: (`Optional`) string: a beginning of sentence token.
Will be associated to ``self.bos_token`` and ``self.bos_token_id``
- ``eos_token``: (`Optional`) string: an end of sentence token.
Will be associated to ``self.eos_token`` and ``self.eos_token_id``
- ``unk_token``: (`Optional`) string: an unknown token.
Will be associated to ``self.unk_token`` and ``self.unk_token_id``
- ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence).
Will be associated to ``self.sep_token`` and ``self.sep_token_id``
- ``pad_token``: (`Optional`) string: a padding token.
Will be associated to ``self.pad_token`` and ``self.pad_token_id``
- ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence
leveraging self-attention along the full depth of the model).
Will be associated to ``self.cls_token`` and ``self.cls_token_id``
- ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language
modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id``
- ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens.
Adding all special tokens here ensures they won't be split by the tokenization process.
Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids``
"""
def __init__(self, tokenizer: BaseTokenizerFast, **kwargs):
if not isinstance(tokenizer, BaseTokenizerFast):
raise ValueError(
"Tokenizer should be an instance of a Tokenizer " "provided by HuggingFace tokenizers library."
)
self._tokenizer: BaseTokenizerFast = tokenizer
# Initialize all the rest of the kwargs
super().__init__(**kwargs)
@property
def backend_tokenizer(self) -> BaseTokenizerFast:
return self._tokenizer
@property
def decoder(self) -> DecoderFast:
return self._tokenizer._tokenizer.decoder
@property
def is_fast(self) -> bool:
return True
@property
def vocab_size(self) -> int:
return self._tokenizer.get_vocab_size(with_added_tokens=False)
def __len__(self) -> int:
return self._tokenizer.get_vocab_size(with_added_tokens=True)
def _maybe_update_backend(self, value):
""" Update the backend fast tokenizer.
Override method from base class SpecialTokensMixin """
self._tokenizer.add_special_tokens(value)
def _convert_encoding(
self,
encoding: EncodingFast,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
) -> Dict[str, Any]:
""" Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict.
Overflowing tokens are converted to additional examples (like batches) so the output values of
the dict are lists (overflows) of lists (tokens).
If return_tensors is not None, these lists of lists are converted to 2-D tensors
for input_ids, token_type_ids and attention_mask.
Output shape: (overflows, sequence length)
"""
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
if return_overflowing_tokens and encoding.overflowing is not None:
encodings = [encoding] + encoding.overflowing
else:
encodings = [encoding]
encoding_dict = defaultdict(list)
for e in encodings:
encoding_dict["input_ids"].append(e.ids)
if return_token_type_ids:
encoding_dict["token_type_ids"].append(e.type_ids)
if return_attention_mask:
encoding_dict["attention_mask"].append(e.attention_mask)
if return_special_tokens_mask:
encoding_dict["special_tokens_mask"].append(e.special_tokens_mask)
if return_offsets_mapping:
encoding_dict["offset_mapping"].append(e.offsets)
if return_tensors is not None:
encoding_dict = convert_to_tensors(encoding_dict, return_tensors)
return encoding_dict
def _convert_token_to_id_with_added_voc(self, token: int) -> str:
index = self._tokenizer.token_to_id(token)
if index is None:
return self.unk_token_id
return index
def _convert_id_to_token(self, index: int) -> Optional[str]:
return self._tokenizer.id_to_token(int(index))
def get_vocab(self):
return self._tokenizer.get_vocab(True)
def convert_tokens_to_string(self, tokens: List[int], skip_special_tokens: bool = False) -> str:
return self._tokenizer.decode(tokens, skip_special_tokens)
def add_tokens(self, new_tokens: List[Union[str, AddedTokenFast]]) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the
vocabulary, they are added to it with indices starting from length of the current vocabulary.
Args:
new_tokens: string or list of string or AddedTokenFast. Each string is a token to add.
Tokens are only added if they are not already in the vocabulary. AddedTokenFast wraps a string token to let you personalize its behavior (whether this token should only match against a single word, whether it should strip all potential whitespace on the left side, whether it should strip all potential whitespace on the right side, ...).
See details for AddedToken in HuggingFace tokenizers library.
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
"""
if isinstance(new_tokens, str):
new_tokens = [new_tokens]
return self._tokenizer.add_tokens(new_tokens)
def add_special_tokens(self, special_tokens_dict: dict) -> int:
# Map special tokens to class attributes (self.pad_token...)
super().add_special_tokens(special_tokens_dict)
# For the backend tokenizer, the only specificities of special tokens are that
# - they will never be processed by the model, and
# - they will be removed while decoding.
# But they are not mapped to special attributes in the backend so we can just
# send a list.
tokens = []
for token in special_tokens_dict.values():
if isinstance(token, list):
tokens += token
else:
tokens += [token]
num_added_tokens = self._tokenizer.add_special_tokens(tokens)
return num_added_tokens
def num_special_tokens_to_add(self, pair: bool = False) -> int:
return self._tokenizer.num_special_tokens_to_add(pair)
def tokenize(
self, text: TextInput, pair: Optional[TextInput] = None, add_special_tokens: bool = False
) -> List[str]:
return self._tokenizer.encode(text, pair, add_special_tokens).tokens
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair]
],
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
is_pretokenized: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_lengths: bool = False,
**kwargs
) -> BatchEncoding:
if not isinstance(batch_text_or_text_pairs, list):
raise ValueError(
"batch_text_or_text_pairs has to be a list (got {})".format(type(batch_text_or_text_pairs))
)
# Needed if we have to return a tensor
pad_to_max_length = pad_to_max_length or (return_tensors is not None and len(batch_text_or_text_pairs) > 1)
# Throw an error if we can pad because there is no padding token
if pad_to_max_length and self.pad_token_id is None:
raise ValueError("Unable to set proper padding strategy as the tokenizer does not have a padding token")
# Set the truncation and padding strategy and restore the initial configuration
with truncate_and_pad(
tokenizer=self._tokenizer,
max_length=max_length,
stride=stride,
strategy=truncation_strategy,
pad_to_max_length=pad_to_max_length,
padding_side=self.padding_side,
pad_token_id=self.pad_token_id if self._pad_token is not None else None,
pad_token_type_id=self.pad_token_type_id,
pad_token=self._pad_token,
):
# Check for the pretokenized path
if is_pretokenized:
encodings = []
# Iterate over each sample (we don't know yet if they are pairs or simple inputs)
for i, sample in enumerate(batch_text_or_text_pairs):
if not isinstance(sample, (list, tuple)):
raise TypeError(
"batch_encode_plus(..., is_pretokenized=True) requires batch_text_or_text_pairs "
"to be either List[List[str]] or List[Tuple[List[str], List[str]]] but sample at "
"index {} is of type {}".format(i, type(sample))
)
# Test if we have a pair of sentences by checking the depth of nesting
is_pair = bool(len(sample) > 0 and isinstance(sample[0], (list, tuple)))
# Take care of the first sequence - we multi-thread over the words
encodings_text = EncodingFast.merge(
self._tokenizer.encode_batch(sample[0] if is_pair else sample, add_special_tokens=False),
growing_offsets=True,
)
# Take care of the second sequence if we have a pair
if is_pair:
encodings_pair = EncodingFast.merge(
self._tokenizer.encode_batch([("", s) for s in sample[1]], add_special_tokens=False),
growing_offsets=True,
)
else:
encodings_pair = None
# Post-process - truncate/pad and add special tokens
encoding = self._tokenizer.post_process(encodings_text, encodings_pair, add_special_tokens)
encodings.append(encoding)
# Classical path with strings input
else:
# Avoid thread overhead if only one example.
if len(batch_text_or_text_pairs) == 1:
if isinstance(batch_text_or_text_pairs[0], (tuple, list)):
encodings = self._tokenizer.encode(
*batch_text_or_text_pairs[0], add_special_tokens=add_special_tokens
)
else:
encodings = self._tokenizer.encode(
batch_text_or_text_pairs[0], add_special_tokens=add_special_tokens
)
encodings = [encodings]
else:
encodings = self._tokenizer.encode_batch(
batch_text_or_text_pairs, add_special_tokens=add_special_tokens
)
# Convert encoding to dict
# `Tokens` has type: List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]]
# with nested dimensions corresponding to batch, overflows, sequence length
tokens = [
self._convert_encoding(
encoding=encoding,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
)
for encoding in encodings
]
# Sanitize the output to have dict[list] from list[dict]
sanitized = {}
for key in tokens[0].keys():
# To List[List[List[int]]] of shape (batch, overflows, sequence length)
stack = [e for item in tokens for e in item[key]]
if return_tensors == "tf":
stack = tf.stack(stack, axis=0)
elif return_tensors == "pt":
stack = torch.stack(stack, dim=0)
# elif not return_tensors and len(stack) == 1:
# stack = stack[0]
sanitized[key] = stack
# If returning overflowing tokens, we need to return a mapping
# from the batch idx to the original sample
if return_overflowing_tokens:
overflow_to_sample_mapping = flatten([[i] * len(enc["input_ids"]) for i, enc in enumerate(tokens)])
sanitized["overflow_to_sample_mapping"] = overflow_to_sample_mapping
return BatchEncoding(sanitized, encodings)
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput]] = None,
add_special_tokens: bool = True,
max_length: Optional[int] = None,
pad_to_max_length: bool = False,
stride: int = 0,
truncation_strategy: str = "longest_first",
is_pretokenized: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
**kwargs
) -> BatchEncoding:
# Check for the pretokenized path (i.e. [token1, token2, ..., tokenN] -> [id1, id2, ..., idN])
if is_pretokenized:
if isinstance(text, list) and len(text) > 0:
# Encode through encode_batch with sequence of only one word which will be merged after hand
encoding = self._tokenizer.encode_batch(text, add_special_tokens=False)
encoding = EncodingFast.merge(encoding, growing_offsets=True)
# Let's do the same for pairs if provided
if isinstance(text_pair, list):
# We prepend empty string before each word so that encoding is aware content is a pair
encoding_pair = self._tokenizer.encode_batch(
[("", p) for p in text_pair], add_special_tokens=False
)
encoding_pair = EncodingFast.merge(encoding_pair, growing_offsets=True)
elif text_pair is None:
encoding_pair = None
else:
raise TypeError(
"encode_plus(..., is_pretokenized=True) requires text and text_pair to be List[str] "
"but got (text={}, text_pair={})".format(type(text), type(text_pair))
)
# Post process and if asked to do so, insert special tokens where needed
encoding = self._tokenizer.post_process(encoding, encoding_pair, add_special_tokens)
batched_output = BatchEncoding(
self._convert_encoding(
encoding,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
),
encoding,
)
else:
raise TypeError(
"encode_plus(..., is_pretokenized=True) requires text to be List[str] "
"but got (text={}, text_pair={})".format(type(text), type(text_pair))
)
else:
batched_input = [(text, text_pair)] if text_pair else [text]
batched_output = self.batch_encode_plus(
batched_input,
add_special_tokens=add_special_tokens,
max_length=max_length,
stride=stride,
truncation_strategy=truncation_strategy,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
pad_to_max_length=pad_to_max_length,
**kwargs,
)
# If return_tensors is None, we can remove the leading batch axis
if not return_tensors:
batched_output = BatchEncoding(
{
key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
for key, value in batched_output.items()
},
batched_output.encodings,
)
return batched_output
def decode(
self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True
) -> str:
text = self._tokenizer.decode(token_ids, skip_special_tokens)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
def save_vocabulary(self, save_directory: str) -> Tuple[str]:
if os.path.isdir(save_directory):
files = self._tokenizer.save_model(save_directory)
else:
folder, file = os.path.split(os.path.abspath(save_directory))
files = self._tokenizer.save_model(folder, name=file)
return tuple(files)
def trim_batch(
input_ids, pad_token_id, attention_mask=None,
):
"""Remove columns that are populated exclusively by pad_token_id"""
keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
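# Illustrative worked example (not part of the original source), assuming PyTorch
# tensors and `import torch`:
#   input_ids = torch.tensor([[5, 6, 0], [7, 0, 0]])  # 0 plays the role of pad_token_id
#   trim_batch(input_ids, pad_token_id=0)             # -> tensor([[5, 6], [7, 0]])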
|
py | 1a35fbd229109a51727b1f33d54d4a28fcd48e9d | from ups_byt_test import *
import time
file_name = 'inout_0.4_test.txt'
# time formats
format_time_old = "%W %j %A %d.%m.%Y %H:%M:%S"
format_time_new = "%d.%m.%Y %H:%M:%S"
ups_data = {}
# Initialise an empty in/out log for every known card holder
for key in ups_member_card:
    ups_data[key] = {
        "inout_log": [],
    }
with open(file_name) as fn:
for log_row, log_line in enumerate(fn):
parse_line = log_line.strip().split(" ")
parse_name = parse_line[0]
parse_status = parse_line[1]
time_log = " ".join(parse_line[2:])
if parse_name in ups_data.keys():
parse_time_raw = time.strptime(time_log, format_time_old)
time_new_format = time.strftime(format_time_new, parse_time_raw)
time_seconds = time.mktime(parse_time_raw)
time_log_status = "{} {}".format(parse_status, time_new_format)
ups_data[parse_name]["inout_log"].append(time_log_status)
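# Illustrative sketch (not part of the original script): each input line is expected to
# look like "<name> <status> %W %j %A %d.%m.%Y %H:%M:%S" (matching format_time_old),
# e.g. with hypothetical name/status values:
#   novak in 05 032 Monday 01.02.2016 08:15:30
# which would be stored as "in 01.02.2016 08:15:30" under ups_data["novak"]["inout_log"],
# provided "novak" is a key of ups_member_card.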
|
py | 1a35fc4a77e684b84aa35214fad0a6776f33740b | #!/usr/bin/env python3
import subprocess
import jinja2
num_redis_hosts = 3
# create proxy config file
template = open('proxy/envoy.yaml.j2').read()
config = jinja2.Template(template).render(num_redis_hosts = num_redis_hosts)
envoy_yaml = open('proxy/envoy.yaml', 'w')
envoy_yaml.write(config)
# start containers
shell_cmd = 'docker-compose up --build -d --scale redis={}'.format(num_redis_hosts)
print(shell_cmd)
# subprocess.run(shell_cmd, shell=True, check=True)
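# Illustrative note (not part of the original script): the Jinja2 render call above
# implies proxy/envoy.yaml.j2 presumably references the variable with standard Jinja2
# syntax, e.g. "{{ num_redis_hosts }}" or a "{% for i in range(num_redis_hosts) %}"
# loop over the scaled redis containers (hypothetical snippet; the template itself is
# not shown here).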
|
py | 1a35fc5347616af9492a96ff1aedb94cb2569437 | """Data models."""
from attr import attrib, attrs
@attrs
class Card:
"""Card.
created An ISO 8601 timestamp for when the card was created
cvv Three digit cvv printed on the back of the card
funding See FundingAccount
exp_month Two digit (MM) expiry month
exp_year Four digit (YYYY) expiry year
hostname Hostname of card’s locked merchant (will be empty if not applicable)
last_four Last four digits of the card number
memo Friendly name to identify the card
pan Sixteen digit card number
spend_limit Amount (in cents) to limit approved authorizations.
Transaction requests above the spend limit will be declined
spend_limit_duration TRANSACTION, MONTHLY, ANNUALLY, FOREVER
state OPEN, PAUSED, CLOSED
token Globally unique identifier
type SINGLE_USE, MERCHANT_LOCKED, UNLOCKED
"""
created = attrib()
cvv = attrib()
funding = attrib()
exp_month = attrib()
exp_year = attrib()
hostname = attrib()
last_four = attrib()
memo = attrib()
pan = attrib()
spend_limit = attrib()
spend_limit_duration = attrib()
state = attrib()
token = attrib()
type = attrib()
@attrs
class Event:
"""Event.
A single card transaction may include multiple events that affect the
transaction state and lifecycle.
amount Amount of the transaction event
created Date and time this event entered the system
result APPROVED or decline reason. See below for full enumeration
token Globally unique identifier
type AUTHORIZATION, AUTHORIZATION_ADVICE, CLEARING, VOID, RETURN
"""
amount = attrib()
created = attrib()
result = attrib()
token = attrib()
type = attrib()
@attrs
class FundingAccount:
"""Funding Account.
account_name Account name identifying the funding source. In some cases
this may be the last four digits of the account number
token Globally unique identifier
type Type of funding source, see enumerations for list
"""
account_name = attrib()
token = attrib()
type = attrib()
@attrs
class Merchant:
"""Merchant.
acceptor_id Unique identifier to identify the payment card acceptor
city City of card acceptor
country Country of card acceptor
descriptor Short description of card acceptor
mcc Merchant category code
state Geographic state of card acceptor
"""
acceptor_id = attrib()
city = attrib()
country = attrib()
descriptor = attrib()
mcc = attrib()
state = attrib()
@attrs
class Funding:
"""Funding.
funding A list of objects that describe how this transaction was funded,
with the amount represented in cents. A reference to the funding
account for the card that made this transaction may appear here
and the token will match the token for the funding account in
the card field. If any promotional credit was used in paying for
this transaction, its type will be PROMO.
"""
amount = attrib()
token = attrib()
type = attrib()
@attrs
class Transaction:
"""Transaction.
amount Authorization amount (in cents) of the transaction. This may change
over time
card See Card schema definition
created Date and time when the transaction first occurred
events A list of all events that have modified this transaction
funding See Funding schema definition
merchant See Merchant schema definition
result APPROVED or decline reason. See below for full enumeration
settled_amount Amount (in cents) of the transaction that has been settled.
This may change over time
status PENDING, VOIDED, SETTLING, SETTLED, BOUNCED
token Globally unique identifier
"""
amount = attrib()
card = attrib()
created = attrib()
events = attrib()
funding = attrib()
merchant = attrib()
result = attrib()
settled_amount = attrib()
status = attrib()
token = attrib()
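# Illustrative construction example (added; not part of the original module). The field
# values below are made up and only demonstrate the keyword arguments that attrs
# generates for these classes.
if __name__ == "__main__":
    merchant = Merchant(acceptor_id="174030075991", city="NEW YORK", country="USA",
                        descriptor="COFFEE SHOP", mcc="5812", state="NY")
    event = Event(amount=500, created="2021-01-01T00:00:00Z", result="APPROVED",
                  token="example-token", type="AUTHORIZATION")
    print(merchant)
    print(event)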
|
py | 1a35fc5f8f694bf9688ae609538aa46fade53e69 | import logging
import math
import pytest
from dexbot.strategies.staggered_orders import VirtualOrder
# Turn on debug for dexbot logger
logger = logging.getLogger("dexbot")
logger.setLevel(logging.DEBUG)
###################
# Higher-level methods which depends on lower-level methods
###################
def test_refresh_balances(orders1):
""" Check if balance refresh works
"""
worker = orders1
worker.refresh_balances()
balance = worker.count_asset()
assert worker.base_balance['amount'] > 0
assert worker.quote_balance['amount'] > 0
assert worker.base_total_balance == balance['base']
assert worker.quote_total_balance == balance['quote']
def test_refresh_orders(orders1):
""" Make sure orders refresh is working
        Note: this test doesn't check orders sorting
"""
worker = orders1
worker.refresh_orders()
assert worker.virtual_buy_orders[0]['base']['amount'] == 10
assert worker.virtual_sell_orders[0]['base']['amount'] == 10
assert worker.real_buy_orders[0]['base']['amount'] == 10
assert worker.real_sell_orders[0]['base']['amount'] == 10
assert len(worker.sell_orders) == 2
assert len(worker.buy_orders) == 2
def test_check_min_order_size(worker):
""" Make sure our orders are always match minimal allowed size
"""
worker.calculate_min_amounts()
if worker.order_min_quote > worker.order_min_base:
# Limiting asset is QUOTE
# Intentionally pass amount 2 times lower than minimum, the function should return increased amount
corrected_amount = worker.check_min_order_size(worker.order_min_quote / 2, 1)
assert corrected_amount == worker.order_min_quote
else:
# Limiting precision is BASE, at price=1 amounts are the same, so pass 2 times lower amount
corrected_amount = worker.check_min_order_size(worker.order_min_base / 2, 1)
assert corrected_amount >= worker.order_min_quote
# Place/cancel real order to ensure no errors from the node
worker.place_market_sell_order(corrected_amount, 1, returnOrderId=False)
worker.cancel_all_orders()
def test_remove_outside_orders(orders1):
""" All orders in orders1 fixture are outside of the range, so remove_outside_orders() should cancel all
"""
worker = orders1
worker.refresh_orders()
assert worker.remove_outside_orders(worker.sell_orders, worker.buy_orders)
assert len(worker.sell_orders) == 0
assert len(worker.buy_orders) == 0
def test_restore_virtual_orders(orders2):
""" Very basic test, checks if number of virtual orders at least 2
"""
worker = orders2
worker.restore_virtual_orders()
assert len(worker.virtual_orders) >= 2
def test_replace_real_order_with_virtual(orders2):
""" Try to replace 2 furthest orders with virtual, then compare difference
"""
worker = orders2
worker.virtual_orders = []
num_orders_before = len(worker.real_buy_orders) + len(worker.real_sell_orders)
worker.replace_real_order_with_virtual(worker.real_buy_orders[-1])
worker.replace_real_order_with_virtual(worker.real_sell_orders[-1])
worker.refresh_orders()
num_orders_after = len(worker.real_buy_orders) + len(worker.real_sell_orders)
assert num_orders_before - num_orders_after == 2
assert len(worker.virtual_orders) == 2
def test_replace_virtual_order_with_real(orders3):
""" Try to replace 2 furthest virtual orders with real orders
"""
worker = orders3
num_orders_before = len(worker.virtual_orders)
num_real_orders_before = len(worker.own_orders)
assert worker.replace_virtual_order_with_real(worker.virtual_buy_orders[-1])
assert worker.replace_virtual_order_with_real(worker.virtual_sell_orders[-1])
num_orders_after = len(worker.virtual_orders)
num_real_orders_after = len(worker.own_orders)
assert num_orders_before - num_orders_after == 2
assert num_real_orders_after - num_real_orders_before == 2
def test_store_profit_estimation_data(worker, storage_db):
""" Check if storing of profit estimation data works
"""
worker.refresh_balances()
worker.store_profit_estimation_data(force=True)
account = worker.worker.get('account')
data = worker.get_recent_balance_entry(account, worker.worker_name, worker.base_asset, worker.quote_asset)
assert data.center_price == worker.market_center_price
assert data.base_total == worker.base_total_balance
assert data.quote_total == worker.quote_total_balance
def test_check_partial_fill(worker, partially_filled_order):
""" Test that check_partial_fill() can detect partially filled order
"""
is_not_partially_filled = worker.check_partial_fill(partially_filled_order, fill_threshold=0)
assert not is_not_partially_filled
is_not_partially_filled = worker.check_partial_fill(partially_filled_order, fill_threshold=90)
assert is_not_partially_filled
def test_replace_partially_filled_order(worker, partially_filled_order):
""" Test if replace_partially_filled_order() do correct replacement
"""
worker.replace_partially_filled_order(partially_filled_order)
new_order = worker.own_orders[0]
assert new_order['base']['amount'] == new_order['for_sale']['amount']
def test_place_lowest_buy_order(worker2):
""" Check if placement of lowest buy order works in general
"""
worker = worker2
worker.refresh_balances()
worker.place_lowest_buy_order(worker.base_balance)
worker.refresh_orders()
    # Expect the furthest buy order price to be within 2 increments of the lower bound
assert worker.buy_orders[-1]['price'] < worker.lower_bound * (1 + worker.increment * 2)
def test_place_highest_sell_order(worker2):
""" Check if placement of highest sell order works in general
"""
worker = worker2
worker.refresh_balances()
worker.place_highest_sell_order(worker.quote_balance)
worker.refresh_orders()
    # Expect the furthest sell order price to be within 2 increments of the upper bound
assert worker.sell_orders[-1]['price'] ** -1 > worker.upper_bound / (1 + worker.increment * 2)
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_closer_order_real_or_virtual(orders5, asset):
""" Closer order may be real or virtual, depending on distance from the center and operational_depth
1. Closer order within operational depth must be real
2. Closer order outside of operational depth must be virtual if previous order is virtual
3. Closer order outside of operational depth must be real if previous order is real
"""
worker = orders5
if asset == 'base':
virtual_outside = worker.virtual_buy_orders[-1]
virtual_within = worker.virtual_buy_orders[0]
real_outside = worker.real_buy_orders[-1]
real_within = worker.real_buy_orders[0]
elif asset == 'quote':
virtual_outside = worker.virtual_sell_orders[-1]
virtual_within = worker.virtual_sell_orders[0]
real_outside = worker.real_sell_orders[-1]
real_within = worker.real_sell_orders[0]
closer_order = worker.place_closer_order(asset, virtual_outside, place_order=True)
assert isinstance(
closer_order, VirtualOrder
), "Closer order outside of operational depth must be virtual if previous order is virtual"
# When self.returnOrderId is True, place_market_xxx_order() will return bool
closer_order = worker.place_closer_order(asset, virtual_within, place_order=True)
assert closer_order, "Closer order within operational depth must be real"
closer_order = worker.place_closer_order(asset, real_outside, place_order=True)
assert closer_order, "Closer order outside of operational depth must be real if previous order is real"
closer_order = worker.place_closer_order(asset, real_within, place_order=True)
assert closer_order, "Closer order within operational depth must be real"
@pytest.mark.xfail(reason='https://github.com/bitshares/python-bitshares/issues/227')
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_closer_order_price_amount(orders5, asset):
""" Test that closer order price and amounts are correct
"""
worker = orders5
if asset == 'base':
order = worker.buy_orders[0]
elif asset == 'quote':
order = worker.sell_orders[0]
worker.returnOrderId = True
closer_order = worker.place_closer_order(asset, order, place_order=True)
# Test for correct price
assert closer_order['price'] == order['price'] * (1 + worker.increment)
# Test for correct amount
if (
worker.mode == 'mountain'
or (worker.mode == 'buy_slope' and asset == 'quote')
or (worker.mode == 'sell_slope' and asset == 'base')
):
assert closer_order['quote']['amount'] == order['quote']['amount']
elif (
worker.mode == 'valley'
or (worker.mode == 'buy_slope' and asset == 'base')
or (worker.mode == 'sell_slope' and asset == 'quote')
):
assert closer_order['base']['amount'] == order['base']['amount']
elif worker.mode == 'neutral':
assert closer_order['base']['amount'] == order['base']['amount'] * math.sqrt(1 + worker.increment)
@pytest.mark.xfail(reason='https://github.com/bitshares/python-bitshares/issues/227')
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_closer_order_no_place_order(orders5, asset):
""" Test place_closer_order() with place_order=False kwarg
"""
worker = orders5
if asset == 'base':
order = worker.buy_orders[0]
elif asset == 'quote':
order = worker.sell_orders[0]
closer_order = worker.place_closer_order(asset, order, place_order=False)
worker.place_closer_order(asset, order, place_order=True)
worker.refresh_orders()
if asset == 'base':
real_order = worker.buy_orders[0]
price = real_order['price']
amount = real_order['quote']['amount']
elif asset == 'quote':
real_order = worker.sell_orders[0]
price = real_order['price'] ** -1
amount = real_order['base']['amount']
assert closer_order['price'] == price
assert closer_order['amount'] == amount
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_closer_order_allow_partial_hard_limit(orders2, asset):
""" Test place_closer_order with allow_partial=True when avail balance is less than minimal allowed order size
"""
worker = orders2
if asset == 'base':
order = worker.buy_orders[0]
price = order['price']
# Pretend we have balance smaller than hard limit
worker.base_balance['amount'] = worker.check_min_order_size(0, price) / 2
elif asset == 'quote':
order = worker.sell_orders[0]
price = order['price'] ** -1
worker.quote_balance['amount'] = worker.check_min_order_size(0, price) / 2
num_orders_before = len(worker.own_orders)
worker.place_closer_order(asset, order, place_order=True, allow_partial=True)
num_orders_after = len(worker.own_orders)
# Expect that order was not placed
assert num_orders_before == num_orders_after
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_closer_order_allow_partial_soft_limit(orders2, asset):
""" Test place_closer_order with allow_partial=True when avail balance is less than self.partial_fill_threshold
restriction
"""
worker = orders2
if asset == 'base':
order = worker.buy_orders[0]
# Pretend we have balance smaller than soft limit
worker.base_balance['amount'] = order['base']['amount'] * worker.partial_fill_threshold / 1.1
elif asset == 'quote':
order = worker.sell_orders[0]
worker.quote_balance['amount'] = order['base']['amount'] * worker.partial_fill_threshold / 1.1
num_orders_before = len(worker.own_orders)
worker.place_closer_order(asset, order, place_order=True, allow_partial=True)
num_orders_after = len(worker.own_orders)
# Expect that order was not placed
assert num_orders_before == num_orders_after
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_closer_order_allow_partial(orders2, asset):
""" Test place_closer_order with allow_partial=True when avail balance is more than self.partial_fill_threshold
restriction (enough for partial order)
"""
worker = orders2
if asset == 'base':
order = worker.buy_orders[0]
worker.base_balance['amount'] = order['base']['amount'] * worker.partial_fill_threshold * 2
elif asset == 'quote':
order = worker.sell_orders[0]
worker.quote_balance['amount'] = order['base']['amount'] * worker.partial_fill_threshold * 2
num_orders_before = len(worker.own_orders)
worker.place_closer_order(asset, order, place_order=True, allow_partial=True)
num_orders_after = len(worker.own_orders)
# Expect order placed
assert num_orders_after - num_orders_before == 1
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_closer_order_not_allow_partial(orders2, asset):
""" Test place_closer_order with allow_partial=False
"""
worker = orders2
if asset == 'base':
order = worker.buy_orders[0]
worker.base_balance['amount'] = order['base']['amount'] * worker.partial_fill_threshold * 2
elif asset == 'quote':
order = worker.sell_orders[0]
worker.quote_balance['amount'] = order['base']['amount'] * worker.partial_fill_threshold * 2
num_orders_before = len(worker.own_orders)
worker.place_closer_order(asset, order, place_order=True, allow_partial=False)
num_orders_after = len(worker.own_orders)
# Expect that order was not placed
assert num_orders_before == num_orders_after
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_closer_order_own_asset_limit(orders5, asset):
""" Place closer order with own_asset_limit, test that amount of a new order is matching limit
"""
worker = orders5
if asset == 'base':
order = worker.buy_orders[0]
elif asset == 'quote':
order = worker.sell_orders[0]
limit = order['base']['amount'] / 2
worker.returnOrderId = True
closer_order = worker.place_closer_order(asset, order, place_order=True, own_asset_limit=limit)
assert closer_order['base']['amount'] == limit
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_closer_order_opposite_asset_limit(orders5, asset):
""" Place closer order with opposite_asset_limit, test that amount of a new order is matching limit
"""
worker = orders5
if asset == 'base':
order = worker.buy_orders[0]
elif asset == 'quote':
order = worker.sell_orders[0]
limit = order['quote']['amount'] / 2
worker.returnOrderId = True
closer_order = worker.place_closer_order(asset, order, place_order=True, opposite_asset_limit=limit)
assert closer_order['quote']['amount'] == limit
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_closer_order_instant_fill_disabled(orders5, asset):
""" When instant fill is disabled, new order should not cross lowest ask or highest bid
"""
worker = orders5
if asset == 'base':
order = worker.buy_orders[0]
elif asset == 'quote':
order = worker.sell_orders[0]
worker.is_instant_fill_enabled = False
    # Bump increment so high that closer order will inevitably cross an opposite one
worker.increment = 100
result = worker.place_closer_order(asset, order, place_order=True)
assert result is None
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_further_order_real_or_virtual(orders5, asset):
""" Further order may be real or virtual, depending on distance from the center and operational_depth
1. Further order within operational depth must be real
2. Further order within operational depth must be virtual if virtual=True was given
2. Further order outside of operational depth must be virtual
"""
worker = orders5
if asset == 'base':
real_outside = worker.real_buy_orders[-1]
real_within = worker.real_buy_orders[0]
elif asset == 'quote':
real_outside = worker.real_sell_orders[-1]
real_within = worker.real_sell_orders[0]
further_order = worker.place_further_order(asset, real_within, place_order=True)
assert further_order, "Further order within operational depth must be real"
further_order = worker.place_further_order(asset, real_within, place_order=True, virtual=True)
assert isinstance(
further_order, VirtualOrder
), "Further order within operational depth must be virtual if virtual=True was given"
further_order = worker.place_further_order(asset, real_outside, place_order=True)
assert isinstance(further_order, VirtualOrder), "Further order outside of operational depth must be virtual"
@pytest.mark.xfail(reason='https://github.com/bitshares/python-bitshares/issues/227')
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_further_order_price_amount(orders5, asset):
""" Test that further order price and amounts are correct
"""
worker = orders5
if asset == 'base':
order = worker.buy_orders[0]
elif asset == 'quote':
order = worker.sell_orders[0]
worker.returnOrderId = True
further_order = worker.place_further_order(asset, order, place_order=True)
# Test for correct price
assert further_order['price'] == order['price'] / (1 + worker.increment)
# Test for correct amount
if (
worker.mode == 'mountain'
or (worker.mode == 'buy_slope' and asset == 'quote')
or (worker.mode == 'sell_slope' and asset == 'base')
):
assert further_order['quote']['amount'] == order['quote']['amount']
elif (
worker.mode == 'valley'
or (worker.mode == 'buy_slope' and asset == 'base')
or (worker.mode == 'sell_slope' and asset == 'quote')
):
assert further_order['base']['amount'] == order['base']['amount']
elif worker.mode == 'neutral':
assert further_order['base']['amount'] == order['base']['amount'] / math.sqrt(1 + worker.increment)
@pytest.mark.xfail(reason='https://github.com/bitshares/python-bitshares/issues/227')
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_further_order_no_place_order(orders5, asset):
""" Test place_further_order() with place_order=False kwarg
"""
worker = orders5
if asset == 'base':
order = worker.buy_orders[0]
elif asset == 'quote':
order = worker.sell_orders[0]
further_order = worker.place_further_order(asset, order, place_order=False)
# Place real order to compare with
worker.place_further_order(asset, order, place_order=True)
worker.refresh_orders()
if asset == 'base':
real_order = worker.buy_orders[1]
price = real_order['price']
amount = real_order['quote']['amount']
elif asset == 'quote':
real_order = worker.sell_orders[1]
price = real_order['price'] ** -1
amount = real_order['base']['amount']
assert further_order['price'] == price
assert further_order['amount'] == amount
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_further_order_not_allow_partial(orders2, asset):
""" Test place_further_order with allow_partial=False
"""
worker = orders2
if asset == 'base':
order = worker.buy_orders[0]
worker.base_balance['amount'] = order['base']['amount'] / 2
elif asset == 'quote':
order = worker.sell_orders[0]
worker.quote_balance['amount'] = order['base']['amount'] / 2
num_orders_before = len(worker.own_orders)
worker.place_further_order(asset, order, place_order=True, allow_partial=False)
num_orders_after = len(worker.own_orders)
# Expect that order was not placed
assert num_orders_before == num_orders_after
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_further_order_allow_partial_hard_limit(orders2, asset):
""" Test place_further_order with allow_partial=True when avail balance is less than minimal allowed order size
"""
worker = orders2
if asset == 'base':
order = worker.buy_orders[0]
price = order['price']
# Pretend we have balance smaller than hard limit
worker.base_balance['amount'] = worker.check_min_order_size(0, price) / 2
elif asset == 'quote':
order = worker.sell_orders[0]
price = order['price'] ** -1
worker.quote_balance['amount'] = worker.check_min_order_size(0, price) / 2
num_orders_before = len(worker.own_orders)
worker.place_further_order(asset, order, place_order=True, allow_partial=True)
num_orders_after = len(worker.own_orders)
# Expect that order was not placed
assert num_orders_before == num_orders_after
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_place_further_order_allow_partial(orders2, asset):
""" Test place_further_order with allow_partial=True
"""
worker = orders2
if asset == 'base':
order = worker.buy_orders[0]
worker.base_balance['amount'] = order['base']['amount'] / 2
elif asset == 'quote':
order = worker.sell_orders[0]
worker.quote_balance['amount'] = order['base']['amount'] / 2
num_orders_before = len(worker.own_orders)
worker.place_closer_order(asset, order, place_order=True, allow_partial=True)
num_orders_after = len(worker.own_orders)
# Expect order placed
assert num_orders_after - num_orders_before == 1
|
py | 1a35fc8d9003781117dfc23ef06674d21969a205 | # fast.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/framework
import logging
import fastpinball
import time
from mpf.system.timing import Timing
from mpf.system.platform import Platform
class HardwarePlatform(Platform):
"""Platform class for the FAST hardware controller.
Parameters
----------
    machine : MachineController
A reference to the MachineController instance
"""
def __init__(self, machine):
super(HardwarePlatform, self).__init__(machine)
self.log = logging.getLogger('FAST Platform')
self.log.debug("Configuring machine for FAST hardware.")
# ----------------------------------------------------------------------
# Platform-specific hardware features. WARNING: Do not edit these. They
# are based on what the FAST hardware can and cannot do.
self.features['max_pulse'] = 255 # todo
self.features['hw_timer'] = True
self.features['hw_rule_coil_delay'] = False # todo
self.features['variable_recycle_time'] = True # todo
self.features['variable_debounce_time'] = True # todo
self.features['hw_enable_auto_disable'] = True
# Make the platform features available to everyone
self.machine.config['Platform'] = self.features
# ----------------------------------------------------------------------
self.hw_rules = dict()
# Set up the connection to the FAST controller
self.log.info("Initializing FAST Pinball Controller interface...")
ports = list()
if ('port0_name' in self.machine.config['Fast'] and
'port0_baud' in self.machine.config['Fast']):
ports.append((self.machine.config['Fast']['port0_name'],
self.machine.config['Fast']['port0_baud']))
if ('port1_name' in self.machine.config['Fast'] and
'port1_baud' in self.machine.config['Fast']):
ports.append((self.machine.config['Fast']['port1_name'],
self.machine.config['Fast']['port1_baud']))
if ('port2_name' in self.machine.config['Fast'] and
'port2_baud' in self.machine.config['Fast']):
ports.append((self.machine.config['Fast']['port2_name'],
self.machine.config['Fast']['port2_baud']))
self.log.debug("FAST Ports: %s", ports)
if ('main_port' in self.machine.config['Fast'] and
'led_port' in self.machine.config['Fast'] and
'dmd_port' in self.machine.config['Fast']):
port_assignments = (self.machine.config['Fast']['main_port'],
self.machine.config['Fast']['led_port'],
self.machine.config['Fast']['dmd_port'])
else:
self.log.error("Error in fast config. Entries needed for main_port"
" and led_port and dmd_port.")
quit()
self.fast = fastpinball.fpOpen(ports, port_assignments)
# We need to setup a timer to get the initial switch reads, so we just
# do this one at 1 sec now. It will be overwritten later when the
# run loop starts
fastpinball.fpTimerConfig(self.fast, 1000000)
fastpinball.fpReadAllSwitches(self.fast)
event = fastpinball.fpGetEventObject()
fastpinball.fpGetEventType(event)
fastpinball.fpEventPoll(self.fast, event)
self.log.info("FAST Pinball Controller interface connected")
if 'config_number_format' not in self.machine.config['Fast']:
self.machine.config['Fast']['config_number_format'] = 'int'
self.machine_type = (
self.machine.config['Hardware']['DriverBoards'].upper())
if self.machine_type == 'WPC':
self.log.debug("Configuring the FAST Controller for WPC driver "
"boards")
elif self.machine_type == 'FAST':
self.log.debug("Configuring FAST Controller for FAST driver boards.")
self.wpc_switch_map = {
'S11':'00', 'S12':'01', 'S13':'02', 'S14':'03',
'S15':'04', 'S16':'05', 'S17':'06', 'S18':'07',
'S21':'08', 'S22':'09', 'S23':'10', 'S24':'11',
'S25':'12', 'S26':'13', 'S27':'14', 'S28':'15',
'S31':'16', 'S32':'17', 'S33':'18', 'S34':'19',
'S35':'20', 'S36':'21', 'S37':'22', 'S38':'23',
'S41':'24', 'S42':'25', 'S43':'26', 'S44':'27',
'S45':'28', 'S46':'29', 'S47':'30', 'S48':'31',
'S51':'32', 'S52':'33', 'S53':'34', 'S54':'35',
'S55':'36', 'S56':'37', 'S57':'38', 'S58':'39',
'S61':'40', 'S62':'41', 'S63':'42', 'S64':'43',
'S65':'44', 'S66':'45', 'S67':'46', 'S68':'47',
'S71':'48', 'S72':'49', 'S73':'50', 'S74':'51',
'S75':'52', 'S76':'53', 'S77':'54', 'S78':'55',
'S81':'56', 'S82':'57', 'S83':'58', 'S84':'59',
'S85':'60', 'S86':'61', 'S87':'62', 'S88':'63',
'S91':'64', 'S92':'65', 'S93':'66', 'S94':'67',
'S95':'68', 'S96':'69', 'S97':'70', 'S98':'71',
'SD1':'80', 'SD2':'81', 'SD3':'82', 'SD4':'83',
'SD5':'84', 'SD6':'85', 'SD7':'86', 'SD8':'87',
'DIP1':'88', 'DIP2':'89', 'DIP3':'90',
'DIP4':'91', 'DIP5':'92', 'DIP6':'93',
'DIP7':'94', 'DIP8':'95',
'SF1':'96', 'SF2':'97', 'SF3':'98', 'SF4':'99',
'SF5':'100', 'SF6':'101', 'SF7':'102',
'SF8':'103',
}
self.wpc_light_map = {
'L11':'00', 'L12':'01', 'L13':'02', 'L14':'03',
'L15':'04', 'L16':'05', 'L17':'06', 'L18':'07',
'L21':'08', 'L22':'09', 'L23':'11', 'L24':'12',
'L25':'12', 'L26':'13', 'L27':'14', 'L28':'15',
'L31':'16', 'L32':'17', 'L33':'18', 'L34':'19',
'L35':'20', 'L36':'21', 'L37':'22', 'L38':'23',
'L41':'24', 'L42':'25', 'L43':'26', 'L44':'27',
'L45':'28', 'L46':'29', 'L47':'30', 'L48':'31',
'L51':'32', 'L52':'33', 'L53':'34', 'L54':'35',
'L55':'36', 'L56':'37', 'L57':'38', 'L58':'39',
'L61':'40', 'L62':'41', 'L63':'42', 'L64':'43',
'L65':'44', 'L66':'45', 'L67':'48', 'L68':'49',
'L71':'48', 'L72':'49', 'L73':'50', 'L74':'51',
'L75':'52', 'L76':'53', 'L77':'54', 'L78':'55',
'L81':'56', 'L82':'57', 'L83':'58', 'L84':'59',
'L85':'60', 'L86':'61', 'L87':'62', 'L88':'63',
}
self.wpc_driver_map = {
'C01':'00', 'C02':'01', 'C03':'02', 'C04':'03',
'C05':'04', 'C06':'05', 'C07':'06', 'C08':'07',
'C09':'08', 'C10':'09', 'C11':'10', 'C12':'11',
'C13':'12', 'C14':'13', 'C15':'14', 'C16':'15',
'C17':'16', 'C18':'17', 'C19':'18', 'C20':'19',
'C21':'20', 'C22':'21', 'C23':'22', 'C24':'23',
'C25':'24', 'C26':'25', 'C27':'26', 'C28':'27',
'FLRM':'32', 'FLRH':'33', 'FLLM':'34',
'FLLH':'35', 'FURM':'36', 'FURH':'37',
'FULM':'38', 'FULH':'39',
}
self.wpc_gi_map = {'G01':'00', 'G02':'01', 'G03':'02', 'G04':'03',
'G05':'04', 'G06':'05', 'G07':'06', 'G08':'07',
}
def timer_initialize(self):
self.log.debug("Initializing the FAST hardware timer for %sHz",
Timing.HZ)
fastpinball.fpTimerConfig(self.fast,
int(Timing.secs_per_tick * 1000000))
# timer tick is in microsecs
def configure_driver(self, config, device_type='coil'):
        # If we have WPC driver boards, look up the driver number
if self.machine_type == 'WPC':
config['number'] = int(self.wpc_driver_map.get(
config['number_str']))
if 'connection' not in config:
config['connection'] = 0 # local driver (default for WPC)
else:
config['connection'] = 1 # network driver
# If we have fast driver boards, we need to make sure we have ints
elif self.machine_type == 'FAST':
if self.machine.config['Fast']['config_number_format'] == 'hex':
config['number'] = int(config['number_str'], 16)
# Now figure out the connection type
if 'connection' not in config:
config['connection'] = 1 # network driver (default for FAST)
else:
config['connection'] = 0 # local driver
# convert the driver number into a tuple which is:
# (driver number, connection type)
config['number'] = (config['number'], config['connection'])
return FASTDriver(config['number'], self.fast), config['number']
def configure_switch(self, config):
"""Configures the switch object for a FAST Pinball controller.
FAST Controllers support two types of switches: local and network. Local
switches are switches that are connected to the FAST controller board
itself, and network switches are those connected to a FAST I/O board.
MPF needs to know which type of switch is this is. You can specify the
switch's connection type in the config file via the "connection"
        setting (either 'local' or 'network').
If a connection type is not specified, this method will use some
intelligence to try to figure out which default should be used.
If the DriverBoard type is 'fast', then it assumes the default is
'network'. If it's anything else (wpc, system11, bally, etc.) then it
assumes the connection type is 'local'. Connection types can be mixed
and matched.
"""
if self.machine_type == 'WPC': # translate switch number to FAST switch
config['number'] = int(self.wpc_switch_map.get(
config['number_str']))
if 'connection' not in config:
config['connection'] = 0 # local switch (default for WPC)
else:
config['connection'] = 1 # network switch
elif self.machine_type == 'FAST':
if 'connection' not in config:
config['connection'] = 1 # network switch (default for FAST)
else:
config['connection'] = 0 # local switch
if self.machine.config['Fast']['config_number_format'] == 'hex':
config['number'] = int(config['number_str'], 16)
        # convert the switch number into a tuple which is:
# (switch number, connection)
config['number'] = (config['number'], config['connection'])
if 'debounce_on' not in config:
if 'default_debounce_on_ms' in self.machine.config['Fast']:
config['debounce_on'] = self.machine.config['Fast']['default_debounce_on_ms']
else:
config['debounce_on'] = 20
if 'debounce_off' not in config:
if 'default_debounce_off_ms' in self.machine.config['Fast']:
config['debounce_off'] = self.machine.config['Fast']['default_debounce_off_ms']
else:
config['debounce_off'] = 20
self.log.debug("FAST Switch hardware tuple: %s", config['number'])
switch = FASTSwitch(config['number'], config['debounce_on'],
config['debounce_off'], self.fast)
state = fastpinball.fpReadSwitch(self.fast, config['number'][0],
config['number'][1])
# Return the switch object and an integer of its current state.
# 1 = active, 0 = inactive
return switch, config['number'], state
def configure_led(self, config):
# if the LED number is in <channel> - <led> format, convert it to a
# FAST hardware number
if '-' in config['number_str']:
num = config['number_str'].split('-')
            config['number'] = (int(num[0]) * 64) + int(num[1])
else:
config['number'] = str(config['number'])
return FASTDirectLED(config['number'], self.fast)
def configure_gi(self, config):
        if self.machine_type == 'WPC':  # translate GI string number to FAST GI
config['number'] = int(self.wpc_gi_map.get(config['number_str']))
return FASTGIString(config['number'], self.fast), config['number']
def configure_matrixlight(self, config):
        if self.machine_type == 'WPC':  # translate lamp number to FAST lamp
config['number'] = int(self.wpc_light_map.get(config['number_str']))
elif self.machine.config['Fast']['config_number_format'] == 'hex':
config['number'] = int(config['number_str'], 16)
return FASTMatrixLight(config['number'], self.fast), config['number']
def hw_loop(self):
"""Loop code which checks the controller for any events (switch state
changes or notification that a DMD frame was updated).
"""
fast_events = fastpinball.fpGetEventObject()
self.log.debug("Starting the hardware loop")
loop_start_time = time.time() - .01
num_loops = 0
while self.machine.done is False:
self.machine.loop_rate = int(num_loops /
(time.time() - loop_start_time))
fastpinball.fpEventPoll(self.fast, fast_events)
eventType = fastpinball.fpGetEventType(fast_events)
# eventType options:
# 0 = none
# 1 = local switch active
# 2 = local switch inactive
# 3 = network switch active
# 4 = network switch inactive
# 5 = local switch cache has been updated
# 6 = network switch cache has been updated
# 7 = timer tick
if eventType == 0:
continue
elif eventType == 7: # timer_tick
num_loops += 1
self.machine.timer_tick()
elif eventType == 1: # local switch has gone active
self.machine.switch_controller.process_switch(state=1,
num=(fastpinball.fpGetEventSwitchID(fast_events), 0))
elif eventType == 2: # local switch has gone inactive
self.machine.switch_controller.process_switch(state=0,
num=(fastpinball.fpGetEventSwitchID(fast_events), 0))
elif eventType == 3: # network switch has gone active
self.machine.switch_controller.process_switch(state=1,
num=(fastpinball.fpGetEventSwitchID(fast_events), 1))
elif eventType == 4: # network switch has gone inactive
self.machine.switch_controller.process_switch(state=0,
num=(fastpinball.fpGetEventSwitchID(fast_events), 1))
else:
if num_loops != 0:
self.log.info("Hardware loop speed: %sHz",
self.machine.loop_rate)
def _do_set_hw_rule(self,
sw,
sw_activity,
coil_action_ms, # 0 = disable, -1 = hold forever
coil=None,
pulse_ms=0,
pwm_on=0,
pwm_off=0,
delay=0,
recycle_time=0,
debounced=True,
drive_now=False):
"""Used to write (or update) a hardware rule to the FAST controller.
*Hardware Rules* are used to configure the hardware controller to
automatically change driver states based on switch changes. These rules
are completely handled by the hardware (i.e. with no interaction from
the Python game code). They're used for things that you want to happen
fast, like firing coils when flipper buttons are pushed, slingshots, pop
bumpers, etc.
You can overwrite existing hardware rules at any time to change or
remove them.
Parameters
----------
sw : switch object
Which switch you're creating this rule for. The parameter is a
            reference to the switch object itself.
sw_activity : int
Do you want this coil to fire when the switch becomes active
(1) or inactive (0)
coil_action_ms : int
The total time (in ms) that this coil action should take place.
A value of -1 means it's forever.
coil : coil object
Which coil is this rule controlling
pulse_ms : int
How long should the coil be pulsed (ms)
pwm_on : int
If the coil should be held on at less than 100% duty cycle,
this is the "on" time (in ms).
pwm_off : int
If the coil should be held on at less than 100% duty cycle,
this is the "off" time (in ms).
delay : int
Not currently implemented
recycle_time : int
How long (in ms) should this switch rule wait before firing
again. Put another way, what's the "fastest" this rule can
fire? This is used to prevent "machine gunning" of slingshots
and pop bumpers. Do not use it with flippers.
debounced : bool
Should the hardware fire this coil after the switch has been
debounced? Typically no.
drive_now : bool
Should the hardware check the state of the switches when this
            rule is first applied, and fire the coils if they should be?
Typically this is True, especially with flippers because you
want them to fire if the player is holding in the buttons when
the machine enables the flippers (which is done via several
calls to this method.)
"""
# todo update documentation for on time and off time for debounce
self.log.debug("Setting HW Rule. Switch:%s, Action ms:%s, Coil:%s, "
"Pulse:%s, pwm_on:%s, pwm_off:%s, Delay:%s, Recycle:%s,"
"Debounced:%s, Now:%s", sw.name, coil_action_ms,
coil.name, pulse_ms, pwm_on, pwm_off, delay,
recycle_time, debounced, drive_now)
mode = 0
on_time = 0
off_time = recycle_time
if coil_action_ms == -1:
if pwm_on and pwm_off:
mode = 3 # pwm mode
on_time = pwm_on
off_time = pwm_off
else:
mode = 1 # latched mode (coil on solid)
elif 0 < coil_action_ms <= 255:
mode = 0 # pulse mode
on_time = pulse_ms
if sw_activity == 0: # fire this rule when switch turns off
sw_activity = 3
elif sw_activity == 1: # fire this coil when switch turns on
sw_activity = 2
self.hw_rules[coil.config['number']] = {'mode': mode,
'switch': sw.number,
'on': on_time,
'off': off_time}
self.log.debug("Writing HW Rule to FAST Controller. Coil: %s, "
"Mode: %s, Switch: %s, On: %s, Off: %s",
coil.number, mode, sw.number,
on_time, off_time)
fastpinball.fpWriteDriver(self.fast, # fast board
coil.number[0], # coil number
mode, # mode
sw_activity, # triggerType
sw.number[0], # switch
on_time, # on time
off_time, # time before can enable again
coil.number[1], # local or network
)
# todo ensure / verify switch & coil are on the same board.
def _do_clear_hw_rule(self, sw_num):
"""Clears a hardware rule.
This is used if you want to remove the linkage between a switch and
some driver activity. For example, if you wanted to disable your
flippers (so that a player pushing the flipper buttons wouldn't cause
the flippers to flip), you'd call this method with your flipper button
as the *sw_num*.
Parameters
----------
sw_num : int
The number of the switch whose rule you want to clear.
"""
self.log.debug("Clearing HW Rule for switch %s", sw_num)
# find the rule(s) based on this switch
        coils = [k for k, v in self.hw_rules.items() if v['switch'] == sw_num]
for coil in coils:
fastpinball.fpWriteDriver(self.fast, # fast board
coil[0], # coil number
0, # mode
0, # triggerType
0, # switch
0, # on time
0, # off time
coil[1], # local or network
)
# todo ensure / verify switch & coil are on the same board.
class FASTSwitch(object):
"""
fpWriteSwitchConfig params:
fp_device (self.fast)
switch number (switch number as int)
        mode (0 = no report, 1 = report on, 2 = report inverted)
debounce close
debounce open
sound
target (0 = local, 1 = network)
todo add support for different debounce open and close times
"""
def __init__(self, number, debounce_on, debounce_off, fast_device):
self.log = logging.getLogger('FASTSwitch')
self.fast = fast_device
self.number = number[0]
self.connection = number[1]
self.log.debug("fastpinball.fpWriteSwitchConfig(%s, %s, 1, %s, %s, 0, "
"%s)", fast_device, number[0], debounce_on,
debounce_off, number[1])
fastpinball.fpWriteSwitchConfig(fast_device, # fast board
number[0], # switch number
1, # mode (1=report "on")
debounce_on, # debounce on (close)
debounce_off, # debounce off (open)
0, # sound
number[1]) # connection type
class FASTDriver(object):
""" Base class for drivers connected to a FAST Controller.
old - fpWriteDriver(device, driver_id, mode, trigger_sw, on_ms, off_ms)
fpWriteDriver (
device
id
mode (see below)
triggerType (see below)
triggerSwitch (switch id number)
onTime (in ms)
offTime (in ms)
target (connection type. 0 = local, 1 = network)
)
mode options
0 = pulsed
1 = latched
2 = delay
3 = pwm
triggerType options
0 = off
1 = manual
2 = triggered by switch going on
3 = triggered by switch going off
"""
def __init__(self, number, fast_device):
self.log = logging.getLogger('FASTDriver')
self.number = number
self.fast = fast_device
def disable(self):
"""Disables (turns off) this driver."""
self.log.debug('Disabling Driver')
fastpinball.fpWriteDriver(self.fast, # fast board
self.number[0], # driver number
0, # mode
0, # triggerType
0, # switch
0, # on time
0, # off time
self.number[1], # local or network
)
def enable(self):
"""Enables (turns on) this driver."""
self.log.debug('Enabling Driver')
fastpinball.fpWriteDriver(self.fast, # fast board
self.number[0], # driver number
1, # mode
1, # triggerType
0, # switch
0, # on time
0, # off time
self.number[1], # local or network
)
# todo change hold to pulse with re-ups
def pulse(self, milliseconds=None):
"""Pulses this driver.
"""
if not milliseconds in range(256):
raise ValueError('Milliseconds must be in range 0-255.')
self.log.debug('Pulsing Driver for %sms', milliseconds)
fastpinball.fpWriteDriver(self.fast, # fast board
self.number[0], # driver number
0, # mode
1, # triggerType
0, # switch
milliseconds, # on time
0, # off time
self.number[1], # local or network
)
def pwm(self, on_ms=10, off_ms=10, original_on_ms=0, now=True):
"""Enables this driver in a pwm pattern.
"""
if not original_on_ms in range(256):
raise ValueError('original_on_ms must be in range 0-255.')
if not on_ms in range(256):
raise ValueError('on_ms must be in range 0-255.')
if not off_ms in range(256):
raise ValueError('off_ms must be in range 0-255.')
self.log.debug("pwm on:%d, off:%d, now:%s", on_ms,
off_ms, now)
fastpinball.fpWriteDriver(self.fast, # fast board
self.number[0], # driver number
3, # mode
1, # triggerType
0, # switch
on_ms, # on time
off_ms, # off time
self.number[1], # local or network
)
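# Illustrative note (added comment, not from the original code): with the mode/trigger
# values documented in the class docstring, driver.pulse(30) issues
# fpWriteDriver(..., mode=0, triggerType=1, onTime=30, offTime=0), i.e. a one-shot 30ms
# manual pulse, while driver.pwm(on_ms=2, off_ms=8) holds the coil at a 20% duty cycle
# (mode=3).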
class FASTGIString(object):
def __init__(self, number, fast_device):
""" A FAST GI string in a WPC machine.
TODO: Need to implement the enable_relay and control which strings are
dimmable.
"""
self.log = logging.getLogger('FASTGILight')
self.number = number
self.fast = fast_device
def off(self):
fastpinball.fpWriteGiString(self.fast, self.number, 0)
self.last_time_changed = time.time()
def on(self, brightness=255, fade_ms=0, start=0):
if brightness >= 255:
fastpinball.fpWriteGiString(self.fast, self.number, 1)
elif brightness == 0:
self.off()
else:
fastpinball.fpWriteGiString(self.fast, self.number,
int(brightness/255))
self.last_time_changed = time.time()
class FASTMatrixLight(object):
def __init__(self, number, fast_device):
self.log = logging.getLogger('FASTMatrixLight')
self.number = number
self.fast = fast_device
def off(self):
"""Disables (turns off) this driver."""
fastpinball.fpWriteLamp(self.fast, self.number, 0)
self.last_time_changed = time.time()
def on(self, brightness=255, fade_ms=0, start=0):
"""Enables (turns on) this driver."""
if brightness >= 255:
fastpinball.fpWriteLamp(self.fast, self.number, 1)
elif brightness == 0:
self.off()
else:
pass
# patter rates of 10/1 through 2/9
self.last_time_changed = time.time()
class FASTDirectLED(object):
def __init__(self, number, fast_device):
self.log = logging.getLogger('FASTLED')
self.number = number
self.fast = fast_device
self.current_color = [0, 0, 0]
# All FAST LEDs are 3 element RGB
self.log.debug("Creating FAST RGB LED at hardware address: %s",
self.number)
def color(self, color):
# Pad the color with zeros to make sure we have as many colors as
# elements
# todo verify this is needed with FAST. It might just work without
color += [0] * (3 - len(color))
self.log.info("fastpinball.fpWriteRgb(self.fast, %s, %s, %s, %s)",
self.number, color[0], color[1], color[2])
fastpinball.fpWriteRgb(self.fast, self.number, color[0], color[1],
color[2])
def fade(self, color, fadetime):
# todo
# not yet implemented. For now we'll just immediately set the color
self.log.debug("Fading LED %s over %sms", self.name, fadetime)
self.color(color, fadetime)
def disable(self):
"""Disables (turns off) this LED instantly. For multi-color LEDs it
turns all elements off.
"""
fastpinball.fpWriteRgb(self.fast, self.number, 0, 0, 0)
def enable(self, brightness_compensation=True):
        self.color([255, 255, 255])
# The MIT License (MIT)
# Original code on which this module was based:
# Copyright (c) 2009-2011 Adam Preble and Gerry Stellenberg
# Copyright (c) 2013-2014 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
py | 1a35fd4b9f323fd2cfee676fffbd87bb21346375 | from fastapi import APIRouter, Request
import json
from typing import List
from loguru import logger
from starlette.templating import _TemplateResponse
from app.config import RESOURCES_DIR
from app.dependencies import templates
router = APIRouter()
def credits_from_json() -> List:
path = RESOURCES_DIR / "credits.json"
try:
with open(path, 'r') as json_file:
json_list = json.load(json_file)
except (IOError, ValueError):
logger.exception(
"An error occurred during reading of json file")
return []
return json_list
@router.get("/credits")
def credits(request: Request) -> _TemplateResponse:
credit_list = credits_from_json()
return templates.TemplateResponse("credits.html", {
"request": request,
"credit_list": credit_list
})
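# Illustrative sketch (added; assumptions, not from the original project): credits.json
# under RESOURCES_DIR is expected to hold a JSON list that credits.html iterates over,
# for example
#
#   [{"name": "Jane Doe", "github": "https://github.com/janedoe"}]
#
# and the route can then be exercised with FastAPI's test client:
#
#   from fastapi import FastAPI
#   from fastapi.testclient import TestClient
#
#   app = FastAPI()
#   app.include_router(router)
#   assert TestClient(app).get("/credits").status_code == 200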
|
py | 1a35fecfe2807f705f331bc8b9a72de04571df6b | def run(line, start_panel):
num_of_operands = [0, 3, 3, 1, 1, 2, 2, 3, 3, 1]
program = [int(x) for x in line]+[0]*10000
i, base = 0, 0
panels, pos, outputs = {(0,0):start_panel}, (0,0), []
directions, dir_idx = [(-1,0), (0,1), (1,0), (0,-1)], 0
while program[i] != 99:
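        # Zero-pad the current instruction to five digits: the last two digits are the
        # opcode and the leading three are the parameter modes, read right-to-left
        # (e.g. 1102 -> "01102" -> opcode 2 with modes [1, 1, 0] for operands 1..3).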
modes = [int(x) for x in f"{program[i]:0>5}"[:3]][::-1]
instruction = int(f"{program[i]:0>5}"[3:])
base_tmp = [base if modes[x]==2 else 0 for x in range(num_of_operands[instruction])]
operands = [program[i+x+1] if modes[x]==1 else program[base_tmp[x]+program[i+x+1]] for x in range(num_of_operands[instruction])]
if instruction == 1:
program[base_tmp[2]+program[i+3]] = operands[0] + operands[1]
elif instruction == 2:
program[base_tmp[2]+program[i+3]] = operands[0] * operands[1]
elif instruction == 3:
program[base_tmp[0]+program[i+1]] = panels[pos] if pos in panels else 0
elif instruction == 4:
outputs.append(operands[0])
if len(outputs) == 2:
panels[pos] = outputs[0]
dir_idx = (dir_idx + (1 if outputs[1] else -1)) % len(directions)
pos = (pos[0] + directions[dir_idx][0], pos[1] + directions[dir_idx][1])
outputs = []
elif instruction == 5:
i = (operands[1] - 3) if operands[0]!=0 else i
elif instruction == 6:
i = (operands[1] - 3) if operands[0]==0 else i
elif instruction == 7:
program[base_tmp[2]+program[i+3]] = int(operands[0] < operands[1])
elif instruction == 8:
program[base_tmp[2]+program[i+3]] = int(operands[0] == operands[1])
elif instruction == 9:
base += operands[0]
i += num_of_operands[instruction] + 1
return panels
with open("input.txt") as file:
data = file.readline().split(",")
print(len(run(data,0)))
result = run(data, 1)
tmp = [[" "]*50 for _ in range(7)]
for i in zip(result.keys(), result.values()):
tmp[i[0][0]][i[0][1]] = "#" if i[1] else " "
[print(" ".join(x)) for x in tmp] |
py | 1a35fef98dff21b127b25f15db62031e873d2b80 | from nltk.cluster import KMeansClusterer, cosine_distance # will get nan when u v are zero?
import pandas as pd
from sklearn.cluster import KMeans
from gensim.utils import tokenize
import pyLDAvis
import pyLDAvis.gensim  # needed by pyLDAvis.gensim.prepare() in visualize_LDA_model()
from gensim.models import LdaModel
from gensim.corpora.dictionary import Dictionary
import numpy as np
################################################
## Majority vote rules
################################################
def link_group_to_label(train_label, train_pred, num_topics=100):
"""with majority vote rule"""
# Maping clusters into labels
df = pd.DataFrame(list(zip(train_label, train_pred)), columns=['actual_class', 'cluster'])
confusion = pd.crosstab(index=df.cluster, columns=df.actual_class)
## handle no group
full = pd.DataFrame(index=range(num_topics), columns=train_label.unique())
full.loc[:, 'no_group'] = 0.1 # the minimum is 1
merge_full = full.combine_first(confusion).fillna(0)
group_to_label = merge_full.idxmax(axis=1)
## print out mapping
print("Group to label mapping: ")
for idx, t in enumerate(group_to_label):
print("Group {} <-> label {}".format(idx, t))
print("\n")
return group_to_label
################################################
## Clustering tools
################################################
def fit_clustering_model(dtm_train, train_label, num_clusters, metric='Cosine', model='KMeans', repeats=20):
'''
'''
assert metric in ['Cosine', 'L2']
assert model in ['KMeans']
# model training
if model == 'KMeans':
if metric == 'Cosine':
# normalise should be true!
clusterer = KMeansClusterer(num_clusters, cosine_distance, normalise=True, repeats=repeats, avoid_empty_clusters=True)
train_cluster_pred = clusterer.cluster(dtm_train, assign_clusters=True)
elif metric == 'L2':
clusterer = KMeans(n_clusters=num_clusters, n_init=repeats).fit(dtm_train)
train_cluster_pred = clusterer.labels_.tolist()
elif model == 'GMM':
pass
# GMM model not good in such case
# clusterer = mixture.GaussianMixture(n_components=num_clusters, n_init=repeats, covariance_type='diag')
# clusterer.fit(dtm_train)
# train_cluster_pred = clusterer.predict(dtm_train)
# Maping clusters into labels
clusters_to_labels = link_group_to_label(train_label, train_cluster_pred, num_clusters)
return clusterer, clusters_to_labels
def pred_clustering_model(dtm_test, clusterer, clusters_to_labels):
try:
test_cluster_pred = clusterer.predict(dtm_test) # for sklearn clustering with L2
except Exception:
test_cluster_pred = [clusterer.classify(v) for v in dtm_test] # for nltk clustering with Cosine similiarity
predict = [clusters_to_labels[i] for i in test_cluster_pred]
return predict
################################################
## Topic modeling tools
################################################
def transform_lda_corpus(docs, vocabulary=None):
assert isinstance(docs, pd.Series)
idx_to_word = vocabulary
tokenized_docs = docs.apply(lambda x: list(tokenize(x))).to_list()
if idx_to_word is None:
idx_to_word = Dictionary(tokenized_docs)
sparse_corpus = [idx_to_word.doc2bow(doc) for doc in tokenized_docs]
return idx_to_word, sparse_corpus
def fit_topic_model(docs, num_topics=100, save_name='lda_gensim_model'):
'''
docs is the pd Series
output lda model and topic prediction on docs
'''
vocabulary, sparse_corpus = transform_lda_corpus(docs, vocabulary=None)
lda = LdaModel(sparse_corpus, num_topics=num_topics, minimum_probability=0.0001, dtype=np.float64)
if save_name is not None:
lda.save(save_name)
    lda = LdaModel.load(save_name)  # reload from disk; will the index shrink?
return lda, vocabulary
def pred_topic_model(lda, docs, vocabulary):
assert vocabulary is not None
_, sparse_corpus = transform_lda_corpus(docs, vocabulary=vocabulary)
pred = lda[sparse_corpus]
topic_distribution = lil_to_dataframe(pred, nrows=len(docs), ncols=lda.num_topics)
## checking for no topic
a = topic_distribution.sum(axis=1)
print(a[a == 0])
pred = topic_distribution.idxmax(axis=1, skipna=False)
return pred, topic_distribution
def lil_to_dataframe(pred, nrows, ncols):
'''sorted([(1, 0.4), (2,0.6) , (3, 0.3)], key=lambda x:-x[1])[0][0]'''
res = {}
for row, doc_topics in enumerate(pred):
res[row] = dict(doc_topics)
d1 = pd.DataFrame(index=range(nrows), columns=range(ncols))
d2 = pd.DataFrame.from_dict(res, orient='index')
# d3 = d1.combine_first(d2)
d3 = d1.combine_first(d2).fillna(0)
return d3
def visualize_LDA_model(docs, voc, lda):
_, sparse_corpus = transform_lda_corpus(docs, vocabulary=voc)
pyLDAvis.enable_notebook()
panel = pyLDAvis.gensim.prepare(lda, corpus=sparse_corpus, dictionary=voc, mds='tsne')
return panel
def load_gensim_LDA_model(save_name='lda_gensim_model'):
    return LdaModel.load(save_name)  # will one key be missing after reload?
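# Illustrative end-to-end sketch (added comment, not part of the original module).
# `train_docs`/`test_docs` are assumed to be pandas Series of raw text and `train_label`
# a pandas Series of class labels:
#
#   lda, voc = fit_topic_model(train_docs, num_topics=100)
#   train_pred, _ = pred_topic_model(lda, train_docs, voc)
#   topic_to_label = link_group_to_label(train_label, train_pred, num_topics=100)
#   test_pred, _ = pred_topic_model(lda, test_docs, voc)
#   predicted_labels = [topic_to_label[t] for t in test_pred]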
|
py | 1a35ff27ec20324dd335a13dc38c3b1f6d3b8d16 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetDatabaseAccountTableResult',
'AwaitableGetDatabaseAccountTableResult',
'get_database_account_table',
]
@pulumi.output_type
class GetDatabaseAccountTableResult:
"""
An Azure Cosmos DB Table.
"""
def __init__(__self__, id=None, location=None, name=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The unique resource identifier of the database account.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the database account.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
class AwaitableGetDatabaseAccountTableResult(GetDatabaseAccountTableResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDatabaseAccountTableResult(
id=self.id,
location=self.location,
name=self.name,
tags=self.tags,
type=self.type)
def get_database_account_table(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
table_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseAccountTableResult:
"""
An Azure Cosmos DB Table.
:param str account_name: Cosmos DB database account name.
:param str resource_group_name: Name of an Azure resource group.
:param str table_name: Cosmos DB table name.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
__args__['tableName'] = table_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:documentdb/v20150408:getDatabaseAccountTable', __args__, opts=opts, typ=GetDatabaseAccountTableResult).value
return AwaitableGetDatabaseAccountTableResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
tags=__ret__.tags,
type=__ret__.type)
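# Illustrative usage sketch (added comment, not part of the generated file); the resource
# names below are placeholders:
#
#   table = get_database_account_table(
#       account_name="my-cosmos-account",
#       resource_group_name="my-resource-group",
#       table_name="my-table")
#   pulumi.export("tableId", table.id)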
|
py | 1a3600c821e67723bd08b236850d628894ee70bd | # Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
from nnabla.testing import assert_allclose
def test_manip():
v = nn.Variable([2, 3, 4])
assert v.shape == (2, 3, 4)
with pytest.raises(Exception):
        v.reset_shape([1, 2])
v.reset_shape([1, 2], force=True)
assert v.shape == (1, 2)
@pytest.mark.parametrize("need_grad", [True, False])
def test_from_array(need_grad):
data = np.random.randint(0, 10, size=(2, 3, 4))
grad = np.random.randint(0, 10, size=(2, 3, 4))
v1 = nn.Variable.from_numpy_array(data, need_grad=need_grad)
assert np.all(v1.d == data)
assert v1.d.dtype == data.dtype
assert v1.need_grad == need_grad
v2 = nn.Variable.from_numpy_array(data, grad, need_grad)
assert np.all(v2.d == data)
assert v2.d.dtype == data.dtype
assert np.all(v2.g == grad)
assert v2.g.dtype == grad.dtype
assert v2.need_grad == need_grad
def test_data_grad_reference():
v = nn.Variable([2, 3, 4])
assert v.d.dtype == np.float32
assert v.g.dtype == np.float32
def test_dtype_conversion():
v = nn.Variable([2, 3, 4])
a = v.data.cast(np.int)
a[...] = 2
assert (v.data.dtype == np.int)
assert np.all(a == 2)
b = v.data.cast(np.float32)
assert b.dtype == np.float32
assert b is not a
assert np.all(b == 2)
b[...] = np.random.randn(*b.shape) * 10
c = v.data.cast(np.int32)
assert np.all(c == b.astype(np.int32))
def test_data_grad():
v = nn.Variable([2, 3, 4])
v.d[...] = np.random.randn(*v.shape)
assert v.d is not v.g
assert not np.all(v.d == v.g)
def test_get_unlinked_variable():
v = nn.Variable([2, 3, 4], need_grad=True)
grad = np.random.randn(*v.shape).astype(np.float32)
v.g = grad
v.d = np.random.randn(*v.shape)
import nnabla.functions as F
with nn.context_scope(nn.Context()), nn.auto_forward():
v2 = F.identity(v)
v2_u = v2.get_unlinked_variable()
assert v2_u.need_grad
v3 = F.identity(v2_u)
v2_u.grad.zero()
v2_g = v2_u.g.copy()
v3.backward(clear_buffer=False)
assert type(v2_u) == type(v2)
assert np.all(v.g == grad)
assert np.all(v2_u.g == v2.g)
assert np.all(v2_u.g == v2_g + 1)
# Check need_grad option
assert v2.get_unlinked_variable(need_grad=True).need_grad
assert not v2.get_unlinked_variable(need_grad=False).need_grad
def test_reshape():
v = nn.Variable([2, 3, 4], need_grad=True)
grad = np.random.randn(*v.shape).astype(np.float32)
v.g = grad
v.d = np.random.randn(*v.shape)
import nnabla.functions as F
with nn.context_scope(nn.Context()), nn.auto_forward():
v2 = F.identity(v)
v2_s = v2.reshape((3, 4, 2))
v3 = F.identity(v2_s)
v3.backward(clear_buffer=False)
assert np.all(v2_s.g.flat == v2.g.flat)
assert np.all(v2_s.g == 1)
v2.d = 1
assert np.all(v2_s.d == 1)
# Check unlink
v2_su = v2.reshape((3, 4, 2), unlink=True)
assert v2_su.need_grad
assert v2_su.parent is None
v2_su.need_grad = False
v2_su2 = v2_su.reshape((3, 4, 2), unlink=True)
assert not v2_su2.need_grad
assert v2_su2.parent is None
def test_persistent():
x = nn.Variable([2, 3, 4], need_grad=True)
x1 = x + 1
x2 = x1 + 1
x3 = x2 + 1
y = x3 + 1
x3.persistent = True
x.data.zero()
y.forward(clear_buffer=True)
assert_allclose(x3.d, 3)
y.forward(clear_no_need_grad=True)
y.backward(clear_buffer=True)
assert_allclose(x3.d, 3)
assert_allclose(x3.g, 1)
def test_name():
x = nn.Variable([2, 3])
x.name = "VariableName"
assert x.name == "VariableName"
def test_name_all_variables():
def net(h):
import nnabla.functions as F
import nnabla.parametric_functions as PF
h = PF.convolution(h, 3, (3, 3), name="conv1")
h = PF.batch_normalization(h, name="bn1")
h = F.relu(h)
h = F.max_pooling(h, (2, 2))
h = PF.convolution(h, 3, (3, 3), name="conv2")
h = PF.batch_normalization(h, name="bn2")
pred = F.relu(h)
return pred
class Namer(object):
def __init__(self, ):
self.counter = 0
def __call__(self, nnabla_func):
for v in nnabla_func.outputs:
v.name = "{}_output_{:05d}".format(
nnabla_func.name, self.counter)
self.counter += 1
class Confirmer(object):
def __init__(self, ):
self.counter = 0
def __call__(self, nnabla_func):
for v in nnabla_func.outputs:
assert v.name == "{}_output_{:05d}".format(
nnabla_func.name, self.counter)
self.counter += 1
x = nn.Variable([2, 3, 8, 8])
pred = net(x)
pred.visit(Namer())
pred.forward(clear_no_need_grad=True)
pred.backward(clear_buffer=True)
pred.visit(Confirmer())
def test_clear_all_graph_links():
import nnabla.functions as F
import nnabla.parametric_functions as PF
class OneStepRNN(object):
def __init__(self, batch_size=8, state_size=8):
self.lstm0 = PF.LSTMCell(batch_size, state_size, name="lsmt0")
self.lstm1 = PF.LSTMCell(batch_size, state_size, name="lsmt1")
self.affine = PF.affine
def __call__(self, x, n_class=10):
h = self.lstm0(x)
h = self.lstm1(h)
h = self.affine(h, n_class)
return h
T = 3
batch_size = 2
dims = 4
state_size = 8
one_step_rnn = OneStepRNN(batch_size, state_size)
# Forward: unroll over time
loss = 0
for t in range(T):
x = nn.Variable.from_numpy_array(
np.random.randn(batch_size, dims))
y = nn.Variable.from_numpy_array(
np.random.choice(np.arange(10), batch_size, replace=True)).reshape((batch_size, 1))
pred = one_step_rnn(x)
l = F.mean(F.softmax_cross_entropy(pred, y))
loss += l
loss /= T
# Backward then truncate
loss.backward()
loss.clear_all_graph_links()
assert one_step_rnn.lstm0.h.parent == None
assert one_step_rnn.lstm0.c.parent == None
assert one_step_rnn.lstm1.h.parent == None
assert one_step_rnn.lstm1.c.parent == None
def test_function_references():
import nnabla as nn
import nnabla.parametric_functions as PF
v = nn.Variable.from_numpy_array(np.random.randn(2, 4))
assert len(v.function_references) == 0
h1 = PF.affine(v, 10, name="affine1")
assert len(v.function_references) == 1
assert h1.parent in v.function_references
h2 = PF.affine(v, 10, name="affine2")
assert len(v.function_references) == 2
assert h1.parent in v.function_references
assert h2.parent in v.function_references
del h1
assert len(v.function_references) == 1
assert h2.parent in v.function_references
del h2
assert len(v.function_references) == 0
@pytest.mark.parametrize("f", [lambda x: x, hash])
def test_variable_equality_and_hash(f):
shape = (2, 3, 4)
x = nn.Variable(shape)
assert f(x) == f(x)
y = nn.Variable(shape)
assert f(x) != f(y)
y = x.get_unlinked_variable()
assert f(x) == f(y)
y.need_grad = True
assert f(x) == f(y)
def test_variable_set():
# Testing hash and equality operator via set
shape = (2, 3, 4)
x = nn.Variable(shape)
s = set()
s.add(x)
assert x in s
y = nn.Variable(shape)
assert y not in s
y = x.get_unlinked_variable()
assert y in s
y.need_grad = True
assert y in s
def test_prohibit_clear_data():
import nnabla.functions as F
nn.prefer_cached_array(False)
shape = (2, 3, 4)
var_np = np.random.rand(*shape)
# the case of root variable
x1 = nn.Variable.from_numpy_array(var_np)
y1 = F.reshape(x1, (-1,), inplace=True)
y1 = F.reshape(y1, shape, inplace=True) * 2
x2 = nn.Variable.from_numpy_array(var_np)
y2 = F.reshape(x2, (-1,), inplace=False)
y2 = F.reshape(y2, shape, inplace=False) * 2
nn.forward_all([y1, y2], clear_buffer=True)
assert_allclose(x1.d, x2.d)
assert_allclose(y1.d, y2.d)
# the case of persistent variable
x1 = nn.Variable.from_numpy_array(var_np)
p_y1 = F.mul_scalar(x1, 2).apply(persistent=True)
y1 = F.reshape(p_y1, (-1,), inplace=True)
y1 = F.reshape(y1, shape, inplace=True) * 2
x2 = nn.Variable.from_numpy_array(var_np)
p_y2 = F.mul_scalar(x2, 2).apply(persistent=True)
y2 = F.reshape(p_y2, (-1,), inplace=False)
y2 = F.reshape(y2, shape, inplace=False) * 2
nn.forward_all([y1, y2], clear_buffer=True)
assert_allclose(p_y1.d, p_y2.d)
assert_allclose(y1.d, y2.d)
# the case of rewire_on root variable
# graph A: x11 -> f_inplace -> y11
x11 = nn.Variable.from_numpy_array(var_np)
y11 = F.reshape(x11, (-1,), inplace=True)
# graph B: x12 -> f_inplace -> mul_scalar -> y12
x12 = nn.Variable(shape=y11.shape)
y12 = F.reshape(x12, shape, inplace=True) * 2
# graph A->B: x11 -> f_inplace -> f_inplace -> mul_scalar -> y12
x12.rewire_on(y11)
x2 = nn.Variable.from_numpy_array(var_np)
y2 = F.reshape(x2, (-1,), inplace=False)
y2 = F.reshape(y2, shape, inplace=False) * 2
nn.forward_all([y12, y2], clear_buffer=True)
assert_allclose(x11.d, x2.d)
assert_allclose(y12.d, y2.d)
# the case of rewire_on persistent variable
# graph A: x11 -> mul_scalar -> p_x11 -> f_inplace -> y11
x11 = nn.Variable.from_numpy_array(var_np)
p_x11 = F.mul_scalar(x11, 2).apply(persistent=True)
y11 = F.reshape(p_x11, (-1,), inplace=True)
# graph B: x12 -> f_inplace -> mul_scalar -> y12
x12 = nn.Variable(shape=y11.shape)
y12 = F.reshape(x12, shape, inplace=True) * 2
# graph A->B: ... -> p_x11 -> f_inplace -> f_inplace -> mul_scalar -> y12
x12.rewire_on(y11)
x2 = nn.Variable.from_numpy_array(var_np)
p_x2 = F.mul_scalar(x2, 2).apply(persistent=True)
y2 = F.reshape(p_x2, (-1,), inplace=False)
y2 = F.reshape(y2, shape, inplace=False) * 2
nn.forward_all([y12, y2], clear_buffer=True)
assert_allclose(p_x11.d, p_x2.d)
assert_allclose(y12.d, y2.d)
def test_leaf_indexing_access():
import nnabla.functions as F
nn.set_auto_forward(False)
shape_x = (3, 2)
dx = np.random.rand(*shape_x)
shape_y = (2, 2)
dy = np.random.rand(*shape_y)
x = nn.Variable.from_numpy_array(dx)
y = nn.Variable.from_numpy_array(dy)
x[0:2, :] = y
z = F.identity(x)
z.forward()
d1 = x.d.copy()
nn.set_auto_forward(True)
x = nn.Variable.from_numpy_array(dx)
y = nn.Variable.from_numpy_array(dy)
x[0:2, :] = y
z2 = F.identity(x)
d2 = x.d.copy()
nn.set_auto_forward(False)
x = nn.Variable.from_numpy_array(dx)
y = nn.Variable.from_numpy_array(dy)
x[0:2, :] = y
z3 = F.identity(x)
z3.forward()
d3 = x.d.copy()
d4 = z3.d.copy()
assert_allclose(d1, d2)
assert_allclose(d2, d3)
assert_allclose(d3, d4)
|
py | 1a3602ab88e2b81bcfd3ec6d453a2667f60bd198 | import pathlib
from setuptools import setup, find_packages
here = pathlib.Path(__file__).resolve().parent
with open(here.joinpath("requirements.txt")) as fh:
req = fh.readlines()
with open(here.joinpath("requirements-dev.txt")) as fh:
req_dev = fh.readlines()
setup(
name="operatorcert",
version="1.0.0",
description="Tools for Red Hat Operator certification pipelines",
author="Red Hat, Inc.",
packages=find_packages(),
python_requires=">=3.6, <4",
install_requires=req,
extras_require={"dev": req_dev},
entry_points={
"console_scripts": [
"bundle-dockerfile=operatorcert.entrypoints.bundle_dockerfile:main",
"ocp-version-info=operatorcert.entrypoints.ocp_version_info:main",
"verify-changed-dirs=operatorcert.entrypoints.verify_changed_dirs:main",
"verify-pr-title=operatorcert.entrypoints.verify_pr_title:main",
"verify-pr-uniqueness=operatorcert.entrypoints.verify_pr_uniqueness:main",
"verify-pr-user=operatorcert.entrypoints.verify_pr_user:main",
"upload-artifacts=operatorcert.entrypoints.upload_artifacts:main",
"download-test-results=operatorcert.entrypoints.download_test_results:main",
"reserve-operator-name=operatorcert.entrypoints.reserve_operator_name:main",
"set-github-status=operatorcert.entrypoints.set_github_status:main",
"link-pull-request=operatorcert.entrypoints.link_pull_request:main",
"get-cert-project-related-data=operatorcert.entrypoints.get_cert_project_related_data:main",
"get-vendor-related-data=operatorcert.entrypoints.get_vendor_related_data:main",
"open-pull-request=operatorcert.entrypoints.github_pr:main",
"publish=operatorcert.entrypoints.publish:main",
"index=operatorcert.entrypoints.index:main",
"update-cert-project-status=operatorcert.entrypoints.update_cert_project_status:main",
"hydra-checklist=operatorcert.entrypoints.hydra_checklist:main",
"create-container-image=operatorcert.entrypoints.create_container_image:main",
"marketplace-replication=operatorcert.entrypoints.marketplace_replication:main",
"pipelinerun-summary=operatorcert.entrypoints.pipelinerun_summary:main",
"request-signature=operatorcert.entrypoints.request_signature:main",
"upload-signature=operatorcert.entrypoints.upload_signature:main",
"github-add-comment=operatorcert.entrypoints.github_add_comment:main",
],
},
)
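# Usage sketch (assumed workflow, not part of this file): installing the
# package exposes each console_scripts entry above as a CLI command, e.g.
#
#     pip install .
#     verify-pr-title --help
#     pipelinerun-summary --help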
|
py | 1a36031185b845bbfc84c38cab1cca1cbc448ab5 | """"Groups UI URLs
Copyright 2015 Archive Analytics Solutions
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.conf.urls import url
urlpatterns = [
url(r'^$', 'groups.views.home', name='home'),
url(r'^new/group', 'groups.views.new_group', name='new_group'),
url(r'^delete/group/(?P<name>.*)$', 'groups.views.delete_group', name='delete_group'),
url(r'^edit/group/(?P<name>.*)$', 'groups.views.edit_group', name='edit_group'),
url(r'^rm/(?P<name>.*)/(?P<uname>.*)$', 'groups.views.rm_user', name='rm_user'),
url(r'^add/(?P<name>.*)$', 'groups.views.add_user', name='add_user'),
url(r'^(?P<name>.*)$', 'groups.views.group_view', name='view'),
]
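# Usage sketch (hypothetical project wiring, not part of this app): these
# patterns are typically mounted under a prefix in the project urls.py, e.g.
#
#     url(r'^groups/', include('groups.urls', namespace='groups'))
#
# after which reverse('groups:edit_group', kwargs={'name': 'staff'}) resolves
# to /groups/edit/group/staff.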
|
py | 1a3604afe4421befeae42de74870ab92e8994ceb | import warnings
from collections import Counter
from encodings.aliases import aliases
from hashlib import sha256
from json import dumps
from re import compile as re_compile, sub
from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Union
from .constant import TOO_BIG_SEQUENCE
from .md import mess_ratio
from .utils import iana_name, is_multi_byte_encoding, unicode_range
class CharsetMatch:
def __init__(
self,
payload: bytes,
guessed_encoding: str,
mean_mess_ratio: float,
has_sig_or_bom: bool,
languages: "CoherenceMatches",
decoded_payload: Optional[str] = None,
):
self._payload = payload # type: bytes
self._encoding = guessed_encoding # type: str
self._mean_mess_ratio = mean_mess_ratio # type: float
self._languages = languages # type: CoherenceMatches
self._has_sig_or_bom = has_sig_or_bom # type: bool
self._unicode_ranges = None # type: Optional[List[str]]
self._leaves = [] # type: List[CharsetMatch]
self._mean_coherence_ratio = 0.0 # type: float
self._output_payload = None # type: Optional[bytes]
self._output_encoding = None # type: Optional[str]
self._string = decoded_payload # type: Optional[str]
def __eq__(self, other: object) -> bool:
if not isinstance(other, CharsetMatch):
raise TypeError(
"__eq__ cannot be invoked on {} and {}.".format(
str(other.__class__), str(self.__class__)
)
)
return self.encoding == other.encoding and self.fingerprint == other.fingerprint
def __lt__(self, other: object) -> bool:
"""
Implemented to make sorted available upon CharsetMatches items.
"""
if not isinstance(other, CharsetMatch):
raise ValueError
chaos_difference = abs(self.chaos - other.chaos) # type: float
        # Below 1% difference --> Use Coherence
if chaos_difference < 0.01:
return self.coherence > other.coherence
return self.chaos < other.chaos
@property
def chaos_secondary_pass(self) -> float:
"""
Check once again chaos in decoded text, except this time, with full content.
Use with caution, this can be very slow.
Notice: Will be removed in 3.0
"""
warnings.warn(
"chaos_secondary_pass is deprecated and will be removed in 3.0",
DeprecationWarning,
)
return mess_ratio(str(self), 1.0)
@property
def coherence_non_latin(self) -> float:
"""
Coherence ratio on the first non-latin language detected if ANY.
Notice: Will be removed in 3.0
"""
warnings.warn(
"coherence_non_latin is deprecated and will be removed in 3.0",
DeprecationWarning,
)
return 0.0
@property
def w_counter(self) -> Counter:
"""
Word counter instance on decoded text.
Notice: Will be removed in 3.0
"""
warnings.warn(
"w_counter is deprecated and will be removed in 3.0", DeprecationWarning
)
not_printable_pattern = re_compile(r"[0-9\W\n\r\t]+")
string_printable_only = sub(not_printable_pattern, " ", str(self).lower())
return Counter(string_printable_only.split())
def __str__(self) -> str:
# Lazy Str Loading
if self._string is None:
self._string = str(self._payload, self._encoding, "strict")
return self._string
def __repr__(self) -> str:
return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint)
def add_submatch(self, other: "CharsetMatch") -> None:
if not isinstance(other, CharsetMatch) or other == self:
raise ValueError(
"Unable to add instance <{}> as a submatch of a CharsetMatch".format(
other.__class__
)
)
other._string = None # Unload RAM usage; dirty trick.
self._leaves.append(other)
@property
def encoding(self) -> str:
return self._encoding
@property
def encoding_aliases(self) -> List[str]:
"""
        An encoding is known by many names; this property can help when searching for IBM855 when it is listed as CP855.
"""
also_known_as = [] # type: List[str]
for u, p in aliases.items():
if self.encoding == u:
also_known_as.append(p)
elif self.encoding == p:
also_known_as.append(u)
return also_known_as
@property
def bom(self) -> bool:
return self._has_sig_or_bom
@property
def byte_order_mark(self) -> bool:
return self._has_sig_or_bom
@property
def languages(self) -> List[str]:
"""
        Return the complete list of possible languages found in the decoded sequence.
        Usually not really useful. The returned list may be empty even if the 'language' property returns something != 'Unknown'.
"""
return [e[0] for e in self._languages]
@property
def language(self) -> str:
"""
Most probable language found in decoded sequence. If none were detected or inferred, the property will return
"Unknown".
"""
if not self._languages:
# Trying to infer the language based on the given encoding
            # It's either English or we should not pronounce ourselves in certain cases.
if "ascii" in self.could_be_from_charset:
return "English"
# doing it there to avoid circular import
from charset_normalizer.cd import encoding_languages, mb_encoding_languages
languages = (
mb_encoding_languages(self.encoding)
if is_multi_byte_encoding(self.encoding)
else encoding_languages(self.encoding)
)
if len(languages) == 0 or "Latin Based" in languages:
return "Unknown"
return languages[0]
return self._languages[0][0]
@property
def chaos(self) -> float:
return self._mean_mess_ratio
@property
def coherence(self) -> float:
if not self._languages:
return 0.0
return self._languages[0][1]
@property
def percent_chaos(self) -> float:
return round(self.chaos * 100, ndigits=3)
@property
def percent_coherence(self) -> float:
return round(self.coherence * 100, ndigits=3)
@property
def raw(self) -> bytes:
"""
Original untouched bytes.
"""
return self._payload
@property
def submatch(self) -> List["CharsetMatch"]:
return self._leaves
@property
def has_submatch(self) -> bool:
return len(self._leaves) > 0
@property
def alphabets(self) -> List[str]:
if self._unicode_ranges is not None:
return self._unicode_ranges
detected_ranges = set() # type: Set[str]
for character in str(self):
detected_range = unicode_range(character) # type: Optional[str]
if detected_range:
detected_ranges.add(detected_range)
self._unicode_ranges = sorted(list(detected_ranges))
return self._unicode_ranges
@property
def could_be_from_charset(self) -> List[str]:
"""
        The complete list of encodings that output the exact SAME str result and therefore could be the originating
encoding.
This list does include the encoding available in property 'encoding'.
"""
return [self._encoding] + [m.encoding for m in self._leaves]
def first(self) -> "CharsetMatch":
"""
Kept for BC reasons. Will be removed in 3.0.
"""
return self
def best(self) -> "CharsetMatch":
"""
Kept for BC reasons. Will be removed in 3.0.
"""
return self
def output(self, encoding: str = "utf_8") -> bytes:
"""
Method to get re-encoded bytes payload using given target encoding. Default to UTF-8.
Any errors will be simply ignored by the encoder NOT replaced.
"""
if self._output_encoding is None or self._output_encoding != encoding:
self._output_encoding = encoding
self._output_payload = str(self).encode(encoding, "replace")
return self._output_payload # type: ignore
@property
def fingerprint(self) -> str:
"""
Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
"""
return sha256(self.output()).hexdigest()
class CharsetMatches:
"""
    Container with every CharsetMatch item, ordered by default from the most probable to the least probable.
    Acts like a list (iterable) but does not implement all related methods.
"""
def __init__(self, results: List[CharsetMatch] = None):
self._results = sorted(results) if results else [] # type: List[CharsetMatch]
def __iter__(self) -> Iterator[CharsetMatch]:
for result in self._results:
yield result
def __getitem__(self, item: Union[int, str]) -> CharsetMatch:
"""
Retrieve a single item either by its position or encoding name (alias may be used here).
Raise KeyError upon invalid index or encoding not present in results.
"""
if isinstance(item, int):
return self._results[item]
if isinstance(item, str):
item = iana_name(item, False)
for result in self._results:
if item in result.could_be_from_charset:
return result
raise KeyError
def __len__(self) -> int:
return len(self._results)
def append(self, item: CharsetMatch) -> None:
"""
Insert a single match. Will be inserted accordingly to preserve sort.
Can be inserted as a submatch.
"""
if not isinstance(item, CharsetMatch):
raise ValueError(
"Cannot append instance '{}' to CharsetMatches".format(
str(item.__class__)
)
)
# We should disable the submatch factoring when the input file is too heavy (conserve RAM usage)
if len(item.raw) <= TOO_BIG_SEQUENCE:
for match in self._results:
if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
match.add_submatch(item)
return
self._results.append(item)
self._results = sorted(self._results)
def best(self) -> Optional["CharsetMatch"]:
"""
Simply return the first match. Strict equivalent to matches[0].
"""
if not self._results:
return None
return self._results[0]
def first(self) -> Optional["CharsetMatch"]:
"""
Redundant method, call the method best(). Kept for BC reasons.
"""
return self.best()
CoherenceMatch = Tuple[str, float]
CoherenceMatches = List[CoherenceMatch]
class CliDetectionResult:
def __init__(
self,
path: str,
encoding: Optional[str],
encoding_aliases: List[str],
alternative_encodings: List[str],
language: str,
alphabets: List[str],
has_sig_or_bom: bool,
chaos: float,
coherence: float,
unicode_path: Optional[str],
is_preferred: bool,
):
self.path = path # type: str
self.unicode_path = unicode_path # type: Optional[str]
self.encoding = encoding # type: Optional[str]
self.encoding_aliases = encoding_aliases # type: List[str]
self.alternative_encodings = alternative_encodings # type: List[str]
self.language = language # type: str
self.alphabets = alphabets # type: List[str]
self.has_sig_or_bom = has_sig_or_bom # type: bool
self.chaos = chaos # type: float
self.coherence = coherence # type: float
self.is_preferred = is_preferred # type: bool
@property
def __dict__(self) -> Dict[str, Any]: # type: ignore
return {
"path": self.path,
"encoding": self.encoding,
"encoding_aliases": self.encoding_aliases,
"alternative_encodings": self.alternative_encodings,
"language": self.language,
"alphabets": self.alphabets,
"has_sig_or_bom": self.has_sig_or_bom,
"chaos": self.chaos,
"coherence": self.coherence,
"unicode_path": self.unicode_path,
"is_preferred": self.is_preferred,
}
def to_json(self) -> str:
return dumps(self.__dict__, ensure_ascii=True, indent=4)
CharsetNormalizerMatch = CharsetMatch
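# Usage sketch (not part of this module): CharsetMatches instances are normally
# produced by charset_normalizer.from_bytes(); shown here only to illustrate
# the container API defined above.
#
#     from charset_normalizer import from_bytes
#     matches = from_bytes("héllo wörld".encode("utf_8"))
#     best = matches.best()
#     if best is not None:
#         print(best.encoding, best.language, best.could_be_from_charset)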
|
py | 1a3604d9f353b79caf3ee0d00414ea17637ae3ea | import ast
from dotmap import DotMap
from typing import Union, List
from .utils import visualize_1D_lcurves
class MetaLog(object):
meta_vars: List[str]
stats_vars: List[str]
time_vars: List[str]
num_configs: int
def __init__(self, meta_log: DotMap, non_aggregated: bool = False):
"""Class wrapper for meta_log dictionary w. additional functionality.
Args:
meta_log (DotMap): Raw reloaded meta-log dotmap dictionary.
non_aggregated (bool, optional):
Whether the meta-log has previously been aggregated across
seeds. Defaults to False.
"""
self.meta_log = meta_log
# Return shallow log if there is only a single experiment stored
self.num_configs = len(list(meta_log.keys()))
ph_run = list(meta_log.keys())[0]
# Extract different variable names from meta log
if not non_aggregated:
self.meta_vars = list(meta_log[ph_run].meta.keys())
self.stats_vars = list(meta_log[ph_run].stats.keys())
self.time_vars = list(meta_log[ph_run].time.keys())
else:
ph_seed = list(meta_log[ph_run].keys())[0]
self.meta_vars = list(meta_log[ph_run][ph_seed].meta.keys())
self.stats_vars = list(meta_log[ph_run][ph_seed].stats.keys())
self.time_vars = list(meta_log[ph_run][ph_seed].time.keys())
# Decode all byte strings in meta data
for run_id in self.meta_log.keys():
if "meta" in self.meta_log[run_id].keys():
self.meta_log[run_id] = decode_meta_strings(
self.meta_log[run_id]
)
else:
for seed_id in self.meta_log[run_id].keys():
self.meta_log[run_id][seed_id] = decode_meta_strings(
self.meta_log[run_id][seed_id]
)
# Make log shallow if there is only a single experiment stored
if self.num_configs == 1:
self.meta_log = self.meta_log[ph_run]
# Make possible that all runs are accessible via attribute as in pd
for key in self.meta_log:
setattr(self, key, self.meta_log[key])
def filter(self, run_ids: List[str]):
"""Subselect the meta log dict based on a list of run ids."""
sub_dict = subselect_meta_log(self.meta_log, run_ids)
return MetaLog(sub_dict)
def plot(
self,
target_to_plot: str,
iter_to_plot: Union[str, None] = None,
smooth_window: int = 1,
plot_title: Union[str, None] = None,
xy_labels: Union[list, None] = None,
base_label: str = "{}",
run_ids: Union[list, None] = None,
curve_labels: list = [],
every_nth_tick: Union[int, None] = None,
plot_std_bar: bool = False,
fname: Union[None, str] = None,
num_legend_cols: Union[int, None] = 1,
fig=None,
ax=None,
figsize: tuple = (9, 6),
plot_labels: bool = True,
legend_title: Union[None, str] = None,
ax_lims: Union[None, list] = None,
):
"""Plot all runs in meta-log for variable 'target_to_plot'."""
if iter_to_plot is None:
iter_to_plot = self.time_vars[0]
assert iter_to_plot in self.time_vars
if run_ids is None:
run_ids = self.eval_ids
fig, ax = visualize_1D_lcurves(
self.meta_log,
iter_to_plot,
target_to_plot,
smooth_window=smooth_window,
every_nth_tick=every_nth_tick,
num_legend_cols=num_legend_cols,
run_ids=run_ids,
plot_title=plot_title,
xy_labels=xy_labels,
base_label=base_label,
curve_labels=curve_labels,
plot_std_bar=plot_std_bar,
fig=fig,
ax=ax,
figsize=figsize,
plot_labels=plot_labels,
legend_title=legend_title,
ax_lims=ax_lims,
)
# Save the figure if a filename was provided
if fname is not None:
fig.savefig(fname, dpi=300)
else:
return fig, ax
@property
def eval_ids(self) -> Union[int, None]:
"""Get ids of runs stored in meta_log instance."""
if self.num_configs > 1:
return list(self.meta_log.keys())
else:
print("Only single aggregated configuration or random seed loaded.")
def __len__(self) -> int:
"""Return number of runs stored in meta_log."""
return len(self.eval_ids)
def __getitem__(self, item):
"""Get run log via string subscription."""
return self.meta_log[item]
def subselect_meta_log(meta_log: DotMap, run_ids: List[str]) -> DotMap:
"""Subselect the meta log dict based on a list of run ids."""
sub_log = DotMap()
for run_id in run_ids:
sub_log[run_id] = meta_log[run_id]
return sub_log
def decode_meta_strings(log: DotMap):
"""Decode all bytes encoded strings."""
for k in log.meta.keys():
temp_list = []
if type(log.meta[k]) != str:
list_to_loop = (
log.meta[k].tolist()
if type(log.meta[k]) != list
else log.meta[k]
)
if type(list_to_loop) in [str, bytes]:
list_to_loop = [list_to_loop]
for i in list_to_loop:
if type(i) == bytes:
if len(i) > 0:
temp_list.append(i.decode())
else:
temp_list.append(i)
else:
temp_list.append(log.meta[k])
if len(temp_list) == 1:
if k == "config_dict":
# Convert config into dict
config_dict = ast.literal_eval(temp_list[0])
log.meta[k] = config_dict
else:
log.meta[k] = temp_list[0]
else:
log.meta[k] = temp_list
return log
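# Minimal usage sketch (run ids and variable names below are hypothetical):
#
#     log = MetaLog(reloaded_meta_log)      # DotMap reloaded from disk
#     print(log.eval_ids)                   # e.g. ['run_1', 'run_2']
#     fig, ax = log.plot("test_loss", "num_updates")
#     sub = log.filter(["run_1"])           # MetaLog restricted to one run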
|
py | 1a36054e4b7418274e99314f78d19465e2792514 | from __future__ import division
import json
import numpy as np
import matplotlib.pyplot as plt
import time
import random
from sklearn.linear_model import LogisticRegressionCV
from sklearn.ensemble import RandomForestClassifier as RFC
import pandas as pd
import operator
from sklearn.metrics import roc_curve, precision_recall_curve, precision_recall_fscore_support
print 'IMPORTANT: experiment can be modified by changing parameter combinations in main function!'
print 'loading data...'
part1_pos_10 = json.loads(open("new_dedup_part1_pos_10.json").read()) # 1552
part1_pos_200 = json.loads(open("new_dedup_part1_pos_200_embed.json").read())
part1_pos_walk_200 = json.loads(open("new_dedup_part1_pos_200_walk.json").read())
part2_pos_10 = json.loads(open("new_dedup_part2_pos_10.json").read()) # 24251
part2_pos_200 = json.loads(open("new_dedup_part2_pos_200_embed.json").read())
part2_pos_walk_200 = json.loads(open("new_dedup_part2_pos_200_walk.json").read())
part3_pos_10 = json.loads(open("new_dedup_part3_pos_10.json").read()) # 1353
part3_pos_200 = json.loads(open("new_dedup_part3_pos_200_embed.json").read())
part3_pos_walk_200 = json.loads(open("new_dedup_part3_pos_200_walk.json").read())
part4_pos_10 = json.loads(open("new_dedup_part4_pos_10.json").read()) # 3399
part4_pos_200 = json.loads(open("new_dedup_part4_pos_200_embed.json").read())
part4_pos_walk_200 = json.loads(open("new_dedup_part4_pos_200_walk.json").read())
part5_pos_10 = json.loads(open("new_dedup_part5_pos_10.json").read()) # 11692
part5_pos_200 = json.loads(open("new_dedup_part5_pos_200_embed.json").read())
part5_pos_walk_200 = json.loads(open("new_dedup_part5_pos_200_walk.json").read())
global_pos_10 = json.loads(open("new_dedup_global_pos_10.json").read()) # 1552
global_pos_200 = json.loads(open("new_dedup_global_pos_200_embed.json").read())
global_pos_walk_200 = json.loads(open("new_dedup_global_pos_200_walk.json").read())
global_neg_10 = json.loads(open("new_dedup_global_neg_10.json").read()) # 1552
global_neg_200 = json.loads(open("new_dedup_global_neg_200_embed.json").read())
global_neg_walk_200 = json.loads(open("new_dedup_global_neg_200_walk.json").read())
def combineData(source1_pos=None,
source1_neg=None,
source2_pos=None,
source2_neg=None,
source3_pos=None,
source3_neg=None):
# assert (len(source1_pos) == len(source2_pos) == len(source3_pos)), "pos should be equal length"
# assert (len(source1_neg) == len(source2_neg) == len(source3_neg)), "neg should be equal length"
comb_pos = []
comb_neg = []
if source3_pos == None: # only combine two datasets
for i in range(len(source1_pos)):
comb_pos.append(source1_pos[i] + source2_pos[i])
if source1_neg != None:
for i in range(len(source1_neg)):
comb_neg.append(source1_neg[i] + source2_neg[i])
else:
for i in range(len(source1_pos)):
comb_pos.append(source1_pos[i] + source2_pos[i] + source3_pos[i])
if source1_neg != None:
for i in range(len(source1_neg)):
comb_neg.append(source1_neg[i] + source2_neg[i] + source3_neg[i])
if len(comb_neg) == 0:
return comb_pos
else:
return (comb_pos, comb_neg)
# combinations of each partition
# 901
part1_pos_10_walk = combineData(source1_pos=part1_pos_10, source2_pos=part1_pos_walk_200)
part1_pos_10_walk_dv = combineData(source1_pos=part1_pos_10, source3_pos=part1_pos_200, source2_pos=part1_pos_walk_200)
# 12294
part2_pos_10_walk = combineData(source1_pos=part2_pos_10, source2_pos=part2_pos_walk_200)
part2_pos_10_walk_dv = combineData(source1_pos=part2_pos_10, source3_pos=part2_pos_200, source2_pos=part2_pos_walk_200)
# 895
part3_pos_10_walk = combineData(source1_pos=part3_pos_10, source2_pos=part3_pos_walk_200)
part3_pos_10_walk_dv = combineData(source1_pos=part3_pos_10, source3_pos=part3_pos_200, source2_pos=part3_pos_walk_200)
# 1992
part4_pos_10_walk = combineData(source1_pos=part4_pos_10, source2_pos=part4_pos_walk_200)
part4_pos_10_walk_dv = combineData(source1_pos=part4_pos_10, source3_pos=part4_pos_200, source2_pos=part4_pos_walk_200)
# 5952
part5_pos_10_walk = combineData(source1_pos=part5_pos_10, source2_pos=part5_pos_walk_200)
part5_pos_10_walk_dv = combineData(source1_pos=part5_pos_10, source3_pos=part5_pos_200, source2_pos=part5_pos_walk_200)
(combPos_10_walk, combNeg_10_walk) = combineData(source1_pos=global_pos_10,
source1_neg=global_neg_10,
source2_pos=global_pos_walk_200,
source2_neg=global_neg_walk_200,
source3_pos=None,
source3_neg=None)
(combPos_10_walk_dv, combNeg_10_walk_dv) = combineData(source1_pos=global_pos_10,
source1_neg=global_neg_10,
source2_pos=global_pos_walk_200,
source2_neg=global_neg_walk_200,
source3_pos=global_pos_200,
source3_neg=global_neg_200)
# functions
# general function for taking samples from a list
print 'defining function...'
def takingSamples(alist, num=0, portion=0):
assert ((num > 0 and portion == 0) or (num == 0 and portion > 0)), "should offer only one method, num or portion"
seed = int(round(time.time() * 1000)) % 100000000
random.seed(seed)
length_of_list = len(alist)
listPicked = []
listNotPicked = []
if num > 0:
chosen_ids = set()
while len(chosen_ids) < num:
tmpRandInt = random.randint(0, length_of_list - 1) # cover both head and tail
chosen_ids.add(tmpRandInt)
t_f_list = [False for i in range(length_of_list)]
for i in chosen_ids:
t_f_list[i] = True
for i, j in enumerate(t_f_list):
if j:
listPicked.append(alist[i])
else:
listNotPicked.append(alist[i])
if portion > 0:
num = int(length_of_list * portion)
chosen_ids = set()
while len(chosen_ids) < num:
tmpRandInt = random.randint(0, length_of_list - 1) # cover both head and tail
chosen_ids.add(tmpRandInt)
t_f_list = [False for i in range(length_of_list)]
for i in chosen_ids:
t_f_list[i] = True
for i, j in enumerate(t_f_list):
if j:
listPicked.append(alist[i])
else:
listNotPicked.append(alist[i])
return (listPicked, listNotPicked)
# usage e.g.
# (listPicked, listNotPicked) = takingSamples([1,2,3,4,5,6], num=4)
# (listPicked, listNotPicked) = takingSamples([[1,2],[2,5],[3,7],[4,6],[5,5],[6,1]], num=4)
# print listPicked
# print listNotPicked
# averaging the results from trials
def avgProcess(trialsAns):
trialsAns_np = np.array(trialsAns)
num_trial = len(trialsAns_np) # 10
# place holder for average threshold, precision, recall, f1
avg_thres = np.array([0.0 for i in range(len(trialsAns_np[0]))])
avg_prec = np.array([0.0 for i in range(len(trialsAns_np[0]))])
avg_rec = np.array([0.0 for i in range(len(trialsAns_np[0]))])
avg_f1 = np.array([0.0 for i in range(len(trialsAns_np[0]))])
for i in range(num_trial):
tmp = np.array(trialsAns_np[i])
avg_thres += tmp[:, 0] # the 0th column
avg_prec += tmp[:, 1]
avg_rec += tmp[:, 2]
avg_f1 += tmp[:, 3]
avg_thres = avg_thres / float(num_trial)
avg_prec = avg_prec / float(num_trial)
avg_rec = avg_rec / float(num_trial)
avg_f1 = avg_f1 / float(num_trial)
avg_thres = list(avg_thres)
avg_prec = list(avg_prec)
avg_rec = list(avg_rec)
avg_f1 = list(avg_f1)
return (avg_thres, avg_prec, avg_rec, avg_f1)
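# Shape note (illustrative values): avgProcess expects trialsAns to be a list
# of per-trial result tables, each a list of [threshold, prec, rec, f1] rows
# over the same thresholds, and returns the element-wise averages, e.g.
#
#     trialsAns = [[[0.10, 0.50, 0.90, 0.64]], [[0.10, 0.60, 0.80, 0.69]]]
#     avg_thres, avg_prec, avg_rec, avg_f1 = avgProcess(trialsAns)
#     # avg_thres == [0.10], avg_prec == [0.55]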
# input should be lists of 10 or 210 dimensions
def oneTrialWithCertainTrainSize(num_pos_sample=50,
neg_pos_ratio=1,
pos_training_dataset=None,
pos_testing_dataset=None,
neg_dataset=None,
train_test_split=0,
                                 # obsolete feature, keep default parameter to bypass, feature achieved by "num_pos_sample" param
                                 test_stratify=True,
                                 # obsolete feature, keep default parameter to bypass, feature achieved by "num_pos_sample" param
scoring="f1",
plt_or_not=True):
assert (type(pos_training_dataset) == list and type(neg_dataset) == list), "input datasets should be lists"
num_neg_sample = int(num_pos_sample * neg_pos_ratio)
# take sample of num_pos_sample number of positive examples
(posPicked, posNotPicked) = takingSamples(pos_training_dataset, num=num_pos_sample)
(negPicked, negNotPicked) = takingSamples(neg_dataset, num=num_neg_sample)
# create train_X, train_y
train_X = pd.DataFrame(posPicked + negPicked)
train_y = np.array([1 for i in range(len(posPicked))] + [0 for i in range(len(negPicked))])
# create test_X and test_y
if train_test_split != 0:
testSize = int(
(num_pos_sample + num_neg_sample) / train_test_split * (1 - train_test_split)) # size of test set
if test_stratify:
testPosSize = int(float(testSize) / (neg_pos_ratio + 1))
testNegSize = testSize - testPosSize
test_X = pd.DataFrame(
takingSamples(posNotPicked, num=testPosSize)[0] + takingSamples(negNotPicked, num=testNegSize)[0]) #
test_y = np.array([1 for i in range(testPosSize)] + [0 for i in range(testNegSize)])
        else:
            # Tag each candidate row with its class label, sample a mixed test
            # set, then split the trailing label column back out into test_y.
            for idx in range(len(posNotPicked)):
                posNotPicked[idx].append(1)
            for idx in range(len(negNotPicked)):
                negNotPicked[idx].append(0)
            sampled = takingSamples(posNotPicked + negNotPicked, num=testSize)[0]
            test_y = np.array([row[-1] for row in sampled])
            test_X = pd.DataFrame([row[:-1] for row in sampled])
else:
if (pos_testing_dataset == None):
test_X = pd.DataFrame(posNotPicked + negNotPicked)
test_y = np.array([1 for i in range(len(posNotPicked))] + [0 for i in range(len(negNotPicked))])
else:
test_X = pd.DataFrame(pos_testing_dataset + negNotPicked)
test_y = np.array([1 for i in range(len(pos_testing_dataset))] + [0 for i in range(len(negNotPicked))])
# train and test the model
reg = RFC(n_estimators=100)
# reg = RFC(n_estimators=200, max_features='log2')
# reg = LogisticRegressionCV(scoring=scoring)
LogModel = reg.fit(train_X, train_y)
y_predlog = LogModel.predict_proba(test_X)
y_predlog_1 = y_predlog[:, 1]
prec, rec, thresholds = precision_recall_curve(test_y, y_predlog_1)
if plt_or_not:
plt.plot(rec, prec)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("Rec-Prec Curve of Logistic Regression Trials")
# pred_combine sorted
pred_combine = []
for i in range(len(test_y)):
pred_combine.append((y_predlog_1[i], test_y[i]))
pred_combine = sorted(pred_combine, key=operator.itemgetter(0))
# create an array of 0.1:0.01:0.99
thres_new = []
initial = 0.1
while initial <= 0.99:
thres_new.append(initial)
initial += 0.01
initial = round(initial, 2)
# generate "threshold, prec, rec, f1" list
# test_y is truth, y_predlog_1 is prob of being 1
result = []
item_index = 0
FN_accu = 0
TN_accu = 0
TP_accu = list(test_y).count(1)
FP_accu = list(test_y).count(0)
for i in thres_new: # i is [0.1:0.01:0.99]
if (item_index < len(pred_combine)):
while pred_combine[item_index][0] < i:
if pred_combine[item_index][1] == 1: # this item actually 1, predict as 0
FN_accu += 1
TP_accu -= 1
else: # this item is actually 0, predict as 0, pred_combine[item_index][1] == 0
TN_accu += 1
FP_accu -= 1
item_index += 1
if (item_index == len(pred_combine)): break
# print "th: " + str(i) + ", TP: " + str(TP_accu) + ", FP: " + str(FP_accu) + ", FN: " + str(FN_accu) + ", TN: " + str(TN_accu)
if (TP_accu == 0):
preci = 0
else:
preci = float(TP_accu) / (TP_accu + FP_accu)
if (TP_accu == 0):
recal = 0
else:
recal = float(TP_accu) / (FN_accu + TP_accu)
if (2 * preci * recal == 0):
fone = 0
else:
fone = 2 * preci * recal / (preci + recal)
result.append([i, preci, recal, fone])
return result # 90
# outArr = oneTrialWithCertainTrainSize(num_pos_sample=60, pos_neg_ratio=1, pos_dataset=global_pos_10_40262, neg_dataset=global_neg_10_402620)
# print "finish"
# trialsWithVariedTrainSize
def trialsWithVariedTrainSize(num_pos_sample=50,
num_pos_sample_cap=1500,
neg_pos_ratio=1,
pos_training_dataset=None,
pos_testing_dataset=None,
neg_dataset=None,
train_test_split=0,
# obsolete feature, keep default parameter to bypass, feature achieved by "num_pos_sample" param
test_stratify=True,
# obsolete feature, keep default parameter to bypass, feature achieved by "num_pos_sample" param
scoring="f1",
plt_or_not=True,
num_trial=10,
save=False,
saveName="0"):
generalResults = []
generalResultsPosNumRef = []
generalStdDev = []
while num_pos_sample <= num_pos_sample_cap:
trialsAns = []
# for each num_pos_sample, perform 10 trials
for trialsCount in range(num_trial):
# one single trial
outArr = oneTrialWithCertainTrainSize(num_pos_sample=num_pos_sample, neg_pos_ratio=neg_pos_ratio,
pos_training_dataset=pos_training_dataset,
pos_testing_dataset=pos_testing_dataset, neg_dataset=neg_dataset,
train_test_split=train_test_split, test_stratify=test_stratify,
scoring=scoring, plt_or_not=plt_or_not)
# put outArr together
trialsAns.append(outArr) # outArr = [threshold, prec, rec, f1tmp]
print "trial #" + str(trialsCount + 1) + " finished!"
# with open("trialsAns.json", "w") as f:
# json.dump(trialsAns, f)
print str(num_pos_sample) + " all trials finished!"
# calc std dev of max f1 based on trialsAns
# stdArray = []
# for e in range(len(trialsAns[0])):
# tmpArr = []
# for k in trialsAns:
# tmpArr.append(k[e][3])
# stdArray.append(np.std(np.array(tmpArr)))
#
# stddev = np.average(stdArray)
# generalStdDev.append(stddev)
#
if save == True:
fileName = "rawResults_" + saveName + ".json"
with open(fileName, "w") as f: json.dump(trialsAns, f)
(avg_thres, avg_prec, avg_rec, avg_f1) = avgProcess(trialsAns)
#
generalResults.append([avg_thres, avg_prec, avg_rec, avg_f1])
generalResultsPosNumRef.append(num_pos_sample)
# print results for each trial
targ = generalResults
index = targ[0][3].index(max(targ[0][3]))
for ntrial in range(len(trialsAns)):
fone = trialsAns[ntrial][index][3]
prec = trialsAns[ntrial][index][1]
rec = trialsAns[ntrial][index][2]
print "For trial#" + str(ntrial)
print "f1: %.4f" % fone + ", prec: %.4f" % prec + ", rec: %.4f" % rec
#
print str(num_pos_sample) + " positive finished!"
num_pos_sample += 50
# if num_pos_sample < 200: num_pos_sample += 10
# elif num_pos_sample < 500: num_pos_sample += 50
# else: num_pos_sample += 100
# return (generalResults, generalStdDev, generalResultsPosNumRef)
return (generalResults, generalResultsPosNumRef)
# return None
if __name__ == "__main__":
# experiment execution
print "start training..."
print 'part1 vs others classifer...'
# 10_walk_dv
print "train part1 test other parts with 10_walk_dv..."
(part1_10_walk_dv, generalResultsPosNumRef) = trialsWithVariedTrainSize(num_pos_sample=901,
num_pos_sample_cap=901,
neg_pos_ratio=1,
pos_training_dataset=part1_pos_10_walk_dv,
pos_testing_dataset=part2_pos_10_walk_dv + part3_pos_10_walk_dv + part4_pos_10_walk_dv + part5_pos_10_walk_dv,
neg_dataset=combNeg_10_walk_dv,
train_test_split=0,
test_stratify=True,
scoring="f1",
plt_or_not=False,
save=False)
targ = part1_10_walk_dv
max_f1 = max(targ[0][3]) # 0.5885
index_max_f1 = targ[0][3].index(max(targ[0][3])) # 73
prec_at_max_f1 = targ[0][1][index_max_f1] # 0.5536
rec_at_max_f1 = targ[0][2][index_max_f1] # 0.6204
print "index: %d, f1: %f, prec: %f, rec: %f" % (
index_max_f1, round(max_f1, 4), round(prec_at_max_f1, 4), round(rec_at_max_f1, 4))
print 'done!'
|
py | 1a3605f14dec1a9d1893cc03891991235b78199c | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
import os
import sys
from typing import Dict, List, Optional, Tuple
import numpy as np
from dataclasses import dataclass, field
from fairseq.data import Dictionary, HubertDataset
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.tasks import register_task
from fairseq.tasks.fairseq_task import FairseqTask
from omegaconf import MISSING
logger = logging.getLogger(__name__)
class LabelEncoder(object):
def __init__(self, dictionary: Dictionary) -> None:
self.dictionary = dictionary
def __call__(self, label: str) -> List[str]:
return self.dictionary.encode_line(
label,
append_eos=False,
add_if_not_exist=False,
)
@dataclass
class HubertPretrainingConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "path to data directory"})
fine_tuning: bool = field(
default=False, metadata={"help": "set to true if fine-tuning Hubert"}
)
labels: List[str] = field(
default_factory=lambda: ["ltr"],
metadata={
"help": (
"extension of the label files to load, frame-level labels for"
" pre-training, and sequence-level label for fine-tuning"
)
},
)
label_dir: Optional[str] = field(
default=None,
metadata={
"help": "if set, looks for labels in this directory instead",
},
)
label_rate: float = field(
default=-1.0,
metadata={"help": "label frame rate. -1.0 for sequence label"},
)
sample_rate: int = field(
default=16_000,
metadata={
"help": "target sample rate. audio files will be up/down "
"sampled to this rate"
},
)
normalize: bool = field(
default=False,
metadata={"help": "if set, normalizes input to have 0 mean and unit variance"},
)
enable_padding: bool = field(
default=False,
metadata={"help": "pad shorter samples instead of cropping"},
)
max_keep_size: Optional[int] = field(
default=None,
metadata={"help": "exclude sample longer than this"},
)
max_sample_size: Optional[int] = field(
default=None,
metadata={"help": "max sample size to crop to for batching"},
)
min_sample_size: Optional[int] = field(
default=None,
metadata={"help": "min sample size to crop to for batching"},
)
single_target: Optional[bool] = field(
default=False,
metadata={
"help": "if set, AddTargetDatasets outputs same keys " "as AddTargetDataset"
},
)
random_crop: Optional[bool] = field(
default=True,
metadata={"help": "always crop from the beginning if false"},
)
pad_audio: Optional[bool] = field(
default=False,
metadata={"help": "pad audio to the longest one in the batch if true"},
)
@register_task("hubert_pretraining", dataclass=HubertPretrainingConfig)
class HubertPretrainingTask(FairseqTask):
cfg: HubertPretrainingConfig
def __init__(
self,
cfg: HubertPretrainingConfig,
) -> None:
super().__init__(cfg)
logger.info(f"current directory is {os.getcwd()}")
logger.info(f"HubertPretrainingTask Config {cfg}")
self.cfg = cfg
self.fine_tuning = cfg.fine_tuning
if cfg.fine_tuning:
self.state.add_factory("target_dictionary", self.load_dictionaries)
else:
self.state.add_factory("dictionaries", self.load_dictionaries)
self.blank_symbol = "<s>"
@property
def source_dictionary(self) -> Optional[Dictionary]:
return None
@property
def target_dictionary(self) -> Optional[Dictionary]:
return self.state.target_dictionary
@property
def dictionaries(self) -> List[Dictionary]:
return self.state.dictionaries
@classmethod
def setup_task(
cls, cfg: HubertPretrainingConfig, **kwargs
) -> "HubertPretrainingTask":
return cls(cfg)
def load_dictionaries(self):
label_dir = self.cfg.data if self.cfg.label_dir is None else self.cfg.label_dir
dictionaries = [
Dictionary.load(f"{label_dir}/dict.{label}.txt")
for label in self.cfg.labels
]
return dictionaries[0] if self.cfg.fine_tuning else dictionaries
def get_label_dir(self) -> str:
if self.cfg.label_dir is None:
return self.cfg.data
return self.cfg.label_dir
def load_dataset(self, split: str, **kwargs) -> None:
manifest = f"{self.cfg.data}/{split}.tsv"
dicts = [self.target_dictionary] if self.cfg.fine_tuning else self.dictionaries
pad_list = [dict.pad() for dict in dicts]
eos_list = [dict.eos() for dict in dicts]
procs = [LabelEncoder(dict) for dict in dicts]
paths = [f"{self.get_label_dir()}/{split}.{l}" for l in self.cfg.labels]
# hubert v1: pad_audio=True, random_crop=False;
self.datasets[split] = HubertDataset(
manifest,
sample_rate=self.cfg.sample_rate,
label_paths=paths,
label_rates=self.cfg.label_rate,
pad_list=pad_list,
eos_list=eos_list,
label_processors=procs,
max_keep_sample_size=self.cfg.max_keep_size,
min_keep_sample_size=self.cfg.min_sample_size,
max_sample_size=self.cfg.max_sample_size,
pad_audio=self.cfg.pad_audio,
normalize=self.cfg.normalize,
store_labels=False,
random_crop=self.cfg.random_crop,
single_target=self.cfg.single_target,
)
def max_positions(self) -> Tuple[int, int]:
return (sys.maxsize, sys.maxsize)
def filter_indices_by_size(self, indices: np.array, *args, **kwargs) -> np.array:
return indices
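# Config sketch (paths and label settings below are hypothetical, not from this
# file): the task is normally selected via its dataclass fields in a
# fairseq-hydra-train YAML config, e.g.
#
#   task:
#     _name: hubert_pretraining
#     data: /path/to/tsv_manifests
#     label_dir: /path/to/km_labels
#     labels: ["km"]
#     label_rate: 100
#     sample_rate: 16000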
|
py | 1a3606ad055ce17d392ff99e067f1a6badfc9ac7 | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Class representing a Cloudstack instance. This module uses the csapi
library which calls the cloudstack API. For more information refer to
the Cloudstack documentation at https://github.com/syed/PerfKitBenchmarker.git
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_virtual_machine as linux_vm
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.cloudstack import cloudstack_disk
from perfkitbenchmarker.providers.cloudstack import cloudstack_network
from perfkitbenchmarker.providers.cloudstack import util
from perfkitbenchmarker import providers
from six.moves import range
FLAGS = flags.FLAGS
class CloudStackVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing a CloudStack Virtual Machine."""
CLOUD = providers.CLOUDSTACK
DEFAULT_ZONE = 'QC-1'
DEFAULT_MACHINE_TYPE = '1vCPU.1GB'
DEFAULT_IMAGE = None
DEFAULT_USER_NAME = 'cca-user'
DEFAULT_PROJECT = 'cloudops-Engineering'
def __init__(self, vm_spec):
"""Initialize a CloudStack virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.
"""
super(CloudStackVirtualMachine, self).__init__(vm_spec)
self.network = cloudstack_network.CloudStackNetwork.GetNetwork(self)
self.cs = util.CsClient(FLAGS.CS_API_URL,
FLAGS.CS_API_KEY,
FLAGS.CS_API_SECRET)
self.project_id = None
if FLAGS.project:
project = self.cs.get_project(FLAGS.project)
assert project, "Project not found"
self.project_id = project['id']
zone = self.cs.get_zone(self.zone)
assert zone, "Zone not found"
self.zone_id = zone['id']
self.user_name = self.DEFAULT_USER_NAME
self.image = self.image or self.DEFAULT_IMAGE
self.disk_counter = 0
@vm_util.Retry(max_retries=3)
def _CreateDependencies(self):
"""Create VM dependencies."""
# Create an ssh keypair
with open(self.ssh_public_key) as keyfd:
self.ssh_keypair_name = 'perfkit-sshkey-%s' % FLAGS.run_uri
pub_key = keyfd.read()
if not self.cs.get_ssh_keypair(self.ssh_keypair_name, self.project_id):
res = self.cs.register_ssh_keypair(self.ssh_keypair_name,
pub_key,
self.project_id)
assert res, "Unable to create ssh keypair"
# Allocate a public ip
network_id = self.network.id
if self.network.is_vpc:
network_id = self.network.vpc_id
public_ip = self.cs.alloc_public_ip(network_id, self.network.is_vpc)
if public_ip:
self.ip_address = public_ip['ipaddress']
self.ip_address_id = public_ip['id']
else:
logging.warn("Unable to allocate public IP")
def _DeleteDependencies(self):
"""Delete VM dependencies."""
# Remove the keypair
if self.cs.get_ssh_keypair(self.ssh_keypair_name, self.project_id):
self.cs.unregister_ssh_keypair(self.ssh_keypair_name, self.project_id)
# Remove the IP
if self.ip_address_id:
self.cs.release_public_ip(self.ip_address_id)
@vm_util.Retry(max_retries=3)
def _Create(self):
"""Create a Cloudstack VM instance."""
service_offering = self.cs.get_serviceoffering(self.machine_type)
assert service_offering, "No service offering found"
template = self.cs.get_template(self.image, self.project_id)
assert template, "No template found"
network_id = self.network.id
vm = None
vm = self.cs.create_vm(self.name,
self.zone_id,
service_offering['id'],
template['id'],
[network_id],
self.ssh_keypair_name,
self.project_id)
assert vm, "Unable to create VM"
self._vm = vm
self.id = vm['virtualmachine']['id']
@vm_util.Retry(max_retries=3)
def _PostCreate(self):
"""Get the instance's data."""
    # Associate the public IP created with the VM id
network_interface = self._vm['virtualmachine']['nic'][0]
self.internal_ip = network_interface['ipaddress']
# Create a Static NAT rule
if not self.cs.snat_rule_exists(self.ip_address_id, self.id):
snat_rule = self.cs.enable_static_nat(self.ip_address_id,
self.id,
self.network.id)
assert snat_rule, "Unable to create static NAT"
def _Delete(self):
"""Delete the VM instance."""
# Delete the VM
self.cs.delete_vm(self.id)
def _Exists(self):
"""Returns true if the VM exists."""
    # Check if the VM exists
vm = self.cs.get_virtual_machine(self.name, self.project_id)
if vm and 'id' in vm:
return True
return False
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
    # Cloudstack doesn't really have a concept of local or remote disks. A VM
    # starts with one disk and all other volumes have to be attached via the
    # API.
self.disks = []
for i in range(disk_spec.num_striped_disks):
name = 'disk-%s-%d-%d' % (self.name, i + 1, self.disk_counter)
scratch_disk = cloudstack_disk.CloudStackDisk(disk_spec,
name,
self.zone_id,
self.project_id)
self.disks.append(scratch_disk)
self.disk_counter += 1
self._CreateScratchDiskFromDisks(disk_spec, self.disks)
class CentOs7BasedCloudStackVirtualMachine(CloudStackVirtualMachine,
linux_vm.CentOs7Mixin):
DEFAULT_IMAGE = 'CentOS 7 HVM base (64bit)'
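# Usage sketch (endpoint and key values are placeholders): PerfKitBenchmarker
# selects this VM class via CLOUD = providers.CLOUDSTACK, and the CS_API_*
# flags referenced above supply the CloudStack endpoint, e.g.
#
#   ./pkb.py --cloud=CloudStack --benchmarks=iperf --zone=QC-1 \
#       --CS_API_URL=https://cloud.example.com/client/api \
#       --CS_API_KEY=<key> --CS_API_SECRET=<secret>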
|
py | 1a3609e18b1b8f2e99d60b02b285977229d0fbc4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
# with open('README.rst') as readme_file:
# readme = readme_file.read()
# with open('HISTORY.rst') as history_file:
# history = history_file.read()
requirements = [
'face_recognition_models>=0.3.0',
'Click>=6.0',
'dlib>=19.7',
'numpy',
'Pillow'
]
test_requirements = [
'tox',
'flake8'
]
setup(
name='face_recognition',
version='1.4.0',
description="Recognize faces from Python or from the command line",
# long_description=readme + '\n\n' + history,
author="Adam Geitgey",
author_email='[email protected]',
url='https://github.com/ageitgey/face_recognition',
packages=[
'face_recognition',
],
package_dir={'face_recognition': 'face_recognition'},
package_data={
'face_recognition': ['models/*.dat']
},
entry_points={
'console_scripts': [
'face_recognition=face_recognition.face_recognition_cli:main',
'face_detection=face_recognition.face_detection_cli:main'
]
},
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='face_recognition',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='tests',
tests_require=test_requirements
) |
py | 1a3609fb146aaef3604451683ba6132e33ca1408 | import copy
from engine.global_config import *
from engine.update_client import Update_Client
from engine.handler.input_handler import Input_Handler
from engine.status_check import Status_Check
from websocket_server.wswrap import WsWrap
from engine.character import Character
from engine.lex import Lex
from engine.inventory import inv
from pprint import pprint
###### Player Class ######
class Player(Character):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.entity_type = kwargs['entity_type']
self.core_attributes = kwargs['core_attributes']
self.player_state = kwargs['player_state']
self.stow_loc = kwargs['stow_loc']
self.client = kwargs['client']
self.unique_id = kwargs['unique_id']
def display_inventory(self):
inv = []
for i in self.inventory:
if self.inventory[i]['contents'] == None:
pass
else:
inv.append("{} {} {} {}".format(self.inventory[i]['contents'].name,
self.inventory[i]['worn'],
"your",
self.inventory[i]['name']))
if inv == []:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "You have nothing.")
else:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "You have {}.".format(", ".join(Lex.converted_contents(inv))))
# WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, rooms[room_num].name)
def echo(self):
if self.conditions['echo'] == True:
self.conditions['echo'] = False
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "Echo is now |alert| disabled. |alertx|")
else:
self.conditions['echo'] = True
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "Echo is now |success| enabled. |successx|")
def help(self, user_input, input_kwargs):
if len(user_input) < 2:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "|alert| Syntax: |alertx| HELP ON or HELP OFF.")
else:
if user_input[1] == "on":
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, 'Help is |success| ON.|successx|')
self.conditions['help'] = "enabled"
elif user_input[1] == "off":
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, 'Help is |alert| OFF|alertx|. ')
self.conditions['help'] = "disabled"
else:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "|alert| Syntax: |alertx| HELP ON or HELP OFF.")
def stow_set(self, user_input, input_kwargs):
stow_item = False
if len(user_input) == 1 or len(user_input) > 3:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "Syntax: STOW SET (container) or STOW (ITEM)")
elif user_input[1] == "set":
input_kwargs['target'] = Input_Handler.target_self_inventory(self, user_input[2], input_kwargs)
self.stow_loc = input_kwargs['target']
if self.stow_loc == None:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "Make sure you are wearing the container.")
else:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "Ok.")
elif user_input[1] != "set":
if self.stow_loc == None:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "You must first STOW SET (CONTAINER) for a container you are wearing.")
elif self.stow_loc.location_body['state'] != "worn":
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "You must be wearing that container.")
elif self.inventory['r_hand']['contents'] != None:
if user_input[1] in self.inventory['r_hand']['contents'].name:
stow_item = True
input_kwargs['target'] = self.inventory['r_hand']['contents']
input_kwargs['target_parent'] = self.stow_loc
elif self.inventory['l_hand']['contents'] != None:
if user_input[1] in self.inventory['l_hand']['contents'].name:
stow_item = True
input_kwargs['target'] = self.inventory['l_hand']['contents']
input_kwargs['target_parent'] = self.stow_loc
else:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "You can't stow that.")
if stow_item == True:
status, response = Status_Check.item_open_closed(self, user_input, input_kwargs)
if status == True:
if self.stow_loc == input_kwargs['target']:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "You can't stow something in itself.")
else:
Character.put_item(self, user_input, input_kwargs)
else:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "That is not open.")
else:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "Error with STOW target.")
def target_player(self):
# for i in server.clients:
# if i['id']
pass
    def convert_players_to_obj():
        inventory = copy.deepcopy(inv)
        for i in players:
            print(players[i])
            # Player/Character accept keyword arguments only (**kwargs), so the
            # original positional call could not run; the keyword names below are
            # taken from the inline annotations and may need to be aligned with
            # Character.__init__.
            new_player = Player(uuid_id=i,
                                entity_type=players[i][0],
                                name=players[i][1],
                                race=players[i][2],
                                gender=players[i][3],
                                vitals=players[i][4],
                                core_attributes=players[i][5],
                                conditions=players[i][6],
                                credit=players[i][7],
                                inventory=inventory,
                                location=players[i][8],
                                player_state=players[i][9],
                                stow_loc=players[i][10],
                                client=None,
                                unique_id=players[i][11])
            players[i] = new_player
            print(vars(new_player))
            # pprint(vars(new_player))
def list(self):
print("Server.clients:", server.clients) |
py | 1a360a1019a6adb528419c35b4e1d3a5636df6a3 | """Select and extract key frames in a video file.
Key frames are defined as a set of frames where each has an appropriate number
of matching points with its adjacent key frame.
RANSAC is applied to reduce the number of mismatched points and outliers.
"""
import cv2
import numpy as np
import argparse
import os
def main(videofile):
# Construct VideoCapture object to get frame-by-frame stream
vid_cap = cv2.VideoCapture(videofile)
# SIFT descriptors are utilized to describe the overlapping between the
# current frame and its neighbor
sift = cv2.xfeatures2d.SIFT_create()
# The first key frame (frame0.jpg) is selected by default
success, last = vid_cap.read()
    # cv2.imwrite() fails silently if the output directory is missing, so create it first
    os.makedirs('key_frames', exist_ok=True)
    cv2.imwrite('key_frames/frame0.jpg', last)
print("Captured frame0.jpg")
count = 1
frame_num = 1
w = int(last.shape[1] * 2 / 3) # the region to detect matching points
stride = 40 # stride for accelerating capturing
min_match_num = 100 # minimum number of matches required (to stitch well)
max_match_num = 600 # maximum number of matches (to avoid redundant frames)
while success:
if count % stride == 0:
# Detect and compute key points and descriptors
kp1, des1 = sift.detectAndCompute(last[:, -w:], None)
kp2, des2 = sift.detectAndCompute(image[:, :w], None)
# Use the Brute-Force matcher to obtain matches
bf = cv2.BFMatcher(normType=cv2.NORM_L2) # Using Euclidean distance
matches = bf.knnMatch(des1, des2, k=2)
# Define Valid Match: whose distance is less than match_ratio times
# the distance of the second best nearest neighbor.
match_ratio = 0.6
# Pick up valid matches
valid_matches = []
for m1, m2 in matches:
if m1.distance < match_ratio * m2.distance:
valid_matches.append(m1)
# At least 4 points are needed to compute Homography
if len(valid_matches) > 4:
img1_pts = []
img2_pts = []
for match in valid_matches:
img1_pts.append(kp1[match.queryIdx].pt)
img2_pts.append(kp2[match.trainIdx].pt)
# Formalize as matrices (for the sake of computing Homography)
img1_pts = np.float32(img1_pts).reshape(-1, 1, 2)
img2_pts = np.float32(img2_pts).reshape(-1, 1, 2)
# Compute the Homography matrix
_, mask = cv2.findHomography(img1_pts, img2_pts,
cv2.RANSAC, 5.0)
if min_match_num < np.count_nonzero(mask) < max_match_num:
# Save key frame as JPG file
last = image
print("Captured frame{}.jpg".format(frame_num))
cv2.imwrite('key_frames/frame%d.jpg' % frame_num, last)
frame_num += 1
success, image = vid_cap.read()
count += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file', nargs='?', default='360video.mp4',
help="path of the video file (default: 360video.mp4)")
args = parser.parse_args()
main(args.file)
|
py | 1a360b3281fd4654a0bd5784f6a017cf51990474 | """ Useful error messages for optional dependencies that aren't found. """
from typing import Optional
def _optional_import_error_template(pkgname: str,
url: str,
library: Optional[str] = None,
conda_channel: Optional[str] = None,
) -> ImportError:
"""
Simple template to prevent text duplication.
Parameters
----------
pkgname : str
Package name on pip or conda
url : str
Link to install page for given package
library : str (optional)
Full package name
conda_channel: str (optional)
set to "conda-forge" to add -c conda-forge to the conda install line
Returns
-------
ImportError
"""
library = pkgname if library is None else library
conda_channel = f"-c {conda_channel} " if conda_channel is not None else ""
template = f"""
{library} could not be found. Try either
conda install {conda_channel}{pkgname}
on Anaconda environments or
pip install {pkgname}
in general. In case of trouble refer to
{url}
(link active as of 2018.10.31 - please report dead links on GitHub!)"""
return ImportError(template)
h5py_import_error = _optional_import_error_template("h5py",
"http://docs.h5py.org/en/latest/build.html")
mpl_import_error = _optional_import_error_template("matplotlib",
"https://matplotlib.org/users/installing.html")
mpmath_import_error = _optional_import_error_template("mpmath",
"http://mpmath.org/doc/current/setup.html#download-and-installation")
lmfit_import_error = _optional_import_error_template("lmfit",
"https://lmfit.github.io/lmfit-py/installation.html")
|
py | 1a360b59e319320018ee93c6ece378025a0a8322 | """distutils.command.bdist
Implements the Distutils 'bdist' command (create a built [binary]
distribution)."""
__revision__ = "$Id$"
import os
from distutils.core import Command
from distutils.errors import *
from distutils.util import get_platform
def show_formats():
"""Print list of available formats (arguments to "--format" option).
"""
from distutils.fancy_getopt import FancyGetopt
formats = []
for format in bdist.format_commands:
formats.append(("formats=" + format, None,
bdist.format_command[format][1]))
pretty_printer = FancyGetopt(formats)
pretty_printer.print_help("List of available distribution formats:")
class bdist(Command):
description = "create a built (binary) distribution"
user_options = [('bdist-base=', 'b',
"temporary directory for creating built distributions"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('formats=', None,
"formats for distribution (comma-separated list)"),
('dist-dir=', 'd',
"directory to put final built distributions in "
"[default: dist]"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = ['skip-build']
help_options = [
('help-formats', None,
"lists available distribution formats", show_formats),
]
# The following commands do not take a format option from bdist
no_format_option = ('bdist_rpm',)
# This won't do in reality: will need to distinguish RPM-ish Linux,
# Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
default_format = {'posix': 'gztar',
'nt': 'zip',
'os2': 'zip'}
# Establish the preferred order (for the --help-formats option).
format_commands = ['rpm', 'gztar', 'bztar', 'ztar', 'tar',
'wininst', 'zip', 'msi']
# And the real information.
format_command = {'rpm': ('bdist_rpm', "RPM distribution"),
'gztar': ('bdist_dumb', "gzip'ed tar file"),
'bztar': ('bdist_dumb', "bzip2'ed tar file"),
'ztar': ('bdist_dumb', "compressed tar file"),
'tar': ('bdist_dumb', "tar file"),
'wininst': ('bdist_wininst',
"Windows executable installer"),
'zip': ('bdist_dumb', "ZIP file"),
'msi': ('bdist_msi', "Microsoft Installer")
}
def initialize_options(self):
self.bdist_base = None
self.plat_name = None
self.formats = None
self.dist_dir = None
self.skip_build = 0
def finalize_options(self):
# have to finalize 'plat_name' before 'bdist_base'
if self.plat_name is None:
if self.skip_build:
self.plat_name = get_platform()
else:
self.plat_name = self.get_finalized_command('build').plat_name
# 'bdist_base' -- parent of per-built-distribution-format
# temporary directories (eg. we'll probably have
# "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
if self.bdist_base is None:
build_base = self.get_finalized_command('build').build_base
self.bdist_base = os.path.join(build_base,
'bdist.' + self.plat_name)
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError(
"don't know how to create built distributions "
"on platform %s" % os.name)
if self.dist_dir is None:
self.dist_dir = "dist"
def run(self):
# Figure out which sub-commands we need to run.
commands = []
for format in self.formats:
try:
commands.append(self.format_command[format][0])
except KeyError:
raise DistutilsOptionError("invalid format '%s'" % format)
# Reinitialize and run each command.
for i in range(len(self.formats)):
cmd_name = commands[i]
sub_cmd = self.reinitialize_command(cmd_name)
if cmd_name not in self.no_format_option:
sub_cmd.format = self.formats[i]
# If we're going to need to run this command again, tell it to
# keep its temporary files around so subsequent runs go faster.
if cmd_name in commands[i+1:]:
sub_cmd.keep_temp = 1
self.run_command(cmd_name)
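# --- Hedged example (editor addition, not part of the distutils module) ---
# The format_command table above is what drives invocations such as
# `python setup.py bdist --formats=gztar,zip`; running this module directly
# prints the same table as `python setup.py bdist --help-formats`.
if __name__ == "__main__":
    show_formats()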
|
py | 1a360b5a652aa8a43972c19d79263f0762ddc08f | #!C:\Users\YacovMarsha\test_EUA\EUA_python\django-sky-visitor-master\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
py | 1a360b789fe055657a45674483df3056fa5f1846 | ##############################################################################
# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import time
import common as op_utils
from glanceclient.client import Client as GlanceClient
from novaclient.client import Client as NovaClient
from neutronclient.v2_0.client import Client as NeutronClient
def _get_glance_client():
sess = op_utils.get_session()
return GlanceClient(
op_utils.get_glance_api_version(),
session=sess)
def _get_nova_client():
sess = op_utils.get_session()
return NovaClient(
op_utils.get_nova_api_version(),
session=sess)
def _get_neutron_client():
sess = op_utils.get_session()
neutron_client = NeutronClient(session=sess)
return neutron_client
def create_images(
imagefile=None,
image_name="bottlenecks_image"):
print "========== Create image in OS =========="
if imagefile is None:
print "imagefile not set/found"
return False
glance = _get_glance_client()
image = glance.images.create(
name=image_name,
disk_format="qcow2",
container_format="bare")
with open(imagefile) as fimage:
glance.images.upload(image.id, fimage)
timeInQueue = 0
img_status = image.status
while img_status == "queued" and timeInQueue < 30:
print " image's status: " + img_status
time.sleep(1)
timeInQueue = timeInQueue + 1
img_status = glance.images.get(image.id).status
print "After %d seconds,image status is [%s]" % (timeInQueue, img_status)
return True if img_status == "active" else False
def stack_create_keypairs(key_path, name="bottlenecks_keypair"):
print "========== Add keypairs in OS =========="
nova = _get_nova_client()
with open(key_path) as pkey:
nova.keypairs.create(name=name, public_key=pkey.read())
def stack_create_flavors(
name="bottlenecks_flavor",
ram=4096,
vcpus=2,
disk=10):
print "========== Create flavors in OS =========="
nova = _get_nova_client()
nova.flavors.create(name=name, ram=ram, vcpus=vcpus, disk=disk)
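# --- Hedged example (editor addition, not part of the original module) ---
# Ties the three helpers together in the order a test setup might call them.
# The image path, key path, and resource names are illustrative assumptions,
# not values required by this module.
def prepare_stack_resources():
    if create_images(imagefile="/tmp/cirros.img", image_name="bottlenecks_image"):
        stack_create_keypairs(key_path="/tmp/id_rsa.pub", name="bottlenecks_keypair")
        stack_create_flavors(name="bottlenecks_flavor", ram=4096, vcpus=2, disk=10)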
|
py | 1a360d096ac74f18b92b2de9cd8ecbd2f9201163 | import os
import random
from unittest import mock
import requests
import string
import time
import signal
import socket
import subprocess
import uuid
import sys
import yaml
import pandas as pd
import pytest
import mlflow
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
import mlflow.pyfunc
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.file_utils import read_yaml, write_yaml
from mlflow.utils.environment import _get_pip_deps, _CONSTRAINTS_FILE_NAME
from mlflow.utils.requirements_utils import _strip_local_version_identifier, _get_installed_version
LOCALHOST = "127.0.0.1"
def get_safe_port():
"""Returns an ephemeral port that is very likely to be free to bind to."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((LOCALHOST, 0))
port = sock.getsockname()[1]
sock.close()
return port
def random_int(lo=1, hi=1e10):
return random.randint(lo, hi)
def random_str(size=10, chars=string.ascii_uppercase + string.digits):
return "".join(random.choice(chars) for _ in range(size))
def random_file(ext):
return "temp_test_%d.%s" % (random_int(), ext)
def score_model_in_sagemaker_docker_container(
model_uri,
data,
content_type,
flavor=mlflow.pyfunc.FLAVOR_NAME,
activity_polling_timeout_seconds=500,
):
"""
:param model_uri: URI to the model to be served.
:param data: The data to send to the docker container for testing. This is either a
Pandas dataframe or string of the format specified by `content_type`.
:param content_type: The type of the data to send to the docker container for testing. This is
one of `mlflow.pyfunc.scoring_server.CONTENT_TYPES`.
:param flavor: Model flavor to be deployed.
:param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before
declaring the scoring process to have failed.
"""
env = dict(os.environ)
env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8")
proc = _start_scoring_proc(
cmd=["mlflow", "sagemaker", "run-local", "-m", model_uri, "-p", "5000", "-f", flavor],
env=env,
)
return _evaluate_scoring_proc(proc, 5000, data, content_type, activity_polling_timeout_seconds)
def pyfunc_build_image(model_uri, extra_args=None):
"""
Builds a docker image containing the specified model, returning the name of the image.
:param model_uri: URI of model, e.g. runs:/some-run-id/run-relative/path/to/model
:param extra_args: List of extra args to pass to `mlflow models build-docker` command
"""
name = uuid.uuid4().hex
cmd = ["mlflow", "models", "build-docker", "-m", model_uri, "-n", name]
if extra_args:
cmd += extra_args
p = subprocess.Popen(cmd,)
assert p.wait() == 0, "Failed to build docker image to serve model from %s" % model_uri
return name
def pyfunc_serve_from_docker_image(image_name, host_port, extra_args=None):
"""
Serves a model from a docker container, exposing it as an endpoint at the specified port
on the host machine. Returns a handle (Popen object) to the server process.
"""
env = dict(os.environ)
env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8")
scoring_cmd = ["docker", "run", "-p", "%s:8080" % host_port, image_name]
if extra_args is not None:
scoring_cmd += extra_args
return _start_scoring_proc(cmd=scoring_cmd, env=env)
def pyfunc_serve_from_docker_image_with_env_override(
image_name, host_port, gunicorn_opts, extra_args=None
):
"""
Serves a model from a docker container, exposing it as an endpoint at the specified port
on the host machine. Returns a handle (Popen object) to the server process.
"""
env = dict(os.environ)
env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8")
scoring_cmd = [
"docker",
"run",
"-e",
"GUNICORN_CMD_ARGS=%s" % gunicorn_opts,
"-p",
"%s:8080" % host_port,
image_name,
]
if extra_args is not None:
scoring_cmd += extra_args
return _start_scoring_proc(cmd=scoring_cmd, env=env)
def pyfunc_serve_and_score_model(
model_uri,
data,
content_type,
activity_polling_timeout_seconds=500,
extra_args=None,
stdout=sys.stdout,
):
"""
:param model_uri: URI to the model to be served.
:param data: The data to send to the pyfunc server for testing. This is either a
Pandas dataframe or string of the format specified by `content_type`.
:param content_type: The type of the data to send to the pyfunc server for testing. This is
one of `mlflow.pyfunc.scoring_server.CONTENT_TYPES`.
:param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before
declaring the scoring process to have failed.
:param extra_args: A list of extra arguments to pass to the pyfunc scoring server command. For
example, passing ``extra_args=["--no-conda"]`` will pass the ``--no-conda``
flag to the scoring server to ensure that conda environment activation
is skipped.
"""
env = dict(os.environ)
env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8")
env.update(MLFLOW_TRACKING_URI=mlflow.get_tracking_uri())
env.update(MLFLOW_HOME=_get_mlflow_home())
port = get_safe_port()
scoring_cmd = [
"mlflow",
"models",
"serve",
"-m",
model_uri,
"-p",
str(port),
"--install-mlflow",
]
if extra_args is not None:
scoring_cmd += extra_args
proc = _start_scoring_proc(cmd=scoring_cmd, env=env, stdout=stdout, stderr=stdout)
return _evaluate_scoring_proc(proc, port, data, content_type, activity_polling_timeout_seconds)
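# --- Hedged usage sketch (editor addition, not part of the original helpers) ---
# Serves a previously logged pyfunc model and scores a tiny dataframe, mirroring
# how tests typically call pyfunc_serve_and_score_model. `model_uri` must point
# at a real logged model (e.g. "runs:/<run_id>/model"); the helper name and the
# sample dataframe are illustrative assumptions.
def _example_serve_and_score(model_uri):
    df = pd.DataFrame({"x": [1.0, 2.0]})
    return pyfunc_serve_and_score_model(
        model_uri=model_uri,
        data=df,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
        extra_args=["--no-conda"],
    )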
def _get_mlflow_home():
"""
:return: The path to the MLflow installation root directory
"""
mlflow_module_path = os.path.dirname(os.path.abspath(mlflow.__file__))
# The MLflow root directory is one level about the mlflow module location
return os.path.join(mlflow_module_path, os.pardir)
def _start_scoring_proc(cmd, env, stdout=sys.stdout, stderr=sys.stderr):
if os.name != "nt":
return subprocess.Popen(
cmd,
stdout=stdout,
stderr=stderr,
universal_newlines=True,
env=env,
# Assign the scoring process to a process group. All child processes of the
# scoring process will be assigned to this group as well. This allows child
# processes of the scoring process to be terminated successfully
preexec_fn=os.setsid,
)
else:
return subprocess.Popen(
cmd,
stdout=stdout,
stderr=stderr,
universal_newlines=True,
env=env,
# On Windows, `os.setsid` and `preexec_fn` are unavailable
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
)
class RestEndpoint:
def __init__(self, proc, port, activity_polling_timeout_seconds=250):
self._proc = proc
self._port = port
self._activity_polling_timeout_seconds = activity_polling_timeout_seconds
def __enter__(self):
        ping_status = None
        for i in range(0, int(self._activity_polling_timeout_seconds / 5)):
assert self._proc.poll() is None, "scoring process died"
time.sleep(5)
# noinspection PyBroadException
try:
ping_status = requests.get(url="http://localhost:%d/ping" % self._port)
print("connection attempt", i, "server is up! ping status", ping_status)
if ping_status.status_code == 200:
break
except Exception:
print("connection attempt", i, "failed, server is not up yet")
        if ping_status is None or ping_status.status_code != 200:
raise Exception("ping failed, server is not happy")
print("server up, ping status", ping_status)
return self
def __exit__(self, tp, val, traceback):
if self._proc.poll() is None:
# Terminate the process group containing the scoring process.
# This will terminate all child processes of the scoring process
if os.name != "nt":
pgrp = os.getpgid(self._proc.pid)
os.killpg(pgrp, signal.SIGTERM)
else:
# https://stackoverflow.com/questions/47016723/windows-equivalent-for-spawning-and-killing-separate-process-group-in-python-3 # noqa
self._proc.send_signal(signal.CTRL_BREAK_EVENT)
self._proc.kill()
def invoke(self, data, content_type):
if type(data) == pd.DataFrame:
if content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON_RECORDS_ORIENTED:
data = data.to_json(orient="records")
elif (
content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON
or content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED
):
data = data.to_json(orient="split")
elif content_type == pyfunc_scoring_server.CONTENT_TYPE_CSV:
data = data.to_csv(index=False)
else:
raise Exception(
"Unexpected content type for Pandas dataframe input %s" % content_type
)
response = requests.post(
url="http://localhost:%d/invocations" % self._port,
data=data,
headers={"Content-Type": content_type},
)
return response
def _evaluate_scoring_proc(proc, port, data, content_type, activity_polling_timeout_seconds=250):
"""
:param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before
declaring the scoring process to have failed.
"""
with RestEndpoint(proc, port, activity_polling_timeout_seconds) as endpoint:
return endpoint.invoke(data, content_type)
@pytest.fixture(scope="module", autouse=True)
def set_boto_credentials():
os.environ["AWS_ACCESS_KEY_ID"] = "NotARealAccessKey"
os.environ["AWS_SECRET_ACCESS_KEY"] = "NotARealSecretAccessKey"
os.environ["AWS_SESSION_TOKEN"] = "NotARealSessionToken"
@pytest.fixture
def mock_s3_bucket():
"""
Creates a mock S3 bucket using moto
:return: The name of the mock bucket
"""
import boto3
import moto
with moto.mock_s3():
bucket_name = "mock-bucket"
s3_client = boto3.client("s3")
s3_client.create_bucket(Bucket=bucket_name)
yield bucket_name
class safe_edit_yaml(object):
def __init__(self, root, file_name, edit_func):
self._root = root
self._file_name = file_name
self._edit_func = edit_func
self._original = read_yaml(root, file_name)
def __enter__(self):
new_dict = self._edit_func(self._original.copy())
write_yaml(self._root, self._file_name, new_dict, overwrite=True)
def __exit__(self, *args):
write_yaml(self._root, self._file_name, self._original, overwrite=True)
def create_mock_response(status_code, text):
"""
    Create a mock response object with the given status_code and text
    :param status_code: int HTTP status code
    :param text: message from the response
    :return: mock HTTP Response
"""
response = mock.MagicMock()
response.status_code = status_code
response.text = text
return response
def _read_yaml(path):
with open(path, "r") as f:
return yaml.safe_load(f)
def _read_lines(path):
with open(path, "r") as f:
return f.read().splitlines()
def _compare_conda_env_requirements(env_path, req_path):
assert os.path.exists(req_path)
custom_env_parsed = _read_yaml(env_path)
requirements = _read_lines(req_path)
assert _get_pip_deps(custom_env_parsed) == requirements
def _assert_pip_requirements(model_uri, requirements, constraints=None):
local_path = _download_artifact_from_uri(model_uri)
txt_reqs = _read_lines(os.path.join(local_path, "requirements.txt"))
conda_reqs = _get_pip_deps(_read_yaml(os.path.join(local_path, "conda.yaml")))
assert txt_reqs == requirements
assert conda_reqs == requirements
if constraints:
assert f"-c {_CONSTRAINTS_FILE_NAME}" in txt_reqs
assert f"-c {_CONSTRAINTS_FILE_NAME}" in conda_reqs
cons = _read_lines(os.path.join(local_path, _CONSTRAINTS_FILE_NAME))
assert cons == constraints
def _is_available_on_pypi(package, version=None, module=None):
"""
Returns True if the specified package version is available on PyPI.
:param package: The name of the package.
:param version: The version of the package. If None, defaults to the installed version.
:param module: The name of the top-level module provided by the package . For example,
if `package` is 'scikit-learn', `module` should be 'sklearn'. If None, defaults
to `package`.
"""
resp = requests.get("https://pypi.python.org/pypi/{}/json".format(package))
if not resp.ok:
return False
module = module or package
version = version or _get_installed_version(module)
version = _strip_local_version_identifier(version)
dist_files = resp.json()["releases"].get(version)
return (
dist_files is not None # specified version exists
and (len(dist_files) > 0) # at least one distribution file exists
and not dist_files[0].get("yanked", False) # specified version is not yanked
)
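# --- Hedged example (editor addition, not part of the original helpers) ---
# One way this check is typically used: fall back to `--no-conda` serving args
# when the locally installed package version was never published to PyPI
# (e.g. a dev build). The helper name and defaults are illustrative assumptions.
def _example_extra_serving_args(package="scikit-learn", module="sklearn"):
    return [] if _is_available_on_pypi(package, module=module) else ["--no-conda"]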
|
py | 1a360d508d6d614186746fe3d996f5dff624d3e0 | def getzones(DATA) -> dict: # getting 'interfaces' dict as input
result = dict()
for interface, params in DATA.items():
if params.get('zone'):
if not result.get(params['zone']):
result[params['zone']] = list()
result[params['zone']].append(interface)
return result
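# --- Hedged usage example (editor addition) ---
# The argument is the 'interfaces' mapping mentioned in the comment above; the
# interface names and zones are illustrative.
if __name__ == "__main__":
    sample_interfaces = {
        "GigabitEthernet0/0": {"zone": "trust", "ip": "10.0.0.1"},
        "GigabitEthernet0/1": {"zone": "untrust", "ip": "192.0.2.1"},
        "Loopback0": {"ip": "1.1.1.1"},  # no zone key, so it is skipped
    }
    print(getzones(sample_interfaces))
    # -> {'trust': ['GigabitEthernet0/0'], 'untrust': ['GigabitEthernet0/1']}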
|
py | 1a360d9b51e42a4109a804a9afdd85130d2fefa4 | """TorchScript
This module contains functionality to support the JIT's scripting frontend, notably:
- torch.jit.script
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
import functools
import collections
import enum
import inspect
import copy
import pickle
import warnings
from typing import Any, Dict, List, Tuple, Union, Callable
import torch
import torch._jit_internal as _jit_internal
from torch.utils import set_module
from torch.jit._recursive import ScriptMethodStub, wrap_cpp_module, infer_methods_to_compile
from torch.nn import Module
from torch.jit._state import _enabled
from torch.jit._builtins import _register_builtin
from torch._six import with_metaclass
from torch.jit.frontend import get_jit_def, get_default_args, get_jit_class_def
from torch._jit_internal import _qualified_name
from torch.jit._fuser import _graph_for
from torch.jit._state import (
_try_get_jit_cached_function,
_try_get_jit_cached_overloads,
_set_jit_function_cache,
_set_jit_overload_cache,
)
from torch.overrides import (
has_torch_function, has_torch_function_unary, has_torch_function_variadic)
from torch.jit._monkeytype_config import (
monkeytype_trace,
JitTypeTraceConfig ,
JitTypeTraceStore
)
type_trace_db = JitTypeTraceStore() # DB to hold all call traces from MonkeyType
torch._C.ScriptMethod.graph_for = _graph_for # type: ignore[attr-defined]
torch._C.ScriptFunction.graph_for = _graph_for # type: ignore[attr-defined]
ScriptFunction = torch._C.ScriptFunction
ScriptFunction.__doc__ = """
Functionally equivalent to a :class:`ScriptModule`, but represents a single
function and does not have any attributes or Parameters.
"""
set_module(ScriptFunction, "torch.jit")
if _enabled:
Attribute = collections.namedtuple("Attribute", ["value", "type"])
else:
def Attribute(value, type): # type: ignore[no-redef]
return value
Attribute.__doc__ = """
This method is a pass-through function that returns `value`, mostly
used to indicate to the TorchScript compiler that the left-hand side
expression is a class instance attribute with type of `type`. Note that
`torch.jit.Attribute` should only be used in `__init__` method of `nn.Module`
subclasses.
Though TorchScript can infer correct type for most Python expressions, there are some cases where
type inference can be wrong, including:
- Empty containers like `[]` and `{}`, which TorchScript assumes to be container of `Tensor`s
- Optional types like `Optional[T]` but assigned a valid value of type `T`, TorchScript would assume
it is type `T` rather than `Optional[T]`
In eager mode, it is simply a pass-through function that returns `value`
without other implications.
Example:
.. testcode::
import torch
from typing import Dict
class AttributeModule(torch.nn.Module):
def __init__(self):
                super(AttributeModule, self).__init__()
self.foo = torch.jit.Attribute(0.1, float)
# we should be able to use self.foo as a float here
assert 0.0 < self.foo
self.names_ages = torch.jit.Attribute({}, Dict[str, int])
self.names_ages["someone"] = 20
assert isinstance(self.names_ages["someone"], int)
m = AttributeModule()
# m will contain two attributes
# 1. foo of type float
# 2. names_ages of type Dict[str, int]
.. testcleanup::
del AttributeModule
del m
Args:
value: An initial value to be assigned to attribute.
type: A Python type
Returns:
Returns `value`
"""
def _get_type_trace_db():
# This is a private API. Use of this for external purposes is discouraged.
return type_trace_db
# Gets a function from the name of a method on a type
def _get_function_from_type(cls, name):
return getattr(cls, name, None)
# ScriptClasses must be new-style classes because we construct them using their
# __new__ method.
def _is_new_style_class(cls):
if hasattr(cls, "__class__"):
return "__dict__" in dir(cls) or hasattr(cls, "__slots__")
def _compile_and_register_class(obj, rcb, qualified_name):
ast = get_jit_class_def(obj, obj.__name__)
defaults = torch.jit.frontend.get_default_args_for_class(obj)
script_class = torch._C._jit_script_class_compile(qualified_name, ast, defaults, rcb)
torch.jit._state._add_script_class(obj, script_class)
return script_class
# These OrderedDictWrapper classes replace the actual OrderedDicts in
# module with versions that get/set properties inside of Module.
# This allows us to reuse most of nn.Module while still storing the
# data in C++.
# Each OrderedDict needs to support:
# x not in view
# x in view
# view[name] = ...
# view.values()
# del view[name]
# view.items()
# view.keys()
# len(view)
class OrderedDictWrapper(object):
def __init__(self, _c):
self._c = _c
def keys(self):
return [k for k, v in self.items()]
def values(self):
return [v for k, v in self.items()]
def __len__(self):
return len(self.values())
def __delitem__(self, k):
raise RuntimeError("cannot delete methods or parameters of a script module")
def items(self):
return self._c.items()
def __setitem__(self, k, v):
if k not in self:
raise RuntimeError(
"Can't add a new parameter after ScriptModule construction."
" Tried to add '{}".format(k)
)
self._c.setattr(k, v)
def __contains__(self, k):
return self._c.contains(k)
def __getitem__(self, k):
if k not in self:
raise KeyError(k)
return self._c.getattr(k)
class OrderedModuleDict(OrderedDictWrapper):
def __init__(self, module, python_dict):
super(OrderedModuleDict, self).__init__(torch._C.ModuleDict(module))
# contains _both_ script modules and non-script python-only modules
# because script modules are subclassed in python and the
# C++ Module class will not hold references to them,
# to ensure that you always get the same python value here
# we store it in the python dict as well
self._python_modules = python_dict
def items(self):
r = self._python_modules.items()
return r
def __contains__(self, k):
return k in self._python_modules
def __setitem__(self, k, v):
# Cases where sub-module can be re-assigned after ScriptModule construction
# 1. If the attr is an module interface type, it's guaranteed that the module is
# not inlined in the graph, so it's safe to swap a new ScriptModule in.
# 2. if the new value if a ScriptModule with the same JIT type, IR won't change
# and it's legit to swap a new module in.
# In these two cases we allow swapping a new scripted module and update the
# corresponding python module dict to keep sync.
# Note: the value to be swapped in has to be ScriptModule instead of nn.Module,
# otherwise it's illegal and we throw error.
if isinstance(v, ScriptModule):
self._c.setattr(k, v)
self._python_modules[k] = v
else:
raise RuntimeError(
"Cannot re-assign modules in a ScriptModule with non-scripted "
"module, tried to replace existing module '{}': {}".format(k, v)
)
def __getitem__(self, k):
return self._python_modules[k]
# For each user-defined class that subclasses ScriptModule, this meta-class:
# (1) finds all the methods annotated with @script_method in a ScriptModule and
# removes them from the class attributes
# (2) puts a wrapper around the class's __init__ method to recursively compile
# all of the script_methods with the module after the original __init__ has
# run. This has to occur after the user-defined __init__ so that submodules and
# parameters are initialized _before_ the script compiler resolve references to
# `self.param` or `self.module`.
class ScriptMeta(type):
def __init__(cls, name, bases, attrs): # noqa: B902
# Aggregate all the ScriptMethods and constants from superclasses
cls._methods: Dict[str, Any] = {}
cls._constants_set = set(getattr(cls, "__constants__", ()))
for base in reversed(bases):
for k, v in getattr(base, "_methods", {}).items():
cls._methods[k] = v
base_constants = getattr(base, "_constants_set", set())
cls._constants_set = cls._constants_set.union(base_constants)
# find all the script methods of the current class
for k, v in sorted(attrs.items()):
if isinstance(v, ScriptMethodStub):
delattr(cls, k)
cls._methods[v.original_method.__name__] = v
if getattr(cls, "_disable_script_meta", False):
# We leave built-in ScriptModule types alone, since this metaclass
# is only for compiling user classes that inherit from
# ScriptModule.
return super(ScriptMeta, cls).__init__(name, bases, attrs)
original_init = getattr(cls, "__init__", lambda self: None)
@functools.wraps(original_init)
def init_then_script(self, *args, **kwargs):
num_methods = len(cls._methods)
original_init(self, *args, **kwargs)
added_methods_in_init = len(cls._methods) > num_methods
if type(self) == cls:
def make_stubs(module):
cls = type(module)
if hasattr(cls, "_methods"):
return [v for k, v in sorted(cls._methods.items())]
else:
return infer_methods_to_compile(module)
self.__dict__[
"_actual_script_module"
] = torch.jit._recursive.create_script_module(self, make_stubs, share_types=not added_methods_in_init)
# Delete the Python attributes that now shadow the ScriptModule
# ones, so that __getattr__ and __setattr__ will properly find
# the scripted versions.
concrete_type = self._actual_script_module._concrete_type
for name in concrete_type.get_attributes():
delattr(self, name)
for name, _ in concrete_type.get_modules():
delattr(self, name)
for name in ("_parameters", "_buffers", "_modules"):
delattr(self, name)
cls.__init__ = init_then_script # type: ignore[misc]
return super(ScriptMeta, cls).__init__(name, bases, attrs)
class _CachedForward(object):
def __get__(self, obj, cls):
return self.__getattr__("forward") # type: ignore[attr-defined]
class ScriptWarning(Warning):
pass
def script_method(fn):
if not _enabled:
return fn
# NOTE: we need to traverse two frames here because the meta-class frame
# for ScriptModule will be present, as opposed to invoking @script on a
# a function or invoking define() on a CompilationUnit.
# The stack will look like:
#
# 0. createResolutionCallback()
# 1. script_method()
# 2. ScriptModule metaclass frame
# 3. Surrounding scope
#
# createResolutionCallback internally adds 1 to get us to the scope of this
# function (the calling function). Adding 2 gets us to the proper surrounding scope.
_rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=2)
ast = get_jit_def(fn, fn.__name__, self_name="ScriptModule")
return ScriptMethodStub(_rcb, ast, fn)
class ConstMap:
def __init__(self, const_mapping):
self.const_mapping = const_mapping
def __getattr__(self, attr):
return self.const_mapping[attr]
if _enabled:
# this is a Python 'non-data descriptor' that causes the first access
# to ScriptModule's forward to lookup the forward method and stash
# it in the objects dict. Due to the standard rules for attribute lookup,
# subsequent lookups will just directly return the previously looked up method.
# This is necessary because nn.Module defines forward as a method. If we
# did nothing, __getattr__ would not be called. Instead we'd get nn.Module.forward
# which always throws an exception.
class ScriptModule(with_metaclass(ScriptMeta, Module)): # type: ignore[misc]
r"""
A wrapper around C++ ``torch::jit::Module``. ``ScriptModule``\s
contain methods, attributes, parameters, and
constants. These can be accessed the same way as on a normal ``nn.Module``.
"""
__jit_unused_properties__ = ['code', 'code_with_constants', 'graph', 'inlined_graph', 'original_name']
def __init__(self):
super(ScriptModule, self).__init__()
forward = _CachedForward()
def __getattr__(self, attr):
if "_actual_script_module" not in self.__dict__:
return super(ScriptModule, self).__getattr__(attr)
return getattr(self._actual_script_module, attr)
def __setattr__(self, attr, value):
if "_actual_script_module" not in self.__dict__:
# Unwrap torch.jit.Attribute into a regular setattr + record
# the provided type in __annotations__.
#
# This ensures that if we use the attr again in `__init__`, it
# will look like the actual value, not an instance of Attribute.
if isinstance(value, Attribute):
# NB: Ensure that we set __annotations__ on the specific
# class in question, and not on a superclass (which would
# be wrong wrong wrong!).
# See also https://github.com/pytorch/pytorch/issues/39463
if "__annotations__" not in self.__class__.__dict__:
self.__class__.__annotations__ = {}
self.__annotations__[attr] = value.type
value = value.value
return super(ScriptModule, self).__setattr__(attr, value)
setattr(self._actual_script_module, attr, value)
def define(self, src):
if "_actual_script_module" in self.__dict__:
# If we have completed initialization, just defer to the
# backing RecursiveScriptModule to eagerly compile the provided
# source.
return self._actual_script_module.define(src)
# Otherwise, we are still in the object's __init__.
# In that case, add `src` as a stub to be compiled.
#
# We use frames_up=1 to get to the proper surrounding scope. The stack
# will look like:
# 0. createResolutionCallback
# 1. define()
# 2. surrounding scope.
#
# createResolutionCallback internally adds 1 to get us to our frame, then
# we add 1 to get to the proper surrounding scope.
rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
ast = torch._C._parse_source_def(src)
self._methods[ast.name().name] = ScriptMethodStub(rcb, ast, None)
def _replicate_for_data_parallel(self):
return self._actual_script_module._replicate_for_data_parallel()
class RecursiveScriptModule(ScriptModule):
# XXX: RecursiveScriptModule inherits from ScriptModule for the sole
# reason that it retains the existing isinstance(ScriptModule)
# behavior.
r"""
The core data structure in TorchScript is the ``ScriptModule``. It is an
analogue of torch's ``nn.Module`` and represents an entire model as a tree of
submodules. Like normal modules, each individual module in a ``ScriptModule`` can
have submodules, parameters, and methods. In ``nn.Module``\s methods are implemented
as Python functions, but in ``ScriptModule``\s methods are implemented as
TorchScript functions, a statically-typed subset of Python that contains all
of PyTorch's built-in Tensor operations. This difference allows your
``ScriptModule``\s code to run without the need for a Python interpreter.
``ScriptModule``\s should not be created manually, instead use
either :func:`tracing <torch.jit.trace>` or :func:`scripting <torch.jit.script>`.
Tracing and scripting can be applied incrementally and :ref:`composed as necessary <Types>`.
* Tracing records the tensor operations as executed with a set of example inputs and uses these
operations to construct a computation graph. You can use the full dynamic behavior of Python with tracing,
but values other than Tensors and control flow aren't captured in the graph.
* Scripting inspects the Python code of the model
and compiles it to TorchScript. Scripting allows the use of many `types`_ of values and supports dynamic control flow.
Many, but not all features of Python are supported by the compiler, so changes to the source code may be necessary.
"""
_disable_script_meta = True
def __init__(self, cpp_module):
self.__dict__["_initializing"] = True
self._c = cpp_module
super(RecursiveScriptModule, self).__init__()
# Delete the 'training' attribute set up by `Module.__init__`. It
# will get set on the underlying cpp module, so we delete it here
# to avoid this version shadowing the cpp module version.
delattr(self, "training")
@staticmethod
def _construct(cpp_module, init_fn):
"""
Construct a RecursiveScriptModule that's ready for use. PyTorch
            code should use this to construct a RecursiveScriptModule instead
            of calling `__init__` directly, as it makes sure the
object is properly finalized (and in the future, we may take
control of how the RecursiveScriptModule instance is created).
Args:
cpp_module: The C++ Module that will hold the actual state of
this RecursiveScriptModule instance.
init_fn: Lambda that initializes the RecursiveScriptModule passed to it.
"""
script_module = RecursiveScriptModule(cpp_module)
init_fn(script_module)
# Finalize the ScriptModule: replace the nn.Module state with our
# custom implementations and flip the _initializing bit.
RecursiveScriptModule._finalize_scriptmodule(script_module)
return script_module
@staticmethod
def _finalize_scriptmodule(script_module):
script_module._parameters = OrderedDictWrapper(
torch._C.ParameterDict(script_module._c)
)
script_module._buffers = OrderedDictWrapper(
torch._C.BufferDict(script_module._c)
)
script_module._modules = OrderedModuleDict(
script_module._c, script_module._modules
)
script_module._initializing = False
def _reconstruct(self, cpp_module):
"""
Re-construct an instance of RecursiveScriptModule using an instance of a C++ module.
Args:
cpp_module: The C++ module that this RecursiveScriptModule will be rebuilt around.
"""
self.__init__(cpp_module) # type: ignore[misc]
# Copy the concrete type from the C++ module to this ScriptModule.
self._concrete_type = torch._C.ConcreteModuleType.from_jit_type(
self._c._type()
)
# Copy submodules from the C++ module to this ScriptModule.
modules = {}
for name, cpp_module in torch._C.ModuleDict(self._c).items():
modules[name] = wrap_cpp_module(cpp_module)
self._modules = OrderedModuleDict(self._c, modules)
# Copy parameters and buffers.
self._parameters = OrderedDictWrapper(torch._C.ParameterDict(self._c))
self._buffers = OrderedDictWrapper(torch._C.BufferDict(self._c))
# Get rid of the functions from the old C++ module.
self.__dict__ = {
k: v
for k, v in self.__dict__.items()
if not isinstance(v, torch._C.ScriptMethod)
}
self.__dict__["_initializing"] = False
@property
def graph(self):
r"""
Returns a string representation of the internal graph for the
``forward`` method. See :ref:`interpreting-graphs` for details.
"""
return self._c._get_method("forward").graph
@property
def inlined_graph(self):
r"""
Returns a string representation of the internal graph for the
``forward`` method. This graph will be preprocessed to inline all function and method calls.
See :ref:`interpreting-graphs` for details.
"""
return self.forward.inlined_graph
@property
def code(self):
r"""
Returns a pretty-printed representation (as valid Python syntax) of
the internal graph for the ``forward`` method. See
:ref:`inspecting-code` for details.
"""
return self.forward.code
@property
def code_with_constants(self):
r"""
Returns a tuple of:
[0] a pretty-printed representation (as valid Python syntax) of
the internal graph for the ``forward`` method. See `code`.
[1] a ConstMap following the CONSTANT.cN format of the output in [0].
The indices in the [0] output are keys to the underlying constant's values.
See :ref:`inspecting-code` for details.
"""
r = self.forward.code_with_constants
return (r[0], ConstMap(r[1]))
def save(self, f, **kwargs):
r"""
save(f, _extra_files={})
See :func:`torch.jit.save <torch.jit.save>` for details.
"""
return self._c.save(str(f), **kwargs)
def _save_for_lite_interpreter(self, *args, **kwargs):
r"""
_save_for_lite_interpreter(f)
Add (or update) the bytecode session to the script model. The updated model is used
in lite interpreter for mobile applications.
Args:
f: a string containing a file name.
_extra_files: Map from filename to contents which will be stored as part of 'f'.
"""
return self._c._save_for_mobile(*args, **kwargs)
def _save_to_buffer_for_lite_interpreter(self, *args, **kwargs):
return self._c._save_to_buffer_for_mobile(*args, **kwargs)
def save_to_buffer(self, *args, **kwargs):
return self._c.save_to_buffer(*args, **kwargs)
def get_debug_state(self, *args, **kwargs):
return self._c.get_debug_state()
def extra_repr(self):
return "original_name={}".format(self.original_name)
def graph_for(self, *args, **kwargs):
return self.forward.graph_for(*args, **kwargs)
@property
def original_name(self):
if type(self) == str(self._c._type().name()):
return ""
return str(self._c._type().name())
def define(self, src):
# We use frames_up=1 to get to the proper surrounding scope. The stack
# will look like:
# 0. createResolutionCallback
# 1. define()
# 2. surrounding scope.
#
# createResolutionCallback internally adds 1 to get us to our frame, then
# we add 1 to get to the proper surrounding scope.
rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
self._c._define(self._concrete_type, src, rcb)
def __getattr__(self, attr):
if "_initializing" not in self.__dict__:
raise RuntimeError(
"ScriptModule has not been initialized, did you forget to call super's init?"
)
if self._initializing:
return super(RecursiveScriptModule, self).__getattr__(attr)
# _modules check is before hasattr since modules are included as attributes in _c,
# but we want to get the python wrapper from _modules instead of the raw _c object.
if attr in self._modules:
return self._modules[attr]
elif self._c.hasattr(attr):
return self._c.getattr(attr)
elif self._c._has_method(attr):
script_method = self._c._get_method(attr)
# cache method so future calls do not go through __getattr__
# to improve invocation performance
self.__dict__[attr] = script_method
return script_method
return super(RecursiveScriptModule, self).__getattr__(attr)
def __setattr__(self, attr, value):
if self._initializing:
return super(RecursiveScriptModule, self).__setattr__(attr, value)
if attr in self._modules:
self._modules[attr] = value
elif self._c.hasattr(attr):
self._c.setattr(attr, value)
elif (
hasattr(self, "_concrete_type")
and attr in self._concrete_type.get_constants().keys()
):
# TODO: we don't have _concrete_type set after load(), and in general we lose constant information.
# We should encode constants as class type attributes (or something) so it persists across save/load.
raise AttributeError(
"Cannot mutate TorchScript constant value: '{}'. Value: '{}'".format(
attr, value
)
)
else:
# We allow setting Python attributes on the ScriptModule, for
# when people want to stash some convenience info on it.
# TODO: it's possible that the following is confusing:
# s = torch.jit.script(...)
# s.python_attr = ...
# s.save() <--- this doesn't have `python_attr`
# It's fairly trivial to save enough info to warn in this case.
return super(RecursiveScriptModule, self).__setattr__(attr, value)
def __getstate__(self):
raise pickle.PickleError(
"ScriptModules cannot be deepcopied using copy.deepcopy or saved using torch.save. "
+ "Mixed serialization of script and non-script modules is not supported. "
+ "For purely script modules use my_script_module.save(<filename>) instead."
)
def __copy__(self):
return torch.jit._recursive.wrap_cpp_module(copy.copy(self._c))
def __deepcopy__(self, memo):
return torch.jit._recursive.wrap_cpp_module(copy.deepcopy(self._c, memo))
# Python magic methods do method lookups on an object's class type, instead of looking up
# the method defines on the class instance. In order to continue to expose the magic methods
# of builtin-containers (ModuleList, Sequential, ModuleDict) to Python, we
# define magic methods here as a shim to the correct attribute.
def forward_magic_method(self, method_name, *args, **kwargs):
self_method = getattr(self, method_name)
if getattr(self_method, "__func__", None) == getattr(
RecursiveScriptModule, method_name
):
raise NotImplementedError()
return self_method(*args, **kwargs)
def __iter__(self):
return self.forward_magic_method("__iter__")
def __getitem__(self, idx):
return self.forward_magic_method("__getitem__", idx)
def __len__(self):
return self.forward_magic_method("__len__")
def __contains__(self, key):
return self.forward_magic_method("__contains__", key)
# dir is defined by the base nn.Module, so instead of throwing if
# it is not overridden, we call into the nn.Module __dir__ method
def __dir__(self):
self_method = self.__dir__
if self_method.__func__ == _get_function_from_type( # type: ignore[attr-defined]
RecursiveScriptModule, "__dir__"
):
return super(RecursiveScriptModule, self).__dir__()
return self_method()
# to resolve bool(value), Python looks if __bool__ is defined then __iter__
# is defined then returns true for classes. Since __iter__() on this
# class throws if it isn't overridden, we define __bool__ to preserve default behavior
def __bool__(self):
self_method = self.__bool__
if self_method.__func__ == _get_function_from_type( # type: ignore[attr-defined]
RecursiveScriptModule, "__bool__"
):
return True
return self_method()
def _replicate_for_data_parallel(self):
# we have to initialize ScriptModule properly so that
# it works with pybind11
def init_fn(script_module):
# Don't do anything here, we'll initialize the ScriptModule below
return
return RecursiveScriptModule._construct(
self._c._replicate_for_data_parallel(), init_fn
)
# Need to copy all RecursiveScriptModule methods to ScriptModule.
#
# This is because `super(MyScriptModule, self).foo()` does not use
# `__getattr__` to look up `foo`. So we need to make each method available on
# the ScriptModule manually.
for name, item in RecursiveScriptModule.__dict__.items():
if not callable(item) and not isinstance(item, property):
continue
if name.startswith("__") or hasattr(ScriptModule, name):
continue
# We can copy over the implementation wholesale because besides the
# `super()` thing above, ScriptModule behaves exactly like
# RecursiveScriptModule
setattr(ScriptModule, name, item)
def _get_methods(cls):
import inspect
# In Python 3 unbound methods are functions, but in Python 2 they are methods
return inspect.getmembers(
cls, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x)
)
_compiled_methods_allowlist = {
"forward",
"register_buffer",
"register_parameter",
"add_module",
"_apply",
"apply",
"cuda",
"cpu",
"to",
"type",
"float",
"double",
"half",
"state_dict",
"_save_to_state_dict",
"load_state_dict",
"_load_from_state_dict",
"_named_members",
"parameters",
"named_parameters",
"buffers",
"named_buffers",
"children",
"named_children",
"modules",
"named_modules",
"zero_grad",
"share_memory",
"_get_name",
"extra_repr",
"_slow_forward",
"_tracing_name",
"eval",
"train",
}
def _make_fail(name):
def fail(self, *args, **kwargs):
raise RuntimeError(name + " is not supported on ScriptModules")
return fail
for name, method in _get_methods(torch.nn.Module):
if name.startswith("__"):
continue
if (
name not in RecursiveScriptModule.__dict__
and name not in _compiled_methods_allowlist
):
setattr(RecursiveScriptModule, method.__name__, _make_fail(name))
else:
# TODO MAKE SURE THAT DISABLING WORKS
class ScriptModule(torch.nn.Module): # type: ignore[no-redef]
def __init__(self, arg=None):
super().__init__()
class RecursiveScriptModule(ScriptModule): # type: ignore[no-redef]
def __init__(self, arg=None):
super().__init__()
def call_prepare_scriptable_func_impl(obj, memo):
if not isinstance(obj, torch.nn.Module):
return obj
obj_id = id(obj)
# If obj_id is in memo, obj has already been prepared or is being
# prepared in another call up the stack.
if obj_id in memo:
return memo[id(obj)]
obj = obj.__prepare_scriptable__() if hasattr(obj, '__prepare_scriptable__') else obj # type: ignore[operator]
# Record obj in memo to avoid infinite recursion in the case of cycles in the module
# hierarchy when recursing below.
memo[obj_id] = obj
new_obj_dict = {}
for name, sub_module in obj.__dict__.items():
if name == '_modules':
for k, v in sub_module.items():
sub_module[k] = call_prepare_scriptable_func_impl(v, memo)
new_obj_dict[name] = sub_module
elif isinstance(sub_module, torch.nn.Module) and not isinstance(sub_module, ScriptModule):
new_obj_dict[name] = call_prepare_scriptable_func_impl(sub_module, memo)
else:
new_obj_dict[name] = sub_module
for k, v in new_obj_dict.items():
        obj.__dict__[k] = v
return obj
def call_prepare_scriptable_func(obj):
memo: Dict[int, torch.nn.Module] = {}
return call_prepare_scriptable_func_impl(obj, memo)
def _script_pdt(obj, optimize=None, _frames_up=0, _rcb=None,
example_inputs: Union[List[Tuple], Dict[Callable, List[Tuple]], None] = None):
# This is a private API, intended for internal use only. Usage of this API is only for experimental
# purposes only and is highly discouraged.
global type_trace_db
if not _enabled:
return obj
if optimize is not None:
warnings.warn(
"`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
)
# No-op for modules and functions that are already scripted
if isinstance(obj, ScriptModule):
return obj
if isinstance(obj, ScriptFunction):
return obj
if example_inputs:
# If MonkeyType is installed, enable profile directed type annotation
# Check if example_inputs are defined and generate call traces
# for the method by running eager mode version of the method with
# the provide example inputs. This logs all the traces in type_trace_db
type_trace_db = JitTypeTraceStore()
if monkeytype_trace:
monkeytype_config = JitTypeTraceConfig(type_trace_db)
with monkeytype_trace(monkeytype_config):
if isinstance(example_inputs, Dict):
# If the obj is an nn.Module or a class, then each method is
# executed with the arguments provided in the example inputs.
# example inputs here will be of type Dict(class.method, (arguments))
# This is used to infer type annotations for those methods
# which are not called directly under the hood of monkeytype.
for module, example_input in example_inputs.items():
for example in example_input:
module(*example)
elif isinstance(example_inputs, List):
for examples in example_inputs:
obj(*examples)
else:
warnings.warn("Error: Unable to infer types. Please format the inputs to type `List[Tuple]`"
" or `Dict[Callable, List[Tuple]]` to be run with MonkeyType.")
else:
warnings.warn("Warning: monkeytype is not installed. Please install https://github.com/Instagram/MonkeyType "
"to enable Profile-Directed Typing in TorchScript. Refer to "
"https://github.com/Instagram/MonkeyType/blob/master/README.rst to install MonkeyType. ")
return script(obj, optimize, _frames_up, _rcb)
def script(obj, optimize=None, _frames_up=0, _rcb=None):
r"""
Scripting a function or ``nn.Module`` will inspect the source code, compile
it as TorchScript code using the TorchScript compiler, and return a :class:`ScriptModule` or
:class:`ScriptFunction`. TorchScript itself is a subset of the Python language, so not all
features in Python work, but we provide enough functionality to compute on
tensors and do control-dependent operations. For a complete guide, see the
:ref:`language-reference`.
``torch.jit.script`` can be used as a function for modules and functions, and as a decorator
``@torch.jit.script`` for :ref:`torchscript-classes` and functions.
Args:
obj (callable, class, or ``nn.Module``): The ``nn.Module``, function, or class type to
compile.
Returns:
If ``obj`` is ``nn.Module``, ``script`` returns
a :class:`ScriptModule` object. The returned :class:`ScriptModule` will
have the same set of sub-modules and parameters as the
original ``nn.Module``. If ``obj`` is a standalone function,
a :class:`ScriptFunction` will be returned.
**Scripting a function**
The ``@torch.jit.script`` decorator will construct a :class:`ScriptFunction`
by compiling the body of the function.
Example (scripting a function):
.. testcode::
import torch
@torch.jit.script
def foo(x, y):
if x.max() > y.max():
r = x
else:
r = y
return r
print(type(foo)) # torch.jit.ScriptFunction
# See the compiled graph as Python code
print(foo.code)
# Call the function using the TorchScript interpreter
foo(torch.ones(2, 2), torch.ones(2, 2))
.. testoutput::
:hide:
...
**Scripting an nn.Module**
Scripting an ``nn.Module`` by default will compile the ``forward`` method and recursively
    compile any methods, submodules, and functions called by ``forward``. If an ``nn.Module`` only uses
    features supported in TorchScript, no changes to the original module code should be necessary. ``script``
    will construct a :class:`ScriptModule` that has copies of the attributes, parameters, and methods of
the original module.
Example (scripting a simple module with a Parameter):
.. testcode::
import torch
class MyModule(torch.nn.Module):
def __init__(self, N, M):
super(MyModule, self).__init__()
# This parameter will be copied to the new ScriptModule
self.weight = torch.nn.Parameter(torch.rand(N, M))
# When this submodule is used, it will be compiled
self.linear = torch.nn.Linear(N, M)
def forward(self, input):
output = self.weight.mv(input)
# This calls the `forward` method of the `nn.Linear` module, which will
# cause the `self.linear` submodule to be compiled to a `ScriptModule` here
output = self.linear(output)
return output
scripted_module = torch.jit.script(MyModule(2, 3))
Example (scripting a module with traced submodules):
.. testcode::
import torch
import torch.nn as nn
import torch.nn.functional as F
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
                # torch.jit.trace produces ScriptModules for conv1 and conv2
self.conv1 = torch.jit.trace(nn.Conv2d(1, 20, 5), torch.rand(1, 1, 16, 16))
self.conv2 = torch.jit.trace(nn.Conv2d(20, 20, 5), torch.rand(1, 20, 16, 16))
def forward(self, input):
input = F.relu(self.conv1(input))
input = F.relu(self.conv2(input))
return input
scripted_module = torch.jit.script(MyModule())
To compile a method other than ``forward`` (and recursively compile anything it calls), add
the :func:`@torch.jit.export <torch.jit.export>` decorator to the method. To opt out of compilation
use :func:`@torch.jit.ignore <torch.jit.ignore>` or :func:`@torch.jit.unused <torch.jit.unused>`.
Example (an exported and ignored method in a module)::
import torch
import torch.nn as nn
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
@torch.jit.export
def some_entry_point(self, input):
return input + 10
@torch.jit.ignore
def python_only_fn(self, input):
# This function won't be compiled, so any
# Python APIs can be used
import pdb
pdb.set_trace()
def forward(self, input):
if self.training:
self.python_only_fn(input)
return input * 99
scripted_module = torch.jit.script(MyModule())
print(scripted_module.some_entry_point(torch.randn(2, 2)))
print(scripted_module(torch.randn(2, 2)))
"""
if not _enabled:
return obj
if optimize is not None:
        warnings.warn(
            "`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution()` instead"
        )
# No-op for modules and functions that are already scripted
if isinstance(obj, ScriptModule):
return obj
if isinstance(obj, ScriptFunction):
return obj
if isinstance(obj, torch.nn.Module):
obj = call_prepare_scriptable_func(obj)
return torch.jit._recursive.create_script_module(
obj, torch.jit._recursive.infer_methods_to_compile
)
qualified_name = _qualified_name(obj)
if inspect.isclass(obj):
# If this type is a `nn.Module` subclass, they probably meant to pass
# an instance instead of a Module
if issubclass(obj, torch.nn.Module):
raise RuntimeError(
"Type '{}' cannot be compiled since it inherits"
" from nn.Module,"
" pass an instance instead".format(obj)
)
        # Enums are automatically usable in TorchScript, so explicitly scripting
        # them is not necessary, but not harmful either.
if issubclass(obj, enum.Enum):
return obj
if not _is_new_style_class(obj):
raise RuntimeError(
"TorchScript classes must be new-style classes. "
"Please inherit from 'object'."
)
if len(obj.mro()) > 2:
            raise RuntimeError(
                "TorchScript classes do not support inheritance yet. "
                "Please directly inherit from 'object'."
)
if _rcb is None:
_rcb = _jit_internal.createResolutionCallbackFromFrame(_frames_up + 1)
_compile_and_register_class(obj, _rcb, qualified_name)
return obj
else:
        # this is a decorated fn, and we need to get the underlying fn and its rcb
if hasattr(obj, "__script_if_tracing_wrapper"):
obj = obj.__original_fn
_rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
_check_directly_compile_overloaded(obj)
maybe_already_compiled_fn = _try_get_jit_cached_function(obj)
if maybe_already_compiled_fn:
return maybe_already_compiled_fn
ast = get_jit_def(obj, obj.__name__)
if _rcb is None:
_rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
fn = torch._C._jit_script_compile(
qualified_name, ast, _rcb, get_default_args(obj)
)
# Forward docstrings
fn.__doc__ = obj.__doc__
_set_jit_function_cache(obj, fn)
return fn
# overloads are registered in _jit_internal and compiled here so that _overload
# can be used in nn/functional.py without an import cycle
def _check_overload_defaults(impl_defaults, overload_defaults, loc):
for name, overload_value in overload_defaults.items():
if name not in impl_defaults or impl_defaults[name] != overload_value:
            raise torch.jit.frontend.FrontendError(
                loc,
                "Default parameters on overloads do not affect the runtime so they "
                "must be equal to the default parameter on the implementation function. Found on "
                "parameter {name}".format(name=name),
)
def _compile_function_with_overload(overload_fn, qual_name, impl_fn):
overload_decl = get_jit_def(overload_fn, overload_fn.__name__).decl()
overload_signature = torch.jit.annotations.get_signature(
overload_fn, None, None, inspect.ismethod(overload_fn)
)
impl_ast = get_jit_def(impl_fn, impl_fn.__name__)
overload_defaults = get_default_args(overload_fn)
implementation_defaults = get_default_args(impl_fn)
_rcb = _jit_internal.createResolutionCallbackFromClosure(impl_fn)
_check_overload_defaults(
implementation_defaults, overload_defaults, overload_decl.range()
)
fn = torch._C._jit_script_compile_overload(
qual_name,
overload_decl,
impl_ast,
_rcb,
implementation_defaults,
overload_signature,
)
return fn
def _get_overloads(obj):
# check for cached compiled fns
existing_compiled_fns = _try_get_jit_cached_overloads(obj)
qual_name = _qualified_name(obj)
uncompiled_overloads = _jit_internal._get_fn_overloads(qual_name)
if uncompiled_overloads is None:
return existing_compiled_fns
compiled_fns = []
for overload_fn in uncompiled_overloads:
compiled_fns.append(
_compile_function_with_overload(overload_fn, qual_name, obj)
)
if existing_compiled_fns:
compiled_fns = existing_compiled_fns + compiled_fns
# cache compilation, remove information stored to do compilation
_set_jit_overload_cache(obj, compiled_fns)
_jit_internal._clear_fn_overloads(qual_name)
return compiled_fns
def _check_directly_compile_overloaded(obj):
qual_name = _qualified_name(obj)
if _jit_internal._get_fn_overloads(qual_name) or _try_get_jit_cached_overloads(obj):
raise RuntimeError(
"Function {} cannot be directly compiled because it"
" is overloaded. It must be used in a context of a function"
" where its inputs can determine which overload to call.".format(qual_name)
)
def interface(obj):
if not inspect.isclass(obj):
raise RuntimeError("interface must be applied to a class")
if not _is_new_style_class(obj):
raise RuntimeError("TorchScript interfaces must inherit from 'object'")
# Expected MRO is:
# User module
# torch.nn.modules.module.Module
# object
is_module_interface = issubclass(obj, torch.nn.Module) and len(obj.mro()) == 3
if not is_module_interface and len(obj.mro()) > 2:
raise RuntimeError(
"TorchScript interface does not support inheritance yet. "
"Please directly inherit from 'object' or 'nn.Module'."
)
qualified_name = _qualified_name(obj)
rcb = _jit_internal.createResolutionCallbackFromFrame(1)
# if this type is a `nn.Module` subclass, generate a module interface type
# instead of a class interface type; a module interface type only compiles
# the user provided methods as part of the interface
ast = get_jit_class_def(obj, obj.__name__)
mangled_classname = torch._C._jit_script_interface_compile(
qualified_name, ast, rcb, is_module_interface
)
obj.__torch_script_interface__ = mangled_classname
return obj
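# Minimal usage sketch of the decorator above; `_ExampleTensorProcessor` is a
# hypothetical name and the helper is illustrative only. A module interface
# declares method signatures; the bodies are placeholders.
def _example_interface_declaration():
    @interface
    class _ExampleTensorProcessor(torch.nn.Module):
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            pass
    return _ExampleTensorProcessor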
def _recursive_compile_class(obj, loc):
_qual_name = _qualified_name(obj)
# We're starting a new compilation, so update the error call stack in
# case it fails
error_stack = torch._C.CallStack(_qual_name, loc)
rcb = _jit_internal.createResolutionCallbackForClassMethods(obj)
return _compile_and_register_class(obj, rcb, _qual_name)
CompilationUnit = torch._C.CompilationUnit
set_module(CompilationUnit, "torch.jit")
def _unwrap_optional(x):
assert x is not None, "Unwrapping null optional"
return x
_register_builtin(_unwrap_optional, "aten::_unwrap_optional")
_register_builtin(_jit_internal.is_scripting, "aten::is_scripting")
_register_builtin(has_torch_function, "aten::has_torch_function")
_register_builtin(has_torch_function_unary, "aten::has_torch_function")
_register_builtin(has_torch_function_variadic, "aten::has_torch_function")
|
py | 1a360dd013353df85f0f919bd2c0d6f8bf41d58a | # Copyright 2016 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_log import log as logging
from oslo_utils import fileutils
from oslo_utils import uuidutils
import webob.exc
from neutron.common import test_lib
from neutron.tests.unit.extensions import test_l3
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.cisco.common import cisco_constants
from networking_cisco.plugins.cisco.device_manager.plugging_drivers import (
aci_vlan_trunking_driver as aci_vlan)
from networking_cisco.plugins.cisco.extensions import routerrole
from networking_cisco.tests.unit.cisco.l3 import (
test_l3_router_appliance_plugin)
_uuid = uuidutils.generate_uuid
LOG = logging.getLogger(__name__)
ROUTER_ROLE_ATTR = routerrole.ROUTER_ROLE_ATTR
DEVICE = 'mydev'
PORT_ID = 'myportid'
MAC_ADDRESS = '00:11:22:33:44:55'
APP_PROFILE = 'myAppProfile'
SEGMENT_ID = '11'
NETWORK_TYPE = 'opflex'
TENANT = 'mytenent'
HOST = 'ubuntu'
NETWORK_TENANT = 'net_tenant'
EPG_NAME = 'myEpg'
APIC_VLAN1 = 11
APIC_VLAN2 = 12
class FakePortDb(object):
def __init__(self, id, network_id, device_owner, device_id):
self.id = id
self.network_id = network_id
self.device_id = device_id
self.device_owner = device_owner
self.hosting_info = {}
self.extra_subnets = []
def get(self, name):
return self[name]
def __getitem__(self, key):
if key == 'id':
return self.id
if key == 'network_id':
return self.network_id
if key == 'device_owner':
return self.device_owner
if key == 'device_id':
return self.device_id
if key == 'extra_subnets':
return self.extra_subnets
if key == 'hosting_info':
return self.hosting_info
class TestAciVLANTrunkingPlugDriverBase(
test_l3_router_appliance_plugin.L3RouterApplianceTestCaseBase,
test_l3.L3NatTestCaseMixin):
"""Test class for Base ACI VLAN Trunking Plugging driver
This class tests the functionality of the ACI VLAN Trunking Plugging
    driver, which is independent of the workflow used (GBP or Neutron)
"""
# we use router types defined in .ini file.
configure_routertypes = False
router_type = 'ASR1k_Neutron_router'
def setUp(self):
super(TestAciVLANTrunkingPlugDriverBase, self).setUp(
create_mgmt_nw=False)
# save possible test_lib.test_config 'config_files' dict entry so we
# can restore it after tests since we will change its value
self._old_config_files = copy.copy(test_lib.test_config.get(
'config_files'))
# include config files for device manager service plugin and router
# service plugin since we define a number of hosting device templates,
# hosting devices and routertypes there
self._add_device_manager_plugin_ini_file()
self._add_router_plugin_ini_file()
#TODO(bobmel): Fix bug in test_extensions.py and we can remove the
# below call to setup_config()
self.setup_config()
self.l3_plugin._core_plugin.mechanism_manager = mock.MagicMock()
plug = aci_vlan.AciVLANTrunkingPlugDriver()
plug._apic_driver = mock.Mock()
self.plugging_driver = plug
self.vlan_dict = {'net1': APIC_VLAN1, 'net2': APIC_VLAN2}
def tearDown(self):
if self._old_config_files is None:
test_lib.test_config.pop('config_files', None)
else:
test_lib.test_config['config_files'] = self._old_config_files
super(TestAciVLANTrunkingPlugDriverBase, self).tearDown()
def test_create_hosting_device_resources(self):
tenant_id = 'some_tenant_id'
ctx = bc.context.Context('', tenant_id, is_admin=True)
mgmt_context = {'mgmt_nw_id': None}
res = self.plugging_driver.create_hosting_device_resources(
ctx, "some_id", tenant_id, mgmt_context, 2)
self.assertIsNone(res['mgmt_port'])
self.assertEqual(1, len(res))
def test_create_hosting_device_resources_no_mgmt_context(self):
tenant_id = 'some_tenant_id'
ctx = bc.context.Context('', tenant_id, is_admin=True)
res = self.plugging_driver.create_hosting_device_resources(
ctx, "some_id", tenant_id, None, 2)
self.assertIsNone(res['mgmt_port'], res)
self.assertEqual(1, len(res))
def test_get_hosting_device_resources_by_complementary_id(self):
tenant_id = 'some_tenant_id'
ctx = bc.context.Context('', tenant_id, is_admin=True)
mgmt_context = {'mgmt_nw_id': None}
res = self.plugging_driver.create_hosting_device_resources(
ctx, "some_id", tenant_id, mgmt_context, 1)
# ports that should not be returned
with self.port(), self.port(device_id='uuid2'), self.port(
tenant_id=tenant_id):
res_get = self.plugging_driver.get_hosting_device_resources(
ctx, '', 'some_id', tenant_id, None)
self.assertIsNone(res_get['mgmt_port'])
self.assertEqual(1, len(res))
def test_get_hosting_device_resources_by_device_id(self):
tenant_id = 'some_tenant_id'
ctx = bc.context.Context('', tenant_id, is_admin=True)
mgmt_context = {'mgmt_nw_id': None}
res = self.plugging_driver.create_hosting_device_resources(
ctx, "some_id", tenant_id, mgmt_context, 1)
# update attributes of created ports to fake what Nova updates
hd_uuid = 'hd_uuid1'
update_spec = {'port': {'device_id': hd_uuid,
'device_owner': 'nova'}}
for hd_port in self._list('ports')['ports']:
self._update('ports', hd_port['id'], update_spec)
# ports that should not be returned
with self.port(), self.port(device_id='uuid2'), self.port(
tenant_id=tenant_id), self.port(tenant_id=tenant_id,
device_owner='other_uuid'):
res_get = self.plugging_driver.get_hosting_device_resources(
ctx, hd_uuid, 'some_id', tenant_id, None)
self.assertIsNone(res_get['mgmt_port'])
self.assertEqual(1, len(res))
def test_delete_hosting_device_resources(self):
tenant_id = 'some_tenant_id'
ctx = bc.context.Context('', tenant_id, is_admin=True)
mgmt_context = {'mgmt_nw_id': None}
res = self.plugging_driver.create_hosting_device_resources(
ctx, "some_id", tenant_id, mgmt_context, 2)
nets = self._list('networks')
self.assertEqual(0, len(nets['networks']))
subnets = self._list('subnets')
self.assertEqual(0, len(subnets['subnets']))
ports = self._list('ports')
self.assertEqual(0, len(ports['ports']))
# avoid passing the mgmt port twice in argument list
mgmt_port = res['mgmt_port']
del res['mgmt_port']
self.plugging_driver.delete_hosting_device_resources(
ctx, tenant_id, mgmt_port, **res)
nets = self._list('networks')['networks']
# no networks and subnets should remain
self.assertEqual(0, len(nets))
subnets = self._list('subnets')['subnets']
self.assertEqual(0, len(subnets))
ports = self._list('ports')
self.assertEqual(0, len(ports['ports']))
def test_transit_nets_cfg_invalid_file_format(self):
self.plugging_driver._cfg_file = fileutils.write_to_tempfile(
("""{
'EDGENAT': {
'gateway_ip': '1.109.100.254',
'cidr_exposed': '1.109.100.1/24',
'segmentation_id': 1066
}
}
{
'EDGENATBackup': {
'gateway_ip': '1.209.200.254',
'cidr_exposed': '1.209.200.1/24',
'segmentation_id': 1066
}
}""").encode('utf-8')
)
# TODO(thbachman): couldn't get assertRaises to work here,
# so used this construct instead
try:
# just accessing the member should trigger the exception
self.plugging_driver.transit_nets_cfg
self.assertTrue(False)
except aci_vlan.AciDriverConfigInvalidFileFormat:
self.assertTrue(True)
fileutils.delete_if_exists(self.plugging_driver._cfg_file)
def test_config_sanity_check(self):
test_config1 = {
'Datacenter-Out': {
'cidr_exposed': '1.103.2.0/24'
}
}
test_config2 = {
'Datacenter-Out': {
'gateway_ip': '1.103.2.1',
}
}
test_config3 = {
'Datacenter-Out': {
'gateway_ip': '1.103.2.254',
'cidr_exposed': '1.103.2.1/24',
}
}
self.assertRaises(aci_vlan.AciDriverConfigMissingGatewayIp,
self.plugging_driver._sanity_check_config,
test_config1)
self.assertRaises(aci_vlan.AciDriverConfigMissingCidrExposed,
self.plugging_driver._sanity_check_config,
test_config2)
self.assertTrue(
test_config3,
self.plugging_driver._sanity_check_config(test_config3))
def test_no_driver(self):
self.plugging_driver._apic_driver = None
self.core_plugin.mechanism_manager.mech_drivers = {}
# TODO(thbachman): couldn't get assertRaises to work here,
# so used this construct instead
try:
self.plugging_driver.apic_driver()
self.assertTrue(False)
except aci_vlan.AciDriverNoAciDriverInstalledOrConfigured:
self.assertTrue(True)
class TestAciVLANTrunkingPlugDriverGbp(
test_l3_router_appliance_plugin.L3RouterApplianceTestCaseBase,
test_l3.L3NatTestCaseMixin):
"""GBP-specific workflow testing of ACI VLAN driver
This tests the GBP-specific workflow for the ACI VLAN Trunking
Plugging driver.
"""
# we use router types defined in .ini file.
configure_routertypes = False
router_type = 'ASR1k_Neutron_router'
def setUp(self):
super(TestAciVLANTrunkingPlugDriverGbp, self).setUp(
create_mgmt_nw=False)
# save possible test_lib.test_config 'config_files' dict entry so we
# can restore it after tests since we will change its value
self._old_config_files = copy.copy(test_lib.test_config.get(
'config_files'))
# include config files for device manager service plugin and router
# service plugin since we define a number of hosting device templates,
# hosting devices and routertypes there
self._add_device_manager_plugin_ini_file()
self._add_router_plugin_ini_file()
#TODO(bobmel): Fix bug in test_extensions.py and we can remove the
# below call to setup_config()
self.setup_config()
self.mock_gbp_driver = mock.MagicMock()
self.mock_gbp_plugin = mock.MagicMock()
self.mock_gbp_plugin.policy_driver_manager.policy_drivers = {
'apic': self.mock_gbp_driver}
g_p_mock = mock.MagicMock()
plugins = {'CORE': self.core_plugin,
'GROUP_POLICY': self.mock_gbp_plugin,
bc.constants.L3: self.l3_plugin,
cisco_constants.DEVICE_MANAGER: self.core_plugin}
g_p_mock.side_effect = lambda svc='CORE': plugins.get(svc)
mock.patch('networking_cisco.backwards_compatibility.get_plugin',
g_p_mock).start()
plug = aci_vlan.AciVLANTrunkingPlugDriver()
plug.apic_driver.gbp_plugin.get_l3p_id_from_router_id = mock.Mock(
return_value='somerouterid')
plug.apic_driver.l3out_vlan_alloc.get_vlan_allocated = self._stub_vlan
self.plugging_driver = plug
self.vlan_dict = {'net1': APIC_VLAN1,
'net2': APIC_VLAN2,
'Datacenter-Out': APIC_VLAN2}
def tearDown(self):
if self._old_config_files is None:
test_lib.test_config.pop('config_files', None)
else:
test_lib.test_config['config_files'] = self._old_config_files
# manager.NeutronManager.get_service_plugins = self._real_get_plugins
super(TestAciVLANTrunkingPlugDriverGbp, self).tearDown()
def _stub_vlan(self, net, vrf, vrf_tenant):
return self.vlan_dict.get(net)
def _gen_ext_net_name(self, name):
return aci_vlan.APIC_OWNED + _uuid() + "-" + name
def _set_apic_driver_mocks(self, router):
apic_driver = self.plugging_driver.apic_driver
apic_driver.gbp_plugin.get_l3p_id_from_router_id = mock.Mock(
return_value=router['id'])
apic_driver.get_vrf_details = mock.Mock(
return_value={'l3_policy_id': router['id']})
def _verify_vrf(self, vrf_id, router):
self.assertEqual(router['id'], vrf_id)
def test_extend_hosting_port_info_adds_segmentation_id_internal(self):
with self.network(name=self._gen_ext_net_name(
'Datacenter-Out')) as ext_network:
ext_net = ext_network['network']
ext_net_id = ext_net['id']
self._set_net_external(ext_net_id)
with self.subnet() as subnet1:
sn1 = subnet1['subnet']
gw_info = {'network_id': ext_net_id}
with self.router(external_gateway_info=gw_info,
tenant_id=sn1['tenant_id']) as router1:
r1 = router1['router']
hosting_info = {}
fake_port_db_obj = FakePortDb('fakeuuid',
sn1['network_id'],
bc.constants.DEVICE_OWNER_ROUTER_INTF, r1['id'])
fake_port_db_obj.hosting_info['segmentation_id'] = 50
hosting_device = {'id':
'00000000-0000-0000-0000-000000000002'}
tenant_id = 'tenant_uuid1'
ctx = bc.context.Context('', tenant_id, is_admin=True)
self._set_apic_driver_mocks(r1)
self.plugging_driver.extend_hosting_port_info(ctx,
fake_port_db_obj, hosting_device, hosting_info)
self.assertEqual('GigabitEthernet/1/0/1',
hosting_info['physical_interface'])
self.assertEqual(50, hosting_info['segmentation_id'])
self.assertIsNone(hosting_info.get('vrf_id'))
def test_extend_hosting_port_info_adds_segmentation_id_external(self):
with self.network(name=self._gen_ext_net_name(
'Datacenter-Out')) as ext_network:
ext_net = ext_network['network']
ext_net_id = ext_net['id']
self._set_net_external(ext_net_id)
with self.subnet(network=ext_network) as subnet1:
sn1 = subnet1['subnet']
hosting_info = {}
gw_info = {'network_id': ext_net_id}
with self.router(external_gateway_info=gw_info,
tenant_id=sn1['tenant_id']) as router1:
r1 = router1['router']
fake_port_db_obj = FakePortDb('fakeuuid', ext_net_id,
bc.constants.DEVICE_OWNER_ROUTER_GW, r1['id'])
fake_port_db_obj.hosting_info['segmentation_id'] = 40
hosting_device = {'id':
'00000000-0000-0000-0000-000000000002'}
tenant_id = 'tenant_uuid1'
ctx = bc.context.Context('', tenant_id, is_admin=True)
self._set_apic_driver_mocks(r1)
self.plugging_driver.extend_hosting_port_info(ctx,
fake_port_db_obj, hosting_device, hosting_info)
self.assertEqual('GigabitEthernet/2/0/1',
hosting_info['physical_interface'])
self.assertEqual(40, hosting_info['segmentation_id'])
self._verify_vrf(hosting_info['vrf_id'], r1)
# Had to create this b/c the helper won't let you set the name
def _create_subnet_with_name(self, net_id, cidr, name):
data = {'subnet': {'network_id': net_id,
'cidr': cidr,
'name': name,
'ip_version': 4,
'tenant_id': self._tenant_id}}
subnet_req = self.new_create_request('subnets', data, self.fmt)
subnet_res = subnet_req.get_response(self.api)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if subnet_res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=subnet_res.status_int)
return self.deserialize(self.fmt, subnet_res)
def test_extend_hosting_port_info_adds_snat_subnets(self):
TEST_NET_NAME = 'Datacenter-Out'
FAKE_IP = '1.1.1.2'
FAKE_GW = '1.1.1.1'
self.plugging_driver.apic_driver.get_snat_ip_for_vrf = mock.Mock(
return_value={'external_segment_name': TEST_NET_NAME,
'host_snat_ip': FAKE_IP,
'gateway_ip': FAKE_GW,
'prefixlen': 24})
with self.network(name=self._gen_ext_net_name(
TEST_NET_NAME)) as network:
net = network['network']
subnet = self._create_subnet_with_name(net['id'],
'10.0.0.0/24',
aci_vlan.APIC_SNAT_SUBNET)
sn1 = subnet['subnet']
ext_net_id = sn1['network_id']
self._set_net_external(ext_net_id)
gw_info = {'network_id': ext_net_id}
with self.router(external_gateway_info=gw_info,
tenant_id=sn1['tenant_id']) as router1:
r1 = router1['router']
hosting_info = {}
fake_port_db_obj = FakePortDb('fakeuuid', sn1['network_id'],
bc.constants.DEVICE_OWNER_ROUTER_GW, r1['id'])
fake_port_db_obj.hosting_info['segmentation_id'] = 40
hosting_device = {'id': '00000000-0000-0000-0000-000000000002'}
tenant_id = 'tenant_uuid1'
ctx = bc.context.Context('', tenant_id, is_admin=True)
self._set_apic_driver_mocks(r1)
self.plugging_driver.extend_hosting_port_info(ctx,
fake_port_db_obj, hosting_device, hosting_info)
self.assertEqual([{'id': r1['tenant_id'],
'ip': FAKE_IP,
'cidr': sn1['cidr']}],
hosting_info['snat_subnets'])
def test_extend_hosting_port_info_adds_interface_configuration(self):
TEST_INFO_CONFIG_LIST = ['testinfo1', 'testinfo2', 'testinfo3']
self.plugging_driver._default_ext_dict = {
'gateway_ip': '1.103.2.1',
'cidr_exposed': '1.103.2.0/24',
'interface_config': TEST_INFO_CONFIG_LIST
}
with self.network(name=self._gen_ext_net_name(
'Datacenter-Out')) as network1:
with self.subnet(network=network1) as subnet1:
sn1 = subnet1['subnet']
ext_net_id = sn1['network_id']
self._set_net_external(ext_net_id)
gw_info = {'network_id': ext_net_id}
with self.router(external_gateway_info=gw_info,
tenant_id=sn1['tenant_id']) as router1:
r1 = router1['router']
hosting_info = {}
fake_port_db_obj = FakePortDb('fakeuuid',
sn1['network_id'],
bc.constants.DEVICE_OWNER_ROUTER_INTF,
r1['id'])
hosting_device = {'id':
'00000000-0000-0000-0000-000000000002'}
tenant_id = 'tenant_uuid1'
ctx = bc.context.Context('', tenant_id, is_admin=True)
self._set_apic_driver_mocks(r1)
self.plugging_driver.extend_hosting_port_info(ctx,
fake_port_db_obj, hosting_device, hosting_info)
self.assertIsNotNone(hosting_info.get('interface_config'))
for config in hosting_info['interface_config']:
self.assertIn(config, TEST_INFO_CONFIG_LIST)
def test_extend_hosting_port_info_adds_global_configuration(self):
TEST_INFO_CONFIG_LIST = ['testinfo1', 'testinfo2', 'testinfo3']
self.plugging_driver._default_ext_dict = {
'gateway_ip': '1.103.2.1',
'cidr_exposed': '1.103.2.0/24',
'global_config': TEST_INFO_CONFIG_LIST
}
dummy_router = {'id': 'someuuid',
'tenant_id': 'sometenantid',
ROUTER_ROLE_ATTR: None}
self.plugging_driver.l3_plugin.get_router = mock.Mock(
return_value=dummy_router)
with self.network(name=self._gen_ext_net_name(
'Datacenter-Out')) as network1:
with self.subnet(network=network1) as subnet1:
sn1 = subnet1['subnet']
ext_net_id = sn1['network_id']
self._set_net_external(ext_net_id)
hosting_info = {}
fake_port_db_obj = FakePortDb('fakeuuid',
sn1['network_id'],
bc.constants.DEVICE_OWNER_ROUTER_GW,
dummy_router['id'])
hosting_device = {'id':
'00000000-0000-0000-0000-000000000002'}
tenant_id = 'tenant_uuid1'
ctx = bc.context.Context('', tenant_id, is_admin=True)
self._set_apic_driver_mocks(dummy_router)
self.plugging_driver.extend_hosting_port_info(ctx,
fake_port_db_obj, hosting_device, hosting_info)
self.assertIsNotNone(hosting_info.get('global_config'))
for config in hosting_info['global_config']:
self.assertIn(config, TEST_INFO_CONFIG_LIST)
def _update_provider_net_info(self, res_list, fields):
for res in res_list:
pv_info = self._pv_info['vlan'].get(res['id'])
if pv_info is None:
pv_info = self._pv_info['vxlan'].get(res['id'])
if pv_info is None:
nw_type = self._network_type
if not self._pv_info[nw_type]:
tag = {'vlan': 11, 'vxlan': 7000}[nw_type]
pv_info = {'nw_type': nw_type, 'tag': tag}
self._pv_info[nw_type][res['id']] = pv_info
if pv_info is None:
tag = max([i['tag']
for i in self._pv_info[nw_type].values()]) + 1
pv_info = {'nw_type': nw_type, 'tag': tag}
self._pv_info[nw_type][res['id']] = pv_info
res[bc.provider_net.NETWORK_TYPE] = pv_info['nw_type']
res[bc.provider_net.SEGMENTATION_ID] = pv_info['tag']
if fields is not None:
for attr in list(res):
if attr not in fields:
del res[attr]
def _mocked_get_network(self, context, id, fields=None):
res = self.real_get_network(context, id)
self._update_provider_net_info([res], fields)
return res
def _mocked_get_networks(self, *args, **kwargs):
if len(args) >= 3:
fields = args[2]
list_args = [i for i in args]
list_args[2] = None
args = list_args
else:
fields = kwargs.pop('fields', None)
res_list = self.real_get_networks(*args, **kwargs)
self._update_provider_net_info(res_list, fields)
return res_list
def _test_allocate_hosting_port(self, test_info1):
def _validate_allocation(plugin, ctx, r, port_db, test_info,
i, hd, plugging_driver):
binding_db = plugin._allocate_hosting_port(
ctx, r['id'], port_db, hd['id'], plugging_driver)
self.assertIsNotNone(binding_db.hosting_port_id)
self.assertEqual(port_db.id, binding_db.hosting_port_id)
self.assertEqual(test_info['vlan_tags'][i],
binding_db.segmentation_id)
with self.network(name=self._gen_ext_net_name(
'Datacenter-Out')) as ext_network:
ext_net = ext_network['network']
ext_net_id = ext_net['id']
self._set_net_external(ext_net_id)
with self.subnet(network=ext_network) as subnet1:
sn1 = subnet1['subnet']
gw_info = {'network_id': ext_net_id}
with self.router(external_gateway_info=gw_info,
tenant_id=sn1['tenant_id']) as router1:
r1 = router1['router']
hds = self._list('hosting_devices')['hosting_devices']
hd = hds[0]
self._pv_info = {'vlan': {}, 'vxlan': {}}
self._network_type = test_info1['network_types'][0]
self.real_get_network = self.core_plugin.get_network
self.real_get_networks = self.core_plugin.get_networks
self._set_apic_driver_mocks(r1)
with mock.patch.object(self.core_plugin,
'get_network') as m1,\
mock.patch.object(self.core_plugin,
'get_networks') as m2:
m1.side_effect = self._mocked_get_network
m2.side_effect = self._mocked_get_networks
u1_ctx = bc.context.Context('', r1['tenant_id'],
is_admin=True)
gw_port_db = self.core_plugin._get_ports_query(
u1_ctx, filters={'network_id': [ext_net_id]}).one()
_validate_allocation(
self.l3_plugin, u1_ctx, r1, gw_port_db,
test_info1, 0, hd, self.plugging_driver)
for i in range(1, len(test_info1['network_types'])):
cidr = '1.0.' + str(i) + '.0/24'
with self.subnet(cidr=cidr) as subnet2:
sn2 = subnet2['subnet']
itfc_info = self._router_interface_action(
'add', r1['id'], sn2['id'], None)
self._network_type = test_info1[
'network_types'][i]
port_db = self.core_plugin._get_port(
u1_ctx, itfc_info['port_id'])
_validate_allocation(
self.l3_plugin, u1_ctx, r1,
port_db, test_info1,
i, hd, self.plugging_driver)
def test_allocate_hosting_port_vlan_network_all_unused(self):
self._test_allocate_hosting_port({'network_types': ['vlan'],
'vlan_tags': [APIC_VLAN1]})
def test_allocate_hosting_port_vlan_network_vlan_already_allocated(self):
self._test_allocate_hosting_port(
{'network_types': ['vlan', 'vlan'],
'vlan_tags': [APIC_VLAN1, APIC_VLAN2]})
def test_allocate_hosting_port_vlan_network_not_found_failure(self):
with self.network(name=self._gen_ext_net_name(
'Datacenter-Out')) as ext_network:
ext_net = ext_network['network']
ext_net_id = ext_net['id']
self._set_net_external(ext_net_id)
with self.subnet() as subnet1:
sn1 = subnet1['subnet']
gw_info = {'network_id': ext_net_id}
with self.router(external_gateway_info=gw_info,
tenant_id=sn1['tenant_id']) as router1:
r1 = router1['router']
u_ctx = bc.context.Context(
'', r1['tenant_id'], is_admin=True)
gw_port_db = self.core_plugin._get_ports_query(
u_ctx, filters={'network_id': [ext_net_id]}).one()
self._set_apic_driver_mocks(r1)
allocations = self.plugging_driver.allocate_hosting_port(
u_ctx, r1['id'], gw_port_db,
'vlan', 'non_existant_uuid')
self.assertIsNone(allocations)
def test_allocate_hosting_port_info_adds_segment_id(self):
self.plugging_driver._default_ext_dict = {
'gateway_ip': '1.103.2.254',
'cidr_exposed': '1.103.2.1/24',
'interface_config': 'testinfo1',
'segmentation_id': 3003
}
with self.network(name=self._gen_ext_net_name(
'Datacenter-Out')) as network1:
net1 = network1['network']
self._set_net_external(net1['id'])
net1['provider:network_type'] = 'opflex'
def _return_mocked_net(self, args):
return net1
with self.subnet(network=network1) as subnet1:
sn1 = subnet1['subnet']
fake_port_db_obj = FakePortDb(
'some_dummy_id',
sn1['network_id'],
bc.constants.DEVICE_OWNER_ROUTER_GW,
'dummy_id'
)
hosting_device = {'id': '00000000-0000-0000-0000-000000000002'}
tenant_id = 'tenant_uuid1'
dummy_rid = 'dummy_router_id'
ctx = bc.context.Context('', tenant_id, is_admin=True)
with mock.patch.object(self.core_plugin, 'get_network') as m1:
m1.side_effect = _return_mocked_net
allocations = self.plugging_driver.allocate_hosting_port(
ctx, dummy_rid, fake_port_db_obj,
'opflex', hosting_device['id'])
self.assertEqual(3003, allocations['allocated_vlan'])
def test_allocate_hosting_port_info_exception(self):
self.plugging_driver._default_ext_dict = {
'gateway_ip': '1.103.2.254',
'cidr_exposed': '1.103.2.1/24',
'interface_config': 'testinfo1',
}
with self.network(name=self._gen_ext_net_name(
'Datacenter-Out')) as network1:
net1 = network1['network']
self._set_net_external(net1['id'])
net1['provider:network_type'] = 'opflex'
def _return_mocked_net(self, args):
return net1
with self.subnet(network=network1) as subnet1:
sn1 = subnet1['subnet']
fake_port_db_obj = FakePortDb(
'some_dummy_id',
sn1['network_id'],
bc.constants.DEVICE_OWNER_ROUTER_GW,
'dummy_id'
)
hosting_device = {'id': '00000000-0000-0000-0000-000000000002'}
tenant_id = 'tenant_uuid1'
dummy_rid = 'dummy_router_id'
ctx = bc.context.Context('', tenant_id, is_admin=True)
with mock.patch.object(self.core_plugin, 'get_network') as m1:
m1.side_effect = _return_mocked_net
self.assertRaises(
aci_vlan.AciDriverConfigMissingSegmentationId,
self.plugging_driver.allocate_hosting_port,
ctx, dummy_rid, fake_port_db_obj,
'opflex', hosting_device['id'])
class TestAciVLANTrunkingPlugDriverNeutron(TestAciVLANTrunkingPlugDriverGbp):
"""Neutron-specific workflow testing of ACI VLAN driver
This tests the Neutron-specific workflow for the ACI VLAN Trunking
Plugging driver.
"""
# we use router types defined in .ini file.
configure_routertypes = False
router_type = 'ASR1k_Neutron_router'
def setUp(self):
super(TestAciVLANTrunkingPlugDriverGbp, self).setUp(
create_mgmt_nw=False)
# save possible test_lib.test_config 'config_files' dict entry so we
# can restore it after tests since we will change its value
self._old_config_files = copy.copy(test_lib.test_config.get(
'config_files'))
# include config files for device manager service plugin and router
# service plugin since we define a number of hosting device templates,
# hosting devices and routertypes there
self._add_device_manager_plugin_ini_file()
self._add_router_plugin_ini_file()
#TODO(bobmel): Fix bug in test_extensions.py and we can remove the
# below call to setup_config()
self.setup_config()
self.l3_plugin._core_plugin.mechanism_manager = mock.MagicMock()
plug = aci_vlan.AciVLANTrunkingPlugDriver()
plug.apic_driver.l3out_vlan_alloc.get_vlan_allocated = self._stub_vlan
plug.apic_driver.per_tenant_context = True
self.plugging_driver = plug
self.vlan_dict = {'net1': APIC_VLAN1,
'net2': APIC_VLAN2,
'Datacenter-Out': APIC_VLAN2}
def tearDown(self):
if self._old_config_files is None:
test_lib.test_config.pop('config_files', None)
else:
test_lib.test_config['config_files'] = self._old_config_files
super(TestAciVLANTrunkingPlugDriverGbp, self).tearDown()
def _gen_ext_net_name(self, name):
return name
def _set_apic_driver_mocks(self, router):
apic_driver = self.plugging_driver.apic_driver
apic_driver.get_router_vrf_and_tenant = mock.Mock(
return_value={'aci_name': router['id'],
'aci_tenant': router['tenant_id']})
def _verify_vrf(self, vrf_id, router):
if self.plugging_driver.apic_driver.per_tenant_context:
self.assertEqual(router['tenant_id'], vrf_id)
else:
self.assertEqual(router['id'], vrf_id)
def test_extend_hosting_port_info_adds_snat_subnets(self):
TEST_NET_NAME = 'Datacenter-Out'
FAKE_IP = '1.1.1.2'
FAKE_GW = '1.1.1.1'
self.plugging_driver.apic_driver.get_snat_ip_for_vrf = mock.Mock(
return_value={'external_segment_name': TEST_NET_NAME,
'host_snat_ip': FAKE_IP,
'gateway_ip': FAKE_GW,
'prefixlen': 24})
with self.network(name=self._gen_ext_net_name(
TEST_NET_NAME)) as network:
ext_net = network['network']
ext_net_id = ext_net['id']
self._set_net_external(ext_net_id)
with self.network(name=(aci_vlan.APIC_SNAT_NET + '-' +
ext_net_id)) as snat_net:
net = snat_net['network']
subnet = self._create_subnet_with_name(
net['id'], '10.0.0.0/24', aci_vlan.APIC_SNAT_SUBNET)
sn1 = subnet['subnet']
gw_info = {'network_id': ext_net_id}
with self.router(external_gateway_info=gw_info,
tenant_id=sn1['tenant_id']) as router1:
r1 = router1['router']
hosting_info = {}
fake_port_db_obj = FakePortDb('fakeuuid', ext_net_id,
bc.constants.DEVICE_OWNER_ROUTER_GW, r1['id'])
fake_port_db_obj.hosting_info['segmentation_id'] = 40
hosting_device = {'id':
'00000000-0000-0000-0000-000000000002'}
tenant_id = 'tenant_uuid1'
ctx = bc.context.Context('', tenant_id, is_admin=True)
self._set_apic_driver_mocks(r1)
self.plugging_driver.extend_hosting_port_info(ctx,
fake_port_db_obj, hosting_device, hosting_info)
self.assertEqual([{'id': r1['tenant_id'],
'ip': FAKE_IP,
'cidr': sn1['cidr']}],
hosting_info['snat_subnets'])
def test_extend_hosting_port_info_no_snat_subnets_1(self):
TEST_NET_NAME = 'Datacenter-Out'
FAKE_IP = '1.1.1.2'
FAKE_GW = '1.1.1.1'
self.plugging_driver.apic_driver.get_snat_ip_for_vrf = mock.Mock(
return_value={'external_segment_name': TEST_NET_NAME,
'host_snat_ip': FAKE_IP,
'gateway_ip': FAKE_GW,
'prefixlen': 24})
with self.network(name=self._gen_ext_net_name(
TEST_NET_NAME)) as network:
ext_net = network['network']
ext_net_id = ext_net['id']
self._set_net_external(ext_net_id)
with self.network() as snat_net:
net = snat_net['network']
subnet = self._create_subnet_with_name(
net['id'], '10.0.0.0/24', aci_vlan.APIC_SNAT_SUBNET)
sn1 = subnet['subnet']
gw_info = {'network_id': ext_net_id}
with self.router(external_gateway_info=gw_info,
tenant_id=sn1['tenant_id']) as router1:
r1 = router1['router']
hosting_info = {}
fake_port_db_obj = FakePortDb('fakeuuid', ext_net_id,
bc.constants.DEVICE_OWNER_ROUTER_GW, r1['id'])
fake_port_db_obj.hosting_info['segmentation_id'] = 40
hosting_device = {'id':
'00000000-0000-0000-0000-000000000002'}
tenant_id = 'tenant_uuid1'
ctx = bc.context.Context('', tenant_id, is_admin=True)
self._set_apic_driver_mocks(r1)
self.plugging_driver.extend_hosting_port_info(ctx,
fake_port_db_obj, hosting_device, hosting_info)
self.assertEqual([], hosting_info['snat_subnets'])
def test_extend_hosting_port_info_no_snat_subnets_2(self):
TEST_NET_NAME = 'Datacenter-Out'
FAKE_IP = '1.1.1.2'
FAKE_GW = '1.1.1.1'
self.plugging_driver.apic_driver.get_snat_ip_for_vrf = mock.Mock(
return_value={'external_segment_name': TEST_NET_NAME,
'host_snat_ip': FAKE_IP,
'gateway_ip': FAKE_GW,
'prefixlen': 24})
with self.network(name=self._gen_ext_net_name(
TEST_NET_NAME)) as ext_network:
ext_net = ext_network['network']
ext_net_id = ext_net['id']
self._set_net_external(ext_net_id)
with self.subnet(network=ext_network) as subnet1:
sn1 = subnet1['subnet']
gw_info = {'network_id': ext_net_id}
with self.router(external_gateway_info=gw_info,
tenant_id=sn1['tenant_id']) as router1:
r1 = router1['router']
hosting_info = {}
fake_port_db_obj = FakePortDb('fakeuuid', ext_net_id,
bc.constants.DEVICE_OWNER_ROUTER_GW, r1['id'])
fake_port_db_obj.hosting_info['segmentation_id'] = 40
hosting_device = {'id':
'00000000-0000-0000-0000-000000000002'}
tenant_id = 'tenant_uuid1'
ctx = bc.context.Context('', tenant_id, is_admin=True)
self._set_apic_driver_mocks(r1)
self.plugging_driver.extend_hosting_port_info(ctx,
fake_port_db_obj, hosting_device, hosting_info)
self.assertIsNone(hosting_info.get('snat_subnets'))
def test_extend_hosting_port_adds_segmentation_id_external_1_vrf(self):
self.plugging_driver.apic_driver.per_tenant_context = False
with self.network(name=self._gen_ext_net_name(
'Datacenter-Out')) as ext_network:
ext_net = ext_network['network']
ext_net_id = ext_net['id']
self._set_net_external(ext_net_id)
with self.subnet(network=ext_network) as subnet1:
sn1 = subnet1['subnet']
hosting_info = {}
gw_info = {'network_id': ext_net_id}
with self.router(external_gateway_info=gw_info,
tenant_id=sn1['tenant_id']) as router1:
r1 = router1['router']
fake_port_db_obj = FakePortDb('fakeuuid', ext_net_id,
bc.constants.DEVICE_OWNER_ROUTER_GW, r1['id'])
fake_port_db_obj.hosting_info['segmentation_id'] = 40
hosting_device = {'id':
'00000000-0000-0000-0000-000000000002'}
tenant_id = 'tenant_uuid1'
ctx = bc.context.Context('', tenant_id, is_admin=True)
self._set_apic_driver_mocks(r1)
self.plugging_driver.extend_hosting_port_info(ctx,
fake_port_db_obj, hosting_device, hosting_info)
self.assertEqual('GigabitEthernet/2/0/1',
hosting_info['physical_interface'])
self.assertEqual(40, hosting_info['segmentation_id'])
self._verify_vrf(hosting_info['vrf_id'], r1)
def test_external_net_name(self):
self.assertIsNotNone(self.plugging_driver.get_ext_net_name)
def test_external_net_no_gw(self):
class DummyPort(object):
def __init__(self, router_id):
self.device_id = router_id
self.device_owner = None
drv = self.plugging_driver
tenant_id = 'some_tenant_id'
ctx = bc.context.Context('', tenant_id, is_admin=True)
with self.router() as router1:
r1 = router1['router']
dummy_port = DummyPort(r1['id'])
net_dict, net = drv._get_external_network_dict(ctx, dummy_port)
self.assertIsNone(net)
self.assertEqual({}, net_dict)
def test_allocate_hosting_port_no_router(self):
drv = self.plugging_driver
tenant_id = 'some_tenant_id'
ctx = bc.context.Context('', tenant_id, is_admin=True)
with self.port() as port1:
p1 = port1['port']
self.assertIsNone(drv.allocate_hosting_port(ctx,
None, p1, None, None))
def test_allocate_hosting_port_router_no_gw(self):
drv = self.plugging_driver
tenant_id = 'some_tenant_id'
ctx = bc.context.Context('', tenant_id, is_admin=True)
with self.port() as port1:
p1 = port1['port']
with self.router() as router1:
r1 = router1['router']
p1['device_owner'] = bc.constants.DEVICE_OWNER_ROUTER_INTF
self.assertIsNone(drv.allocate_hosting_port(ctx,
r1['id'], p1, None, None))
|
py | 1a360e3c1a9817baf48fea878f8bbeb0724d5f58 | import random
import logging
from nn.utils import timer
from crypto.utils import load_dlog_table_config
from crypto.sife_dynamic import SIFEDynamic
from crypto.sife_dynamic import SIFEDynamicTPA
from crypto.sife_dynamic import SIFEDynamicClient
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
sec_param_config_file = 'config/sec_param.json'
dlog_table_config_file = 'config/dlog_b8.json'
def test_sife_basic():
logger.info("testing the correctness of basic sife.")
eta = 5
max_test_value = 100
x = [random.randint(0, max_test_value) for i in range(eta)]
y = [random.randint(0, max_test_value) for i in range(eta)]
logger.debug("x: %s" % str(x))
logger.debug("y: %s" % str(y))
check_prod = sum(map(lambda i: x[i] * y[i], range(eta)))
logger.debug('original dot product <x,y>: %d' % check_prod)
sife = SIFEDynamic(eta, sec_param=256)
sife.setup()
pk = sife.generate_public_key(len(x))
ct = sife.encrypt(pk, x)
sk = sife.generate_private_key(y)
max_interprod = max_test_value * max_test_value * eta
with timer('total decryption time:', logger) as t:
dec_prod = sife.decrypt(pk, sk, y, ct, max_interprod)
logger.debug('decrypted dot product <x,y>: %d' % dec_prod)
def test_sife_basic_with_config():
logger.info("testing the correctness of sife using config file.")
eta = 785
# prepare the test data
max_test_value = 10
x = [random.randint(0, max_test_value) for i in range(eta)]
y = [random.randint(0, max_test_value) for i in range(eta)]
logger.debug("x: %s" % str(x))
logger.debug("y: %s" % str(y))
check_prod = sum(map(lambda i: x[i] * y[i], range(eta)))
logger.debug('original dot product <x,y>: %d' % check_prod)
logger.info('loading dlog configuration ...')
with timer('load dlog config, cost time:', logger) as t:
dlog = load_dlog_table_config(dlog_table_config_file)
logger.info('load dlog configuration DONE')
sife = SIFEDynamic(eta, sec_param=256,
sec_param_config=sec_param_config_file, dlog=dlog)
sife.setup()
pk = sife.generate_public_key(len(x))
ct = sife.encrypt(pk, x)
sk = sife.generate_private_key(y)
max_interprod = max_test_value * max_test_value * eta
with timer('total decryption time:', logger) as t:
dec_prod = sife.decrypt(pk, sk, y, ct, max_interprod)
logger.debug('decrypted dot product <x,y>: %d' % dec_prod)
def test_sife_dynamic():
logger.info('test dynamic sife in separate roles ...')
eta = 1000
sec_param = 256
max_test_value = 100
x = [random.randint(0, max_test_value) for i in range(eta)]
y = [random.randint(0, max_test_value) for i in range(eta)]
logger.debug("x: %s" % str(x))
logger.debug("y: %s" % str(y))
check_prod = sum(map(lambda i: x[i] * y[i], range(eta)))
logger.debug('original dot product <x,y>: %d' % check_prod)
logger.info('loading dlog configuration ...')
with timer('load dlog config, cost time:', logger) as t:
dlog = load_dlog_table_config(dlog_table_config_file)
logger.info('load dlog configuration DONE')
sife_tpa = SIFEDynamicTPA(eta, sec_param=sec_param, sec_param_config=sec_param_config_file)
sife_tpa.setup()
sife_enc_client = SIFEDynamicClient(role='enc')
sife_dec_client = SIFEDynamicClient(role='dec', dlog=dlog)
pk = sife_tpa.generate_public_key(len(x))
ct = sife_enc_client.encrypt(pk, x)
sk = sife_tpa.generate_private_key(y)
max_interprod = max_test_value * max_test_value * eta
with timer('total decryption time:', logger) as t:
dec_prod = sife_dec_client.decrypt(pk, sk, y, ct, max_interprod)
logger.debug('decrypted dot product <x,y>: %d' % dec_prod) |
py | 1a360ed8df827b29df633d81e3c44eb977c1373a | import os
import re
import yaml
from os.path import join as pjoin
def find_test_file(filename, module=None):
"""Looks for a test case or related file in the following order:
- test_cases/module/filename (if module)
- test_cases/module/filename.yml (if module)
- test_cases/filename
- test_cases/filename/filename
- test_cases/filename/filename.yml
"""
# keep track of all paths attempted, for debugging
tried = []
if module:
# try joining all args
path = pjoin('test_cases', module, filename)
tried.append(path)
        if os.path.isfile(path):
            return path
        else:
            # try joining all args + .yml
            path += '.yml'
            tried.append(path)
            if os.path.isfile(path):
                return path
# try omitting module
path = pjoin('test_cases', filename)
tried.append(path)
# one of the above should at least be a file or directory
if not os.path.exists(path):
raise FileNotFoundError("No such file or directory: " + repr(tried))
# try getting default file for this directory
if os.path.isdir(path):
path = pjoin(path, os.path.basename(path))
tried.append(path)
if os.path.isfile(path):
return path
else:
path += '.yml'
tried.append(path)
if not os.path.isfile(path):
raise FileNotFoundError("No such file: " + repr(tried))
return path
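# Illustrative lookups (hedged: the 'login' and 'ui' names are hypothetical
# and assume the matching files exist under test_cases/).
def _example_find_test_file():
    # resolves to test_cases/login/login.yml when 'login' is a directory
    default_case = find_test_file('login')
    # resolves to test_cases/ui/smoke.yml when the module name is supplied
    module_case = find_test_file('smoke.yml', module='ui')
    return default_case, module_case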
def setup_custom_options(test_case, module):
test_case = setup_test_inheritance(test_case, module)
map_filename = test_case.get('dict')
if map_filename:
map_filename = find_test_file(map_filename, module=module)
opt_map = read_yaml(map_filename)
for opt, settings in opt_map.items():
if opt in test_case:
value = str(test_case[opt])
pattern = r'\b' + opt + r'\b'
step = settings.get('step')
assert step, "Error: 'step' must be defined for custom options"
step = int(step) - 1
test_step = test_case['steps'][step]
presteps = settings.get('presteps')
if presteps:
for ind, step in enumerate(presteps):
presteps[ind] = re.sub(pattern, value, step)
test_presteps = test_step.setdefault('presteps', [])
test_presteps += presteps
elems = settings.get('elems')
if elems:
for ind, elem in enumerate(elems):
for elem_name, elem_value in elem.items():
elems[ind][elem_name] = re.sub(pattern, value, elem_value)
test_elems = test_step.setdefault('elems', [])
test_elems += elems
poststeps = settings.get('poststeps')
if poststeps:
for ind, step in enumerate(poststeps):
poststeps[ind] = re.sub(pattern, value, step)
test_poststeps = test_step.setdefault('poststeps', [])
test_poststeps += poststeps
# look for "module" in each step
# can't use for loop b/c iteration is nonlinear
ind = 0
while ind < len(test_case['steps']):
step = test_case['steps'][ind]
if 'module' in step:
module_info = step['module']
module_name = module_info['name']
module_template = find_test_file(module_name, module=module_name)
test_template = read_yaml(module_template)
# inherit options from test_template
test_copy = test_case.copy()
# default to test_copy's options except for steps/dict/parent
            test_copy.pop('steps', None)
            # 'dict' may be absent on the child case, so pop defensively
            test_copy.pop('dict', None)
            test_copy.pop('parent', None)
test_template.update(test_copy)
test_template = test_copy
# generate sub-case as though template were the main case
test_template = setup_custom_options(test_template, module=module_name)
# obtain user's desired slice of module's steps
index = module_info.get('index', None)
            if index is None:
start = module_info.get('start', None)
stop = module_info.get('stop', None)
step_slice = slice(start, stop)
module_steps = test_template['steps'][step_slice]
else:
module_steps = [test_template['steps'][index]]
# replace module entry with steps
before = test_case['steps'][:ind]
after = test_case['steps'][ind+1:]
test_template['steps'] = before + module_steps + after
test_case = test_template
ind += len(module_steps)
else:
ind += 1
return test_case
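# Hedged sketch (hypothetical option name and step text) of the mapping that
# read_yaml() returns for a test case's 'dict' file, as consumed above:
# 'step' is required, while 'presteps', 'elems', and 'poststeps' are optional
# and have the option name substituted with the test case's value.
_EXAMPLE_OPT_MAP = {
    'username': {
        'step': 2,
        'presteps': ['enter username into the login form'],
        'poststeps': ['verify username is displayed'],
    },
}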
def setup_test_inheritance(child_case, module):
if not 'parent' in child_case:
child_case['parent'] = module
lineage = [child_case]
filenames = [None]
parent = child_case.get('parent', module)
while parent != False:
# defaults to module name
        if parent is None:
            parent = module
# break if module has itself as parent
parent = find_test_file(parent, module=module)
if parent == filenames[-1]:
break
with open(parent) as parent_file:
parent_case = yaml.load(parent_file.read(), Loader=yaml.FullLoader)
lineage.append(parent_case)
if parent in filenames:
filenames.append(parent)
errmsg = "Multiple/circular inheritance not allowed; got: "
errmsg += repr(filenames)
raise NotImplementedError(errmsg)
filenames.append(parent)
child_case = parent_case
parent = child_case.get('parent', module)
parent_case = lineage.pop()
while lineage:
child_case = lineage.pop()
parent_case.update(child_case)
return parent_case
def read_yaml(filename):
with open(filename) as fh:
return yaml.full_load(fh.read())
|
py | 1a361037882d34221b673f96338420bdda85bba6 | def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Maritalstatus, obj[8]: Children, obj[9]: Education, obj[10]: Occupation, obj[11]: Income, obj[12]: Bar, obj[13]: Coffeehouse, obj[14]: Restaurant20to50, obj[15]: Direction_same, obj[16]: Distance
# {"feature": "Occupation", "instances": 23, "metric_value": 0.9877, "depth": 1}
if obj[10]<=9:
# {"feature": "Bar", "instances": 18, "metric_value": 0.8524, "depth": 2}
if obj[12]>0.0:
# {"feature": "Time", "instances": 10, "metric_value": 1.0, "depth": 3}
if obj[2]>0:
# {"feature": "Weather", "instances": 7, "metric_value": 0.8631, "depth": 4}
if obj[1]<=0:
# {"feature": "Age", "instances": 6, "metric_value": 0.65, "depth": 5}
if obj[6]>0:
return 'False'
elif obj[6]<=0:
return 'True'
else: return 'True'
elif obj[1]>0:
return 'True'
else: return 'True'
elif obj[2]<=0:
return 'True'
else: return 'True'
elif obj[12]<=0.0:
return 'True'
else: return 'True'
elif obj[10]>9:
return 'False'
else: return 'False'
|
py | 1a3612d9dd3ce400b3a0c1fa8848bcc07770016c | #!/usr/bin/python
import sys
import json
def tablevel(tbl):
ret = ""
if tbl < 0:
return ""
else:
for i in xrange(tbl):
ret = ret + "\t"
return ret
def funcstrmkr(func, funcname, type):
tbl = 0
funcstr = ''
# funcstr += "var "+funcname+" = function("
funcstr += "Egg.prototype." + funcname + " = function("
flg = False;
for inp in func["inputs"]:
if(flg):
funcstr += ", "
funcstr += inp["name"]
flg = True
if (flg):
funcstr += ", "
funcstr += "cb){\n"
tbl += 1
# funcstr += tablevel(tbl) + "var GC = getContract(account);\n"
funcstr += tablevel(tbl) + "if (!this.egg) return cb(new Error(\'Egg contract has not been initialized\'));\n"
funcstr += tablevel(tbl) + "egg." + funcname
if type == "get":
funcstr = funcstr + ".call("
elif type == "post":
funcstr = funcstr + ".sendTransaction("
for inp in func["inputs"]:
funcstr += inp["name"] + ", "
funcstr += "function(err, result){\n"
tbl += 1
funcstr += tablevel(tbl) + "if(err) return cb(err, null);\n"
funcstr += tablevel(tbl) + "return cb(null"
if type == "get":
funcstr += ", result.values"
else:
for oup in func["outputs"]:
funcstr += ", result.values." + oup["name"]
funcstr += ");\n"
tbl -= 1
funcstr += tablevel(tbl) + "})\n"
tbl -= 1
funcstr += "}\n\n"
return funcstr
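# Hedged example of the JavaScript emitted by funcstrmkr above for a
# hypothetical constant (getter) ABI entry named get_owner with no inputs
# (the real output is indented with tabs):
#
#   Egg.prototype.get_owner = function(cb){
#       if (!this.egg) return cb(new Error('Egg contract has not been initialized'));
#       egg.get_owner.call(function(err, result){
#           if(err) return cb(err, null);
#           return cb(null, result.values);
#       })
#   }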
#Read the abi
infile = sys.argv[1]
outfile = sys.argv[2]
inf = open(infile,'r')
jo = json.load(inf)
inf.close()
Magic = False
#One by One take each function of the abi and compose the rest endpoint
restfuncs = []
modstr = "\nmodule.exports = {\n"
for func in jo:
if (func["type"] == "function"):
modstr += tablevel(1) + func["name"] + ":" + func["name"] + ",\n"
if (func["constant"] == False):
restfuncs.append(funcstrmkr(func, func["name"], "post"))
else:
restfuncs.append(funcstrmkr(func, func["name"],"get"))
modstr += "}\n\n"
#Now print out to file
ouf = open(outfile,'w')
#ouf.write("//Don't forget to set the output formatter to json!\n")
#ouf.write("contract.setOutputFormatter(erisC.outputFormatter.jsonStrings)\n\n")
#ouf.write("//Restify endpoints. Copy into appropriate section\n\n")
ouf.write(modstr)
for rf in restfuncs:
ouf.write(rf)
ouf.close()
|
py | 1a361353c6f30bff0e9c46ba2bc218a8359a5e04 | from django.contrib.auth import get_user_model
from django.utils.translation import override as override_language, ugettext_lazy as _
from django_rq import job
from pragmatic.managers import EmailManager
from pragmatic.signals import apm_custom_context
from commerce import settings as commerce_settings
@job(commerce_settings.REDIS_QUEUE)
@apm_custom_context('tasks')
def notify_about_new_order(order):
for user in get_user_model().objects.active().with_perm('commerce.view_order'):
with override_language(user.preferred_language):
EmailManager.send_mail(user, 'commerce/mails/order_created', _('New order'), data={'order': order}, request=None)
order.send_details()
@job(commerce_settings.REDIS_QUEUE)
@apm_custom_context('tasks')
def notify_about_changed_order_status(order):
user = order.user
with override_language(user.preferred_language):
return EmailManager.send_mail(user, 'commerce/mails/order_status_changed', _('Status of order %d changed') % order.number, data={'order': order}, request=None)
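# Hedged usage sketch: django_rq's @job decorator exposes .delay() for
# asynchronous enqueueing; `order` is assumed to be a saved Order instance.
def _example_enqueue(order):
    notify_about_new_order.delay(order)
    notify_about_changed_order_status.delay(order)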
|
py | 1a3613794256687a993c83b54936d7e04c45314a | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tempfile
import crosscat.LocalEngine
import bayeslite
import bayeslite.core as core
from bayeslite import bql_quote_name
from bayeslite.metamodels.crosscat import CrosscatMetamodel
from bayeslite.metamodels.iid_gaussian import StdNormalMetamodel
examples = {
'crosscat': (
lambda: CrosscatMetamodel(crosscat.LocalEngine.LocalEngine(seed=0)),
't',
'CREATE TABLE t(x NUMERIC, y CYCLIC, z CATEGORICAL)',
'INSERT INTO t (x, y, z) VALUES (?, ?, ?)',
[
(0, 1.57, 'foo'),
(1.83, 3.141, 'bar'),
(1.82, 3.140, 'bar'),
(-1, 6.28, 'foo'),
],
'p',
'p_cc',
'CREATE POPULATION p FOR t'
'(x NUMERICAL; y CYCLIC; z CATEGORICAL)',
'CREATE GENERATOR p_cc FOR p USING crosscat()',
'CREATE GENERATOR p_cc FOR p USING crosscat(DEPENDENT)',
'CREATE GENERATOR p_cc FOR p USING crosscat(INDEPENDENT)',
),
'iid_gaussian': (
lambda: StdNormalMetamodel(seed=0),
't',
'CREATE TABLE t(x NUMERIC, y NUMERIC)',
'INSERT INTO t (x, y) VALUES (?, ?)',
[(0, 1), (1, float('nan')), (2, -1.2)],
'p',
'p_sn',
'CREATE POPULATION p FOR t(x NUMERICAL; y NUMERICAL)',
'CREATE GENERATOR p_sn FOR p USING std_normal()',
# XXX Should invent something that fails for
# metamodel-specific reasons here.
'CREATE GENERATOR p_sn FOR p USING std_normal ...',
'CREATE GENERATOR p_sn FOR p USING std_normal ...'
),
}
@pytest.mark.parametrize('persist,exname',
[(persist, key)
for persist in (True, False)
for key in sorted(examples.keys())])
def test_example(persist, exname):
if persist:
with tempfile.NamedTemporaryFile(prefix='bayeslite') as f:
with bayeslite.bayesdb_open(pathname=f.name,
builtin_metamodels=False) as bdb:
_test_example(bdb, exname)
with bayeslite.bayesdb_open(pathname=f.name,
builtin_metamodels=False) as bdb:
_retest_example(bdb, exname)
else:
with bayeslite.bayesdb_open(builtin_metamodels=False) as bdb:
_test_example(bdb, exname)
def _test_example(bdb, exname):
mm, t, t_sql, data_sql, data, p, g, p_bql, g_bql, g_bqlbad0, g_bqlbad1 = \
examples[exname]
qt = bql_quote_name(t)
qg = bql_quote_name(g)
bayeslite.bayesdb_register_metamodel(bdb, mm())
# Create a table.
assert not core.bayesdb_has_table(bdb, t)
with bdb.savepoint_rollback():
bdb.sql_execute(t_sql)
assert core.bayesdb_has_table(bdb, t)
assert not core.bayesdb_has_table(bdb, t)
bdb.sql_execute(t_sql)
assert core.bayesdb_has_table(bdb, t)
# Insert data into the table.
assert bdb.execute('SELECT COUNT(*) FROM %s' % (qt,)).fetchvalue() == 0
for row in data:
bdb.sql_execute(data_sql, row)
n = len(data)
assert bdb.execute('SELECT COUNT(*) FROM %s' % (qt,)).fetchvalue() == n
# Create a population.
assert not core.bayesdb_has_population(bdb, p)
bdb.execute(p_bql)
p_id = core.bayesdb_get_population(bdb, p)
# Create a generator. Make sure savepoints work for this.
assert not core.bayesdb_has_generator(bdb, p_id, g)
with pytest.raises(Exception):
with bdb.savepoint():
bdb.execute(g_bqlbad0)
assert not core.bayesdb_has_generator(bdb, p_id, g)
with pytest.raises(Exception):
with bdb.savepoint():
bdb.execute(g_bqlbad1)
assert not core.bayesdb_has_generator(bdb, p_id, g)
with bdb.savepoint_rollback():
bdb.execute(g_bql)
assert core.bayesdb_has_generator(bdb, p_id, g)
assert not core.bayesdb_has_generator(bdb, p_id, g)
bdb.execute(g_bql)
assert core.bayesdb_has_generator(bdb, p_id, g)
assert not core.bayesdb_has_generator(bdb, p_id+1, g)
with pytest.raises(Exception):
bdb.execute(g_bql)
assert core.bayesdb_has_generator(bdb, p_id, g)
gid = core.bayesdb_get_generator(bdb, p_id, g)
assert not core.bayesdb_generator_has_model(bdb, gid, 0)
assert [] == core.bayesdb_generator_modelnos(bdb, gid)
with bdb.savepoint_rollback():
bdb.execute('INITIALIZE 1 MODEL FOR %s' % (qg,))
assert core.bayesdb_generator_has_model(bdb, gid, 0)
assert [0] == core.bayesdb_generator_modelnos(bdb, gid)
with bdb.savepoint_rollback():
bdb.execute('INITIALIZE 10 MODELS FOR %s' % (qg,))
for i in range(10):
assert core.bayesdb_generator_has_model(bdb, gid, i)
assert range(10) == core.bayesdb_generator_modelnos(bdb, gid)
bdb.execute('INITIALIZE 2 MODELS FOR %s' % (qg,))
# Test dropping things.
with pytest.raises(bayeslite.BQLError):
bdb.execute('DROP TABLE %s' % (qt,))
with bdb.savepoint_rollback():
# Note that sql_execute does not protect us!
bdb.sql_execute('DROP TABLE %s' % (qt,))
assert not core.bayesdb_has_table(bdb, t)
assert core.bayesdb_has_table(bdb, t)
# XXX Should we reject dropping a generator when there remain
# models? Should we not reject dropping a table when there remain
# generators? A table can be dropped when there remain indices.
#
# with pytest.raises(bayeslite.BQLError):
# # Models remain.
# bdb.execute('DROP GENERATOR %s' % (qg,))
with bdb.savepoint_rollback():
bdb.execute('DROP GENERATOR %s' % (qg,))
assert not core.bayesdb_has_generator(bdb, None, g)
assert core.bayesdb_has_generator(bdb, p_id, g)
with bdb.savepoint_rollback():
bdb.execute('DROP GENERATOR %s' % (qg,))
assert not core.bayesdb_has_generator(bdb, None, g)
bdb.execute(g_bql)
assert core.bayesdb_has_generator(bdb, None, g)
assert core.bayesdb_has_generator(bdb, p_id, g)
assert core.bayesdb_has_generator(bdb, None, g)
assert gid == core.bayesdb_get_generator(bdb, p_id, g)
# Test dropping models.
with bdb.savepoint_rollback():
bdb.execute('DROP MODEL 1 FROM %s' % (qg,))
assert core.bayesdb_generator_has_model(bdb, gid, 0)
assert not core.bayesdb_generator_has_model(bdb, gid, 1)
assert [0] == core.bayesdb_generator_modelnos(bdb, gid)
# Test analyzing models.
bdb.execute('ANALYZE %s FOR 1 ITERATION WAIT' % (qg,))
bdb.execute('ANALYZE %s MODEL 0 FOR 1 ITERATION WAIT' % (qg,))
bdb.execute('ANALYZE %s MODEL 1 FOR 1 ITERATION WAIT' % (qg,))
def _retest_example(bdb, exname):
mm, t, t_sql, data_sql, data, p, g, p_bql, g_bql, g_bqlbad0, g_bqlbad1 = \
examples[exname]
qt = bql_quote_name(t)
qg = bql_quote_name(g)
bayeslite.bayesdb_register_metamodel(bdb, mm())
p_id = core.bayesdb_get_population(bdb, p)
assert core.bayesdb_has_table(bdb, t)
assert core.bayesdb_has_generator(bdb, p_id, g)
gid = core.bayesdb_get_generator(bdb, p_id, g)
assert core.bayesdb_generator_has_model(bdb, gid, 0)
assert core.bayesdb_generator_has_model(bdb, gid, 1)
bdb.execute('ANALYZE %s FOR 1 ITERATION WAIT' % (qg,))
bdb.execute('ANALYZE %s MODEL 0 FOR 1 ITERATION WAIT' % (qg,))
bdb.execute('ANALYZE %s MODEL 1 FOR 1 ITERATION WAIT' % (qg,))
|
py | 1a3613dff1cdfc6a420222a030f7083c34976694 | # pitfall: `reverse_order = input_as_list` would only alias the list, so an
# in-place reverse would also reverse the original; copy the list before reversing
# (User's) Problem
# We have:
# a string
# We need:
#     is that string a palindrome? yes/no
# We must:
# boolean output
# name of function is
# checkPalindrome
# Solution (Product)
# Strategy 1:
# turn string into a list(array)
# Make a compare_list which is the reverse order of
# the original list
# compare the two, if they are the same: true, else false
def checkPalindrome(inputString):
# make input a list
input_as_list = list(inputString)
# make a reverse list
    # (first make an actual copy, not just another name for the same list)
    reverse_order = input_as_list[:]
    # (reverse() returns None; it reverses the copy in place)
    reverse_order.reverse()
# compare two lists
if input_as_list == reverse_order:
return True
else:
return False
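# quick sanity check (illustrative usage added for clarity; the sample strings
# below are assumptions, not part of the original exercise)
if __name__ == "__main__":
    print(checkPalindrome("aba"))   # expected: True
    print(checkPalindrome("abc"))   # expected: False
    print(checkPalindrome(""))      # expected: True (an empty string reads the same both ways)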
|
py | 1a3613f20e576be9023869bcf6805bf408082cb5 | import numpy as np
import cv2
import time
import random
from Markov import Get_Markov
P = Get_Markov()
TILE_SIZE = 32
OFS = 50
MARKET = """
##################
##..............##
#R..HA..ME..IB..P#
#R..HA..ME..IB..P#
#R..HA..ME..IB..P#
#Y..HA..ME..IB..P#
#Y..HA..ME..IB..P#
##...............#
##..C#..C#..C#...#
##..##..##..##...#
##...............#
##############GG##
""".strip()
class SupermarketMap:
"""Visualizes the supermarket background"""
def __init__(self, layout, tiles):
"""
layout : a string with each character representing a tile
tile : a numpy array containing the tile image
"""
self.tiles = tiles
self.contents = [list(row) for row in layout.split("\n")]
self.xsize = len(self.contents[0])
self.ysize = len(self.contents)
self.image = np.zeros(
(self.ysize * TILE_SIZE, self.xsize * TILE_SIZE, 3), dtype=np.uint8
)
self.prepare_map()
def extract_tile(self, row, col):
y = (row-1)*32
x = (col-1)*32
return self.tiles[y:y+32, x:x+32]
def get_tile(self, char):
"""returns the array for a given tile character"""
if char == "#":
return self.extract_tile(1,1)
elif char == "G":
return self.extract_tile(8,4)
elif char == "C":
return self.extract_tile(3,9)
elif char == "B":
return self.extract_tile(1,5)
elif char == "E":
return self.extract_tile(8,12)
elif char == "A":
return self.extract_tile(7,14)
elif char == "R":
return self.extract_tile(4,9)
elif char == "Y":
return self.extract_tile(5,9)
elif char == "P":
return self.extract_tile(6,5)
elif char == "I":
return self.extract_tile(5,14)
elif char == "M":
return self.extract_tile(4,14)
elif char == "H":
return self.extract_tile(7,4)
else:
return self.extract_tile(1,3)
def prepare_map(self):
"""prepares the entire image as a big numpy array"""
for y, row in enumerate(self.contents):
for x, tile in enumerate(row):
bm = self.get_tile(tile)
self.image[
y * TILE_SIZE : (y + 1) * TILE_SIZE,
x * TILE_SIZE : (x + 1) * TILE_SIZE,
] = bm
def draw(self, frame, offset=OFS):
"""
draws the image into a frame
offset pixels from the top left corner
"""
frame[
OFS : OFS + self.image.shape[0], OFS : OFS + self.image.shape[1]
] = self.image
def write_image(self, filename):
"""writes the image into a file"""
cv2.imwrite(filename, self.image)
class Customer:
def __init__(self, terrain_map, image, customer_id, state, matrix = P):
self.terrain_map = terrain_map
self.image = image
self.customer_id = customer_id
self.state = state
self.matrix = matrix
def __repr__(self):
return f'the customer is now at {self.state}!'
def draw(self, frame):
location_pos = {'dairy':(10,2),'drinks':(6,2),'fruit':(14,2),
'spices':(2,2),'checkout':(11,8)}
xpos = OFS + location_pos[self.state][0] * TILE_SIZE
ypos = OFS + location_pos[self.state][1] * TILE_SIZE
frame[ypos:ypos+TILE_SIZE, xpos:xpos+TILE_SIZE] = self.image
# overlay the Customer image / sprite onto the frame
def next_state(self):
'''
Propagates the customer to the next state.
        Returns the new state.
'''
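        # random.choices draws one sample (a list of length 1, hence the [0] below),
        # weighting each target section by the transition probabilities stored in the
        # row of the Markov matrix P that corresponds to the current state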
self.state = random.choices(['checkout','dairy','drinks','fruit','spices'],
self.matrix.loc[self.state])
self.state = self.state[0]
# location_pos = {'dairy':(10,2),'drinks':(6,2),'fruit':(14,2),
# 'spices':(2,2),'checkout':(1,1)}
# self.state = location_pos[self.state]
return self.state
def move(self, direction):
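        # note: move() works on grid coordinates self.x / self.y, which are not set in
        # __init__; the corresponding key handling in __main__ is commented out, so this
        # method is effectively unused in the current simulation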
newx = self.x
newy = self.y
if direction == 'up':
newy -= 1
if direction == 'down':
newy += 1
if direction == 'left':
newx -= 1
if direction == 'right':
newx += 1
if self.terrain_map.contents[newy][newx] == '.':
self.x = newx
self.y = newy
if __name__ == "__main__":
background = np.zeros((700, 1000, 3), np.uint8)
tiles = cv2.imread("tiles.png")
market = SupermarketMap(MARKET, tiles)
cust_image = market.extract_tile(5,1)
cust1 = Customer(market, cust_image, 1, state='dairy') # spice
# cust2 = Customer(market, cust_image, 6, 2) # drinks
# cust3 = Customer(market, cust_image, 10, 2) # dairy
# cust4 = Customer(market, cust_image, 14, 2) # fruit
count = 0
minutes = 0
while True: # this script will run forever
frame = background.copy()
market.draw(frame) # it draws in to the supermarket
cust1.draw(frame)
# cust2.draw(frame)
# cust3.draw(frame)
# cust4.draw(frame)
cv2.imshow("frame", frame)
key = chr(cv2.waitKey(1) & 0xFF)
if key == "q":
break
# if key == 'w':
# cust1.move('up')
# if key == 'a':
# cust1.move('left')
# if key == 'd':
# cust1.move('right')
# if key == 'z':
# cust1.move('down')
if count == 48:
count = 0
minutes += 1
cust1.next_state()
count += 1
cv2.destroyAllWindows()
market.write_image("supermarket.png")
|
py | 1a3613ffb606244be3216ff4e728300f550fc61e | from typing import Any, Dict, List, Optional
import httpx
from ...client import Client
from ...models.suggester import Suggester
from ...types import Response
def _get_kwargs(
project_name: str,
*,
client: Client,
) -> Dict[str, Any]:
url = "{}/projects/{projectName}/suggesters".format(client.base_url, projectName=project_name)
headers: Dict[str, Any] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
return {
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
}
def _parse_response(*, response: httpx.Response) -> Optional[List[Suggester]]:
if response.status_code == 200:
response_200 = []
_response_200 = response.json()
for componentsschemas_suggester_array_item_data in _response_200:
componentsschemas_suggester_array_item = Suggester.from_dict(componentsschemas_suggester_array_item_data)
response_200.append(componentsschemas_suggester_array_item)
return response_200
return None
def _build_response(*, response: httpx.Response) -> Response[List[Suggester]]:
return Response(
status_code=response.status_code,
content=response.content,
headers=response.headers,
parsed=_parse_response(response=response),
)
def sync_detailed(
project_name: str,
*,
client: Client,
) -> Response[List[Suggester]]:
kwargs = _get_kwargs(
project_name=project_name,
client=client,
)
response = httpx.get(
verify=client.verify_ssl,
**kwargs,
)
return _build_response(response=response)
def sync(
project_name: str,
*,
client: Client,
) -> Optional[List[Suggester]]:
""" """
return sync_detailed(
project_name=project_name,
client=client,
).parsed
async def asyncio_detailed(
project_name: str,
*,
client: Client,
) -> Response[List[Suggester]]:
kwargs = _get_kwargs(
project_name=project_name,
client=client,
)
async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
response = await _client.get(**kwargs)
return _build_response(response=response)
async def asyncio(
project_name: str,
*,
client: Client,
) -> Optional[List[Suggester]]:
""" """
return (
await asyncio_detailed(
project_name=project_name,
client=client,
)
).parsed
|
py | 1a3614047f0fa6c16b4ddaa475393fd06fcb0f8a | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': '{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['BTCA.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
|
py | 1a3614f3576b441031b297bae1c4864a3ea9b24c | import re
import sys
import uuid
from collections import defaultdict
from contextlib import contextmanager
from io import BytesIO
from hashlib import sha1
from itertools import chain
from os.path import join
from corehq.blobs import get_blob_db, CODES # noqa: F401
from corehq.blobs.exceptions import AmbiguousBlobStorageError, NotFound
from corehq.blobs.util import (
classproperty,
document_method,
random_url_id,
SAFENAME,
)
from corehq.util.io import ClosingContextProxy
from couchdbkit.exceptions import InvalidAttachment, ResourceNotFound
from dimagi.ext.couchdbkit import (
Document,
DocumentSchema,
DictProperty,
IntegerProperty,
StringProperty,
)
from memoized import memoized
import six
class BlobMetaRef(DocumentSchema):
key = StringProperty()
blobmeta_id = IntegerProperty()
content_type = StringProperty()
content_length = IntegerProperty()
@classmethod
def _from_attachment(cls, data):
return cls(
content_type=data.get("content_type"),
content_length=data.get("length"),
)
@staticmethod
def _normalize_json(dbname, doc_id, data):
if "key" in data:
return data
return {
"key": join(dbname, safe_id(doc_id), data["id"]),
"content_length": data.get("content_length"),
"content_type": data.get("content_type"),
}
class BlobMixin(Document):
class Meta(object):
abstract = True
# TODO evaluate all uses of `external_blobs`
external_blobs = DictProperty(BlobMetaRef)
# When true, fallback to couch on fetch and delete if blob is not
# found in blobdb. Set this to True on subclasses that are in the
# process of being migrated. When this is false (the default) the
# methods on this mixin will not touch couchdb.
_migrating_blobs_from_couch = False
_atomic_blobs = None
@classmethod
def wrap(cls, data):
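        # Rewrite any older-format external_blobs entries into the key-based
        # BlobMetaRef JSON so documents saved before that format change load cleanly.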
if data.get("external_blobs"):
doc_id = safe_id(data["_id"])
dbname = _get_couchdb_name(cls)
normalize = BlobMetaRef._normalize_json
blobs = {}
normalized = False
for key, value in data["external_blobs"].items():
if value["doc_type"] == "BlobMetaRef":
blobs[key] = value
else:
blobs[key] = normalize(dbname, data['_id'], value)
normalized = True
if normalized:
data = data.copy()
data["external_blobs"] = blobs
return super(BlobMixin, cls).wrap(data)
@classproperty
def _blobdb_type_code(cls):
"""Blob DB type code
This is an abstract attribute that must be set on non-abstract
subclasses of `BlobMixin`. Its value should be one of the codes
in `corehq.blobs.CODES`.
"""
raise NotImplementedError(
"abstract class attribute %s._blobdb_type_code is missing" %
cls.__name__
)
@property
def blobs(self):
"""Get a dictionary of BlobMetaRef objects keyed by attachment name
Includes CouchDB attachments if `_migrating_blobs_from_couch` is true.
The returned value should not be mutated.
"""
if not self._migrating_blobs_from_couch or not self._attachments:
return self.external_blobs
value = {name: BlobMetaRef._from_attachment(info)
for name, info in self._attachments.items()}
value.update(self.external_blobs)
return value
@document_method
def put_attachment(self, content, name=None, content_type=None,
content_length=None, domain=None, type_code=None):
"""Put attachment in blob database
See `get_short_identifier()` for restrictions on the upper bound
for number of attachments per object.
:param content: String or file object.
"""
db = get_blob_db()
if name is None:
name = getattr(content, "name", None)
if name is None:
raise InvalidAttachment("cannot save attachment without name")
if self._id is None:
raise ResourceNotFound("cannot put attachment on unidentified document")
if hasattr(self, "domain"):
if domain is not None and self.domain != domain:
raise ValueError("domain mismatch: %s != %s" % (self.domain, domain))
domain = self.domain
elif domain is None:
raise ValueError("domain attribute or argument is required")
old_meta = self.blobs.get(name)
if isinstance(content, str):
content = BytesIO(content.encode("utf-8"))
elif isinstance(content, bytes):
content = BytesIO(content)
# do we need to worry about BlobDB reading beyond content_length?
meta = db.put(
content,
domain=domain or self.domain,
parent_id=self._id,
name=name,
type_code=(self._blobdb_type_code if type_code is None else type_code),
content_type=content_type,
)
self.external_blobs[name] = BlobMetaRef(
key=meta.key,
blobmeta_id=meta.id,
content_type=content_type,
content_length=meta.content_length,
)
if self._migrating_blobs_from_couch and self._attachments:
self._attachments.pop(name, None)
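        # outside an atomic_blobs() block the document is saved immediately and the
        # replaced blob is deleted right away; inside one, the old key is only queued
        # and gets deleted once the surrounding context succeeds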
if self._atomic_blobs is None:
self.save()
if old_meta and old_meta.key:
db.delete(key=old_meta.key)
elif old_meta and old_meta.key:
self._atomic_blobs[name].append(old_meta.key)
return True
@document_method
def fetch_attachment(self, name, stream=False):
"""Get named attachment
:param stream: When true, return a file-like object that can be
read at least once (streamers should not expect to seek within
or read the contents of the returned file more than once).
"""
db = get_blob_db()
try:
try:
key = self.external_blobs[name].key
except KeyError:
if self._migrating_blobs_from_couch:
return super(BlobMixin, self) \
.fetch_attachment(name, stream=stream)
raise NotFound(name)
meta = db.metadb.get(parent_id=self._id, key=key)
blob = meta.open()
except (NotFound, db.metadb.DoesNotExist):
raise ResourceNotFound(
"{model} {model_id} attachment: {name!r}".format(
model=type(self).__name__,
model_id=self._id,
name=name,
))
if stream:
return blob
with blob:
return blob.read()
def has_attachment(self, name):
return name in self.blobs
def delete_attachment(self, name):
if self._migrating_blobs_from_couch and self._attachments:
deleted = bool(self._attachments.pop(name, None))
else:
deleted = False
meta = self.external_blobs.pop(name, None)
if meta is not None:
if self._atomic_blobs is None:
deleted = get_blob_db().delete(key=meta.key) or deleted
else:
self._atomic_blobs[name].append(meta.key)
deleted = True
if self._atomic_blobs is None:
self.save()
return deleted
@document_method
def atomic_blobs(self, save=None):
"""Return a context manager to atomically save doc + blobs
Usage::
with doc.atomic_blobs():
doc.put_attachment(...)
# doc and blob are now saved
Blobs saved inside the context manager will be deleted if an
exception is raised inside the context body.
:param save: A function to be called instead of `self.save()`
"""
@contextmanager
def atomic_blobs_context():
if self._id is None:
self._id = uuid.uuid4().hex
old_external_blobs = dict(self.external_blobs)
if self._migrating_blobs_from_couch:
if self._attachments:
old_attachments = dict(self._attachments)
else:
old_attachments = None
atomicity = self._atomic_blobs
self._atomic_blobs = new_deleted = defaultdict(list)
db = get_blob_db()
success = False
try:
yield
(self.save if save is None else save)()
success = True
except:
typ, exc, tb = sys.exc_info()
# delete new blobs that were not saved
for name, meta in self.external_blobs.items():
old_meta = old_external_blobs.get(name)
if old_meta is None or meta.key != old_meta.key:
db.delete(key=meta.key)
self.external_blobs = old_external_blobs
if self._migrating_blobs_from_couch:
self._attachments = old_attachments
six.reraise(typ, exc, tb)
finally:
self._atomic_blobs = atomicity
if success:
# delete replaced blobs
deleted = set()
blobs = self.blobs
for name, meta in list(old_external_blobs.items()):
if name not in blobs or meta.key != blobs[name].key:
db.delete(key=meta.key)
deleted.add(meta.key)
# delete newly created blobs that were overwritten or deleted
for key in chain.from_iterable(new_deleted.values()):
if key not in deleted:
db.delete(key=key)
return atomic_blobs_context()
class BlobHelper(object):
"""Helper to get/set blobs given a document dict and couch database
NOTE: attachments will be stored in couch and will be inaccessible
using the normal attachments API if this is used to copy a document
having "_attachments" but not "external_blobs" to a database in
which the "doc_type" uses external blob storage and is not in
`_migrating_blobs_from_couch` mode. To work around this limitation,
put `"external_blobs": {}` in documents having a "doc_type" that
uses external blob storage. The same is true when copying a document
with "external_blobs" to a database that is not using an external
blob database. To work around that, remove the "external_blobs" item
from the document (after fetching all blobs) and be sure that the
document has an "_attachments" value that is not `None`.
Modifying "_attachments" or "external_blobs" values in a document is
not recommended while it is wrapped in this class.
"""
def __init__(self, doc, database, type_code):
if doc.get("_id") is None:
raise TypeError("BlobHelper requires a real _id")
self._id = doc["_id"]
self.doc = doc
self.doc_type = doc["doc_type"]
if "domain" in doc:
self.domain = doc["domain"]
elif self.doc_type == "Domain":
self.domain = doc["name"]
self._blobdb_type_code = type_code
self.database = database
self.couch_only = "external_blobs" not in doc
self._migrating_blobs_from_couch = bool(doc.get("_attachments")) \
and not self.couch_only
self._attachments = doc.get("_attachments")
self.external_blobs = {n: BlobMetaRef.wrap(
BlobMetaRef._normalize_json(database.dbname, self._id, m.copy())
) for n, m in doc.get("external_blobs", {}).items()}
def __repr__(self):
return "<%s %s domain=%s id=%s>" % (
type(self).__name__,
self.doc_type,
getattr(self, "domain", ""),
self._id,
)
_atomic_blobs = None
@property
def blobs(self):
return BlobMixin.blobs.fget(self)
def put_attachment(self, content, name=None, *args, **kw):
if self._attachments is None and self.couch_only:
raise AmbiguousBlobStorageError(" ".join("""
Ambiguous blob storage: doc has no _attachments and no
external_blobs. Put a dict (may be empty) in one or both
to indicate where blobs are located (_attachments ->
couch, external_blobs -> blob db). If both are present,
new blobs will be stored in the blob db, but existing
blobs will be fetched from couch if there is no
corresponding key in the external_blobs dict.
""".split()))
if self.couch_only:
self.database.put_attachment(self.doc, content, name, *args, **kw)
else:
BlobMixin.put_attachment(self, content, name, *args, **kw)
self._sync_doc()
return True
def fetch_attachment(self, name, *args, **kw):
if name in self.external_blobs:
return BlobMixin.fetch_attachment(self, name, *args, **kw)
return self.database.fetch_attachment(self._id, name, *args, **kw)
def delete_attachment(self, *args, **kw):
raise NotImplementedError
def atomic_blobs(self, save=None):
if save is not None:
original_save = save
def save():
self._sync_doc()
original_save()
if self.couch_only:
@contextmanager
def context():
(self.save if save is None else save)()
yield
else:
@contextmanager
def context():
try:
with BlobMixin.atomic_blobs(self, save):
yield
except:
self.doc["_attachments"] = self._attachments
self.doc["external_blobs"] = {name: meta.to_json()
for name, meta in self.external_blobs.items()}
raise
return context()
def _sync_doc(self):
if "_attachments" in self.doc:
assert self.doc["_attachments"] == self._attachments
if "external_blobs" in self.doc:
# because put_attachment calls self.save()
self.doc["external_blobs"] = {name: meta.to_json()
for name, meta in self.external_blobs.items()}
def save(self):
self._sync_doc()
self.database.save_doc(self.doc)
class DeferredBlobMixin(BlobMixin):
"""Similar to BlobMixin, but can defer attachment puts until save
This class is intended for backward compatibility with code that set
`_attachments` to a dict of attachments with content. It is not
recommended to use this in new code.
"""
class Meta(object):
abstract = True
_deferred_blobs = None
@property
def blobs(self):
value = super(DeferredBlobMixin, self).blobs
if self._deferred_blobs:
value = dict(value)
for name, info in self._deferred_blobs.items():
if info is not None:
value[name] = BlobMetaRef(
key=None,
content_type=info.get("content_type", None),
content_length=info.get("content_length", None),
)
else:
value.pop(name, None)
return value
@property
def persistent_blobs(self):
"""Get a dict like `blobs` containing only non-deferred items"""
value = super(DeferredBlobMixin, self).blobs
if self._deferred_blobs:
value = value.copy()
for name in self._deferred_blobs:
value.pop(name, None)
return value
def put_attachment(self, content, name=None, *args, **kw):
if self._deferred_blobs:
self._deferred_blobs.pop(name, None)
return super(DeferredBlobMixin, self).put_attachment(content, name,
*args, **kw)
def fetch_attachment(self, name, stream=False):
if self._deferred_blobs and name in self._deferred_blobs:
if self._deferred_blobs[name] is None:
raise ResourceNotFound(
"{model} {model_id} attachment: {name!r}".format(
model=type(self).__name__,
model_id=self._id,
name=name,
))
body = self._deferred_blobs[name]["content"]
if stream:
return ClosingContextProxy(BytesIO(body))
return body
return super(DeferredBlobMixin, self).fetch_attachment(name, stream)
def delete_attachment(self, name):
if self._deferred_blobs:
deleted = bool(self._deferred_blobs.pop(name, None))
else:
deleted = False
return super(DeferredBlobMixin, self).delete_attachment(name) or deleted
def deferred_put_attachment(self, content, name=None, content_type=None,
content_length=None, domain=None, type_code=None):
"""Queue attachment to be persisted on save
WARNING this loads the entire blob content into memory. Use of
this method is discouraged:
- Generally it is bad practice to load large blobs into memory
in their entirety. Ideally blobs should be streamed between
the client and the blob database.
- JSON serialization becomes less efficient because blobs are
base-64 encoded, requiring even more memory.
This method takes the same parameters as `put_attachment`.
"""
if isinstance(content, str):
content = content.encode('utf-8')
elif not isinstance(content, bytes):
content = content.read()
if self._deferred_blobs is None:
self._deferred_blobs = {}
length = len(content) if content_length is None else content_length
self._deferred_blobs[name] = {
"content": content,
"content_type": content_type,
"content_length": length,
"domain": domain or getattr(self, "domain", None),
"type_code": type_code,
}
def deferred_delete_attachment(self, name):
"""Mark attachment to be deleted on save"""
if self._deferred_blobs is None:
self._deferred_blobs = {}
self._deferred_blobs[name] = None
def save(self):
if self._deferred_blobs:
delete_names = []
with self.atomic_blobs(super(DeferredBlobMixin, self).save):
# list deferred blobs to avoid modification during iteration
for name, info in list(self._deferred_blobs.items()):
if info is not None:
self.put_attachment(name=name, **info)
else:
delete_names.append(name)
for name in delete_names:
self.delete_attachment(name)
assert not self._deferred_blobs, self._deferred_blobs
else:
super(DeferredBlobMixin, self).save()
def get_short_identifier():
"""Get a short random identifier
The identifier is chosen from a 64 bit key space, which is suitably
large for no likely collisions in 1000 concurrent keys but kept
small to minimize key length. 1000 is an arbitrary number chosen as
an upper bound of the number of attachments associated with any
given object. We may need to change this if we ever expect an object
to have significantly more than 1000 attachments. The probability of
a collision with a 64 bit ID is:
k = 1000
N = 2 ** 64
(k ** 2) / (2 * N) = 2.7e-14
which is somewhere near the probability of a meteor landing on
your house. For most objects the number of blobs present at any
moment in time will be far lower, and therefore the probability
of a collision will be much lower as well.
http://preshing.com/20110504/hash-collision-probabilities/
"""
return random_url_id(8)
@memoized
def _get_couchdb_name(doc_class):
return doc_class.get_db().dbname
def safe_id(identifier):
if not SAFENAME.match(identifier):
identifier = 'sha1-' + sha1(identifier.encode('utf-8')).hexdigest()
elif SHA1_ID.match(identifier):
# could collide with "safe" id and should never happen anyway
raise ValueError("illegal doc id: {!r}".format(identifier))
return identifier
SHA1_ID = re.compile("sha1-[0-9a-f]{40}$")
|
py | 1a3615a3707d199e1c2f331c82adb4ed82dcc134 | #!/usr/bin/env python
from app import app
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
|
py | 1a3615f92273d6554cd1db726bc316ed7a1e0802 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: walletunlocker.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import rpc_pb2 as rpc__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='walletunlocker.proto',
package='lnrpc',
syntax='proto3',
serialized_options=b'Z%github.com/lightningnetwork/lnd/lnrpc',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x14walletunlocker.proto\x12\x05lnrpc\x1a\trpc.proto\"A\n\x0eGenSeedRequest\x12\x19\n\x11\x61\x65zeed_passphrase\x18\x01 \x01(\x0c\x12\x14\n\x0cseed_entropy\x18\x02 \x01(\x0c\"H\n\x0fGenSeedResponse\x12\x1c\n\x14\x63ipher_seed_mnemonic\x18\x01 \x03(\t\x12\x17\n\x0f\x65nciphered_seed\x18\x02 \x01(\x0c\"\xca\x01\n\x11InitWalletRequest\x12\x17\n\x0fwallet_password\x18\x01 \x01(\x0c\x12\x1c\n\x14\x63ipher_seed_mnemonic\x18\x02 \x03(\t\x12\x19\n\x11\x61\x65zeed_passphrase\x18\x03 \x01(\x0c\x12\x17\n\x0frecovery_window\x18\x04 \x01(\x05\x12\x32\n\x0f\x63hannel_backups\x18\x05 \x01(\x0b\x32\x19.lnrpc.ChanBackupSnapshot\x12\x16\n\x0estateless_init\x18\x06 \x01(\x08\",\n\x12InitWalletResponse\x12\x16\n\x0e\x61\x64min_macaroon\x18\x01 \x01(\x0c\"\x93\x01\n\x13UnlockWalletRequest\x12\x17\n\x0fwallet_password\x18\x01 \x01(\x0c\x12\x17\n\x0frecovery_window\x18\x02 \x01(\x05\x12\x32\n\x0f\x63hannel_backups\x18\x03 \x01(\x0b\x32\x19.lnrpc.ChanBackupSnapshot\x12\x16\n\x0estateless_init\x18\x04 \x01(\x08\"\x16\n\x14UnlockWalletResponse\"~\n\x15\x43hangePasswordRequest\x12\x18\n\x10\x63urrent_password\x18\x01 \x01(\x0c\x12\x14\n\x0cnew_password\x18\x02 \x01(\x0c\x12\x16\n\x0estateless_init\x18\x03 \x01(\x08\x12\x1d\n\x15new_macaroon_root_key\x18\x04 \x01(\x08\"0\n\x16\x43hangePasswordResponse\x12\x16\n\x0e\x61\x64min_macaroon\x18\x01 \x01(\x0c\x32\xa5\x02\n\x0eWalletUnlocker\x12\x38\n\x07GenSeed\x12\x15.lnrpc.GenSeedRequest\x1a\x16.lnrpc.GenSeedResponse\x12\x41\n\nInitWallet\x12\x18.lnrpc.InitWalletRequest\x1a\x19.lnrpc.InitWalletResponse\x12G\n\x0cUnlockWallet\x12\x1a.lnrpc.UnlockWalletRequest\x1a\x1b.lnrpc.UnlockWalletResponse\x12M\n\x0e\x43hangePassword\x12\x1c.lnrpc.ChangePasswordRequest\x1a\x1d.lnrpc.ChangePasswordResponseB\'Z%github.com/lightningnetwork/lnd/lnrpcb\x06proto3'
,
dependencies=[rpc__pb2.DESCRIPTOR,])
_GENSEEDREQUEST = _descriptor.Descriptor(
name='GenSeedRequest',
full_name='lnrpc.GenSeedRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='aezeed_passphrase', full_name='lnrpc.GenSeedRequest.aezeed_passphrase', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='seed_entropy', full_name='lnrpc.GenSeedRequest.seed_entropy', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=42,
serialized_end=107,
)
_GENSEEDRESPONSE = _descriptor.Descriptor(
name='GenSeedResponse',
full_name='lnrpc.GenSeedResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cipher_seed_mnemonic', full_name='lnrpc.GenSeedResponse.cipher_seed_mnemonic', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enciphered_seed', full_name='lnrpc.GenSeedResponse.enciphered_seed', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=109,
serialized_end=181,
)
_INITWALLETREQUEST = _descriptor.Descriptor(
name='InitWalletRequest',
full_name='lnrpc.InitWalletRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='wallet_password', full_name='lnrpc.InitWalletRequest.wallet_password', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cipher_seed_mnemonic', full_name='lnrpc.InitWalletRequest.cipher_seed_mnemonic', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='aezeed_passphrase', full_name='lnrpc.InitWalletRequest.aezeed_passphrase', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='recovery_window', full_name='lnrpc.InitWalletRequest.recovery_window', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='channel_backups', full_name='lnrpc.InitWalletRequest.channel_backups', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stateless_init', full_name='lnrpc.InitWalletRequest.stateless_init', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=184,
serialized_end=386,
)
_INITWALLETRESPONSE = _descriptor.Descriptor(
name='InitWalletResponse',
full_name='lnrpc.InitWalletResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='admin_macaroon', full_name='lnrpc.InitWalletResponse.admin_macaroon', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=388,
serialized_end=432,
)
_UNLOCKWALLETREQUEST = _descriptor.Descriptor(
name='UnlockWalletRequest',
full_name='lnrpc.UnlockWalletRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='wallet_password', full_name='lnrpc.UnlockWalletRequest.wallet_password', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='recovery_window', full_name='lnrpc.UnlockWalletRequest.recovery_window', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='channel_backups', full_name='lnrpc.UnlockWalletRequest.channel_backups', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stateless_init', full_name='lnrpc.UnlockWalletRequest.stateless_init', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=435,
serialized_end=582,
)
_UNLOCKWALLETRESPONSE = _descriptor.Descriptor(
name='UnlockWalletResponse',
full_name='lnrpc.UnlockWalletResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=584,
serialized_end=606,
)
_CHANGEPASSWORDREQUEST = _descriptor.Descriptor(
name='ChangePasswordRequest',
full_name='lnrpc.ChangePasswordRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='current_password', full_name='lnrpc.ChangePasswordRequest.current_password', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='new_password', full_name='lnrpc.ChangePasswordRequest.new_password', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stateless_init', full_name='lnrpc.ChangePasswordRequest.stateless_init', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='new_macaroon_root_key', full_name='lnrpc.ChangePasswordRequest.new_macaroon_root_key', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=608,
serialized_end=734,
)
_CHANGEPASSWORDRESPONSE = _descriptor.Descriptor(
name='ChangePasswordResponse',
full_name='lnrpc.ChangePasswordResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='admin_macaroon', full_name='lnrpc.ChangePasswordResponse.admin_macaroon', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=736,
serialized_end=784,
)
_INITWALLETREQUEST.fields_by_name['channel_backups'].message_type = rpc__pb2._CHANBACKUPSNAPSHOT
_UNLOCKWALLETREQUEST.fields_by_name['channel_backups'].message_type = rpc__pb2._CHANBACKUPSNAPSHOT
DESCRIPTOR.message_types_by_name['GenSeedRequest'] = _GENSEEDREQUEST
DESCRIPTOR.message_types_by_name['GenSeedResponse'] = _GENSEEDRESPONSE
DESCRIPTOR.message_types_by_name['InitWalletRequest'] = _INITWALLETREQUEST
DESCRIPTOR.message_types_by_name['InitWalletResponse'] = _INITWALLETRESPONSE
DESCRIPTOR.message_types_by_name['UnlockWalletRequest'] = _UNLOCKWALLETREQUEST
DESCRIPTOR.message_types_by_name['UnlockWalletResponse'] = _UNLOCKWALLETRESPONSE
DESCRIPTOR.message_types_by_name['ChangePasswordRequest'] = _CHANGEPASSWORDREQUEST
DESCRIPTOR.message_types_by_name['ChangePasswordResponse'] = _CHANGEPASSWORDRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GenSeedRequest = _reflection.GeneratedProtocolMessageType('GenSeedRequest', (_message.Message,), {
'DESCRIPTOR' : _GENSEEDREQUEST,
'__module__' : 'walletunlocker_pb2'
# @@protoc_insertion_point(class_scope:lnrpc.GenSeedRequest)
})
_sym_db.RegisterMessage(GenSeedRequest)
GenSeedResponse = _reflection.GeneratedProtocolMessageType('GenSeedResponse', (_message.Message,), {
'DESCRIPTOR' : _GENSEEDRESPONSE,
'__module__' : 'walletunlocker_pb2'
# @@protoc_insertion_point(class_scope:lnrpc.GenSeedResponse)
})
_sym_db.RegisterMessage(GenSeedResponse)
InitWalletRequest = _reflection.GeneratedProtocolMessageType('InitWalletRequest', (_message.Message,), {
'DESCRIPTOR' : _INITWALLETREQUEST,
'__module__' : 'walletunlocker_pb2'
# @@protoc_insertion_point(class_scope:lnrpc.InitWalletRequest)
})
_sym_db.RegisterMessage(InitWalletRequest)
InitWalletResponse = _reflection.GeneratedProtocolMessageType('InitWalletResponse', (_message.Message,), {
'DESCRIPTOR' : _INITWALLETRESPONSE,
'__module__' : 'walletunlocker_pb2'
# @@protoc_insertion_point(class_scope:lnrpc.InitWalletResponse)
})
_sym_db.RegisterMessage(InitWalletResponse)
UnlockWalletRequest = _reflection.GeneratedProtocolMessageType('UnlockWalletRequest', (_message.Message,), {
'DESCRIPTOR' : _UNLOCKWALLETREQUEST,
'__module__' : 'walletunlocker_pb2'
# @@protoc_insertion_point(class_scope:lnrpc.UnlockWalletRequest)
})
_sym_db.RegisterMessage(UnlockWalletRequest)
UnlockWalletResponse = _reflection.GeneratedProtocolMessageType('UnlockWalletResponse', (_message.Message,), {
'DESCRIPTOR' : _UNLOCKWALLETRESPONSE,
'__module__' : 'walletunlocker_pb2'
# @@protoc_insertion_point(class_scope:lnrpc.UnlockWalletResponse)
})
_sym_db.RegisterMessage(UnlockWalletResponse)
ChangePasswordRequest = _reflection.GeneratedProtocolMessageType('ChangePasswordRequest', (_message.Message,), {
'DESCRIPTOR' : _CHANGEPASSWORDREQUEST,
'__module__' : 'walletunlocker_pb2'
# @@protoc_insertion_point(class_scope:lnrpc.ChangePasswordRequest)
})
_sym_db.RegisterMessage(ChangePasswordRequest)
ChangePasswordResponse = _reflection.GeneratedProtocolMessageType('ChangePasswordResponse', (_message.Message,), {
'DESCRIPTOR' : _CHANGEPASSWORDRESPONSE,
'__module__' : 'walletunlocker_pb2'
# @@protoc_insertion_point(class_scope:lnrpc.ChangePasswordResponse)
})
_sym_db.RegisterMessage(ChangePasswordResponse)
DESCRIPTOR._options = None
_WALLETUNLOCKER = _descriptor.ServiceDescriptor(
name='WalletUnlocker',
full_name='lnrpc.WalletUnlocker',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=787,
serialized_end=1080,
methods=[
_descriptor.MethodDescriptor(
name='GenSeed',
full_name='lnrpc.WalletUnlocker.GenSeed',
index=0,
containing_service=None,
input_type=_GENSEEDREQUEST,
output_type=_GENSEEDRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='InitWallet',
full_name='lnrpc.WalletUnlocker.InitWallet',
index=1,
containing_service=None,
input_type=_INITWALLETREQUEST,
output_type=_INITWALLETRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='UnlockWallet',
full_name='lnrpc.WalletUnlocker.UnlockWallet',
index=2,
containing_service=None,
input_type=_UNLOCKWALLETREQUEST,
output_type=_UNLOCKWALLETRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ChangePassword',
full_name='lnrpc.WalletUnlocker.ChangePassword',
index=3,
containing_service=None,
input_type=_CHANGEPASSWORDREQUEST,
output_type=_CHANGEPASSWORDRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_WALLETUNLOCKER)
DESCRIPTOR.services_by_name['WalletUnlocker'] = _WALLETUNLOCKER
# @@protoc_insertion_point(module_scope)
|
py | 1a36169f796d511ef8b177da249a907d38b8ef23 | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
def setUp(self) -> None:
self.client = APIClient()
def test_create_valid_user_success(self) -> None:
"""Test creating user with valid payload is successful"""
payload = {
'email': '[email protected]',
'password': 'pass@123',
'name': 'Test Name'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""Test creating a user that already exists fall"""
payload = {'email': '[email protected]', 'password': 'pass@1234', 'name': 'Test'}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test that the password must be more than 5 characters"""
payload = {'email': '[email protected]', 'password': 'pass@', 'name': 'Test'}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that a token is created for the user"""
payload = {'email': '[email protected]', 'password': 'pass@123', 'name': 'Test'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
"""Test that token is not created if invalid credentials are given"""
create_user(email='[email protected]', password='pass@123')
payload = {'email': '[email protected]', 'password': 'wrongpass'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test that token is not created if user doesn't exists"""
payload = {'email': '[email protected]', 'password': 'pass@123'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test that email & password are required"""
res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
"""Test that authentication is required for users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTest(TestCase):
"""Test API requests that require authentication"""
def setUp(self) -> None:
self.user = create_user(
email='[email protected]',
password='pass@123',
name='Vivek Pawar'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email
})
def test_post_me_not_allowed(self):
"""Test that POST is not allowed on the url"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""test updating the user profile for authentication user"""
payload = {'name': 'Vivek Pawar', 'password': 'pass@123'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
|
py | 1a3616b61cc48f65c6a7989293c9e978921dfdf8 | from util.util import autoregister
autoregister('lrs') |
py | 1a3617bd1a8f39b36c5ef837836c5e8b0905fde7 | import re
from models import Landmark
from utils import session_scope
NORTH = 0
EAST = 1
SOUTH = 2
WEST = 3
LEFT = -1
RIGHT = 1
SIDES_OF_WORLD = {'north': NORTH, 'east': EAST, 'south': SOUTH, 'west': WEST}
ALL_SIDES_OF_THE_WORLD = ['north', 'east', 'south', 'west']
LEFT_RIGHT = {'left': LEFT, 'right': RIGHT}
class RoutingPointObj:
def __init__(self, start_point='', end_point=''):
self.start_point = start_point
self.end_point = end_point
def __repr__(self):
return '"start_point": {start_point} ' \
'"end_point": {end_point}'.format(start_point=self.start_point,
end_point=self.end_point)
class RoutingException(Exception):
def __int__(self, message=''):
self.message = 'Routing mechanism can\'t handle this route, plz adhere to the established format'
class RouteParser:
routing_points = []
looking_at = 0
def _parse_command(self, data):
original_command = next(data)
command = original_command.lower()
if 'start' in command:
return self._parse_start_command(command)
elif command.lower().startswith('turn'):
return self._parse_turn_command(command, data)
elif 'landmark' in command:
return self._get_landmark_point(original_command)
elif {'north', 'south', 'west', 'east'}.intersection(command.split(' ')):
return self._calc_distance_with_side(command)
else:
# that's mean command like 'go 3 blocks
return self._calc_distance(command)
@staticmethod
def _get_landmark_point(command):
# search landmark by name
landmark_name = re.search(r"'(.*?)'", command, re.DOTALL).group(1)
with session_scope() as session:
landmark = session.query(Landmark).filter_by(name=landmark_name).scalar()
return landmark.coordinate
def parse_routing_points(self, route):
result = []
data = self._read_route_file(route)
try:
while True:
stop_point = self._parse_command(data)
self.routing_points.append(stop_point)
except StopIteration:
for idx, val in enumerate(self.routing_points):
try:
result.append([val, self.routing_points[idx + 1]])
except IndexError:
break
return result
@staticmethod
def _parse_start_command(command):
        pattern = r'\((.+?)\)'
result = re.search(pattern, command)
if result:
return result.group()
@staticmethod
def _read_route_file(file):
        with open(file, 'r') as f:
            while True:
                data = f.readline().rstrip()
                if not data:
                    break
                yield data
def _parse_turn_command(self, command, data):
# this method should parse the command like 'Turn right/left'
# return new side of the world
turn_command = command.lower()
side_str = 'right' if 'right' in turn_command else 'left'
side = int(LEFT_RIGHT[side_str])
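        # looking_at cycles NORTH(0) -> EAST(1) -> SOUTH(2) -> WEST(3); a left turn
        # from NORTH wraps around to WEST(3) and a right turn from WEST wraps to NORTH(0)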
if self.looking_at + side < 0:
self.looking_at = 3
elif self.looking_at + side > 3:
self.looking_at = 0
else:
self.looking_at = self.looking_at + side
# according to rules after turn we should start movement to landmark or just go to some blocks
next_original_command = next(data)
next_command = next_original_command.lower()
if 'landmark' in next_command:
landmark = self._get_landmark_point(next_original_command)
if self._is_landmark_valid(self._get_current_point(), self._convert_points(landmark)):
return landmark
else:
# the unit would never reach that landmark while facing this direction
raise RoutingException
else:
return self._calc_distance(next_command)
def _get_current_point(self):
if self.routing_points:
current_point = self.routing_points[-1]
return self._convert_points(current_point)
else:
raise RoutingException
def _calc_distance_with_side(self, command):
next_view = set(ALL_SIDES_OF_THE_WORLD).intersection(command.split(' ')).pop()
self.looking_at = SIDES_OF_WORLD[next_view]
return self._calc_distance(command)
def _is_landmark_valid(self, current_point, landmark):
curr_x, curr_y = current_point
land_x, land_y = landmark
if (self.looking_at == NORTH and land_y < curr_y) or \
(self.looking_at == SOUTH and land_y > curr_y) or \
(self.looking_at == EAST and land_x < curr_x) or \
(self.looking_at == WEST and land_x > curr_x):
return False
return True
@staticmethod
def _convert_points(points):
'''
:param points: coordinate points like "(0,0)"
:return: tuple of int values, e.g. (0, 0)
'''
result = [int(s.strip('()')) for s in points.split(',')]
x, y = result
return x, y
def _calc_distance(self, command):
x, y = self._get_current_point()
value = [int(s) for s in command.split(' ') if s.isdigit()]
if len(value) > 1:
raise RoutingException
else:
value = value[0]
if self.looking_at == NORTH:
y += value
elif self.looking_at == EAST:
x += value
elif self.looking_at == SOUTH:
y -= value
elif self.looking_at == WEST:
x -= value
if x < 0: x = 0
if y < 0: y = 0
return '({x},{y})'.format(x=x, y=y)
|
py | 1a361932d52cc118cdf619ba1c926285222e6ccb | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Plotting terminal based histograms
"""
from __future__ import print_function
from __future__ import division
import os
import sys
import math
import optparse
from os.path import dirname
from .utils.helpers import *
from .utils.commandhelp import hist
def calc_bins(n, min_val, max_val, h=None, binwidth=None):
"""
Calculate number of bins for the histogram
"""
if not h:
h = max(10, math.log(n + 1, 2))
if binwidth == 0:
binwidth = 0.1
if binwidth is None:
binwidth = (max_val - min_val) / h
for b in drange(min_val, max_val, step=binwidth, include_stop=True):
if b.is_integer():
yield int(b)
else:
yield b
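# A small worked example (illustrative only, assuming drange() walks from
# min_val to max_val inclusive when include_stop=True):
#
#     list(calc_bins(n=100, min_val=0.0, max_val=10.0, h=10))
#     # binwidth = (10.0 - 0.0) / 10 = 1.0, so the edges come out as
#     # [0, 1, 2, ..., 10], with integral edges yielded as ints.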
def read_numbers(numbers):
"""
Read the input data in the most efficient way
"""
if isiterable(numbers):
for number in numbers:
yield float(str(number).strip())
else:
with open(numbers) as fh:
for number in fh:
yield float(number.strip())
def run_demo():
"""
Run a demonstration
"""
module_dir = dirname(dirname(os.path.realpath(__file__)))
demo_file = os.path.join(module_dir, 'examples/data/exp.txt')
if not os.path.isfile(demo_file):
sys.stderr.write("demo input file not found!\n")
sys.stderr.write("run the downloaddata.sh script in the example first\n")
sys.exit(1)
# plotting a histogram
print("plotting a basic histogram")
print("plot_hist('%s')" % demo_file)
print("hist -f %s" % demo_file)
print("cat %s | hist" % demo_file)
plot_hist(demo_file)
print("*" * 80)
# with colours
print("histogram with colours")
print("plot_hist('%s', colour='blue')" % demo_file)
print("hist -f %s -c blue" % demo_file)
plot_hist(demo_file, colour='blue')
print("*" * 80)
# changing the shape of the point
print("changing the shape of the bars")
print("plot_hist('%s', pch='.')" % demo_file)
print("hist -f %s -p ." % demo_file)
plot_hist(demo_file, pch='.')
print("*" * 80)
# changing the size of the plot
print("changing the size of the plot")
print("plot_hist('%s', height=35.0, bincount=40)" % demo_file)
print("hist -f %s -s 35.0 -b 40" % demo_file)
plot_hist(demo_file, height=35.0, bincount=40)
def plot_hist(f, height=20.0, bincount=None, binwidth=None, pch="o", colour="default", title="", xlab=None, showSummary=False, regular=False, xtitle=None, ytitle=None):
"""
Make a histogram
Arguments:
height -- the height of the histogram in # of lines
bincount -- number of bins in the histogram
binwidth -- width of bins in the histogram
pch -- shape of the bars in the plot
colour -- colour of the bars in the terminal
title -- title at the top of the plot
xlab -- boolean value for whether or not to display x-axis labels
showSummary -- boolean value for whether or not to display a summary
regular -- boolean value for whether or not to start y-labels at 0
xtitle -- optional title for the x-axis
ytitle -- optional title for the y-axis
"""
if pch is None:
pch = "o"
if isinstance(f, str):
with open(f) as fh:
f = fh.readlines()
min_val, max_val = None, None
n, mean, sd = 0.0, 0.0, 0.0
for number in read_numbers(f):
n += 1
if min_val is None or number < min_val:
min_val = number
if max_val is None or number > max_val:
max_val = number
mean += number
mean /= n
for number in read_numbers(f):
sd += (mean - number)**2
sd /= (n - 1)
sd **= 0.5
bins = list(calc_bins(n, min_val, max_val, bincount, binwidth))
hist = dict((i, 0) for i in range(len(bins)))
for number in read_numbers(f):
for i, b in enumerate(bins):
if number <= b:
hist[i] += 1
break
if number == max_val and max_val > bins[len(bins) - 1]:
hist[len(hist) - 1] += 1
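# The equality check above catches values equal to max_val that fall just past
# the last computed bin edge (a floating-point boundary case) and counts them
# in the final bin instead of silently dropping them.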
min_y, max_y = min(hist.values()), max(hist.values())
start = max(min_y, 1)
stop = max_y + 1
if regular:
start = 1
if height is None:
height = stop - start
if height > 20:
height = 20
ys = list(drange(start, stop, float(stop - start) / height))
ys.reverse()
nlen = max(len(str(min_y)), len(str(max_y))) + 1
if title:
print(box_text([title], max(len(hist) * 2, len(title)), nlen))
print()
if ytitle:
print(" " + "y: "+ ytitle + "\n")
# return_string += "y: "+ ytitle + "\n"
used_labs = set()
for y in ys:
ylab = str(int(y))
if ylab in used_labs:
ylab = ""
else:
used_labs.add(ylab)
ylab = " " * (nlen - len(ylab)) + ylab + "|"
print(ylab, end=' ')
for i in range(len(hist)):
if int(y) <= hist[i]:
printcolour(pch, True, colour)
else:
printcolour(" ", True, colour)
print('')
xs = hist.keys()
print(" " * (nlen + 1) + "-" * len(xs))
if xlab:
labels = abbreviate([str(b) for b in bins])
xlen = len(labels[0])
for i in range(0, xlen):
printcolour(" " * (nlen + 1), True, colour)
for x in range(0, len(hist)):
num = labels[x]
if x % 2 != 0:
pass
elif i < len(num):
print(num[i], end=' ')
else:
print(" ", end=' ')
print('')
if xtitle:
full_title = "x: "+ xtitle
print(" " * ((nlen + 1) + len(xs) - len(full_title)) + full_title + "\n")
# return_string += " " * (xs - len(full_title)) + full_title + "\n"
center = max(map(len, map(str, [n, min_val, mean, max_val])))
center += 15
if showSummary:
print()
title = ["Summary"]
print(box_text(title, max(len(hist) * 2, len(title)), nlen))
stats = ["observations: %d" % n, "min value: %f" % min_val,
"mean : %f" % mean, "std dev : %f" % sd, "max value: %f" % max_val]
print(box_text(stats, max(len(hist) * 2, len(title)), nlen))
# print("-" * (2 + center))
# print("|" + "Summary".center(center) + "|")
# print("-" * (2 + center))
# summary = "|" + ("observations: %d" % n).center(center) + "|\n"
# summary += "|" + ("min value: %f" % min_val).center(center) + "|\n"
# summary += "|" + ("mean : %f" % mean).center(center) + "|\n"
# summary += "|" + ("std dev : %f" % sd).center(center) + "|\n"
# summary += "|" + ("max value: %f" % max_val).center(center) + "|\n"
# summary += "-" * (2 + center)
# print(summary)
def main():
parser = optparse.OptionParser(usage=hist['usage'])
parser.add_option(
'-f', '--file', help='a file containing a column of numbers', default=None, dest='f')
parser.add_option('-t', '--title', help='title for the chart', default="", dest='t')
parser.add_option(
'-b', '--bins', help='number of bins in the histogram', type='int', default=None, dest='b')
parser.add_option('-w', '--binwidth', help='width of bins in the histogram',
type='float', default=None, dest='binwidth')
parser.add_option('-s', '--height', help='height of the histogram (in lines)',
type='int', default=None, dest='h')
parser.add_option('-p', '--pch', help='shape of each bar', default='o', dest='p')
parser.add_option('-x', '--xlab', help='label bins on x-axis',
default=None, action="store_true", dest='x')
parser.add_option('-c', '--colour', help='colour of the plot (%s)' %
colour_help, default='default', dest='colour')
parser.add_option('-d', '--demo', help='run demos', action='store_true', dest='demo')
parser.add_option('-n', '--nosummary', help='hide summary',
action='store_false', dest='showSummary', default=True)
parser.add_option('-r', '--regular',
help='use regular y-scale (0 - maximum y value), instead of truncated y-scale (minimum y-value - maximum y-value)',
default=False, action="store_true", dest='regular')
opts, args = parser.parse_args()
if opts.f is None:
if len(args) > 0:
opts.f = args[0]
elif opts.demo is None or opts.demo is False:
opts.f = sys.stdin.readlines()
if opts.demo:
run_demo()
elif opts.f:
plot_hist(opts.f, opts.h, opts.b, opts.binwidth, opts.p, opts.colour,
opts.t, opts.x, opts.showSummary, opts.regular)
else:
print("nothing to plot!")
if __name__ == "__main__":
main()
|
py | 1a361a0ae4af456edbb2e6c8a75b7467e6d8d08a | from django.conf.urls import url
from rest_framework_jwt.views import obtain_jwt_token
from .views import UniversalTruth
urlpatterns = [
url(r'^jwt-login/$', obtain_jwt_token),
url(r'truth/$', UniversalTruth.as_view(), name='truth')
]
|
py | 1a361c509b7bc46c4ecc94fdf70dc850aca6419f | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import pytest
from flash import DataKeys
from flash.core.utilities.imports import _TEXT_TESTING
from flash.text import TranslationData
TEST_CSV_DATA = """input,target
this is a sentence one,this is a translated sentence one
this is a sentence two,this is a translated sentence two
this is a sentence three,this is a translated sentence three
"""
TEST_JSON_DATA = """
{"input": "this is a sentence one","target":"this is a translated sentence one"}
{"input": "this is a sentence two","target":"this is a translated sentence two"}
{"input": "this is a sentence three","target":"this is a translated sentence three"}
"""
TEST_JSON_DATA_FIELD = """{"data": [
{"input": "this is a sentence one","target":"this is a translated sentence one"},
{"input": "this is a sentence two","target":"this is a translated sentence two"},
{"input": "this is a sentence three","target":"this is a translated sentence three"}]}
"""
def csv_data(tmpdir):
path = Path(tmpdir) / "data.csv"
path.write_text(TEST_CSV_DATA)
return path
def json_data(tmpdir):
path = Path(tmpdir) / "data.json"
path.write_text(TEST_JSON_DATA)
return path
def json_data_with_field(tmpdir):
path = Path(tmpdir) / "data.json"
path.write_text(TEST_JSON_DATA_FIELD)
return path
@pytest.mark.skipif(os.name == "nt", reason="Huggingface timing out on Windows")
@pytest.mark.skipif(not _TEXT_TESTING, reason="text libraries aren't installed.")
def test_from_csv(tmpdir):
csv_path = csv_data(tmpdir)
dm = TranslationData.from_csv(
"input",
"target",
train_file=csv_path,
batch_size=1,
)
batch = next(iter(dm.train_dataloader()))
assert isinstance(batch[DataKeys.INPUT][0], str)
assert isinstance(batch[DataKeys.TARGET][0], str)
@pytest.mark.skipif(os.name == "nt", reason="Huggingface timing out on Windows")
@pytest.mark.skipif(not _TEXT_TESTING, reason="text libraries aren't installed.")
def test_from_files(tmpdir):
csv_path = csv_data(tmpdir)
dm = TranslationData.from_csv(
"input",
"target",
train_file=csv_path,
val_file=csv_path,
test_file=csv_path,
batch_size=1,
)
batch = next(iter(dm.val_dataloader()))
assert isinstance(batch[DataKeys.INPUT][0], str)
assert isinstance(batch[DataKeys.TARGET][0], str)
batch = next(iter(dm.test_dataloader()))
assert isinstance(batch[DataKeys.INPUT][0], str)
assert isinstance(batch[DataKeys.TARGET][0], str)
@pytest.mark.skipif(os.name == "nt", reason="Huggingface timing out on Windows")
@pytest.mark.skipif(not _TEXT_TESTING, reason="text libraries aren't installed.")
def test_from_json(tmpdir):
json_path = json_data(tmpdir)
dm = TranslationData.from_json(
"input",
"target",
train_file=json_path,
batch_size=1,
)
batch = next(iter(dm.train_dataloader()))
assert isinstance(batch[DataKeys.INPUT][0], str)
assert isinstance(batch[DataKeys.TARGET][0], str)
@pytest.mark.skipif(os.name == "nt", reason="Huggingface timing out on Windows")
@pytest.mark.skipif(not _TEXT_TESTING, reason="text libraries aren't installed.")
def test_from_json_with_field(tmpdir):
json_path = json_data_with_field(tmpdir)
dm = TranslationData.from_json(
"input",
"target",
train_file=json_path,
batch_size=1,
field="data",
)
batch = next(iter(dm.train_dataloader()))
assert isinstance(batch[DataKeys.INPUT][0], str)
assert isinstance(batch[DataKeys.TARGET][0], str)
|
py | 1a361d16948fc4c8835a0db3844efde96c467dbd | from osbot_aws.helpers.Test_Helper import Test_Helper
from osbot_elastic.Elastic_Search import Elastic_Search
from osbot_utils.utils.Dev import Dev
class Test_Elastic_Search(Test_Helper):
def setUp(self):
self.index = 'test-index'
self.secret_id = 'gw-elastic-server-1'
self.elastic = Elastic_Search(self.index, self.secret_id)
self.result = None
def tearDown(self) -> None:
if self.result is not None:
Dev.pprint(self.result)
def test_create_index(self):
self.elastic.create_index()._result
self.elastic.add({'answer':42})
self.result = self.elastic.create_index_pattern()._result
#self.elastic.index = 'test-index*'
#self.elastic.delete_index_pattern()
#self.elastic.delete_index()
self.result = self.elastic._result
def test_info(self):
info = self.elastic.es.info()
assert info['tagline'] == 'You Know, for Search'
list(set(info)) == ['version', 'tagline', 'cluster_name', 'cluster_uuid', 'name']
def test_test_info(self):
assert '.apm-agent-configuration' in self.elastic.index_list()
def test_add_data_with_timestamp(self):
data = { 'answer' : 42}
response = self.elastic.add_data_with_timestamp(data)
assert response.get("_index") == self.elastic.index
def test_index_list(self):
self.result = self.elastic.index_list()
def test_search_using_query(self):
term = 'jira'
query = {"query": { "wildcard": { "Summary": term}}}
result = list(self.elastic.search_using_query(query))
assert term in result.pop(0)['Summary'].lower()
assert len(result) > 20
def test_test_search_using_query___large_query(self):
query = {"_source": ["Key", "Issue Links"], }
result = list(self.elastic.set_index('sec_project').search_using_query(query))
Dev.pprint(len(result))
def test_get_data_between_dates(self):
results = self.elastic.get_data_between_dates("Created", "now-1d", "now")
Dev.pprint(len(results))
for issue in results:
print(issue.get('Summary'))
def test_index_list_contains_jira(self):
assert 'jira' in self.elastic.index_list()
def test_search_using_lucene(self):
#query = "Summary:jira"
query = 'Project:RISK AND Status:"Fixed"'
self.index = "*"
results = list(self.elastic.search_using_lucene(query))
#for issue in results:
# print('{0:10} {1:10} {2:20} {3}'.format(issue.get('Key'), issue.get('Project'),issue.get('Status'),issue.get('Summary')))
assert len(results) > 100
def test_search_using_lucene____Issue_with_Epic(self):
self.elastic.index = 'it_assets'
query = '"GSOKR-924"'
results = list(self.elastic.search_using_lucene(query))
assert len(results) == 25 |
py | 1a361da5ccb3260d87301aa7fe593936ae7cae71 | from flask import Flask
import pytest
import os
import importlib
import sys
import traceback
MODULE_NAMES = ['numpy']
modules = {}
for m in MODULE_NAMES:
try:
modules[m] = importlib.import_module(m)
except ImportError:
modules[m] = None
app = Flask(__name__)
@app.route('/<module_name>')
def in_module_tests(module_name):
if module_name not in modules:
return "This module is not listed"
try:
result = modules[module_name].test()
num_failures = result.failures
result_string = "{}: number of failures={}".format(module_name, len(num_failures))
except (NameError, ImportError, AttributeError):
result_string = "{}: Error running test!".format(module_name)
return result_string
@app.route('/all')
def run_all():
results = "<br>\n".join([in_module_tests(m) for m in MODULE_NAMES])
return str(results)
def module_version(module_name):
m = modules[module_name]
if m is None:
version_string = "{}: unable to import".format(module_name)
else:
version_string = "{}: {}".format(module_name, m.__version__)
return version_string
@app.route('/')
def root():
versions = "<br>\n".join([module_version(m) for m in MODULE_NAMES])
python_version = "\npython-version%s\n" % sys.version
r = """<br><br>
Imports Successful!<br>
To test each module go to /numpy
or test all at /all.<br>
Test suites can take up to 10 minutes to run, main output is in app logs."""
return python_version + versions + r
if __name__ == '__main__':
try:
port = int(os.getenv("PORT", 8080))
app.run(host='0.0.0.0', port=port, debug=True)
except Exception as e:
traceback.print_exc()
raise e
|
py | 1a361e2958ddcccd24c567e8cfe3f28446760c2b | import pyopencl as cl
import numpy
# Create context and command queue
platform = cl.get_platforms()[0]
devices = platform.get_devices()
context = cl.Context(devices)
queue = cl.CommandQueue(context)
# Open program file and build
program_file = open('mult.cl', 'r')
program_text = program_file.read()
program = cl.Program(context, program_text)
try:
program.build()
except:
print("Build log:")
print(program.get_build_info(devices[0],
cl.program_build_info.LOG))
raise
# Create arguments for kernel: a scalar, a LocalMemory object, and a buffer
scalar = numpy.float32(5.0)
lm = cl.LocalMemory(100 * 32)
float_data = numpy.linspace(1, 100, 100).astype(numpy.float32)
float_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR,
hostbuf=float_data)
# Create, configure, and execute kernel (Seems too easy, doesn't it?)
program.mult(queue, (25,), (25,), scalar, float_buffer, lm)
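# A global size of (25,) with a local size of (25,) launches a single
# work-group of 25 work-items. The kernel itself lives in mult.cl and is
# assumed here to scale the buffer contents by `scalar`, using the
# LocalMemory allocation as per-work-group scratch space.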
# Read data back from buffer
cl.enqueue_read_buffer(queue, float_buffer, float_data).wait()
print(float_data)
|
py | 1a361efd431be5f818cfffb05705446d0021b376 | from django.forms import models as model_forms
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.views.generic.base import TemplateResponseMixin, View
from django.views.generic.detail import (SingleObjectMixin,
SingleObjectTemplateResponseMixin, BaseDetailView)
class FormMixin(object):
"""
A mixin that provides a way to show and handle a form in a request.
"""
initial = {}
form_class = None
success_url = None
def get_initial(self):
"""
Returns the initial data to use for forms on this view.
"""
return self.initial
def get_form_class(self):
"""
Returns the form class to use in this view
"""
return self.form_class
def get_form(self, form_class):
"""
Returns an instance of the form to be used in this view.
"""
if self.request.method in ('POST', 'PUT'):
return form_class(
self.request.POST,
self.request.FILES,
initial=self.get_initial()
)
else:
return form_class(
initial=self.get_initial()
)
def get_context_data(self, **kwargs):
return kwargs
def get_success_url(self):
if self.success_url:
url = self.success_url
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
return url
def form_valid(self, form):
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
class ModelFormMixin(FormMixin, SingleObjectMixin):
"""
A mixin that provides a way to show and handle a modelform in a request.
"""
def get_form_class(self):
"""
Returns the form class to use in this view
"""
if self.form_class:
return self.form_class
else:
if self.model is None:
model = self.queryset.model
else:
model = self.model
return model_forms.modelform_factory(model)
def get_form(self, form_class):
"""
Returns a form instantiated with the model instance from get_object().
"""
if self.request.method in ('POST', 'PUT'):
return form_class(
self.request.POST,
self.request.FILES,
initial=self.get_initial(),
instance=self.object,
)
else:
return form_class(
initial=self.get_initial(),
instance=self.object,
)
def get_success_url(self):
if self.success_url:
url = self.success_url
else:
try:
url = self.object.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
"No URL to redirect to. Either provide a url or define"
" a get_absolute_url method on the Model.")
return url
def form_valid(self, form):
self.object = form.save()
return super(ModelFormMixin, self).form_valid(form)
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
def get_context_data(self, **kwargs):
context = kwargs
if self.object:
context['object'] = self.object
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
return context
class ProcessFormView(View):
"""
A mixin that processes a form on POST.
"""
def get(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
return self.render_to_response(self.get_context_data(form=form))
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
put = post
class BaseFormView(FormMixin, ProcessFormView):
"""
A base view for displaying a form
"""
class FormView(TemplateResponseMixin, BaseFormView):
"""
A view for displaying a form, and rendering a template response.
"""
class BaseCreateView(ModelFormMixin, ProcessFormView):
"""
Base view for creating an new object instance.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).post(request, *args, **kwargs)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
put = post
class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
"""
View for creating an new object instance,
with a response rendered by template.
"""
template_name_suffix = '_form'
class BaseUpdateView(ModelFormMixin, ProcessFormView):
"""
Base view for updating an existing object.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).post(request, *args, **kwargs)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
put = post
class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
"""
View for updating an object,
with a response rendered by template..
"""
template_name_suffix = '_form'
class DeletionMixin(object):
"""
A mixin providing the ability to delete objects
"""
success_url = None
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
return HttpResponseRedirect(self.get_success_url())
# Add support for browsers which only accept GET and POST for now.
post = delete
def get_success_url(self):
if self.success_url:
return self.success_url
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
class BaseDeleteView(DeletionMixin, BaseDetailView):
"""
Base view for deleting an object.
Using this base class requires subclassing to provide a response mixin.
"""
class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
"""
View for deleting an object retrieved with `self.get_object()`,
with a response rendered by template.
"""
template_name_suffix = '_confirm_delete'
|
py | 1a361fd5bdd9cbdbf4ee82fa0c5fd5645b920a9e | import glob
import inspect
import os
import re
import sys
import textwrap
from functools import partial, wraps
from unittest.mock import MagicMock
import pytest
from toolz import curry
from prefect import Task, task
from prefect.engine.state import State
from prefect.utilities.tasks import defaults_from_attrs
try:
from generate_docs import (
load_outline,
create_absolute_path,
format_code,
format_doc,
format_lists,
format_signature,
format_subheader,
get_call_signature,
get_class_methods,
patch_imports,
build_example,
VALID_DOCSTRING_SECTIONS,
)
with patch_imports():
OUTLINE = load_outline()
except ImportError:
pytestmark = pytest.skip(
"Documentation requirements not installed.", allow_module_level=True
)
pytest.mark.skipif(sys.version_info < (3, 6))
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
def consistency_check(obj, obj_name):
patt = re.compile(r"(?<=>`)(.*?)(?=[\(|`:])")
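# The lookbehind/lookahead pair above pulls argument names out of the rendered
# HTML: it captures text that follows ">`" and stops at "(", "|", "`" or ":",
# i.e. the backticked names emitted for each entry in the **Args** list.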
doc = format_doc(obj)
try:
arg_list_index = doc.index("**Args**:")
end = doc[arg_list_index:].find("</ul")
arg_doc = doc[arg_list_index : (arg_list_index + end)]
doc_args = {arg.strip() for arg in patt.findall(arg_doc)}
except ValueError:
doc_args = set()
items = get_call_signature(obj)
actual_args = {(a if isinstance(a, str) else a[0]) for a in items}
undocumented = {
a for a in actual_args.difference(doc_args) if not a.startswith("_")
}
# If the sig contains **kwargs, any keyword is valid
if any(k.startswith("**") for k in actual_args):
non_existent = {}
else:
non_existent = {
a for a in doc_args.difference(actual_args) if not a.startswith("_")
}
if undocumented:
undoc_args = ", ".join(undocumented)
raise ValueError(
f"{obj_name} has arguments without documentation: {undoc_args}"
)
elif non_existent:
undoc_args = ", ".join(non_existent)
raise ValueError(
f"{obj_name} has documentation for arguments that aren't real: {undoc_args}"
)
def no_args():
pass
def one_arg(x):
pass
def one_string_kwarg(k="key"):
pass
def standard_sig(x, y, k="key", q=None, b=True):
pass
def varargs_no_default(*args, iso, **kwargs):
pass
def varargs_with_default(*args, iso=None, **kwargs):
pass
class CustomException(Exception):
"""
Docstring.
Args:
- x (Any, optional): Just a placeholder
"""
def __init__(self, x):
self.x = x
super().__init__()
class NamedException(Exception):
"""
Just a name, nothing more.
"""
class A:
"""
A class called "A".
Args:
- attr (str): meaningless
- keep (bool, optional): whatever, defaults to `True`
Raises:
- TypeError: if you don't provide `attr`
"""
def __init__(self, attr, keep=True):
pass
def run(self, *args, b=True, **kwargs):
pass
def y(self, *args, b, **kwargs):
pass
@classmethod
def from_nothing(cls, stuff=None):
pass
class MyTask(Task):
@defaults_from_attrs("y", "z")
def run(self, x, y=None, z=None):
return x, y, z
code = """
from prefect import task, Task, Flow
import random
@task
def random_number():
return random.randint(0, 100)
@task
def plus_one(x):
return x + 1
with Flow('My Functional Flow') as flow:
r = random_number()
y = plus_one(x=r)
"""
def test_tokenizer():
tokenized = format_code(code)
assert '<span class="token decorator">@task</span>' in tokenized
@pytest.mark.parametrize(
"obj,exp",
[
(no_args, ""),
(one_arg, "x"),
(one_string_kwarg, "k="key""),
(standard_sig, "x, y, k="key", q=None, b=True"),
(varargs_with_default, "*args, iso=None, **kwargs"),
(varargs_no_default, "*args, iso, **kwargs"),
(A, "attr, keep=True"),
(A.run, "*args, b=True, **kwargs"),
(A.y, "*args, b, **kwargs"),
(A.from_nothing, "stuff=None"),
(CustomException, "x"),
(NamedException, "*args, **kwargs"),
(MyTask.run, "x, y=None, z=None"),
],
)
def test_format_signature(obj, exp):
assert format_signature(obj) == exp
@pytest.mark.parametrize(
"obj,exp",
[
(no_args, ""),
(one_arg, "x"),
(one_string_kwarg, "k="key""),
(standard_sig, "x, y, k="key", q=None, b=True"),
(varargs_with_default, "*args, iso=None, **kwargs"),
(varargs_no_default, "*args, iso, **kwargs"),
(A.run, "*args, b=True, **kwargs"),
(A.y, "*args, b, **kwargs"),
(MyTask.run, "x, y=None, z=None"),
],
)
def test_format_signature_with_curry(obj, exp):
assert format_signature(curry(obj)) == exp
@pytest.mark.parametrize(
"obj,exp",
[(one_string_kwarg, "k=42"), (standard_sig, "x, y, k=42, q=None, b=True")],
)
def test_format_signature_with_partial(obj, exp):
new_func = partial(obj, k=42)
assert format_signature(new_func) == exp
@pytest.mark.parametrize(
"obj,exp",
[
(no_args, ""),
(one_arg, "x"),
(one_string_kwarg, "k="key""),
(standard_sig, "x, y, k="key", q=None, b=True"),
(varargs_with_default, "*args, iso=None, **kwargs"),
(varargs_no_default, "*args, iso, **kwargs"),
(A.run, "*args, b=True, **kwargs"),
(A.y, "*args, b, **kwargs"),
],
)
def test_format_signature_with_wraps(obj, exp):
@wraps(obj)
def new_func(*args, **kwargs):
return obj(*args, **kwargs)
assert format_signature(new_func) == exp
@pytest.mark.parametrize(
"obj,exp",
[(task, "prefect.utilities.tasks.task"), (State, "prefect.engine.state.State")],
)
def test_create_absolute_path_on_prefect_object(obj, exp):
path = create_absolute_path(obj)
assert path == exp
@pytest.mark.parametrize("obj,exp", [(A, "A"), (A.run, "A.run"), (no_args, "no_args")])
def test_create_absolute_path_on_nonprefect_object(obj, exp):
path = create_absolute_path(obj)
assert path == exp
def test_format_subheader_on_class():
doc = format_subheader(A)
assert doc == (
" ## A\n"
" <div class='class-sig' id='a'>"
'<p class="prefect-sig">class </p><p class="prefect-class">A</p>(attr, keep=True)<span class="source">[source]</span></div>\n\n'
)
def test_format_list_on_normal_doc():
doc = """
Does a thing.
Args:
- x (bool): it's x
- y (bool): it's y
Returns:
- whatever you want
Raises:
- NotImplementedError: because it doesnt exist
References:
- Example: https://example.com
"""
formatted_doc = format_lists(doc)
assert formatted_doc == (
"\n Does a thing.\n\n Args:\n "
'<ul class="args">'
'<li class="args">'
"`x (bool)`: it's x\n </li>"
'<li class="args">'
"`y (bool)`: it's y</li></ul>\n "
'Returns:\n <ul class="args">'
'<li class="args">whatever you want</li></ul>'
'\n\n Raises:\n <ul class="args">'
'<li class="args">`NotImplementedError`: because it doesnt exist</li></ul>\n '
'References:\n <ul class="args">'
'<li class="args">`Example`: https://example.com\n </li></ul>'
""
)
def test_format_doc_on_simple_doc():
def my_fun():
"""
Indicates that a task should not run and wait for manual execution.
Args:
- message (Any, optional): Defaults to `None`. A message about the signal.
"""
pass
formatted = format_doc(my_fun)
assert formatted == (
"Indicates that a task should not run and wait for manual execution.\n\n"
'**Args**: <ul class="args">'
'<li class="args">'
"`message (Any, optional)`: Defaults to `None`. A message about the signal.</li></ul>"
)
def test_format_doc_on_subclass_with_doc_but_inherited_init():
class Parent:
"""
This is the parent doc
Args:
- x (int): a number
"""
def __init__(self, x: int):
pass
def fn(self):
pass
class Child(Parent):
"""
This is the child doc
"""
def fn(self):
pass
doc = format_doc(Child)
expected = textwrap.dedent(
"""
This is the child doc
"""
).strip()
assert doc == expected
def test_format_doc_on_raw_exception():
formatted = format_doc(NamedException)
expected = textwrap.dedent(
"""
Just a name, nothing more.
"""
).strip()
assert formatted == expected
@pytest.mark.parametrize(
"fn", [fn for page in OUTLINE for fn in page.get("functions", [])]
)
def test_consistency_of_function_docs(fn):
consistency_check(fn, f"{fn.__name__}")
@pytest.mark.parametrize(
"obj", [obj for page in OUTLINE for obj, _ in page.get("classes", [])]
)
def test_consistency_of_class_docs(obj):
if isinstance(obj, MagicMock):
pytest.skip("Mocked classes from optional requirements cannot be checked")
consistency_check(obj, f"{obj.__module__}.{obj.__name__}")
@pytest.mark.parametrize(
"obj,fn",
[
(obj, fn)
for page in OUTLINE
for obj, methods in page.get("classes", [])
for fn in get_class_methods(obj, methods)
],
) # parametrized like this for easy reading of tests
def test_consistency_of_class_method_docs(obj, fn):
consistency_check(fn, f"{obj.__module__}.{obj.__name__}.{fn.__name__}")
def test_format_doc_removes_unnecessary_newlines_when_appropriate_in_tables():
def doc_fun():
"""
I am a
poorly formatte
d doc string.
Args:
- x (optional): actually not
really here
I talk too much.
Raises:
- TypeError: why not
Example:
```python
## TODO:
## put some
## code here
```
"""
pass
res = format_doc(doc_fun, in_table=True)
sub_string = (
'<p class="methods">I am a poorly formatte d doc string.<br><br>**Args**:'
)
assert sub_string in res
assert "<br>**Raises**:" in res
def test_format_doc_correctly_handles_code_blocks_outside_of_tables():
def doc_fun():
"""
A `dict` that also supports attribute ("dot") access. Think of this as an extension
to the standard python `dict` object.
Args:
- init_dict (dict, optional): dictionary to initialize the `DotDict`
with
- **kwargs (optional): key, value pairs with which to initialize the
`DotDict`
**Example**:
```python
dotdict = DotDict({'a': 34}, b=56, c=set())
dotdict.a # 34
dotdict['b'] # 56
dotdict.c # set()
```
"""
pass
res = format_doc(doc_fun)
sub_string = (
"**Example**: \n```python\n dotdict = DotDict({'a': 34},"
" b=56, c=set())\n dotdict.a # 34\n dotdict['b'] # 56\n dotdict.c # set()\n\n```"
)
assert sub_string in res
def test_format_doc_escapes_asteriks_inside_tables():
def my_doc():
"""
See:
```python
my_doc(**kwargs)
```
"""
pass
res = format_doc(my_doc, in_table=True)
assert res.count(r">\*<") == 2
all_objects = []
for page in OUTLINE:
all_objects.extend(page.get("functions", []))
for cls, methods in page.get("classes"):
all_objects.append(cls)
all_objects.extend(get_class_methods(cls, methods))
@pytest.mark.parametrize("obj", all_objects)
def test_section_headers_are_properly_formatted(obj):
doc = inspect.getdoc(obj)
if not doc:
return
for section in VALID_DOCSTRING_SECTIONS:
if re.search(f"^\\s*{section}\\s*$", doc, flags=re.M):
assert (
False
), f"{obj.__module__}.{obj.__qualname__} has a poorly formatted '{section}' header"
@pytest.mark.parametrize("path", glob.glob(os.path.join(ROOT, "examples", "*.py")))
def test_example(path):
rendered, flows = build_example(path)
for f in flows.keys():
# Assert there is a serialized Flow in storage
assert len(flows[f]["storage"]["flows"]) > 0
|
py | 1a361ff8af27499c33a3d6bb6530f68216382bb6 | from blueman.Functions import *
import gettext
from blueman.plugins.AppletPlugin import AppletPlugin
from blueman.main.SignalTracker import SignalTracker
from gi.repository import GObject
from gi.repository import Gtk
class DiscvManager(AppletPlugin):
__depends__ = ["Menu"]
__author__ = "Walmis"
__icon__ = "gtk-find"
__description__ = _(
"Provides a menu item for making the default adapter temporarily visible when it is set to hidden by default")
__options__ = {
"time": {
"type": int,
"default": 60,
"name": _("Discoverable timeout"),
"desc": _("Amount of time in seconds discoverable mode will last"),
"range": (60, 600)
}
}
def on_load(self, applet):
self.Signals = SignalTracker()
self.item = create_menuitem(_("_Make Discoverable"), get_icon("gtk-find", 16))
applet.Plugins.Menu.Register(self, self.item, 20, False)
self.Applet = applet
self.adapter = None
self.time_left = -1
self.Signals.Handle(self.item, "activate", self.on_set_discoverable)
self.item.props.tooltip_text = _("Make the default adapter temporarily visible")
self.timeout = None
def on_unload(self):
self.Applet.Plugins.Menu.Unregister(self)
del self.item
if self.timeout:
GObject.source_remove(self.timeout)
self.Signals.DisconnectAll()
def on_manager_state_changed(self, state):
if state:
self.init_adapter()
self.update_menuitems()
else:
self.Signals.Disconnect(0)
self.adapter = None
self.update_menuitems()
def on_update(self):
self.time_left -= 1
self.item.get_child().props.label = _("Discoverable... %ss") % self.time_left
self.item.props.sensitive = False
return True
def on_set_discoverable(self, item):
if self.adapter:
self.adapter.set("Discoverable", True)
self.adapter.set("DiscoverableTimeout", self.get_option("time"))
def init_adapter(self):
try:
self.adapter = self.Applet.Manager.get_adapter()
except:
self.adapter = None
def on_adapter_removed(self, path):
dprint(path)
if path == self.adapter.get_object_path():
self.init_adapter()
self.update_menuitems()
def on_adapter_property_changed(self, path, key, value):
if self.adapter and path == self.adapter.get_object_path():
dprint("prop", key, value)
if key == "DiscoverableTimeout":
if value == 0: #always visible
if self.timeout != None:
GObject.source_remove(self.timeout)
self.time_left = -1
self.timeout = None
else:
if self.time_left > -1:
if self.timeout != None:
GObject.source_remove(self.timeout)
self.time_left = value
self.timeout = GObject.timeout_add(1000, self.on_update)
return
elif (key == "Discoverable" and not value) or (key == "Powered" and not value):
dprint("Stop")
if self.timeout != None:
GObject.source_remove(self.timeout)
self.time_left = -1
self.timeout = None
self.update_menuitems()
def update_menuitems(self):
try:
props = self.adapter.get_properties()
except Exception as e:
dprint("warning: Adapter is None")
self.item.props.visible = False
else:
if (not props["Discoverable"] or props["DiscoverableTimeout"] > 0) and props["Powered"]:
self.item.props.visible = True
self.item.get_child().props.label = _("_Make Discoverable")
self.item.props.sensitive = True
else:
self.item.props.visible = False
|
py | 1a36208df4a675bd5d3da5c313d89079f395e370 | #!/usr/bin/env python
# *****************************************************************
# (C) Copyright IBM Corp. 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# *****************************************************************
import sys
import os
import pathlib
sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..'))
import open_ce.utils as utils # pylint: disable=wrong-import-position
import open_ce.inputs as inputs # pylint: disable=wrong-import-position
from common import get_configs, make_parser, check_recipes
def main(arg_strings=None):
'''
Entry function.
'''
parser = make_parser()
args = inputs.parse_args(parser, arg_strings)
variants = utils.make_variants(args.python_versions, args.build_types, args.mpi_types, args.cuda_versions)
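# make_variants() is assumed to expand the comma-separated CLI selections into
# the cross product of (python version, build type, mpi type, cuda version)
# combinations, and each variant is validated independently below.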
check_result = True
for variant in variants:
main_build_config_data, main_config = get_configs(variant, args.conda_build_configs)
if main_build_config_data["recipes"] is None:
continue
if not check_recipes(main_build_config_data, main_config, variant):
check_result = False
print("Recipe validation failed for variant '{}'.".format(variant))
assert check_result, "All recipes must be valid."
if __name__ == '__main__':
try:
main()
print("RECIPE VALIDATION SUCCESS")
except Exception as exc: # pylint: disable=broad-except
print("RECIPE VALIDATION ERROR: ", exc)
sys.exit(1)
|
py | 1a36222f3b917add03a779d89df9a37c1b2cd4a6 | import functools
import operator
import os
from collections import OrderedDict
from datetime import date, datetime, time
from operator import methodcaller
import numpy as np
import pandas as pd
import pytest
import toolz
import ibis
import ibis.common.exceptions as com
import ibis.expr.analysis as L
import ibis.expr.api as api
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.rules as rlz
import ibis.expr.types as ir
from ibis import literal
from ibis.common.exceptions import IbisTypeError
from ibis.expr.signature import Argument as Arg
from ibis.tests.util import assert_equal
def test_null():
expr = ibis.literal(None)
assert isinstance(expr, ir.NullScalar)
assert isinstance(expr.op(), ops.NullLiteral)
assert expr._arg.value is None
expr2 = ibis.null()
assert_equal(expr, expr2)
assert expr is expr2
assert expr.type() is dt.null
assert expr2.type() is dt.null
@pytest.mark.xfail(
raises=AssertionError,
reason='UTF-8 support in Impala non-existent at the moment?',
)
def test_unicode():
assert False
@pytest.mark.parametrize(
['value', 'expected_type'],
[
(5, 'int8'),
(127, 'int8'),
(128, 'int16'),
(32767, 'int16'),
(32768, 'int32'),
(2147483647, 'int32'),
(2147483648, 'int64'),
(-5, 'int8'),
(-128, 'int8'),
(-129, 'int16'),
(-32769, 'int32'),
(-2147483649, 'int64'),
(1.5, 'double'),
('foo', 'string'),
([1, 2, 3], 'array<int8>'),
],
)
def test_literal_with_implicit_type(value, expected_type):
expr = ibis.literal(value)
assert isinstance(expr, ir.ScalarExpr)
assert expr.type() == dt.dtype(expected_type)
assert isinstance(expr.op(), ops.Literal)
assert expr.op().value is value
pointA = (1, 2)
pointB = (-3, 4)
pointC = (5, 19)
lineAB = [pointA, pointB]
lineBC = [pointB, pointC]
lineCA = [pointC, pointA]
polygon1 = [lineAB, lineBC, lineCA]
polygon2 = [lineAB, lineBC, lineCA]
multilinestring = [lineAB, lineBC, lineCA]
multipoint = [pointA, pointB, pointC]
multipolygon1 = [polygon1, polygon2]
@pytest.mark.parametrize(
['value', 'expected_type'],
[
(5, 'int16'),
(127, 'double'),
(128, 'int64'),
(32767, 'double'),
(32768, 'float'),
(2147483647, 'int64'),
(-5, 'int16'),
(-128, 'int32'),
(-129, 'int64'),
(-32769, 'float'),
(-2147483649, 'double'),
(1.5, 'double'),
('foo', 'string'),
(list(pointA), 'point'),
(tuple(pointA), 'point'),
(list(lineAB), 'linestring'),
(tuple(lineAB), 'linestring'),
(list(polygon1), 'polygon'),
(tuple(polygon1), 'polygon'),
(list(multilinestring), 'multilinestring'),
(tuple(multilinestring), 'multilinestring'),
(list(multipoint), 'multipoint'),
(tuple(multipoint), 'multipoint'),
(list(multipolygon1), 'multipolygon'),
(tuple(multipolygon1), 'multipolygon'),
],
)
def test_literal_with_explicit_type(value, expected_type):
expr = ibis.literal(value, type=expected_type)
assert expr.type().equals(dt.validate_type(expected_type))
@pytest.mark.parametrize(
['value', 'expected_type', 'expected_class'],
[
(list('abc'), 'array<string>', ir.ArrayScalar),
([1, 2, 3], 'array<int8>', ir.ArrayScalar),
({'a': 1, 'b': 2, 'c': 3}, 'map<string, int8>', ir.MapScalar),
({1: 2, 3: 4, 5: 6}, 'map<int8, int8>', ir.MapScalar),
(
{'a': [1.0, 2.0], 'b': [], 'c': [3.0]},
'map<string, array<double>>',
ir.MapScalar,
),
(
OrderedDict(
[
('a', 1),
('b', list('abc')),
('c', OrderedDict([('foo', [1.0, 2.0])])),
]
),
'struct<a: int8, b: array<string>, c: struct<foo: array<double>>>',
ir.StructScalar,
),
],
)
def test_literal_complex_types(value, expected_type, expected_class):
expr = ibis.literal(value)
expr_type = expr.type()
assert expr_type.equals(dt.validate_type(expected_type))
assert isinstance(expr, expected_class)
assert isinstance(expr.op(), ops.Literal)
assert expr.op().value is value
def test_simple_map_operations():
value = {'a': [1.0, 2.0], 'b': [], 'c': [3.0]}
value2 = {'a': [1.0, 2.0], 'c': [3.0], 'd': [4.0, 5.0]}
expr = ibis.literal(value)
expr2 = ibis.literal(value2)
assert isinstance(expr, ir.MapValue)
assert isinstance(expr.length().op(), ops.MapLength)
assert isinstance((expr + expr2).op(), ops.MapConcat)
assert isinstance((expr2 + expr).op(), ops.MapConcat)
default = ibis.literal([0.0])
assert isinstance(expr.get('d', default).op(), ops.MapValueOrDefaultForKey)
# test for an invalid default type, nulls are ok
with pytest.raises(IbisTypeError):
expr.get('d', ibis.literal('foo'))
assert isinstance(
expr.get('d', ibis.literal(None)).op(), ops.MapValueOrDefaultForKey
)
assert isinstance(expr['b'].op(), ops.MapValueForKey)
assert isinstance(expr.keys().op(), ops.MapKeys)
assert isinstance(expr.values().op(), ops.MapValues)
@pytest.mark.parametrize(
['value', 'expected_type'],
[
(32767, 'int8'),
(32768, 'int16'),
(2147483647, 'int16'),
(2147483648, 'int32'),
('foo', 'double'),
],
)
def test_literal_with_non_coercible_type(value, expected_type):
expected_msg = 'Value .* cannot be safely coerced to .*'
with pytest.raises(TypeError, match=expected_msg):
ibis.literal(value, type=expected_type)
def test_non_inferrable_literal():
expected_msg = (
'The datatype of value .* cannot be inferred, try '
'passing it explicitly with the `type` keyword.'
)
value = tuple(pointA)
with pytest.raises(TypeError, match=expected_msg):
ibis.literal(value)
point = ibis.literal(value, type='point')
assert point.type() == dt.point
def test_literal_list():
what = [1, 2, 1000]
expr = api.literal(what)
assert isinstance(expr, ir.ArrayScalar)
# it works!
repr(expr)
def test_literal_array():
what = []
expr = api.literal(what)
assert isinstance(expr, ir.ArrayValue)
assert expr.type().equals(dt.Array(dt.null))
def test_mixed_arity(table):
what = ["bar", table.g, "foo"]
expr = api.as_value_expr(what)
values = expr.op().values
assert isinstance(values[1], ir.StringColumn)
# it works!
repr(expr)
@pytest.mark.parametrize('container', [list, tuple, set, frozenset])
def test_isin_notin_list(table, container):
values = container([1, 2, 3, 4])
expr = table.a.isin(values)
not_expr = table.a.notin(values)
assert isinstance(expr, ir.BooleanColumn)
assert isinstance(expr.op(), ops.Contains)
assert isinstance(not_expr, ir.BooleanColumn)
assert isinstance(not_expr.op(), ops.NotContains)
def test_value_counts(table, string_col):
bool_clause = table[string_col].notin(['1', '4', '7'])
expr = table[bool_clause][string_col].value_counts()
assert isinstance(expr, ir.TableExpr)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_isin_not_comparable():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_isin_array_expr():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_isin_invalid_cases():
# For example, array expression in a list of values, where the inner
# array values originate from some other table
assert False
def test_isin_notin_scalars():
a, b, c = [ibis.literal(x) for x in [1, 1, 2]]
result = a.isin([1, 2])
assert isinstance(result, ir.BooleanScalar)
result = a.notin([b, c, 3])
assert isinstance(result, ir.BooleanScalar)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_isin_null():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_negate_isin():
# Should yield a NotContains
assert False
def test_scalar_isin_list_with_array(table):
val = ibis.literal(2)
options = [table.a, table.b, table.c]
expr = val.isin(options)
assert isinstance(expr, ir.BooleanColumn)
not_expr = val.notin(options)
assert isinstance(not_expr, ir.BooleanColumn)
def test_distinct_basic(functional_alltypes):
expr = functional_alltypes.distinct()
assert isinstance(expr.op(), ops.Distinct)
assert isinstance(expr, ir.TableExpr)
assert expr.op().table is functional_alltypes
expr = functional_alltypes.string_col.distinct()
assert isinstance(expr.op(), ops.DistinctColumn)
assert isinstance(expr, ir.StringColumn)
@pytest.mark.xfail(reason='NYT')
def test_distinct_array_interactions(functional_alltypes):
# array cardinalities / shapes are likely to be different.
a = functional_alltypes.int_col.distinct()
b = functional_alltypes.bigint_col
with pytest.raises(ir.RelationError):
a + b
@pytest.mark.parametrize('where', [lambda t: None, lambda t: t.int_col != 0])
def test_distinct_count(functional_alltypes, where):
result = functional_alltypes.string_col.distinct().count(
where=where(functional_alltypes)
)
assert isinstance(result.op(), ops.CountDistinct)
expected = functional_alltypes.string_col.nunique(
where=where(functional_alltypes)
).name('count')
assert result.equals(expected)
def test_distinct_unnamed_array_expr():
table = ibis.table(
[('year', 'int32'), ('month', 'int32'), ('day', 'int32')], 'foo'
)
# it works!
expr = (
ibis.literal('-')
.join(
[
table.year.cast('string'),
table.month.cast('string'),
table.day.cast('string'),
]
)
.distinct()
)
repr(expr)
def test_distinct_count_numeric_types(functional_alltypes):
metric = (
functional_alltypes.bigint_col.distinct()
.count()
.name('unique_bigints')
)
functional_alltypes.group_by('string_col').aggregate(metric)
def test_nunique(functional_alltypes):
expr = functional_alltypes.string_col.nunique()
assert isinstance(expr.op(), ops.CountDistinct)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_project_with_distinct():
assert False
def test_isnull(table):
expr = table['g'].isnull()
assert isinstance(expr, ir.BooleanColumn)
assert isinstance(expr.op(), ops.IsNull)
expr = ibis.literal('foo').isnull()
assert isinstance(expr, ir.BooleanScalar)
assert isinstance(expr.op(), ops.IsNull)
def test_notnull(table):
expr = table['g'].notnull()
assert isinstance(expr, ir.BooleanColumn)
assert isinstance(expr.op(), ops.NotNull)
expr = ibis.literal('foo').notnull()
assert isinstance(expr, ir.BooleanScalar)
assert isinstance(expr.op(), ops.NotNull)
@pytest.mark.parametrize('column', ['e', 'f'], ids=['float', 'double'])
def test_isnan_isinf_column(table, column):
expr = table[column].isnan()
assert isinstance(expr, ir.BooleanColumn)
assert isinstance(expr.op(), ops.IsNan)
expr = table[column].isinf()
assert isinstance(expr, ir.BooleanColumn)
assert isinstance(expr.op(), ops.IsInf)
@pytest.mark.parametrize('value', [1.3, np.nan, np.inf, -np.inf])
def test_isnan_isinf_scalar(value):
expr = ibis.literal(value).isnan()
assert isinstance(expr, ir.BooleanScalar)
assert isinstance(expr.op(), ops.IsNan)
expr = ibis.literal(value).isinf()
assert isinstance(expr, ir.BooleanScalar)
assert isinstance(expr.op(), ops.IsInf)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_null_literal():
assert False
@pytest.mark.parametrize(
['column', 'operation'],
[
('d', 'cumsum'),
('d', 'cummean'),
('d', 'cummin'),
('d', 'cummax'),
('h', 'cumany'),
('h', 'cumall'),
],
)
def test_cumulative_yield_array_types(table, column, operation):
expr = getattr(getattr(table, column), operation)()
assert isinstance(expr, ir.ColumnExpr)
@pytest.fixture(params=['ln', 'log', 'log2', 'log10'])
def log(request):
return operator.methodcaller(request.param)
@pytest.mark.parametrize('column', list('abcdef'))
def test_log(table, log, column):
result = log(table[column])
assert isinstance(result, ir.FloatingColumn)
# is this what we want?
# assert result.get_name() == c
def test_log_string(table):
g = table.g
with pytest.raises(IbisTypeError):
ops.Log(g, None).to_expr()
@pytest.mark.parametrize('klass', [ops.Ln, ops.Log2, ops.Log10])
def test_log_variants_string(table, klass):
g = table.g
with pytest.raises(IbisTypeError):
klass(g).to_expr()
def test_log_boolean(table, log):
# boolean not implemented for these
h = table['h']
with pytest.raises(IbisTypeError):
log(h)
def test_log_literal(log):
assert isinstance(log(ibis.literal(5)), ir.FloatingScalar)
assert isinstance(log(ibis.literal(5.5)), ir.FloatingScalar)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_exp():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_sqrt():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_trig_functions():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_round():
assert False
def test_cast_same_type_noop(table):
c = table.g
assert c.cast('string') is c
i = ibis.literal(5)
assert i.cast('int8') is i
@pytest.mark.parametrize('type', ['int8', 'int32', 'double', 'float'])
def test_string_to_number(table, type):
casted = table.g.cast(type)
casted_literal = ibis.literal('5').cast(type).name('bar')
assert isinstance(casted, ir.ColumnExpr)
assert casted.type() == dt.dtype(type)
assert isinstance(casted_literal, ir.ScalarExpr)
assert casted_literal.type() == dt.dtype(type)
assert casted_literal.get_name() == 'bar'
@pytest.mark.parametrize('col', list('abcdefh'))
def test_number_to_string_column(table, col):
casted = table[col].cast('string')
assert isinstance(casted, ir.StringColumn)
def test_number_to_string_scalar():
casted_literal = ibis.literal(5).cast('string').name('bar')
assert isinstance(casted_literal, ir.StringScalar)
assert casted_literal.get_name() == 'bar'
def test_casted_exprs_are_named(table):
expr = table.f.cast('string')
assert expr.get_name() == 'cast(f, string)'
# it works! per GH #396
expr.value_counts()
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_nonzero():
assert False
@pytest.mark.parametrize('col', list('abcdefh'))
def test_negate(table, col):
c = table[col]
result = -c
assert isinstance(result, type(c))
assert isinstance(result.op(), ops.Negate)
def test_negate_boolean_scalar():
result = -(ibis.literal(False))
assert isinstance(result, ir.BooleanScalar)
assert isinstance(result.op(), ops.Negate)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_isnull_notnull():
assert False
@pytest.mark.parametrize('column', ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
@pytest.mark.parametrize('how', [None, 'first', 'last', 'heavy'])
@pytest.mark.parametrize('condition_fn', [lambda t: None, lambda t: t.a > 8])
def test_arbitrary(table, column, how, condition_fn):
col = table[column]
where = condition_fn(table)
expr = col.arbitrary(how=how, where=where)
assert expr.type() == col.type()
assert isinstance(expr, ir.ScalarExpr)
assert L.is_reduction(expr)
@pytest.mark.parametrize(
['column', 'operation'],
[
('h', lambda column: column.any()),
('h', lambda column: column.notany()),
('h', lambda column: column.all()),
('c', lambda column: (column == 0).any()),
('c', lambda column: (column == 0).all()),
],
)
def test_any_all_notany(table, column, operation):
expr = operation(table[column])
assert isinstance(expr, ir.BooleanScalar)
assert L.is_reduction(expr)
@pytest.mark.parametrize(
'operation',
[
operator.lt,
operator.gt,
operator.ge,
operator.le,
operator.eq,
operator.ne,
],
)
@pytest.mark.parametrize('column', list('abcdef'))
@pytest.mark.parametrize('case', [2, 2 ** 9, 2 ** 17, 2 ** 33, 1.5])
def test_numbers_compare_numeric_literal(table, operation, column, case):
ex_op_class = {
operator.eq: ops.Equals,
operator.ne: ops.NotEquals,
operator.le: ops.LessEqual,
operator.lt: ops.Less,
operator.ge: ops.GreaterEqual,
operator.gt: ops.Greater,
}
col = table[column]
result = operation(col, case)
assert isinstance(result, ir.BooleanColumn)
assert isinstance(result.op(), ex_op_class[operation])
def test_boolean_comparisons(table):
bool_col = table.h
result = bool_col == True # noqa
assert isinstance(result, ir.BooleanColumn)
result = bool_col == False # noqa
assert isinstance(result, ir.BooleanColumn)
@pytest.mark.parametrize(
'operation',
[
operator.lt,
operator.gt,
operator.ge,
operator.le,
operator.eq,
operator.ne,
],
)
def test_string_comparisons(table, operation):
string_col = table.g
result = operation(string_col, 'foo')
assert isinstance(result, ir.BooleanColumn)
@pytest.mark.parametrize(
'operation', [operator.xor, operator.or_, operator.and_]
)
def test_boolean_logical_ops(table, operation):
expr = table.a > 0
result = operation(expr, table.h)
assert isinstance(result, ir.BooleanColumn)
result = operation(expr, True)
refl_result = operation(True, expr)
assert isinstance(result, ir.BooleanColumn)
assert isinstance(refl_result, ir.BooleanColumn)
true = ibis.literal(True)
false = ibis.literal(False)
result = operation(true, false)
assert isinstance(result, ir.BooleanScalar)
def test_null_column():
t = ibis.table([('a', 'string')], name='t')
s = t.mutate(b=ibis.NA)
assert s.b.type() == dt.null
assert isinstance(s.b, ir.NullColumn)
def test_null_column_union():
s = ibis.table([('a', 'string'), ('b', 'double')])
t = ibis.table([('a', 'string')])
with pytest.raises(ibis.common.exceptions.RelationError):
s.union(t.mutate(b=ibis.NA)) # needs a type
assert s.union(t.mutate(b=ibis.NA.cast('double'))).schema() == s.schema()
def test_string_compare_numeric_array(table):
with pytest.raises(TypeError):
table.g == table.f
with pytest.raises(TypeError):
table.g == table.c
def test_string_compare_numeric_literal(table):
with pytest.raises(TypeError):
table.g == ibis.literal(1.5)
with pytest.raises(TypeError):
table.g == ibis.literal(5)
def test_between(table):
result = table.f.between(0, 1)
assert isinstance(result, ir.BooleanColumn)
assert isinstance(result.op(), ops.Between)
# it works!
result = table.g.between('a', 'f')
assert isinstance(result, ir.BooleanColumn)
result = ibis.literal(1).between(table.a, table.c)
assert isinstance(result, ir.BooleanColumn)
result = ibis.literal(7).between(5, 10)
assert isinstance(result, ir.BooleanScalar)
# Cases where between should immediately fail, e.g. incomparables
with pytest.raises(TypeError):
table.f.between('0', '1')
with pytest.raises(TypeError):
table.f.between(0, '1')
with pytest.raises(TypeError):
table.f.between('0', 1)
def test_chained_comparisons_not_allowed(table):
with pytest.raises(ValueError):
0 < table.f < 1
@pytest.mark.parametrize(
'operation', [operator.add, operator.mul, operator.truediv, operator.sub]
)
def test_binop_string_type_error(table, operation):
# Strings are not valid for any numeric arithmetic
ints = table.d
strs = table.g
with pytest.raises(TypeError):
operation(ints, strs)
with pytest.raises(TypeError):
operation(strs, ints)
@pytest.mark.parametrize(
['op', 'name', 'case', 'ex_type'],
[
(operator.add, 'a', 0, 'int8'),
(operator.add, 'a', 5, 'int16'),
(operator.add, 'a', 100000, 'int32'),
(operator.add, 'a', -100000, 'int32'),
(operator.add, 'a', 1.5, 'double'),
(operator.add, 'b', 0, 'int16'),
(operator.add, 'b', 5, 'int32'),
(operator.add, 'b', -5, 'int32'),
(operator.add, 'c', 0, 'int32'),
(operator.add, 'c', 5, 'int64'),
(operator.add, 'c', -5, 'int64'),
# technically this can overflow, but we allow it
(operator.add, 'd', 5, 'int64'),
(operator.mul, 'a', 0, 'int8'),
(operator.mul, 'a', 5, 'int16'),
(operator.mul, 'a', 2 ** 24, 'int32'),
(operator.mul, 'a', -(2 ** 24) + 1, 'int32'),
(operator.mul, 'a', 1.5, 'double'),
(operator.mul, 'b', 0, 'int16'),
(operator.mul, 'b', 5, 'int32'),
(operator.mul, 'b', -5, 'int32'),
(operator.mul, 'c', 0, 'int32'),
(operator.mul, 'c', 5, 'int64'),
(operator.mul, 'c', -5, 'int64'),
# technically this can overflow, but we allow it
(operator.mul, 'd', 5, 'int64'),
(operator.sub, 'a', 5, 'int16'),
(operator.sub, 'a', 100000, 'int32'),
(operator.sub, 'a', -100000, 'int32'),
(operator.sub, 'a', 1.5, 'double'),
(operator.sub, 'b', 5, 'int32'),
(operator.sub, 'b', -5, 'int32'),
(operator.sub, 'c', 5, 'int64'),
(operator.sub, 'c', -5, 'int64'),
# technically this can overflow, but we allow it
(operator.sub, 'd', 5, 'int64'),
(operator.truediv, 'a', 5, 'double'),
(operator.truediv, 'a', 1.5, 'double'),
(operator.truediv, 'b', 5, 'double'),
(operator.truediv, 'b', -5, 'double'),
(operator.truediv, 'c', 5, 'double'),
(operator.pow, 'a', 0, 'double'),
(operator.pow, 'b', 0, 'double'),
(operator.pow, 'c', 0, 'double'),
(operator.pow, 'd', 0, 'double'),
(operator.pow, 'e', 0, 'float'),
(operator.pow, 'f', 0, 'double'),
(operator.pow, 'a', 2, 'double'),
(operator.pow, 'b', 2, 'double'),
(operator.pow, 'c', 2, 'double'),
(operator.pow, 'd', 2, 'double'),
(operator.pow, 'a', 1.5, 'double'),
(operator.pow, 'b', 1.5, 'double'),
(operator.pow, 'c', 1.5, 'double'),
(operator.pow, 'd', 1.5, 'double'),
(operator.pow, 'e', 2, 'float'),
(operator.pow, 'f', 2, 'double'),
(operator.pow, 'a', -2, 'double'),
(operator.pow, 'b', -2, 'double'),
(operator.pow, 'c', -2, 'double'),
(operator.pow, 'd', -2, 'double'),
],
ids=lambda arg: str(getattr(arg, '__name__', arg)),
)
def test_literal_promotions(table, op, name, case, ex_type):
col = table[name]
result = op(col, case)
assert result.type() == dt.dtype(ex_type)
result = op(case, col)
assert result.type() == dt.dtype(ex_type)
@pytest.mark.parametrize(
('op', 'left_fn', 'right_fn', 'ex_type'),
[
(operator.sub, lambda t: t['a'], lambda t: 0, 'int8'),
(operator.sub, lambda t: 0, lambda t: t['a'], 'int16'),
(operator.sub, lambda t: t['b'], lambda t: 0, 'int16'),
(operator.sub, lambda t: 0, lambda t: t['b'], 'int32'),
(operator.sub, lambda t: t['c'], lambda t: 0, 'int32'),
(operator.sub, lambda t: 0, lambda t: t['c'], 'int64'),
],
ids=lambda arg: str(getattr(arg, '__name__', arg)),
)
def test_zero_subtract_literal_promotions(
table, op, left_fn, right_fn, ex_type
):
    # when zero is involved in subtraction, the order of the operands matters
left, right = left_fn(table), right_fn(table)
result = op(left, right)
assert result.type() == dt.dtype(ex_type)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_add_array_promotions():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_subtract_array_promotions():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_multiply_array_promotions():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_divide_array_promotions():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_string_add_concat():
assert False
@pytest.fixture
def expr():
exprs = [ibis.literal(1).name('a'), ibis.literal(2).name('b')]
return ibis.expr_list(exprs)
def test_names(expr):
assert expr.names() == ['a', 'b']
def test_prefix(expr):
prefixed = expr.prefix('foo_')
result = prefixed.names()
assert result == ['foo_a', 'foo_b']
def test_rename(expr):
renamed = expr.rename(lambda x: 'foo({0})'.format(x))
result = renamed.names()
assert result == ['foo(a)', 'foo(b)']
def test_suffix(expr):
suffixed = expr.suffix('.x')
result = suffixed.names()
assert result == ['a.x', 'b.x']
def test_concat():
exprs = [ibis.literal(1).name('a'), ibis.literal(2).name('b')]
exprs2 = [ibis.literal(3).name('c'), ibis.literal(4).name('d')]
list1 = ibis.expr_list(exprs)
list2 = ibis.expr_list(exprs2)
result = list1.concat(list2)
expected = ibis.expr_list(exprs + exprs2)
assert_equal(result, expected)
def test_substitute_dict():
table = ibis.table([('foo', 'string'), ('bar', 'string')], 't1')
subs = {'a': 'one', 'b': table.bar}
result = table.foo.substitute(subs)
expected = (
table.foo.case()
.when('a', 'one')
.when('b', table.bar)
.else_(table.foo)
.end()
)
assert_equal(result, expected)
result = table.foo.substitute(subs, else_=ibis.NA)
expected = (
table.foo.case()
.when('a', 'one')
.when('b', table.bar)
.else_(ibis.NA)
.end()
)
assert_equal(result, expected)
@pytest.mark.parametrize(
'typ',
[
'array<map<string, array<array<double>>>>',
'string',
'double',
'float',
'int64',
],
)
def test_not_without_boolean(typ):
t = ibis.table([('a', typ)], name='t')
c = t.a
with pytest.raises(TypeError):
~c
@pytest.mark.parametrize(
('position', 'names'),
[
(0, 'foo'),
(1, 'bar'),
([0], ['foo']),
([1], ['bar']),
([0, 1], ['foo', 'bar']),
([1, 0], ['bar', 'foo']),
],
)
@pytest.mark.parametrize(
'expr_func',
[
lambda t, args: t[args],
lambda t, args: t.sort_by(args),
lambda t, args: t.group_by(args).aggregate(bar_avg=t.bar.mean()),
],
)
def test_table_operations_with_integer_column(position, names, expr_func):
t = ibis.table([('foo', 'string'), ('bar', 'double')])
result = expr_func(t, position)
expected = expr_func(t, names)
assert result.equals(expected)
@pytest.mark.parametrize('value', ['abcdefg', ['a', 'b', 'c'], [1, 2, 3]])
@pytest.mark.parametrize(
'operation', ['pow', 'sub', 'truediv', 'floordiv', 'mod']
)
def test_generic_value_api_no_arithmetic(value, operation):
func = getattr(operator, operation)
expr = ibis.literal(value)
with pytest.raises(TypeError):
func(expr, expr)
@pytest.mark.parametrize(
('value', 'expected'), [(5, dt.int8), (5.4, dt.double), ('abc', dt.string)]
)
def test_fillna_null(value, expected):
assert ibis.NA.fillna(value).type().equals(expected)
@pytest.mark.parametrize(
('left', 'right'),
[
(literal('2017-04-01'), date(2017, 4, 2)),
(date(2017, 4, 2), literal('2017-04-01')),
(literal('2017-04-01 01:02:33'), datetime(2017, 4, 1, 1, 3, 34)),
(datetime(2017, 4, 1, 1, 3, 34), literal('2017-04-01 01:02:33')),
],
)
@pytest.mark.parametrize(
'op',
[
operator.eq,
operator.ne,
operator.lt,
operator.le,
operator.gt,
operator.ge,
lambda left, right: ibis.timestamp('2017-04-01 00:02:34').between(
left, right
),
lambda left, right: ibis.timestamp('2017-04-01')
.cast(dt.date)
.between(left, right),
],
)
def test_string_temporal_compare(op, left, right):
result = op(left, right)
assert result.type().equals(dt.boolean)
@pytest.mark.parametrize(
('value', 'type', 'expected_type_class'),
[
(2.21, 'decimal', dt.Decimal),
(3.14, 'double', dt.Double),
(4.2, 'int64', dt.Double),
(4, 'int64', dt.Int64),
],
)
def test_decimal_modulo_output_type(value, type, expected_type_class):
t = ibis.table([('a', type)])
expr = t.a % value
assert isinstance(expr.type(), expected_type_class)
@pytest.mark.parametrize(
('left', 'right'),
[(literal('10:00'), time(10, 0)), (time(10, 0), literal('10:00'))],
)
@pytest.mark.parametrize(
'op',
[
operator.eq,
operator.ne,
operator.lt,
operator.le,
operator.gt,
operator.ge,
],
)
def test_time_compare(op, left, right):
result = op(left, right)
assert result.type().equals(dt.boolean)
@pytest.mark.parametrize(
('left', 'right'),
[
(literal('10:00'), date(2017, 4, 2)),
(literal('10:00'), datetime(2017, 4, 2, 1, 1)),
(literal('10:00'), literal('2017-04-01')),
],
)
@pytest.mark.parametrize(
'op', [operator.eq, operator.lt, operator.le, operator.gt, operator.ge]
)
def test_time_timestamp_invalid_compare(op, left, right):
result = op(left, right)
assert result.type().equals(dt.boolean)
def test_scalar_parameter_set():
value = ibis.param({dt.int64})
assert isinstance(value.op(), ops.ScalarParameter)
assert value.type().equals(dt.Set(dt.int64))
def test_scalar_parameter_repr():
value = ibis.param(dt.timestamp).name('value')
assert repr(value) == 'value = ScalarParameter[timestamp]'
value_op = value.op()
assert repr(value_op) == "ScalarParameter(type=timestamp)"
@pytest.mark.parametrize(
('left', 'right', 'expected'),
[
(
# same value type, same name
ibis.param(dt.timestamp),
ibis.param(dt.timestamp),
False,
),
(
# different value type, same name
ibis.param(dt.date),
ibis.param(dt.timestamp),
False,
),
(
# same value type, different name
ibis.param(dt.timestamp),
ibis.param(dt.timestamp),
False,
),
(
# different value type, different name
ibis.param(dt.date),
ibis.param(dt.timestamp),
False,
),
(
# different Python class, left side is param
ibis.param(dt.timestamp),
dt.date,
False,
),
(
# different Python class, right side is param
dt.date,
ibis.param(dt.timestamp),
False,
),
],
)
def test_scalar_parameter_compare(left, right, expected):
assert left.equals(right) == expected
@pytest.mark.parametrize(
('case', 'creator'),
[
(datetime.now(), toolz.compose(methodcaller('time'), ibis.timestamp)),
('now', toolz.compose(methodcaller('time'), ibis.timestamp)),
(datetime.now().time(), ibis.time),
('10:37', ibis.time),
],
)
@pytest.mark.parametrize(
('left', 'right'), [(1, 'a'), ('a', 1), (1.0, 2.0), (['a'], [1])]
)
def test_between_time_failure_time(case, creator, left, right):
value = creator(case)
with pytest.raises(TypeError):
value.between(left, right)
def test_custom_type_binary_operations():
class Foo(ir.ValueExpr):
def __add__(self, other):
op = self.op()
return type(op)(op.value + other).to_expr()
__radd__ = __add__
class FooNode(ops.ValueOp):
value = Arg(rlz.integer)
def output_type(self):
return functools.partial(Foo, dtype=dt.int64)
left = ibis.literal(2)
right = FooNode(3).to_expr()
result = left + right
assert isinstance(result, Foo)
assert isinstance(result.op(), FooNode)
left = FooNode(3).to_expr()
right = ibis.literal(2)
result = left + right
assert isinstance(result, Foo)
assert isinstance(result.op(), FooNode)
def test_empty_array_as_argument():
class Foo(ir.Expr):
pass
class FooNode(ops.ValueOp):
value = Arg(rlz.value(dt.Array(dt.int64)))
def output_type(self):
return Foo
node = FooNode([])
value = node.value
expected = literal([]).cast(dt.Array(dt.int64))
assert value.type().equals(dt.Array(dt.null))
assert value.cast(dt.Array(dt.int64)).equals(expected)
def test_nullable_column_propagated():
t = ibis.table(
[
('a', dt.Int32(nullable=True)),
('b', dt.Int32(nullable=False)),
('c', dt.String(nullable=False)),
('d', dt.double), # nullable by default
('f', dt.Double(nullable=False)),
]
)
assert t.a.type().nullable is True
assert t.b.type().nullable is False
assert t.c.type().nullable is False
assert t.d.type().nullable is True
assert t.f.type().nullable is False
s = t.a + t.d
assert s.type().nullable is True
s = t.b + t.d
assert s.type().nullable is True
s = t.b + t.f
assert s.type().nullable is False
@pytest.mark.parametrize(
'base_expr',
[
ibis.table([('interval_col', dt.Interval(unit='D'))]).interval_col,
ibis.interval(seconds=42),
],
)
def test_interval_negate(base_expr):
expr = -base_expr
expr2 = base_expr.negate()
expr3 = ibis.negate(base_expr)
assert isinstance(expr.op(), ops.Negate)
assert expr.equals(expr2)
assert expr.equals(expr3)
def test_large_timestamp():
expr = ibis.timestamp('4567-02-03')
expected = datetime(year=4567, month=2, day=3)
result = expr.op().value
assert result == expected
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_timestamp_with_timezone(tz):
expr = ibis.timestamp('2017-01-01', timezone=tz)
expected = pd.Timestamp('2017-01-01', tz=tz)
result = expr.op().value
assert expected == result
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_timestamp_timezone_type(tz):
expr = ibis.timestamp('2017-01-01', timezone=tz)
expected = dt.Timestamp(timezone=tz)
assert expected == expr.op().dtype
def test_map_get_broadcast():
t = ibis.table([('a', 'string')], name='t')
lookup_table = ibis.literal({'a': 1, 'b': 2})
expr = lookup_table.get(t.a)
assert isinstance(expr, ir.IntegerColumn)
def test_map_getitem_broadcast():
t = ibis.table([('a', 'string')], name='t')
lookup_table = ibis.literal({'a': 1, 'b': 2})
expr = lookup_table[t.a]
assert isinstance(expr, ir.IntegerColumn)
def test_map_keys_output_type():
mapping = ibis.literal({'a': 1, 'b': 2})
assert mapping.keys().type() == dt.Array(dt.string)
def test_map_values_output_type():
mapping = ibis.literal({'a': 1, 'b': 2})
assert mapping.values().type() == dt.Array(dt.int8)
def test_scalar_isin_map_keys():
mapping = ibis.literal({'a': 1, 'b': 2})
key = ibis.literal('a')
expr = key.isin(mapping.keys())
assert isinstance(expr, ir.BooleanScalar)
def test_column_isin_map_keys():
t = ibis.table([('a', 'string')], name='t')
mapping = ibis.literal({'a': 1, 'b': 2})
expr = t.a.isin(mapping.keys())
assert isinstance(expr, ir.BooleanColumn)
def test_map_get_with_compatible_value_smaller():
value = ibis.literal({'A': 1000, 'B': 2000})
expr = value.get('C', 3)
assert value.type() == dt.Map(dt.string, dt.int16)
assert expr.type() == dt.int16
def test_map_get_with_compatible_value_bigger():
value = ibis.literal({'A': 1, 'B': 2})
expr = value.get('C', 3000)
assert value.type() == dt.Map(dt.string, dt.int8)
assert expr.type() == dt.int16
def test_map_get_with_incompatible_value_different_kind():
value = ibis.literal({'A': 1000, 'B': 2000})
with pytest.raises(IbisTypeError):
value.get('C', 3.0)
@pytest.mark.parametrize('null_value', [None, ibis.NA])
def test_map_get_with_null_on_not_nullable(null_value):
map_type = dt.Map(dt.string, dt.Int16(nullable=False))
value = ibis.literal({'A': 1000, 'B': 2000}).cast(map_type)
assert value.type() == map_type
with pytest.raises(IbisTypeError):
assert value.get('C', null_value)
@pytest.mark.parametrize('null_value', [None, ibis.NA])
def test_map_get_with_null_on_nullable(null_value):
value = ibis.literal({'A': 1000, 'B': None})
result = value.get('C', null_value)
assert result.type().nullable
@pytest.mark.parametrize('null_value', [None, ibis.NA])
def test_map_get_with_null_on_null_type_with_null(null_value):
value = ibis.literal({'A': None, 'B': None})
result = value.get('C', null_value)
assert result.type().nullable
def test_map_get_with_null_on_null_type_with_non_null():
value = ibis.literal({'A': None, 'B': None})
assert value.get('C', 1).type() == dt.int8
def test_map_get_with_incompatible_value():
value = ibis.literal({'A': 1000, 'B': 2000})
with pytest.raises(IbisTypeError):
value.get('C', ['A'])
@pytest.mark.parametrize(
('value', 'expected_type'),
[
(datetime.now(), dt.timestamp),
(datetime.now().date(), dt.date),
(datetime.now().time(), dt.time),
],
)
def test_invalid_negate(value, expected_type):
expr = ibis.literal(value)
assert expr.type() == expected_type
with pytest.raises(TypeError):
-expr
@pytest.mark.parametrize(
'type',
[
np.float16,
np.float32,
np.float64,
np.int16,
np.int32,
np.int64,
np.int64,
np.int8,
np.timedelta64,
np.uint16,
np.uint32,
np.uint64,
np.uint64,
np.uint8,
float,
int,
],
)
def test_valid_negate(type):
value = type(1)
expr = ibis.literal(value)
assert -expr is not None
@pytest.mark.xfail(
reason='Type not supported in most backends', raises=TypeError
)
@pytest.mark.skipif(
    os.name == 'nt', reason='np.float128 does not appear to exist on Windows'
)
def test_valid_negate_float128():
value = np.float128(1)
expr = ibis.literal(value)
assert -expr is not None
@pytest.mark.parametrize(
('kind', 'begin', 'end'),
[
('preceding', None, None),
('preceding', 1, None),
('preceding', -1, 1),
('preceding', 1, -1),
('preceding', -1, -1),
('following', None, None),
('following', None, 1),
('following', -1, 1),
('following', 1, -1),
('following', -1, -1),
],
)
def test_window_unbounded_invalid(kind, begin, end):
kwargs = {kind: (begin, end)}
with pytest.raises(com.IbisInputError):
ibis.window(**kwargs)
@pytest.mark.parametrize(
('left', 'right', 'expected'),
[
(ibis.literal(1), ibis.literal(1.0), dt.float64),
(ibis.literal('a'), ibis.literal('b'), dt.string),
(ibis.literal(1.0), ibis.literal(1), dt.float64),
(ibis.literal(1), ibis.literal(1), dt.int8),
(ibis.literal(1), ibis.literal(1000), dt.int16),
(ibis.literal(2 ** 16), ibis.literal(2 ** 17), dt.int32),
(ibis.literal(2 ** 50), ibis.literal(1000), dt.int64),
(ibis.literal([1, 2]), ibis.literal([1, 2]), dt.Array(dt.int8)),
(ibis.literal(['a']), ibis.literal([]), dt.Array(dt.string)),
(ibis.literal([]), ibis.literal(['a']), dt.Array(dt.string)),
(ibis.literal([]), ibis.literal([]), dt.Array(dt.null)),
],
)
def test_nullif_type(left, right, expected):
assert left.nullif(right).type() == expected
@pytest.mark.parametrize(
('left', 'right'), [(ibis.literal(1), ibis.literal('a'))]
)
def test_nullif_fail(left, right):
with pytest.raises(com.IbisTypeError):
left.nullif(right)
with pytest.raises(com.IbisTypeError):
right.nullif(left)
@pytest.mark.parametrize(
"join_method",
[
"left_join",
pytest.param(
"right_join",
marks=pytest.mark.xfail(
raises=AttributeError, reason="right_join is not an ibis API"
),
),
"inner_join",
"outer_join",
"asof_join",
pytest.param(
"semi_join",
marks=pytest.mark.xfail(
raises=com.IbisTypeError,
reason=(
"semi_join only gives access to the left table's "
"columns"
),
),
),
],
)
@pytest.mark.xfail(
raises=(com.IbisError, AttributeError),
reason="Select from unambiguous joins not implemented",
)
def test_select_on_unambiguous_join(join_method):
t = ibis.table([("a0", dt.int64), ("b1", dt.string)], name="t")
s = ibis.table([("a1", dt.int64), ("b2", dt.string)], name="s")
method = getattr(t, join_method)
join = method(s, t.b1 == s.b2)
expr1 = join["a0", "a1"]
expr2 = join[["a0", "a1"]]
expr3 = join.select(["a0", "a1"])
assert expr1.equals(expr2)
assert expr1.equals(expr3)
def test_chained_select_on_join():
t = ibis.table([("a", dt.int64)], name="t")
s = ibis.table([("a", dt.int64), ("b", dt.string)], name="s")
join = t.join(s)[t.a, s.b]
expr1 = join["a", "b"]
expr2 = join.select(["a", "b"])
assert expr1.equals(expr2)
def test_repr_list_of_lists():
lit = ibis.literal([[1]])
result = repr(lit)
expected = """\
Literal[array<array<int8>>]
[[1]]"""
assert result == expected
def test_repr_list_of_lists_in_table():
t = ibis.table([('a', 'int64')], name='t')
lit = ibis.literal([[1]])
expr = t[t, lit.name('array_of_array')]
result = repr(expr)
expected = """\
ref_0
UnboundTable[table]
name: t
schema:
a : int64
Selection[table]
table:
Table: ref_0
selections:
Table: ref_0
array_of_array = Literal[array<array<int8>>]
[[1]]"""
assert result == expected
|
py | 1a36232f19234f7a31576ced41afe8c99f62ae2a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: et sw=4 ts=4
'''
Copyright (c) 2008, Yahoo! Inc. All rights reserved.
Code licensed under the BSD License:
http://developer.yahoo.net/yui/license.html
version: 1.0.0b1
'''
import yuidoc_parse, yuidoc_highlight, yuidoc_generate
def main():
from optparse import OptionParser
    optparser = OptionParser("usage: %prog [options] inputdir [inputdir ...]")
optparser.set_defaults(extension=".js",
newext=".highlighted",
parseroutdir="/tmp",
outputdir="docs",
parserfile="parsed.json",
showprivate=False,
project="Yahoo! UI Library",
version="",
projecturl="http://developer.yahoo.com/yui/",
yuiversion=False,
ydn=False
)
optparser.add_option( "-p", "--parseroutdir",
action="store", dest="parseroutdir", type="string",
help="Directory to write the parser temp data" )
optparser.add_option( "-o", "--outputdir",
action="store", dest="outputdir", type="string",
help="Directory to write the html documentation" )
optparser.add_option( "-f", "--file",
action="store", dest="parserfile", type="string",
help="The name of the file that contains the JSON doc info" )
optparser.add_option( "-t", "--template",
action="store", dest="templatedir", type="string",
                          help="The directory containing the html template" )
optparser.add_option( "-c", "--crosslink",
action="store", dest="crosslinkdir", type="string",
help="The directory containing json data for other modules to crosslink" )
optparser.add_option( "-s", "--showprivate",
action="store_true", dest="showprivate",
help="Should private properties/methods be in the docs?" )
optparser.add_option( "-e", "--extension",
action="store", dest="extension", type="string",
help="The extension to parse" )
optparser.add_option( "-n", "--newextension",
action="store", dest="newext", type="string",
                          help="The extension to append to the syntax-highlighted output file" )
optparser.add_option( "-m", "--project",
action="store", dest="project", type="string",
help="The name of the project" )
optparser.add_option( "-v", "--version",
action="store", dest="version", type="string",
help="The version of the project" )
optparser.add_option( "-u", "--projecturl",
action="store", dest="projecturl", type="string",
help="The project url" )
optparser.add_option( "-Y", "--yuiversion",
action="store", dest="yuiversion", type="string",
                          help="The version of the YUI library used in the project. This parameter applies to the output for attributes, which differs between YUI2 and YUI3." )
optparser.add_option( "-y", "--ydn",
action="store_true", dest="ydn",
                          help="Add YDN MyBlogLog instrumentation?" )
(opts, inputdirs) = optparser.parse_args()
if len(inputdirs) > 0:
docparser = yuidoc_parse.DocParser( inputdirs,
opts.parseroutdir,
opts.parserfile,
opts.extension,
opts.version,
opts.yuiversion
)
highlighter = yuidoc_highlight.DocHighlighter( [opts.parseroutdir],
opts.parseroutdir,
opts.extension,
opts.newext )
gen = yuidoc_generate.DocGenerator( opts.parseroutdir,
opts.parserfile,
opts.outputdir,
opts.templatedir,
opts.newext,
opts.showprivate,
opts.project,
opts.version,
opts.projecturl,
opts.ydn
)
gen.process()
else:
optparser.error("Incorrect number of arguments")
if __name__ == '__main__':
main()
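# Example invocation (a sketch only; the flags correspond to the options defined
# above, and the paths and project values are placeholders rather than defaults):
#   python main.py ./src/js -p /tmp/yuidoc-parse -o ./docs -t ./template \
#       -m "My Project" -v 1.0.0 -u http://example.com/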
|
py | 1a36246171e1728f2a83ede57369262117ec3274 | import ipaddress
import os
import re
from urllib.parse import urlsplit, urlunsplit
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.functional import SimpleLazyObject
from django.utils.ipv6 import is_valid_ipv6_address
from django.utils.translation import gettext_lazy as _, ngettext_lazy
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
def _lazy_re_compile(regex, flags=0):
"""Lazily compile a regex with flags."""
def _compile():
# Compile the regex if it was not passed pre-compiled.
if isinstance(regex, str):
return re.compile(regex, flags)
else:
assert not flags, "flags must be empty if regex is passed pre-compiled"
return regex
return SimpleLazyObject(_compile)
@deconstructible
class RegexValidator:
regex = ''
message = _('Enter a valid value.')
code = 'invalid'
inverse_match = False
flags = 0
def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None):
if regex is not None:
self.regex = regex
if message is not None:
self.message = message
if code is not None:
self.code = code
if inverse_match is not None:
self.inverse_match = inverse_match
if flags is not None:
self.flags = flags
if self.flags and not isinstance(self.regex, str):
raise TypeError("If the flags are set, regex must be a regular expression string.")
self.regex = _lazy_re_compile(self.regex, self.flags)
def __call__(self, value):
"""
Validate that the input contains (or does *not* contain, if
inverse_match is True) a match for the regular expression.
"""
regex_matches = bool(self.regex.search(str(value)))
invalid_input = regex_matches if self.inverse_match else not regex_matches
if invalid_input:
raise ValidationError(self.message, code=self.code)
def __eq__(self, other):
return (
isinstance(other, RegexValidator) and
self.regex.pattern == other.regex.pattern and
self.regex.flags == other.regex.flags and
(self.message == other.message) and
(self.code == other.code) and
(self.inverse_match == other.inverse_match)
)
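# Usage sketch for RegexValidator (illustrative, not part of the original
# module). With inverse_match=True the validator rejects values that *do*
# match the pattern:
#
#   no_digits = RegexValidator(r'\d', inverse_match=True,
#                              message='Digits are not allowed.')
#   no_digits('abc')    # passes silently
#   no_digits('a1c')    # raises ValidationError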
@deconstructible
class URLValidator(RegexValidator):
ul = '\u00a1-\uffff' # unicode letters range (must not be a raw string)
# IP patterns
ipv4_re = r'(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)(?:\.(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}'
ipv6_re = r'\[[0-9a-f:\.]+\]' # (simple regex, validated later)
# Host patterns
hostname_re = r'[a-z' + ul + r'0-9](?:[a-z' + ul + r'0-9-]{0,61}[a-z' + ul + r'0-9])?'
# Max length for domain name labels is 63 characters per RFC 1034 sec. 3.1
domain_re = r'(?:\.(?!-)[a-z' + ul + r'0-9-]{1,63}(?<!-))*'
tld_re = (
r'\.' # dot
r'(?!-)' # can't start with a dash
r'(?:[a-z' + ul + '-]{2,63}' # domain label
r'|xn--[a-z0-9]{1,59})' # or punycode label
r'(?<!-)' # can't end with a dash
r'\.?' # may have a trailing dot
)
host_re = '(' + hostname_re + domain_re + tld_re + '|localhost)'
regex = _lazy_re_compile(
r'^(?:[a-z0-9\.\-\+]*)://' # scheme is validated separately
r'(?:\S+(?::\S*)?@)?' # user:pass authentication
r'(?:' + ipv4_re + '|' + ipv6_re + '|' + host_re + ')'
r'(?::\d{2,5})?' # port
r'(?:[/?#][^\s]*)?' # resource path
r'\Z', re.IGNORECASE)
message = _('Enter a valid URL.')
schemes = ['http', 'https', 'ftp', 'ftps']
def __init__(self, schemes=None, **kwargs):
super().__init__(**kwargs)
if schemes is not None:
self.schemes = schemes
def __call__(self, value):
# Check first if the scheme is valid
scheme = value.split('://')[0].lower()
if scheme not in self.schemes:
raise ValidationError(self.message, code=self.code)
# Then check full URL
try:
super().__call__(value)
except ValidationError as e:
# Trivial case failed. Try for possible IDN domain
if value:
try:
scheme, netloc, path, query, fragment = urlsplit(value)
except ValueError: # for example, "Invalid IPv6 URL"
raise ValidationError(self.message, code=self.code)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
raise e
url = urlunsplit((scheme, netloc, path, query, fragment))
super().__call__(url)
else:
raise
else:
# Now verify IPv6 in the netloc part
host_match = re.search(r'^\[(.+)\](?::\d{2,5})?$', urlsplit(value).netloc)
if host_match:
potential_ip = host_match.groups()[0]
try:
validate_ipv6_address(potential_ip)
except ValidationError:
raise ValidationError(self.message, code=self.code)
# The maximum length of a full host name is 253 characters per RFC 1034
# section 3.1. It's defined to be 255 bytes or less, but this includes
# one byte for the length of the name and one byte for the trailing dot
# that's used to indicate absolute names in DNS.
if len(urlsplit(value).netloc) > 253:
raise ValidationError(self.message, code=self.code)
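# Minimal usage sketch for URLValidator (illustrative, not part of the original
# module). Instances are callables that raise ValidationError on invalid input
# and return None otherwise:
#
#   validate_https_url = URLValidator(schemes=['https'])
#   validate_https_url('https://example.com/path?q=1')   # passes silently
#   validate_https_url('ftp://example.com/file')         # raises ValidationError (scheme not allowed)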
integer_validator = RegexValidator(
_lazy_re_compile(r'^-?\d+\Z'),
message=_('Enter a valid integer.'),
code='invalid',
)
def validate_integer(value):
return integer_validator(value)
@deconstructible
class EmailValidator:
message = _('Enter a valid email address.')
code = 'invalid'
user_regex = _lazy_re_compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string
re.IGNORECASE)
domain_regex = _lazy_re_compile(
# max length for domain name labels is 63 characters per RFC 1034
r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z',
re.IGNORECASE)
literal_regex = _lazy_re_compile(
# literal form, ipv4 or ipv6 address (SMTP 4.1.3)
r'\[([A-f0-9:\.]+)\]\Z',
re.IGNORECASE)
domain_whitelist = ['localhost']
def __init__(self, message=None, code=None, whitelist=None):
if message is not None:
self.message = message
if code is not None:
self.code = code
if whitelist is not None:
self.domain_whitelist = whitelist
def __call__(self, value):
if not value or '@' not in value:
raise ValidationError(self.message, code=self.code)
user_part, domain_part = value.rsplit('@', 1)
if not self.user_regex.match(user_part):
raise ValidationError(self.message, code=self.code)
if (domain_part not in self.domain_whitelist and
not self.validate_domain_part(domain_part)):
# Try for possible IDN domain-part
try:
domain_part = domain_part.encode('idna').decode('ascii')
if self.validate_domain_part(domain_part):
return
except UnicodeError:
pass
raise ValidationError(self.message, code=self.code)
def validate_domain_part(self, domain_part):
if self.domain_regex.match(domain_part):
return True
literal_match = self.literal_regex.match(domain_part)
if literal_match:
ip_address = literal_match.group(1)
try:
validate_ipv46_address(ip_address)
return True
except ValidationError:
pass
return False
def __eq__(self, other):
return (
isinstance(other, EmailValidator) and
(self.domain_whitelist == other.domain_whitelist) and
(self.message == other.message) and
(self.code == other.code)
)
validate_email = EmailValidator()
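# Usage sketch (illustrative): validate_email('user@example.com') passes
# silently, while validate_email('not-an-email') raises ValidationError.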
slug_re = _lazy_re_compile(r'^[-a-zA-Z0-9_]+\Z')
validate_slug = RegexValidator(
slug_re,
# Translators: "letters" means latin letters: a-z and A-Z.
_("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."),
'invalid'
)
slug_unicode_re = _lazy_re_compile(r'^[-\w]+\Z')
validate_unicode_slug = RegexValidator(
slug_unicode_re,
_("Enter a valid 'slug' consisting of Unicode letters, numbers, underscores, or hyphens."),
'invalid'
)
def validate_ipv4_address(value):
try:
ipaddress.IPv4Address(value)
except ValueError:
raise ValidationError(_('Enter a valid IPv4 address.'), code='invalid')
def validate_ipv6_address(value):
if not is_valid_ipv6_address(value):
raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid')
def validate_ipv46_address(value):
try:
validate_ipv4_address(value)
except ValidationError:
try:
validate_ipv6_address(value)
except ValidationError:
raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid')
ip_address_validator_map = {
'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')),
'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')),
'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')),
}
def ip_address_validators(protocol, unpack_ipv4):
"""
Depending on the given parameters, return the appropriate validators for
the GenericIPAddressField.
"""
if protocol != 'both' and unpack_ipv4:
raise ValueError(
"You can only use `unpack_ipv4` if `protocol` is set to 'both'")
try:
return ip_address_validator_map[protocol.lower()]
except KeyError:
raise ValueError("The protocol '%s' is unknown. Supported: %s"
% (protocol, list(ip_address_validator_map)))
def int_list_validator(sep=',', message=None, code='invalid', allow_negative=False):
regexp = _lazy_re_compile(r'^%(neg)s\d+(?:%(sep)s%(neg)s\d+)*\Z' % {
'neg': '(-)?' if allow_negative else '',
'sep': re.escape(sep),
})
return RegexValidator(regexp, message=message, code=code)
validate_comma_separated_integer_list = int_list_validator(
message=_('Enter only digits separated by commas.'),
)
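# Behaviour sketch for int_list_validator (illustrative, not part of the
# original module):
#
#   validate_comma_separated_integer_list('1,2,3')               # passes
#   validate_comma_separated_integer_list('1,,2')                # raises ValidationError
#   int_list_validator(sep=';', allow_negative=True)('-1;2')     # passes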
@deconstructible
class BaseValidator:
message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).')
code = 'limit_value'
def __init__(self, limit_value, message=None):
self.limit_value = limit_value
if message:
self.message = message
def __call__(self, value):
cleaned = self.clean(value)
params = {'limit_value': self.limit_value, 'show_value': cleaned, 'value': value}
if self.compare(cleaned, self.limit_value):
raise ValidationError(self.message, code=self.code, params=params)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.limit_value == other.limit_value and
self.message == other.message and
self.code == other.code
)
def compare(self, a, b):
return a is not b
def clean(self, x):
return x
@deconstructible
class MaxValueValidator(BaseValidator):
message = _('Ensure this value is less than or equal to %(limit_value)s.')
code = 'max_value'
def compare(self, a, b):
return a > b
@deconstructible
class MinValueValidator(BaseValidator):
message = _('Ensure this value is greater than or equal to %(limit_value)s.')
code = 'min_value'
def compare(self, a, b):
return a < b
@deconstructible
class MinLengthValidator(BaseValidator):
message = ngettext_lazy(
'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'min_length'
def compare(self, a, b):
return a < b
def clean(self, x):
return len(x)
@deconstructible
class MaxLengthValidator(BaseValidator):
message = ngettext_lazy(
'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'max_length'
def compare(self, a, b):
return a > b
def clean(self, x):
return len(x)
@deconstructible
class DecimalValidator:
"""
Validate that the input does not exceed the maximum number of digits
expected, otherwise raise ValidationError.
"""
messages = {
'max_digits': ngettext_lazy(
'Ensure that there are no more than %(max)s digit in total.',
'Ensure that there are no more than %(max)s digits in total.',
'max'
),
'max_decimal_places': ngettext_lazy(
'Ensure that there are no more than %(max)s decimal place.',
'Ensure that there are no more than %(max)s decimal places.',
'max'
),
'max_whole_digits': ngettext_lazy(
'Ensure that there are no more than %(max)s digit before the decimal point.',
'Ensure that there are no more than %(max)s digits before the decimal point.',
'max'
),
}
def __init__(self, max_digits, decimal_places):
self.max_digits = max_digits
self.decimal_places = decimal_places
def __call__(self, value):
digit_tuple, exponent = value.as_tuple()[1:]
decimals = abs(exponent)
# digit_tuple doesn't include any leading zeros.
digits = len(digit_tuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
whole_digits = digits - decimals
if self.max_digits is not None and digits > self.max_digits:
raise ValidationError(
self.messages['max_digits'],
code='max_digits',
params={'max': self.max_digits},
)
if self.decimal_places is not None and decimals > self.decimal_places:
raise ValidationError(
self.messages['max_decimal_places'],
code='max_decimal_places',
params={'max': self.decimal_places},
)
if (self.max_digits is not None and self.decimal_places is not None and
whole_digits > (self.max_digits - self.decimal_places)):
raise ValidationError(
self.messages['max_whole_digits'],
code='max_whole_digits',
params={'max': (self.max_digits - self.decimal_places)},
)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.max_digits == other.max_digits and
self.decimal_places == other.decimal_places
)
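# Behaviour sketch for DecimalValidator (illustrative; the values are chosen to
# match the digit counting above, they are not taken from the original module):
#
#   from decimal import Decimal
#   v = DecimalValidator(max_digits=5, decimal_places=2)
#   v(Decimal('123.45'))    # ok: 5 digits in total, 2 after the decimal point
#   v(Decimal('123.456'))   # raises ValidationError: more than 2 decimal places
#   v(Decimal('1234.5'))    # raises ValidationError: more than 5 - 2 = 3 whole digits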
@deconstructible
class FileExtensionValidator:
message = _(
"File extension '%(extension)s' is not allowed. "
"Allowed extensions are: '%(allowed_extensions)s'."
)
code = 'invalid_extension'
def __init__(self, allowed_extensions=None, message=None, code=None):
if allowed_extensions is not None:
allowed_extensions = [allowed_extension.lower() for allowed_extension in allowed_extensions]
self.allowed_extensions = allowed_extensions
if message is not None:
self.message = message
if code is not None:
self.code = code
def __call__(self, value):
extension = os.path.splitext(value.name)[1][1:].lower()
if self.allowed_extensions is not None and extension not in self.allowed_extensions:
raise ValidationError(
self.message,
code=self.code,
params={
'extension': extension,
'allowed_extensions': ', '.join(self.allowed_extensions)
}
)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.allowed_extensions == other.allowed_extensions and
self.message == other.message and
self.code == other.code
)
def get_available_image_extensions():
try:
from PIL import Image
except ImportError:
return []
else:
Image.init()
return [ext.lower()[1:] for ext in Image.EXTENSION]
validate_image_file_extension = FileExtensionValidator(
allowed_extensions=get_available_image_extensions(),
)
|
py | 1a3624da50024667f2b3f16162b57963d7cf046e | # -*- coding: utf-8 -*-
'''
The AWS Cloud Module
====================
The AWS cloud module is used to interact with the Amazon Web Services system.
This module has been replaced by the EC2 cloud module, and is no longer
supported. The documentation shown here is for reference only; it is highly
recommended to change all usages of this driver over to the EC2 driver.
If this driver is still needed, set up the cloud configuration at
``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/aws.conf``:
.. code-block:: yaml
my-aws-config:
# The AWS API authentication id
id: GKTADJGHEIQSXMKKRBJ08H
# The AWS API authentication key
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
# The ssh keyname to use
keyname: default
# The amazon security group
securitygroup: ssh_open
# The location of the private key which corresponds to the keyname
private_key: /root/default.pem
provider: aws
'''
# pylint: disable=E0102
# Import python libs
import os
import stat
import uuid
import pprint
import logging
# Import salt.cloud libs
import salt.utils.cloud
import salt.config as config
from salt.utils import namespaced_function
from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401
from salt.cloud.libcloudfuncs import destroy as libcloudfuncs_destroy
from salt.cloud.exceptions import (
SaltCloudException,
SaltCloudSystemExit,
SaltCloudConfigError,
SaltCloudExecutionTimeout,
SaltCloudExecutionFailure
)
# Get logging started
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'aws'
# Only load in this module if the AWS configurations are in place
def __virtual__():
'''
    Set up the libcloud functions and check for AWS configs
'''
try:
import botocore
# Since we have botocore, we won't load the libcloud AWS module
log.debug(
'The \'botocore\' library is installed. The libcloud AWS support '
'will not be loaded.'
)
return False
except ImportError:
pass
if get_configured_provider() is False:
log.debug(
'There is no AWS cloud provider configuration available. Not '
'loading module'
)
return False
for provider, details in __opts__['providers'].iteritems():
if 'provider' not in details or details['provider'] != 'aws':
continue
if not os.path.exists(details['private_key']):
raise SaltCloudException(
'The AWS key file {0!r} used in the {1!r} provider '
'configuration does not exist\n'.format(
details['private_key'],
provider
)
)
keymode = str(
oct(stat.S_IMODE(os.stat(details['private_key']).st_mode))
)
if keymode not in ('0400', '0600'):
raise SaltCloudException(
'The AWS key file {0!r} used in the {1!r} provider '
'configuration needs to be set to mode 0400 or 0600\n'.format(
details['private_key'],
provider
)
)
global avail_images, avail_sizes, script, list_nodes
global avail_locations, list_nodes_full, list_nodes_select, get_image
global get_size, libcloudfuncs_destroy, show_instance
# open a connection in a specific region
conn = get_conn(**{'location': get_location()})
# Init the libcloud functions
get_size = namespaced_function(get_size, globals(), (conn,))
get_image = namespaced_function(get_image, globals(), (conn,))
avail_locations = namespaced_function(avail_locations, globals(), (conn,))
avail_images = namespaced_function(avail_images, globals(), (conn,))
avail_sizes = namespaced_function(avail_sizes, globals(), (conn,))
script = namespaced_function(script, globals(), (conn,))
list_nodes = namespaced_function(list_nodes, globals(), (conn,))
list_nodes_full = namespaced_function(list_nodes_full, globals(), (conn,))
list_nodes_select = namespaced_function(
list_nodes_select, globals(), (conn,)
)
libcloudfuncs_destroy = namespaced_function(
libcloudfuncs_destroy, globals(), (conn,)
)
show_instance = namespaced_function(show_instance, globals())
log.debug('Loading Libcloud AWS cloud module')
return __virtualname__
EC2_LOCATIONS = {
'ap-northeast-1': Provider.EC2_AP_NORTHEAST,
'ap-southeast-1': Provider.EC2_AP_SOUTHEAST,
'eu-west-1': Provider.EC2_EU_WEST,
'sa-east-1': Provider.EC2_SA_EAST,
'us-east-1': Provider.EC2_US_EAST,
'us-west-1': Provider.EC2_US_WEST,
'us-west-2': Provider.EC2_US_WEST_OREGON
}
DEFAULT_LOCATION = 'us-east-1'
if hasattr(Provider, 'EC2_AP_SOUTHEAST2'):
EC2_LOCATIONS['ap-southeast-2'] = Provider.EC2_AP_SOUTHEAST2
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or 'aws',
('id', 'key', 'keyname', 'securitygroup', 'private_key')
)
def get_conn(**kwargs):
'''
Return a conn object for the passed VM data
'''
if 'location' in kwargs:
location = kwargs['location']
if location not in EC2_LOCATIONS:
raise SaltCloudException(
'The specified location does not seem to be valid: '
'{0}\n'.format(
location
)
)
else:
location = DEFAULT_LOCATION
driver = get_driver(EC2_LOCATIONS[location])
vm_ = get_configured_provider()
return driver(
config.get_cloud_config_value('id', vm_, __opts__, search_global=False),
config.get_cloud_config_value('key', vm_, __opts__, search_global=False)
)
def keyname(vm_):
'''
Return the keyname
'''
return config.get_cloud_config_value(
'keyname', vm_, __opts__, search_global=False
)
def securitygroup(vm_):
'''
Return the security group
'''
return config.get_cloud_config_value(
'securitygroup', vm_, __opts__, search_global=False
)
def iam_profile(vm_):
'''
Return the IAM role
'''
return config.get_cloud_config_value(
'iam_profile', vm_, __opts__, search_global=False
)
def block_device_mappings(vm_):
'''
Return the block device mapping:
::
[{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
{'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}]
'''
return config.get_cloud_config_value(
'block_device_mappings', vm_, __opts__, search_global=False
)
def ssh_username(vm_):
'''
Return the ssh_username. Defaults to 'ec2-user'.
'''
usernames = config.get_cloud_config_value(
'ssh_username', vm_, __opts__
)
if not isinstance(usernames, list):
usernames = [usernames]
# get rid of None's or empty names
usernames = filter(lambda x: x, usernames)
# Keep a copy of the usernames the user might have provided
initial = usernames[:]
# Add common usernames to the list to be tested
for name in ('ec2-user', 'ubuntu', 'admin', 'bitnami', 'root'):
if name not in usernames:
usernames.append(name)
# Add the user provided usernames to the end of the list since enough time
# might need to pass before the remote service is available for logins and
    # the proper username might have passed its iteration.
    # This has been observed in a CentOS 5.7 EC2 image.
usernames.extend(initial)
return usernames
def ssh_interface(vm_):
'''
Return the ssh_interface type to connect to. Either 'public_ips' (default)
or 'private_ips'.
'''
return config.get_cloud_config_value(
'ssh_interface', vm_, __opts__, default='public_ips',
search_global=False
)
def get_location(vm_=None):
'''
Return the AWS region to use, in this order:
- CLI parameter
- Cloud profile setting
- Global salt-cloud config
'''
return __opts__.get(
'location',
config.get_cloud_config_value(
'location',
vm_ or get_configured_provider(), __opts__,
default=DEFAULT_LOCATION
)
)
def get_availability_zone(conn, vm_):
'''
Return the availability zone to use
'''
avz = config.get_cloud_config_value(
'availability_zone', vm_, __opts__, search_global=False
)
locations = conn.list_locations()
if avz is None:
# Default to first zone
return locations[0]
for loc in locations:
if loc.availability_zone.name == avz:
return loc
def create(vm_):
'''
Create a single VM from a data dict
'''
key_filename = config.get_cloud_config_value(
'private_key', vm_, __opts__, search_global=False, default=None
)
if key_filename is not None and not os.path.isfile(key_filename):
raise SaltCloudConfigError(
'The defined key_filename {0!r} does not exist'.format(
key_filename
)
)
location = get_location(vm_)
log.info('Creating Cloud VM {0} in {1}'.format(vm_['name'], location))
conn = get_conn(location=location)
usernames = ssh_username(vm_)
kwargs = {
'ssh_key': config.get_cloud_config_value(
'private_key', vm_, __opts__, search_global=False
),
'name': vm_['name'],
'image': get_image(conn, vm_),
'size': get_size(conn, vm_),
'location': get_availability_zone(conn, vm_)
}
ex_keyname = keyname(vm_)
if ex_keyname:
kwargs['ex_keyname'] = ex_keyname
ex_securitygroup = securitygroup(vm_)
if ex_securitygroup:
kwargs['ex_securitygroup'] = ex_securitygroup
ex_blockdevicemappings = block_device_mappings(vm_)
if ex_blockdevicemappings:
kwargs['ex_blockdevicemappings'] = ex_blockdevicemappings
ex_iam_profile = iam_profile(vm_)
if ex_iam_profile:
# libcloud does not implement 'iam_profile' yet.
# A pull request has been suggested
# https://github.com/apache/libcloud/pull/150
raise SaltCloudConfigError(
'libcloud does not implement \'iam_profile\' yet. '
'Use EC2 driver instead.'
)
tags = config.get_cloud_config_value('tag', vm_, __opts__, {}, search_global=False)
if not isinstance(tags, dict):
raise SaltCloudConfigError(
'\'tag\' should be a dict.'
)
kwargs['ex_metadata'] = config.get_cloud_config_value('metadata', vm_, __opts__, default={}, search_global=False)
if not isinstance(kwargs['ex_metadata'], dict):
raise SaltCloudConfigError(
'\'metadata\' should be a dict.'
)
try:
data = conn.create_node(**kwargs)
except Exception as exc:
log.error(
'Error creating {0} on AWS\n\n'
'The following exception was thrown by libcloud when trying to '
'run the initial deployment: {1}\n'.format(
vm_['name'], exc
),
# Show the traceback if the debug logging level is enabled
exc_info=log.isEnabledFor(logging.DEBUG)
)
return False
log.info('Created node {0}'.format(vm_['name']))
def __get_node_data(conn, vm_name):
data = get_node(conn, vm_name)
if data is None:
# Trigger a failure in the waiting function
return False
if ssh_interface(vm_) == 'private_ips' and data.private_ips:
return data
if ssh_interface(vm_) == 'public_ips' and data.public_ips:
return data
try:
data = salt.utils.cloud.wait_for_ip(
__get_node_data,
update_args=(conn, vm_['name']),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=5 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=0.5),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(exc.message)
if tags:
set_tags(vm_['name'], tags, call='action')
if ssh_interface(vm_) == 'private_ips':
log.info('Salt node data. Private_ip: {0}'.format(data.private_ips[0]))
ip_address = data.private_ips[0]
else:
log.info('Salt node data. Public_ip: {0}'.format(data.public_ips[0]))
ip_address = data.public_ips[0]
username = 'ec2-user'
ssh_connect_timeout = config.get_cloud_config_value(
'ssh_connect_timeout', vm_, __opts__, 900 # 15 minutes
)
if salt.utils.cloud.wait_for_port(ip_address, timeout=ssh_connect_timeout):
for user in usernames:
if salt.utils.cloud.wait_for_passwd(
host=ip_address,
username=user,
ssh_timeout=config.get_cloud_config_value(
'wait_for_passwd_timeout', vm_, __opts__,
default=1 * 60),
key_filename=key_filename):
username = user
break
else:
raise SaltCloudSystemExit(
'Failed to authenticate against remote ssh'
)
ret = {}
if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
deploy_script = script(vm_)
deploy_kwargs = {
'host': ip_address,
'username': username,
'key_filename': key_filename,
'tmp_dir': config.get_cloud_config_value(
'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'
),
'deploy_command': config.get_cloud_config_value(
'deploy_command', vm_, __opts__,
default='/tmp/.saltcloud/deploy.sh',
),
'tty': config.get_cloud_config_value(
'tty', vm_, __opts__, default=True
),
'script': deploy_script.script,
'name': vm_['name'],
'sudo': config.get_cloud_config_value(
'sudo', vm_, __opts__, default=(username != 'root')
),
'sudo_password': config.get_cloud_config_value(
'sudo_password', vm_, __opts__, default=None
),
'start_action': __opts__['start_action'],
'parallel': __opts__['parallel'],
'conf_file': __opts__['conf_file'],
'sock_dir': __opts__['sock_dir'],
'minion_pem': vm_['priv_key'],
'minion_pub': vm_['pub_key'],
'keep_tmp': __opts__['keep_tmp'],
'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
'display_ssh_output': config.get_cloud_config_value(
'display_ssh_output', vm_, __opts__, default=True
),
'script_args': config.get_cloud_config_value(
'script_args', vm_, __opts__
),
'script_env': config.get_cloud_config_value('script_env', vm_, __opts__),
'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
}
# Deploy salt-master files, if necessary
if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
deploy_kwargs['make_master'] = True
deploy_kwargs['master_pub'] = vm_['master_pub']
deploy_kwargs['master_pem'] = vm_['master_pem']
master_conf = salt.utils.cloud.master_config(__opts__, vm_)
deploy_kwargs['master_conf'] = master_conf
if master_conf.get('syndic_master', None):
deploy_kwargs['make_syndic'] = True
deploy_kwargs['make_minion'] = config.get_cloud_config_value(
'make_minion', vm_, __opts__, default=True
)
# Check for Windows install params
win_installer = config.get_cloud_config_value('win_installer', vm_, __opts__)
if win_installer:
deploy_kwargs['win_installer'] = win_installer
minion = salt.utils.cloud.minion_config(__opts__, vm_)
deploy_kwargs['master'] = minion['master']
deploy_kwargs['username'] = config.get_cloud_config_value(
'win_username', vm_, __opts__, default='Administrator'
)
deploy_kwargs['password'] = config.get_cloud_config_value(
'win_password', vm_, __opts__, default=''
)
# Store what was used to the deploy the VM
ret['deploy_kwargs'] = deploy_kwargs
deployed = False
if win_installer:
deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
else:
deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)
if deployed:
log.info('Salt installed on {name}'.format(**vm_))
else:
log.error('Failed to start Salt on Cloud VM {name}'.format(**vm_))
log.info('Created Cloud VM {0[name]!r}'.format(vm_))
log.debug(
'{0[name]!r} VM creation details:\n{1}'.format(
vm_, pprint.pformat(data.__dict__)
)
)
volumes = config.get_cloud_config_value(
'volumes', vm_, __opts__, search_global=True
)
if volumes:
log.info('Create and attach volumes to node {0}'.format(data.name))
create_attach_volumes(volumes, location, data)
ret.update(data.__dict__)
return ret
def create_attach_volumes(volumes, location, data):
'''
Create and attach volumes to created node
'''
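    # Expected ``volumes`` structure (a sketch inferred from the keys used
    # below rather than a documented schema): a list of dicts carrying at
    # least ``device`` and ``size``, e.g.
    #   [{'device': '/dev/sdb', 'size': 10},
    #    {'device': '/dev/sdc', 'size': 100}]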
conn = get_conn(location=location)
node_avz = data.__dict__.get('extra').get('availability')
avz = None
for avz in conn.list_locations():
if avz.availability_zone.name == node_avz:
break
for volume in volumes:
volume_name = '{0} on {1}'.format(volume['device'], data.name)
created_volume = conn.create_volume(volume['size'], volume_name, avz)
attach = conn.attach_volume(data, created_volume, volume['device'])
if attach:
log.info(
'{0} attached to {1} (aka {2}) as device {3}'.format(
created_volume.id, data.id, data.name, volume['device']
)
)
def stop(name, call=None):
'''
Stop a node
'''
data = {}
if call != 'action':
raise SaltCloudSystemExit(
'This action must be called with -a or --action.'
)
location = get_location()
conn = get_conn(location=location)
node = get_node(conn, name)
try:
data = conn.ex_stop_node(node=node)
log.debug(data)
log.info('Stopped node {0}'.format(name))
except Exception:
log.error('Failed to stop node {0}\n'.format(name), exc_info=True)
return data
def start(name, call=None):
'''
Start a node
'''
data = {}
if call != 'action':
raise SaltCloudSystemExit(
'This action must be called with -a or --action.'
)
location = get_location()
conn = get_conn(location=location)
node = get_node(conn, name)
try:
data = conn.ex_start_node(node=node)
log.debug(data)
log.info('Started node {0}'.format(name))
except Exception:
log.error('Failed to start node {0}\n'.format(name), exc_info=True)
return data
def set_tags(name, tags, call=None):
'''
Set tags for a node
CLI Example::
salt-cloud -a set_tags mymachine tag1=somestuff tag2='Other stuff'
'''
if call != 'action':
raise SaltCloudSystemExit(
'This action must be called with -a or --action.'
)
location = get_location()
conn = get_conn(location=location)
node = get_node(conn, name)
try:
log.info('Setting tags for {0}'.format(name))
conn.ex_create_tags(resource=node, tags=tags)
# print the new tags- with special handling for renaming of a node
if 'Name' in tags:
return get_tags(tags['Name'])
return get_tags(name)
except Exception:
log.error('Failed to set tags for {0}\n'.format(name), exc_info=True)
def get_tags(name, call=None):
'''
Retrieve tags for a node
'''
data = {}
if call != 'action':
raise SaltCloudSystemExit(
'This action must be called with -a or --action.'
)
location = get_location()
conn = get_conn(location=location)
node = get_node(conn, name)
try:
log.info('Retrieving tags from {0}'.format(name))
data = conn.ex_describe_tags(resource=node)
log.info(data)
except Exception:
log.error(
'Failed to retrieve tags from {0}\n'.format(name),
exc_info=True
)
return data
def del_tags(name, kwargs, call=None):
'''
Delete tags for a node
CLI Example::
salt-cloud -a del_tags mymachine tag1,tag2,tag3
'''
ret = {}
if call != 'action':
raise SaltCloudSystemExit(
'This action must be called with -a or --action.'
)
location = get_location()
conn = get_conn(location=location)
node = get_node(conn, name)
current_tags = conn.ex_describe_tags(resource=node)
tags = {}
for tag in kwargs['tags'].split(','):
tags[tag] = current_tags[tag]
try:
conn.ex_delete_tags(resource=node, tags=tags)
log.info('Deleting tags from {0}'.format(name))
ret = get_tags(name)
except Exception:
log.error(
'Failed to delete tags from {0}\n'.format(name),
exc_info=True
)
return ret
def rename(name, kwargs, call=None):
'''
Properly rename a node. Pass in the new name as "new name".
CLI Example::
salt-cloud -a rename mymachine newname=yourmachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'This action must be called with -a or --action.'
)
location = get_location()
conn = get_conn(location=location)
node = get_node(conn, name)
tags = {'Name': kwargs['newname']}
try:
log.info('Renaming {0} to {1}'.format(name, kwargs['newname']))
conn.ex_create_tags(resource=node, tags=tags)
salt.utils.cloud.rename_key(
__opts__['pki_dir'], name, kwargs['newname']
)
except Exception as exc:
log.error(
'Failed to rename {0} to {1}: {2}\n'.format(
name, kwargs['newname'], exc
),
# Show the traceback if the debug logging level is enabled
exc_info=log.isEnabledFor(logging.DEBUG)
)
return kwargs['newname']
def destroy(name):
'''
Wrap core libcloudfuncs destroy method, adding check for termination
protection
'''
ret = {}
newname = name
if config.get_cloud_config_value('rename_on_destroy',
get_configured_provider(),
__opts__, search_global=False) is True:
newname = '{0}-DEL{1}'.format(name, uuid.uuid4().hex)
rename(name, kwargs={'newname': newname}, call='action')
log.info(
'Machine will be identified as {0} until it has been '
'cleaned up by AWS.'.format(
newname
)
)
ret['newname'] = newname
try:
result = libcloudfuncs_destroy(newname, get_conn())
ret.update({'Destroyed': result})
except Exception as exc:
if not exc.message.startswith('OperationNotPermitted'):
log.exception(exc)
raise exc
log.info(
'Failed: termination protection is enabled on {0}'.format(
name
)
)
return ret
|
py | 1a3625526e86ae49fa3f2070dd82bc9842190921 | # Copyright 2019 Ross Wightman
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" RMSProp modified to behave like Tensorflow impl
Originally cut & paste from PyTorch RMSProp
https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py
Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE
Modifications Copyright 2020 Ross Wightman
"""
import torch
from torch.optim import Optimizer
class RMSpropTF(Optimizer):
"""Implements RMSprop algorithm (TensorFlow style epsilon)
NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt
and a few other modifications to closer match Tensorflow for matching hyper-params.
Noteworthy changes include:
1. Epsilon applied inside square-root
2. square_avg initialized to ones
3. LR scaling of update accumulated in momentum buffer
Proposed by G. Hinton in his
`course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
The centered version first appears in `Generating Sequences
With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
momentum (float, optional): momentum factor (default: 0)
alpha (float, optional): smoothing (decay) constant (default: 0.9)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-10)
centered (bool, optional) : if ``True``, compute the centered RMSProp,
the gradient is normalized by an estimation of its variance
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101
lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer
update as per defaults in Tensorflow
"""
def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False,
decoupled_decay=False, lr_in_momentum=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= momentum:
raise ValueError("Invalid momentum value: {}".format(momentum))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= alpha:
raise ValueError("Invalid alpha value: {}".format(alpha))
defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay,
decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum)
super(RMSpropTF, self).__init__(params, defaults)
def __setstate__(self, state):
super(RMSpropTF, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('momentum', 0)
group.setdefault('centered', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('RMSprop does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.ones_like(p.data) # PyTorch inits to zero
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p.data)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p.data)
square_avg = state['square_avg']
one_minus_alpha = 1. - group['alpha']
state['step'] += 1
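                # NOTE: the in-place add_/addcmul_/addcdiv_ calls below use the legacy
                # (scalar, tensor) overloads; newer PyTorch spells these with explicit
                # alpha/value keywords (e.g. add_(t, alpha=s)), but the math is identical.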
if group['weight_decay'] != 0:
if 'decoupled_decay' in group and group['decoupled_decay']:
p.data.add_(-group['weight_decay'], p.data)
else:
grad = grad.add(group['weight_decay'], p.data)
# Tensorflow order of ops for updating squared avg
square_avg.add_(one_minus_alpha, grad.pow(2) - square_avg)
# square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad) # PyTorch original
if group['centered']:
grad_avg = state['grad_avg']
grad_avg.add_(one_minus_alpha, grad - grad_avg)
# grad_avg.mul_(alpha).add_(1 - alpha, grad) # PyTorch original
avg = square_avg.addcmul(-1, grad_avg, grad_avg).add(group['eps']).sqrt_() # eps moved in sqrt
else:
avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt
if group['momentum'] > 0:
buf = state['momentum_buffer']
# Tensorflow accumulates the LR scaling in the momentum buffer
if 'lr_in_momentum' in group and group['lr_in_momentum']:
buf.mul_(group['momentum']).addcdiv_(group['lr'], grad, avg)
p.data.add_(-buf)
else:
# PyTorch scales the param update by LR
buf.mul_(group['momentum']).addcdiv_(grad, avg)
p.data.add_(-group['lr'], buf)
else:
p.data.addcdiv_(-group['lr'], grad, avg)
return loss
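# Minimal usage sketch (illustrative, not part of the upstream file): RMSpropTF is a
# drop-in replacement for torch.optim.RMSprop; the toy model and data below are
# assumptions chosen only to show the call pattern.
if __name__ == "__main__":
    model = torch.nn.Linear(10, 2)
    optimizer = RMSpropTF(model.parameters(), lr=1e-2, alpha=0.9, eps=1e-10, momentum=0.9)
    x, y = torch.randn(4, 10), torch.randn(4, 2)
    loss = torch.nn.functional.mse_loss(model(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()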
|
py | 1a362617f88166f844bed101adbc742bd02d40d3 | import random
import numpy as np
from xgboost.sklearn import XGBClassifier
action_list = []
observation_list = []
result_list = []
def i_win(me, you):
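    # Returns 1 if `me` beats `you`, 0 for a tie, -1 for a loss (moves encoded as 0/1/2).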
return int((me - you + 4) % 3) - 1
# for i in range(3):
# text = ""
# for j in range(3):
# text += f'{i_win(i, j)} '
# print(f'{text}')
def Agent(observation, configuration):
global action_list, observation_list, result_list
if observation.step == 0:
action = random.randint(0, 2)
action_list.append(action)
return action
if observation.step < 20:
observation_list.append(observation.lastOpponentAction)
result_list.append(
i_win(action_list[-1], observation.lastOpponentAction))
action = random.randint(0, 2)
action_list.append(action)
return action
observation_list.append(observation.lastOpponentAction)
result_list.append(i_win(action_list[-1], observation.lastOpponentAction))
if observation.step < 25:
start_from = 0
else:
start_from = -1*random.randint(16, 20)
X_train = np.array([action_list[start_from:-1],
observation_list[start_from:-1], result_list[start_from:-1]]).T
y_train = np.roll(observation_list[start_from:-1], -1).T
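    # np.roll(..., -1) pairs each (my action, opponent action, result) row with the
    # opponent's *next* move as its label; the wrapped final element is an artifact of
    # the roll over this short training window.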
model = XGBClassifier(
learning_rate=0.01,
n_estimators=20,
nthread=4,
use_label_encoder=False)
model.fit(X_train, y_train)
last_data = np.array(
[action_list[-1], observation_list[-1], result_list[-1]])
expected_observation = model.predict(last_data.reshape(1, -1))
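    # When the cumulative score is clearly negative, deviate from the straight counter:
    # randomly either match the predicted move or play the move it beats, presumably to
    # avoid being predictable.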
if sum(result_list) < -3 and observation.step > 30:
if random.randint(0, 1):
action = int((expected_observation - 1) % 3)
else:
            action = int(expected_observation)  # cast to a plain int, matching the other branches
else:
action = int((expected_observation + 1) % 3)
action_list.append(action)
return action
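# Local smoke-test sketch (not part of the original agent): kaggle-environments normally
# supplies `observation`/`configuration` objects with attribute access, so SimpleNamespace
# stands in for them here and the opponent simply plays uniformly random moves.
if __name__ == "__main__":
    from types import SimpleNamespace
    opponent_last = None
    for step in range(40):
        obs = SimpleNamespace(step=step, lastOpponentAction=opponent_last)
        my_action = Agent(obs, SimpleNamespace(signs=3))
        assert my_action in (0, 1, 2)
        opponent_last = random.randint(0, 2)
    print("cumulative result vs random opponent:", sum(result_list))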
|
py | 1a36264f0194b8895944e7dc89cee6e57d42d0e6 | #!"d:\webprojects\spring 2021\financial-demo\scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','pasteurize'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'future==0.18.2'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('future==0.18.2', 'console_scripts', 'pasteurize')())
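# Usage sketch (illustrative): the `future` package's pasteurize tool rewrites Python 3
# source so it also runs under Python 2; the file name below is an assumption.
#   pasteurize -w mymodule.py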
|
py | 1a3626e8446c52aafaeac0bf8eabd0aa6c3ec069 | import pytest
from hiku.executors.asyncio import AsyncIOExecutor
from hiku.federation.endpoint import (
FederatedGraphQLEndpoint,
AsyncFederatedGraphQLEndpoint,
)
from hiku.federation.engine import Engine
from hiku.executors.sync import SyncExecutor
from tests.test_federation.utils import (
GRAPH,
ASYNC_GRAPH,
)
def execute(graph, query_dict):
graphql_endpoint = FederatedGraphQLEndpoint(
Engine(SyncExecutor()),
graph,
)
return graphql_endpoint.dispatch(query_dict)
async def execute_async(graph, query_dict):
graphql_endpoint = AsyncFederatedGraphQLEndpoint(
Engine(AsyncIOExecutor()),
graph,
)
return await graphql_endpoint.dispatch(query_dict)
ENTITIES_QUERY = {
'query': """
query($representations:[_Any!]!) {
_entities(representations:$representations) {
...on Order {
cart {
id
status
items { id name }
}
}
}
}
""",
'variables': {
'representations': [
{'__typename': 'Order', 'cartId': 1},
{'__typename': 'Order', 'cartId': 2},
]
}
}
SDL_QUERY = {'query': '{_service {sdl}}'}
def test_execute_sdl():
result = execute(GRAPH, SDL_QUERY)
assert result['data']['_service']['sdl'] is not None
def test_execute_sync_executor():
result = execute(GRAPH, ENTITIES_QUERY)
expect = [
{'cart': {'id': 1, 'status': 'NEW',
'items': [{'id': 10, 'name': 'Ipad'}]}},
{'cart': {'id': 2, 'status': 'ORDERED',
'items': [{'id': 20, 'name': 'Book'},
{'id': 21, 'name': 'Pen'}]}}
]
assert expect == result['data']['_entities']
@pytest.mark.asyncio
async def test_execute_async_executor():
result = await execute_async(ASYNC_GRAPH, ENTITIES_QUERY)
expect = [
{'cart': {'id': 1, 'status': 'NEW',
'items': [{'id': 10, 'name': 'Ipad'}]}},
{'cart': {'id': 2, 'status': 'ORDERED',
'items': [{'id': 20, 'name': 'Book'},
{'id': 21, 'name': 'Pen'}]}}
]
assert expect == result['data']['_entities']
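# Running these tests locally (sketch): the async case relies on pytest-asyncio for the
# @pytest.mark.asyncio marker; the path below is inferred from the utils import above.
#   pytest tests/test_federation/ -v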
|
py | 1a3626eee57f3562785767d4e99dcf1917b1ec96 | from setuptools import setup
dependencies = ["numpy",
"scipy",
"numba"]
def readme():
with open('README.md') as f:
return f.read()
setup(name='PyRADS',
version='0.1.0',
description='PyRADS is the "Python line-by-line RADiation model for planetary atmosphereS"',
long_description=readme(),
url='',
author='Daniel D.B. Koll',
author_email='[email protected]',
license='MIT',
packages=['pyrads'],
install_requires=dependencies,
zip_safe=False)
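# Installation sketch (assumes this setup.py sits at the PyRADS repository root):
#   pip install .
# after which the package is importable as `import pyrads`.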
|