filename | text
---|---|
the-stack_0_2692 | import pytest
from abridger.extraction_model import Relation
from abridger.schema import SqliteSchema
from test.unit.extractor.base import TestExtractorBase
class TestExtractorSubjectRelationReProcessingIncoming(TestExtractorBase):
@pytest.fixture()
def schema1(self):
for stmt in [
'''
CREATE TABLE test1 (
id INTEGER PRIMARY KEY
);
''', '''
CREATE TABLE test2 (
id INTEGER PRIMARY KEY,
test1_id INTEGER REFERENCES test1
);
''', '''
CREATE TABLE test3 (
id INTEGER PRIMARY KEY,
test2_id INTEGER REFERENCES test2
);
''', '''
CREATE TABLE test4 (
id INTEGER PRIMARY KEY,
test1_id INTEGER REFERENCES test1,
test2_id INTEGER REFERENCES test2
);
''',
]:
self.database.execute(stmt)
return SqliteSchema.create_from_conn(self.database.connection)
@pytest.fixture()
def data1(self, schema1):
table1 = schema1.tables[0]
table2 = schema1.tables[1]
table3 = schema1.tables[2]
table4 = schema1.tables[3]
rows = [
(table1, (1,)),
(table2, (1, 1)),
(table3, (1, 1)),
(table4, (1, 1, 1)),
]
self.database.insert_rows(rows)
return rows
def test_re_processing(self, schema1, data1):
# 1 <- 2 <- 3
# ^ ^
# \ /
# 4
# The extractor algorithm goes breadth first. In this example,
# the test2 table is hit twice. However, the first time it is hit
# it has fewer relationships, so it won't pull in test3.
# The second subject includes test3 and test4. By the time subject 2
# processes test2, it has already been seen by subject 1.
# This test ensures that test2 is re-processed because subject 2
# has more relationships.
rel21 = {'table': 'test2', 'column': 'test1_id'}
rel32 = {'table': 'test3', 'column': 'test2_id'}
rel41 = {'table': 'test4', 'column': 'test1_id'}
extraction_model_data = [
# This subject won't include test3, only test2
{
'subject': [
{'tables': [{'table': 'test1'}]},
{'relations': [rel21]},
]
},
# This subject will include test3 via test4
{
'subject': [
{'tables': [{'table': 'test1'}]},
{'relations': [rel41, rel32]},
]
}
]
self.check_launch(schema1, extraction_model_data, data1)
class TestExtractorTwoSubjectTwoColumnNulling(TestExtractorBase):
TEST_CASES = []
for i in (True, False):
for j in (True, False):
for k in (True, False):
for l in (True, False):
TEST_CASES.append([i, j, k, l])
@pytest.fixture()
def schema1(self):
for stmt in [
'''
CREATE TABLE test1 (
id INTEGER PRIMARY KEY
);
''', '''
CREATE TABLE test2 (
id INTEGER PRIMARY KEY,
test1_id INTEGER REFERENCES test1,
test3_id INTEGER REFERENCES test3,
test5_id INTEGER REFERENCES test5
);
''', '''
CREATE TABLE test3 (
id INTEGER PRIMARY KEY
);
''', '''
CREATE TABLE test5 (
id INTEGER PRIMARY KEY
);
''', '''
CREATE TABLE test4 (
id INTEGER PRIMARY KEY,
test1_id INTEGER REFERENCES test1,
test2_id INTEGER REFERENCES test2
);
''',
]:
self.database.execute(stmt)
return SqliteSchema.create_from_conn(self.database.connection)
@pytest.fixture()
def data1(self, schema1):
table1 = schema1.tables[0]
table2 = schema1.tables[1]
table3 = schema1.tables[2]
table4 = schema1.tables[3]
table5 = schema1.tables[4]
rows = [
(table1, (1,)),
(table3, (1,)),
(table5, (1,)),
(table2, (1, 1, 1, 1)),
(table4, (1, 1, 1)),
]
self.database.insert_rows(rows)
return rows
@pytest.mark.parametrize('i, j, k, l', TEST_CASES)
def test_nulling(self, schema1, data1, i, j, k, l):
# 5
# ^
# /
# 1 <- 2 -> 3
# ^ ^
# \ /
# 4
# The extractor algorithm goes breadth first. By testing with two
# subjects, things can be rigged so that the test2 table is processed
# twice, with different relationships.
#
# This test checks that two outgoing relations on the test2 table
# are processed correctly. If a row in test3 or test5 is not needed,
# then the column on test2 should be made null.
#
# The 16 combinations are:
# relationship from 2 -> 3 enabled/disabled for subject 1 -- i
# relationship from 2 -> 5 enabled/disabled for subject 1 -- j
# relationship from 2 -> 3 enabled/disabled for subject 2 -- k
# relationship from 2 -> 5 enabled/disabled for subject 2 -- l
table2 = schema1.tables[1]
rel21 = {'table': 'test2', 'column': 'test1_id'}
rel41 = {'table': 'test4', 'column': 'test1_id'}
# Outgoing relations are enabled by default.
rel23d = {'table': 'test2', 'column': 'test3_id', 'disabled': True,
'type': Relation.TYPE_OUTGOING}
rel25d = {'table': 'test2', 'column': 'test5_id', 'disabled': True,
'type': Relation.TYPE_OUTGOING}
# Incoming relations
relations = [[rel21], [rel41]]
# Disable outgoing relations
if not i:
relations[0].append(rel23d)
if not j:
relations[0].append(rel25d)
if not k:
relations[1].append(rel23d)
if not l:
relations[1].append(rel25d)
expect3 = 1 if i or k else None # Expect a non-None in test3_id
expect5 = 1 if j or l else None # Expect a non-None in test5_id
expected_data = data1[0:1] + data1[4:5]
expected_data.append((table2, (1, 1, expect3, expect5)))
if expect3:
expected_data += data1[1:2] # Expect a row in test3
if expect5:
expected_data += data1[2:3] # Expect a row in test5
extraction_model_data = [
{'subject': [
{'tables': [{'table': 'test1'}]},
{'relations': relations[0]}]},
{'subject': [
{'tables': [{'table': 'test1'}]},
{'relations': relations[1]}]}
]
self.check_launch(schema1, extraction_model_data, expected_data)
|
the-stack_0_2693 | import unittest
from mock import patch, call, Mock
import update_data_after_sync
class FakeCollection(object):
def find(self):
return [
{'topic_id': 'UKGOVUK_1', '_id': 'https://www.gov.uk/feed?a=b&c=d', 'created': '2013-08-01T12:53:31Z'},
{'topic_id': 'UKGOVUK_2', '_id': 'https://www.gov.uk/pubs?w=x&y=z', 'created': '2015-02-26T09:57:35Z', 'disabled': True},
]
def insert(self, *args):
return True
def remove(self, topic_id):
return True
def count(self):
return 2
@patch.dict(update_data_after_sync.os.environ, {'GOVUK_WEBSITE_ROOT': 'https://integration.gov.uk'})
class UpdateDataAfterSyncTestCase(unittest.TestCase):
@patch.dict(update_data_after_sync.app.config, {'GOVDELIVERY_HOSTNAME': 'omg-production'})
def test_will_not_run_in_production(self):
with self.assertRaises(SystemExit):
update_data_after_sync.update_all_records()
@patch.object(update_data_after_sync, 'logging')
@patch.object(update_data_after_sync.db, 'topics', new_callable=FakeCollection)
@patch.object(FakeCollection, 'remove', return_value=True)
@patch.object(FakeCollection, 'insert', return_value=True)
@patch.dict(update_data_after_sync.os.environ, {'GOVDELIVERY_HOSTNAME': 'stage-api.govdelivery.com'})
@patch.dict(update_data_after_sync.app.config, {'GOVDELIVERY_ACCOUNT_CODE': 'DUPDUPDUP'})
def test_updating_all_records(self, mock_insert_record, mock_delete_record, mock_db, mock_logging):
update_data_after_sync.update_all_records()
mock_logging.info.assert_has_calls([
call('Updating 2 topics with domain integration.gov.uk and account code DUPDUPDUP'),
call('Done')
])
mock_insert_record.assert_has_calls([
call({
'_id': 'https://integration.gov.uk/feed?a=b&c=d',
'topic_id': 'DUPDUPDUP_1',
'created': '2013-08-01T12:53:31Z',
}),
call({
'_id': 'https://integration.gov.uk/pubs?w=x&y=z',
'topic_id': 'DUPDUPDUP_2',
'created' : '2015-02-26T09:57:35Z',
'disabled': True
}),
])
mock_delete_record.assert_has_calls([
call({'_id': 'https://www.gov.uk/feed?a=b&c=d'}),
call({'_id': 'https://www.gov.uk/pubs?w=x&y=z'}),
])
|
the-stack_0_2694 | #!/usr/bin/env python
"""mergesort.py: Program to implement merge sort"""
__author__ = 'Rohit Sinha'
def merge_sort(alist):
if len(alist) <= 1:
return alist
middle = len(alist) // 2  # integer division keeps the slice indices ints (Python 3)
left = alist[:middle]
right = alist[middle:]
left = merge_sort(left)
right = merge_sort(right)
return list(merge(left, right))
def merge(left, right):
result = []
left_index, right_index = 0, 0
while left_index < len(left) and right_index < len(right):
if left[left_index] <= right[right_index]:
result.append(left[left_index])
left_index += 1
else:
result.append(right[right_index])
right_index += 1
if left:
result.extend(left[left_index:])
if right:
result.extend(right[right_index:])
return result
if __name__ == '__main__':
alist = [84, 69, 76, 86, 94, 91]
alist = merge_sort(alist)
print(alist) |
the-stack_0_2696 | import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.distribution import Normal
def rsample(loc, scale):
shape = loc.shape
normal_ = paddle.nn.initializer.Normal()
eps = paddle.empty(shape, dtype=loc.dtype)
normal_(eps)
return loc + eps * scale
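# Hedged note (added for illustration, not part of the original module):
# rsample() above is the reparameterization trick, i.e. drawing
# l ~ N(loc, scale) as loc + eps * scale with eps ~ N(0, 1), which keeps the
# sample differentiable with respect to loc. A minimal sketch, with an
# assumed batch of 4 two-dimensional locations:
def _rsample_demo():
    mu = paddle.zeros([4, 2])            # deterministic means
    samples = rsample(mu, 0.05)          # 4 stochastic 2-D samples around mu
    return samples.shape                 # expected: [4, 2]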
class Retina:
"""A visual retina.
Extracts a foveated glimpse `phi` around location `l`
from an image `x`.
Concretely, encodes the region around `l` at a
high-resolution but uses a progressively lower
resolution for pixels further from `l`, resulting
in a compressed representation of the original
image `x`.
Args:
x: a 4D Tensor of shape (B, H, W, C). The minibatch
of images.
l: a 2D Tensor of shape (B, 2). Contains normalized
coordinates in the range [-1, 1].
g: size of the first square patch.
k: number of patches to extract in the glimpse.
s: scaling factor that controls the size of
successive patches.
Returns:
phi: a 5D tensor of shape (B, k, g, g, C). The
foveated glimpse of the image.
"""
def __init__(self, g, k, s):
self.g = g
self.k = k
self.s = s
def foveate(self, x, l):
"""Extract `k` square patches of size `g`, centered
at location `l`. The initial patch is a square of
size `g`, and each subsequent patch is a square
whose side is `s` times the size of the previous
patch.
The `k` patches are finally resized to (g, g) and
concatenated into a tensor of shape (B, k, g, g, C).
"""
phi = []
size = self.g
# extract k patches of increasing size
for i in range(self.k):
phi.append(self.extract_patch(x, l, size)) # this op includes the padding
size = int(self.s * size)
# resize the patches to squares of size g
for i in range(1, len(phi)):
k = phi[i].shape[-1] // self.g
phi[i] = F.avg_pool2d(phi[i], k) # avg pool
# concatenate into a single tensor and flatten
phi = paddle.concat(phi, 1)
phi = phi.reshape([phi.shape[0], -1])
return phi
def extract_patch(self, x, l, size):
"""Extract a single patch for each image in `x`.
Args:
x: a 4D Tensor of shape (B, H, W, C). The minibatch
of images.
l: a 2D Tensor of shape (B, 2).
size: a scalar defining the size of the extracted patch.
Returns:
patch: a 4D Tensor of shape (B, size, size, C)
"""
B, C, H, W = x.shape
start = self.denormalize(H, l)
start=start.numpy()
end = start + size
# pad with zeros
x = F.pad(x, [0,0,0,0,size // 2, size // 2, size // 2, size // 2]).numpy()
# loop through mini-batch and extract patches
patch = []
for i in range(B):
# NumPy to the rescue: paddle's indexing op is far too slow here. Over 1280 calls: torch 0.0219s, paddle 0.727s, numpy 0.00099s
subset=x[i, :, start[i, 1] : end[i, 1], start[i, 0] : end[i, 0]]
patch.append(subset)
return paddle.to_tensor(np.stack(patch))
def denormalize(self, T, coords):
"""Convert coordinates in the range [-1, 1] to
coordinates in the range [0, T] where `T` is
the size of the image.
"""
return paddle.to_tensor(0.5 * ((coords + 1.0) * T), dtype='int64')
def exceeds(self, from_x, to_x, from_y, to_y, T):
"""Check whether the extracted patch will exceed
the boundaries of the image of size `T`.
"""
if (from_x < 0) or (from_y < 0) or (to_x > T) or (to_y > T):
return True
return False
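# Hedged usage sketch (added for illustration, not part of the original
# module): it exercises the shapes the Retina docstrings describe. The batch
# size, image size and the g/k/s values below are assumptions.
def _retina_demo():
    retina = Retina(g=8, k=3, s=2)
    x = paddle.rand([4, 1, 64, 64])                   # (B, C, H, W) minibatch
    l = paddle.uniform([4, 2], min=-1.0, max=1.0)     # normalized (x, y) locations
    phi = retina.foveate(x, l)                        # flattened foveated glimpse
    return phi.shape                                  # expected: [4, 192] == [B, k * g * g * C]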
class GlimpseNetwork(nn.Layer):
"""The glimpse network.
Combines the "what" and the "where" into a glimpse
feature vector `g_t`.
- "what": glimpse extracted from the retina.
- "where": location tuple where glimpse was extracted.
Concretely, feeds the output of the retina `phi` to
a fc layer and the glimpse location vector `l_t_prev`
to a fc layer. Finally, these outputs are fed each
through a fc layer and their sum is rectified.
In other words:
`g_t = relu( fc( fc(l) ) + fc( fc(phi) ) )`
Args:
h_g: hidden layer size of the fc layer for `phi`.
h_l: hidden layer size of the fc layer for `l`.
g: size of the square patches in the glimpses extracted
by the retina.
k: number of patches to extract per glimpse.
s: scaling factor that controls the size of successive patches.
c: number of channels in each image.
x: a 4D Tensor of shape (B, H, W, C). The minibatch
of images.
l_t_prev: a 2D tensor of shape (B, 2). Contains the glimpse
coordinates [x, y] for the previous timestep `t-1`.
Returns:
g_t: a 2D tensor of shape (B, hidden_size).
The glimpse representation returned by
the glimpse network for the current
timestep `t`.
"""
def __init__(self, h_g, h_l, g, k, s, c):
super().__init__()
self.retina = Retina(g, k, s)
# glimpse layer
D_in = k * g * g * c
self.fc1 = nn.Linear(D_in, h_g)
# location layer
D_in = 2
self.fc2 = nn.Linear(D_in, h_l)
self.fc3 = nn.Linear(h_g, h_g + h_l)
self.fc4 = nn.Linear(h_l, h_g + h_l)
def forward(self, x, l_t_prev):
# generate glimpse phi from image x
phi = self.retina.foveate(x, l_t_prev)
# flatten location vector
l_t_prev = l_t_prev.reshape([l_t_prev.shape[0], -1]) # quirk carried over from the reference implementation
# feed phi and l to respective fc layers
phi_out = F.relu(self.fc1(phi))
l_out = F.relu(self.fc2(l_t_prev))
what = self.fc3(phi_out)
where = self.fc4(l_out)
# feed to fc layer
g_t = F.relu(what + where)
return g_t
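# Hedged shape sketch (added for illustration, not part of the original
# module): wires the glimpse network to a random batch. The sizes
# h_g=128, h_l=128, g=8, k=3, s=2, c=1 are assumptions for the example.
def _glimpse_network_demo():
    net = GlimpseNetwork(h_g=128, h_l=128, g=8, k=3, s=2, c=1)
    x = paddle.rand([4, 1, 64, 64])                       # minibatch of images
    l_t_prev = paddle.uniform([4, 2], min=-1.0, max=1.0)  # previous glimpse locations
    g_t = net(x, l_t_prev)                                # combined "what" + "where" feature
    return g_t.shape                                      # expected: [4, 256] == [B, h_g + h_l]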
class CoreNetwork(nn.Layer):
"""The core network.
An RNN that maintains an internal state by integrating
information extracted from the history of past observations.
It encodes the agent's knowledge of the environment through
a state vector `h_t` that gets updated at every time step `t`.
Concretely, it takes the glimpse representation `g_t` as input,
and combines it with its internal state `h_t_prev` at the previous
time step, to produce the new internal state `h_t` at the current
time step.
In other words:
`h_t = relu( fc(h_t_prev) + fc(g_t) )`
Args:
input_size: input size of the rnn.
hidden_size: hidden size of the rnn.
g_t: a 2D tensor of shape (B, hidden_size). The glimpse
representation returned by the glimpse network for the
current timestep `t`.
h_t_prev: a 2D tensor of shape (B, hidden_size). The
hidden state vector for the previous timestep `t-1`.
Returns:
h_t: a 2D tensor of shape (B, hidden_size). The hidden
state vector for the current timestep `t`.
"""
def __init__(self, input_size, hidden_size):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size, hidden_size)
self.h2h = nn.Linear(hidden_size, hidden_size)
def forward(self, g_t, h_t_prev):
h1 = self.i2h(g_t)
h2 = self.h2h(h_t_prev)
h_t = F.relu(h1 + h2)
return h_t
class ActionNetwork(nn.Layer):
"""The action network.
Uses the internal state `h_t` of the core network to
produce the final output classification.
Concretely, feeds the hidden state `h_t` through a fc
layer followed by a softmax to create a vector of
output probabilities over the possible classes.
Hence, the environment action `a_t` is drawn from a
distribution conditioned on an affine transformation
of the hidden state vector `h_t`, or in other words,
the action network is simply a linear softmax classifier.
Args:
input_size: input size of the fc layer.
output_size: output size of the fc layer.
h_t: the hidden state vector of the core network
for the current time step `t`.
Returns:
a_t: output probability vector over the classes.
"""
def __init__(self, input_size, output_size):
super().__init__()
self.fc = nn.Linear(input_size, output_size)
def forward(self, h_t):
a_t = F.log_softmax(self.fc(h_t), axis=1)
return a_t
class LocationNetwork(nn.Layer):
"""The location network.
Uses the internal state `h_t` of the core network to
produce the location coordinates `l_t` for the next
time step.
Concretely, feeds the hidden state `h_t` through a fc
layer followed by a tanh to clip the output beween
[-1, 1]. This produces a 2D vector of means used to
parametrize a two-component Gaussian with a fixed
variance from which the location coordinates `l_t`
for the next time step are sampled.
Hence, the location `l_t` is chosen stochastically
from a distribution conditioned on an affine
transformation of the hidden state vector `h_t`.
Args:
input_size: input size of the fc layer.
output_size: output size of the fc layer.
std: standard deviation of the normal distribution.
h_t: the hidden state vector of the core network for
the current time step `t`.
Returns:
log_pi: a vector of shape (B,). Log-probability of the sampled location.
l_t: a 2D vector of shape (B, 2).
"""
def __init__(self, input_size, output_size, std):
super().__init__()
self.std = std
hid_size = input_size // 2
self.fc = nn.Linear(input_size, hid_size)
self.fc_lt = nn.Linear(hid_size, output_size)
def forward(self, h_t):
# compute mean
feat = F.relu(self.fc(h_t.detach()))
mu = paddle.tanh(self.fc_lt(feat))
# reparametrization trick
l_t = rsample(loc=mu,scale=self.std)
l_t = l_t.detach()
log_pi = Normal(mu, paddle.to_tensor(self.std)).log_prob(l_t)
# we assume both dimensions are independent
# 1. pdf of the joint is the product of the pdfs
# 2. log of the product is the sum of the logs
log_pi = paddle.sum(log_pi, axis=1)
# bound between [-1, 1]
l_t = paddle.clip(l_t, -1, 1)
return log_pi, l_t
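# Hedged usage sketch (added for illustration, not part of the original
# module): the location head turns a core state into a stochastic 2-D
# location and its log-probability. The hidden size 256 and std 0.05 are
# assumptions for the example.
def _location_network_demo():
    loc_net = LocationNetwork(input_size=256, output_size=2, std=0.05)
    h_t = paddle.rand([4, 256])                       # stand-in core network state
    log_pi, l_t = loc_net(h_t)
    return log_pi.shape, l_t.shape                    # expected: [4] and [4, 2]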
class BaselineNetwork(nn.Layer):
"""The baseline network.
This network regresses the baseline in the
reward function to reduce the variance of
the gradient update.
Args:
input_size: input size of the fc layer.
output_size: output size of the fc layer.
h_t: the hidden state vector of the core network
for the current time step `t`.
Returns:
b_t: a 2D vector of shape (B, 1). The baseline
for the current time step `t`.
"""
def __init__(self, input_size, output_size):
super().__init__()
self.fc = nn.Linear(input_size, output_size)
def forward(self, h_t):
b_t = self.fc(h_t.detach())
return b_t
|
the-stack_0_2701 | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import joblib
from drain3.drain import Drain
import numpy as np
collections = joblib.load("results/collections.joblib")
labels = joblib.load("results/labels.joblib")
containers = joblib.load("results/containers.joblib")
cd = joblib.load("results/matrices_dict.joblib")
dd = joblib.load("results/drain_dict.joblib")
def find_max_value(matrix_dict: dict)->int:
max_value = 0
for _, d1 in matrix_dict.items(): #collections
for _, d2 in d1.items(): #labels
for _, d3 in d2.items(): #containers
test_value = np.amax(d3)
if test_value > max_value:
max_value = test_value
return max_value
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
html.Label('Collection'),
dcc.Dropdown(
id='collections',
options=[{"label": idx, "value": idx} for idx in collections],
value='1',
multi=True
),
html.Label('Label'),
dcc.Dropdown(
id='labels',
options=[{"label": l, "value": l} for l in labels],
value='healthy',
multi=True
),
html.Label('Container'),
dcc.Dropdown(
id='containers',
options=[{"label": c, "value": c} for c in containers],
value='core.soaesb',
multi=True
),
dcc.Graph(id='heatmap')
], style={'columnCount': 1})
@app.callback(
Output('heatmap', 'figure'),
Input('collections', 'value'),
Input('labels', 'value'),
Input('containers', 'value'))
def update_heatmap(collections_set, labels_set, containers_set):
# rows will always be containers and columns can either be labels or collections
if not (len(collections_set) > 1 and len(labels_set) > 1):
n_cols = len(collections_set) if len(collections_set) > 1 else len(labels_set)
mdict = {i: {} for i in range(len(containers_set))}
cdict = {i: {} for i in range(len(containers_set))}
for i in range(len(containers_set)):
for j in range(n_cols):
if len(collections_set) > 1:
mdict[i][j] = cd[j][labels][containers_set[i]]
else:
if len(labels_set)>1:
mdict[i][j] = cd[int(collections_set)][labels_set[j]][containers_set[i]]
cdict[i] = [cluster.get_template() for cluster in dd[containers_set[i]].clusters]
else:
mdict[i][j] = cd[int(collections_set)][labels_set][containers_set[i]]
n_cols = len(collections_set) if len(collections_set) > 1 else len(labels_set)
fig = make_subplots(
rows = len(containers_set),
cols = n_cols,
start_cell = "top-left"
)
fig.update_yaxes(showticklabels=False)
# fig.update_layout(margin=dict(t=100, r=100, b=100, l=100),
# width=2000, height=1200,
# autosize=False)
fig.update_coloraxes(
cmin = 0,
cmax = find_max_value(cd)
)
for i in range(len(containers_set)):
for j in range(n_cols):
fig.add_trace(
go.Heatmap(z=mdict[i][j].tolist(),
y=cdict[i]),
row=i+1,
col=j+1)
return fig
if __name__ == '__main__':
app.run_server(host='0.0.0.0', debug=True)
|
the-stack_0_2702 | # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that specifically target tfq_unitary_op."""
import numpy as np
from absl.testing import parameterized
import tensorflow as tf
import cirq
from tensorflow_quantum.python import util
from tensorflow_quantum.core.ops import tfq_unitary_op
class UnitaryTest(tf.test.TestCase, parameterized.TestCase):
"""Tests tfq_calculate_unitary."""
def test_calculate_unitary_inputs(self):
"""Make sure the unitary op fails gracefully on bad inputs."""
unitary_op = tfq_unitary_op.get_unitary_op()
n_qubits = 5
batch_size = 5
symbol_names = ['alpha']
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
qubits, symbol_names, batch_size)
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'programs must be rank 1'):
# programs tensor has the wrong shape.
unitary_op(util.convert_to_tensor([circuit_batch]), symbol_names,
symbol_values_array)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_names must be rank 1'):
# symbol_names tensor has the wrong shape.
unitary_op(util.convert_to_tensor(circuit_batch),
np.array([symbol_names]), symbol_values_array)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_values must be rank 2'):
# symbol_values tensor has the wrong shape.
unitary_op(util.convert_to_tensor(circuit_batch), symbol_names,
np.array([symbol_values_array]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_values must be rank 2'):
# symbol_values tensor has the wrong shape 2.
unitary_op(util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array[0])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Unparseable proto'):
# programs tensor has the right type, but invalid value.
unitary_op(['junk'] * batch_size, symbol_names, symbol_values_array)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Could not find symbol in parameter map'):
# symbol_names tensor has the right type, but invalid value.
unitary_op(util.convert_to_tensor(circuit_batch), ['junk'],
symbol_values_array)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# programs tensor has the wrong type.
unitary_op([1] * batch_size, symbol_names, symbol_values_array)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# symbol_names tensor has the wrong type.
unitary_op(util.convert_to_tensor(circuit_batch), [1],
symbol_values_array)
with self.assertRaisesRegex(tf.errors.UnimplementedError, ''):
# symbol_values tensor has the wrong type.
unitary_op(util.convert_to_tensor(circuit_batch), symbol_names,
[['junk']] * batch_size)
with self.assertRaisesRegex(TypeError, 'missing'):
# too few tensors.
# pylint: disable=no-value-for-parameter
unitary_op(util.convert_to_tensor(circuit_batch), symbol_names)
# pylint: enable=no-value-for-parameter
# TODO (mbbrough): determine if we should allow extra arguments ?
with self.assertRaisesRegex(TypeError, 'positional arguments'):
# pylint: disable=too-many-function-args
unitary_op(util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, [])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='cirq.Channel'):
# attempting to use noisy circuit.
noisy_circuit = cirq.Circuit(cirq.depolarize(0.3).on_each(*qubits))
unitary_op(
util.convert_to_tensor([noisy_circuit for _ in circuit_batch]),
symbol_names, symbol_values_array)
@parameterized.parameters([
{
'all_n_qubits': [2, 3]
},
{
'all_n_qubits': [1, 5, 8]
},
])
def test_calculate_unitary_output_padding(self, all_n_qubits):
"""If calculate_unitary is asked to calculate matrices given circuits
acting on different numbers of qubits, the op should return a tensor
padded with zeros up to the size of the largest circuit."""
unitary_op = tfq_unitary_op.get_unitary_op()
circuit_batch = []
for n_qubits in all_n_qubits:
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch += util.random_circuit_resolver_batch(qubits, 1)[0]
tfq_results = unitary_op(util.convert_to_tensor(circuit_batch), [],
[[]] * len(circuit_batch))
results = [cirq.unitary(circuit) for circuit in circuit_batch]
self.assertAllClose(tfq_results.to_list(), results, atol=1e-5)
def test_calculate_unitary_empty(self):
"""Ensure calculate_unitary is consistent with empty circuits."""
unitary_op = tfq_unitary_op.get_unitary_op()
empty_u = cirq.unitary(cirq.Circuit())
tfq_empty_u = unitary_op(util.convert_to_tensor([cirq.Circuit()]), [],
[[]])
self.assertAllClose(tfq_empty_u, [empty_u], atol=1e-5) # wrap in batch.
def test_calculate_unitary_no_circuit(self):
"""Ensure calculate_unitary is consistent with no circuits."""
unitary_op = tfq_unitary_op.get_unitary_op()
no_circuit = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
tfq_empty_u = unitary_op(no_circuit, [], empty_values)
expected_shape = tf.TensorShape([0, None, None])
self.assertEqual(tfq_empty_u.shape.as_list(), expected_shape.as_list())
@parameterized.parameters([{
'n_qubits': 6,
'unitary_op': tfq_unitary_op.get_unitary_op(True)
}, {
'n_qubits': 7,
'unitary_op': tfq_unitary_op.get_unitary_op(True)
}, {
'n_qubits': 6,
'unitary_op': tfq_unitary_op.get_unitary_op(False)
}, {
'n_qubits': 7,
'unitary_op': tfq_unitary_op.get_unitary_op(False)
}])
def test_calculate_unitary_consistency_symbol_free(self, n_qubits,
unitary_op):
"""Test calculate_unitary works without symbols."""
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, _ = util.random_circuit_resolver_batch(qubits, 25)
tfq_results = unitary_op(util.convert_to_tensor(circuit_batch), [],
[[]] * len(circuit_batch))
results = [cirq.unitary(circuit) for circuit in circuit_batch]
self.assertAllClose(tfq_results, results, atol=1e-5)
@parameterized.parameters([{
'n_qubits': 3,
'unitary_op': tfq_unitary_op.get_unitary_op(True)
}, {
'n_qubits': 4,
'unitary_op': tfq_unitary_op.get_unitary_op(True)
}, {
'n_qubits': 3,
'unitary_op': tfq_unitary_op.get_unitary_op(False)
}, {
'n_qubits': 4,
'unitary_op': tfq_unitary_op.get_unitary_op(False)
}])
def test_calculate_unitary_consistency(self, n_qubits, unitary_op):
"""Test that calculate_unitary works with symbols."""
qubits = cirq.GridQubit.rect(1, n_qubits)
symbols = ['alpha', 'beta', 'gamma']
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(qubits, symbols, 25)
values = np.empty((len(circuit_batch), len(symbols)))
for i in range(len(circuit_batch)):
for j in range(len(symbols)):
values[i][j] = resolver_batch[i][symbols[j]]
tfq_results = unitary_op(util.convert_to_tensor(circuit_batch), symbols,
values)
results = []
for circuit, resolver in zip(circuit_batch, resolver_batch):
resolved_circuit = cirq.resolve_parameters(circuit, resolver)
results.append(cirq.unitary(resolved_circuit))
self.assertAllClose(tfq_results, results, atol=1e-5)
if __name__ == "__main__":
tf.test.main()
|
the-stack_0_2703 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional
import numpy as np
import pytest
import cirq
def assert_optimizes(
before: cirq.Circuit,
expected: cirq.Circuit,
optimizer: Optional[Callable[[cirq.Circuit], None]] = None):
if optimizer is None:
optimizer = cirq.MergeSingleQubitGates().optimize_circuit
optimizer(before)
# Ignore differences that would be caught by follow-up optimizations.
followup_optimizations = [
cirq.DropNegligible(),
cirq.DropEmptyMoments()
]
for post in followup_optimizations:
post(before) # type: ignore # error: "object" not callable
post(expected) # type: ignore # error: "object" not callable
try:
assert before == expected
except AssertionError: # coverage: ignore
# coverage: ignore
print("BEFORE")
print(before)
print("EXPECTED")
print(expected)
raise
def test_leaves_singleton():
m = cirq.MergeSingleQubitGates()
q = cirq.NamedQubit('q')
c = cirq.Circuit([cirq.Moment([cirq.X(q)])])
m.optimization_at(c, 0, c.operation_at(q, 0))
cirq.testing.assert_same_circuits(
c,
cirq.Circuit([cirq.Moment([cirq.X(q)])]))
def test_not_both():
with pytest.raises(ValueError):
_ = cirq.MergeSingleQubitGates(
synthesizer=lambda *args: None,
rewriter=lambda *args: None)
def test_combines_sequence():
m = cirq.MergeSingleQubitGates()
q = cirq.NamedQubit('q')
c = cirq.Circuit.from_ops(
cirq.X(q)**0.5,
cirq.Z(q)**0.5,
cirq.X(q)**-0.5)
opt_summary = m.optimization_at(c, 0, c.operation_at(q, 0))
assert opt_summary.clear_span == 3
assert list(opt_summary.clear_qubits) == [q]
assert len(opt_summary.new_operations) == 1
assert isinstance(opt_summary.new_operations[0].gate,
cirq.SingleQubitMatrixGate)
cirq.testing.assert_allclose_up_to_global_phase(
cirq.unitary(opt_summary.new_operations[0]),
cirq.unitary(cirq.Y**0.5),
atol=1e-7)
def test_removes_identity_sequence():
q = cirq.NamedQubit('q')
assert_optimizes(
before=cirq.Circuit([
cirq.Moment([cirq.Z(q)]),
cirq.Moment([cirq.H(q)]),
cirq.Moment([cirq.X(q)]),
cirq.Moment([cirq.H(q)]),
]),
expected=cirq.Circuit())
def test_stopped_at_2qubit():
m = cirq.MergeSingleQubitGates()
q = cirq.NamedQubit('q')
q2 = cirq.NamedQubit('q2')
c = cirq.Circuit([
cirq.Moment([cirq.Z(q)]),
cirq.Moment([cirq.H(q)]),
cirq.Moment([cirq.X(q)]),
cirq.Moment([cirq.H(q)]),
cirq.Moment([cirq.CZ(q, q2)]),
cirq.Moment([cirq.H(q)]),
])
opt_summary = m.optimization_at(c, 0, c.operation_at(q, 0))
assert opt_summary.clear_span == 4
assert list(opt_summary.clear_qubits) == [q]
if len(opt_summary.new_operations) != 0:
assert len(opt_summary.new_operations) == 1
assert isinstance(opt_summary.new_operations[0].gate,
cirq.SingleQubitMatrixGate)
cirq.testing.assert_allclose_up_to_global_phase(
cirq.unitary(opt_summary.new_operations[0]),
np.eye(2),
atol=1e-7)
def test_ignores_2qubit_target():
m = cirq.MergeSingleQubitGates()
q = cirq.NamedQubit('q')
q2 = cirq.NamedQubit('q2')
c = cirq.Circuit([
cirq.Moment([cirq.CZ(q, q2)]),
])
m.optimization_at(c, 0, c.operation_at(q, 0))
cirq.testing.assert_same_circuits(
c,
cirq.Circuit([cirq.Moment([cirq.CZ(q, q2)])]))
def test_ignore_unsupported_gate():
class UnsupportedDummy(cirq.Gate):
pass
q0 = cirq.LineQubit(0)
circuit = cirq.Circuit.from_ops(
UnsupportedDummy()(q0),
)
c_orig = cirq.Circuit(circuit)
cirq.MergeSingleQubitGates().optimize_circuit(circuit)
assert circuit == c_orig
def test_rewrite():
q0 = cirq.LineQubit(0)
q1 = cirq.LineQubit(1)
circuit = cirq.Circuit.from_ops(
cirq.X(q0),
cirq.X(q1),
cirq.Y(q0),
cirq.CZ(q0, q1),
cirq.Y(q1),
)
cirq.MergeSingleQubitGates(
rewriter=lambda ops: cirq.H(ops[0].qubits[0])
).optimize_circuit(circuit)
cirq.DropEmptyMoments().optimize_circuit(circuit)
cirq.testing.assert_same_circuits(circuit, cirq.Circuit.from_ops(
cirq.H(q0),
cirq.H(q1),
cirq.CZ(q0, q1),
cirq.H(q1),
))
def test_merge_single_qubit_gates_into_phased_x_z():
a, b = cirq.LineQubit.range(2)
assert_optimizes(
before=cirq.Circuit.from_ops(
cirq.X(a),
cirq.Y(b)**0.5,
cirq.CZ(a, b),
cirq.H(a),
cirq.Z(a),
),
expected=cirq.Circuit.from_ops(
cirq.X(a),
cirq.Y(b)**0.5,
cirq.CZ(a, b),
cirq.Y(a)**-0.5,
),
optimizer=cirq.merge_single_qubit_gates_into_phased_x_z)
|
the-stack_0_2705 | from __future__ import print_function, division
import itertools
try:
import pathlib
except ImportError:
import pathlib2 as pathlib
import json
import os
def composite_channel(target, image, color, range_min, range_max):
''' Render _image_ in pseudocolor and composite into _target_
Args:
target: Numpy float32 array containing composition target image
image: Numpy uint16 array of image to render and composite
color: Color as r, g, b float array, 0-1
range_min: Threshhold range minimum, 0-65535
range_max: Threshhold range maximum, 0-65535
'''
f_image = (image.astype('float32') - range_min) / (range_max - range_min)
f_image = f_image.clip(0, 1, out=f_image)
for i, component in enumerate(color):
target[:, :, i] += f_image * component
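# Hedged usage sketch (added for illustration, not part of the original
# module): composites one mid-intensity uint16 channel into an RGB float
# buffer in pure red. The array sizes and threshold range are assumptions.
def _composite_channel_demo():
    import numpy as np                                # only the demo needs numpy
    target = np.zeros((4, 4, 3), dtype='float32')     # RGB composition buffer
    image = np.full((4, 4), 32768, dtype='uint16')    # roughly half of the 0-65535 range
    composite_channel(target, image, color=(1.0, 0.0, 0.0),
                      range_min=0, range_max=65535)
    return target[:, :, 0]                            # red plane, ~0.5 everywhere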
def _calculate_total_tiles(opener, tile_size, num_levels):
tiles = 0
for level in range(num_levels):
(nx, ny) = opener.get_level_tiles(level, tile_size)
tiles += nx * ny
return tiles
def _check_duplicate(group_path, settings, old_rows):
old_settings = next((row for row in old_rows if row['Group Path'] == group_path), {})
return settings == old_settings
def render_color_tiles(opener, output_dir, tile_size, config_rows, logger, progress_callback=None, allow_cache=True):
EXT = 'jpg'
for settings in config_rows:
settings['Source'] = opener.path
print('Processing:', str(opener.path))
output_path = pathlib.Path(output_dir)
if not output_path.exists():
output_path.mkdir(parents=True)
config_path = output_path / 'config.json'
old_rows = []
if allow_cache:
if os.path.exists(config_path):
with open(config_path, 'r') as f:
try:
old_rows = json.load(f)
except json.decoder.JSONDecodeError as err:
print(err)
with open(config_path, 'w') as f:
json.dump(config_rows, f)
num_levels = opener.get_shape()[1]
total_tiles = _calculate_total_tiles(opener, tile_size, num_levels)
progress = 0
if num_levels < 2:
logger.warning(f'Number of levels {num_levels} < 2')
group_dirs = {settings['Group Path']: settings for settings in config_rows}
is_up_to_date = {g: False for g, s in group_dirs.items()}
if allow_cache:
is_up_to_date = {g: _check_duplicate(g, s, old_rows) for g, s in group_dirs.items()}
for level in range(num_levels):
(nx, ny) = opener.get_level_tiles(level, tile_size)
print(' level {} ({} x {})'.format(level, ny, nx))
for ty, tx in itertools.product(range(0, ny), range(0, nx)):
filename = '{}_{}_{}.{}'.format(level, tx, ty, EXT)
for settings in config_rows:
group_dir = settings['Group Path']
if not (output_path / group_dir).exists():
(output_path / group_dir).mkdir(parents=True)
output_file = str(output_path / group_dir / filename)
# Only save file if change in config rows
if not (os.path.exists(output_file) and is_up_to_date[group_dir]):
try:
opener.save_tile(output_file, settings, tile_size, level, tx, ty)
except AttributeError as e:
logger.error(f'{level} ty {ty} tx {tx}: {e}')
else:
logger.warning(f'Not saving tile level {level} ty {ty} tx {tx}')
logger.warning(f'Path {output_file} exists with same rendering settings')
progress += 1
if progress_callback is not None:
progress_callback(progress, len(config_rows)*total_tiles)
|
the-stack_0_2706 | import errno
import os
import random
import re
import shutil
import subprocess
import sys
import textwrap
import uuid
from datetime import date
from distutils.core import Command
import boto3
import pkg_resources
import requests
from botocore.handlers import disable_signing
from cookiecutter.main import cookiecutter
from setuptools.command import easy_install
def download_url(url, download_dir):
filename = os.path.join(download_dir, os.path.basename(url))
if not os.path.exists(filename):
with open(filename, 'wb') as f:
response = requests.get(url, stream=True)
total = response.headers.get('content-length')
if total is None:
f.write(response.content)
else:
downloaded = 0
total = int(total)
for data in response.iter_content(chunk_size=max(int(total / 1000), 1024 * 1024)):
downloaded += len(data)
f.write(data)
done = int(50 * downloaded / total)
print('\r{}{} {}%'.format('█' * done, '.' * (50-done), 2*done), end='', flush=True)
print()
else:
print('Already downloaded')
return filename
class app(Command):
description = "Create a native application to wrap this project"
user_options = [
('dir=', 'd',
"Directory to put the project in"),
('formal-name=', None,
"Formal name for the project"),
('class-name=', None,
"Entry class name for the project"),
('organization-name=', None,
"Name of the organization managing the project"),
('template=', None,
"Template (or template repository URL) to use."),
('bundle', None,
'Bundle identifier for the author organization - usually a reversed domain (e.g., "org.python")'),
('icon=', None,
"Name of the icon file."),
('guid=', None,
"GUID identifying the app."),
('secret-key=', None,
"Secret key for the app."),
('splash=', None,
"Name of the splash screen file."),
('app-requires', None,
'List of platform-specific requirements for this app.'),
('support-pkg=', None,
'URL for the support package to use'),
('download-dir=', None,
"Directory where the project support packages will be cached"),
('build', 'b',
"Build the project after generating"),
('start', 's',
"Start the application after building"),
('os-version=', None,
"Set the device OS version. (e.g., iOS 10.2)"),
('device-name=', None,
"Set the device to run. (e.g., iPhone 7 Plus)"),
('background-image=', None,
"Name of the background image file (macOS .dmg only)"),
('sanitize-version', None,
"Forces installer version to only contain numbers."),
('clean', None,
"Delete any artifacts from previous run"),
]
def initialize_options(self):
self.dir = None
self.formal_name = None
self.class_name = None
self.organization_name = None
self.template = None
self.bundle = None
self.icon = None
self.splash = None
self.app_requires = None
self.support_pkg = None
self.support_dir = None
self.download_dir = None
self.document_types = None
self.version_code = None
self.guid = None
self.secret_key = None
self.build = False
self.start = False
self.os_version = None
self.device_name = None
self.sanitize_version = None
self.clean = None
self.background_image = None
def finalize_options(self):
if self.formal_name is None:
self.formal_name = self.distribution.get_name().title()
if self.class_name is None:
CLASS_NAME_CHARS = re.compile('[^a-zA-Z]')
self.class_name = CLASS_NAME_CHARS.sub('', self.formal_name.title())
if self.organization_name is None:
self.organization_name = self.distribution.get_author().title()
if self.bundle is None:
if self.distribution.get_author_email():
domain = self.distribution.get_author_email().split('@')[-1]
else:
domain = 'org.python'
self.bundle = '.'.join(reversed(domain.split('.')))
if self.download_dir is None:
self.download_dir = os.path.expanduser(os.path.join('~', '.briefcase'))
if self.document_types is None:
self.document_types = {}
# The Version Code is a pure-string, numerically sortable
# version number.
match = re.match(r'(?P<major>\d+)(\.(?P<minor>\d+)(\.(?P<revision>\d+))?)?', self.distribution.get_version())
self._numeric_version_parts = (
int(match.groups()[0]) if match.groups()[0] else 0,
int(match.groups()[2]) if match.groups()[2] else 0,
int(match.groups()[4]) if match.groups()[4] else 0,
)
self.version_code = '%02d%02d%02d' % self._numeric_version_parts
self.version_numeric = '%d.%d.%d' % self._numeric_version_parts
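        # Worked example (added for illustration): a distribution version such as
        # '2.5.1' yields version_code '020501' and version_numeric '2.5.1', while
        # a version like '1.2.dev3' sanitizes to version_numeric '1.2.0'.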
# The app's GUID (if not manually specified) is a namespace UUID
# based on the URL for the app.
if self.guid is None:
self.guid = uuid.uuid3(uuid.NAMESPACE_URL, self.distribution.get_url())
# The secret key is 40 characters of entropy
if self.secret_key is None:
self.secret_key = ''.join(random.choice("abcdefghijklmnopqrstuvwxyz0123456789") for i in range(40))
# Ensure the download directory exists
try:
os.makedirs(self.download_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if self.start:
self.build = True
def find_support_pkg(self):
# Get an S3 client, and disable signing (so we don't need credentials)
S3_BUCKET = 'pybee-briefcase-support'
S3_REGION = 'us-west-2'
S3_URL = 'https://{}.s3-{}.amazonaws.com/'.format(S3_BUCKET, S3_REGION)
s3 = boto3.client('s3', region_name=S3_REGION)
s3.meta.events.register('choose-signer.s3.*', disable_signing)
top_build_number = 0
top_build = None
paginator = s3.get_paginator('list_objects')
for page in paginator.paginate(
Bucket=S3_BUCKET,
Prefix='{}/{}.{}/{}/'.format(
self.support_project,
sys.version_info.major,
sys.version_info.minor,
self.platform
)):
for item in page.get('Contents', []):
build_number = int(
item['Key'].rstrip('.tar.gz').split('.')[-1].lstrip('b'))
if build_number > top_build_number:
top_build_number = build_number
top_build = item['Key']
if top_build:
return S3_URL + top_build
else:
return None
@property
def app_dir(self):
return os.path.join(os.getcwd(), self.resource_dir, 'app')
@property
def app_packages_dir(self):
return os.path.join(os.getcwd(), self.resource_dir, 'app_packages')
@property
def version(self):
return self.distribution.get_version()
@property
def _python_version(self):
return '{}.{}'.format(sys.version_info.major, sys.version_info.minor)
def generate_app_template(self, extra_context=None):
print(" * Writing application template...")
if self.sanitize_version and self.version_numeric != self.version:
print(" ! Version currently contains characters: {}".format(self.version))
print(" ! Installer version sanitized to: {}".format(self.version_numeric))
extra_context = extra_context or {}
extra_context['version'] = self.version_numeric
if self.template is None:
template_path = os.path.expanduser('~/.cookiecutters/Python-{}-template'.format(self.platform))
if os.path.exists(template_path):
self.template = template_path
self._git_fetch(template_path)
self._git_checkout(template_path)
if not self._has_cookiecutter_json(template_path):
print("Directory {} isn't a valid template (no cookiecutter.json found).".format(template_path))
sys.exit(1)
self._git_pull(template_path)
else:
self.template = 'https://github.com/pybee/Python-{}-template.git'.format(self.platform)
print("Project template: {}".format(self.template))
_extra_context = {
'app_name': self.distribution.get_name(),
'formal_name': self.formal_name,
'class_name': self.class_name,
'organization_name': self.organization_name,
'author': self.distribution.get_author(),
'description': self.distribution.get_description(),
'dir_name': self.dir,
'bundle': self.bundle,
'year': date.today().strftime('%Y'),
'month': date.today().strftime('%B'),
'version': self.version,
'version_code': self.version_code,
'guid': self.guid,
'secret_key': self.secret_key,
'document_types': self.document_types,
}
if extra_context:
_extra_context.update(extra_context)
cookiecutter(
self.template,
no_input=True,
checkout=self._python_version,
extra_context=_extra_context
)
def _has_cookiecutter_json(self, template_path):
cookiecutter_json_path = os.path.join(template_path, 'cookiecutter.json')
return os.path.exists(cookiecutter_json_path)
def _get_all_branches(self, path):
branches = subprocess.check_output(["git", "ls-remote", "--heads"], stderr=subprocess.STDOUT, cwd=path)
branches = branches.decode('utf-8').splitlines()
branches = branches[1:]
all_branches = [name.rsplit("/", 1)[1] for name in branches]
return all_branches
def _git_fetch(self, path):
subprocess.Popen(["git", "fetch"], cwd=path).wait()
def _git_checkout(self, path):
try:
subprocess.check_output(["git", "checkout", self._python_version], stderr=subprocess.STDOUT, cwd=path)
except subprocess.CalledProcessError:
print("There is no branch for Python version %r (existing branches: " %
self._python_version, ", ".join(self._get_all_branches(path)) + ").")
def _git_pull(self, path):
template_name = path.split('/')[-1]
try:
subprocess.check_output(["git", "pull"], stderr=subprocess.STDOUT, cwd=path)
print('Template {} successfully updated.'.format(template_name))
except subprocess.CalledProcessError as pull_error:
error_message = pull_error.output.decode('utf-8')
if 'resolve host' in error_message:
print('Unable to update template {}, using unpulled.'.format(template_name))
print(error_message)
def install_app_requirements(self):
print(" * Installing requirements...")
if self.distribution.install_requires:
subprocess.Popen(
[
"pip", "install",
"--upgrade",
"--force-reinstall",
'--target={}'.format(self.app_packages_dir)
] + self.distribution.install_requires,
).wait()
else:
print("No requirements.")
def install_platform_requirements(self):
print(" * Installing platform requirements...")
if self.app_requires:
subprocess.Popen(
[
"pip", "install",
"--upgrade",
"--force-reinstall",
'--target={}'.format(self.app_packages_dir)
] + self.app_requires,
).wait()
else:
print("No platform requirements.")
def install_code(self):
print(" * Installing project code...")
subprocess.Popen(
[
"pip", "install",
"--upgrade",
"--force-reinstall",
"--no-dependencies",
'--target={}'.format(self.app_dir),
'.'
],
).wait()
@property
def launcher_header(self):
"""
Optionally override the shebang line for launcher scripts
This should return a suitable relative path which will find the
bundled python for the relevant platform if the setuptools default
is not suitable.
"""
return None
@property
def launcher_script_location(self):
return self.app_dir
def install_launch_scripts(self):
exe_names = []
if self.distribution.entry_points:
print(" * Creating launchers...")
subprocess.Popen(
[
"pip", "install",
"--upgrade",
"--force-reinstall",
'--target={}'.format(self.app_dir),
'setuptools'
],
).wait()
rel_resources = os.path.relpath(self.resource_dir, self.launcher_script_location)
rel_resources_split = ', '.join(["'%s'" % f for f in rel_resources.split(os.sep)])
easy_install.ScriptWriter.template = textwrap.dedent("""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
__requires__ = %(spec)r
import os
import re
import sys
import site
from os.path import dirname, abspath, join
resources = abspath(join(dirname(__file__), {}))
site.addsitedir(join(resources, 'app'))
site.addsitedir(join(resources, 'app_packages'))
os.environ['PATH'] += os.pathsep + resources
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point(%(spec)r, %(group)r, %(name)r)()
)
""".format(rel_sesources_split)).lstrip()
ei = easy_install.easy_install(self.distribution)
for dist in pkg_resources.find_distributions(self.app_dir):
# Note: this is a different Distribution class to self.distribution
ei.args = True # Needs something to run finalize_options
ei.finalize_options()
ei.script_dir = self.launcher_script_location
for args in easy_install.ScriptWriter.best().get_args(dist, header=self.launcher_header):
ei.write_script(*args)
# Grab names of launchers
for entry_points in dist.get_entry_map().values():
exe_names.extend(entry_points.keys())
if self.formal_name not in exe_names:
print(" ! No entry_point matching formal_name, \n"
" template builtin script will be main launcher.")
return exe_names
def install_resources(self):
if self.icon:
print(" * Adding icons...")
self.install_icon()
else:
print(" * No icons defined - using default...")
if self.splash:
print(" * Adding splash screens...")
self.install_splash()
else:
print(" * No splash screen defined...")
def install_support_package(self):
if self.support_pkg is None:
print(" * Determining best support package...")
self.support_pkg = self.find_support_pkg()
if self.support_dir is None:
self.support_dir = self.resource_dir
if self.support_pkg:
print(" * Installing support package...")
print("Support package:", self.support_pkg)
# Download and unpack the support package.
filename = download_url(url=self.support_pkg, download_dir=self.download_dir)
destination = os.path.join(os.getcwd(), self.support_dir)
shutil.unpack_archive(filename, extract_dir=destination)
else:
print()
print("No pre-built support package could be found for Python %s.%s." %
(sys.version_info.major, sys.version_info.minor))
print("You will need to compile your own. You may want to start with")
print("the code from https://github.com/pybee/%s and" % self.support_project)
print("then specify the compiled tarball with:")
print()
print(" python setup.py {} --support-pkg=<path to tarball>".format(self.platform.lower()))
print()
sys.exit(1)
def install_extras(self):
pass
def build_app(self):
pass
def run_app(self):
pass
def post_install(self):
print()
print("Installation complete.")
def post_build(self):
print()
print("Build complete.")
def start_app(self):
print("Don't know how to start {} applications.".format(self.platform))
def post_start(self):
print()
print("App started.")
def run(self):
full_generation = True
if os.path.exists(self.dir):
print()
if self.clean:
print(" * Deleting existing content...")
if os.path.isdir(self.dir):
shutil.rmtree(self.dir)
else:
os.remove(self.dir)
else:
print(" * Updating user code...")
full_generation = False
if full_generation:
self.generate_app_template()
self.install_support_package()
self.install_app_requirements()
self.install_platform_requirements()
self.install_code()
self.install_launch_scripts()
self.install_resources()
self.install_extras()
self.post_install()
if self.build:
success = self.build_app()
if success is None or success is True:
self.post_build()
if self.start:
self.start_app()
self.post_start()
|
the-stack_0_2707 | import RPi.GPIO as GPIO
import time
GPIO.setwarnings(False)
GPIO.setmode (GPIO.BOARD)
GPIO.setup (12,GPIO.OUT)
p = GPIO.PWM(12, 50)
duty = 0
p.start(duty)
for change_duty in range(0,101,10):
p.ChangeDutyCycle(change_duty)
time.sleep(0.1)
for change_duty in range(100, -1, -10):
p.ChangeDutyCycle(change_duty)
time.sleep(0.1)
p.stop()
|
the-stack_0_2710 | '''
@author:yk
Histogram-matching based style transfer.
Edit os.chdir below, then run: python style.py xx.jpg (image to transform) xx.jpg (target style image)
'''
import cv2 as cv
import numpy as np
import random
import os
import matplotlib.pyplot as plt
import sys
os.chdir("C:\\Users\\m\\Desktop\\第三次作业")
def show(img,name="img"): # display an image
cv.imshow(name,img)
cv.waitKey(0)
cv.destroyAllWindows()
def read(name): # read a grayscale image
return cv.imread(name+".bmp",0)
def hist_equal(img): # histogram equalization (normalized cumulative histogram)
M,N=img.shape
s=np.zeros([256,1])
for j in range(M): # visit every pixel value
for k in range(N):
s[img[j][k]]+=1 # increment the corresponding bin
for i in range(1,256):
s[i]=s[i-1]+s[i] # cumulative sum
s=s/(M*N)
return s
def hist_match(src,dst): # histogram matching
M1,N1=src.shape
M2,N2=dst.shape
s=hist_equal(src) # s_k of src
z=hist_equal(dst) # z_k of dst
g=np.zeros([256]) # initialize the mapping function g
index=0
for i in range(256): # find the z_k closest to s_k and use its index as the mapping value
mins=1000
for j in range(256):
k=abs(s[i]-z[j])
if k < mins:
mins=k
index=j
g[i]=index
return g
def img_trans(img,g): # apply the mapping g to the source image and return the transformed image
M,N=img.shape
dst=np.zeros(img.shape,dtype=np.uint8)
for i in range(M):
for j in range(N):
dst[i][j]=g[img[i][j]]
return dst
def img_enhance(img1,img2): # build the matched image and plot its histogram
g=hist_match(img1,img2)
dst=img_trans(img1,g)
hist=cv.calcHist([dst],[0],None,[256],[0,256])
plt.plot(hist)
plt.ylim([0,10000])
plt.clf()
return dst
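# Hedged sanity check (added for illustration, not part of the original
# script): hist_equal() returns the normalized cumulative histogram, so its
# last entry is always 1.0. The tiny 2x2 image below is an assumption.
def _hist_equal_demo():
    img = np.array([[0, 0], [255, 128]], dtype=np.uint8)
    s = hist_equal(img)
    return s[255]                                     # expected: array([1.0])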
if __name__ =="__main__":
name1=sys.argv[1]
name2=sys.argv[2]
orig1=cv.imread(name1)
orig2=cv.imread(name2)
b1,g1,r1=cv.split(orig1)
b2,g2,r2=cv.split(orig2)
dst1=img_enhance(b1,b2)
dst2=img_enhance(g1,g2)
dst3=img_enhance(r1,r2)
dst=cv.merge([dst1,dst2,dst3])
show(dst)
|
the-stack_0_2711 | """
Divide By Mean
==============
"""
import logging
from functools import partial
import numpy as np
from .fitness_normalizer import FitnessNormalizer
logger = logging.getLogger(__name__)
class DivideByMean(FitnessNormalizer):
"""
Divides fitness values by the population mean.
While this function can be used if the fitness value of each
:class:`.Molecule` in the population is a single
number, it is most useful when the fitness value is a
:class:`tuple` of numbers. In this case, it is necessary to somehow
combine the numbers so that a single fitness value is produced.
For example, take a fitness value which is the vector holding the
properties ``[energy, diameter, num_atoms]``. For a given molecule
these numbers may be something like ``[200,000, 12, 140]``. If we
were to sum these numbers, the energy term would dominate the final
fitness value. In order to combine these numbers we can divide them
by the population averages. For example, if the average energy
of molecules in the population is ``300,000`` the average diameter
is ``10`` and the average number of atoms is ``70`` then the
fitness vector would be scaled to roughly ``[0.67, 1.2, 2]``. These
numbers are now of a similar magnitude and can be summed to give a
reasonable value. After division, each value represents how
much better than the population average each property value is.
In essence we have removed the units from each parameter.
Examples
--------
*Selectively Normalizing Fitness Values*
Sometimes you do not want to normalize all the values in a
population together. For example, if a failed fitness value
calculation resulted in some records having a fitness value of
``None``, you would want to ignore these records from the
normalization
.. testcode:: selectively-normalizing-fitness-values
import stk
import numpy as np
building_block = stk.BuildingBlock(
smiles='BrCCBr',
functional_groups=[stk.BromoFactory()],
)
population = (
stk.MoleculeRecord(
topology_graph=stk.polymer.Linear(
building_blocks=(building_block, ),
repeating_unit='A',
num_repeating_units=2,
),
).with_fitness_value(
fitness_value=(1., 2., 3.),
normalized=False,
),
# This will have a fitness value of None.
stk.MoleculeRecord(
topology_graph=stk.polymer.Linear(
building_blocks=(building_block, ),
repeating_unit='A',
num_repeating_units=2,
),
),
)
mean_scaler = stk.DivideByMean(
# Only normalize values which are not None.
filter=lambda population, record:
record.get_fitness_value() is not None
)
# Calling mean_scaler.normalize() will return a new
# population holding the molecule records with normalized
# fitness values.
normalized_population = tuple(mean_scaler.normalize(
population=population,
))
normalized_record1, normalized_record2 = normalized_population
assert np.all(np.equal(
normalized_record1.get_fitness_value(),
(1, 1, 1),
))
"""
def __init__(self, filter=lambda population, record: True):
"""
Initialize a :class:`.DivideByMean` instance.
Parameters
----------
filter : :class:`callable`, optional
Takes two parameters, first is a :class:`tuple`
of :class:`.MoleculeRecord` instances,
and the second is a :class:`.MoleculeRecord`. The
:class:`callable` returns ``True`` or ``False``. Only
molecules which return ``True`` will have fitness values
normalized. By default, all molecules will have fitness
values normalized.
The instance passed to the `population` argument of
:meth:`.normalize` is passed as the first argument, while
the second argument will be passed every
:class:`.MoleculeRecord` in it, one at a time.
"""
self._filter = filter
def normalize(self, population):
filtered = filter(
partial(self._filter, population),
population,
)
mean = np.mean(
a=[record.get_fitness_value() for record in filtered],
axis=0,
)
logger.debug(f'Means used: {mean}')
for record in population:
if self._filter(population, record):
yield record.with_fitness_value(
fitness_value=np.divide(
record.get_fitness_value(),
mean,
)
)
else:
yield record
|
the-stack_0_2712 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow statements: loops, conditionals, etc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
def for_stmt(iter_, extra_test, body, init_state):
"""Functional form of a for statement.
The loop operates on a state, which includes all symbols that are
variant across loop iterations, excluding the iterate as well as the
variables local to the loop.
For example, given the loop below that calculates the geometric and
  arithmetic means of some numbers:
geo_mean = 1
arith_mean = 0
for i in range(n):
a = numbers[i]
geo_mean *= a
arith_mean += a
  The state is represented by the variables geo_mean and arith_mean. The
  init_state argument would contain the tuple (1, 0); the body would take the
  iterate together with geo_mean and arith_mean as arguments and would return
  a tuple holding the new values of geo_mean and arith_mean, respectively.
Args:
iter_: The entity being iterated over.
extra_test: Callable with the state as arguments, and boolean return type.
An additional loop condition.
body: Callable with the iterate and the state as arguments, and
state as return type. The actual loop body.
init_state: Tuple containing the initial state.
Returns:
Tuple containing the final state.
"""
if tensor_util.is_tensor(iter_):
return _known_len_for_stmt(iter_, extra_test, body, init_state)
elif isinstance(iter_, dataset_ops.DatasetV2):
return _dataset_for_stmt(iter_, extra_test, body, init_state)
else:
return _py_for_stmt(iter_, extra_test, body, init_state)
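# Illustrative sketch (not part of the original module): the geometric/arithmetic
# mean loop from the docstring above could be expressed through for_stmt roughly as
# follows; `numbers` and `n` are assumed to already be defined as plain Python values.
#
#   def body(i, geo_mean, arith_mean):
#     a = numbers[i]
#     return geo_mean * a, arith_mean + a
#
#   geo_mean, arith_mean = for_stmt(
#       iter_=range(n),
#       extra_test=lambda geo_mean, arith_mean: True,
#       body=body,
#       init_state=(1, 0))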
def _py_for_stmt(iter_, extra_test, body, init_state):
"""Overload of for_stmt that executes a Python for loop."""
state = init_state
for target in iter_:
if not extra_test(*state):
break
state = body(target, *state)
# TODO(mdan): Remove this special case.
if len(state) == 1:
return state[0]
return state
def _known_len_for_stmt(iter_, extra_test, body, init_state):
"""Overload of for_stmt that iterates over objects that admit a length."""
n = py_builtins.len_(iter_)
def while_body(iterate_index, *state):
iterate = iter_[iterate_index]
new_state = body(iterate, *state)
return (iterate_index + 1,) + new_state
def while_cond(iterate_index, *state):
return gen_math_ops.logical_and(iterate_index < n, extra_test(*state))
results = while_stmt(
while_cond,
while_body,
init_state=(0,) + init_state,
extra_deps=(iter_,),
opts=dict(maximum_iterations=n))
# Dropping the iteration index because it's not syntactically visible.
results = results[1:]
# TODO(mdan): Remove this special case.
if len(results) == 1:
return results[0]
return results
def _dataset_for_stmt(ds, extra_test, body, init_state):
"""Overload of for_stmt that iterates over TF Datasets."""
  # Because Datasets only expose get_next, in the style of Python iterators,
# we are forced to unpack the loop as:
#
# epoch_number, iterate = ds.get_next()
# while epoch_number < 2:
# <body>
# epoch_number, iterate = ds.get_next()
epoch_numbers = dataset_ops.Dataset.range(2)
def tag_with(ds, tag):
return dataset_ops.Dataset.zip(
(dataset_ops.Dataset.from_tensors(tag).repeat(), ds))
ds_with_epoch = epoch_numbers.flat_map(lambda i: tag_with(ds, i))
iterator = ds_with_epoch.make_initializable_iterator()
with ops.control_dependencies((iterator.initializer,)):
epoch_number, iterate = iterator.get_next()
def while_body(epoch_number, iterate, *state):
new_state = body(iterate, *state)
epoch_number, iterate = iterator.get_next()
return (epoch_number, iterate) + new_state
def while_cond(epoch_number, iterate, *state):
del iterate
return gen_math_ops.logical_and(epoch_number < 1, extra_test(*state))
results = while_stmt(
while_cond,
while_body,
init_state=(epoch_number, iterate) + init_state,
extra_deps=())
# Dropping the epoch number and iterate because they are not syntactically
# visible.
results = results[2:]
# TODO(mdan): Remove this special case.
if len(results) == 1:
return results[0]
return results
def while_stmt(test, body, init_state, extra_deps, opts=None):
"""Functional form of a while statement.
The loop operates on a so-called state, which includes all symbols that are
variant across loop iterations. In what follows we refer to state as either
a tuple of entities that represent an actual state, or a list of arguments
of the corresponding types.
Args:
test: Callable with the state as arguments, and boolean return type.
The loop condition.
body: Callable with the state as arguments, and state as return type.
The actual loop body.
init_state: Tuple containing the initial state.
extra_deps: Tuple containing additional entities on which the loop may
depend, such as loop invariants referenced by test. Used
exclusively for dispatch control.
opts: Optional dict of extra loop parameters.
Returns:
Tuple containing the final state.
"""
# TODO(mdan): Consider adding a generic mechanism for dynamic dispatch.
# That could be something as simple as a collection of dispatch rules, with
# some prioritization.
if any(tensor_util.is_tensor(v) for v in init_state + extra_deps):
return _tf_while_stmt(test, body, init_state, opts)
else:
return _py_while_stmt(test, body, init_state, opts)
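# Illustrative sketch (not part of the original module): a plain Python countdown,
# `while i > 0: total += i; i -= 1`, could be expressed through while_stmt roughly
# as follows (all values here are plain Python ints, so the Python overload is used).
#
#   i, total = while_stmt(
#       test=lambda i, total: i > 0,
#       body=lambda i, total: (i - 1, total + i),
#       init_state=(5, 0),
#       extra_deps=())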
def _tf_while_stmt(test, body, init_state, opts):
"""Overload of while_stmt that stages a TF while_stmt."""
if opts is None:
opts = {}
return control_flow_ops.while_loop(test, body, init_state, **opts)
def _py_while_stmt(test, body, init_state, opts):
"""Overload of while_stmt that executes a Python while loop."""
del opts
state = init_state
while test(*state):
state = body(*state)
return state
def if_stmt(cond, body, orelse):
"""Functional form of an if statement.
Args:
cond: Boolean.
body: Callable with no arguments, and outputs of the positive (if) branch
as return type.
orelse: Callable with no arguments, and outputs of the negative (else)
branch as return type.
Returns:
Tuple containing the statement outputs.
"""
if tensor_util.is_tensor(cond):
return tf_if_stmt(cond, body, orelse)
else:
return _py_if_stmt(cond, body, orelse)
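# Illustrative sketch (not part of the original module): the expression
# `x = 1 if flag else -1` maps onto if_stmt roughly as follows, where `flag` is
# assumed to be a plain Python boolean.
#
#   x = if_stmt(cond=flag, body=lambda: 1, orelse=lambda: -1)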
def tf_if_stmt(cond, body, orelse):
"""Overload of if_stmt that stages a TF cond."""
return control_flow_ops.cond(cond, body, orelse)
def _py_if_stmt(cond, body, orelse):
"""Overload of if_stmt that executes a Python if statement."""
return body() if cond else orelse()
|
the-stack_0_2713 | import numpy as np
import h5py
import pandas as pd
from typing import Any, Callable
from scipy.stats import binned_statistic
from scipy.interpolate import interp1d
from sklearn.utils import resample
from imblearn.over_sampling import SMOTE
def get_data(arg_label:str,
boxsize:int=100,
path_to_file:str="/cosma7/data/dp004/dc-cues1/tng_dataframes/",
):
"""
"""
filename = f"merged_dataframe_{boxsize}.h5"
hdf5_filename = path_to_file + filename
df = pd.read_hdf(hdf5_filename, key="df", mode="r")
df = df.fillna(-9999.)
ids = df.ID_DMO
drop_list=["N_gals", "M_stars_central", "total_M_stars",
"x_hydro", "y_hydro", "z_hydro",
"x_dmo", "y_dmo", "z_dmo",
"M200_HYDRO", "ID_HYDRO", "ID_DMO",
"Group_R_Crit200", #"CentralVmax", #"m2500c",
"vrms_2500c", "vrms_200c", "vrms_std_2500c",
"CentralMassInMaxRad",
"displacement",
'vrms_std_200c', 'beta2500c',
"concentration_nfw"
]
# Chose label
if arg_label == "dark_or_light":
df["labels"] = df.N_gals > 0
df = df.drop(columns=drop_list)
elif arg_label == "nr_of_satellites":
df["labels"] = df.N_gals - 1
df = df[df.N_gals > 1]
df = df.drop(columns=drop_list)
elif arg_label == "stellar_mass":
df["labels"] = np.log10(df.M_stars_central)
df["labels"] = df["labels"].replace([-np.inf, np.inf], 0.)
df = df.drop(columns=drop_list)
elif arg_label == "both":
df["labels"] = df.N_gals > 0
'''
keep_list = [
"Formation Time", "CentralVmax", "CentralHalfmassRad", "concentration_prada", "Spin",
"env_10", "labels",
]
df = df[keep_list]
'''
return df.drop(columns="labels"), df.labels
def load_positions(test_idx = None,
path_to_file:str="/cosma7/data/dp004/dc-cues1/tng_dataframes/",
boxsize:int=100
):
filename = f"merged_dataframe_{int(boxsize)}.h5"
hdf5_filename = path_to_file + filename
df = pd.read_hdf(hdf5_filename, key="df", mode="r")
if test_idx is not None:
df=df.iloc[test_idx]
hydro_pos = np.vstack([df.x_hydro, df.y_hydro, df.z_hydro]).T
dmo_pos = np.vstack([df.x_dmo, df.y_dmo, df.z_dmo]).T
return hydro_pos, dmo_pos
def _find_transition_regions(df_features: pd.DataFrame, n_centrals):
"""
Function to find two masses: where half the haloes are luminous, and where all haloes are luminous
Args:
        df_features: dataframe containing the halo masses
        n_centrals: per-halo flag (or count) of luminous centrals
Returns:
mass_center: mass at which half of the haloes are luminous.
mass_end: mass at which 100% of haloes are luminous.
"""
nbins = 15
m200c = 10**df_features.M200c
bins = np.logspace(np.log10(np.min(m200c)), 12.5, nbins + 1)
nluminous, mass_edges, _ = binned_statistic(
m200c, n_centrals, statistic="mean", bins=bins
)
interpolator = interp1d(nluminous, (mass_edges[1:] + mass_edges[:-1]) / 2.0)
mass_center = interpolator(0.5)
mass_end = ((mass_edges[1:] + mass_edges[:-1]) / 2.0)[nluminous > 0.99][0]
return np.log10(mass_center), np.log10(mass_end)
def balance_dataset(df_features, df_labels, sampler, split='mass'):
if split == 'mass':
df_features_resampled, df_labels_resampled=_balance_mass_split(df_features,
df_labels, sampler)
else:
df_features_resampled, df_labels_resampled=_balance(df_features, df_labels, sampler)
return df_features_resampled, df_labels_resampled
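# Illustrative sketch (not part of the original module): typical usage with the
# imported SMOTE over-sampler, assuming the merged dataframe for the chosen box
# size is available at the default path.
#
#   features, labels = get_data("dark_or_light", boxsize=100)
#   features_bal, labels_bal = balance_dataset(features, labels, SMOTE, split='mass')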
def _balance(df_features, df_labels, sampler):
sampler_ = sampler(random_state=42)
features_resampled, labels_resampled = sampler_.fit_sample(df_features, df_labels)
df_features_resampled = pd.DataFrame(data=features_resampled,
columns=df_features.columns)
df_labels_resampled= pd.Series(data=labels_resampled)
return df_features_resampled, df_labels_resampled
def _balance_mass_split(
df_features, df_labels, sampler
):
center_transition, end_transition = _find_transition_regions(df_features, df_labels)
df_left_transition_feats, df_left_transition_labels = _balance_df_given_mass(
df_features, df_labels, 0.0, center_transition, sampler
)
df_right_transition_feats, df_right_transition_labels = _balance_df_given_mass(
df_features, df_labels, center_transition, 15, sampler
)
df_features = pd.concat([df_left_transition_feats, df_right_transition_feats])
df_labels = pd.concat([df_left_transition_labels, df_right_transition_labels])
return df_features, df_labels
def _balance_df_given_mass(
df_features, df_labels, minimum_mass, maximum_mass, sampler
):
"""
internal function indicated by leading _
"""
mass_threshold = (df_features.M200c > minimum_mass) & (df_features.M200c < maximum_mass)
df_M = df_features[mass_threshold]
df_M_labels = df_labels[mass_threshold]
df_features_resampled, df_labels_resampled = _balance(df_M, df_M_labels, sampler)
return df_features_resampled, df_labels_resampled
|
the-stack_0_2714 | # -*- coding: utf-8 -*-
'''
Execute an unmodified puppet_node_classifier and read the output as YAML. The YAML data is then directly overlaid onto the minion's Pillar data.
'''
# Don't "fix" the above docstring to put it on two lines, as the sphinx
# autosummary pulls only the first line for its description.
# Import python libs
import logging
# Import third party libs
import yaml
# Set up logging
log = logging.getLogger(__name__)
def ext_pillar(minion_id, pillar, command):
'''
Execute an unmodified puppet_node_classifier and read the output as YAML
'''
try:
data = yaml.safe_load(__salt__['cmd.run']('{0} {1}'.format(command, minion_id)))
data = data['parameters']
return data
except Exception:
log.critical(
'YAML data from {0} failed to parse'.format(command)
)
return {}
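# Illustrative sketch (not part of the original module): if this module were
# installed under the name `puppet`, it could be enabled in the salt master
# configuration roughly as follows (the classifier path is a placeholder):
#
#   ext_pillar:
#     - puppet: /usr/bin/puppet_node_classifier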
|
the-stack_0_2716 | from __future__ import absolute_import
from __future__ import print_function
from amitools.fs.block.Block import *
import amitools.fs.DosType as DosType
class PartitionDosEnv:
valid_keys = ('max_transfer', 'mask', 'num_buffer', 'reserved', 'boot_pri', 'pre_alloc', 'boot_blocks')
def __init__(self, size=16, block_size=128, sec_org=0, surfaces=0, sec_per_blk=1, blk_per_trk=0,
reserved=2, pre_alloc=0, interleave=0, low_cyl=0, high_cyl=0, num_buffer=30,
buf_mem_type=0, max_transfer=0xffffff, mask=0x7ffffffe, boot_pri=0, dos_type=DosType.DOS0,
baud=0, control=0, boot_blocks=0):
self.size = size
self.block_size = block_size
self.sec_org = sec_org
self.surfaces = surfaces
self.sec_per_blk = sec_per_blk
self.blk_per_trk = blk_per_trk
self.reserved = reserved
self.pre_alloc = pre_alloc
self.interleave = interleave
self.low_cyl = low_cyl
self.high_cyl = high_cyl
self.num_buffer = num_buffer
self.buf_mem_type = buf_mem_type
self.max_transfer = max_transfer
self.mask = mask
self.boot_pri = boot_pri
self.dos_type = dos_type
self.baud = baud
self.control = control
self.boot_blocks = boot_blocks
def dump(self):
print("DosEnv")
print(" size: %d" % self.size)
print(" block_size: %d" % self.block_size)
print(" sec_org: %d" % self.sec_org)
print(" surfaces: %d" % self.surfaces)
print(" sec_per_blk: %d" % self.sec_per_blk)
print(" blk_per_trk: %d" % self.blk_per_trk)
print(" reserved: %d" % self.reserved)
print(" pre_alloc: %d" % self.pre_alloc)
print(" interleave: %d" % self.interleave)
print(" low_cyl: %d" % self.low_cyl)
print(" high_cyl: %d" % self.high_cyl)
print(" num_buffer: %d" % self.num_buffer)
print(" buf_mem_type: 0x%08x" % self.buf_mem_type)
print(" max_transfer: 0x%08x" % self.max_transfer)
print(" mask: 0x%08x" % self.mask)
print(" boot_pri: %d" % self.boot_pri)
print(" dos_type: 0x%08x = %s" % (self.dos_type, DosType.num_to_tag_str(self.dos_type)))
print(" baud: %d" % self.baud)
print(" control: %d" % self.control)
print(" boot_blocks: %d" % self.boot_blocks)
def read(self, blk):
self.size = blk._get_long(32)
self.block_size = blk._get_long(33)
self.sec_org = blk._get_long(34)
self.surfaces = blk._get_long(35)
self.sec_per_blk = blk._get_long(36)
self.blk_per_trk = blk._get_long(37)
self.reserved = blk._get_long(38)
self.pre_alloc = blk._get_long(39)
self.interleave = blk._get_long(40)
self.low_cyl = blk._get_long(41)
self.high_cyl = blk._get_long(42)
self.num_buffer = blk._get_long(43)
self.buf_mem_type = blk._get_long(44)
self.max_transfer = blk._get_long(45)
self.mask = blk._get_long(46)
self.boot_pri = blk._get_slong(47)
self.dos_type = blk._get_long(48)
self.baud = blk._get_long(49)
self.control = blk._get_long(50)
self.boot_blocks = blk._get_long(51)
def write(self, blk):
blk._put_long(32, self.size)
blk._put_long(33, self.block_size)
blk._put_long(34, self.sec_org)
blk._put_long(35, self.surfaces)
blk._put_long(36, self.sec_per_blk)
blk._put_long(37, self.blk_per_trk)
blk._put_long(38, self.reserved)
blk._put_long(39, self.pre_alloc)
blk._put_long(40, self.interleave)
blk._put_long(41, self.low_cyl)
blk._put_long(42, self.high_cyl)
blk._put_long(43, self.num_buffer)
blk._put_long(44, self.buf_mem_type)
blk._put_long(45, self.max_transfer)
blk._put_long(46, self.mask)
blk._put_slong(47, self.boot_pri)
blk._put_long(48, self.dos_type)
blk._put_long(49, self.baud)
blk._put_long(50, self.control)
blk._put_long(51, self.boot_blocks)
class PartitionBlock(Block):
FLAG_BOOTABLE = 1
FLAG_NO_AUTOMOUNT = 2
def __init__(self, blkdev, blk_num):
Block.__init__(self, blkdev, blk_num, chk_loc=2, is_type=Block.PART)
def create(self, drv_name, dos_env, host_id=7, next=Block.no_blk, flags=0, dev_flags=0,
size=64):
Block.create(self)
self.size = size
self.host_id = host_id
self.next = next
self.flags = flags
self.dev_flags = dev_flags
self.drv_name = drv_name
if dos_env == None:
dos_env = PartitionDosEnv()
self.dos_env = dos_env
self.valid = True
def write(self):
self._create_data()
self._put_long(1, self.size)
self._put_long(3, self.host_id)
self._put_long(4, self.next)
self._put_long(5, self.flags)
self._put_long(8, self.dev_flags)
self._put_bstr(9, 31, self.drv_name)
self.dos_env.write(self)
Block.write(self)
def read(self):
Block.read(self)
if not self.valid:
return False
self.size = self._get_long(1)
self.host_id = self._get_long(3)
self.next = self._get_long(4)
self.flags = self._get_long(5)
self.dev_flags = self._get_long(8)
self.drv_name = self._get_bstr(9, 31)
self.dos_env = PartitionDosEnv()
self.dos_env.read(self)
return self.valid
def dump(self):
Block.dump(self, "Partition")
print(" size: %d" % self.size)
print(" host_id: %d" % self.host_id)
print(" next: %s" % self._dump_ptr(self.next))
print(" flags: 0x%08x" % self.flags)
print(" dev_flags: 0x%08x" % self.dev_flags)
print(" drv_name: '%s'" % self.drv_name)
self.dos_env.dump()
|
the-stack_0_2717 | import DSGRN
from DSGRN import *
import networkx as nx
import matplotlib.pyplot as plt
from copy import deepcopy
import os
from all_networks_with_n_nodes_e_edges import *
from save_files import *
from GradientFun import *
from get_FG import *
from get_FP_Poset import *
from networkx_cond import *
def reduce_gradient_graph_to_nodes_of_interest(database, grad_graph, FP_Poset):
c = database.conn.cursor()
FP_keep = [node for node in FP_Poset.keys()]
G = nx.DiGraph() #building networkx graph
for node in grad_graph:
G.add_node(node)
for edge in grad_graph[node]:
G.add_edge(node, edge)
del_list = []
for node in grad_graph:
p = node[-1]
MGI_result = c.execute('select MorseGraphIndex from Signatures where ParameterIndex is ' + str(p))
MGI = MGI_result.fetchone()[0]
FP_result = [row[0] for row in c.execute('select Label from MorseGraphAnnotations where MorseGraphIndex is ' + str(MGI))]
if not set(FP_result).intersection(set(FP_keep)):
del_list.append(node)
for n in del_list: #removes del_list nodes in networkx graph and grad_graph keys
G.remove_node(n)
del grad_graph[n]
return G, grad_graph
def get_product_graph(database, cG, scc, FP_Poset):
'''
cG: condensation graph of gradient graph with only monostable fixed points and nodes in FP_Poset, expects networkx object.
    scc: dictionary of strongly connected components, where keys are node labels in the given graph
and values are nodes in original graph the condensation is derived from.
returns: Product Graph, i.e., reduces cG to having only edges that appear in FP_Poset.
Then removes all parts of graph not connected to a node in the start set.
'''
c = database.conn.cursor()
H = nx.DiGraph() #building networkx graph from FP_poset
for node in FP_Poset:
for edge in FP_Poset[node]:
H.add_edge(node, edge)
del_list = [] #currently written with FP repeats
P = deepcopy(cG)
for edge in P.edges():
s = scc[edge[0]][0][-1]
t = scc[edge[1]][0][-1]
sMGI = c.execute('select MorseGraphIndex from Signatures where ParameterIndex is ' + str(s))
MGI = sMGI.fetchone()[0]
sFP = [row[0] for row in c.execute('select Label from MorseGraphAnnotations where MorseGraphIndex is ' + str(MGI))]
tMGI = c.execute('select MorseGraphIndex from Signatures where ParameterIndex is ' + str(t))
MGI = tMGI.fetchone()[0]
tFP = [row[0] for row in c.execute('select Label from MorseGraphAnnotations where MorseGraphIndex is ' + str(MGI))]
keep = False
if (sFP[0],tFP[0]) in H.edges():
keep = True
if sFP[0] == tFP[0]:
keep = True
if keep == False:
del_list.append(edge)
for edge in del_list:
P.remove_edge(edge[0],edge[1])
P.remove_nodes_from(list(nx.isolates(cG)))
start_set = []
for node in P:
p = scc[node][0]
if p[0] == 0 and p[1] == 0:
start_set.append(node)
del_list = []
for node in P.nodes():
for i in start_set:
if i != node:
try:
nx.shortest_path(P, i, node)
break
except:
if i == start_set[-1]:
del_list.append(node)
break
else:
continue
for node in del_list:
P.remove_node(node)
return P
def return_start_stop_set(database, graph, scc, Hb_max, Kni_max, start_FP_list = None, stop_FP_list = None):
'''
graph: can be in dictinary or networkx form, function expects a condensation graph.
    scc: dictionary of strongly connected components, where keys are node labels in the given graph
and values are nodes in original graph the condensation is derived from.
Hb_max, Kni_max: Highest factor graph layers.
    start_FP_list, stop_FP_list: lists of fixed points used to constrain the starting and stopping sets.
    returns: sets of nodes considered starting nodes and stopping nodes for a path.
'''
c = database.conn.cursor()
start_set = []
stop_set = []
for node in graph:
n = scc[node][0]
#print(node, p)
if n[0] == 0 and n[1] == 0:
if start_FP_list != None:
p = scc[node][0][-1]
MGI_result = c.execute('select MorseGraphIndex from Signatures where ParameterIndex is ' + str(p))
MGI = MGI_result.fetchone()[0]
FP_result = [row[0] for row in c.execute('select Label from MorseGraphAnnotations where MorseGraphIndex is ' + str(MGI))]
if FP_result in start_FP_list:
start_set.append(node)
else:
start_set.append(node)
if n[0] == Hb_max and n[1] == Kni_max:
if stop_FP_list != None:
p = scc[node][0][-1]
MGI_result = c.execute('select MorseGraphIndex from Signatures where ParameterIndex is ' + str(p))
MGI = MGI_result.fetchone()[0]
FP_result = [row[0] for row in c.execute('select Label from MorseGraphAnnotations where MorseGraphIndex is ' + str(MGI))]
if FP_result in stop_FP_list:
stop_set.append(node)
else:
stop_set.append(node)
return start_set, stop_set
def test_any_path_exists_in_product(string, network_filename, database = None, grad_graph = None, reduce = True):
'''
string: network string.
    network_filename: name to save the network text file as; the .txt extension is added automatically.
    returns: True if a path exists in the product graph, False otherwise.
'''
# Make DSGRN database
if database == None:
txt_filename = "/home/elizabeth/Desktop/GIT/dsgrn_acdc/networks/" + network_filename + ".txt"
f = open(txt_filename,"w") # Make txt file for network, needed to build DSGRN database
f.write(string)
f.close()
db_filename = "/home/elizabeth/Desktop/GIT/dsgrn_acdc/networks/" + network_filename + ".db"
os.system("mpiexec -n 2 Signatures "+ txt_filename + ' ' + db_filename)
database = Database(db_filename)
out_edges = get_number_out_edges_from_string(string)
Hb_list, Kni_list = get_Hb_Kni_list(database)
Hb_max = len(Hb_list)-1
Kni_max = len(Kni_list)-1
FP_Poset = get_FP_Poset(out_edges)[0]
# If grad_graph has not already been computed for this network, compute it and save.
if grad_graph == None:
gradlist = get_gradlist_strict(database, Hb_list, Kni_list)
grad_graph = get_gradient_graph_parallel(database, gradlist, 7, Hb_list, Kni_list)
grad_graph_filename = "grad_graph_strict_"+network_filename
save_json(grad_graph, grad_graph_filename)
if reduce == True:
G, ngg = reduce_gradient_graph_to_nodes_of_interest(database, grad_graph, FP_Poset)
ngg_filename = "reduced_grad_graph_strict_"+network_filename
save_json(ngg, ngg_filename)
strongcc = strongly_connected_components_by_MGI(G, database)
cG, scc = condensation(G, strongcc)
P = get_product_graph(database, cG, scc, FP_Poset)
start_set, stop_set = return_start_stop_set(database, P, scc, Hb_max, Kni_max)
if start_set == []:
print("Empty start set")
result = False
if stop_set == []:
print("Empty stop set")
result = False
else:
for s in start_set:
for t in stop_set:
try:
nx.shortest_path(cG, s, t)
print('Path exists from ' + str(s) + ' to '+ str(t))
result = True
break
except:
if s == start_set[-1]:
if t == stop_set[-1]:
print('No Path Exists')
result = False
break
else:
continue
else:
continue
break
return result
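# Illustrative sketch (not part of the original module): typical call, where
# `network_string` is assumed to be a DSGRN network specification containing Hb
# and Kni nodes; the text and database files are written under the hard-coded
# networks directory used above.
#
#   path_exists = test_any_path_exists_in_product(network_string, "my_network")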
def find_breaks_in_FG_comb(database, P, scc, Hb_max, Kni_max):
breaks = []
for h in range(Hb_max+1):
for k in range(Kni_max+1):
if (h,k) != (0,0) and (h,k) != (Hb_max, Kni_max):
remove = (h,k)
T = deepcopy(P)
for node in P.nodes():
if scc[node][0][0:2] == remove:
T.remove_node(node)
start_set, stop_set = return_start_stop_set(database, T, scc, Hb_max, Kni_max)
if start_set == []:
result = False
if stop_set == []:
result = False
else:
for s in start_set:
for t in stop_set:
try:
nx.shortest_path(T, s, t)
result = True
break
except:
if s == start_set[-1]:
if t == stop_set[-1]:
result = False
break
else:
continue
else:
continue
break
if result == False:
breaks.append((h,k))
x = []
y = []
for s in scc:
x.append(Hb_max-scc[s][0][0])
y.append(scc[s][0][1])
plt.scatter(y, x)
for i in breaks:
plt.scatter([Hb_max-i[0]],[i[1]], color = 'r')
    plt.xlabel('Kni Factor Graph Layer')
    plt.ylabel('Hb Factor Graph Layer')
plt.show()
return breaks
def create_cond_subgraphs_graphml(database, grad_graph, cond, prod_graph_nodes, path_nodes, scc, FP_Region, start_set, stop_set, Filename):
''' graphml filetype '''
c = database.conn.cursor()
N = nx.DiGraph()
for node in grad_graph:
N.add_node(node)
for edge in grad_graph[node]:
N.add_edge(node, edge)
G = nx.DiGraph()
Kni_att = {}
Hb_att = {}
MGI_att = {}
Region_att = {}
scc_size_att = {}
graph = {}
s_t = {}
for node in cond:
G.add_node(node)
count = 0
for edge in cond[node]:
G.add_edge(node, edge)
yes_count = 0
for s in scc[node]:
for t in scc[edge]:
if N.has_edge(s,t) == True:
yes_count += 1
count +=1
G[node][edge]['weight'] = yes_count
for edge in cond[node]:
G[node][edge]['weight'] = G[node][edge]['weight']/count
p = scc[node][0][-1]
MGI_result = c.execute('select MorseGraphIndex from Signatures where ParameterIndex is ' + str(p))
MGI = MGI_result.fetchone()[0]
MGI_att[node] = MGI
FP_result = [row[0] for row in c.execute('select Label from MorseGraphAnnotations where MorseGraphIndex is ' + str(MGI))]
if len(FP_result) == 1:
for r in FP_Region:
if FP_result[0] in FP_Region[r]:
Region_att[node] = r
else:
Region_att[node] = 'not mono-stable'
Hb_att[node] = scc[node][0][0]
Kni_att[node] = scc[node][0][1]
if node in path_nodes:
graph[node] = 'path'
elif node in prod_graph_nodes:
graph[node] = 'product'
else:
graph[node] = 'cond'
scc_size_att[node] = len(scc[node])
for node in start_set:
s_t[node] = 'starting'
for node in stop_set:
s_t[node] = 'stoping'
nx.set_node_attributes(G, 'Hb_FG_layer', Hb_att)
nx.set_node_attributes(G, 'Kni_FG_layer', Kni_att)
nx.set_node_attributes(G, 'MGI', MGI_att)
nx.set_node_attributes(G, 'Region', Region_att)
nx.set_node_attributes(G, 'group', graph)
nx.set_node_attributes(G, 'scc size', scc_size_att)
nx.set_node_attributes(G, 'start_stop', s_t)
group=nx.get_node_attributes(G,'group')
att = {}
for edge in G.edges():
s = edge[0]
t = edge[1]
if group[s] == 'path':
if group[t] != 'path':
att[s] = 'leave path'
nx.set_node_attributes(G, 'leaving', att)
nx.write_graphml(G, Filename)
def get_gephi_graph_for_cond(database, network, grad_graph, graphml_filename, path_nodes = []):
'''
    grad_graph: expects the graph as a dictionary.
    network: the network specification string.
    graphml_filename: name to use for the graphml file; the save location is added automatically. Expects .graphml at the end.
'''
out_edges = get_number_out_edges_from_string(network)
FP_Poset, FP_Region = get_FP_Poset(out_edges)
G = reduce_gradient_graph_to_nodes_of_interest(database, grad_graph, FP_Poset)[0]
strongcc = strongly_connected_components_by_MGI(G, database)
cG, scc = condensation(G, strongcc)
P = get_product_graph(database, cG, scc, FP_Poset)
Hb_list, Kni_list = get_Hb_Kni_list(database)
Hb_max = len(Hb_list)-1
Kni_max = len(Kni_list)-1
start_set, stop_set = return_start_stop_set(database, P, scc, Hb_max, Kni_max)
filename = '/home/elizabeth/Desktop/GIT/dsgrn_acdc/Saved_Files/Graphml/' + graphml_filename
### Notice graph only has bagged FP in it, the condensation of the gradient graph only, without removing all nodes not in bag is much larger.
create_cond_subgraphs_graphml(database, grad_graph, cG, P, path_nodes, scc, FP_Region, start_set, stop_set, filename) |
the-stack_0_2718 | from torch import Tensor
from torch.autograd import Variable
from torch.optim import Adam
from itertools import chain
from utils.misc import hard_update
from utils.policies import DiscretePolicy
import torch.nn.functional as F
class Agent(object):
"""
General class for agents (policy, target policy, etc)
"""
def __init__(self, obs_shape, action_size, hidden_dim=64,
lr=0.01, adam_eps=1e-8, nonlin=F.relu, n_pol_heads=1):
self.policy = DiscretePolicy(obs_shape,
action_size,
hidden_dim=hidden_dim,
nonlin=nonlin,
n_heads=n_pol_heads)
self.target_policy = DiscretePolicy(obs_shape,
action_size,
hidden_dim=hidden_dim,
nonlin=nonlin,
n_heads=n_pol_heads)
hard_update(self.target_policy, self.policy)
self.policy_optimizer = Adam(self.policy.parameters(), lr=lr, eps=adam_eps)
def step(self, obs, explore=False, head=0):
"""
Take a step forward in environment for a minibatch of observations
Inputs:
obs (PyTorch Variable): Observations for this agent
explore (boolean): Whether or not to sample
head (int): Which policy head to use
Outputs:
action (PyTorch Variable): Actions for this agent
"""
return self.policy(obs, sample=explore, head=head)
def get_params(self):
return {'policy': self.policy.state_dict(),
'target_policy': self.target_policy.state_dict(),
'policy_optimizer': self.policy_optimizer.state_dict()}
def load_params(self, params, load_ir=False):
self.policy.load_state_dict(params['policy'])
self.target_policy.load_state_dict(params['target_policy'])
self.policy_optimizer.load_state_dict(params['policy_optimizer'])
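# Illustrative sketch (not part of the original module): constructing an agent and
# taking an exploratory step; the observation size and its tensor layout are
# assumptions about what DiscretePolicy expects.
#
#   import torch
#   agent = Agent(obs_shape=8, action_size=4)
#   action = agent.step(torch.zeros(1, 8), explore=True)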
|
the-stack_0_2719 | import logging.config
import tkinter as tk
from tkinter import ttk
class StudentPage(tk.Frame):
'''
Class creates Student Page frame.
'''
def __init__(self, master, controller):
'''
Initialize Student page
'''
ttk.Frame.__init__(self, master)
self.logger = logging.getLogger(__name__)
self.master = master
self.controller = controller
# Master frame for all widgets
self.master_frame = ttk.Frame(self.master)
# Frame for top window elements
self.top_frame = ttk.Frame(self.master_frame)
self.mid_frame = ttk.Frame(self.master_frame)
self.content_frame = ttk.Frame(self.master_frame)
self.students_frame = ttk.Frame(self.content_frame)
self.assignments_frame = ttk.Frame(self.content_frame, width=350, height=350)
self.master_frame.pack()
self.top_frame.pack(side=tk.TOP)
self.mid_frame.pack(side=tk.TOP)
self.content_frame.pack()
self.students_frame.pack(side=tk.LEFT, padx=10, pady=10)
self.assignments_frame.pack(side=tk.RIGHT, padx=10, pady=10)
self.assignments_frame.pack_propagate(False)
classes_label = ttk.Label(self.top_frame, text='Classes:')
self.class_value = tk.StringVar()
self.class_subject = ttk.Combobox(self.top_frame, textvariable=self.class_value, state='readonly')
def create_treeview(frame):
# Using treeview widget
treev = ttk.Treeview(frame, selectmode ='browse')
            # Calling pack method w.r.t. the treeview
treev.pack(side ='right')
# Constructing vertical scrollbar
# with treeview
verscrlbar = ttk.Scrollbar(frame, orient ="vertical", command = treev.yview)
            # Calling pack method w.r.t. the vertical
            # scrollbar
verscrlbar.pack(side ='right', fill ='x')
# Configuring treeview
treev.configure(xscrollcommand = verscrlbar.set)
return treev
self.tree_student = create_treeview(self.students_frame)
self.tree_assignments = create_treeview(self.assignments_frame)
classes_label.pack(side=tk.LEFT, padx=25, pady=10)
self.class_subject.pack()
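# Illustrative sketch (not part of the original module): embedding the page in a
# bare Tk root window; the `controller` object is application-specific and is
# simply omitted here.
#
#   root = tk.Tk()
#   page = StudentPage(root, controller=None)
#   page.pack(fill='both', expand=True)
#   root.mainloop()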
|
the-stack_0_2721 | import numpy as np
from PIL import Image
import cv2
import matplotlib.pyplot as plt
import pickle
from matplotlib import style
import time
style.use("ggplot")
SIZE = 20
HM_EPISODES = 25000
MOVE_PENALTY = 1
ENEMY_PENALTY = 300
FOOD_REWARD = 25
epsilon = 0.9
EPS_DECAY = 0.9998
SHOW_EVERY = 1000
start_q_table = None #'aa'#'qtable - 1574500480, pickle' #None # or the filename of a saved q-table
LEARNING_RATE = 0.1
DISCOUNT = 0.95
PLAYER_N = 1
FOOD_N = 2
ENEMY_N = 3
d = {1: (255, 175, 0), 2: (0, 255, 0), 3: (0, 0, 255)}
class Blob:
def __init__(self):
self.x = np.random.randint(0, SIZE)
self.y = np.random.randint(0, SIZE)
def __str__(self):
return f"{self.x}, {self.y}"
def __sub__(self, other):
return (self.x - other.x, self.y - other.y)
    def action(self, choice): # more actions can be added
if choice == 0:
self.move(x=1, y=1)
elif choice == 1:
self.move(x=-1, y=-1)
elif choice == 2:
self.move(x=-1, y=1)
elif choice == 3:
self.move(x=1, y=-1)
if choice == 4:
self.move(x=0, y=1)
elif choice == 5:
self.move(x=0, y=-1)
elif choice == 6:
self.move(x=-1, y=0)
elif choice == 7:
self.move(x=1, y=0)
def move(self, x=False, y=False):
if not x:
self.x += np.random.randint(-1,2)
else:
self.x += x
if not y:
self.y += np.random.randint(-1,2)
else:
self.y += y
if self.x < 0: self.x = 0
elif self.x > SIZE - 1: self.x = SIZE - 1
if self.y < 0: self.y = 0
elif self.y > SIZE - 1: self.y = SIZE - 1
if start_q_table is None:
q_table = {}
for x1 in range(-SIZE + 1, SIZE):
for y1 in range(-SIZE + 1, SIZE):
for x2 in range(-SIZE + 1, SIZE):
for y2 in range(-SIZE + 1, SIZE):
q_table[((x1,y1), (x2,y2))] = [np.random.uniform(-5, 0) for i in range(7)]
else:
with open(start_q_table, "rb") as f:
q_table = pickle.load(f)
episode_rewards = []
for episode in range(HM_EPISODES):
player = Blob()
food = Blob()
enemy = Blob()
if episode % SHOW_EVERY == 0:
print(f"on # {episode}, epsilon: {epsilon}")
print(f"{SHOW_EVERY} ep mean {np.mean(episode_rewards[-SHOW_EVERY:])}")
show = True
else:
show = False
episode_reward = 0
for i in range(200):
        obs = (player-food, player-enemy) # what the agent observes: position relative to food and enemy
if np.random.random() > epsilon:
action = np.argmax(q_table[obs])
else:
action = np.random.randint(0, 7)
player.action(action)
        '''maybe later:
        enemy.move()
        food.move() '''
if player.x == enemy.x and player.y == enemy.y:
reward = -ENEMY_PENALTY
elif player.x == food.x and player.y == food.y:
reward = FOOD_REWARD
else:
reward = -MOVE_PENALTY
new_obs = (player-food, player-enemy)
max_future_q = np.max(q_table[new_obs])
current_q = q_table[obs][action]
if reward == FOOD_REWARD:
new_q = FOOD_REWARD
elif reward == -ENEMY_PENALTY:
new_q = -ENEMY_PENALTY
else:
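            # Standard Q-learning update:
            # Q(s, a) <- (1 - lr) * Q(s, a) + lr * (reward + discount * max_a' Q(s', a'))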
new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
q_table[obs][action] = new_q
if show:
env = np.zeros((SIZE, SIZE, 3), dtype=np.uint8)
env[food.y][food.x] = d[FOOD_N]
env[player.y][player.x] = d[PLAYER_N]
env[enemy.y][enemy.x] = d[ENEMY_N]
img = Image.fromarray(env, "RGB")
img = img.resize((300, 300))
cv2.imshow("", np.array(img))
if reward == FOOD_REWARD or reward == -ENEMY_PENALTY:
if cv2.waitKey(500) & 0xFF == ord("q"):
break
else:
if cv2.waitKey(1) & 0xFF == ord("q"):
break
episode_reward += reward
if reward == FOOD_REWARD or reward == -ENEMY_PENALTY:
break
episode_rewards.append(episode_reward)
epsilon *= EPS_DECAY
moving_avg = np.convolve(episode_rewards, np.ones((SHOW_EVERY,)) / SHOW_EVERY, mode="valid")
plt.plot([i for i in range(len(moving_avg))], moving_avg)
plt.ylabel(f"reward {SHOW_EVERY}")
plt.xlabel("episode #")
plt.show()
with open(f"qtablenew - {int(time.time())} pickle", "wb") as f:
pickle.dump(q_table, f)
|
the-stack_0_2723 | from typing import Dict, Iterable
import sqlalchemy.sql.expression as sql
from sqlalchemy.orm import selectinload
from transiter.db import dbconnection, models
def list_groups_and_maps_for_stops_in_route(route_pk):
"""
This function is used to get the service maps for a route.
It returns a list of tuples (service map group, service map) for each
service map group having use_for_stops_in_route equal True.
:param route_pk: the route's PK
:return: the list described above
"""
session = dbconnection.get_session()
query = (
session.query(models.ServiceMapGroup, models.ServiceMap)
.join(models.System, models.System.pk == models.ServiceMapGroup.system_pk)
.join(models.Route, models.Route.system_pk == models.System.pk)
.outerjoin(
models.ServiceMap,
sql.and_(
models.ServiceMap.route_pk == models.Route.pk,
models.ServiceMap.group_pk == models.ServiceMapGroup.pk,
),
)
.filter(models.ServiceMapGroup.use_for_stops_in_route)
.filter(models.Route.pk == route_pk)
.options(selectinload(models.ServiceMap.vertices))
.options(selectinload(models.ServiceMap.vertices, models.ServiceMapVertex.stop))
)
return [(group, map_) for (group, map_) in query]
def get_stop_pk_to_group_id_to_routes_map(
stop_pks,
) -> Dict[int, Dict[str, Iterable[models.Route]]]:
"""
This function is used to get service map information for stops; namely,
which routes call at the stop based on the service maps.
Get a map whose key is a stop's PK and whose the value is another map.
This second map has a key for every service map group having
use_for_routes_at_stop equal to True. The value of this map is the list of
routes that contain the stop in the relevant service map.
:param stop_pks: stop PKs to build the map for
:return: the monster map described above
"""
session = dbconnection.get_session()
query = (
session.query(models.Stop.pk, models.ServiceMapGroup.id, models.Route)
.join(models.System, models.System.pk == models.Stop.system_pk)
.join(
models.ServiceMapGroup,
sql.and_(
models.ServiceMapGroup.system_pk == models.System.pk,
models.ServiceMapGroup.use_for_routes_at_stop,
),
)
.outerjoin(
models.ServiceMap,
sql.and_(
models.ServiceMap.group_pk == models.ServiceMapGroup.pk,
models.ServiceMap.pk.in_(
session.query(models.ServiceMapVertex.map_pk).filter(
models.ServiceMapVertex.stop_pk == models.Stop.pk
)
),
),
)
.outerjoin(models.Route, models.Route.pk == models.ServiceMap.route_pk)
.filter(models.Stop.pk.in_(stop_pks))
)
response = {stop_pk: {} for stop_pk in stop_pks}
for stop_pk, group_id, route in query:
if group_id not in response[stop_pk]:
response[stop_pk][group_id] = []
if route is not None:
response[stop_pk][group_id].append(route)
return response
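# Illustrative sketch (not part of the original module): for two stop PKs and a
# single service map group, the returned structure would look roughly like
#
#   {
#       101: {"realtime": [<Route A>, <Route B>]},
#       102: {"realtime": []},
#   }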
|
the-stack_0_2725 | # -*- coding: UTF-8 -*-
# Copyright 2010-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
import logging
logger = logging.getLogger(__name__)
from django.utils.translation import ugettext_lazy as _
from lino.modlib.office.roles import OfficeStaff
from lino.api import dd, rt
class Shortcut(dd.Choice):
"""Represents a shortcut field."""
model_spec = None
target = 'uploads.UploadsByController'
def __init__(self, model_spec, name, verbose_name, target=None):
if target is not None:
self.target = target
self.model_spec = model_spec
value = model_spec + "." + name
super(Shortcut, self).__init__(value, verbose_name, name)
def get_uploads(self, **kw):
"""Return a queryset with the uploads of this shortcut."""
return rt.models.uploads.Upload.objects.filter(
type__shortcut=self, **kw)
class Shortcuts(dd.ChoiceList):
verbose_name = _("Upload shortcut")
verbose_name_plural = _("Upload shortcuts")
item_class = Shortcut
max_length = 50 # fields get created before the values are known
class UploadAreas(dd.ChoiceList):
required_roles = dd.login_required(OfficeStaff)
verbose_name = _("Upload Area")
verbose_name_plural = _("Upload Areas")
add = UploadAreas.add_item
add('90', _("Uploads"), 'general')
def add_shortcut(*args, **kw):
return Shortcuts.add_item(*args, **kw)
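# Illustrative sketch (not part of the original plugin): a concrete application
# would declare a shortcut field roughly as follows; the model and field names
# here are made up.
#
#   add_shortcut('contacts.Person', 'id_document', _("Identifying document"))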
|
the-stack_0_2726 | import cv2
from PIL import Image
import numpy as np
from subprocess import Popen, PIPE
from enum import IntEnum, auto
import sys, math, os, time, argparse
import threading
import queue
from keras.models import load_model
import tensorflow as tf
sys.path.append(os.path.join(os.path.dirname(__file__), 'UGATIT'))
from UGATIT import UGATIT
'''
suppress warnings
'''
import logging, warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=Warning)
tf.get_logger().setLevel('INFO')
tf.autograph.set_verbosity(0)
tf.get_logger().setLevel(logging.ERROR)
'''
Command line arguments
'''
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(description='specify input and output device')
parser.add_argument('--input_video_num', type=int, required=True,
help='input video device number. ex) if input is /dev/video0 then the value is 0')
parser.add_argument('--output_video_dev', type=str, required=True,
help='input video device. ex) /dev/video2')
parser.add_argument('--emotion_mode', type=str2bool, required=False, default=False,
help='enable emotion mode')
parser.add_argument('--anime_mode', type=str2bool, required=False, default=False,
help='enable anime mode')
parser.add_argument('--skip_frame', type=int, required=False, default=1,
help='enable skip frame')
parser.add_argument('--crop_face', type=str2bool, required=False, default=True,
help='enable crop face')
parser.add_argument('--show_fps', type=str2bool, required=False, default=False,
help='show fpc')
parser.add_argument('--show_source', type=str2bool, required=False, default=False,
help='show source')
'''
args for anime mode
'''
parser.add_argument('--phase', type=str, default='test', help='[train / test]')
parser.add_argument('--light', type=str2bool, default=False, help='[U-GAT-IT full version / U-GAT-IT light version]')
parser.add_argument('--dataset', type=str, default='selfie2anime', help='dataset_name')
parser.add_argument('--epoch', type=int, default=100, help='The number of epochs to run')
parser.add_argument('--iteration', type=int, default=10000, help='The number of training iterations')
parser.add_argument('--batch_size', type=int, default=1, help='The size of batch size')
parser.add_argument('--print_freq', type=int, default=1000, help='The number of image_print_freq')
parser.add_argument('--save_freq', type=int, default=1000, help='The number of ckpt_save_freq')
parser.add_argument('--decay_flag', type=str2bool, default=True, help='The decay_flag')
parser.add_argument('--decay_epoch', type=int, default=50, help='decay epoch')
parser.add_argument('--lr', type=float, default=0.0001, help='The learning rate')
parser.add_argument('--GP_ld', type=int, default=10, help='The gradient penalty lambda')
parser.add_argument('--adv_weight', type=int, default=1, help='Weight about GAN')
parser.add_argument('--cycle_weight', type=int, default=10, help='Weight about Cycle')
parser.add_argument('--identity_weight', type=int, default=10, help='Weight about Identity')
parser.add_argument('--cam_weight', type=int, default=1000, help='Weight about CAM')
parser.add_argument('--gan_type', type=str, default='lsgan', help='[gan / lsgan / wgan-gp / wgan-lp / dragan / hinge]')
parser.add_argument('--smoothing', type=str2bool, default=True, help='AdaLIN smoothing effect')
parser.add_argument('--ch', type=int, default=64, help='base channel number per layer')
parser.add_argument('--n_res', type=int, default=4, help='The number of resblock')
parser.add_argument('--n_dis', type=int, default=6, help='The number of discriminator layer')
parser.add_argument('--n_critic', type=int, default=1, help='The number of critic')
parser.add_argument('--sn', type=str2bool, default=True, help='using spectral norm')
parser.add_argument('--img_size', type=int, default=256, help='The size of image')
parser.add_argument('--img_ch', type=int, default=3, help='The size of image channel')
parser.add_argument('--augment_flag', type=str2bool, default=True, help='Image augmentation use or not')
parser.add_argument('--checkpoint_dir', type=str, default='checkpoint',
help='Directory name to save the checkpoints')
parser.add_argument('--result_dir', type=str, default='results',
help='Directory name to save the generated images')
parser.add_argument('--log_dir', type=str, default='logs',
help='Directory name to save training logs')
parser.add_argument('--sample_dir', type=str, default='samples',
help='Directory name to save the samples on training')
args = parser.parse_args()
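# Illustrative invocation (not part of the original script); the script name,
# device numbers and the v4l2loopback output device are assumptions:
#
#   python webcam_hook.py --input_video_num 0 --output_video_dev /dev/video2 --anime_mode true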
BATCH_SIZE = args.batch_size
'''
Queue for anime mode
'''
anime_mode_input_queue = queue.Queue()
anime_mode_output_queue = queue.Queue()
anime_buffer_image = None
anime_frame_num = 0
anime_fps_start = time.time()
anime_fps = 0
anime_frame_count = 0
'''
Mode definition
'''
class modes(IntEnum):
SIMPLE_SMILE_MODE = auto()
EMOTION_MODE = auto()
ANIME_MODE = auto()
'''
Classifiers
'''
face_classifier_classifier = None
anime_session = None
anime_model = None
'''
Path for resources
'''
face_cascade_path = './models/haarcascade_frontalface_default.xml'
def anime_mode_worker():
frames = []
while True:
item_num = anime_mode_input_queue.qsize()
#print(item_num)
for i in range(item_num):
frame = anime_mode_input_queue.get()
frame = cv2.resize(frame, dsize=(256, 256))
frames.append(frame)
#print(f'{i}/{item_num}')
if len(frames) < BATCH_SIZE:
if item_num == 0:
pass
#time.sleep(1)
continue
frames = np.array(frames)
#print(sys.stderr, frames.shape)
new_frames = anime_model.predict(frames[-1 * BATCH_SIZE:])
for i, (old_frame, new_frame) in enumerate(zip(frames[-1 * BATCH_SIZE:], new_frames)):
anime_mode_output_queue.put( (old_frame, new_frame))
frames = []
def load_resources(mode):
global face_classifier_classifier
face_classifier_classifier = cv2.CascadeClassifier(face_cascade_path)
if mode == modes.ANIME_MODE:
global anime_session, anime_model
anime_session = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
anime_model = UGATIT(anime_session, args)
anime_model.build_model()
anime_model.load_model(anime_session)
def paste(img, imgback, x, y, angle, scale):
if img.shape [0] > imgback.shape[0] or img.shape[1] > imgback.shape[1]:
h_ratio = imgback.shape[0] / img.shape[0]
w_ratio = imgback.shape[1] / img.shape[1]
if h_ratio < w_ratio:
new_h = int(img.shape[0] * h_ratio)
new_w = int(img.shape[1] * h_ratio)
else:
new_h = int(img.shape[0] * w_ratio)
new_w = int(img.shape[1] * w_ratio)
if new_h % 2 != 0:
new_h += 1
if new_w % 2 != 0:
new_w += 1
img = cv2.resize(img, (new_w, new_h))
#print(sys.stderr, f'pate resize img : {new_h}, {new_w}')
r = img.shape[0]
c = img.shape[1]
rb = imgback.shape[0]
cb = imgback.shape[1]
hrb = round(rb/2)
hcb = round(cb/2)
hr = round(r/2)
hc = round(c/2)
#print(sys.stderr, f'(2) -> {r}, {c}, {rb},{cb}')
# Copy the forward image and move to the center of the background image
imgrot = np.zeros((rb,cb,3),np.uint8)
imgrot[hrb-hr:hrb+hr,hcb-hc:hcb+hc,:] = img[:hr*2,:hc*2,:]
# Rotation and scaling
M = cv2.getRotationMatrix2D((hcb,hrb),angle,scale)
imgrot = cv2.warpAffine(imgrot,M,(cb,rb))
# Translation
M = np.float32([[1,0,x],[0,1,y]])
imgrot = cv2.warpAffine(imgrot,M,(cb,rb))
    # Making a mask
imggray = cv2.cvtColor(imgrot,cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(imggray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
# Now black-out the area of the forward image in the background image
img1_bg = cv2.bitwise_and(imgback,imgback,mask = mask_inv)
# Take only region of the forward image.
img2_fg = cv2.bitwise_and(imgrot,imgrot,mask = mask)
# Paste the forward image on the background image
imgpaste = cv2.add(img1_bg,img2_fg)
return imgpaste
def apply_offsets_for_anime_mode(face_location, offsets):
x, y, width, height = face_location
x_off, y_off = offsets # x_off is ignored here.
### At first Top and Bottom are determined.
top = y - y_off
bottom = y + height + y_off
if top < 0:
top = 0
### determin x_off so as to make square.
new_height = bottom - top
x_off = int((new_height - width ) / 2)
### Then Left and Right are determined.
left = x - x_off
right = x + width + x_off
if left < 0 :
left = 0
### return
return (x - x_off, x + width + x_off, top, bottom)
def preprocess_input(x, v2=True):
x = x.astype('float32')
x = x / 255.0
if v2:
x = x - 0.5
x = x * 2.0
return x
def edit_frame(frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_classifier_classifier.detectMultiScale(gray, 1.1, 5)
if mode == modes.ANIME_MODE:
if args.crop_face == True:
for (x,y,w,h) in faces[:1]:
#cv2.rectangle(frame,(x,y),(x+w,y+h),(255, 0, 0),2)
global anime_buffer_image, anime_frame_num, anime_fps_start, anime_fps, anime_frame_count
### new frame entry to process (raw frame)
anime_offsets = (60, 60)
x1, x2, y1, y2 = apply_offsets_for_anime_mode((x,y,w,h), anime_offsets)
anime_rgb = frame[y1:y2, x1:x2]
if len(faces) == 0:
#anime_rgb = np.zeros((256, 256, 3), np.uint8)
anime_rgb = None
else:
anime_rgb = frame
try:
cv2.imwrite('tmp.png',anime_rgb)
img = cv2.imread('tmp.png', flags=cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
anime_rgb = img
anime_mode_input_queue.put(anime_rgb)
except Exception as e:
### if exception occur put original frame
#anime_mode_input_queue.put(frame)
pass
### show edited frame
try:
new_frame = anime_mode_output_queue.get(block=False)
# to be shown frame(animated frame)
(old_frame, new_frame) = new_frame
old_frame = cv2.resize(old_frame, (50, 50))
if args.show_source == True:
new_frame = paste(old_frame, new_frame, +80, -80, 0, 1.0)
anime_fps_now = time.time()
if anime_fps_now - anime_fps_start > 5:
spend_time = anime_fps_now - anime_fps_start
anime_fps = round((anime_frame_num / spend_time),2)
anime_fps_start = anime_fps_now
anime_frame_num = 0
# for fps
font_scale=0.5
color = (200,200,200)
thickness=1
if args.show_fps == True:
cv2.putText(new_frame, f'fps:{anime_fps}',
(10,50),
cv2.FONT_HERSHEY_SIMPLEX,
font_scale, color, thickness, cv2.LINE_AA
)
anime_frame_count += 1
if anime_frame_count % args.skip_frame == 0:
anime_frame_count = 0
anime_buffer_image = new_frame
anime_frame_num += 1
except queue.Empty as e:
if anime_buffer_image is None:
anime_buffer_image = np.zeros((256, 256, 3), np.uint8)
pass
### If face is not detected, show previous frame or blank frame
if mode == modes.ANIME_MODE:
if anime_buffer_image is not None:
frame = anime_buffer_image
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
else:
frame = np.zeros((256, 256, 3), np.uint8)
return frame
if __name__=="__main__":
input = args.input_video_num
output = args.output_video_dev
cap = cv2.VideoCapture(input)
if args.anime_mode == True:
mode = modes.ANIME_MODE
else:
mode = modes.SIMPLE_SMILE_MODE
print(f'start with mode: {mode}')
load_resources(mode)
print('web camera hook start!')
p = Popen(['ffmpeg', '-y', '-i', '-', '-pix_fmt', 'yuyv422', '-f', 'v4l2', output], stdin=PIPE)
try:
if mode == modes.ANIME_MODE:
t = threading.Thread(target=anime_mode_worker)
t.start()
while True:
ret,im = cap.read()
im = edit_frame(im)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
im = Image.fromarray(np.uint8(im))
im.save(p.stdin, 'JPEG')
except KeyboardInterrupt:
pass
anime_session.close()
p.stdin.close()
p.wait()
print('web camera hook fin!')
|
the-stack_0_2727 | """
As Rigid as Possible Interpolation from a pair of Mesh structures
"""
from Escher.Geometry import Mesh
from typing import List
import numpy as np
import Escher.GeometryRoutines as geom
import Escher.AlgebraRoutines as alg
import logging
from scipy.linalg import block_diag
from scipy.spatial.transform import Slerp,Rotation
def interpolate(src_mesh:Mesh, tgt_mesh:Mesh, interval:float, fragment_resolution="quadratic") -> List[Mesh]:
"""
    Interpolate between two meshes that have corresponding vertices and the same topology.
    Arguments:
        src_mesh, tgt_mesh: source and target Mesh instances.
        interval: interpolation step size, a float between 0 and 1.
        fragment_resolution: how the per-face transformations are resolved into vertex
            positions; one of "quadratic" (least-squares solve) or "average".
"""
interpolated_meshes = []
src_face_batch = src_mesh.get_faces_as_matrices() # fx3x3
tgt_face_batch = tgt_mesh.get_faces_as_matrices()
per_face_slerp_instances = []
per_face_scales = []
identity_rotation = np.expand_dims(np.eye(3),0)
per_face_transformations = []
per_face_translations = []
for _index in range(src_face_batch.shape[0]):
src_face_matrix = src_face_batch[_index]
tgt_face_matrix = tgt_face_batch[_index]
src_tet_matrix = geom.tetrahedralize(src_face_matrix)
tgt_tet_matrix = geom.tetrahedralize(tgt_face_matrix)
mat_Q = alg.get_transformation_matrix_for((src_tet_matrix[:3,:]-src_tet_matrix[3,:]).T,
(tgt_tet_matrix[:3,:]-tgt_tet_matrix[3,:]).T)
face_translation = np.expand_dims(tgt_tet_matrix[3,:].T,-1) - (mat_Q @ np.expand_dims(src_tet_matrix[3,:].T,-1))
per_face_translations.append(face_translation.squeeze())
per_face_transformations.append(mat_Q)
R,S = alg.get_rotation_scale_from_transformation(mat_Q)
rotation_endpoints_matrix = np.concatenate([identity_rotation,np.expand_dims(R,0)],axis=0)
_slerp = Slerp(times=[0,1],rotations=Rotation.from_matrix(rotation_endpoints_matrix))
per_face_slerp_instances.append(_slerp)
per_face_scales.append(S)
if fragment_resolution == "average":
vertex_id_to_face_id = src_mesh.get_vertex_id_to_face_id()
number_of_faces_each_vertex = np.expand_dims(np.array([len(face_list) for face_list in vertex_id_to_face_id]),-1)
for t in np.arange(0,1+interval,interval):
new_vertices = np.zeros(src_mesh.vertices.shape)
for _index in range(src_face_batch.shape[0]):
interpolated_rotation_matrix_face = per_face_slerp_instances[_index]([t])[0].as_matrix()
interpolated_scale_matrix_face = (1-t)*np.eye(3) + t*per_face_scales[_index]
interpolated_transformation_matrix = interpolated_rotation_matrix_face @ interpolated_scale_matrix_face
interpolated_translation = t*per_face_translations[_index].T
src_face_matrix = src_face_batch[_index]
new_face_matrix = (interpolated_transformation_matrix @ src_face_matrix.T).T + interpolated_translation
face = src_mesh.faces[_index]
for i,vertex_id in enumerate(face):
new_vertices[vertex_id,:] += new_face_matrix[i,:]
new_vertices /= number_of_faces_each_vertex
interpolated_mesh = Mesh(vertices=new_vertices,faces=src_mesh.faces)
interpolated_meshes.append(interpolated_mesh)
elif fragment_resolution == "quadratic":
src_face_inverse_list = []
#mat_H = np.zeros((src_mesh.num_vertices-1,src_mesh.num_vertices-1))
mat_H = np.zeros((src_mesh.num_vertices,src_mesh.num_vertices))
fixed_vertex_id = 0 # this vertex id is fixed by linear interpolation,
# we don't solve for it. That is why the system has a solution.
vertex_orders = [0,1,2]
for face_index in range(src_face_batch.shape[0]):
src_face_matrix = src_face_batch[face_index,:,:].T
src_face_inverse = np.linalg.inv(src_face_matrix)
src_face_inverse_list.append(src_face_inverse)
face = src_mesh.faces[face_index]
for vertex_order_in_face,v_id in enumerate(face):
#if v_id == fixed_vertex_id:
# continue
other_vertex_orders = [order for order in vertex_orders if order!=vertex_order_in_face]
row_for_vertex = src_face_inverse[vertex_order_in_face,:]
quadratic_term = np.sum(np.square(row_for_vertex))
mat_H[v_id,v_id] += quadratic_term
#mat_H[v_id-1,v_id-1] += quadratic_term
for other_vertex_order_ in other_vertex_orders:
other_vertex_id = face[other_vertex_order_]
other_vertex_row = src_face_inverse[other_vertex_order_,:]
#if other_vertex_id == fixed_vertex_id:
# continue
#else:
mixed_term = np.dot(row_for_vertex,other_vertex_row)
mat_H[v_id,other_vertex_id] += mixed_term
#mat_H[v_id-1,other_vertex_id-1] += mixed_term
mat_H_inverse = np.linalg.inv(mat_H)
x_index = 0
y_index = 1
z_index = 2
src_fixed_vertex = np.expand_dims(src_mesh.vertices[fixed_vertex_id],0)
tgt_fixed_vertex = np.expand_dims(tgt_mesh.vertices[fixed_vertex_id],0)
for t in np.arange(0,1,interval):
#print(t,flush=True)
mat_Gx = np.zeros((src_mesh.num_vertices,1))
#mat_Gx = np.zeros((src_mesh.num_vertices-1,1))
mat_Gy = np.zeros((src_mesh.num_vertices,1))
#mat_Gy = np.zeros((src_mesh.num_vertices-1,1))
mat_Gz = np.zeros((src_mesh.num_vertices,1))
#mat_Gz = np.zeros((src_mesh.num_vertices-1,1))
interpolated_fixed_vertex = ((1-t)*src_fixed_vertex + t*tgt_fixed_vertex)
for face_index in range(src_face_batch.shape[0]):
interpolated_rotation_matrix_face = per_face_slerp_instances[face_index]([t])[0].as_matrix()
interpolated_scale_matrix_face = (1-t)*np.eye(3) + t*per_face_scales[face_index]
interpolated_transformation_matrix = interpolated_rotation_matrix_face @ interpolated_scale_matrix_face
face_inverse_matrix = src_face_inverse_list[face_index]
face = src_mesh.faces[face_index]
for vertex_order_in_face,v_id in enumerate(face):
if v_id == fixed_vertex_id:
continue
linear_term_x = np.dot(face_inverse_matrix[vertex_order_in_face,:],interpolated_transformation_matrix[x_index,:])
mat_Gx[v_id] += -1*linear_term_x
#mat_Gx[v_id-1] += -1*linear_term_x
linear_term_y = np.dot(face_inverse_matrix[vertex_order_in_face,:],interpolated_transformation_matrix[y_index,:])
mat_Gy[v_id] += -1*linear_term_y
#mat_Gy[v_id-1] += -1*linear_term_y
linear_term_z = np.dot(face_inverse_matrix[vertex_order_in_face,:],interpolated_transformation_matrix[z_index,:])
mat_Gz[v_id] += -1*linear_term_z
#mat_Gz[v_id-1] += -1*linear_term_z
'''
other_vertex_orders = [order for order in vertex_orders if order!=vertex_order_in_face]
row_for_vertex = face_inverse_matrix[vertex_order_in_face,:]
for other_vertex_order_ in other_vertex_orders:
other_vertex_id = face[other_vertex_order_]
other_vertex_row = face_inverse_matrix[other_vertex_order_,:]
if other_vertex_id == fixed_vertex_id:
fixed_term_x = 2*interpolated_fixed_vertex[0][0]*row_for_vertex[0]*other_vertex_row[0]
fixed_term_y = 2*interpolated_fixed_vertex[0][1]*row_for_vertex[1]*other_vertex_row[1]
fixed_term_z = 2*interpolated_fixed_vertex[0][2]*row_for_vertex[2]*other_vertex_row[2]
mat_Gx[v_id] += fixed_term_x
#mat_Gx[v_id-1] += fixed_term_x
mat_Gy[v_id] += fixed_term_y
#mat_Gy[v_id-1] += fixed_term_y
mat_Gz[v_id] += fixed_term_z
#mat_Gz[v_id-1] += fixed_term_z
'''
mat_G = np.hstack([mat_Gx,mat_Gy,mat_Gz])
interpolated_vertices = -1* (mat_H_inverse @ mat_G)
interpolated_translation = (1-t)*src_mesh.vertices + t*tgt_mesh.vertices #np.expand_dims(interpolated_fixed_vertex[0] - src_mesh.vertices[fixed_vertex_id],0) #t*tgt_mesh.vertices[fixed_vertex_id] + (1-t)* src_mesh.vertices[fixed_vertex_id]
#interpolated_translation = t*(tgt_mesh.vertices[fixed_vertex_id+1:,:] - src_mesh.vertices[fixed_vertex_id+1:,:])
interpolated_vertices += interpolated_translation
#interpolated_vertices = np.vstack([interpolated_fixed_vertex,other_interpolated_vertices])
interpolated_mesh = Mesh(vertices=interpolated_vertices,faces=src_mesh.faces)
interpolated_meshes.append(interpolated_mesh)
else:
logging.error("Given fragment resolution method unknown")
return interpolated_meshes
|
the-stack_0_2728 | import abc
import itertools
from dataclasses import dataclass, field
from typing import (
Any, ClassVar, Dict, Tuple, Iterable, Optional, List, Callable,
)
from dbt.exceptions import InternalException
from dbt.utils import translate_aliases
from dbt.logger import GLOBAL_LOGGER as logger
from typing_extensions import Protocol
from dbt.dataclass_schema import (
dbtClassMixin, StrEnum, ExtensibleDbtClassMixin,
ValidatedStringMixin, register_pattern
)
from dbt.contracts.util import Replaceable
class Identifier(ValidatedStringMixin):
ValidationRegex = r'^[A-Za-z_][A-Za-z0-9_]+$'
# we need register_pattern for jsonschema validation
register_pattern(Identifier, r'^[A-Za-z_][A-Za-z0-9_]+$')
@dataclass
class AdapterResponse(dbtClassMixin):
_message: str
code: Optional[str] = None
rows_affected: Optional[int] = None
def __str__(self):
return self._message
class ConnectionState(StrEnum):
INIT = 'init'
OPEN = 'open'
CLOSED = 'closed'
FAIL = 'fail'
@dataclass(init=False)
class Connection(ExtensibleDbtClassMixin, Replaceable):
type: Identifier
name: Optional[str] = None
state: ConnectionState = ConnectionState.INIT
transaction_open: bool = False
_handle: Optional[Any] = None
_credentials: Optional[Any] = None
def __init__(
self,
type: Identifier,
name: Optional[str],
credentials: dbtClassMixin,
state: ConnectionState = ConnectionState.INIT,
transaction_open: bool = False,
handle: Optional[Any] = None,
) -> None:
self.type = type
self.name = name
self.state = state
self.credentials = credentials
self.transaction_open = transaction_open
self.handle = handle
@property
def credentials(self):
return self._credentials
@credentials.setter
def credentials(self, value):
self._credentials = value
@property
def handle(self):
if isinstance(self._handle, LazyHandle):
try:
# this will actually change 'self._handle'.
self._handle.resolve(self)
except RecursionError as exc:
raise InternalException(
"A connection's open() method attempted to read the "
"handle value"
) from exc
return self._handle
@handle.setter
def handle(self, value):
self._handle = value
class LazyHandle:
"""Opener must be a callable that takes a Connection object and opens the
connection, updating the handle on the Connection.
"""
def __init__(self, opener: Callable[[Connection], Connection]):
self.opener = opener
def resolve(self, connection: Connection) -> Connection:
logger.debug(
'Opening a new connection, currently in state {}'
.format(connection.state)
)
return self.opener(connection)
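# A minimal usage sketch (not part of dbt): how a LazyHandle defers opening a
# connection until the handle is first accessed. `_fake_opener` is a stand-in
# for a real adapter's open() implementation, not a dbt API.
if __name__ == "__main__":
    def _fake_opener(conn: Connection) -> Connection:
        conn.handle = object()          # placeholder for a real DB-API handle
        conn.state = ConnectionState.OPEN
        return conn

    _conn = Connection(type='postgres', name='example', credentials=None)
    _conn.handle = LazyHandle(_fake_opener)
    _ = _conn.handle                    # first access triggers _fake_opener
    assert _conn.state == ConnectionState.OPEN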
# see https://github.com/python/mypy/issues/4717#issuecomment-373932080
# and https://github.com/python/mypy/issues/5374
# for why we have type: ignore. Maybe someday dataclasses + abstract classes
# will work.
@dataclass # type: ignore
class Credentials(
ExtensibleDbtClassMixin,
Replaceable,
metaclass=abc.ABCMeta
):
database: str
schema: str
_ALIASES: ClassVar[Dict[str, str]] = field(default={}, init=False)
@abc.abstractproperty
def type(self) -> str:
raise NotImplementedError(
'type not implemented for base credentials class'
)
def connection_info(
self, *, with_aliases: bool = False
) -> Iterable[Tuple[str, Any]]:
"""Return an ordered iterator of key/value pairs for pretty-printing.
"""
as_dict = self.to_dict(omit_none=False)
connection_keys = set(self._connection_keys())
aliases: List[str] = []
if with_aliases:
aliases = [
k for k, v in self._ALIASES.items() if v in connection_keys
]
for key in itertools.chain(self._connection_keys(), aliases):
if key in as_dict:
yield key, as_dict[key]
@abc.abstractmethod
def _connection_keys(self) -> Tuple[str, ...]:
raise NotImplementedError
@classmethod
def __pre_deserialize__(cls, data):
data = super().__pre_deserialize__(data)
data = cls.translate_aliases(data)
return data
@classmethod
def translate_aliases(
cls, kwargs: Dict[str, Any], recurse: bool = False
) -> Dict[str, Any]:
return translate_aliases(kwargs, cls._ALIASES, recurse)
def __post_serialize__(self, dct):
# no super() -- do we need it?
if self._ALIASES:
dct.update({
new_name: dct[canonical_name]
for new_name, canonical_name in self._ALIASES.items()
if canonical_name in dct
})
return dct
class UserConfigContract(Protocol):
send_anonymous_usage_stats: bool
use_colors: Optional[bool] = None
partial_parse: Optional[bool] = None
printer_width: Optional[int] = None
def set_values(self, cookie_dir: str) -> None:
...
class HasCredentials(Protocol):
credentials: Credentials
profile_name: str
config: UserConfigContract
target_name: str
threads: int
def to_target_dict(self):
raise NotImplementedError('to_target_dict not implemented')
DEFAULT_QUERY_COMMENT = '''
{%- set comment_dict = {} -%}
{%- do comment_dict.update(
app='dbt',
dbt_version=dbt_version,
profile_name=target.get('profile_name'),
target_name=target.get('target_name'),
) -%}
{%- if node is not none -%}
{%- do comment_dict.update(
node_id=node.unique_id,
) -%}
{% else %}
{# in the node context, the connection name is the node_id #}
{%- do comment_dict.update(connection_name=connection_name) -%}
{%- endif -%}
{{ return(tojson(comment_dict)) }}
'''
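# For reference, the template above renders to a single JSON object that is
# injected into queries as a SQL comment; a sketch of the shape (values are
# illustrative, not actual output):
#   {"app": "dbt", "dbt_version": "...", "profile_name": "...", "target_name": "...",
#    "node_id": "model.my_project.my_model"}
# Outside a node context, `connection_name` is emitted instead of `node_id`.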
@dataclass
class QueryComment(dbtClassMixin):
comment: str = DEFAULT_QUERY_COMMENT
append: bool = False
class AdapterRequiredConfig(HasCredentials, Protocol):
project_name: str
query_comment: QueryComment
cli_vars: Dict[str, Any]
target_path: str
|
the-stack_0_2729 | #!/usr/bin/env python3
import torch
from enum import Enum
from inspect import signature
from .approximation_methods import SUPPORTED_METHODS
class ExpansionTypes(Enum):
repeat = 1
repeat_interleave = 2
def safe_div(denom, quotient, default_value=None):
r"""
    A simple utility function that performs `denom / quotient`,
    returning `default_value` when `quotient` is zero (i.e. when the
    division is undefined).
"""
return denom / quotient if quotient != 0.0 else default_value
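# A minimal usage sketch of safe_div (illustrative only):
if __name__ == "__main__":
    print(safe_div(10.0, 2.0))         # 5.0
    print(safe_div(10.0, 0.0, 0.0))    # 0.0 -- division undefined, default returned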
def _validate_target(num_samples, target):
if isinstance(target, list) or (
isinstance(target, torch.Tensor) and torch.numel(target) > 1
):
assert num_samples == len(target), (
"The number of samples provied in the"
"input {} does not match with the number of targets. {}".format(
num_samples, len(target)
)
)
def _validate_input(
inputs,
baselines,
n_steps=50,
method="riemann_trapezoid",
draw_baseline_from_distrib=False,
):
assert len(inputs) == len(baselines), (
"Input and baseline must have the same "
"dimensions, baseline has {} features whereas input has {}.".format(
len(baselines), len(inputs)
)
)
for input, baseline in zip(inputs, baselines):
if draw_baseline_from_distrib:
assert (
isinstance(baseline, (int, float))
or input.shape[1:] == baseline.shape[1:]
), (
"The samples in input and baseline batches must have"
" the same shape or the baseline corresponding to the"
" input tensor must be a scalar."
" Found baseline: {} and input: {} ".format(baseline, input)
)
else:
assert (
isinstance(baseline, (int, float))
or input.shape == baseline.shape
or baseline.shape[0] == 1
), (
"Baseline can be provided as a tensor for just one input and"
" broadcasted to the batch or input and baseline must have the"
" same shape or the baseline corresponding to each input tensor"
" must be a scalar. Found baseline: {} and input: {}".format(
baseline, input
)
)
assert (
n_steps >= 0
), "The number of steps must be a positive integer. " "Given: {}".format(n_steps)
assert method in SUPPORTED_METHODS, (
"Approximation method must be one for the following {}. "
"Given {}".format(SUPPORTED_METHODS, method)
)
def _validate_noise_tunnel_type(nt_type, supported_noise_tunnel_types):
assert nt_type in supported_noise_tunnel_types, (
"Noise types must be either `smoothgrad`, `smoothgrad_sq` or `vargrad`. "
"Given {}".format(nt_type)
)
def _format_tensor_into_tuples(inputs):
if not isinstance(inputs, tuple):
assert isinstance(
inputs, torch.Tensor
), "`inputs` must have type " "torch.Tensor but {} found: ".format(type(inputs))
inputs = (inputs,)
return inputs
def _format_input(inputs):
return _format_tensor_into_tuples(inputs)
def _format_additional_forward_args(additional_forward_args):
if additional_forward_args is not None and not isinstance(
additional_forward_args, tuple
):
additional_forward_args = (additional_forward_args,)
return additional_forward_args
def _format_baseline(baselines, inputs):
if baselines is None:
return _zeros(inputs)
if not isinstance(baselines, tuple):
baselines = (baselines,)
for baseline in baselines:
assert isinstance(
baseline, (torch.Tensor, int, float)
), "baseline input argument must be either a torch.Tensor or a number \
however {} detected".format(
type(baseline)
)
return baselines
def _format_input_baseline(inputs, baselines):
inputs = _format_input(inputs)
baselines = _format_baseline(baselines, inputs)
return inputs, baselines
# This function can potentially be merged with the `format_baseline` function
# however, since currently not all algorithms support baselines of type
# callable this will be kept in a separate function.
def _format_callable_baseline(baselines, inputs):
if callable(baselines):
# Note: this assumes that if baselines is a function and if it takes
# arguments, then the first argument is the `inputs`.
# This can be expanded in the future with better type checks
baseline_parameters = signature(baselines).parameters
if len(baseline_parameters) == 0:
baselines = baselines()
else:
baselines = baselines(inputs)
return _format_baseline(baselines, inputs)
def _format_attributions(is_inputs_tuple, attributions):
r"""
    In case the input is a single tensor and the attributions are returned as a
    one-element tuple, we take the first element of the attributions tuple to
    match the shape signature of the input.
"""
assert isinstance(attributions, tuple), "Attributions must be in shape of a tuple"
assert is_inputs_tuple or len(attributions) == 1, (
"The input is a single tensor however the attributions aren't."
"The number of attributed tensors is: {}".format(len(attributions))
)
return attributions if is_inputs_tuple else attributions[0]
def _zeros(inputs):
r"""
Takes a tuple of tensors as input and returns a tuple that has the same
size as the `inputs` which contains zero tensors of the same
shape as the `inputs`
"""
return tuple(0.0 for input in inputs)
def _tensorize_baseline(inputs, baselines):
def _tensorize_single_baseline(baseline, input):
if isinstance(baseline, (int, float)):
return torch.full_like(input, baseline)
if input.shape[0] > baseline.shape[0] and baseline.shape[0] == 1:
return torch.cat([baseline] * input.shape[0])
return baseline
assert isinstance(inputs, tuple) and isinstance(baselines, tuple), (
"inputs and baselines must"
"have tuple type but found baselines: {} and inputs: {}".format(
type(baselines), type(inputs)
)
)
return tuple(
_tensorize_single_baseline(baseline, input)
for baseline, input in zip(baselines, inputs)
)
def _reshape_and_sum(tensor_input, num_steps, num_examples, layer_size):
# Used for attribution methods which perform integration
# Sums across integration steps by reshaping tensor to
# (num_steps, num_examples, (layer_size)) and summing over
# dimension 0. Returns a tensor of size (num_examples, (layer_size))
return torch.sum(
tensor_input.reshape((num_steps, num_examples) + layer_size), dim=0
)
def _verify_select_column(output, target):
target = (target,) if isinstance(target, int) else target
assert (
len(target) <= len(output.shape) - 1
), "Cannot choose target column with output shape %r." % (output.shape,)
return output[(slice(None), *target)]
def _select_targets(output, target):
num_examples = output.shape[0]
dims = len(output.shape)
if target is None:
return output
elif isinstance(target, int) or isinstance(target, tuple):
return _verify_select_column(output, target)
elif isinstance(target, torch.Tensor):
if torch.numel(target) == 1 and isinstance(target.item(), int):
return _verify_select_column(output, target.item())
elif len(target.shape) == 1 and torch.numel(target) == num_examples:
assert dims == 2, "Output must be 2D to select tensor of targets."
return torch.gather(output, 1, target.reshape(len(output), 1))
else:
raise AssertionError(
"Tensor target dimension %r is not valid." % (target.shape,)
)
elif isinstance(target, list):
assert len(target) == num_examples, "Target list length does not match output!"
if type(target[0]) is int:
assert dims == 2, "Output must be 2D to select tensor of targets."
return torch.gather(output, 1, torch.tensor(target).reshape(len(output), 1))
elif type(target[0]) is tuple:
return torch.stack(
[output[(i,) + targ_elem] for i, targ_elem in enumerate(target)]
)
else:
raise AssertionError("Target element type in list is not valid.")
else:
raise AssertionError("Target type %r is not valid." % target)
def _run_forward(forward_func, inputs, target=None, additional_forward_args=None):
# make everything a tuple so that it is easy to unpack without
# using if-statements
inputs = _format_input(inputs)
additional_forward_args = _format_additional_forward_args(additional_forward_args)
output = forward_func(
*(*inputs, *additional_forward_args)
if additional_forward_args is not None
else inputs
)
return _select_targets(output, target)
def _expand_additional_forward_args(
additional_forward_args, n_steps, expansion_type=ExpansionTypes.repeat
):
def _expand_tensor_forward_arg(
additional_forward_arg, n_steps, expansion_type=ExpansionTypes.repeat
):
if len(additional_forward_arg.size()) == 0:
return additional_forward_arg
if expansion_type == ExpansionTypes.repeat:
return torch.cat([additional_forward_arg] * n_steps, dim=0)
elif expansion_type == ExpansionTypes.repeat_interleave:
return additional_forward_arg.repeat_interleave(n_steps, dim=0)
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
return tuple(
_expand_tensor_forward_arg(additional_forward_arg, n_steps, expansion_type)
if isinstance(additional_forward_arg, torch.Tensor)
else additional_forward_arg
for additional_forward_arg in additional_forward_args
)
def _expand_target(target, n_steps, expansion_type=ExpansionTypes.repeat):
if isinstance(target, list):
if expansion_type == ExpansionTypes.repeat:
return target * n_steps
elif expansion_type == ExpansionTypes.repeat_interleave:
expanded_target = []
for i in target:
expanded_target.extend([i] * n_steps)
return expanded_target
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
elif isinstance(target, torch.Tensor) and torch.numel(target) > 1:
if expansion_type == ExpansionTypes.repeat:
return torch.cat([target] * n_steps, dim=0)
elif expansion_type == ExpansionTypes.repeat_interleave:
return target.repeat_interleave(n_steps, dim=0)
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
return target
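# A minimal sketch showing how the two expansion types differ only in ordering:
if __name__ == "__main__":
    _tgt = torch.tensor([3, 7])
    print(_expand_target(_tgt, 2, ExpansionTypes.repeat))             # [3, 7, 3, 7]
    print(_expand_target(_tgt, 2, ExpansionTypes.repeat_interleave))  # [3, 3, 7, 7]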
def _call_custom_attribution_func(
custom_attribution_func, multipliers, inputs, baselines
):
assert callable(custom_attribution_func), (
"`custom_attribution_func`"
" must be a callable function but {} provided".format(
type(custom_attribution_func)
)
)
custom_attr_func_params = signature(custom_attribution_func).parameters
assert len(custom_attr_func_params) in range(1, 4), (
"`custom_attribution_func`" " must take at least one and at most 3 arguments"
)
if len(custom_attr_func_params) == 1:
return custom_attribution_func(multipliers)
elif len(custom_attr_func_params) == 2:
return custom_attribution_func(multipliers, inputs)
elif len(custom_attr_func_params) == 3:
return custom_attribution_func(multipliers, inputs, baselines)
class MaxList:
"""Keep track of N maximal items
Implementation of MaxList:
for keeping track of the N top values of a large collection of items.
Maintains a sorted list of the top N items that can be fetched with
getlist().
Example use:
        ml = MaxList(2, key=lambda x: len(x))
ml.add("Hello World")
ml.add("Mermaid Man!!!!")
ml.add("Why?")
ml.getlist() -> ["Mermaid Man!!!!", "Hello World"]
    If storing values that are not comparable, please provide a key function
    that maps the values to some numeric value.
"""
def __init__(self, size, key=lambda x: x):
self.size = size
self.key = key
self.list = []
def add(self, item):
"""Add an element to the MaxList
Args:
item: the item that you want to add to the MaxList
"""
value = self.key(item)
if len(self.list) < self.size:
if len(self.list) == 0:
self.list.append((value, item))
elif self.list[-1][0] >= value:
self.list.append((value, item))
else:
self._insert(item, value)
if self.list[-1][0] < value:
self._insert(item, value)
def get_list(self):
"""Retrive the list of N maximal items in sorted order
Returns:
list: the sorted list of maximal items
"""
return [item[1] for item in self.list]
def _insert(self, item, value):
if len(self.list) == 0:
self.list.append((value, item))
for i in range(len(self.list)):
if self.list[i][0] < value:
self.list.insert(i, (value, item))
break
self.list = self.list[: self.size]
class Stat:
"""Keep track of statistics for a quantity that is measured live
Implementation of an online statistics tracker, Stat:
For a memory efficient way of keeping track of statistics on a large set of
numbers. Adding numbers to the object will update the values stored in the
object to reflect the statistics of all numbers that the object has seen
so far.
Example usage:
s = Stat()
        s.update([5, 7])
        s.get_mean() -> 6
        s.get_std() -> 1
"""
def __init__(self):
self.count = 0
self.mean = 0
self.mean_squared_error = 0
self.min = float("inf")
self.max = float("-inf")
def _std_size_check(self):
if self.count < 2:
raise Exception(
"Std/Variance is not defined for {} datapoints\
".format(
self.count
)
)
def update(self, x):
"""Update the stats given a new number
Adds x to the running statistics being kept track of, and updates internal
        values that reflect that change.
Args:
x: a numeric value, or a list of numeric values
"""
if isinstance(x, list):
for value in x:
self.update(value)
else:
x = float(x)
self.min = min(self.min, x)
self.max = max(self.max, x)
self.count += 1
delta = x - self.mean
self.mean += delta / self.count
delta2 = x - self.mean
self.mean_squared_error += delta * delta2
def get_stats(self):
"""Retrieves a dictionary of statistics for the values seen.
Returns:
a fully populated dictionary for the statistics that have been
maintained. This output is easy to pipe into a table with a loop over
key value pairs.
"""
self._std_size_check()
        sample_variance = self.mean_squared_error / (self.count - 1)
        variance = self.mean_squared_error / self.count
        return {
            "mean": self.mean,
            "sample_variance": sample_variance,
            "variance": variance,
            "std": variance ** 0.5,
"min": self.min,
"max": self.max,
"count": self.count,
}
def get_std(self):
"""get the std of the statistics kept"""
self._std_size_check()
return (self.mean_squared_error / self.count) ** 0.5
def get_variance(self):
"""get the variance of the statistics kept"""
self._std_size_check()
return self.mean_squared_error / self.count
def get_sample_variance(self):
"""get the sample variance of the statistics kept"""
self._std_size_check()
return self.mean_squared_error / (self.count - 1)
def get_mean(self):
"""get the mean of the statistics kept"""
return self.mean
def get_max(self):
"""get the max of the statistics kept"""
return self.max
def get_min(self):
"""get the min of the statistics kept"""
return self.min
def get_count(self):
"""get the count of the statistics kept"""
return self.count
|
the-stack_0_2730 | import unittest
import matplotlib
import pkmodel as pk
class SolutionTest(unittest.TestCase):
"""
Tests the :class:`Solution` class.
"""
def test_create(self):
"""
Tests Solution creation.
"""
protocol = pk.Protocol("test", 3, 1.1, 2.2, 3.3, 4.4, 5.5, 6, 7, 8, False)
models = [pk.ThreeCompartmentModel(protocol)]
solution = pk.Solution(models, True)
self.assertEqual(solution.models[0], models[0])
def test_graph(self):
"""
Tests Solution graph method.
"""
models = [
pk.TwoCompartmentModel(pk.Protocol("test", 2, 1.1, 2.2, 3.3, 4.4, 5.5, 0, 7, 8, False)),
pk.TwoCompartmentModel(pk.Protocol("test", 2, 1.1, 2.2, 3.3, 4.4, 5.5, 6, 0, 8, False)),
pk.TwoCompartmentModel(pk.Protocol("test", 2, 1.1, 2.2, 3.3, 4.4, 5.5, 6, 7, 8, False)),
pk.ThreeCompartmentModel(pk.Protocol("test", 3, 1.1, 2.2, 3.3, 4.4, 5.5, 0, 7, 8, False)),
pk.ThreeCompartmentModel(pk.Protocol("test", 3, 1.1, 2.2, 3.3, 4.4, 5.5, 6, 0, 8, False)),
pk.ThreeCompartmentModel(pk.Protocol("test", 3, 1.1, 2.2, 3.3, 4.4, 5.5, 6, 7, 8, False)),
]
matplotlib.use("Agg")
solution = pk.Solution(models, False)
solution.graph()
|
the-stack_0_2731 | """
war
War card game written for fun while following the 'Complete Python Developer Certification Course' by Imtiaz Ahmad, on Udemy.
"""
import sys
from setuptools import setup, find_packages
import versioneer
short_description = __doc__.split("\n")
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
try:
with open("README.md", "r") as handle:
long_description = handle.read()
except Exception:
long_description = "\n".join(short_description[2:])
setup(
# Self-descriptive entries which should always be present
name='war',
author='Dumitru-Claudiu Sergentu',
author_email='[email protected]',
description=short_description[0],
long_description=long_description,
long_description_content_type="text/markdown",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='BSD-3-Clause',
# Which Python importable modules should be included when your package is installed
# Handled automatically by setuptools. Use 'exclude' to prevent some specific
# subpackage(s) from being added, if needed
packages=find_packages(),
# Optional include package data to ship with your package
# Customize MANIFEST.in if the general case does not suit your needs
# Comment out this line to prevent the files from being packaged with your software
include_package_data=True,
# Allows `setup.py test` to work correctly with pytest
setup_requires=[] + pytest_runner,
# Additional entries you may want simply uncomment the lines you want and fill in the data
# url='http://www.my_package.com', # Website
# install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment
# platforms=['Linux',
# 'Mac OS-X',
# 'Unix',
# 'Windows'], # Valid platforms your code works on, adjust to your flavor
# python_requires=">=3.5", # Python version restrictions
# Manual control if final package is compressible or not, set False to prevent the .egg from being made
# zip_safe=False,
)
|
the-stack_0_2733 | from dissononce.processing.handshakepatterns.handshakepattern import HandshakePattern
class IK1HandshakePattern(HandshakePattern):
    def __init__(self):
super(IK1HandshakePattern, self).__init__(
'IK1',
responder_pre_message_pattern=('s',),
message_patterns=(
('e', 's'),
('e', 'ee', 'se', 'es')
)
)
|
the-stack_0_2735 | import matplotlib.pyplot as plt
from .artists import kdeplot_op, kde2plot_op
def kdeplot(data, ax=None):
if ax is None:
_, ax = plt.subplots(1, 1, squeeze=True)
kdeplot_op(ax, data)
return ax
def kde2plot(x, y, grid=200, ax=None, **kwargs):
if ax is None:
_, ax = plt.subplots(1, 1, squeeze=True)
kde2plot_op(ax, x, y, grid, **kwargs)
return ax
|
the-stack_0_2736 | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for python and fast tokenizers. Fast tokenizers are provided by HuggingFace's tokenizers library."""
import copy
import functools
import itertools
import json
import logging
import operator
import os
import re
import warnings
from collections import UserDict, defaultdict
from contextlib import contextmanager
from enum import Enum
from typing import Any, Dict, List, MutableMapping, NamedTuple, Optional, Sequence, Tuple, Union
import numpy as np
from tokenizers import AddedToken as AddedTokenFast
from tokenizers import Encoding as EncodingFast
from tokenizers.decoders import Decoder as DecoderFast
from tokenizers.implementations import BaseTokenizer as BaseTokenizerFast
from .file_utils import cached_path, hf_bucket_url, is_remote_url, is_tf_available, is_torch_available, torch_required
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.getLogger(__name__)
NO_PAD_TOKEN_FOR_BATCH_MSG = (
"No padding token is set for this model, therefore no batch can be made with uneven "
"sequences. Set a padding token or adjust the lengths of the sequences building the "
"batch so that every sequence is of the same length."
)
UNEVEN_SEQUENCES_FOR_BATCH_MSG = (
"The sequences building the batch are not of the same size, no tensor "
"can be built. Set `pad_to_max_length=True` to pad the smaller sequences"
"up to the larger sequence's length."
)
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
VERY_LARGE_INTEGER = int(1e30) # This is used to set the max input length for a model with infinite size input
LARGE_INTEGER = int(1e20) # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER
# Define type aliases and NamedTuples
TextInput = str
PreTokenizedInput = List[str]
EncodedInput = List[int]
TextInputPair = Tuple[str, str]
PreTokenizedInputPair = Tuple[List[str], List[str]]
EncodedInputPair = Tuple[List[int], List[int]]
class TensorType(Enum):
PYTORCH = "pt"
TENSORFLOW = "tf"
NUMPY = "np"
class CharSpan(NamedTuple):
""" Character span in the original string
Args:
start: index of the first character in the original string
end: index of the character following the last character in the original string
"""
start: int
end: int
class TokenSpan(NamedTuple):
""" Token span in an encoded string (list of tokens)
Args:
start: index of the first token in the span
end: index of the token following the last token in the span
"""
start: int
end: int
def flatten(x: Sequence):
"""
Flatten the provided (potentially nested) sequence
Args:
x (Sequence): Potentially nested sequence to flatten
Returns:
list: Flattened sequence
"""
return functools.reduce(operator.iconcat, x, [])
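# A minimal usage sketch (illustrative only): flatten removes exactly one level of nesting.
if __name__ == "__main__":
    print(flatten([[1, 2], [3], [4, 5]]))   # [1, 2, 3, 4, 5]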
@contextmanager
def truncate_and_pad(
tokenizer: BaseTokenizerFast,
max_length: int,
stride: int,
strategy: str,
pad_to_max_length: bool,
padding_side: str,
pad_token_id: int,
pad_token_type_id: int,
pad_token: str,
):
""" This contextmanager is in charge of defining the truncation and the padding strategies for fast tokenizers
(provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards.
    This contextmanager assumes the provided tokenizer has no padding / truncation strategy
    before the managed section. If your tokenizer had set a padding / truncation strategy before,
    it will be reset to no padding / truncation when exiting the managed section.
Args:
tokenizer (BaseTokenizerFast): The tokenizer which will be used
max_length (int): The maximum size of the sequence
stride (int): The stride to use when handling overflow
strategy (str): Overflowing logic to use
pad_to_max_length (bool): Boolean indicating if the output needs to be padded up to max_length
padding_side (str): "left" or "right" indicating the direction the output sequence will be padded
pad_token_id (int): The integer representation of the padding token to use
pad_token_type_id (int): The integer representation of the padding token type to use
pad_token (str): The string representation of the padding token to use
"""
# Handle all the truncation and padding stuff
if max_length is not None:
tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)
if pad_to_max_length and (pad_token and pad_token_id >= 0):
tokenizer.enable_padding(
max_length=max_length,
direction=padding_side,
pad_id=pad_token_id,
pad_type_id=pad_token_type_id,
pad_token=pad_token,
)
elif pad_to_max_length:
logger.warning(
"Disabled padding because no padding token set (pad_token: {}, pad_token_id: {}).\n"
"To remove this error, you can add a new pad token and then resize model embedding:\n"
"\ttokenizer.pad_token = '<PAD>'\n\tmodel.resize_token_embeddings(len(tokenizer))".format(
pad_token, pad_token_id
)
)
yield
# TODO(morgan, anthony): once we have a simple way to serialize tokenizers maybe store and restore the state afterward
# to avoid destructing the padding / truncation strategy as we do now.
if max_length is not None:
tokenizer.no_truncation()
if pad_to_max_length and (pad_token and pad_token_id >= 0):
tokenizer.no_padding()
def convert_to_tensors(
batch_outputs: MutableMapping, return_tensors: Union[str, TensorType], prepend_batch_axis: bool = False
) -> MutableMapping:
# Convert to TensorType
if not isinstance(return_tensors, TensorType):
return_tensors = TensorType(return_tensors)
# Get a function reference for the correct framework
if return_tensors == TensorType.TENSORFLOW and is_tf_available():
as_tensor = tf.constant
elif return_tensors == TensorType.PYTORCH and is_torch_available():
as_tensor = torch.tensor
elif return_tensors == TensorType.NUMPY:
as_tensor = np.asarray
else:
raise ImportError(
"Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format(
return_tensors
)
)
# Do the tensor conversion in batch
for key, value in batch_outputs.items():
try:
if prepend_batch_axis:
value = [value]
tensor = as_tensor(value)
            # normalize to 2D: squeeze a singleton leading axis if >2D, or add a batch axis if <2D
if tensor.ndim > 2:
tensor = tensor.squeeze(0)
elif tensor.ndim < 2:
tensor = tensor[None, :]
batch_outputs[key] = tensor
except ValueError:
if None in [item for sequence in value for item in sequence]:
raise ValueError(NO_PAD_TOKEN_FOR_BATCH_MSG)
else:
raise ValueError(UNEVEN_SEQUENCES_FOR_BATCH_MSG)
return batch_outputs
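# A minimal usage sketch (illustrative only): converting a plain batch dict to numpy tensors.
if __name__ == "__main__":
    _batch = {"input_ids": [[101, 2023, 102], [101, 2003, 102]]}
    _converted = convert_to_tensors(_batch, TensorType.NUMPY)
    print(_converted["input_ids"].shape)    # (2, 3)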
class BatchEncoding(UserDict):
""" BatchEncoding hold the output of the encode and batch_encode methods (tokens, attention_masks, etc).
This class is derived from a python Dictionary and can be used as a dictionnary.
In addition, this class expose utility methods to map from word/char space to token space.
Args:
data (:obj:`dict`): Dictionary of lists/arrays returned by the encode/batch_encode methods ('input_ids', 'attention_mask'...)
encoding (:obj:`EncodingFast`, :obj:`list(EncodingFast)`, `optional`, defaults to :obj:`None`):
If the tokenizer is a fast tokenizer which outputs additional informations like mapping from word/char space to token space
the `EncodingFast` instance or list of instance (for batches) hold these informations.
"""
def __init__(
self,
data: Optional[Dict[str, Any]] = None,
encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None,
):
super().__init__(data)
if isinstance(encoding, EncodingFast):
encoding = [encoding]
self._encodings = encoding
def __getitem__(self, item: Union[int, str]) -> EncodingFast:
""" If the key is a string, get the value of the dict associated to `key` ('input_ids', 'attention_mask'...)
If the key is an integer, get the EncodingFast for batch item with index `key`
"""
if isinstance(item, str):
return self.data[item]
elif self._encodings is not None:
return self._encodings[item]
else:
raise KeyError(
"Indexing with integers (to access backend Encoding for a given batch index) "
"is not available when using Python based tokenizers"
)
def __getattr__(self, item: str):
try:
return self.data[item]
except KeyError:
raise AttributeError
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def items(self):
return self.data.items()
# After this point:
# Extended properties and methods only available for fast (Rust-based) tokenizers
# provided by HuggingFace tokenizers library.
@property
def encodings(self) -> Optional[List[EncodingFast]]:
"""
Return the list all encoding from the tokenization process
Returns: List[EncodingFast] or None if input was tokenized through Python (i.e. not fast) tokenizer
"""
return self._encodings
    def tokens(self, batch_index: int = 0) -> List[str]:
if not self._encodings:
raise ValueError("tokens() is not available when using Python based tokenizers")
return self._encodings[batch_index].tokens
def words(self, batch_index: int = 0) -> List[Optional[int]]:
if not self._encodings:
raise ValueError("words() is not available when using Python based tokenizers")
return self._encodings[batch_index].words
def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
""" Get the index of the word corresponding (i.e. comprising) to an encoded token
in a sequence of the batch.
Can be called as:
- self.token_to_word(token_index) if batch size is 1
- self.token_to_word(batch_index, token_index) if batch size is greater than 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e. words are defined by the user). In this case it allows
to easily associate encoded tokens with provided tokenized words.
Args:
batch_or_token_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the token in the sequence
token_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_token_index`, this can be the index
of the token in the sequence.
Returns:
word_index (:obj:`int`):
index of the word in the input sequence.
"""
if not self._encodings:
raise ValueError("token_to_word() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
if batch_index < 0:
batch_index = self._batch_size + batch_index
if token_index < 0:
token_index = self._seq_len + token_index
return self._encodings[batch_index].token_to_word(token_index)
def word_to_tokens(self, batch_or_word_index: int, word_index: Optional[int] = None) -> TokenSpan:
""" Get the encoded token span corresponding to a word in the sequence of the batch.
Token spans are returned as a TokenSpan NamedTuple with:
start: index of the first token
end: index of the token following the last token
Can be called as:
- self.word_to_tokens(word_index) if batch size is 1
- self.word_to_tokens(batch_index, word_index) if batch size is greater or equal to 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e. words are defined by the user). In this case it allows
to easily associate encoded tokens with provided tokenized words.
Args:
batch_or_word_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the word in the sequence
word_index (:obj:`int`, `optional`):
                If a batch index is provided in `batch_or_word_index`, this can be the index
of the word in the sequence.
Returns:
token_span (:obj:`TokenSpan`):
Span of tokens in the encoded sequence.
TokenSpan are NamedTuple with:
start: index of the first token
end: index of the token following the last token
"""
if not self._encodings:
raise ValueError("word_to_tokens() is not available when using Python based tokenizers")
if word_index is not None:
batch_index = batch_or_word_index
else:
batch_index = 0
word_index = batch_or_word_index
if batch_index < 0:
batch_index = self._batch_size + batch_index
if word_index < 0:
word_index = self._seq_len + word_index
return TokenSpan(*(self._encodings[batch_index].word_to_tokens(word_index)))
def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan:
""" Get the character span corresponding to an encoded token in a sequence of the batch.
Character spans are returned as a CharSpan NamedTuple with:
start: index of the first character in the original string associated to the token
end: index of the character following the last character in the original string associated to the token
Can be called as:
- self.token_to_chars(token_index) if batch size is 1
- self.token_to_chars(batch_index, token_index) if batch size is greater or equal to 1
Args:
batch_or_token_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the token in the sequence
token_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_token_index`, this can be the index
of the token or tokens in the sequence.
Returns:
char_span (:obj:`CharSpan`):
Span of characters in the original string.
CharSpan are NamedTuple with:
start: index of the first character in the original string
end: index of the character following the last character in the original string
"""
if not self._encodings:
raise ValueError("token_to_chars() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
return CharSpan(*(self._encodings[batch_index].token_to_chars(token_index)))
def char_to_token(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int:
""" Get the index of the token in the encoded output comprising a character
in the original string for a sequence of the batch.
Can be called as:
- self.char_to_token(char_index) if batch size is 1
- self.char_to_token(batch_index, char_index) if batch size is greater or equal to 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e. words are defined by the user). In this case it allows
to easily associate encoded tokens with provided tokenized words.
Args:
batch_or_char_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
                this can be the index of the character in the original string.
            char_index (:obj:`int`, `optional`):
                If a batch index is provided in `batch_or_char_index`, this can be the index
                of the character in the original string.
Returns:
token_index (:obj:`int`):
Index of the token.
"""
if not self._encodings:
raise ValueError("char_to_token() is not available when using Python based tokenizers")
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return self._encodings[batch_index].char_to_token(char_index)
def word_to_chars(self, batch_or_word_index: int, word_index: Optional[int] = None) -> CharSpan:
""" Get the character span in the original string corresponding to given word in a sequence
of the batch.
Character spans are returned as a CharSpan NamedTuple with:
start: index of the first character in the original string
end: index of the character following the last character in the original string
Can be called as:
- self.word_to_chars(word_index) if batch size is 1
- self.word_to_chars(batch_index, word_index) if batch size is greater or equal to 1
Args:
batch_or_word_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the word in the sequence
word_index (:obj:`int`, `optional`):
                If a batch index is provided in `batch_or_word_index`, this can be the index
of the word in the sequence.
Returns:
char_span (:obj:`CharSpan` or :obj:`List[CharSpan]`):
Span(s) of the associated character or characters in the string.
CharSpan are NamedTuple with:
start: index of the first character associated to the token in the original string
end: index of the character following the last character associated to the token in the original string
"""
if not self._encodings:
raise ValueError("word_to_chars() is not available when using Python based tokenizers")
if word_index is not None:
batch_index = batch_or_word_index
else:
batch_index = 0
word_index = batch_or_word_index
return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index)))
def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int:
""" Get the word in the original string corresponding to a character in the original string of
a sequence of the batch.
Can be called as:
- self.char_to_word(char_index) if batch size is 1
- self.char_to_word(batch_index, char_index) if batch size is greater than 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e. words are defined by the user). In this case it allows
to easily associate encoded tokens with provided tokenized words.
Args:
batch_or_char_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
                this can be the index of the character in the original string.
            char_index (:obj:`int`, `optional`):
                If a batch index is provided in `batch_or_char_index`, this can be the index
                of the character in the original string.
Returns:
token_index (:obj:`int` or :obj:`List[int]`):
Index or indices of the associated encoded token(s).
"""
if not self._encodings:
raise ValueError("char_to_word() is not available when using Python based tokenizers")
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return self._encodings[batch_index].char_to_word(char_index)
@torch_required
def to(self, device: str):
"""Send all values to device by calling v.to(device)"""
self.data = {k: v.to(device) for k, v in self.data.items()}
return self
class SpecialTokensMixin:
""" SpecialTokensMixin is derived by ``PreTrainedTokenizer`` and ``PreTrainedTokenizerFast`` and
handles specific behaviors related to special tokens. In particular, this class hold the
attributes which can be used to directly access to these special tokens in a
model-independant manner and allow to set and update the special tokens.
"""
SPECIAL_TOKENS_ATTRIBUTES = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
"additional_special_tokens",
]
def __init__(self, **kwargs):
self._bos_token = None
self._eos_token = None
self._unk_token = None
self._sep_token = None
self._pad_token = None
self._cls_token = None
self._mask_token = None
self._pad_token_type_id = 0
self._additional_special_tokens = []
for key, value in kwargs.items():
if key in self.SPECIAL_TOKENS_ATTRIBUTES:
if key == "additional_special_tokens":
assert isinstance(value, (list, tuple)) and all(isinstance(t, str) for t in value)
setattr(self, key, value)
elif isinstance(value, AddedTokenFast):
setattr(self, key, str(value))
elif isinstance(value, str):
setattr(self, key, value)
else:
raise TypeError(
"special token {} has to be either str or AddedTokenFast but got: {}".format(key, type(value))
)
@property
def bos_token(self):
""" Beginning of sentence token (string). Log an error if used while not having been set. """
if self._bos_token is None:
logger.error("Using bos_token, but it is not set yet.")
return self._bos_token
@property
def eos_token(self):
""" End of sentence token (string). Log an error if used while not having been set. """
if self._eos_token is None:
logger.error("Using eos_token, but it is not set yet.")
return self._eos_token
@property
def unk_token(self):
""" Unknown token (string). Log an error if used while not having been set. """
if self._unk_token is None:
logger.error("Using unk_token, but it is not set yet.")
return self._unk_token
@property
def sep_token(self):
""" Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
if self._sep_token is None:
logger.error("Using sep_token, but it is not set yet.")
return self._sep_token
@property
def pad_token(self):
""" Padding token (string). Log an error if used while not having been set. """
if self._pad_token is None:
logger.error("Using pad_token, but it is not set yet.")
return self._pad_token
@property
def cls_token(self):
""" Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
if self._cls_token is None:
logger.error("Using cls_token, but it is not set yet.")
return self._cls_token
@property
def mask_token(self):
""" Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
if self._mask_token is None:
logger.error("Using mask_token, but it is not set yet.")
return self._mask_token
@property
def additional_special_tokens(self):
""" All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. """
if self._additional_special_tokens is None:
logger.error("Using additional_special_tokens, but it is not set yet.")
return self._additional_special_tokens
def _maybe_update_backend(self, value):
""" To be overriden by derived class if a backend tokenizer has to be updated. """
pass
@bos_token.setter
def bos_token(self, value):
self._bos_token = value
self._maybe_update_backend([value])
@eos_token.setter
def eos_token(self, value):
self._eos_token = value
self._maybe_update_backend([value])
@unk_token.setter
def unk_token(self, value):
self._unk_token = value
self._maybe_update_backend([value])
@sep_token.setter
def sep_token(self, value):
self._sep_token = value
self._maybe_update_backend([value])
@pad_token.setter
def pad_token(self, value):
self._pad_token = value
self._maybe_update_backend([value])
@cls_token.setter
def cls_token(self, value):
self._cls_token = value
self._maybe_update_backend([value])
@mask_token.setter
def mask_token(self, value):
self._mask_token = value
self._maybe_update_backend([value])
@additional_special_tokens.setter
def additional_special_tokens(self, value):
self._additional_special_tokens = value
self._maybe_update_backend(value)
@property
def bos_token_id(self):
""" Id of the beginning of sentence token in the vocabulary. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.bos_token)
@property
def eos_token_id(self):
""" Id of the end of sentence token in the vocabulary. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.eos_token)
@property
def unk_token_id(self):
""" Id of the unknown token in the vocabulary. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.unk_token)
@property
def sep_token_id(self):
""" Id of the separation token in the vocabulary. E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.sep_token)
@property
def pad_token_id(self):
""" Id of the padding token in the vocabulary. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.pad_token)
@property
def pad_token_type_id(self):
""" Id of the padding token type in the vocabulary."""
return self._pad_token_type_id
@property
def cls_token_id(self):
""" Id of the classification token in the vocabulary. E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.cls_token)
@property
def mask_token_id(self):
""" Id of the mask token in the vocabulary. E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.mask_token)
@property
def additional_special_tokens_ids(self):
""" Ids of all the additional special tokens in the vocabulary (list of integers). Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.additional_special_tokens)
@property
def special_tokens_map(self):
""" A dictionary mapping special token class attribute (cls_token, unk_token...) to their
values ('<unk>', '<cls>'...)
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = attr_value
return set_attr
@property
def all_special_tokens(self):
""" List all the special tokens ('<unk>', '<cls>'...) mapped to class attributes
(cls_token, unk_token...).
"""
all_toks = []
set_attr = self.special_tokens_map
for attr_value in set_attr.values():
all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value])
all_toks = list(set(all_toks))
return all_toks
@property
def all_special_ids(self):
""" List the vocabulary indices of the special tokens ('<unk>', '<cls>'...) mapped to
class attributes (cls_token, unk_token...).
"""
all_toks = self.all_special_tokens
all_ids = self.convert_tokens_to_ids(all_toks)
return all_ids
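# A minimal usage sketch (illustrative only): SpecialTokensMixin on its own, without a tokenizer.
if __name__ == "__main__":
    _m = SpecialTokensMixin(bos_token="<s>", eos_token="</s>", additional_special_tokens=["<extra>"])
    print(_m.special_tokens_map)
    # {'bos_token': '<s>', 'eos_token': '</s>', 'additional_special_tokens': ['<extra>']}
    print(_m.all_special_tokens)   # ['<s>', '</s>', '<extra>'] -- order not guaranteed (built via a set)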
class PreTrainedTokenizer(SpecialTokensMixin):
""" Base class for all tokenizers.
Handle all the shared methods for tokenization and special tokens as well as methods
downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.
    This class also contains the added tokens in a unified way on top of all tokenizers so we don't
have to handle the specific vocabulary augmentation methods of the various underlying
dictionary structures (BPE, sentencepiece...).
Class attributes (overridden by derived classes):
- ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file
required by the model, and as associated values, the filename for saving the associated file (string).
- ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys
being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the
`short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the
associated pretrained vocabulary file.
- ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained
models, and as associated values, the maximum length of the sequence inputs of this model, or None if the
model has no maximum input size.
- ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the
        pretrained models, and as associated values, a dictionary of specific arguments to pass to the
``__init__``method of the tokenizer class for this pretrained model when loading the tokenizer with the
``from_pretrained()`` method.
Args:
- ``model_max_length``: (`Optional`) int: the maximum length in number of tokens for the inputs to the transformer model.
When the tokenizer is loaded with `from_pretrained`, this will be set to the value stored for the associated
            model in ``max_model_input_sizes`` (see above). If no value is provided, or if no associated max_length
            can be found in ``max_model_input_sizes``, it will default to VERY_LARGE_INTEGER (`int(1e30)`).
- ``padding_side``: (`Optional`) string: the side on which the model should have padding applied.
Should be selected between ['right', 'left']
- ``model_input_names``: (`Optional`) List[string]: the list of the forward pass inputs accepted by the
model ("token_type_ids", "attention_mask"...).
- ``bos_token``: (`Optional`) string: a beginning of sentence token.
Will be associated to ``self.bos_token`` and ``self.bos_token_id``
- ``eos_token``: (`Optional`) string: an end of sentence token.
Will be associated to ``self.eos_token`` and ``self.eos_token_id``
- ``unk_token``: (`Optional`) string: an unknown token.
Will be associated to ``self.unk_token`` and ``self.unk_token_id``
- ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence).
Will be associated to ``self.sep_token`` and ``self.sep_token_id``
- ``pad_token``: (`Optional`) string: a padding token.
Will be associated to ``self.pad_token`` and ``self.pad_token_id``
- ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence
leveraging self-attention along the full depth of the model).
Will be associated to ``self.cls_token`` and ``self.cls_token_id``
- ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language
modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id``
- ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens.
Adding all special tokens here ensure they won't be split by the tokenization process.
Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids``
"""
vocab_files_names: Dict[str, str] = {}
pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {}
pretrained_init_configuration: Dict[str, Dict[str, Any]] = {}
max_model_input_sizes: Dict[str, int] = {}
model_input_names: List[str] = ["token_type_ids", "attention_mask"]
padding_side: str = "right"
@property
def vocab_size(self) -> int:
""" Size of the base vocabulary (without the added tokens) """
raise NotImplementedError
@property
def is_fast(self) -> bool:
return False
@property
def max_len(self) -> int:
""" Kept here for backward compatibility.
Now renamed to `model_max_length` to avoid ambiguity.
"""
return self.model_max_length
@property
def max_len_single_sentence(self) -> int:
return self.model_max_length - self.num_special_tokens_to_add(pair=False)
@property
def max_len_sentences_pair(self) -> int:
return self.model_max_length - self.num_special_tokens_to_add(pair=True)
@max_len_single_sentence.setter
def max_len_single_sentence(self, value) -> int:
""" For backward compatibility, allow to try to setup 'max_len_single_sentence' """
if value == self.model_max_length - self.num_special_tokens_to_add(pair=False):
logger.warning(
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
)
else:
raise ValueError(
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
)
@max_len_sentences_pair.setter
def max_len_sentences_pair(self, value) -> int:
""" For backward compatibility, allow to try to setup 'max_len_sentences_pair' """
if value == self.model_max_length - self.num_special_tokens_to_add(pair=True):
logger.warning(
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
)
else:
raise ValueError(
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
)
def get_vocab(self):
""" Returns the vocabulary as a dict of {token: index} pairs. `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the vocab. """
raise NotImplementedError()
def __init__(self, model_max_length=None, **kwargs):
super().__init__(**kwargs)
# For backward compatibility we fallback to set model_max_length from max_len if provided
if "max_len" in kwargs:
warnings.warn(
"Parameter max_len is deprecated and will be removed in a future release. "
"Use model_max_length instead.",
category=FutureWarning,
)
model_max_length = kwargs.pop("max_len")
self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
# Padding side is right by default and overridden in subclasses. If specified in the kwargs, it is changed.
self.padding_side = kwargs.pop("padding_side", self.padding_side)
assert self.padding_side in [
"right",
"left",
], f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
self.model_input_names = kwargs.pop("model_input_names", self.model_input_names)
# Added tokens
self.added_tokens_encoder = {}
self.unique_added_tokens_encoder = set()
self.added_tokens_decoder = {}
# inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
self.init_inputs = ()
self.init_kwargs = {}
def __len__(self):
""" Size of the full vocabulary with the added tokens """
return self.vocab_size + len(self.added_tokens_encoder)
@classmethod
def from_pretrained(cls, *inputs, **kwargs):
r"""
Instantiate a :class:`~transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer.
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
- (not applicable to all derived classes, deprecated) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the vocabulary files and override the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.
kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details.
Examples::
# We can't instantiate directly the base class `PreTrainedTokenizer` so let's show our examples on a derived class: BertTokenizer
# Download vocabulary from S3 and cache.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Download vocabulary from S3 (user-uploaded) and cache.
tokenizer = BertTokenizer.from_pretrained('dbmdz/bert-base-german-cased')
# If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')
# If the tokenizer uses a single vocabulary file, you can point directly to this file
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')
# You can link tokens to special vocabulary when instantiating
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
# You should be sure '<unk>' is in the vocabulary when doing that.
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
assert tokenizer.unk_token == '<unk>'
"""
return cls._from_pretrained(*inputs, **kwargs)
@classmethod
def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
s3_models = list(cls.max_model_input_sizes.keys())
vocab_files = {}
init_configuration = {}
if pretrained_model_name_or_path in s3_models:
# Get the vocabulary from AWS S3 bucket
for file_id, map_list in cls.pretrained_vocab_files_map.items():
vocab_files[file_id] = map_list[pretrained_model_name_or_path]
if (
cls.pretrained_init_configuration
and pretrained_model_name_or_path in cls.pretrained_init_configuration
):
init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path].copy()
else:
# Get the vocabulary from local files
logger.info(
"Model name '{}' not found in model shortcut name list ({}). "
"Assuming '{}' is a path, a model identifier, or url to a directory containing tokenizer files.".format(
pretrained_model_name_or_path, ", ".join(s3_models), pretrained_model_name_or_path
)
)
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
if len(cls.vocab_files_names) > 1:
raise ValueError(
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not supported."
"Use a model identifier or the path to a directory instead."
)
logger.warning(
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated"
)
file_id = list(cls.vocab_files_names.keys())[0]
vocab_files[file_id] = pretrained_model_name_or_path
else:
# At this point pretrained_model_name_or_path is either a directory or a model identifier name
additional_files_names = {
"added_tokens_file": ADDED_TOKENS_FILE,
"special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE,
"tokenizer_config_file": TOKENIZER_CONFIG_FILE,
}
# Look for the tokenizer main vocabulary files + the additional tokens files
for file_id, file_name in {**cls.vocab_files_names, **additional_files_names}.items():
if os.path.isdir(pretrained_model_name_or_path):
full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
if not os.path.exists(full_file_name):
logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
full_file_name = None
else:
full_file_name = hf_bucket_url(
pretrained_model_name_or_path, filename=file_name, use_cdn=False
)
vocab_files[file_id] = full_file_name
# Get files from url, cache, or disk depending on the case
try:
resolved_vocab_files = {}
for file_id, file_path in vocab_files.items():
if file_path is None:
resolved_vocab_files[file_id] = None
else:
resolved_vocab_files[file_id] = cached_path(
file_path,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
except EnvironmentError:
if pretrained_model_name_or_path in s3_models:
msg = "Couldn't reach server at '{}' to download vocabulary files."
else:
msg = (
"Model name '{}' was not found in tokenizers model name list ({}). "
"We assumed '{}' was a path or url to a directory containing vocabulary files "
"named {}, but couldn't find such vocabulary files at this path or url.".format(
pretrained_model_name_or_path,
", ".join(s3_models),
pretrained_model_name_or_path,
list(cls.vocab_files_names.values()),
)
)
raise EnvironmentError(msg)
if all(full_file_name is None for full_file_name in resolved_vocab_files.values()):
raise EnvironmentError(
"Model name '{}' was not found in tokenizers model name list ({}). "
"We assumed '{}' was a path, a model identifier, or url to a directory containing vocabulary files "
"named {} but couldn't find such vocabulary files at this path or url.".format(
pretrained_model_name_or_path,
", ".join(s3_models),
pretrained_model_name_or_path,
list(cls.vocab_files_names.values()),
)
)
for file_id, file_path in vocab_files.items():
if file_path == resolved_vocab_files[file_id]:
logger.info("loading file {}".format(file_path))
else:
logger.info("loading file {} from cache at {}".format(file_path, resolved_vocab_files[file_id]))
# Prepare tokenizer initialization kwargs
# Did we save some inputs and kwargs to reload?
tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None)
if tokenizer_config_file is not None:
with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
init_kwargs = json.load(tokenizer_config_handle)
saved_init_inputs = init_kwargs.pop("init_inputs", ())
if not init_inputs:
init_inputs = saved_init_inputs
else:
init_kwargs = init_configuration
# Update with newly provided kwargs
init_kwargs.update(kwargs)
# Set max length if needed
if pretrained_model_name_or_path in cls.max_model_input_sizes:
# if we're using a pretrained model, ensure the tokenizer
# won't index sequences longer than the number of positional embeddings
model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path]
if model_max_length is not None and isinstance(model_max_length, (int, float)):
init_kwargs["model_max_length"] = min(init_kwargs.get("model_max_length", int(1e30)), model_max_length)
# Merge resolved_vocab_files arguments in init_kwargs.
added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None)
for args_name, file_path in resolved_vocab_files.items():
if args_name not in init_kwargs:
init_kwargs[args_name] = file_path
if special_tokens_map_file is not None:
with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
special_tokens_map = json.load(special_tokens_map_handle)
for key, value in special_tokens_map.items():
if key not in init_kwargs:
init_kwargs[key] = value
# Instantiate tokenizer.
try:
tokenizer = cls(*init_inputs, **init_kwargs)
except OSError:
raise OSError(
"Unable to load vocabulary from file. "
"Please check that the provided vocabulary is accessible and not corrupted."
)
# Save inputs and kwargs for saving and re-loading with ``save_pretrained``
tokenizer.init_inputs = init_inputs
tokenizer.init_kwargs = init_kwargs
# update unique_added_tokens_encoder with special tokens for correct tokenization
tokenizer.unique_added_tokens_encoder.update(set(tokenizer.all_special_tokens))
# Add supplementary tokens.
if added_tokens_file is not None:
with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
added_tok_encoder = json.load(added_tokens_handle)
added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
tokenizer.added_tokens_encoder.update(added_tok_encoder)
tokenizer.added_tokens_decoder.update(added_tok_decoder)
tokenizer.unique_added_tokens_encoder.update(set(tokenizer.added_tokens_encoder.keys()))
return tokenizer
def save_pretrained(self, save_directory):
""" Save the tokenizer vocabulary files together with:
- added tokens,
- special-tokens-to-class-attributes-mapping,
- tokenizer instantiation positional and keywords inputs (e.g. do_lower_case for Bert).
Warning: This won't save modifications you may have applied to the tokenizer after the instantiation
(e.g. modifying tokenizer.do_lower_case after creation).
This method makes sure the full tokenizer can then be re-loaded using the
:func:`~transformers.PreTrainedTokenizer.from_pretrained` class method.
"""
if not os.path.isdir(save_directory):
logger.error("Saving directory ({}) should be a directory".format(save_directory))
return
special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE)
tokenizer_config = copy.deepcopy(self.init_kwargs)
if len(self.init_inputs) > 0:
tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
for file_id in self.vocab_files_names.keys():
tokenizer_config.pop(file_id, None)
with open(tokenizer_config_file, "w", encoding="utf-8") as f:
f.write(json.dumps(tokenizer_config, ensure_ascii=False))
with open(special_tokens_map_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))
if len(self.added_tokens_encoder) > 0:
with open(added_tokens_file, "w", encoding="utf-8") as f:
out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False)
f.write(out_str)
vocab_files = self.save_vocabulary(save_directory)
return vocab_files + (special_tokens_map_file, added_tokens_file)
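# Illustrative round-trip sketch (commented out; not part of the library source).
# It assumes a derived tokenizer such as BertTokenizer, the 'bert-base-uncased'
# checkpoint, and a writable local directory './my_tokenizer/' (hypothetical path).
# save_pretrained() writes the vocabulary, added tokens, special tokens map and
# tokenizer config; from_pretrained() can then rebuild an equivalent tokenizer:
#
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   tokenizer.add_tokens(['new_tok1'])
#   os.makedirs('./my_tokenizer/', exist_ok=True)   # save_pretrained expects an existing directory
#   tokenizer.save_pretrained('./my_tokenizer/')
#   reloaded = BertTokenizer.from_pretrained('./my_tokenizer/')
#   assert len(reloaded) == len(tokenizer)          # added tokens are restored as well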
def save_vocabulary(self, save_directory) -> Tuple[str]:
""" Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
and special token mappings.
Please use :func:`~transformers.PreTrainedTokenizer.save_pretrained` `()` to save the full
Tokenizer state if you want to reload it using the :func:`~transformers.PreTrainedTokenizer.from_pretrained`
class method.
"""
raise NotImplementedError
def add_tokens(self, new_tokens: Union[str, List[str]]) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the
vocabulary, they are added to it with indices starting from length of the current vocabulary.
Args:
new_tokens: string or list of strings. Each string is a token to add. Tokens are only added if they are not
already in the vocabulary (tested by checking if the tokenizer assigns the index of the ``unk_token`` to them).
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
"""
if not new_tokens:
return 0
if not isinstance(new_tokens, list):
new_tokens = [new_tokens]
tokens_to_add = []
for token in new_tokens:
assert isinstance(token, str)
if self.init_kwargs.get("do_lower_case", False) and token not in self.all_special_tokens:
token = token.lower()
if (
token != self.unk_token
and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)
and token not in tokens_to_add
):
tokens_to_add.append(token)
logger.info("Adding %s to the vocabulary", token)
added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(tokens_to_add))
added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
self.added_tokens_encoder.update(added_tok_encoder)
self.unique_added_tokens_encoder = set(self.added_tokens_encoder.keys()).union(set(self.all_special_tokens))
self.added_tokens_decoder.update(added_tok_decoder)
return len(tokens_to_add)
def num_special_tokens_to_add(self, pair=False):
"""
Returns the number of added tokens when encoding a sequence with special tokens.
Note:
This encodes inputs and checks the number of added tokens, and is therefore not efficient. Do not put this
inside your training loop.
Args:
pair: Returns the number of added tokens in the case of a sequence pair if set to True, returns the
number of added tokens in the case of a single sequence if set to False.
Returns:
Number of tokens added to sequences
"""
token_ids_0 = []
token_ids_1 = []
return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))
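# Illustrative sketch (commented out; assumes the 'bert-base-uncased' checkpoint):
# a BERT-style tokenizer uses the template [CLS] A [SEP] for a single sequence and
# [CLS] A [SEP] B [SEP] for a pair, so the expected counts are 2 and 3. Other
# model-specific tokenizers may add a different number of special tokens.
#
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   assert tokenizer.num_special_tokens_to_add(pair=False) == 2   # [CLS], [SEP]
#   assert tokenizer.num_special_tokens_to_add(pair=True) == 3    # [CLS], [SEP], [SEP]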
def add_special_tokens(self, special_tokens_dict):
"""
Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them
to class attributes. If special tokens are NOT in the vocabulary, they are added
to it (indexed starting from the last index of the current vocabulary).
Using `add_special_tokens` will ensure your special tokens can be used in several ways:
- special tokens are carefully handled by the tokenizer (they are never split)
- you can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts.
When possible, special tokens are already registered for provided pretrained models (ex: BertTokenizer cls_token is already registered to be '[CLS]' and XLM's one is also registered to be '</s>')
Args:
special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes:
[``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
``additional_special_tokens``].
Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assigns the index of the ``unk_token`` to them).
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to add a new classification token to GPT-2
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
special_tokens_dict = {'cls_token': '<CLS>'}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer.cls_token == '<CLS>'
"""
if not special_tokens_dict:
return 0
added_tokens = 0
for key, value in special_tokens_dict.items():
assert key in self.SPECIAL_TOKENS_ATTRIBUTES
if key == "additional_special_tokens":
assert isinstance(value, (list, tuple)) and all(isinstance(t, str) for t in value)
added_tokens += self.add_tokens(value)
else:
assert isinstance(value, str)
added_tokens += self.add_tokens([value])
logger.info("Assigning %s to the %s key of the tokenizer", value, key)
setattr(self, key, value)
return added_tokens
def tokenize(self, text: TextInput, **kwargs):
""" Converts a string in a sequence of tokens (string), using the tokenizer.
Split in words for word-based vocabulary or sub-words for sub-word-based
vocabularies (BPE/SentencePieces/WordPieces).
Take care of added tokens.
Args:
text (:obj:`string`): The sequence to be encoded.
**kwargs (:obj: `dict`): Arguments passed to the model-specific `prepare_for_tokenization` preprocessing method.
"""
all_special_tokens = self.all_special_tokens
text = self.prepare_for_tokenization(text, **kwargs)
# TODO: should this be in the base class?
def lowercase_text(t):
# convert non-special tokens to lowercase
escaped_special_toks = [re.escape(s_tok) for s_tok in all_special_tokens]
pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
return re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), t)
if self.init_kwargs.get("do_lower_case", False):
text = lowercase_text(text)
def split_on_token(tok, text):
result = []
split_text = text.split(tok)
for i, sub_text in enumerate(split_text):
sub_text = sub_text.rstrip()
if i == 0 and not sub_text:
result += [tok]
elif i == len(split_text) - 1:
if sub_text:
result += [sub_text]
else:
pass
else:
if sub_text:
result += [sub_text]
result += [tok]
return result
def split_on_tokens(tok_list, text):
if not text.strip():
return []
if not tok_list:
return self._tokenize(text)
tokenized_text = []
text_list = [text]
for tok in tok_list:
tokenized_text = []
for sub_text in text_list:
if sub_text not in self.unique_added_tokens_encoder:
tokenized_text += split_on_token(tok, sub_text)
else:
tokenized_text += [sub_text]
text_list = tokenized_text
return list(
itertools.chain.from_iterable(
(
self._tokenize(token) if token not in self.unique_added_tokens_encoder else [token]
for token in tokenized_text
)
)
)
added_tokens = self.unique_added_tokens_encoder
tokenized_text = split_on_tokens(added_tokens, text)
return tokenized_text
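# Illustrative sketch (commented out; assumes the 'bert-base-uncased' checkpoint and
# the hypothetical added token 'new_tok1'): added tokens are split out first and are
# never passed to the model-specific `_tokenize`, so they survive sub-word
# tokenization intact. Exact token strings depend on the vocabulary.
#
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   tokenizer.add_tokens(['new_tok1'])
#   tokens = tokenizer.tokenize('hello new_tok1 world')
#   assert 'new_tok1' in tokens          # kept as a single token, not sub-word split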
def _tokenize(self, text, **kwargs):
""" Converts a string in a sequence of tokens (string), using the tokenizer.
Split in words for word-based vocabulary or sub-words for sub-word-based
vocabularies (BPE/SentencePieces/WordPieces).
Do NOT take care of added tokens.
"""
raise NotImplementedError
def convert_tokens_to_ids(self, tokens):
""" Converts a token string (or a sequence of tokens) in a single integer id
(or a sequence of ids), using the vocabulary.
"""
if tokens is None:
return None
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
return ids
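# Illustrative sketch (commented out; assumes the 'bert-base-uncased' checkpoint):
# convert_tokens_to_ids accepts a single token or a list of tokens; tokens added via
# add_tokens resolve through `added_tokens_encoder`, and unknown tokens map to the
# id of `unk_token`.
#
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('hello world'))
#   single_id = tokenizer.convert_tokens_to_ids(tokenizer.unk_token)
#   assert single_id == tokenizer.unk_token_id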
def _convert_token_to_id_with_added_voc(self, token):
if token is None:
return None
if token in self.added_tokens_encoder:
return self.added_tokens_encoder[token]
return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
raise NotImplementedError
def encode(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs
):
"""
Converts a string to a sequence of ids (integers), using the tokenizer and vocabulary. Adds the model-specific
special tokens (such as beginning of sequence, end of sequence, sequence separator).
If specifying ``add_special_tokens=False``, same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.
Args:
text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method)
text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the `tokenize` method) or a list of integers (tokenized string ids using the
`convert_tokens_to_ids` method)
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
If set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary.
You can set it to the maximal input size of the model with `max_length = tokenizer.model_max_length`.
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
String selected in the following options:
- 'longest_first' (default): Iteratively reduce the input sequences until the total length is under max_length,
removing a token from the longest sequence at each step (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the
model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
which can be set to the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
**kwargs: passed to the `self.tokenize()` method
"""
encoded_inputs = self.encode_plus(
text,
text_pair=text_pair,
max_length=max_length,
add_special_tokens=add_special_tokens,
stride=stride,
truncation_strategy=truncation_strategy,
pad_to_max_length=pad_to_max_length,
return_tensors=return_tensors,
**kwargs,
)
return encoded_inputs["input_ids"]
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
is_pretokenized: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
**kwargs
) -> BatchEncoding:
"""
Returns a dictionary containing the encoded sequence or sequence pair and additional information:
the mask for sequence classification and the overflowing elements if a ``max_length`` is specified.
Args:
text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]` (the latter only for not-fast tokenizers)):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method)
text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the `tokenize` method) or a list of integers (tokenized string ids using the
`convert_tokens_to_ids` method)
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
If set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary
You can set it to the maximal input size of the model with `max_length = tokenizer.model_max_length`.
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
String selected in the following options:
- 'longest_first' (default): Iteratively reduce the input sequences until the total length is under max_length,
removing a token from the longest sequence at each step (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the
model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
which can be set to the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Set to True to indicate the input is already tokenized
return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
return_token_type_ids (:obj:`bool`, `optional`, defaults to :obj:`None`):
Whether to return token type IDs. If left to the default, will return the token type IDs according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are token type IDs? <../glossary.html#token-type-ids>`_
return_attention_mask (:obj:`bool`, `optional`, defaults to :obj:`None`):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are attention masks? <../glossary.html#attention-mask>`__
return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return overflowing token information (default False).
return_special_tokens_mask (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return special tokens mask information (default False).
return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return (char_start, char_end) for each token (default False).
If using Python's tokenizer, this method will raise NotImplementedError.
This one is only available on fast tokenizers inheriting from PreTrainedTokenizerFast.
**kwargs: passed to the `self.tokenize()` method
Return:
A Dictionary of shape::
{
input_ids: list[int],
token_type_ids: list[int] if return_token_type_ids is True (default)
attention_mask: list[int] if return_attention_mask is True (default)
overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[int] if ``add_special_tokens`` is set to ``True``
and return_special_tokens_mask is True
}
With the fields:
- ``input_ids``: list of token ids to be fed to a model
- ``token_type_ids``: list of token type ids to be fed to a model
- ``attention_mask``: list of indices specifying which tokens should be attended to by the model
- ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
- ``num_truncated_tokens``: number of overflowing tokens when a ``max_length`` is specified
- ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 1 specifying special added
tokens and 0 specifying sequence tokens.
"""
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text, add_special_tokens=add_special_tokens, **kwargs)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers."
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
# Throw an error if we can pad because there is no padding token
if pad_to_max_length and self.pad_token_id is None:
raise ValueError(
"Unable to set proper padding strategy as the tokenizer does not have a padding token. "
"In this case please set the `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` "
"or add a new pad token via the function add_special_tokens if you want to use a padding strategy"
)
first_ids = get_input_ids(text)
second_ids = get_input_ids(text_pair) if text_pair is not None else None
return self.prepare_for_model(
first_ids,
pair_ids=second_ids,
max_length=max_length,
pad_to_max_length=pad_to_max_length,
add_special_tokens=add_special_tokens,
stride=stride,
truncation_strategy=truncation_strategy,
return_tensors=return_tensors,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
prepend_batch_axis=return_tensors is not None,
)
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
is_pretokenized: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_masks: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_masks: bool = False,
return_offsets_mapping: bool = False,
return_lengths: bool = False,
**kwargs
) -> BatchEncoding:
"""
Returns a dictionary containing the encoded sequence or sequence pair and additional information:
the mask for sequence classification and the overflowing elements if a ``max_length`` is specified.
Args:
batch_text_or_text_pairs (:obj:`List[str]`, :obj:`List[Tuple[str, str]]`,
:obj:`List[List[str]]`, :obj:`List[Tuple[List[str], List[str]]]`,
and for not-fast tokenizers, also:
:obj:`List[List[int]]`, :obj:`List[Tuple[List[int], List[int]]]`):
Batch of sequences or pair of sequences to be encoded.
This can be a list of string/string-sequences/int-sequences or a list of pair of
string/string-sequences/int-sequence (see details in encode_plus)
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
If set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
String selected in the following options:
- 'longest_first' (default): Iteratively reduce the input sequences until the total length is under max_length,
removing a token from the longest sequence at each step (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the
model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
which can be set to the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Set to True to indicate the input is already tokenized
return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
return_token_type_ids (:obj:`bool`, `optional`, defaults to :obj:`None`):
Whether to return token type IDs. If left to the default, will return the token type IDs according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are token type IDs? <../glossary.html#token-type-ids>`_
return_attention_masks (:obj:`bool`, `optional`, defaults to :obj:`None`):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are attention masks? <../glossary.html#attention-mask>`__
return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return overflowing token information (default False).
return_special_tokens_masks (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return special tokens mask information (default False).
return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return (char_start, char_end) for each token (default False).
If using Python's tokenizer, this method will raise NotImplementedError. This one is only available on
Rust-based tokenizers inheriting from PreTrainedTokenizerFast.
return_lengths (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set the resulting dictionary will include the length of each encoded inputs
**kwargs: passed to the `self.tokenize()` method
Return:
A Dictionary of shape::
{
input_ids: list[List[int]],
token_type_ids: list[List[int]] if return_token_type_ids is True (default)
attention_mask: list[List[int]] if return_attention_mask is True (default)
overflowing_tokens: list[List[int]] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: List[int] if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[List[int]] if ``add_special_tokens`` is set to ``True`` and return_special_tokens_mask is True
}
With the fields:
- ``input_ids``: list of token ids to be fed to a model
- ``token_type_ids``: list of token type ids to be fed to a model
- ``attention_mask``: list of indices specifying which tokens should be attended to by the model
- ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
- ``num_truncated_tokens``: number of overflowing tokens when a ``max_length`` is specified
- ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 1 specifying special added
tokens and 0 specifying sequence tokens.
"""
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text, add_special_tokens=add_special_tokens, **kwargs)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
# Throw an error if we can pad because there is no padding token
if pad_to_max_length and self.pad_token_id is None:
raise ValueError(
"Unable to set proper padding strategy as the tokenizer does not have a padding token. In this case please set the `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via the function add_special_tokens if you want to use a padding strategy"
)
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers."
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
input_ids = []
for ids_or_pair_ids in batch_text_or_text_pairs:
if isinstance(ids_or_pair_ids, (list, tuple)) and len(ids_or_pair_ids) == 2 and not is_pretokenized:
ids, pair_ids = ids_or_pair_ids
else:
ids, pair_ids = ids_or_pair_ids, None
first_ids = get_input_ids(ids)
second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
input_ids.append((first_ids, second_ids))
if max_length is None and pad_to_max_length:
def total_sequence_length(input_pairs):
first_ids, second_ids = input_pairs
return len(first_ids) + (
self.num_special_tokens_to_add()
if second_ids is None
else (len(second_ids) + self.num_special_tokens_to_add(pair=True))
)
max_length = max([total_sequence_length(ids) for ids in input_ids])
batch_outputs = {}
for first_ids, second_ids in input_ids:
# Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by
# the model. It adds special tokens, truncates sequences if overflowing while taking into account
# the special tokens and manages a window stride for overflowing tokens
outputs = self.prepare_for_model(
first_ids,
pair_ids=second_ids,
max_length=max_length,
pad_to_max_length=pad_to_max_length,
add_special_tokens=add_special_tokens,
stride=stride,
truncation_strategy=truncation_strategy,
return_attention_mask=return_attention_masks,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_masks,
return_lengths=return_lengths,
return_tensors=None, # We will convert the whole batch to tensors at the end
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
if return_tensors is not None:
convert_to_tensors(batch_outputs, return_tensors)
return BatchEncoding(batch_outputs)
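# Illustrative sketch (commented out; assumes the 'bert-base-uncased' checkpoint):
# when pad_to_max_length=True and no max_length is given, batch_encode_plus() pads
# every example to the longest sequence in the batch, then stacks the outputs per key.
#
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   batch = tokenizer.batch_encode_plus(
#       ['a short sentence', 'a noticeably longer sentence than the first one'],
#       pad_to_max_length=True,
#   )
#   lengths = {len(ids) for ids in batch['input_ids']}
#   assert len(lengths) == 1   # all examples padded to the same length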
def prepare_for_model(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
max_length: Optional[int] = None,
add_special_tokens: bool = True,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_lengths: bool = False,
prepend_batch_axis: bool = False,
) -> BatchEncoding:
""" Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model.
It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens
Args:
ids: list of tokenized input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
max_length: maximum length of the returned list. Will truncate by taking into account the special tokens.
add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
stride: window stride for overflowing tokens. Can be useful to remove edge effects when using sequential
lists of inputs. The overflowing tokens will contain a part of the previous window of tokens.
truncation_strategy: string selected in the following options:
- 'longest_first' (default): Iteratively reduce the input sequences until the total length is under max_length,
removing a token from the longest sequence at each step (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length: if set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the model's max length.
The tokenizer padding sides are handled by the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
or PyTorch torch.Tensor instead of a list of python integers.
return_token_type_ids: (optional) Set to False to avoid returning token_type_ids (default: set to model specifics).
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics)
return_overflowing_tokens: (optional) Set to True to return overflowing token information (default False).
return_special_tokens_mask: (optional) Set to True to return special tokens mask information (default False).
return_lengths (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set the resulting dictionary will include the length of each encoded inputs
prepend_batch_axis (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set the resulting object will feature an extra dim at position 0.
This can be seen as an unsqueezing operator.
Return:
A Dictionary of shape::
{
input_ids: list[int],
token_type_ids: list[int] if return_token_type_ids is True (default)
overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[int] if ``add_special_tokens`` is set to ``True`` and return_special_tokens_mask is True
length: int if return_lengths is True
}
With the fields:
- ``input_ids``: list of token ids to be fed to a model
- ``token_type_ids``: list of token type ids to be fed to a model
- ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
- ``num_truncated_tokens``: number of overflowing tokens when a ``max_length`` is specified
- ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 1 specifying special added
tokens and 0 specifying sequence tokens.
- ``length``: this is the length of ``input_ids``
"""
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
# Load from model defaults
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
encoded_inputs = {}
# Truncation: Handle max sequence length
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
if max_length and total_len > max_length:
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_length,
truncation_strategy=truncation_strategy,
stride=stride,
)
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_length
# Add special tokens
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else [])
# Build output dictionary
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
# Check lengths
assert max_length is None or len(encoded_inputs["input_ids"]) <= max_length
if max_length is None and len(encoded_inputs["input_ids"]) > self.model_max_length:
logger.warning(
"Token indices sequence length is longer than the specified maximum sequence length "
"for this model ({} > {}). Running this sequence through the model will result in "
"indexing errors".format(len(ids), self.model_max_length)
)
# Padding
needs_to_be_padded = pad_to_max_length and (
max_length
and len(encoded_inputs["input_ids"]) < max_length
or max_length is None
and len(encoded_inputs["input_ids"]) < self.model_max_length
and self.model_max_length <= LARGE_INTEGER
)
if pad_to_max_length and max_length is None and self.model_max_length > LARGE_INTEGER:
logger.warning(
"Sequence can't be padded as no maximum length is specified and the model maximum length is too high."
)
if needs_to_be_padded:
difference = (max_length if max_length is not None else self.model_max_length) - len(
encoded_inputs["input_ids"]
)
if self.padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) + [0] * difference
if return_token_type_ids:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference
elif self.padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + [1] * len(encoded_inputs["input_ids"])
if return_token_type_ids:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
else:
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])
if return_lengths:
encoded_inputs["length"] = len(encoded_inputs["input_ids"])
# Prepare model inputs as tensors if asked
if return_tensors is not None:
convert_to_tensors(encoded_inputs, return_tensors, prepend_batch_axis)
return BatchEncoding(encoded_inputs)
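# Illustrative sketch (commented out; assumes the 'bert-base-uncased' checkpoint):
# prepare_for_model() is the single-example workhorse used by encode_plus() and
# batch_encode_plus(); it expects ids already produced by tokenize() +
# convert_tokens_to_ids().
#
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('hello world'))
#   out = tokenizer.prepare_for_model(ids, max_length=8, pad_to_max_length=True)
#   assert len(out['input_ids']) == 8   # [CLS] hello world [SEP] plus padding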
def prepare_for_tokenization(self, text: str, **kwargs) -> str:
""" Performs any necessary transformations before tokenization """
return text
def truncate_sequences(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
num_tokens_to_remove: int = 0,
truncation_strategy: str = "longest_first",
stride: int = 0,
) -> Tuple[List[int], List[int], List[int]]:
""" Truncates a sequence pair in place to the maximum length.
Args:
ids: list of tokenized input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
num_tokens_to_remove (:obj:`int`, `optional`, defaults to ``0``):
number of tokens to remove using the truncation strategy
truncation_strategy: string selected in the following options:
- 'longest_first' (default): Iteratively reduce the input sequences until the total length is under max_length,
removing a token from the longest sequence at each step (when there is a pair of input sequences).
Overflowing tokens only contain overflow from the first sequence.
- 'only_first': Only truncate the first sequence. Raises an error if the first sequence is shorter than or equal to num_tokens_to_remove.
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
"""
if num_tokens_to_remove <= 0:
return ids, pair_ids, []
if truncation_strategy == "longest_first":
overflowing_tokens = []
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
overflowing_tokens = [ids[-1]] + overflowing_tokens
ids = ids[:-1]
else:
pair_ids = pair_ids[:-1]
window_len = min(len(ids), stride)
if window_len > 0:
overflowing_tokens = ids[-window_len:] + overflowing_tokens
elif truncation_strategy == "only_first":
assert len(ids) > num_tokens_to_remove
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]
ids = ids[:-num_tokens_to_remove]
elif truncation_strategy == "only_second":
assert pair_ids is not None and len(pair_ids) > num_tokens_to_remove
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
elif truncation_strategy == "do_not_truncate":
raise ValueError("Input sequence are too long for max_length. Please select a truncation strategy.")
else:
raise ValueError(
"Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']"
)
return (ids, pair_ids, overflowing_tokens)
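# Illustrative sketch (commented out; the tokenizer checkpoint is only needed to get
# an instance, the logic is entirely in the base class): with the default
# 'longest_first' strategy one id is removed from the currently longest sequence at
# each step, and (without a stride) the overflow collects the ids dropped from the
# first sequence.
#
#   tok = BertTokenizer.from_pretrained('bert-base-uncased')
#   ids, pair_ids = [1, 2, 3, 4, 5], [6, 7]
#   new_ids, new_pair_ids, overflow = tok.truncate_sequences(
#       ids, pair_ids=pair_ids, num_tokens_to_remove=3
#   )
#   assert new_ids == [1, 2] and new_pair_ids == [6, 7] and overflow == [3, 4, 5]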
def create_token_type_ids_from_sequences(self, token_ids_0: List, token_ids_1: Optional[List] = None) -> List[int]:
if token_ids_1 is None:
return len(token_ids_0) * [0]
return [0] * len(token_ids_0) + [1] * len(token_ids_1)
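# Illustrative note (commented out): the base implementation above labels the first
# sequence with 0s and the optional second sequence with 1s, ignoring special tokens:
#
#   # base behaviour for ids [1, 2, 3] and pair ids [4, 5]:
#   #   [0, 0, 0] + [1, 1]  ->  [0, 0, 0, 1, 1]
#
# Model-specific tokenizers (e.g. BertTokenizer) override this hook so that the
# segment ids also cover their [CLS]/[SEP] positions.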
def build_inputs_with_special_tokens(self, token_ids_0: List, token_ids_1: Optional[List] = None) -> List:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks
by concatenating and adding special tokens. This implementation does not add special tokens.
"""
if token_ids_1 is None:
return token_ids_0
return token_ids_0 + token_ids_1
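# Illustrative sketch (commented out; assumes the 'bert-base-uncased' checkpoint):
# derived tokenizers override this hook to apply their model-specific template. For
# BertTokenizer the pair template is [CLS] A [SEP] B [SEP], so the built input is
# exactly num_special_tokens_to_add(pair=True) ids longer than A + B.
#
#   tok = BertTokenizer.from_pretrained('bert-base-uncased')
#   ids_a = tok.convert_tokens_to_ids(tok.tokenize('first sentence'))
#   ids_b = tok.convert_tokens_to_ids(tok.tokenize('second one'))
#   built = tok.build_inputs_with_special_tokens(ids_a, ids_b)
#   assert len(built) == len(ids_a) + len(ids_b) + tok.num_special_tokens_to_add(pair=True)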
def get_special_tokens_mask(
self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0: list of ids (must not contain special tokens)
token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
for sequence pairs
already_has_special_tokens: (default False) Set to True if the token list is already formatted with
special tokens for the model
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
def convert_ids_to_tokens(
self, ids: Union[int, List[int]], skip_special_tokens: bool = False
) -> Union[str, List[str]]:
""" Converts a single index (integer) to a token (str), or a sequence of indices
to a sequence of tokens, using the vocabulary and added tokens.
Args:
skip_special_tokens: Don't decode special tokens (self.all_special_tokens). Default: False
"""
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
index = int(index)
if skip_special_tokens and index in self.all_special_ids:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
def _convert_id_to_token(self, index: int) -> str:
raise NotImplementedError
def convert_tokens_to_string(self, tokens: List[str]) -> str:
""" Converts a sequence of tokens (string) in a single string.
The most simple way to do it is ' '.join(self.convert_ids_to_tokens(token_ids))
but we often want to remove sub-word tokenization artifacts at the same time.
"""
return " ".join(self.convert_ids_to_tokens(tokens))
def decode(
self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True
) -> str:
"""
Converts a sequence of ids (integers) to a string, using the tokenizer and vocabulary
with options to remove special tokens and clean up tokenization spaces.
Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
Args:
token_ids: list of tokenized input ids. Can be obtained using the `encode` or `encode_plus` methods.
skip_special_tokens: if set to True, special tokens are removed from the output.
clean_up_tokenization_spaces: if set to True, will clean up the tokenization spaces.
"""
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_tokens:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
current_sub_text = []
sub_texts.append(token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
text = " ".join(sub_texts)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
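# Illustrative sketch (commented out; assumes the 'bert-base-uncased' checkpoint):
# decode() is the rough inverse of encode(); skip_special_tokens drops [CLS]/[SEP]-style
# markers and clean_up_tokenization_spaces re-attaches punctuation. The exact surface
# form depends on the vocabulary.
#
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   ids = tokenizer.encode("Hello, world!")
#   text = tokenizer.decode(ids, skip_special_tokens=True)
#   # expected to be close to "hello, world!" for an uncased vocabulary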
def batch_decode(self, sequences: List[List[int]], **kwargs) -> List[str]:
return [self.decode(seq, **kwargs) for seq in sequences]
@staticmethod
def clean_up_tokenization(out_string: str) -> str:
""" Clean up a list of simple English tokenization artifacts like spaces before punctuations and abreviated forms.
"""
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string
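# Illustrative sketch (commented out): clean_up_tokenization() only undoes the extra
# spaces introduced by joining tokens with " "; it is a static method, so it can be
# called on the class itself.
#
#   cleaned = PreTrainedTokenizer.clean_up_tokenization("do n't stop , please !")
#   assert cleaned == "don't stop, please!"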
class PreTrainedTokenizerFast(PreTrainedTokenizer):
""" Base class for all fast tokenizers (wrapping HuggingFace tokenizers library).
Inherits from PreTrainedTokenizer.
Handles all the shared methods for tokenization and special tokens, as well as methods for
downloading/caching/loading pretrained tokenizers and for adding tokens to the vocabulary.
This class also contains the added tokens in a unified way on top of all tokenizers so we don't
have to handle the specific vocabulary augmentation methods of the various underlying
dictionary structures (BPE, sentencepiece...).
Class attributes (overridden by derived classes):
- ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file
required by the model, and as associated values, the filename for saving the associated file (string).
- ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys
being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the
`short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the
associated pretrained vocabulary file.
- ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained
models, and as associated values, the maximum length of the sequence inputs of this model, or None if the
model has no maximum input size.
- ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the
          pretrained models, and as associated values, a dictionary of specific arguments to pass to the
          ``__init__`` method of the tokenizer class for this pretrained model when loading the tokenizer with the
``from_pretrained()`` method.
Args:
        - ``tokenizer`` (`BaseTokenizerFast`): A Fast tokenizer from the HuggingFace tokenizers library (backed by a low-level Rust implementation)
- ``model_max_length``: (`Optional`) int: the maximum length in number of tokens for the inputs to the transformer model.
When the tokenizer is loaded with `from_pretrained`, this will be set to the value stored for the associated
          model in ``max_model_input_sizes`` (see above). If no value is provided, or if no associated max_length
          can be found in ``max_model_input_sizes``, this will default to VERY_LARGE_INTEGER (`int(1e30)`).
- ``padding_side``: (`Optional`) string: the side on which the model should have padding applied.
Should be selected between ['right', 'left']
- ``model_input_names``: (`Optional`) List[string]: the list of the forward pass inputs accepted by the
model ("token_type_ids", "attention_mask"...).
- ``bos_token``: (`Optional`) string: a beginning of sentence token.
Will be associated to ``self.bos_token`` and ``self.bos_token_id``
- ``eos_token``: (`Optional`) string: an end of sentence token.
Will be associated to ``self.eos_token`` and ``self.eos_token_id``
- ``unk_token``: (`Optional`) string: an unknown token.
Will be associated to ``self.unk_token`` and ``self.unk_token_id``
- ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence).
Will be associated to ``self.sep_token`` and ``self.sep_token_id``
- ``pad_token``: (`Optional`) string: a padding token.
Will be associated to ``self.pad_token`` and ``self.pad_token_id``
- ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence
leveraging self-attention along the full depth of the model).
Will be associated to ``self.cls_token`` and ``self.cls_token_id``
- ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language
modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id``
- ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens.
          Adding all special tokens here ensures they won't be split by the tokenization process.
Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids``
"""
def __init__(self, tokenizer: BaseTokenizerFast, **kwargs):
if not isinstance(tokenizer, BaseTokenizerFast):
raise ValueError(
"Tokenizer should be an instance of a Tokenizer " "provided by HuggingFace tokenizers library."
)
self._tokenizer: BaseTokenizerFast = tokenizer
# Initialize all the rest of the kwargs
super().__init__(**kwargs)
@property
def backend_tokenizer(self) -> BaseTokenizerFast:
return self._tokenizer
@property
def decoder(self) -> DecoderFast:
return self._tokenizer._tokenizer.decoder
@property
def is_fast(self) -> bool:
return True
@property
def vocab_size(self) -> int:
return self._tokenizer.get_vocab_size(with_added_tokens=False)
def __len__(self) -> int:
return self._tokenizer.get_vocab_size(with_added_tokens=True)
def _maybe_update_backend(self, value):
""" Update the backend fast tokenizer.
Override method from base class SpecialTokensMixin """
self._tokenizer.add_special_tokens(value)
def _convert_encoding(
self,
encoding: EncodingFast,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
) -> Dict[str, Any]:
""" Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict.
Overflowing tokens are converted to additional examples (like batches) so the output values of
the dict are lists (overflows) of lists (tokens).
If return_tensors is not None, these lists of lists are converted to 2-D tensors
for input_ids, token_type_ids and attention_mask.
Output shape: (overflows, sequence length)
"""
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
if return_overflowing_tokens and encoding.overflowing is not None:
encodings = [encoding] + encoding.overflowing
else:
encodings = [encoding]
encoding_dict = defaultdict(list)
for e in encodings:
encoding_dict["input_ids"].append(e.ids)
if return_token_type_ids:
encoding_dict["token_type_ids"].append(e.type_ids)
if return_attention_mask:
encoding_dict["attention_mask"].append(e.attention_mask)
if return_special_tokens_mask:
encoding_dict["special_tokens_mask"].append(e.special_tokens_mask)
if return_offsets_mapping:
encoding_dict["offset_mapping"].append(e.offsets)
if return_tensors is not None:
encoding_dict = convert_to_tensors(encoding_dict, return_tensors)
return encoding_dict
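    # Output sketch for a single encoding with one overflow and return_tensors=None
    # (which keys are present depends on the return_* flags):
    #
    #     {"input_ids":      [[...sequence...], [...overflow sequence...]],
    #      "attention_mask": [[...],            [...]]}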
    def _convert_token_to_id_with_added_voc(self, token: str) -> int:
index = self._tokenizer.token_to_id(token)
if index is None:
return self.unk_token_id
return index
def _convert_id_to_token(self, index: int) -> Optional[str]:
return self._tokenizer.id_to_token(int(index))
def get_vocab(self):
return self._tokenizer.get_vocab(True)
def convert_tokens_to_string(self, tokens: List[int], skip_special_tokens: bool = False) -> str:
return self._tokenizer.decode(tokens, skip_special_tokens)
def add_tokens(self, new_tokens: List[Union[str, AddedTokenFast]]) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the
vocabulary, they are added to it with indices starting from length of the current vocabulary.
Args:
            new_tokens: string, or list of strings or ``AddedTokenFast``. Each string is a token to add.
                Tokens are only added if they are not already in the vocabulary. ``AddedTokenFast`` wraps a string
                token to let you personalize its behavior (whether this token should only match against a single
                word, whether it should strip all potential whitespace on the left side, whether it should strip
                all potential whitespace on the right side, ...).
                See details for AddedToken in the HuggingFace tokenizers library.
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
"""
if isinstance(new_tokens, str):
new_tokens = [new_tokens]
return self._tokenizer.add_tokens(new_tokens)
def add_special_tokens(self, special_tokens_dict: dict) -> int:
# Map special tokens to class attributes (self.pad_token...)
super().add_special_tokens(special_tokens_dict)
        # In the backend tokenizer, the only specificities of special tokens are that
# - they will never be processed by the model, and
# - they will be removed while decoding.
# But they are not mapped to special attributes in the backend so we can just
# send a list.
tokens = []
for token in special_tokens_dict.values():
if isinstance(token, list):
tokens += token
else:
tokens += [token]
num_added_tokens = self._tokenizer.add_special_tokens(tokens)
return num_added_tokens
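    # Usage sketch (the token strings are placeholders):
    #
    #     num_added = tokenizer.add_special_tokens(
    #         {"pad_token": "<pad>", "additional_special_tokens": ["<ctx>", "<qry>"]})
    #     # the backend will never split these tokens and will drop them when decoding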
def num_special_tokens_to_add(self, pair: bool = False) -> int:
return self._tokenizer.num_special_tokens_to_add(pair)
def tokenize(
self, text: TextInput, pair: Optional[TextInput] = None, add_special_tokens: bool = False
) -> List[str]:
return self._tokenizer.encode(text, pair, add_special_tokens).tokens
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair]
],
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
is_pretokenized: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_lengths: bool = False,
**kwargs
) -> BatchEncoding:
if not isinstance(batch_text_or_text_pairs, list):
raise ValueError(
"batch_text_or_text_pairs has to be a list (got {})".format(type(batch_text_or_text_pairs))
)
# Needed if we have to return a tensor
pad_to_max_length = pad_to_max_length or (return_tensors is not None and len(batch_text_or_text_pairs) > 1)
        # Throw an error if we can't pad because there is no padding token
if pad_to_max_length and self.pad_token_id is None:
raise ValueError("Unable to set proper padding strategy as the tokenizer does not have a padding token")
# Set the truncation and padding strategy and restore the initial configuration
with truncate_and_pad(
tokenizer=self._tokenizer,
max_length=max_length,
stride=stride,
strategy=truncation_strategy,
pad_to_max_length=pad_to_max_length,
padding_side=self.padding_side,
pad_token_id=self.pad_token_id if self._pad_token is not None else None,
pad_token_type_id=self.pad_token_type_id,
pad_token=self._pad_token,
):
# Check for the pretokenized path
if is_pretokenized:
encodings = []
                # Iterate over each sample (we don't know yet if they are pairs or simple inputs)
for i, sample in enumerate(batch_text_or_text_pairs):
if not isinstance(sample, (list, tuple)):
raise TypeError(
"batch_encode_plus(..., is_pretokenized=True) requires batch_text_or_text_pairs "
"to be either List[List[str]] or List[Tuple[List[str], List[str]]] but sample at "
"index {} is of type {}".format(i, type(sample))
)
# Test if we have a pair of sentences by checking the depth of nesting
is_pair = bool(len(sample) > 0 and isinstance(sample[0], (list, tuple)))
# Take care of the first sequence - we multi-thread over the words
encodings_text = EncodingFast.merge(
self._tokenizer.encode_batch(sample[0] if is_pair else sample, add_special_tokens=False),
growing_offsets=True,
)
# Take care of the second sequence if we have a pair
if is_pair:
encodings_pair = EncodingFast.merge(
self._tokenizer.encode_batch([("", s) for s in sample[1]], add_special_tokens=False),
growing_offsets=True,
)
else:
encodings_pair = None
# Post-process - truncate/pad and add special tokens
encoding = self._tokenizer.post_process(encodings_text, encodings_pair, add_special_tokens)
encodings.append(encoding)
# Classical path with strings input
else:
# Avoid thread overhead if only one example.
if len(batch_text_or_text_pairs) == 1:
if isinstance(batch_text_or_text_pairs[0], (tuple, list)):
encodings = self._tokenizer.encode(
*batch_text_or_text_pairs[0], add_special_tokens=add_special_tokens
)
else:
encodings = self._tokenizer.encode(
batch_text_or_text_pairs[0], add_special_tokens=add_special_tokens
)
encodings = [encodings]
else:
encodings = self._tokenizer.encode_batch(
batch_text_or_text_pairs, add_special_tokens=add_special_tokens
)
# Convert encoding to dict
# `Tokens` has type: List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]]
# with nested dimensions corresponding to batch, overflows, sequence length
tokens = [
self._convert_encoding(
encoding=encoding,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
)
for encoding in encodings
]
# Sanitize the output to have dict[list] from list[dict]
sanitized = {}
for key in tokens[0].keys():
# To List[List[List[int]]] of shape (batch, overflows, sequence length)
stack = [e for item in tokens for e in item[key]]
if return_tensors == "tf":
stack = tf.stack(stack, axis=0)
elif return_tensors == "pt":
stack = torch.stack(stack, dim=0)
# elif not return_tensors and len(stack) == 1:
# stack = stack[0]
sanitized[key] = stack
# If returning overflowing tokens, we need to return a mapping
# from the batch idx to the original sample
if return_overflowing_tokens:
overflow_to_sample_mapping = flatten([[i] * len(enc["input_ids"]) for i, enc in enumerate(tokens)])
sanitized["overflow_to_sample_mapping"] = overflow_to_sample_mapping
return BatchEncoding(sanitized, encodings)
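    # Input sketch for the pretokenized path above (placeholder tokens): each sample is
    # either a list of words, or a pair of word lists, e.g.
    #
    #     tokenizer.batch_encode_plus([["Hello", "world"],
    #                                  (["How", "are"], ["you", "?"])],
    #                                 is_pretokenized=True)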
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput]] = None,
add_special_tokens: bool = True,
max_length: Optional[int] = None,
pad_to_max_length: bool = False,
stride: int = 0,
truncation_strategy: str = "longest_first",
is_pretokenized: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
**kwargs
) -> BatchEncoding:
        # Check for the pretokenized path (i.e. [token1, token2, ..., tokenN] -> [id1, id2, ..., idN])
if is_pretokenized:
if isinstance(text, list) and len(text) > 0:
                # Encode through encode_batch with sequences of only one word each, which will be merged afterwards
encoding = self._tokenizer.encode_batch(text, add_special_tokens=False)
encoding = EncodingFast.merge(encoding, growing_offsets=True)
# Let's do the same for pairs if provided
if isinstance(text_pair, list):
                    # We prepend an empty string before each word so that the encoding is aware the content is a pair
encoding_pair = self._tokenizer.encode_batch(
[("", p) for p in text_pair], add_special_tokens=False
)
encoding_pair = EncodingFast.merge(encoding_pair, growing_offsets=True)
elif text_pair is None:
encoding_pair = None
else:
raise TypeError(
"encode_plus(..., is_pretokenized=True) requires text and text_pair to be List[str] "
"but got (text={}, text_pair={})".format(type(text), type(text_pair))
)
# Post process and if asked to do so, insert special tokens where needed
encoding = self._tokenizer.post_process(encoding, encoding_pair, add_special_tokens)
batched_output = BatchEncoding(
self._convert_encoding(
encoding,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
),
encoding,
)
else:
raise TypeError(
"encode_plus(..., is_pretokenized=True) requires text to be List[str] "
"but got (text={}, text_pair={})".format(type(text), type(text_pair))
)
else:
batched_input = [(text, text_pair)] if text_pair else [text]
batched_output = self.batch_encode_plus(
batched_input,
add_special_tokens=add_special_tokens,
max_length=max_length,
stride=stride,
truncation_strategy=truncation_strategy,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
pad_to_max_length=pad_to_max_length,
**kwargs,
)
            # If return_tensors is None, we can remove the leading batch axis
if not return_tensors:
batched_output = BatchEncoding(
{
key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
for key, value in batched_output.items()
},
batched_output.encodings,
)
return batched_output
def decode(
self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True
) -> str:
text = self._tokenizer.decode(token_ids, skip_special_tokens)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
def save_vocabulary(self, save_directory: str) -> Tuple[str]:
if os.path.isdir(save_directory):
files = self._tokenizer.save_model(save_directory)
else:
folder, file = os.path.split(os.path.abspath(save_directory))
files = self._tokenizer.save_model(folder, name=file)
return tuple(files)
def trim_batch(
input_ids, pad_token_id, attention_mask=None,
):
"""Remove columns that are populated exclusively by pad_token_id"""
keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
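# Example sketch (assumes torch tensors, consistent with the torch.stack usage above):
#
#     input_ids = torch.tensor([[101, 7, 0, 0],
#                               [101, 8, 9, 0]])
#     trimmed = trim_batch(input_ids, pad_token_id=0)
#     # trimmed.shape == (2, 3): the all-pad last column is dropped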
|
the-stack_0_2737 | from ups_byt_test import *
import time
file_name = 'inout_0.4_test.txt'
# time formats
format_time_old = "%W %j %A %d.%m.%Y %H:%M:%S"
format_time_new = "%d.%m.%Y %H:%M:%S"
ups_data = {}
for key in ups_member_card:
    ups_data[key] = {
        "inout_log": [],
    }
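# Each line of the input file is expected to look like (name/status are placeholders):
#
#     <member_name> <IN|OUT> 06 042 Monday 11.02.2019 08:15:30
#
# i.e. name, status, then a timestamp matching format_time_old ("%W %j %A %d.%m.%Y %H:%M:%S").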
with open(file_name) as fn:
for log_row, log_line in enumerate(fn):
parse_line = log_line.strip().split(" ")
parse_name = parse_line[0]
parse_status = parse_line[1]
time_log = " ".join(parse_line[2:])
if parse_name in ups_data.keys():
parse_time_raw = time.strptime(time_log, format_time_old)
time_new_format = time.strftime(format_time_new, parse_time_raw)
time_seconds = time.mktime(parse_time_raw)
time_log_status = "{} {}".format(parse_status, time_new_format)
ups_data[parse_name]["inout_log"].append(time_log_status)
|
the-stack_0_2738 | #!/usr/bin/env python3
import subprocess
import jinja2
num_redis_hosts = 3
# create proxy config file
template = open('proxy/envoy.yaml.j2').read()
config = jinja2.Template(template).render(num_redis_hosts = num_redis_hosts)
envoy_yaml = open('proxy/envoy.yaml', 'w')
envoy_yaml.write(config)
# start containers
shell_cmd = 'docker-compose up --build -d --scale redis={}'.format(num_redis_hosts)
print(shell_cmd)
# subprocess.run(shell_cmd, shell=True, check=True)
|
the-stack_0_2739 | """Data models."""
from attr import attrib, attrs
@attrs
class Card:
"""Card.
created An ISO 8601 timestamp for when the card was created
cvv Three digit cvv printed on the back of the card
funding See FundingAccount
exp_month Two digit (MM) expiry month
exp_year Four digit (YYYY) expiry year
hostname Hostname of card’s locked merchant (will be empty if not applicable)
last_four Last four digits of the card number
memo Friendly name to identify the card
pan Sixteen digit card number
spend_limit Amount (in cents) to limit approved authorizations.
Transaction requests above the spend limit will be declined
spend_limit_duration TRANSACTION, MONTHLY, ANNUALLY, FOREVER
state OPEN, PAUSED, CLOSED
token Globally unique identifier
type SINGLE_USE, MERCHANT_LOCKED, UNLOCKED
"""
created = attrib()
cvv = attrib()
funding = attrib()
exp_month = attrib()
exp_year = attrib()
hostname = attrib()
last_four = attrib()
memo = attrib()
pan = attrib()
spend_limit = attrib()
spend_limit_duration = attrib()
state = attrib()
token = attrib()
type = attrib()
@attrs
class Event:
"""Event.
A single card transaction may include multiple events that affect the
transaction state and lifecycle.
amount Amount of the transaction event
created Date and time this event entered the system
result APPROVED or decline reason. See below for full enumeration
token Globally unique identifier
type AUTHORIZATION, AUTHORIZATION_ADVICE, CLEARING, VOID, RETURN
"""
amount = attrib()
created = attrib()
result = attrib()
token = attrib()
type = attrib()
@attrs
class FundingAccount:
"""Funding Account.
account_name Account name identifying the funding source. In some cases
this may be the last four digits of the account number
token Globally unique identifier
type Type of funding source, see enumerations for list
"""
account_name = attrib()
token = attrib()
type = attrib()
@attrs
class Merchant:
"""Merchant.
acceptor_id Unique identifier to identify the payment card acceptor
city City of card acceptor
country Country of card acceptor
descriptor Short description of card acceptor
mcc Merchant category code
state Geographic state of card acceptor
"""
acceptor_id = attrib()
city = attrib()
country = attrib()
descriptor = attrib()
mcc = attrib()
state = attrib()
@attrs
class Funding:
"""Funding.
funding A list of objects that describe how this transaction was funded,
with the amount represented in cents. A reference to the funding
account for the card that made this transaction may appear here
and the token will match the token for the funding account in
the card field. If any promotional credit was used in paying for
this transaction, its type will be PROMO.
"""
amount = attrib()
token = attrib()
type = attrib()
@attrs
class Transaction:
"""Transaction.
amount Authorization amount (in cents) of the transaction. This may change
over time
card See Card schema definition
created Date and time when the transaction first occurred
events A list of all events that have modified this transaction
funding See Funding schema definition
merchant See Merchant schema definition
result APPROVED or decline reason. See below for full enumeration
settled_amount Amount (in cents) of the transaction that has been settled.
This may change over time
status PENDING, VOIDED, SETTLING, SETTLED, BOUNCED
token Globally unique identifier
"""
amount = attrib()
card = attrib()
created = attrib()
events = attrib()
funding = attrib()
merchant = attrib()
result = attrib()
settled_amount = attrib()
status = attrib()
token = attrib()
|
the-stack_0_2741 | # fast.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/framework
import logging
import fastpinball
import time
from mpf.system.timing import Timing
from mpf.system.platform import Platform
class HardwarePlatform(Platform):
"""Platform class for the FAST hardware controller.
Parameters
----------
    machine : MachineController
A reference to the MachineController instance
"""
def __init__(self, machine):
super(HardwarePlatform, self).__init__(machine)
self.log = logging.getLogger('FAST Platform')
self.log.debug("Configuring machine for FAST hardware.")
# ----------------------------------------------------------------------
# Platform-specific hardware features. WARNING: Do not edit these. They
# are based on what the FAST hardware can and cannot do.
self.features['max_pulse'] = 255 # todo
self.features['hw_timer'] = True
self.features['hw_rule_coil_delay'] = False # todo
self.features['variable_recycle_time'] = True # todo
self.features['variable_debounce_time'] = True # todo
self.features['hw_enable_auto_disable'] = True
# Make the platform features available to everyone
self.machine.config['Platform'] = self.features
# ----------------------------------------------------------------------
self.hw_rules = dict()
# Set up the connection to the FAST controller
self.log.info("Initializing FAST Pinball Controller interface...")
ports = list()
if ('port0_name' in self.machine.config['Fast'] and
'port0_baud' in self.machine.config['Fast']):
ports.append((self.machine.config['Fast']['port0_name'],
self.machine.config['Fast']['port0_baud']))
if ('port1_name' in self.machine.config['Fast'] and
'port1_baud' in self.machine.config['Fast']):
ports.append((self.machine.config['Fast']['port1_name'],
self.machine.config['Fast']['port1_baud']))
if ('port2_name' in self.machine.config['Fast'] and
'port2_baud' in self.machine.config['Fast']):
ports.append((self.machine.config['Fast']['port2_name'],
self.machine.config['Fast']['port2_baud']))
self.log.debug("FAST Ports: %s", ports)
if ('main_port' in self.machine.config['Fast'] and
'led_port' in self.machine.config['Fast'] and
'dmd_port' in self.machine.config['Fast']):
port_assignments = (self.machine.config['Fast']['main_port'],
self.machine.config['Fast']['led_port'],
self.machine.config['Fast']['dmd_port'])
else:
self.log.error("Error in fast config. Entries needed for main_port"
" and led_port and dmd_port.")
quit()
self.fast = fastpinball.fpOpen(ports, port_assignments)
# We need to setup a timer to get the initial switch reads, so we just
# do this one at 1 sec now. It will be overwritten later when the
# run loop starts
fastpinball.fpTimerConfig(self.fast, 1000000)
fastpinball.fpReadAllSwitches(self.fast)
event = fastpinball.fpGetEventObject()
fastpinball.fpGetEventType(event)
fastpinball.fpEventPoll(self.fast, event)
self.log.info("FAST Pinball Controller interface connected")
if 'config_number_format' not in self.machine.config['Fast']:
self.machine.config['Fast']['config_number_format'] = 'int'
self.machine_type = (
self.machine.config['Hardware']['DriverBoards'].upper())
if self.machine_type == 'WPC':
self.log.debug("Configuring the FAST Controller for WPC driver "
"boards")
elif self.machine_type == 'FAST':
self.log.debug("Configuring FAST Controller for FAST driver boards.")
self.wpc_switch_map = {
'S11':'00', 'S12':'01', 'S13':'02', 'S14':'03',
'S15':'04', 'S16':'05', 'S17':'06', 'S18':'07',
'S21':'08', 'S22':'09', 'S23':'10', 'S24':'11',
'S25':'12', 'S26':'13', 'S27':'14', 'S28':'15',
'S31':'16', 'S32':'17', 'S33':'18', 'S34':'19',
'S35':'20', 'S36':'21', 'S37':'22', 'S38':'23',
'S41':'24', 'S42':'25', 'S43':'26', 'S44':'27',
'S45':'28', 'S46':'29', 'S47':'30', 'S48':'31',
'S51':'32', 'S52':'33', 'S53':'34', 'S54':'35',
'S55':'36', 'S56':'37', 'S57':'38', 'S58':'39',
'S61':'40', 'S62':'41', 'S63':'42', 'S64':'43',
'S65':'44', 'S66':'45', 'S67':'46', 'S68':'47',
'S71':'48', 'S72':'49', 'S73':'50', 'S74':'51',
'S75':'52', 'S76':'53', 'S77':'54', 'S78':'55',
'S81':'56', 'S82':'57', 'S83':'58', 'S84':'59',
'S85':'60', 'S86':'61', 'S87':'62', 'S88':'63',
'S91':'64', 'S92':'65', 'S93':'66', 'S94':'67',
'S95':'68', 'S96':'69', 'S97':'70', 'S98':'71',
'SD1':'80', 'SD2':'81', 'SD3':'82', 'SD4':'83',
'SD5':'84', 'SD6':'85', 'SD7':'86', 'SD8':'87',
'DIP1':'88', 'DIP2':'89', 'DIP3':'90',
'DIP4':'91', 'DIP5':'92', 'DIP6':'93',
'DIP7':'94', 'DIP8':'95',
'SF1':'96', 'SF2':'97', 'SF3':'98', 'SF4':'99',
'SF5':'100', 'SF6':'101', 'SF7':'102',
'SF8':'103',
}
self.wpc_light_map = {
'L11':'00', 'L12':'01', 'L13':'02', 'L14':'03',
'L15':'04', 'L16':'05', 'L17':'06', 'L18':'07',
            'L21':'08', 'L22':'09', 'L23':'10', 'L24':'11',
'L25':'12', 'L26':'13', 'L27':'14', 'L28':'15',
'L31':'16', 'L32':'17', 'L33':'18', 'L34':'19',
'L35':'20', 'L36':'21', 'L37':'22', 'L38':'23',
'L41':'24', 'L42':'25', 'L43':'26', 'L44':'27',
'L45':'28', 'L46':'29', 'L47':'30', 'L48':'31',
'L51':'32', 'L52':'33', 'L53':'34', 'L54':'35',
'L55':'36', 'L56':'37', 'L57':'38', 'L58':'39',
'L61':'40', 'L62':'41', 'L63':'42', 'L64':'43',
            'L65':'44', 'L66':'45', 'L67':'46', 'L68':'47',
'L71':'48', 'L72':'49', 'L73':'50', 'L74':'51',
'L75':'52', 'L76':'53', 'L77':'54', 'L78':'55',
'L81':'56', 'L82':'57', 'L83':'58', 'L84':'59',
'L85':'60', 'L86':'61', 'L87':'62', 'L88':'63',
}
self.wpc_driver_map = {
'C01':'00', 'C02':'01', 'C03':'02', 'C04':'03',
'C05':'04', 'C06':'05', 'C07':'06', 'C08':'07',
'C09':'08', 'C10':'09', 'C11':'10', 'C12':'11',
'C13':'12', 'C14':'13', 'C15':'14', 'C16':'15',
'C17':'16', 'C18':'17', 'C19':'18', 'C20':'19',
'C21':'20', 'C22':'21', 'C23':'22', 'C24':'23',
'C25':'24', 'C26':'25', 'C27':'26', 'C28':'27',
'FLRM':'32', 'FLRH':'33', 'FLLM':'34',
'FLLH':'35', 'FURM':'36', 'FURH':'37',
'FULM':'38', 'FULH':'39',
}
self.wpc_gi_map = {'G01':'00', 'G02':'01', 'G03':'02', 'G04':'03',
'G05':'04', 'G06':'05', 'G07':'06', 'G08':'07',
}
def timer_initialize(self):
self.log.debug("Initializing the FAST hardware timer for %sHz",
Timing.HZ)
fastpinball.fpTimerConfig(self.fast,
int(Timing.secs_per_tick * 1000000))
# timer tick is in microsecs
def configure_driver(self, config, device_type='coil'):
# If we have WPC driver boards, look up the switch number
if self.machine_type == 'WPC':
config['number'] = int(self.wpc_driver_map.get(
config['number_str']))
if 'connection' not in config:
config['connection'] = 0 # local driver (default for WPC)
else:
config['connection'] = 1 # network driver
# If we have fast driver boards, we need to make sure we have ints
elif self.machine_type == 'FAST':
if self.machine.config['Fast']['config_number_format'] == 'hex':
config['number'] = int(config['number_str'], 16)
# Now figure out the connection type
if 'connection' not in config:
config['connection'] = 1 # network driver (default for FAST)
else:
config['connection'] = 0 # local driver
# convert the driver number into a tuple which is:
# (driver number, connection type)
config['number'] = (config['number'], config['connection'])
return FASTDriver(config['number'], self.fast), config['number']
def configure_switch(self, config):
"""Configures the switch object for a FAST Pinball controller.
FAST Controllers support two types of switches: local and network. Local
switches are switches that are connected to the FAST controller board
itself, and network switches are those connected to a FAST I/O board.
MPF needs to know which type of switch is this is. You can specify the
switch's connection type in the config file via the "connection"
setting (either 'local' or 'network'.
If a connection type is not specified, this method will use some
intelligence to try to figure out which default should be used.
If the DriverBoard type is 'fast', then it assumes the default is
'network'. If it's anything else (wpc, system11, bally, etc.) then it
assumes the connection type is 'local'. Connection types can be mixed
and matched.
"""
if self.machine_type == 'WPC': # translate switch number to FAST switch
config['number'] = int(self.wpc_switch_map.get(
config['number_str']))
if 'connection' not in config:
config['connection'] = 0 # local switch (default for WPC)
else:
config['connection'] = 1 # network switch
elif self.machine_type == 'FAST':
if 'connection' not in config:
config['connection'] = 1 # network switch (default for FAST)
else:
config['connection'] = 0 # local switch
if self.machine.config['Fast']['config_number_format'] == 'hex':
config['number'] = int(config['number_str'], 16)
        # convert the switch number into a tuple which is:
# (switch number, connection)
config['number'] = (config['number'], config['connection'])
if 'debounce_on' not in config:
if 'default_debounce_on_ms' in self.machine.config['Fast']:
config['debounce_on'] = self.machine.config['Fast']['default_debounce_on_ms']
else:
config['debounce_on'] = 20
if 'debounce_off' not in config:
if 'default_debounce_off_ms' in self.machine.config['Fast']:
config['debounce_off'] = self.machine.config['Fast']['default_debounce_off_ms']
else:
config['debounce_off'] = 20
self.log.debug("FAST Switch hardware tuple: %s", config['number'])
switch = FASTSwitch(config['number'], config['debounce_on'],
config['debounce_off'], self.fast)
state = fastpinball.fpReadSwitch(self.fast, config['number'][0],
config['number'][1])
# Return the switch object and an integer of its current state.
# 1 = active, 0 = inactive
return switch, config['number'], state
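    # Config sketch for configure_switch above (keys are the ones read by the method;
    # values are placeholders). Note that merely *defining* 'connection' flips the
    # switch away from the platform default, regardless of the value given:
    #
    #     {'number_str': 'S13', 'debounce_on': 20, 'debounce_off': 20}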
def configure_led(self, config):
# if the LED number is in <channel> - <led> format, convert it to a
# FAST hardware number
if '-' in config['number_str']:
num = config['number_str'].split('-')
            config['number'] = int(num[0]) * 64 + int(num[1])
else:
config['number'] = str(config['number'])
return FASTDirectLED(config['number'], self.fast)
def configure_gi(self, config):
if self.machine_type == 'WPC': # translate switch number to FAST switch
config['number'] = int(self.wpc_gi_map.get(config['number_str']))
return FASTGIString(config['number'], self.fast), config['number']
def configure_matrixlight(self, config):
if self.machine_type == 'WPC': # translate switch number to FAST switch
config['number'] = int(self.wpc_light_map.get(config['number_str']))
elif self.machine.config['Fast']['config_number_format'] == 'hex':
config['number'] = int(config['number_str'], 16)
return FASTMatrixLight(config['number'], self.fast), config['number']
def hw_loop(self):
"""Loop code which checks the controller for any events (switch state
changes or notification that a DMD frame was updated).
"""
fast_events = fastpinball.fpGetEventObject()
self.log.debug("Starting the hardware loop")
loop_start_time = time.time() - .01
num_loops = 0
while self.machine.done is False:
self.machine.loop_rate = int(num_loops /
(time.time() - loop_start_time))
fastpinball.fpEventPoll(self.fast, fast_events)
eventType = fastpinball.fpGetEventType(fast_events)
# eventType options:
# 0 = none
# 1 = local switch active
# 2 = local switch inactive
# 3 = network switch active
# 4 = network switch inactive
# 5 = local switch cache has been updated
# 6 = network switch cache has been updated
# 7 = timer tick
if eventType == 0:
continue
elif eventType == 7: # timer_tick
num_loops += 1
self.machine.timer_tick()
elif eventType == 1: # local switch has gone active
self.machine.switch_controller.process_switch(state=1,
num=(fastpinball.fpGetEventSwitchID(fast_events), 0))
elif eventType == 2: # local switch has gone inactive
self.machine.switch_controller.process_switch(state=0,
num=(fastpinball.fpGetEventSwitchID(fast_events), 0))
elif eventType == 3: # network switch has gone active
self.machine.switch_controller.process_switch(state=1,
num=(fastpinball.fpGetEventSwitchID(fast_events), 1))
elif eventType == 4: # network switch has gone inactive
self.machine.switch_controller.process_switch(state=0,
num=(fastpinball.fpGetEventSwitchID(fast_events), 1))
else:
if num_loops != 0:
self.log.info("Hardware loop speed: %sHz",
self.machine.loop_rate)
def _do_set_hw_rule(self,
sw,
sw_activity,
coil_action_ms, # 0 = disable, -1 = hold forever
coil=None,
pulse_ms=0,
pwm_on=0,
pwm_off=0,
delay=0,
recycle_time=0,
debounced=True,
drive_now=False):
"""Used to write (or update) a hardware rule to the FAST controller.
*Hardware Rules* are used to configure the hardware controller to
automatically change driver states based on switch changes. These rules
are completely handled by the hardware (i.e. with no interaction from
the Python game code). They're used for things that you want to happen
fast, like firing coils when flipper buttons are pushed, slingshots, pop
bumpers, etc.
You can overwrite existing hardware rules at any time to change or
remove them.
Parameters
----------
sw : switch object
Which switch you're creating this rule for. The parameter is a
            reference to the switch object itself.
sw_activity : int
Do you want this coil to fire when the switch becomes active
(1) or inactive (0)
coil_action_ms : int
The total time (in ms) that this coil action should take place.
A value of -1 means it's forever.
coil : coil object
Which coil is this rule controlling
pulse_ms : int
How long should the coil be pulsed (ms)
pwm_on : int
If the coil should be held on at less than 100% duty cycle,
this is the "on" time (in ms).
pwm_off : int
If the coil should be held on at less than 100% duty cycle,
this is the "off" time (in ms).
delay : int
Not currently implemented
recycle_time : int
How long (in ms) should this switch rule wait before firing
again. Put another way, what's the "fastest" this rule can
fire? This is used to prevent "machine gunning" of slingshots
and pop bumpers. Do not use it with flippers.
debounced : bool
Should the hardware fire this coil after the switch has been
debounced? Typically no.
drive_now : bool
Should the hardware check the state of the switches when this
            rule is first applied, and fire the coils if they should be?
Typically this is True, especially with flippers because you
want them to fire if the player is holding in the buttons when
the machine enables the flippers (which is done via several
calls to this method.)
"""
# todo update documentation for on time and off time for debounce
self.log.debug("Setting HW Rule. Switch:%s, Action ms:%s, Coil:%s, "
"Pulse:%s, pwm_on:%s, pwm_off:%s, Delay:%s, Recycle:%s,"
"Debounced:%s, Now:%s", sw.name, coil_action_ms,
coil.name, pulse_ms, pwm_on, pwm_off, delay,
recycle_time, debounced, drive_now)
mode = 0
on_time = 0
off_time = recycle_time
if coil_action_ms == -1:
if pwm_on and pwm_off:
mode = 3 # pwm mode
on_time = pwm_on
off_time = pwm_off
else:
mode = 1 # latched mode (coil on solid)
elif 0 < coil_action_ms <= 255:
mode = 0 # pulse mode
on_time = pulse_ms
if sw_activity == 0: # fire this rule when switch turns off
sw_activity = 3
elif sw_activity == 1: # fire this coil when switch turns on
sw_activity = 2
self.hw_rules[coil.config['number']] = {'mode': mode,
'switch': sw.number,
'on': on_time,
'off': off_time}
self.log.debug("Writing HW Rule to FAST Controller. Coil: %s, "
"Mode: %s, Switch: %s, On: %s, Off: %s",
coil.number, mode, sw.number,
on_time, off_time)
fastpinball.fpWriteDriver(self.fast, # fast board
coil.number[0], # coil number
mode, # mode
sw_activity, # triggerType
sw.number[0], # switch
on_time, # on time
off_time, # time before can enable again
coil.number[1], # local or network
)
# todo ensure / verify switch & coil are on the same board.
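        # Call sketch (object names are placeholders): a flipper-style hold rule would
        # be set up roughly like
        #
        #     self._do_set_hw_rule(sw=flipper_button, sw_activity=1,
        #                          coil_action_ms=-1, coil=flipper_main_coil,
        #                          pwm_on=2, pwm_off=6, drive_now=True)
        #
        # which resolves to mode 3 (pwm hold) triggered when the switch goes active.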
def _do_clear_hw_rule(self, sw_num):
"""Clears a hardware rule.
This is used if you want to remove the linkage between a switch and
some driver activity. For example, if you wanted to disable your
flippers (so that a player pushing the flipper buttons wouldn't cause
the flippers to flip), you'd call this method with your flipper button
as the *sw_num*.
Parameters
----------
sw_num : int
The number of the switch whose rule you want to clear.
"""
self.log.debug("Clearing HW Rule for switch %s", sw_num)
# find the rule(s) based on this switch
        coils = [k for k, v in self.hw_rules.items() if v['switch'] == sw_num]
for coil in coils:
fastpinball.fpWriteDriver(self.fast, # fast board
coil[0], # coil number
0, # mode
0, # triggerType
0, # switch
0, # on time
0, # off time
coil[1], # local or network
)
# todo ensure / verify switch & coil are on the same board.
class FASTSwitch(object):
"""
fpWriteSwitchConfig params:
fp_device (self.fast)
switch number (switch number as int)
        mode (0 = no report, 1 = report on, 2 = report inverted)
debounce close
debounce open
sound
target (0 = local, 1 = network)
todo add support for different debounce open and close times
"""
def __init__(self, number, debounce_on, debounce_off, fast_device):
self.log = logging.getLogger('FASTSwitch')
self.fast = fast_device
self.number = number[0]
self.connection = number[1]
self.log.debug("fastpinball.fpWriteSwitchConfig(%s, %s, 1, %s, %s, 0, "
"%s)", fast_device, number[0], debounce_on,
debounce_off, number[1])
fastpinball.fpWriteSwitchConfig(fast_device, # fast board
number[0], # switch number
1, # mode (1=report "on")
debounce_on, # debounce on (close)
debounce_off, # debounce off (open)
0, # sound
number[1]) # connection type
class FASTDriver(object):
""" Base class for drivers connected to a FAST Controller.
old - fpWriteDriver(device, driver_id, mode, trigger_sw, on_ms, off_ms)
fpWriteDriver (
device
id
mode (see below)
triggerType (see below)
triggerSwitch (switch id number)
onTime (in ms)
offTime (in ms)
target (connection type. 0 = local, 1 = network)
)
mode options
0 = pulsed
1 = latched
2 = delay
3 = pwm
triggerType options
0 = off
1 = manual
2 = triggered by switch going on
3 = triggered by switch going off
"""
def __init__(self, number, fast_device):
self.log = logging.getLogger('FASTDriver')
self.number = number
self.fast = fast_device
def disable(self):
"""Disables (turns off) this driver."""
self.log.debug('Disabling Driver')
fastpinball.fpWriteDriver(self.fast, # fast board
self.number[0], # driver number
0, # mode
0, # triggerType
0, # switch
0, # on time
0, # off time
self.number[1], # local or network
)
def enable(self):
"""Enables (turns on) this driver."""
self.log.debug('Enabling Driver')
fastpinball.fpWriteDriver(self.fast, # fast board
self.number[0], # driver number
1, # mode
1, # triggerType
0, # switch
0, # on time
0, # off time
self.number[1], # local or network
)
# todo change hold to pulse with re-ups
def pulse(self, milliseconds=None):
"""Pulses this driver.
"""
if not milliseconds in range(256):
raise ValueError('Milliseconds must be in range 0-255.')
self.log.debug('Pulsing Driver for %sms', milliseconds)
fastpinball.fpWriteDriver(self.fast, # fast board
self.number[0], # driver number
0, # mode
1, # triggerType
0, # switch
milliseconds, # on time
0, # off time
self.number[1], # local or network
)
def pwm(self, on_ms=10, off_ms=10, original_on_ms=0, now=True):
"""Enables this driver in a pwm pattern.
"""
if not original_on_ms in range(256):
raise ValueError('original_on_ms must be in range 0-255.')
if not on_ms in range(256):
raise ValueError('on_ms must be in range 0-255.')
if not off_ms in range(256):
raise ValueError('off_ms must be in range 0-255.')
self.log.debug("pwm on:%d, off:%d, now:%s", on_ms,
off_ms, now)
fastpinball.fpWriteDriver(self.fast, # fast board
self.number[0], # driver number
3, # mode
1, # triggerType
0, # switch
on_ms, # on time
off_ms, # off time
self.number[1], # local or network
)
class FASTGIString(object):
def __init__(self, number, fast_device):
""" A FAST GI string in a WPC machine.
TODO: Need to implement the enable_relay and control which strings are
dimmable.
"""
self.log = logging.getLogger('FASTGILight')
self.number = number
self.fast = fast_device
def off(self):
fastpinball.fpWriteGiString(self.fast, self.number, 0)
self.last_time_changed = time.time()
def on(self, brightness=255, fade_ms=0, start=0):
if brightness >= 255:
fastpinball.fpWriteGiString(self.fast, self.number, 1)
elif brightness == 0:
self.off()
else:
fastpinball.fpWriteGiString(self.fast, self.number,
int(brightness/255))
self.last_time_changed = time.time()
class FASTMatrixLight(object):
def __init__(self, number, fast_device):
self.log = logging.getLogger('FASTMatrixLight')
self.number = number
self.fast = fast_device
def off(self):
"""Disables (turns off) this driver."""
fastpinball.fpWriteLamp(self.fast, self.number, 0)
self.last_time_changed = time.time()
def on(self, brightness=255, fade_ms=0, start=0):
"""Enables (turns on) this driver."""
if brightness >= 255:
fastpinball.fpWriteLamp(self.fast, self.number, 1)
elif brightness == 0:
self.off()
else:
pass
# patter rates of 10/1 through 2/9
self.last_time_changed = time.time()
class FASTDirectLED(object):
def __init__(self, number, fast_device):
self.log = logging.getLogger('FASTLED')
self.number = number
self.fast = fast_device
self.current_color = [0, 0, 0]
# All FAST LEDs are 3 element RGB
self.log.debug("Creating FAST RGB LED at hardware address: %s",
self.number)
def color(self, color):
# Pad the color with zeros to make sure we have as many colors as
# elements
# todo verify this is needed with FAST. It might just work without
color += [0] * (3 - len(color))
self.log.info("fastpinball.fpWriteRgb(self.fast, %s, %s, %s, %s)",
self.number, color[0], color[1], color[2])
fastpinball.fpWriteRgb(self.fast, self.number, color[0], color[1],
color[2])
def fade(self, color, fadetime):
# todo
# not yet implemented. For now we'll just immediately set the color
self.log.debug("Fading LED %s over %sms", self.name, fadetime)
self.color(color, fadetime)
def disable(self):
"""Disables (turns off) this LED instantly. For multi-color LEDs it
turns all elements off.
"""
fastpinball.fpWriteRgb(self.fast, self.number, 0, 0, 0)
def enable(self, brightness_compensation=True):
        self.color([255, 255, 255])  # brightness_compensation is not implemented yet
# The MIT License (MIT)
# Original code on which this module was based:
# Copyright (c) 2009-2011 Adam Preble and Gerry Stellenberg
# Copyright (c) 2013-2014 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
the-stack_0_2742 | from fastapi import APIRouter, Request
import json
from typing import List
from loguru import logger
from starlette.templating import _TemplateResponse
from app.config import RESOURCES_DIR
from app.dependencies import templates
router = APIRouter()
def credits_from_json() -> List:
path = RESOURCES_DIR / "credits.json"
try:
with open(path, 'r') as json_file:
json_list = json.load(json_file)
except (IOError, ValueError):
logger.exception(
"An error occurred during reading of json file")
return []
return json_list
@router.get("/credits")
def credits(request: Request) -> _TemplateResponse:
credit_list = credits_from_json()
return templates.TemplateResponse("credits.html", {
"request": request,
"credit_list": credit_list
})
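# Expected shape of RESOURCES_DIR / "credits.json": a JSON list of objects that is passed
# straight through to credits.html. The field names below are only an assumption about
# what the template consumes:
#
#     [{"name": "Jane Doe", "role": "Maintainer"},
#      {"name": "John Doe", "role": "Contributor"}]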
|
the-stack_0_2743 | def run(line, start_panel):
num_of_operands = [0, 3, 3, 1, 1, 2, 2, 3, 3, 1]
program = [int(x) for x in line]+[0]*10000
i, base = 0, 0
panels, pos, outputs = {(0,0):start_panel}, (0,0), []
directions, dir_idx = [(-1,0), (0,1), (1,0), (0,-1)], 0
while program[i] != 99:
modes = [int(x) for x in f"{program[i]:0>5}"[:3]][::-1]
instruction = int(f"{program[i]:0>5}"[3:])
base_tmp = [base if modes[x]==2 else 0 for x in range(num_of_operands[instruction])]
operands = [program[i+x+1] if modes[x]==1 else program[base_tmp[x]+program[i+x+1]] for x in range(num_of_operands[instruction])]
if instruction == 1:
program[base_tmp[2]+program[i+3]] = operands[0] + operands[1]
elif instruction == 2:
program[base_tmp[2]+program[i+3]] = operands[0] * operands[1]
elif instruction == 3:
program[base_tmp[0]+program[i+1]] = panels[pos] if pos in panels else 0
elif instruction == 4:
outputs.append(operands[0])
if len(outputs) == 2:
panels[pos] = outputs[0]
dir_idx = (dir_idx + (1 if outputs[1] else -1)) % len(directions)
pos = (pos[0] + directions[dir_idx][0], pos[1] + directions[dir_idx][1])
outputs = []
elif instruction == 5:
i = (operands[1] - 3) if operands[0]!=0 else i
elif instruction == 6:
i = (operands[1] - 3) if operands[0]==0 else i
elif instruction == 7:
program[base_tmp[2]+program[i+3]] = int(operands[0] < operands[1])
elif instruction == 8:
program[base_tmp[2]+program[i+3]] = int(operands[0] == operands[1])
elif instruction == 9:
base += operands[0]
i += num_of_operands[instruction] + 1
return panels
with open("input.txt") as file:
data = file.readline().split(",")
print(len(run(data,0)))
result = run(data, 1)
tmp = [[" "]*50 for _ in range(7)]
for i in zip(result.keys(), result.values()):
tmp[i[0][0]][i[0][1]] = "#" if i[1] else " "
[print(" ".join(x)) for x in tmp] |
the-stack_0_2744 | from nltk.cluster import KMeansClusterer, cosine_distance # will get nan when u v are zero?
import pandas as pd
from sklearn.cluster import KMeans
from gensim.utils import tokenize
import pyLDAvis
from gensim.models import LdaModel
from gensim.corpora.dictionary import Dictionary
import pandas as pd
import numpy as np
################################################
## Majority vote rules
################################################
def link_group_to_label(train_label, train_pred, num_topics=100):
"""with majority vote rule"""
# Maping clusters into labels
df = pd.DataFrame(list(zip(train_label, train_pred)), columns=['actual_class', 'cluster'])
confusion = pd.crosstab(index=df.cluster, columns=df.actual_class)
## handle no group
full = pd.DataFrame(index=range(num_topics), columns=train_label.unique())
full.loc[:, 'no_group'] = 0.1 # the minimum is 1
merge_full = full.combine_first(confusion).fillna(0)
group_to_label = merge_full.idxmax(axis=1)
## print out mapping
print("Group to label mapping: ")
for idx, t in enumerate(group_to_label):
print("Group {} <-> label {}".format(idx, t))
print("\n")
return group_to_label
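# Usage sketch (placeholder data): with `train_label` a pd.Series of class names and
# `train_pred` a list of cluster/topic ids of the same length,
#
#     mapping = link_group_to_label(train_label, train_pred, num_topics=10)
#     predicted_labels = [mapping[c] for c in train_pred]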
################################################
## Clustering tools
################################################
def fit_clustering_model(dtm_train, train_label, num_clusters, metric='Cosine', model='KMeans', repeats=20):
'''
'''
assert metric in ['Cosine', 'L2']
assert model in ['KMeans']
# model training
if model == 'KMeans':
if metric == 'Cosine':
# normalise should be true!
clusterer = KMeansClusterer(num_clusters, cosine_distance, normalise=True, repeats=repeats, avoid_empty_clusters=True)
train_cluster_pred = clusterer.cluster(dtm_train, assign_clusters=True)
elif metric == 'L2':
clusterer = KMeans(n_clusters=num_clusters, n_init=repeats).fit(dtm_train)
train_cluster_pred = clusterer.labels_.tolist()
elif model == 'GMM':
pass
# GMM model not good in such case
# clusterer = mixture.GaussianMixture(n_components=num_clusters, n_init=repeats, covariance_type='diag')
# clusterer.fit(dtm_train)
# train_cluster_pred = clusterer.predict(dtm_train)
# Maping clusters into labels
clusters_to_labels = link_group_to_label(train_label, train_cluster_pred, num_clusters)
return clusterer, clusters_to_labels
def pred_clustering_model(dtm_test, clusterer, clusters_to_labels):
try:
test_cluster_pred = clusterer.predict(dtm_test) # for sklearn clustering with L2
except Exception:
test_cluster_pred = [clusterer.classify(v) for v in dtm_test] # for nltk clustering with Cosine similiarity
predict = [clusters_to_labels[i] for i in test_cluster_pred]
return predict
################################################
## Topic modeling tools
################################################
def transform_lda_corpus(docs, vocabulary=None):
assert isinstance(docs, pd.Series)
idx_to_word = vocabulary
tokenized_docs = docs.apply(lambda x: list(tokenize(x))).to_list()
if idx_to_word is None:
idx_to_word = Dictionary(tokenized_docs)
sparse_corpus = [idx_to_word.doc2bow(doc) for doc in tokenized_docs]
return idx_to_word, sparse_corpus
def fit_topic_model(docs, num_topics=100, save_name='lda_gensim_model'):
'''
docs is the pd Series
output lda model and topic prediction on docs
'''
vocabulary, sparse_corpus = transform_lda_corpus(docs, vocabulary=None)
lda = LdaModel(sparse_corpus, num_topics=num_topics, minimum_probability=0.0001, dtype=np.float64)
if save_name is not None:
lda.save(save_name)
        lda = LdaModel.load(save_name)  # reload from disk (will the index get smaller?)
return lda, vocabulary
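# Usage sketch (assumes `docs` is a pandas Series of raw text documents):
#
#     lda, voc = fit_topic_model(docs, num_topics=50, save_name='lda_gensim_model')
#     pred, topic_distribution = pred_topic_model(lda, docs, voc)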
def pred_topic_model(lda, docs, vocabulary):
assert vocabulary is not None
_, sparse_corpus = transform_lda_corpus(docs, vocabulary=vocabulary)
pred = lda[sparse_corpus]
topic_distribution = lil_to_dataframe(pred, nrows=len(docs), ncols=lda.num_topics)
## checking for no topic
a = topic_distribution.sum(axis=1)
print(a[a == 0])
pred = topic_distribution.idxmax(axis=1, skipna=False)
return pred, topic_distribution
def lil_to_dataframe(pred, nrows, ncols):
'''sorted([(1, 0.4), (2,0.6) , (3, 0.3)], key=lambda x:-x[1])[0][0]'''
res = {}
for row, doc_topics in enumerate(pred):
res[row] = dict(doc_topics)
d1 = pd.DataFrame(index=range(nrows), columns=range(ncols))
d2 = pd.DataFrame.from_dict(res, orient='index')
# d3 = d1.combine_first(d2)
d3 = d1.combine_first(d2).fillna(0)
return d3
def visualize_LDA_model(docs, voc, lda):
_, sparse_corpus = transform_lda_corpus(docs, vocabulary=voc)
pyLDAvis.enable_notebook()
panel = pyLDAvis.gensim.prepare(lda, corpus=sparse_corpus, dictionary=voc, mds='tsne')
return panel
def load_gensim_LDA_model(save_name='lda_gensim_model'):
    return LdaModel.load(save_name)  # note: the reloaded model may have one key fewer
|
the-stack_0_2746 | # Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
from nnabla.testing import assert_allclose
def test_manip():
v = nn.Variable([2, 3, 4])
assert v.shape == (2, 3, 4)
with pytest.raises(Exception):
        v.reset_shape([1, 2])
v.reset_shape([1, 2], force=True)
assert v.shape == (1, 2)
@pytest.mark.parametrize("need_grad", [True, False])
def test_from_array(need_grad):
data = np.random.randint(0, 10, size=(2, 3, 4))
grad = np.random.randint(0, 10, size=(2, 3, 4))
v1 = nn.Variable.from_numpy_array(data, need_grad=need_grad)
assert np.all(v1.d == data)
assert v1.d.dtype == data.dtype
assert v1.need_grad == need_grad
v2 = nn.Variable.from_numpy_array(data, grad, need_grad)
assert np.all(v2.d == data)
assert v2.d.dtype == data.dtype
assert np.all(v2.g == grad)
assert v2.g.dtype == grad.dtype
assert v2.need_grad == need_grad
def test_data_grad_reference():
v = nn.Variable([2, 3, 4])
assert v.d.dtype == np.float32
assert v.g.dtype == np.float32
def test_dtype_conversion():
v = nn.Variable([2, 3, 4])
a = v.data.cast(np.int)
a[...] = 2
assert (v.data.dtype == np.int)
assert np.all(a == 2)
b = v.data.cast(np.float32)
assert b.dtype == np.float32
assert b is not a
assert np.all(b == 2)
b[...] = np.random.randn(*b.shape) * 10
c = v.data.cast(np.int32)
assert np.all(c == b.astype(np.int32))
def test_data_grad():
v = nn.Variable([2, 3, 4])
v.d[...] = np.random.randn(*v.shape)
assert v.d is not v.g
assert not np.all(v.d == v.g)
def test_get_unlinked_variable():
v = nn.Variable([2, 3, 4], need_grad=True)
grad = np.random.randn(*v.shape).astype(np.float32)
v.g = grad
v.d = np.random.randn(*v.shape)
import nnabla.functions as F
with nn.context_scope(nn.Context()), nn.auto_forward():
v2 = F.identity(v)
v2_u = v2.get_unlinked_variable()
assert v2_u.need_grad
v3 = F.identity(v2_u)
v2_u.grad.zero()
v2_g = v2_u.g.copy()
v3.backward(clear_buffer=False)
assert type(v2_u) == type(v2)
assert np.all(v.g == grad)
assert np.all(v2_u.g == v2.g)
assert np.all(v2_u.g == v2_g + 1)
# Check need_grad option
assert v2.get_unlinked_variable(need_grad=True).need_grad
assert not v2.get_unlinked_variable(need_grad=False).need_grad
def test_reshape():
v = nn.Variable([2, 3, 4], need_grad=True)
grad = np.random.randn(*v.shape).astype(np.float32)
v.g = grad
v.d = np.random.randn(*v.shape)
import nnabla.functions as F
with nn.context_scope(nn.Context()), nn.auto_forward():
v2 = F.identity(v)
v2_s = v2.reshape((3, 4, 2))
v3 = F.identity(v2_s)
v3.backward(clear_buffer=False)
assert np.all(v2_s.g.flat == v2.g.flat)
assert np.all(v2_s.g == 1)
v2.d = 1
assert np.all(v2_s.d == 1)
# Check unlink
v2_su = v2.reshape((3, 4, 2), unlink=True)
assert v2_su.need_grad
assert v2_su.parent is None
v2_su.need_grad = False
v2_su2 = v2_su.reshape((3, 4, 2), unlink=True)
assert not v2_su2.need_grad
assert v2_su2.parent is None
def test_persistent():
x = nn.Variable([2, 3, 4], need_grad=True)
x1 = x + 1
x2 = x1 + 1
x3 = x2 + 1
y = x3 + 1
x3.persistent = True
x.data.zero()
y.forward(clear_buffer=True)
assert_allclose(x3.d, 3)
y.forward(clear_no_need_grad=True)
y.backward(clear_buffer=True)
assert_allclose(x3.d, 3)
assert_allclose(x3.g, 1)
def test_name():
x = nn.Variable([2, 3])
x.name = "VariableName"
assert x.name == "VariableName"
def test_name_all_variables():
def net(h):
import nnabla.functions as F
import nnabla.parametric_functions as PF
h = PF.convolution(h, 3, (3, 3), name="conv1")
h = PF.batch_normalization(h, name="bn1")
h = F.relu(h)
h = F.max_pooling(h, (2, 2))
h = PF.convolution(h, 3, (3, 3), name="conv2")
h = PF.batch_normalization(h, name="bn2")
pred = F.relu(h)
return pred
class Namer(object):
def __init__(self, ):
self.counter = 0
def __call__(self, nnabla_func):
for v in nnabla_func.outputs:
v.name = "{}_output_{:05d}".format(
nnabla_func.name, self.counter)
self.counter += 1
class Confirmer(object):
def __init__(self, ):
self.counter = 0
def __call__(self, nnabla_func):
for v in nnabla_func.outputs:
assert v.name == "{}_output_{:05d}".format(
nnabla_func.name, self.counter)
self.counter += 1
x = nn.Variable([2, 3, 8, 8])
pred = net(x)
pred.visit(Namer())
pred.forward(clear_no_need_grad=True)
pred.backward(clear_buffer=True)
pred.visit(Confirmer())
def test_clear_all_graph_links():
import nnabla.functions as F
import nnabla.parametric_functions as PF
class OneStepRNN(object):
def __init__(self, batch_size=8, state_size=8):
self.lstm0 = PF.LSTMCell(batch_size, state_size, name="lsmt0")
self.lstm1 = PF.LSTMCell(batch_size, state_size, name="lsmt1")
self.affine = PF.affine
def __call__(self, x, n_class=10):
h = self.lstm0(x)
h = self.lstm1(h)
h = self.affine(h, n_class)
return h
T = 3
batch_size = 2
dims = 4
state_size = 8
one_step_rnn = OneStepRNN(batch_size, state_size)
# Forward: unroll over time
loss = 0
for t in range(T):
x = nn.Variable.from_numpy_array(
np.random.randn(batch_size, dims))
y = nn.Variable.from_numpy_array(
np.random.choice(np.arange(10), batch_size, replace=True)).reshape((batch_size, 1))
pred = one_step_rnn(x)
l = F.mean(F.softmax_cross_entropy(pred, y))
loss += l
loss /= T
# Backward then truncate
loss.backward()
loss.clear_all_graph_links()
assert one_step_rnn.lstm0.h.parent == None
assert one_step_rnn.lstm0.c.parent == None
assert one_step_rnn.lstm1.h.parent == None
assert one_step_rnn.lstm1.c.parent == None
def test_function_references():
import nnabla as nn
import nnabla.parametric_functions as PF
v = nn.Variable.from_numpy_array(np.random.randn(2, 4))
assert len(v.function_references) == 0
h1 = PF.affine(v, 10, name="affine1")
assert len(v.function_references) == 1
assert h1.parent in v.function_references
h2 = PF.affine(v, 10, name="affine2")
assert len(v.function_references) == 2
assert h1.parent in v.function_references
assert h2.parent in v.function_references
del h1
assert len(v.function_references) == 1
assert h2.parent in v.function_references
del h2
assert len(v.function_references) == 0
@pytest.mark.parametrize("f", [lambda x: x, hash])
def test_variable_equality_and_hash(f):
shape = (2, 3, 4)
x = nn.Variable(shape)
assert f(x) == f(x)
y = nn.Variable(shape)
assert f(x) != f(y)
y = x.get_unlinked_variable()
assert f(x) == f(y)
y.need_grad = True
assert f(x) == f(y)
def test_variable_set():
# Testing hash and equality operator via set
shape = (2, 3, 4)
x = nn.Variable(shape)
s = set()
s.add(x)
assert x in s
y = nn.Variable(shape)
assert y not in s
y = x.get_unlinked_variable()
assert y in s
y.need_grad = True
assert y in s
def test_prohibit_clear_data():
import nnabla.functions as F
nn.prefer_cached_array(False)
shape = (2, 3, 4)
var_np = np.random.rand(*shape)
# the case of root variable
x1 = nn.Variable.from_numpy_array(var_np)
y1 = F.reshape(x1, (-1,), inplace=True)
y1 = F.reshape(y1, shape, inplace=True) * 2
x2 = nn.Variable.from_numpy_array(var_np)
y2 = F.reshape(x2, (-1,), inplace=False)
y2 = F.reshape(y2, shape, inplace=False) * 2
nn.forward_all([y1, y2], clear_buffer=True)
assert_allclose(x1.d, x2.d)
assert_allclose(y1.d, y2.d)
# the case of persistent variable
x1 = nn.Variable.from_numpy_array(var_np)
p_y1 = F.mul_scalar(x1, 2).apply(persistent=True)
y1 = F.reshape(p_y1, (-1,), inplace=True)
y1 = F.reshape(y1, shape, inplace=True) * 2
x2 = nn.Variable.from_numpy_array(var_np)
p_y2 = F.mul_scalar(x2, 2).apply(persistent=True)
y2 = F.reshape(p_y2, (-1,), inplace=False)
y2 = F.reshape(y2, shape, inplace=False) * 2
nn.forward_all([y1, y2], clear_buffer=True)
assert_allclose(p_y1.d, p_y2.d)
assert_allclose(y1.d, y2.d)
# the case of rewire_on root variable
# graph A: x11 -> f_inplace -> y11
x11 = nn.Variable.from_numpy_array(var_np)
y11 = F.reshape(x11, (-1,), inplace=True)
# graph B: x12 -> f_inplace -> mul_scalar -> y12
x12 = nn.Variable(shape=y11.shape)
y12 = F.reshape(x12, shape, inplace=True) * 2
# graph A->B: x11 -> f_inplace -> f_inplace -> mul_scalar -> y12
x12.rewire_on(y11)
x2 = nn.Variable.from_numpy_array(var_np)
y2 = F.reshape(x2, (-1,), inplace=False)
y2 = F.reshape(y2, shape, inplace=False) * 2
nn.forward_all([y12, y2], clear_buffer=True)
assert_allclose(x11.d, x2.d)
assert_allclose(y12.d, y2.d)
# the case of rewire_on persistent variable
# graph A: x11 -> mul_scalar -> p_x11 -> f_inplace -> y11
x11 = nn.Variable.from_numpy_array(var_np)
p_x11 = F.mul_scalar(x11, 2).apply(persistent=True)
y11 = F.reshape(p_x11, (-1,), inplace=True)
# graph B: x12 -> f_inplace -> mul_scalar -> y12
x12 = nn.Variable(shape=y11.shape)
y12 = F.reshape(x12, shape, inplace=True) * 2
# graph A->B: ... -> p_x11 -> f_inplace -> f_inplace -> mul_scalar -> y12
x12.rewire_on(y11)
x2 = nn.Variable.from_numpy_array(var_np)
p_x2 = F.mul_scalar(x2, 2).apply(persistent=True)
y2 = F.reshape(p_x2, (-1,), inplace=False)
y2 = F.reshape(y2, shape, inplace=False) * 2
nn.forward_all([y12, y2], clear_buffer=True)
assert_allclose(p_x11.d, p_x2.d)
assert_allclose(y12.d, y2.d)
def test_leaf_indexing_access():
import nnabla.functions as F
nn.set_auto_forward(False)
shape_x = (3, 2)
dx = np.random.rand(*shape_x)
shape_y = (2, 2)
dy = np.random.rand(*shape_y)
x = nn.Variable.from_numpy_array(dx)
y = nn.Variable.from_numpy_array(dy)
x[0:2, :] = y
z = F.identity(x)
z.forward()
d1 = x.d.copy()
nn.set_auto_forward(True)
x = nn.Variable.from_numpy_array(dx)
y = nn.Variable.from_numpy_array(dy)
x[0:2, :] = y
z2 = F.identity(x)
d2 = x.d.copy()
nn.set_auto_forward(False)
x = nn.Variable.from_numpy_array(dx)
y = nn.Variable.from_numpy_array(dy)
x[0:2, :] = y
z3 = F.identity(x)
z3.forward()
d3 = x.d.copy()
d4 = z3.d.copy()
assert_allclose(d1, d2)
assert_allclose(d2, d3)
assert_allclose(d3, d4)
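# Note (illustrative, not part of the original test module): these tests are
# normally collected and run with pytest; the file name below is an assumption.
#
#   python -m pytest -v test_variable.py -k "reshape or persistent"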
|
the-stack_0_2748 | """Groups UI URLs
Copyright 2015 Archive Analytics Solutions
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.conf.urls import url
urlpatterns = [
url(r'^$', 'groups.views.home', name='home'),
url(r'^new/group', 'groups.views.new_group', name='new_group'),
url(r'^delete/group/(?P<name>.*)$', 'groups.views.delete_group', name='delete_group'),
url(r'^edit/group/(?P<name>.*)$', 'groups.views.edit_group', name='edit_group'),
url(r'^rm/(?P<name>.*)/(?P<uname>.*)$', 'groups.views.rm_user', name='rm_user'),
url(r'^add/(?P<name>.*)$', 'groups.views.add_user', name='add_user'),
url(r'^(?P<name>.*)$', 'groups.views.group_view', name='view'),
]
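# Illustrative wiring (assumption, not part of this file): in a Django 1.x
# project these patterns would typically be mounted from the root URLconf, e.g.
#
#   url(r'^groups/', include('groups.urls', namespace='groups')),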
|
the-stack_0_2749 | import warnings
from collections import Counter
from encodings.aliases import aliases
from hashlib import sha256
from json import dumps
from re import compile as re_compile, sub
from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Union
from .constant import TOO_BIG_SEQUENCE
from .md import mess_ratio
from .utils import iana_name, is_multi_byte_encoding, unicode_range
class CharsetMatch:
def __init__(
self,
payload: bytes,
guessed_encoding: str,
mean_mess_ratio: float,
has_sig_or_bom: bool,
languages: "CoherenceMatches",
decoded_payload: Optional[str] = None,
):
self._payload = payload # type: bytes
self._encoding = guessed_encoding # type: str
self._mean_mess_ratio = mean_mess_ratio # type: float
self._languages = languages # type: CoherenceMatches
self._has_sig_or_bom = has_sig_or_bom # type: bool
self._unicode_ranges = None # type: Optional[List[str]]
self._leaves = [] # type: List[CharsetMatch]
self._mean_coherence_ratio = 0.0 # type: float
self._output_payload = None # type: Optional[bytes]
self._output_encoding = None # type: Optional[str]
self._string = decoded_payload # type: Optional[str]
def __eq__(self, other: object) -> bool:
if not isinstance(other, CharsetMatch):
raise TypeError(
"__eq__ cannot be invoked on {} and {}.".format(
str(other.__class__), str(self.__class__)
)
)
return self.encoding == other.encoding and self.fingerprint == other.fingerprint
def __lt__(self, other: object) -> bool:
"""
Implemented to make sorted available upon CharsetMatches items.
"""
if not isinstance(other, CharsetMatch):
raise ValueError
chaos_difference = abs(self.chaos - other.chaos) # type: float
        # Below 1% difference --> use coherence
if chaos_difference < 0.01:
return self.coherence > other.coherence
return self.chaos < other.chaos
@property
def chaos_secondary_pass(self) -> float:
"""
Check once again chaos in decoded text, except this time, with full content.
Use with caution, this can be very slow.
Notice: Will be removed in 3.0
"""
warnings.warn(
"chaos_secondary_pass is deprecated and will be removed in 3.0",
DeprecationWarning,
)
return mess_ratio(str(self), 1.0)
@property
def coherence_non_latin(self) -> float:
"""
Coherence ratio on the first non-latin language detected if ANY.
Notice: Will be removed in 3.0
"""
warnings.warn(
"coherence_non_latin is deprecated and will be removed in 3.0",
DeprecationWarning,
)
return 0.0
@property
def w_counter(self) -> Counter:
"""
Word counter instance on decoded text.
Notice: Will be removed in 3.0
"""
warnings.warn(
"w_counter is deprecated and will be removed in 3.0", DeprecationWarning
)
not_printable_pattern = re_compile(r"[0-9\W\n\r\t]+")
string_printable_only = sub(not_printable_pattern, " ", str(self).lower())
return Counter(string_printable_only.split())
def __str__(self) -> str:
# Lazy Str Loading
if self._string is None:
self._string = str(self._payload, self._encoding, "strict")
return self._string
def __repr__(self) -> str:
return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint)
def add_submatch(self, other: "CharsetMatch") -> None:
if not isinstance(other, CharsetMatch) or other == self:
raise ValueError(
"Unable to add instance <{}> as a submatch of a CharsetMatch".format(
other.__class__
)
)
other._string = None # Unload RAM usage; dirty trick.
self._leaves.append(other)
@property
def encoding(self) -> str:
return self._encoding
@property
def encoding_aliases(self) -> List[str]:
"""
        An encoding can be known by many names; this property helps e.g. when searching for IBM855 while it is listed as CP855.
"""
also_known_as = [] # type: List[str]
for u, p in aliases.items():
if self.encoding == u:
also_known_as.append(p)
elif self.encoding == p:
also_known_as.append(u)
return also_known_as
@property
def bom(self) -> bool:
return self._has_sig_or_bom
@property
def byte_order_mark(self) -> bool:
return self._has_sig_or_bom
@property
def languages(self) -> List[str]:
"""
Return the complete list of possible languages found in decoded sequence.
        Usually not very useful. The returned list may be empty even if the 'language' property returns something != 'Unknown'.
"""
return [e[0] for e in self._languages]
@property
def language(self) -> str:
"""
Most probable language found in decoded sequence. If none were detected or inferred, the property will return
"Unknown".
"""
if not self._languages:
# Trying to infer the language based on the given encoding
            # It's either English, or we should not commit to a guess in certain cases.
if "ascii" in self.could_be_from_charset:
return "English"
# doing it there to avoid circular import
from charset_normalizer.cd import encoding_languages, mb_encoding_languages
languages = (
mb_encoding_languages(self.encoding)
if is_multi_byte_encoding(self.encoding)
else encoding_languages(self.encoding)
)
if len(languages) == 0 or "Latin Based" in languages:
return "Unknown"
return languages[0]
return self._languages[0][0]
@property
def chaos(self) -> float:
return self._mean_mess_ratio
@property
def coherence(self) -> float:
if not self._languages:
return 0.0
return self._languages[0][1]
@property
def percent_chaos(self) -> float:
return round(self.chaos * 100, ndigits=3)
@property
def percent_coherence(self) -> float:
return round(self.coherence * 100, ndigits=3)
@property
def raw(self) -> bytes:
"""
Original untouched bytes.
"""
return self._payload
@property
def submatch(self) -> List["CharsetMatch"]:
return self._leaves
@property
def has_submatch(self) -> bool:
return len(self._leaves) > 0
@property
def alphabets(self) -> List[str]:
if self._unicode_ranges is not None:
return self._unicode_ranges
detected_ranges = set() # type: Set[str]
for character in str(self):
detected_range = unicode_range(character) # type: Optional[str]
if detected_range:
detected_ranges.add(detected_range)
self._unicode_ranges = sorted(list(detected_ranges))
return self._unicode_ranges
@property
def could_be_from_charset(self) -> List[str]:
"""
        The complete list of encodings that produce the exact same str result and therefore could be the originating
encoding.
This list does include the encoding available in property 'encoding'.
"""
return [self._encoding] + [m.encoding for m in self._leaves]
def first(self) -> "CharsetMatch":
"""
Kept for BC reasons. Will be removed in 3.0.
"""
return self
def best(self) -> "CharsetMatch":
"""
Kept for BC reasons. Will be removed in 3.0.
"""
return self
def output(self, encoding: str = "utf_8") -> bytes:
"""
Method to get re-encoded bytes payload using given target encoding. Default to UTF-8.
        Any encoding errors are simply replaced by the encoder (errors="replace"), not raised.
"""
if self._output_encoding is None or self._output_encoding != encoding:
self._output_encoding = encoding
self._output_payload = str(self).encode(encoding, "replace")
return self._output_payload # type: ignore
@property
def fingerprint(self) -> str:
"""
Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
"""
return sha256(self.output()).hexdigest()
class CharsetMatches:
"""
    Container holding every CharsetMatch item, ordered by default from the most probable to the least probable one.
    Acts like a list (iterable) but does not implement all related methods.
"""
def __init__(self, results: List[CharsetMatch] = None):
self._results = sorted(results) if results else [] # type: List[CharsetMatch]
def __iter__(self) -> Iterator[CharsetMatch]:
for result in self._results:
yield result
def __getitem__(self, item: Union[int, str]) -> CharsetMatch:
"""
Retrieve a single item either by its position or encoding name (alias may be used here).
Raise KeyError upon invalid index or encoding not present in results.
"""
if isinstance(item, int):
return self._results[item]
if isinstance(item, str):
item = iana_name(item, False)
for result in self._results:
if item in result.could_be_from_charset:
return result
raise KeyError
def __len__(self) -> int:
return len(self._results)
def append(self, item: CharsetMatch) -> None:
"""
Insert a single match. Will be inserted accordingly to preserve sort.
Can be inserted as a submatch.
"""
if not isinstance(item, CharsetMatch):
raise ValueError(
"Cannot append instance '{}' to CharsetMatches".format(
str(item.__class__)
)
)
# We should disable the submatch factoring when the input file is too heavy (conserve RAM usage)
if len(item.raw) <= TOO_BIG_SEQUENCE:
for match in self._results:
if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
match.add_submatch(item)
return
self._results.append(item)
self._results = sorted(self._results)
def best(self) -> Optional["CharsetMatch"]:
"""
Simply return the first match. Strict equivalent to matches[0].
"""
if not self._results:
return None
return self._results[0]
def first(self) -> Optional["CharsetMatch"]:
"""
Redundant method, call the method best(). Kept for BC reasons.
"""
return self.best()
CoherenceMatch = Tuple[str, float]
CoherenceMatches = List[CoherenceMatch]
class CliDetectionResult:
def __init__(
self,
path: str,
encoding: Optional[str],
encoding_aliases: List[str],
alternative_encodings: List[str],
language: str,
alphabets: List[str],
has_sig_or_bom: bool,
chaos: float,
coherence: float,
unicode_path: Optional[str],
is_preferred: bool,
):
self.path = path # type: str
self.unicode_path = unicode_path # type: Optional[str]
self.encoding = encoding # type: Optional[str]
self.encoding_aliases = encoding_aliases # type: List[str]
self.alternative_encodings = alternative_encodings # type: List[str]
self.language = language # type: str
self.alphabets = alphabets # type: List[str]
self.has_sig_or_bom = has_sig_or_bom # type: bool
self.chaos = chaos # type: float
self.coherence = coherence # type: float
self.is_preferred = is_preferred # type: bool
@property
def __dict__(self) -> Dict[str, Any]: # type: ignore
return {
"path": self.path,
"encoding": self.encoding,
"encoding_aliases": self.encoding_aliases,
"alternative_encodings": self.alternative_encodings,
"language": self.language,
"alphabets": self.alphabets,
"has_sig_or_bom": self.has_sig_or_bom,
"chaos": self.chaos,
"coherence": self.coherence,
"unicode_path": self.unicode_path,
"is_preferred": self.is_preferred,
}
def to_json(self) -> str:
return dumps(self.__dict__, ensure_ascii=True, indent=4)
CharsetNormalizerMatch = CharsetMatch
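# Illustrative usage sketch (not part of the original module): exercises the
# container API defined above. The payload, encoding and language values are
# arbitrary examples, not real detector output.
def _example_collect_matches():  # hypothetical helper
    payload = "déjà vu".encode("utf_8")
    match = CharsetMatch(payload, "utf_8", 0.0, False, [("French", 0.9)])
    results = CharsetMatches()
    results.append(match)  # kept sorted; may attach as a submatch of an equal entry
    best = results.best()  # most probable CharsetMatch, or None when empty
    return best.encoding, best.could_be_from_charset, best.fingerprint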
|
the-stack_0_2750 | import ast
from dotmap import DotMap
from typing import Union, List
from .utils import visualize_1D_lcurves
class MetaLog(object):
meta_vars: List[str]
stats_vars: List[str]
time_vars: List[str]
num_configs: int
def __init__(self, meta_log: DotMap, non_aggregated: bool = False):
"""Class wrapper for meta_log dictionary w. additional functionality.
Args:
meta_log (DotMap): Raw reloaded meta-log dotmap dictionary.
non_aggregated (bool, optional):
Whether the meta-log has previously been aggregated across
seeds. Defaults to False.
"""
self.meta_log = meta_log
# Return shallow log if there is only a single experiment stored
self.num_configs = len(list(meta_log.keys()))
ph_run = list(meta_log.keys())[0]
# Extract different variable names from meta log
if not non_aggregated:
self.meta_vars = list(meta_log[ph_run].meta.keys())
self.stats_vars = list(meta_log[ph_run].stats.keys())
self.time_vars = list(meta_log[ph_run].time.keys())
else:
ph_seed = list(meta_log[ph_run].keys())[0]
self.meta_vars = list(meta_log[ph_run][ph_seed].meta.keys())
self.stats_vars = list(meta_log[ph_run][ph_seed].stats.keys())
self.time_vars = list(meta_log[ph_run][ph_seed].time.keys())
# Decode all byte strings in meta data
for run_id in self.meta_log.keys():
if "meta" in self.meta_log[run_id].keys():
self.meta_log[run_id] = decode_meta_strings(
self.meta_log[run_id]
)
else:
for seed_id in self.meta_log[run_id].keys():
self.meta_log[run_id][seed_id] = decode_meta_strings(
self.meta_log[run_id][seed_id]
)
# Make log shallow if there is only a single experiment stored
if self.num_configs == 1:
self.meta_log = self.meta_log[ph_run]
# Make possible that all runs are accessible via attribute as in pd
for key in self.meta_log:
setattr(self, key, self.meta_log[key])
def filter(self, run_ids: List[str]):
"""Subselect the meta log dict based on a list of run ids."""
sub_dict = subselect_meta_log(self.meta_log, run_ids)
return MetaLog(sub_dict)
def plot(
self,
target_to_plot: str,
iter_to_plot: Union[str, None] = None,
smooth_window: int = 1,
plot_title: Union[str, None] = None,
xy_labels: Union[list, None] = None,
base_label: str = "{}",
run_ids: Union[list, None] = None,
curve_labels: list = [],
every_nth_tick: Union[int, None] = None,
plot_std_bar: bool = False,
fname: Union[None, str] = None,
num_legend_cols: Union[int, None] = 1,
fig=None,
ax=None,
figsize: tuple = (9, 6),
plot_labels: bool = True,
legend_title: Union[None, str] = None,
ax_lims: Union[None, list] = None,
):
"""Plot all runs in meta-log for variable 'target_to_plot'."""
if iter_to_plot is None:
iter_to_plot = self.time_vars[0]
assert iter_to_plot in self.time_vars
if run_ids is None:
run_ids = self.eval_ids
fig, ax = visualize_1D_lcurves(
self.meta_log,
iter_to_plot,
target_to_plot,
smooth_window=smooth_window,
every_nth_tick=every_nth_tick,
num_legend_cols=num_legend_cols,
run_ids=run_ids,
plot_title=plot_title,
xy_labels=xy_labels,
base_label=base_label,
curve_labels=curve_labels,
plot_std_bar=plot_std_bar,
fig=fig,
ax=ax,
figsize=figsize,
plot_labels=plot_labels,
legend_title=legend_title,
ax_lims=ax_lims,
)
# Save the figure if a filename was provided
if fname is not None:
fig.savefig(fname, dpi=300)
else:
return fig, ax
@property
def eval_ids(self) -> Union[int, None]:
"""Get ids of runs stored in meta_log instance."""
if self.num_configs > 1:
return list(self.meta_log.keys())
else:
print("Only single aggregated configuration or random seed loaded.")
def __len__(self) -> int:
"""Return number of runs stored in meta_log."""
return len(self.eval_ids)
def __getitem__(self, item):
"""Get run log via string subscription."""
return self.meta_log[item]
def subselect_meta_log(meta_log: DotMap, run_ids: List[str]) -> DotMap:
"""Subselect the meta log dict based on a list of run ids."""
sub_log = DotMap()
for run_id in run_ids:
sub_log[run_id] = meta_log[run_id]
return sub_log
def decode_meta_strings(log: DotMap):
"""Decode all bytes encoded strings."""
for k in log.meta.keys():
temp_list = []
if type(log.meta[k]) != str:
list_to_loop = (
log.meta[k].tolist()
if type(log.meta[k]) != list
else log.meta[k]
)
if type(list_to_loop) in [str, bytes]:
list_to_loop = [list_to_loop]
for i in list_to_loop:
if type(i) == bytes:
if len(i) > 0:
temp_list.append(i.decode())
else:
temp_list.append(i)
else:
temp_list.append(log.meta[k])
if len(temp_list) == 1:
if k == "config_dict":
# Convert config into dict
config_dict = ast.literal_eval(temp_list[0])
log.meta[k] = config_dict
else:
log.meta[k] = temp_list[0]
else:
log.meta[k] = temp_list
return log
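# Illustrative sketch (not part of the original module): the minimal DotMap
# layout MetaLog expects for an aggregated log. Run ids and values are invented
# placeholders.
def _example_meta_log():  # hypothetical helper
    raw = DotMap()
    for run_id in ("run_1", "run_2"):
        raw[run_id].meta = {"experiment_dir": "logs/" + run_id}
        raw[run_id].stats = {"loss": [1.0, 0.5, 0.25]}
        raw[run_id].time = {"num_updates": [0, 100, 200]}
    log = MetaLog(raw)
    assert sorted(log.eval_ids) == ["run_1", "run_2"]
    return log.filter(["run_1"])  # MetaLog restricted to a single run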
|
the-stack_0_2752 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
import os
import sys
from typing import Dict, List, Optional, Tuple
import numpy as np
from dataclasses import dataclass, field
from fairseq.data import Dictionary, HubertDataset
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.tasks import register_task
from fairseq.tasks.fairseq_task import FairseqTask
from omegaconf import MISSING
logger = logging.getLogger(__name__)
class LabelEncoder(object):
def __init__(self, dictionary: Dictionary) -> None:
self.dictionary = dictionary
def __call__(self, label: str) -> List[str]:
return self.dictionary.encode_line(
label,
append_eos=False,
add_if_not_exist=False,
)
@dataclass
class HubertPretrainingConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "path to data directory"})
fine_tuning: bool = field(
default=False, metadata={"help": "set to true if fine-tuning Hubert"}
)
labels: List[str] = field(
default_factory=lambda: ["ltr"],
metadata={
"help": (
"extension of the label files to load, frame-level labels for"
" pre-training, and sequence-level label for fine-tuning"
)
},
)
label_dir: Optional[str] = field(
default=None,
metadata={
"help": "if set, looks for labels in this directory instead",
},
)
label_rate: float = field(
default=-1.0,
metadata={"help": "label frame rate. -1.0 for sequence label"},
)
sample_rate: int = field(
default=16_000,
metadata={
"help": "target sample rate. audio files will be up/down "
"sampled to this rate"
},
)
normalize: bool = field(
default=False,
metadata={"help": "if set, normalizes input to have 0 mean and unit variance"},
)
enable_padding: bool = field(
default=False,
metadata={"help": "pad shorter samples instead of cropping"},
)
max_keep_size: Optional[int] = field(
default=None,
metadata={"help": "exclude sample longer than this"},
)
max_sample_size: Optional[int] = field(
default=None,
metadata={"help": "max sample size to crop to for batching"},
)
min_sample_size: Optional[int] = field(
default=None,
metadata={"help": "min sample size to crop to for batching"},
)
single_target: Optional[bool] = field(
default=False,
metadata={
"help": "if set, AddTargetDatasets outputs same keys " "as AddTargetDataset"
},
)
random_crop: Optional[bool] = field(
default=True,
metadata={"help": "always crop from the beginning if false"},
)
pad_audio: Optional[bool] = field(
default=False,
metadata={"help": "pad audio to the longest one in the batch if true"},
)
@register_task("hubert_pretraining", dataclass=HubertPretrainingConfig)
class HubertPretrainingTask(FairseqTask):
cfg: HubertPretrainingConfig
def __init__(
self,
cfg: HubertPretrainingConfig,
) -> None:
super().__init__(cfg)
logger.info(f"current directory is {os.getcwd()}")
logger.info(f"HubertPretrainingTask Config {cfg}")
self.cfg = cfg
self.fine_tuning = cfg.fine_tuning
if cfg.fine_tuning:
self.state.add_factory("target_dictionary", self.load_dictionaries)
else:
self.state.add_factory("dictionaries", self.load_dictionaries)
self.blank_symbol = "<s>"
@property
def source_dictionary(self) -> Optional[Dictionary]:
return None
@property
def target_dictionary(self) -> Optional[Dictionary]:
return self.state.target_dictionary
@property
def dictionaries(self) -> List[Dictionary]:
return self.state.dictionaries
@classmethod
def setup_task(
cls, cfg: HubertPretrainingConfig, **kwargs
) -> "HubertPretrainingTask":
return cls(cfg)
def load_dictionaries(self):
label_dir = self.cfg.data if self.cfg.label_dir is None else self.cfg.label_dir
dictionaries = [
Dictionary.load(f"{label_dir}/dict.{label}.txt")
for label in self.cfg.labels
]
return dictionaries[0] if self.cfg.fine_tuning else dictionaries
def get_label_dir(self) -> str:
if self.cfg.label_dir is None:
return self.cfg.data
return self.cfg.label_dir
def load_dataset(self, split: str, **kwargs) -> None:
manifest = f"{self.cfg.data}/{split}.tsv"
dicts = [self.target_dictionary] if self.cfg.fine_tuning else self.dictionaries
pad_list = [dict.pad() for dict in dicts]
eos_list = [dict.eos() for dict in dicts]
procs = [LabelEncoder(dict) for dict in dicts]
paths = [f"{self.get_label_dir()}/{split}.{l}" for l in self.cfg.labels]
# hubert v1: pad_audio=True, random_crop=False;
self.datasets[split] = HubertDataset(
manifest,
sample_rate=self.cfg.sample_rate,
label_paths=paths,
label_rates=self.cfg.label_rate,
pad_list=pad_list,
eos_list=eos_list,
label_processors=procs,
max_keep_sample_size=self.cfg.max_keep_size,
min_keep_sample_size=self.cfg.min_sample_size,
max_sample_size=self.cfg.max_sample_size,
pad_audio=self.cfg.pad_audio,
normalize=self.cfg.normalize,
store_labels=False,
random_crop=self.cfg.random_crop,
single_target=self.cfg.single_target,
)
def max_positions(self) -> Tuple[int, int]:
return (sys.maxsize, sys.maxsize)
def filter_indices_by_size(self, indices: np.array, *args, **kwargs) -> np.array:
return indices
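# Illustrative sketch (assumption, not part of fairseq): constructing the config
# dataclass directly, roughly what Hydra hands to `setup_task` after parsing.
# The data path is a placeholder; the label fields keep their defaults.
#
#   cfg = HubertPretrainingConfig(data="/path/to/manifests", label_rate=50.0)
#   task = HubertPretrainingTask.setup_task(cfg)
#   task.load_dataset("train")  # expects /path/to/manifests/train.tsv etc.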
|
the-stack_0_2753 | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Class representing a Cloudstack instance. This module uses the csapi
library which calls the cloudstack API. For more information refer to
the Cloudstack documentation at https://github.com/syed/PerfKitBenchmarker.git
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_virtual_machine as linux_vm
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.cloudstack import cloudstack_disk
from perfkitbenchmarker.providers.cloudstack import cloudstack_network
from perfkitbenchmarker.providers.cloudstack import util
from perfkitbenchmarker import providers
from six.moves import range
FLAGS = flags.FLAGS
class CloudStackVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing a CloudStack Virtual Machine."""
CLOUD = providers.CLOUDSTACK
DEFAULT_ZONE = 'QC-1'
DEFAULT_MACHINE_TYPE = '1vCPU.1GB'
DEFAULT_IMAGE = None
DEFAULT_USER_NAME = 'cca-user'
DEFAULT_PROJECT = 'cloudops-Engineering'
def __init__(self, vm_spec):
"""Initialize a CloudStack virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.
"""
super(CloudStackVirtualMachine, self).__init__(vm_spec)
self.network = cloudstack_network.CloudStackNetwork.GetNetwork(self)
self.cs = util.CsClient(FLAGS.CS_API_URL,
FLAGS.CS_API_KEY,
FLAGS.CS_API_SECRET)
self.project_id = None
if FLAGS.project:
project = self.cs.get_project(FLAGS.project)
assert project, "Project not found"
self.project_id = project['id']
zone = self.cs.get_zone(self.zone)
assert zone, "Zone not found"
self.zone_id = zone['id']
self.user_name = self.DEFAULT_USER_NAME
self.image = self.image or self.DEFAULT_IMAGE
self.disk_counter = 0
@vm_util.Retry(max_retries=3)
def _CreateDependencies(self):
"""Create VM dependencies."""
# Create an ssh keypair
with open(self.ssh_public_key) as keyfd:
self.ssh_keypair_name = 'perfkit-sshkey-%s' % FLAGS.run_uri
pub_key = keyfd.read()
if not self.cs.get_ssh_keypair(self.ssh_keypair_name, self.project_id):
res = self.cs.register_ssh_keypair(self.ssh_keypair_name,
pub_key,
self.project_id)
assert res, "Unable to create ssh keypair"
# Allocate a public ip
network_id = self.network.id
if self.network.is_vpc:
network_id = self.network.vpc_id
public_ip = self.cs.alloc_public_ip(network_id, self.network.is_vpc)
if public_ip:
self.ip_address = public_ip['ipaddress']
self.ip_address_id = public_ip['id']
else:
logging.warn("Unable to allocate public IP")
def _DeleteDependencies(self):
"""Delete VM dependencies."""
# Remove the keypair
if self.cs.get_ssh_keypair(self.ssh_keypair_name, self.project_id):
self.cs.unregister_ssh_keypair(self.ssh_keypair_name, self.project_id)
# Remove the IP
if self.ip_address_id:
self.cs.release_public_ip(self.ip_address_id)
@vm_util.Retry(max_retries=3)
def _Create(self):
"""Create a Cloudstack VM instance."""
service_offering = self.cs.get_serviceoffering(self.machine_type)
assert service_offering, "No service offering found"
template = self.cs.get_template(self.image, self.project_id)
assert template, "No template found"
network_id = self.network.id
vm = None
vm = self.cs.create_vm(self.name,
self.zone_id,
service_offering['id'],
template['id'],
[network_id],
self.ssh_keypair_name,
self.project_id)
assert vm, "Unable to create VM"
self._vm = vm
self.id = vm['virtualmachine']['id']
@vm_util.Retry(max_retries=3)
def _PostCreate(self):
"""Get the instance's data."""
        # Associate the public IP created with the VM id
network_interface = self._vm['virtualmachine']['nic'][0]
self.internal_ip = network_interface['ipaddress']
# Create a Static NAT rule
if not self.cs.snat_rule_exists(self.ip_address_id, self.id):
snat_rule = self.cs.enable_static_nat(self.ip_address_id,
self.id,
self.network.id)
assert snat_rule, "Unable to create static NAT"
def _Delete(self):
"""Delete the VM instance."""
# Delete the VM
self.cs.delete_vm(self.id)
def _Exists(self):
"""Returns true if the VM exists."""
        # Check if the VM exists
vm = self.cs.get_virtual_machine(self.name, self.project_id)
if vm and 'id' in vm:
return True
return False
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
        # Cloudstack doesn't really have a concept of local or remote disks. A VM
# starts with one disk and all other volumes have to be attached via the
# API
self.disks = []
for i in range(disk_spec.num_striped_disks):
name = 'disk-%s-%d-%d' % (self.name, i + 1, self.disk_counter)
scratch_disk = cloudstack_disk.CloudStackDisk(disk_spec,
name,
self.zone_id,
self.project_id)
self.disks.append(scratch_disk)
self.disk_counter += 1
self._CreateScratchDiskFromDisks(disk_spec, self.disks)
class CentOs7BasedCloudStackVirtualMachine(CloudStackVirtualMachine,
linux_vm.CentOs7Mixin):
DEFAULT_IMAGE = 'CentOS 7 HVM base (64bit)'
|
the-stack_0_2754 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
# with open('README.rst') as readme_file:
# readme = readme_file.read()
# with open('HISTORY.rst') as history_file:
# history = history_file.read()
requirements = [
'face_recognition_models>=0.3.0',
'Click>=6.0',
'dlib>=19.7',
'numpy',
'Pillow'
]
test_requirements = [
'tox',
'flake8'
]
setup(
name='face_recognition',
version='1.4.0',
description="Recognize faces from Python or from the command line",
# long_description=readme + '\n\n' + history,
author="Adam Geitgey",
author_email='[email protected]',
url='https://github.com/ageitgey/face_recognition',
packages=[
'face_recognition',
],
package_dir={'face_recognition': 'face_recognition'},
package_data={
'face_recognition': ['models/*.dat']
},
entry_points={
'console_scripts': [
'face_recognition=face_recognition.face_recognition_cli:main',
'face_detection=face_recognition.face_detection_cli:main'
]
},
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='face_recognition',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='tests',
tests_require=test_requirements
) |
the-stack_0_2755 | import copy
from engine.global_config import *
from engine.update_client import Update_Client
from engine.handler.input_handler import Input_Handler
from engine.status_check import Status_Check
from websocket_server.wswrap import WsWrap
from engine.character import Character
from engine.lex import Lex
from engine.inventory import inv
from pprint import pprint
###### Player Class ######
class Player(Character):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.entity_type = kwargs['entity_type']
self.core_attributes = kwargs['core_attributes']
self.player_state = kwargs['player_state']
self.stow_loc = kwargs['stow_loc']
self.client = kwargs['client']
self.unique_id = kwargs['unique_id']
def display_inventory(self):
inv = []
for i in self.inventory:
if self.inventory[i]['contents'] == None:
pass
else:
inv.append("{} {} {} {}".format(self.inventory[i]['contents'].name,
self.inventory[i]['worn'],
"your",
self.inventory[i]['name']))
if inv == []:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "You have nothing.")
else:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "You have {}.".format(", ".join(Lex.converted_contents(inv))))
# WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, rooms[room_num].name)
def echo(self):
if self.conditions['echo'] == True:
self.conditions['echo'] = False
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "Echo is now |alert| disabled. |alertx|")
else:
self.conditions['echo'] = True
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "Echo is now |success| enabled. |successx|")
def help(self, user_input, input_kwargs):
if len(user_input) < 2:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "|alert| Syntax: |alertx| HELP ON or HELP OFF.")
else:
if user_input[1] == "on":
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, 'Help is |success| ON.|successx|')
self.conditions['help'] = "enabled"
elif user_input[1] == "off":
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, 'Help is |alert| OFF|alertx|. ')
self.conditions['help'] = "disabled"
else:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "|alert| Syntax: |alertx| HELP ON or HELP OFF.")
def stow_set(self, user_input, input_kwargs):
stow_item = False
if len(user_input) == 1 or len(user_input) > 3:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "Syntax: STOW SET (container) or STOW (ITEM)")
elif user_input[1] == "set":
input_kwargs['target'] = Input_Handler.target_self_inventory(self, user_input[2], input_kwargs)
self.stow_loc = input_kwargs['target']
if self.stow_loc == None:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "Make sure you are wearing the container.")
else:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "Ok.")
elif user_input[1] != "set":
if self.stow_loc == None:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "You must first STOW SET (CONTAINER) for a container you are wearing.")
elif self.stow_loc.location_body['state'] != "worn":
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "You must be wearing that container.")
elif self.inventory['r_hand']['contents'] != None:
if user_input[1] in self.inventory['r_hand']['contents'].name:
stow_item = True
input_kwargs['target'] = self.inventory['r_hand']['contents']
input_kwargs['target_parent'] = self.stow_loc
elif self.inventory['l_hand']['contents'] != None:
if user_input[1] in self.inventory['l_hand']['contents'].name:
stow_item = True
input_kwargs['target'] = self.inventory['l_hand']['contents']
input_kwargs['target_parent'] = self.stow_loc
else:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "You can't stow that.")
if stow_item == True:
status, response = Status_Check.item_open_closed(self, user_input, input_kwargs)
if status == True:
if self.stow_loc == input_kwargs['target']:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "You can't stow something in itself.")
else:
Character.put_item(self, user_input, input_kwargs)
else:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "That is not open.")
else:
WsWrap.ws_send(self.client, {'type': 'text', 'spacing': 1}, "Error with STOW target.")
def target_player(self):
# for i in server.clients:
# if i['id']
pass
def convert_players_to_obj():
inventory = copy.deepcopy(inv)
for i in players:
print(players[i])
new_player = Player(i, # uuid_id
players[i][0], # entity_type
players[i][1], # name
players[i][2], # race
players[i][3], # gender
players[i][4], # vitals
players[i][5], # core_attributes
players[i][6], # conditions
players[i][7], # credit
inventory, # inventory
players[i][8], # location
players[i][9], # player_state
players[i][10], # stow_loc
None, # client
players[i][11]) # client_id / unique_id
players[i] = new_player
print(vars(new_player))
# pprint(vars(new_player))
def list(self):
print("Server.clients:", server.clients) |
the-stack_0_2756 | """Select and extract key frames in a video file.
Key frames are defined as a set of frames where each has an appropriate number
of matching points with its adjacent key frame.
RANSAC is applied to reduce the number of mismatched points and outliers.
"""
import cv2
import numpy as np
import argparse
def main(videofile):
# Construct VideoCapture object to get frame-by-frame stream
vid_cap = cv2.VideoCapture(videofile)
    # SIFT descriptors are used to describe the overlap between the
# current frame and its neighbor
sift = cv2.xfeatures2d.SIFT_create()
# The first key frame (frame0.jpg) is selected by default
success, last = vid_cap.read()
cv2.imwrite('key_frames/frame0.jpg', last)
print("Captured frame0.jpg")
count = 1
frame_num = 1
w = int(last.shape[1] * 2 / 3) # the region to detect matching points
stride = 40 # stride for accelerating capturing
min_match_num = 100 # minimum number of matches required (to stitch well)
max_match_num = 600 # maximum number of matches (to avoid redundant frames)
while success:
if count % stride == 0:
# Detect and compute key points and descriptors
kp1, des1 = sift.detectAndCompute(last[:, -w:], None)
kp2, des2 = sift.detectAndCompute(image[:, :w], None)
# Use the Brute-Force matcher to obtain matches
bf = cv2.BFMatcher(normType=cv2.NORM_L2) # Using Euclidean distance
matches = bf.knnMatch(des1, des2, k=2)
            # A valid match is one whose distance is less than match_ratio times
            # the distance of the second-best nearest neighbor.
match_ratio = 0.6
# Pick up valid matches
valid_matches = []
for m1, m2 in matches:
if m1.distance < match_ratio * m2.distance:
valid_matches.append(m1)
# At least 4 points are needed to compute Homography
if len(valid_matches) > 4:
img1_pts = []
img2_pts = []
for match in valid_matches:
img1_pts.append(kp1[match.queryIdx].pt)
img2_pts.append(kp2[match.trainIdx].pt)
# Formalize as matrices (for the sake of computing Homography)
img1_pts = np.float32(img1_pts).reshape(-1, 1, 2)
img2_pts = np.float32(img2_pts).reshape(-1, 1, 2)
# Compute the Homography matrix
_, mask = cv2.findHomography(img1_pts, img2_pts,
cv2.RANSAC, 5.0)
if min_match_num < np.count_nonzero(mask) < max_match_num:
# Save key frame as JPG file
last = image
print("Captured frame{}.jpg".format(frame_num))
cv2.imwrite('key_frames/frame%d.jpg' % frame_num, last)
frame_num += 1
success, image = vid_cap.read()
count += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file', nargs='?', default='360video.mp4',
help="path of the video file (default: 360video.mp4)")
args = parser.parse_args()
main(args.file)
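# Illustrative invocation (assumptions: the script is saved as extract_key_frames.py
# and a local key_frames/ directory already exists, since cv2.imwrite above does
# not create it):
#
#   python extract_key_frames.py 360video.mp4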
|
the-stack_0_2758 | """distutils.command.bdist
Implements the Distutils 'bdist' command (create a built [binary]
distribution)."""
__revision__ = "$Id$"
import os
from distutils.core import Command
from distutils.errors import *
from distutils.util import get_platform
def show_formats():
"""Print list of available formats (arguments to "--format" option).
"""
from distutils.fancy_getopt import FancyGetopt
formats = []
for format in bdist.format_commands:
formats.append(("formats=" + format, None,
bdist.format_command[format][1]))
pretty_printer = FancyGetopt(formats)
pretty_printer.print_help("List of available distribution formats:")
class bdist(Command):
description = "create a built (binary) distribution"
user_options = [('bdist-base=', 'b',
"temporary directory for creating built distributions"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('formats=', None,
"formats for distribution (comma-separated list)"),
('dist-dir=', 'd',
"directory to put final built distributions in "
"[default: dist]"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = ['skip-build']
help_options = [
('help-formats', None,
"lists available distribution formats", show_formats),
]
# The following commands do not take a format option from bdist
no_format_option = ('bdist_rpm',)
# This won't do in reality: will need to distinguish RPM-ish Linux,
# Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
default_format = {'posix': 'gztar',
'nt': 'zip',
'os2': 'zip'}
# Establish the preferred order (for the --help-formats option).
format_commands = ['rpm', 'gztar', 'bztar', 'ztar', 'tar',
'wininst', 'zip', 'msi']
# And the real information.
format_command = {'rpm': ('bdist_rpm', "RPM distribution"),
'gztar': ('bdist_dumb', "gzip'ed tar file"),
'bztar': ('bdist_dumb', "bzip2'ed tar file"),
'ztar': ('bdist_dumb', "compressed tar file"),
'tar': ('bdist_dumb', "tar file"),
'wininst': ('bdist_wininst',
"Windows executable installer"),
'zip': ('bdist_dumb', "ZIP file"),
'msi': ('bdist_msi', "Microsoft Installer")
}
def initialize_options(self):
self.bdist_base = None
self.plat_name = None
self.formats = None
self.dist_dir = None
self.skip_build = 0
def finalize_options(self):
# have to finalize 'plat_name' before 'bdist_base'
if self.plat_name is None:
if self.skip_build:
self.plat_name = get_platform()
else:
self.plat_name = self.get_finalized_command('build').plat_name
# 'bdist_base' -- parent of per-built-distribution-format
# temporary directories (eg. we'll probably have
# "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
if self.bdist_base is None:
build_base = self.get_finalized_command('build').build_base
self.bdist_base = os.path.join(build_base,
'bdist.' + self.plat_name)
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError(
"don't know how to create built distributions "
"on platform %s" % os.name)
if self.dist_dir is None:
self.dist_dir = "dist"
def run(self):
# Figure out which sub-commands we need to run.
commands = []
for format in self.formats:
try:
commands.append(self.format_command[format][0])
except KeyError:
raise DistutilsOptionError("invalid format '%s'" % format)
# Reinitialize and run each command.
for i in range(len(self.formats)):
cmd_name = commands[i]
sub_cmd = self.reinitialize_command(cmd_name)
if cmd_name not in self.no_format_option:
sub_cmd.format = self.formats[i]
# If we're going to need to run this command again, tell it to
# keep its temporary files around so subsequent runs go faster.
if cmd_name in commands[i+1:]:
sub_cmd.keep_temp = 1
self.run_command(cmd_name)
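# Usage note (illustrative, not part of distutils itself): the command above is
# normally driven from a project's setup.py, for example
#
#   python setup.py bdist --formats=gztar,zip
#
# which maps each requested format through `format_command` and runs the
# corresponding bdist_* sub-command (bdist_dumb twice in this example).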
|
the-stack_0_2761 | import os
import random
from unittest import mock
import requests
import string
import time
import signal
import socket
import subprocess
import uuid
import sys
import yaml
import pandas as pd
import pytest
import mlflow
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
import mlflow.pyfunc
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.file_utils import read_yaml, write_yaml
from mlflow.utils.environment import _get_pip_deps, _CONSTRAINTS_FILE_NAME
from mlflow.utils.requirements_utils import _strip_local_version_identifier, _get_installed_version
LOCALHOST = "127.0.0.1"
def get_safe_port():
"""Returns an ephemeral port that is very likely to be free to bind to."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((LOCALHOST, 0))
port = sock.getsockname()[1]
sock.close()
return port
def random_int(lo=1, hi=1e10):
return random.randint(lo, hi)
def random_str(size=10, chars=string.ascii_uppercase + string.digits):
return "".join(random.choice(chars) for _ in range(size))
def random_file(ext):
return "temp_test_%d.%s" % (random_int(), ext)
def score_model_in_sagemaker_docker_container(
model_uri,
data,
content_type,
flavor=mlflow.pyfunc.FLAVOR_NAME,
activity_polling_timeout_seconds=500,
):
"""
:param model_uri: URI to the model to be served.
:param data: The data to send to the docker container for testing. This is either a
Pandas dataframe or string of the format specified by `content_type`.
:param content_type: The type of the data to send to the docker container for testing. This is
one of `mlflow.pyfunc.scoring_server.CONTENT_TYPES`.
:param flavor: Model flavor to be deployed.
:param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before
declaring the scoring process to have failed.
"""
env = dict(os.environ)
env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8")
proc = _start_scoring_proc(
cmd=["mlflow", "sagemaker", "run-local", "-m", model_uri, "-p", "5000", "-f", flavor],
env=env,
)
return _evaluate_scoring_proc(proc, 5000, data, content_type, activity_polling_timeout_seconds)
def pyfunc_build_image(model_uri, extra_args=None):
"""
Builds a docker image containing the specified model, returning the name of the image.
:param model_uri: URI of model, e.g. runs:/some-run-id/run-relative/path/to/model
:param extra_args: List of extra args to pass to `mlflow models build-docker` command
"""
name = uuid.uuid4().hex
cmd = ["mlflow", "models", "build-docker", "-m", model_uri, "-n", name]
if extra_args:
cmd += extra_args
p = subprocess.Popen(cmd,)
assert p.wait() == 0, "Failed to build docker image to serve model from %s" % model_uri
return name
def pyfunc_serve_from_docker_image(image_name, host_port, extra_args=None):
"""
Serves a model from a docker container, exposing it as an endpoint at the specified port
on the host machine. Returns a handle (Popen object) to the server process.
"""
env = dict(os.environ)
env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8")
scoring_cmd = ["docker", "run", "-p", "%s:8080" % host_port, image_name]
if extra_args is not None:
scoring_cmd += extra_args
return _start_scoring_proc(cmd=scoring_cmd, env=env)
def pyfunc_serve_from_docker_image_with_env_override(
image_name, host_port, gunicorn_opts, extra_args=None
):
"""
Serves a model from a docker container, exposing it as an endpoint at the specified port
on the host machine. Returns a handle (Popen object) to the server process.
"""
env = dict(os.environ)
env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8")
scoring_cmd = [
"docker",
"run",
"-e",
"GUNICORN_CMD_ARGS=%s" % gunicorn_opts,
"-p",
"%s:8080" % host_port,
image_name,
]
if extra_args is not None:
scoring_cmd += extra_args
return _start_scoring_proc(cmd=scoring_cmd, env=env)
def pyfunc_serve_and_score_model(
model_uri,
data,
content_type,
activity_polling_timeout_seconds=500,
extra_args=None,
stdout=sys.stdout,
):
"""
:param model_uri: URI to the model to be served.
:param data: The data to send to the pyfunc server for testing. This is either a
Pandas dataframe or string of the format specified by `content_type`.
:param content_type: The type of the data to send to the pyfunc server for testing. This is
one of `mlflow.pyfunc.scoring_server.CONTENT_TYPES`.
:param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before
declaring the scoring process to have failed.
:param extra_args: A list of extra arguments to pass to the pyfunc scoring server command. For
example, passing ``extra_args=["--no-conda"]`` will pass the ``--no-conda``
flag to the scoring server to ensure that conda environment activation
is skipped.
"""
env = dict(os.environ)
env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8")
env.update(MLFLOW_TRACKING_URI=mlflow.get_tracking_uri())
env.update(MLFLOW_HOME=_get_mlflow_home())
port = get_safe_port()
scoring_cmd = [
"mlflow",
"models",
"serve",
"-m",
model_uri,
"-p",
str(port),
"--install-mlflow",
]
if extra_args is not None:
scoring_cmd += extra_args
proc = _start_scoring_proc(cmd=scoring_cmd, env=env, stdout=stdout, stderr=stdout)
return _evaluate_scoring_proc(proc, port, data, content_type, activity_polling_timeout_seconds)
def _get_mlflow_home():
"""
:return: The path to the MLflow installation root directory
"""
mlflow_module_path = os.path.dirname(os.path.abspath(mlflow.__file__))
    # The MLflow root directory is one level above the mlflow module location
return os.path.join(mlflow_module_path, os.pardir)
def _start_scoring_proc(cmd, env, stdout=sys.stdout, stderr=sys.stderr):
if os.name != "nt":
return subprocess.Popen(
cmd,
stdout=stdout,
stderr=stderr,
universal_newlines=True,
env=env,
# Assign the scoring process to a process group. All child processes of the
# scoring process will be assigned to this group as well. This allows child
# processes of the scoring process to be terminated successfully
preexec_fn=os.setsid,
)
else:
return subprocess.Popen(
cmd,
stdout=stdout,
stderr=stderr,
universal_newlines=True,
env=env,
# On Windows, `os.setsid` and `preexec_fn` are unavailable
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
)
class RestEndpoint:
def __init__(self, proc, port, activity_polling_timeout_seconds=250):
self._proc = proc
self._port = port
self._activity_polling_timeout_seconds = activity_polling_timeout_seconds
def __enter__(self):
for i in range(0, int(self._activity_polling_timeout_seconds / 5)):
assert self._proc.poll() is None, "scoring process died"
time.sleep(5)
# noinspection PyBroadException
try:
ping_status = requests.get(url="http://localhost:%d/ping" % self._port)
print("connection attempt", i, "server is up! ping status", ping_status)
if ping_status.status_code == 200:
break
except Exception:
print("connection attempt", i, "failed, server is not up yet")
if ping_status.status_code != 200:
raise Exception("ping failed, server is not happy")
print("server up, ping status", ping_status)
return self
def __exit__(self, tp, val, traceback):
if self._proc.poll() is None:
# Terminate the process group containing the scoring process.
# This will terminate all child processes of the scoring process
if os.name != "nt":
pgrp = os.getpgid(self._proc.pid)
os.killpg(pgrp, signal.SIGTERM)
else:
# https://stackoverflow.com/questions/47016723/windows-equivalent-for-spawning-and-killing-separate-process-group-in-python-3 # noqa
self._proc.send_signal(signal.CTRL_BREAK_EVENT)
self._proc.kill()
def invoke(self, data, content_type):
if type(data) == pd.DataFrame:
if content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON_RECORDS_ORIENTED:
data = data.to_json(orient="records")
elif (
content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON
or content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED
):
data = data.to_json(orient="split")
elif content_type == pyfunc_scoring_server.CONTENT_TYPE_CSV:
data = data.to_csv(index=False)
else:
raise Exception(
"Unexpected content type for Pandas dataframe input %s" % content_type
)
response = requests.post(
url="http://localhost:%d/invocations" % self._port,
data=data,
headers={"Content-Type": content_type},
)
return response
def _evaluate_scoring_proc(proc, port, data, content_type, activity_polling_timeout_seconds=250):
"""
:param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before
declaring the scoring process to have failed.
"""
with RestEndpoint(proc, port, activity_polling_timeout_seconds) as endpoint:
return endpoint.invoke(data, content_type)
@pytest.fixture(scope="module", autouse=True)
def set_boto_credentials():
os.environ["AWS_ACCESS_KEY_ID"] = "NotARealAccessKey"
os.environ["AWS_SECRET_ACCESS_KEY"] = "NotARealSecretAccessKey"
os.environ["AWS_SESSION_TOKEN"] = "NotARealSessionToken"
@pytest.fixture
def mock_s3_bucket():
"""
Creates a mock S3 bucket using moto
:return: The name of the mock bucket
"""
import boto3
import moto
with moto.mock_s3():
bucket_name = "mock-bucket"
s3_client = boto3.client("s3")
s3_client.create_bucket(Bucket=bucket_name)
yield bucket_name
class safe_edit_yaml(object):
def __init__(self, root, file_name, edit_func):
self._root = root
self._file_name = file_name
self._edit_func = edit_func
self._original = read_yaml(root, file_name)
def __enter__(self):
new_dict = self._edit_func(self._original.copy())
write_yaml(self._root, self._file_name, new_dict, overwrite=True)
def __exit__(self, *args):
write_yaml(self._root, self._file_name, self._original, overwrite=True)
def create_mock_response(status_code, text):
"""
    Create a mock response object with the given status_code and text
    :param status_code: int HTTP status code
    :param text: message from the response
    :return: mock HTTP response
"""
response = mock.MagicMock()
response.status_code = status_code
response.text = text
return response
def _read_yaml(path):
with open(path, "r") as f:
return yaml.safe_load(f)
def _read_lines(path):
with open(path, "r") as f:
return f.read().splitlines()
def _compare_conda_env_requirements(env_path, req_path):
assert os.path.exists(req_path)
custom_env_parsed = _read_yaml(env_path)
requirements = _read_lines(req_path)
assert _get_pip_deps(custom_env_parsed) == requirements
def _assert_pip_requirements(model_uri, requirements, constraints=None):
local_path = _download_artifact_from_uri(model_uri)
txt_reqs = _read_lines(os.path.join(local_path, "requirements.txt"))
conda_reqs = _get_pip_deps(_read_yaml(os.path.join(local_path, "conda.yaml")))
assert txt_reqs == requirements
assert conda_reqs == requirements
if constraints:
assert f"-c {_CONSTRAINTS_FILE_NAME}" in txt_reqs
assert f"-c {_CONSTRAINTS_FILE_NAME}" in conda_reqs
cons = _read_lines(os.path.join(local_path, _CONSTRAINTS_FILE_NAME))
assert cons == constraints
def _is_available_on_pypi(package, version=None, module=None):
"""
Returns True if the specified package version is available on PyPI.
:param package: The name of the package.
:param version: The version of the package. If None, defaults to the installed version.
:param module: The name of the top-level module provided by the package . For example,
if `package` is 'scikit-learn', `module` should be 'sklearn'. If None, defaults
to `package`.
"""
resp = requests.get("https://pypi.python.org/pypi/{}/json".format(package))
if not resp.ok:
return False
module = module or package
version = version or _get_installed_version(module)
version = _strip_local_version_identifier(version)
dist_files = resp.json()["releases"].get(version)
return (
dist_files is not None # specified version exists
and (len(dist_files) > 0) # at least one distribution file exists
and not dist_files[0].get("yanked", False) # specified version is not yanked
)
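# Illustrative sketch (assumption, not part of the original helpers): how the
# serving utilities above are typically combined inside a test. The model URI is
# a placeholder.
#
#   df = pd.DataFrame({"x": [1.0, 2.0]})
#   resp = pyfunc_serve_and_score_model(
#       model_uri="runs:/<run-id>/model",
#       data=df,
#       content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
#       extra_args=["--no-conda"],
#   )
#   assert resp.status_code == 200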
|
the-stack_0_2763 | """TorchScript
This module contains functionality to support the JIT's scripting frontend, notably:
- torch.jit.script
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
import functools
import collections
import enum
import inspect
import copy
import pickle
import warnings
from typing import Any, Dict, List, Tuple, Union, Callable
import torch
import torch._jit_internal as _jit_internal
from torch.utils import set_module
from torch.jit._recursive import ScriptMethodStub, wrap_cpp_module, infer_methods_to_compile
from torch.nn import Module
from torch.jit._state import _enabled
from torch.jit._builtins import _register_builtin
from torch._six import with_metaclass
from torch.jit.frontend import get_jit_def, get_default_args, get_jit_class_def
from torch._jit_internal import _qualified_name
from torch.jit._fuser import _graph_for
from torch.jit._state import (
_try_get_jit_cached_function,
_try_get_jit_cached_overloads,
_set_jit_function_cache,
_set_jit_overload_cache,
)
from torch.overrides import (
has_torch_function, has_torch_function_unary, has_torch_function_variadic)
from torch.jit._monkeytype_config import (
monkeytype_trace,
    JitTypeTraceConfig,
JitTypeTraceStore
)
type_trace_db = JitTypeTraceStore() # DB to hold all call traces from MonkeyType
torch._C.ScriptMethod.graph_for = _graph_for # type: ignore[attr-defined]
torch._C.ScriptFunction.graph_for = _graph_for # type: ignore[attr-defined]
ScriptFunction = torch._C.ScriptFunction
ScriptFunction.__doc__ = """
Functionally equivalent to a :class:`ScriptModule`, but represents a single
function and does not have any attributes or Parameters.
"""
set_module(ScriptFunction, "torch.jit")
if _enabled:
Attribute = collections.namedtuple("Attribute", ["value", "type"])
else:
def Attribute(value, type): # type: ignore[no-redef]
return value
Attribute.__doc__ = """
This method is a pass-through function that returns `value`, mostly
used to indicate to the TorchScript compiler that the left-hand side
expression is a class instance attribute with type of `type`. Note that
`torch.jit.Attribute` should only be used in `__init__` method of `nn.Module`
subclasses.
Though TorchScript can infer correct type for most Python expressions, there are some cases where
type inference can be wrong, including:
- Empty containers like `[]` and `{}`, which TorchScript assumes to be container of `Tensor`s
- Optional types like `Optional[T]` but assigned a valid value of type `T`, TorchScript would assume
it is type `T` rather than `Optional[T]`
In eager mode, it is simply a pass-through function that returns `value`
without other implications.
Example:
.. testcode::
import torch
from typing import Dict
class AttributeModule(torch.nn.Module):
def __init__(self):
                    super(AttributeModule, self).__init__()
self.foo = torch.jit.Attribute(0.1, float)
# we should be able to use self.foo as a float here
assert 0.0 < self.foo
self.names_ages = torch.jit.Attribute({}, Dict[str, int])
self.names_ages["someone"] = 20
assert isinstance(self.names_ages["someone"], int)
m = AttributeModule()
# m will contain two attributes
# 1. foo of type float
# 2. names_ages of type Dict[str, int]
.. testcleanup::
del AttributeModule
del m
Args:
value: An initial value to be assigned to attribute.
type: A Python type
Returns:
Returns `value`
"""
def _get_type_trace_db():
# This is a private API. Use of this for external purposes is discouraged.
return type_trace_db
# Gets a function from the name of a method on a type
def _get_function_from_type(cls, name):
return getattr(cls, name, None)
# ScriptClasses must be new-style classes because we construct them using their
# __new__ method.
def _is_new_style_class(cls):
if hasattr(cls, "__class__"):
return "__dict__" in dir(cls) or hasattr(cls, "__slots__")
def _compile_and_register_class(obj, rcb, qualified_name):
ast = get_jit_class_def(obj, obj.__name__)
defaults = torch.jit.frontend.get_default_args_for_class(obj)
script_class = torch._C._jit_script_class_compile(qualified_name, ast, defaults, rcb)
torch.jit._state._add_script_class(obj, script_class)
return script_class
# These OrderedDictWrapper classes replace the actual OrderedDicts in
# module with versions that get/set properties inside of Module.
# This allows us to reuse most of nn.Module while still storing the
# data in C++.
# Each OrderedDict needs to support:
# x not in view
# x in view
# view[name] = ...
# view.values()
# del view[name]
# view.items()
# view.keys()
# len(view)
class OrderedDictWrapper(object):
def __init__(self, _c):
self._c = _c
def keys(self):
return [k for k, v in self.items()]
def values(self):
return [v for k, v in self.items()]
def __len__(self):
return len(self.values())
def __delitem__(self, k):
raise RuntimeError("cannot delete methods or parameters of a script module")
def items(self):
return self._c.items()
def __setitem__(self, k, v):
if k not in self:
raise RuntimeError(
"Can't add a new parameter after ScriptModule construction."
" Tried to add '{}".format(k)
)
self._c.setattr(k, v)
def __contains__(self, k):
return self._c.contains(k)
def __getitem__(self, k):
if k not in self:
raise KeyError(k)
return self._c.getattr(k)
class OrderedModuleDict(OrderedDictWrapper):
def __init__(self, module, python_dict):
super(OrderedModuleDict, self).__init__(torch._C.ModuleDict(module))
# contains _both_ script modules and non-script python-only modules
# because script modules are subclassed in python and the
# C++ Module class will not hold references to them,
# to ensure that you always get the same python value here
# we store it in the python dict as well
self._python_modules = python_dict
def items(self):
r = self._python_modules.items()
return r
def __contains__(self, k):
return k in self._python_modules
def __setitem__(self, k, v):
# Cases where sub-module can be re-assigned after ScriptModule construction
        # 1. If the attr is a module interface type, it's guaranteed that the module is
        #    not inlined in the graph, so it's safe to swap a new ScriptModule in.
        # 2. If the new value is a ScriptModule with the same JIT type, IR won't change
# and it's legit to swap a new module in.
# In these two cases we allow swapping a new scripted module and update the
# corresponding python module dict to keep sync.
# Note: the value to be swapped in has to be ScriptModule instead of nn.Module,
# otherwise it's illegal and we throw error.
if isinstance(v, ScriptModule):
self._c.setattr(k, v)
self._python_modules[k] = v
else:
raise RuntimeError(
"Cannot re-assign modules in a ScriptModule with non-scripted "
"module, tried to replace existing module '{}': {}".format(k, v)
)
def __getitem__(self, k):
return self._python_modules[k]
# For each user-defined class that subclasses ScriptModule, this meta-class:
# (1) finds all the methods annotated with @script_method in a ScriptModule and
# removes them from the class attributes
# (2) puts a wrapper around the class's __init__ method to recursively compile
# all of the script_methods with the module after the original __init__ has
# run. This has to occur after the user-defined __init__ so that submodules and
# parameters are initialized _before_ the script compiler resolve references to
# `self.param` or `self.module`.
class ScriptMeta(type):
def __init__(cls, name, bases, attrs): # noqa: B902
# Aggregate all the ScriptMethods and constants from superclasses
cls._methods: Dict[str, Any] = {}
cls._constants_set = set(getattr(cls, "__constants__", ()))
for base in reversed(bases):
for k, v in getattr(base, "_methods", {}).items():
cls._methods[k] = v
base_constants = getattr(base, "_constants_set", set())
cls._constants_set = cls._constants_set.union(base_constants)
# find all the script methods of the current class
for k, v in sorted(attrs.items()):
if isinstance(v, ScriptMethodStub):
delattr(cls, k)
cls._methods[v.original_method.__name__] = v
if getattr(cls, "_disable_script_meta", False):
# We leave built-in ScriptModule types alone, since this metaclass
# is only for compiling user classes that inherit from
# ScriptModule.
return super(ScriptMeta, cls).__init__(name, bases, attrs)
original_init = getattr(cls, "__init__", lambda self: None)
@functools.wraps(original_init)
def init_then_script(self, *args, **kwargs):
num_methods = len(cls._methods)
original_init(self, *args, **kwargs)
added_methods_in_init = len(cls._methods) > num_methods
if type(self) == cls:
def make_stubs(module):
cls = type(module)
if hasattr(cls, "_methods"):
return [v for k, v in sorted(cls._methods.items())]
else:
return infer_methods_to_compile(module)
self.__dict__[
"_actual_script_module"
] = torch.jit._recursive.create_script_module(self, make_stubs, share_types=not added_methods_in_init)
# Delete the Python attributes that now shadow the ScriptModule
# ones, so that __getattr__ and __setattr__ will properly find
# the scripted versions.
concrete_type = self._actual_script_module._concrete_type
for name in concrete_type.get_attributes():
delattr(self, name)
for name, _ in concrete_type.get_modules():
delattr(self, name)
for name in ("_parameters", "_buffers", "_modules"):
delattr(self, name)
cls.__init__ = init_then_script # type: ignore[misc]
return super(ScriptMeta, cls).__init__(name, bases, attrs)
class _CachedForward(object):
def __get__(self, obj, cls):
return self.__getattr__("forward") # type: ignore[attr-defined]
class ScriptWarning(Warning):
pass
def script_method(fn):
if not _enabled:
return fn
# NOTE: we need to traverse two frames here because the meta-class frame
# for ScriptModule will be present, as opposed to invoking @script on a
# a function or invoking define() on a CompilationUnit.
# The stack will look like:
#
# 0. createResolutionCallback()
# 1. script_method()
# 2. ScriptModule metaclass frame
# 3. Surrounding scope
#
# createResolutionCallback internally adds 1 to get us to the scope of this
# function (the calling function). Adding 2 gets us to the proper surrounding scope.
_rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=2)
ast = get_jit_def(fn, fn.__name__, self_name="ScriptModule")
return ScriptMethodStub(_rcb, ast, fn)
class ConstMap:
def __init__(self, const_mapping):
self.const_mapping = const_mapping
def __getattr__(self, attr):
return self.const_mapping[attr]
if _enabled:
# this is a Python 'non-data descriptor' that causes the first access
# to ScriptModule's forward to lookup the forward method and stash
# it in the objects dict. Due to the standard rules for attribute lookup,
# subsequent lookups will just directly return the previously looked up method.
# This is necessary because nn.Module defines forward as a method. If we
# did nothing, __getattr__ would not be called. Instead we'd get nn.Module.forward
# which always throws an exception.
class ScriptModule(with_metaclass(ScriptMeta, Module)): # type: ignore[misc]
r"""
A wrapper around C++ ``torch::jit::Module``. ``ScriptModule``\s
contain methods, attributes, parameters, and
constants. These can be accessed the same way as on a normal ``nn.Module``.
"""
__jit_unused_properties__ = ['code', 'code_with_constants', 'graph', 'inlined_graph', 'original_name']
def __init__(self):
super(ScriptModule, self).__init__()
forward = _CachedForward()
def __getattr__(self, attr):
if "_actual_script_module" not in self.__dict__:
return super(ScriptModule, self).__getattr__(attr)
return getattr(self._actual_script_module, attr)
def __setattr__(self, attr, value):
if "_actual_script_module" not in self.__dict__:
# Unwrap torch.jit.Attribute into a regular setattr + record
# the provided type in __annotations__.
#
# This ensures that if we use the attr again in `__init__`, it
# will look like the actual value, not an instance of Attribute.
if isinstance(value, Attribute):
# NB: Ensure that we set __annotations__ on the specific
# class in question, and not on a superclass (which would
# be wrong wrong wrong!).
# See also https://github.com/pytorch/pytorch/issues/39463
if "__annotations__" not in self.__class__.__dict__:
self.__class__.__annotations__ = {}
self.__annotations__[attr] = value.type
value = value.value
return super(ScriptModule, self).__setattr__(attr, value)
setattr(self._actual_script_module, attr, value)
def define(self, src):
if "_actual_script_module" in self.__dict__:
# If we have completed initialization, just defer to the
# backing RecursiveScriptModule to eagerly compile the provided
# source.
return self._actual_script_module.define(src)
# Otherwise, we are still in the object's __init__.
# In that case, add `src` as a stub to be compiled.
#
# We use frames_up=1 to get to the proper surrounding scope. The stack
# will look like:
# 0. createResolutionCallback
# 1. define()
# 2. surrounding scope.
#
# createResolutionCallback internally adds 1 to get us to our frame, then
# we add 1 to get to the proper surrounding scope.
rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
ast = torch._C._parse_source_def(src)
self._methods[ast.name().name] = ScriptMethodStub(rcb, ast, None)
def _replicate_for_data_parallel(self):
return self._actual_script_module._replicate_for_data_parallel()
class RecursiveScriptModule(ScriptModule):
# XXX: RecursiveScriptModule inherits from ScriptModule for the sole
# reason that it retains the existing isinstance(ScriptModule)
# behavior.
r"""
The core data structure in TorchScript is the ``ScriptModule``. It is an
analogue of torch's ``nn.Module`` and represents an entire model as a tree of
submodules. Like normal modules, each individual module in a ``ScriptModule`` can
have submodules, parameters, and methods. In ``nn.Module``\s methods are implemented
as Python functions, but in ``ScriptModule``\s methods are implemented as
TorchScript functions, a statically-typed subset of Python that contains all
of PyTorch's built-in Tensor operations. This difference allows your
``ScriptModule``\s code to run without the need for a Python interpreter.
``ScriptModule``\s should not be created manually, instead use
either :func:`tracing <torch.jit.trace>` or :func:`scripting <torch.jit.script>`.
Tracing and scripting can be applied incrementally and :ref:`composed as necessary <Types>`.
* Tracing records the tensor operations as executed with a set of example inputs and uses these
operations to construct a computation graph. You can use the full dynamic behavior of Python with tracing,
but values other than Tensors and control flow aren't captured in the graph.
* Scripting inspects the Python code of the model
and compiles it to TorchScript. Scripting allows the use of many `types`_ of values and supports dynamic control flow.
Many, but not all features of Python are supported by the compiler, so changes to the source code may be necessary.
"""
_disable_script_meta = True
def __init__(self, cpp_module):
self.__dict__["_initializing"] = True
self._c = cpp_module
super(RecursiveScriptModule, self).__init__()
# Delete the 'training' attribute set up by `Module.__init__`. It
# will get set on the underlying cpp module, so we delete it here
# to avoid this version shadowing the cpp module version.
delattr(self, "training")
@staticmethod
def _construct(cpp_module, init_fn):
"""
Construct a RecursiveScriptModule that's ready for use. PyTorch
            code should use this to construct a RecursiveScriptModule instead
            of calling `__init__` directly, as it makes sure the
object is properly finalized (and in the future, we may take
control of how the RecursiveScriptModule instance is created).
Args:
cpp_module: The C++ Module that will hold the actual state of
this RecursiveScriptModule instance.
init_fn: Lambda that initializes the RecursiveScriptModule passed to it.
"""
script_module = RecursiveScriptModule(cpp_module)
init_fn(script_module)
# Finalize the ScriptModule: replace the nn.Module state with our
# custom implementations and flip the _initializing bit.
RecursiveScriptModule._finalize_scriptmodule(script_module)
return script_module
@staticmethod
def _finalize_scriptmodule(script_module):
script_module._parameters = OrderedDictWrapper(
torch._C.ParameterDict(script_module._c)
)
script_module._buffers = OrderedDictWrapper(
torch._C.BufferDict(script_module._c)
)
script_module._modules = OrderedModuleDict(
script_module._c, script_module._modules
)
script_module._initializing = False
def _reconstruct(self, cpp_module):
"""
Re-construct an instance of RecursiveScriptModule using an instance of a C++ module.
Args:
cpp_module: The C++ module that this RecursiveScriptModule will be rebuilt around.
"""
self.__init__(cpp_module) # type: ignore[misc]
# Copy the concrete type from the C++ module to this ScriptModule.
self._concrete_type = torch._C.ConcreteModuleType.from_jit_type(
self._c._type()
)
# Copy submodules from the C++ module to this ScriptModule.
modules = {}
for name, cpp_module in torch._C.ModuleDict(self._c).items():
modules[name] = wrap_cpp_module(cpp_module)
self._modules = OrderedModuleDict(self._c, modules)
# Copy parameters and buffers.
self._parameters = OrderedDictWrapper(torch._C.ParameterDict(self._c))
self._buffers = OrderedDictWrapper(torch._C.BufferDict(self._c))
# Get rid of the functions from the old C++ module.
self.__dict__ = {
k: v
for k, v in self.__dict__.items()
if not isinstance(v, torch._C.ScriptMethod)
}
self.__dict__["_initializing"] = False
@property
def graph(self):
r"""
Returns a string representation of the internal graph for the
``forward`` method. See :ref:`interpreting-graphs` for details.
"""
return self._c._get_method("forward").graph
@property
def inlined_graph(self):
r"""
Returns a string representation of the internal graph for the
``forward`` method. This graph will be preprocessed to inline all function and method calls.
See :ref:`interpreting-graphs` for details.
"""
return self.forward.inlined_graph
@property
def code(self):
r"""
Returns a pretty-printed representation (as valid Python syntax) of
the internal graph for the ``forward`` method. See
:ref:`inspecting-code` for details.
"""
return self.forward.code
@property
def code_with_constants(self):
r"""
Returns a tuple of:
[0] a pretty-printed representation (as valid Python syntax) of
the internal graph for the ``forward`` method. See `code`.
[1] a ConstMap following the CONSTANT.cN format of the output in [0].
The indices in the [0] output are keys to the underlying constant's values.
See :ref:`inspecting-code` for details.
"""
r = self.forward.code_with_constants
return (r[0], ConstMap(r[1]))
def save(self, f, **kwargs):
r"""
save(f, _extra_files={})
See :func:`torch.jit.save <torch.jit.save>` for details.
"""
return self._c.save(str(f), **kwargs)
def _save_for_lite_interpreter(self, *args, **kwargs):
r"""
_save_for_lite_interpreter(f)
Add (or update) the bytecode session to the script model. The updated model is used
in lite interpreter for mobile applications.
Args:
f: a string containing a file name.
_extra_files: Map from filename to contents which will be stored as part of 'f'.
"""
return self._c._save_for_mobile(*args, **kwargs)
def _save_to_buffer_for_lite_interpreter(self, *args, **kwargs):
return self._c._save_to_buffer_for_mobile(*args, **kwargs)
def save_to_buffer(self, *args, **kwargs):
return self._c.save_to_buffer(*args, **kwargs)
def get_debug_state(self, *args, **kwargs):
return self._c.get_debug_state()
def extra_repr(self):
return "original_name={}".format(self.original_name)
def graph_for(self, *args, **kwargs):
return self.forward.graph_for(*args, **kwargs)
@property
def original_name(self):
if type(self) == str(self._c._type().name()):
return ""
return str(self._c._type().name())
def define(self, src):
# We use frames_up=1 to get to the proper surrounding scope. The stack
# will look like:
# 0. createResolutionCallback
# 1. define()
# 2. surrounding scope.
#
# createResolutionCallback internally adds 1 to get us to our frame, then
# we add 1 to get to the proper surrounding scope.
rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
self._c._define(self._concrete_type, src, rcb)
def __getattr__(self, attr):
if "_initializing" not in self.__dict__:
raise RuntimeError(
"ScriptModule has not been initialized, did you forget to call super's init?"
)
if self._initializing:
return super(RecursiveScriptModule, self).__getattr__(attr)
# _modules check is before hasattr since modules are included as attributes in _c,
# but we want to get the python wrapper from _modules instead of the raw _c object.
if attr in self._modules:
return self._modules[attr]
elif self._c.hasattr(attr):
return self._c.getattr(attr)
elif self._c._has_method(attr):
script_method = self._c._get_method(attr)
# cache method so future calls do not go through __getattr__
# to improve invocation performance
self.__dict__[attr] = script_method
return script_method
return super(RecursiveScriptModule, self).__getattr__(attr)
def __setattr__(self, attr, value):
if self._initializing:
return super(RecursiveScriptModule, self).__setattr__(attr, value)
if attr in self._modules:
self._modules[attr] = value
elif self._c.hasattr(attr):
self._c.setattr(attr, value)
elif (
hasattr(self, "_concrete_type")
and attr in self._concrete_type.get_constants().keys()
):
# TODO: we don't have _concrete_type set after load(), and in general we lose constant information.
# We should encode constants as class type attributes (or something) so it persists across save/load.
raise AttributeError(
"Cannot mutate TorchScript constant value: '{}'. Value: '{}'".format(
attr, value
)
)
else:
# We allow setting Python attributes on the ScriptModule, for
# when people want to stash some convenience info on it.
# TODO: it's possible that the following is confusing:
# s = torch.jit.script(...)
# s.python_attr = ...
# s.save() <--- this doesn't have `python_attr`
# It's fairly trivial to save enough info to warn in this case.
return super(RecursiveScriptModule, self).__setattr__(attr, value)
def __getstate__(self):
raise pickle.PickleError(
"ScriptModules cannot be deepcopied using copy.deepcopy or saved using torch.save. "
+ "Mixed serialization of script and non-script modules is not supported. "
+ "For purely script modules use my_script_module.save(<filename>) instead."
)
def __copy__(self):
return torch.jit._recursive.wrap_cpp_module(copy.copy(self._c))
def __deepcopy__(self, memo):
return torch.jit._recursive.wrap_cpp_module(copy.deepcopy(self._c, memo))
# Python magic methods do method lookups on an object's class type, instead of looking up
# the method defines on the class instance. In order to continue to expose the magic methods
# of builtin-containers (ModuleList, Sequential, ModuleDict) to Python, we
# define magic methods here as a shim to the correct attribute.
def forward_magic_method(self, method_name, *args, **kwargs):
self_method = getattr(self, method_name)
if getattr(self_method, "__func__", None) == getattr(
RecursiveScriptModule, method_name
):
raise NotImplementedError()
return self_method(*args, **kwargs)
def __iter__(self):
return self.forward_magic_method("__iter__")
def __getitem__(self, idx):
return self.forward_magic_method("__getitem__", idx)
def __len__(self):
return self.forward_magic_method("__len__")
def __contains__(self, key):
return self.forward_magic_method("__contains__", key)
# dir is defined by the base nn.Module, so instead of throwing if
# it is not overridden, we call into the nn.Module __dir__ method
def __dir__(self):
self_method = self.__dir__
if self_method.__func__ == _get_function_from_type( # type: ignore[attr-defined]
RecursiveScriptModule, "__dir__"
):
return super(RecursiveScriptModule, self).__dir__()
return self_method()
# to resolve bool(value), Python looks if __bool__ is defined then __iter__
# is defined then returns true for classes. Since __iter__() on this
# class throws if it isn't overridden, we define __bool__ to preserve default behavior
def __bool__(self):
self_method = self.__bool__
if self_method.__func__ == _get_function_from_type( # type: ignore[attr-defined]
RecursiveScriptModule, "__bool__"
):
return True
return self_method()
def _replicate_for_data_parallel(self):
# we have to initialize ScriptModule properly so that
# it works with pybind11
def init_fn(script_module):
# Don't do anything here, we'll initialize the ScriptModule below
return
return RecursiveScriptModule._construct(
self._c._replicate_for_data_parallel(), init_fn
)
# Need to copy all RecursiveScriptModule methods to ScriptModule.
#
# This is because `super(MyScriptModule, self).foo()` does not use
# `__getattr__` to look up `foo`. So we need to make each method available on
# the ScriptModule manually.
for name, item in RecursiveScriptModule.__dict__.items():
if not callable(item) and not isinstance(item, property):
continue
if name.startswith("__") or hasattr(ScriptModule, name):
continue
# We can copy over the implementation wholesale because besides the
# `super()` thing above, ScriptModule behaves exactly like
# RecursiveScriptModule
setattr(ScriptModule, name, item)
def _get_methods(cls):
import inspect
# In Python 3 unbound methods are functions, but in Python 2 they are methods
return inspect.getmembers(
cls, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x)
)
_compiled_methods_allowlist = {
"forward",
"register_buffer",
"register_parameter",
"add_module",
"_apply",
"apply",
"cuda",
"cpu",
"to",
"type",
"float",
"double",
"half",
"state_dict",
"_save_to_state_dict",
"load_state_dict",
"_load_from_state_dict",
"_named_members",
"parameters",
"named_parameters",
"buffers",
"named_buffers",
"children",
"named_children",
"modules",
"named_modules",
"zero_grad",
"share_memory",
"_get_name",
"extra_repr",
"_slow_forward",
"_tracing_name",
"eval",
"train",
}
def _make_fail(name):
def fail(self, *args, **kwargs):
raise RuntimeError(name + " is not supported on ScriptModules")
return fail
for name, method in _get_methods(torch.nn.Module):
if name.startswith("__"):
continue
if (
name not in RecursiveScriptModule.__dict__
and name not in _compiled_methods_allowlist
):
setattr(RecursiveScriptModule, method.__name__, _make_fail(name))
else:
# TODO MAKE SURE THAT DISABLING WORKS
class ScriptModule(torch.nn.Module): # type: ignore[no-redef]
def __init__(self, arg=None):
super().__init__()
class RecursiveScriptModule(ScriptModule): # type: ignore[no-redef]
def __init__(self, arg=None):
super().__init__()
def call_prepare_scriptable_func_impl(obj, memo):
if not isinstance(obj, torch.nn.Module):
return obj
obj_id = id(obj)
# If obj_id is in memo, obj has already been prepared or is being
# prepared in another call up the stack.
if obj_id in memo:
return memo[id(obj)]
obj = obj.__prepare_scriptable__() if hasattr(obj, '__prepare_scriptable__') else obj # type: ignore[operator]
# Record obj in memo to avoid infinite recursion in the case of cycles in the module
# hierarchy when recursing below.
memo[obj_id] = obj
new_obj_dict = {}
for name, sub_module in obj.__dict__.items():
if name == '_modules':
for k, v in sub_module.items():
sub_module[k] = call_prepare_scriptable_func_impl(v, memo)
new_obj_dict[name] = sub_module
elif isinstance(sub_module, torch.nn.Module) and not isinstance(sub_module, ScriptModule):
new_obj_dict[name] = call_prepare_scriptable_func_impl(sub_module, memo)
else:
new_obj_dict[name] = sub_module
for k, v in new_obj_dict.items():
obj.__dict__[name] = v
return obj
def call_prepare_scriptable_func(obj):
memo: Dict[int, torch.nn.Module] = {}
return call_prepare_scriptable_func_impl(obj, memo)
def _script_pdt(obj, optimize=None, _frames_up=0, _rcb=None,
example_inputs: Union[List[Tuple], Dict[Callable, List[Tuple]], None] = None):
# This is a private API, intended for internal use only. Usage of this API is only for experimental
# purposes only and is highly discouraged.
global type_trace_db
if not _enabled:
return obj
if optimize is not None:
warnings.warn(
"`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
)
# No-op for modules and functions that are already scripted
if isinstance(obj, ScriptModule):
return obj
if isinstance(obj, ScriptFunction):
return obj
if example_inputs:
# If MonkeyType is installed, enable profile directed type annotation
# Check if example_inputs are defined and generate call traces
# for the method by running eager mode version of the method with
# the provide example inputs. This logs all the traces in type_trace_db
type_trace_db = JitTypeTraceStore()
if monkeytype_trace:
monkeytype_config = JitTypeTraceConfig(type_trace_db)
with monkeytype_trace(monkeytype_config):
if isinstance(example_inputs, Dict):
# If the obj is an nn.Module or a class, then each method is
# executed with the arguments provided in the example inputs.
# example inputs here will be of type Dict(class.method, (arguments))
# This is used to infer type annotations for those methods
# which are not called directly under the hood of monkeytype.
for module, example_input in example_inputs.items():
for example in example_input:
module(*example)
elif isinstance(example_inputs, List):
for examples in example_inputs:
obj(*examples)
else:
warnings.warn("Error: Unable to infer types. Please format the inputs to type `List[Tuple]`"
" or `Dict[Callable, List[Tuple]]` to be run with MonkeyType.")
else:
warnings.warn("Warning: monkeytype is not installed. Please install https://github.com/Instagram/MonkeyType "
"to enable Profile-Directed Typing in TorchScript. Refer to "
"https://github.com/Instagram/MonkeyType/blob/master/README.rst to install MonkeyType. ")
return script(obj, optimize, _frames_up, _rcb)
def script(obj, optimize=None, _frames_up=0, _rcb=None):
r"""
Scripting a function or ``nn.Module`` will inspect the source code, compile
it as TorchScript code using the TorchScript compiler, and return a :class:`ScriptModule` or
:class:`ScriptFunction`. TorchScript itself is a subset of the Python language, so not all
features in Python work, but we provide enough functionality to compute on
tensors and do control-dependent operations. For a complete guide, see the
:ref:`language-reference`.
``torch.jit.script`` can be used as a function for modules and functions, and as a decorator
``@torch.jit.script`` for :ref:`torchscript-classes` and functions.
Args:
obj (callable, class, or ``nn.Module``): The ``nn.Module``, function, or class type to
compile.
Returns:
If ``obj`` is ``nn.Module``, ``script`` returns
a :class:`ScriptModule` object. The returned :class:`ScriptModule` will
have the same set of sub-modules and parameters as the
original ``nn.Module``. If ``obj`` is a standalone function,
a :class:`ScriptFunction` will be returned.
**Scripting a function**
The ``@torch.jit.script`` decorator will construct a :class:`ScriptFunction`
by compiling the body of the function.
Example (scripting a function):
.. testcode::
import torch
@torch.jit.script
def foo(x, y):
if x.max() > y.max():
r = x
else:
r = y
return r
print(type(foo)) # torch.jit.ScriptFunction
# See the compiled graph as Python code
print(foo.code)
# Call the function using the TorchScript interpreter
foo(torch.ones(2, 2), torch.ones(2, 2))
.. testoutput::
:hide:
...
**Scripting an nn.Module**
Scripting an ``nn.Module`` by default will compile the ``forward`` method and recursively
compile any methods, submodules, and functions called by ``forward``. If a ``nn.Module`` only uses
features supported in TorchScript, no changes to the original module code should be necessary. ``script``
will construct :class:`ScriptModule` that has copies of the attributes, parameters, and methods of
the original module.
Example (scripting a simple module with a Parameter):
.. testcode::
import torch
class MyModule(torch.nn.Module):
def __init__(self, N, M):
super(MyModule, self).__init__()
# This parameter will be copied to the new ScriptModule
self.weight = torch.nn.Parameter(torch.rand(N, M))
# When this submodule is used, it will be compiled
self.linear = torch.nn.Linear(N, M)
def forward(self, input):
output = self.weight.mv(input)
# This calls the `forward` method of the `nn.Linear` module, which will
# cause the `self.linear` submodule to be compiled to a `ScriptModule` here
output = self.linear(output)
return output
scripted_module = torch.jit.script(MyModule(2, 3))
Example (scripting a module with traced submodules):
.. testcode::
import torch
import torch.nn as nn
import torch.nn.functional as F
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
# torch.jit.trace produces a ScriptModule's conv1 and conv2
self.conv1 = torch.jit.trace(nn.Conv2d(1, 20, 5), torch.rand(1, 1, 16, 16))
self.conv2 = torch.jit.trace(nn.Conv2d(20, 20, 5), torch.rand(1, 20, 16, 16))
def forward(self, input):
input = F.relu(self.conv1(input))
input = F.relu(self.conv2(input))
return input
scripted_module = torch.jit.script(MyModule())
To compile a method other than ``forward`` (and recursively compile anything it calls), add
the :func:`@torch.jit.export <torch.jit.export>` decorator to the method. To opt out of compilation
use :func:`@torch.jit.ignore <torch.jit.ignore>` or :func:`@torch.jit.unused <torch.jit.unused>`.
Example (an exported and ignored method in a module)::
import torch
import torch.nn as nn
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
@torch.jit.export
def some_entry_point(self, input):
return input + 10
@torch.jit.ignore
def python_only_fn(self, input):
# This function won't be compiled, so any
# Python APIs can be used
import pdb
pdb.set_trace()
def forward(self, input):
if self.training:
self.python_only_fn(input)
return input * 99
scripted_module = torch.jit.script(MyModule())
print(scripted_module.some_entry_point(torch.randn(2, 2)))
print(scripted_module(torch.randn(2, 2)))
"""
if not _enabled:
return obj
if optimize is not None:
warnings.warn(
"`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
)
# No-op for modules and functions that are already scripted
if isinstance(obj, ScriptModule):
return obj
if isinstance(obj, ScriptFunction):
return obj
if isinstance(obj, torch.nn.Module):
obj = call_prepare_scriptable_func(obj)
return torch.jit._recursive.create_script_module(
obj, torch.jit._recursive.infer_methods_to_compile
)
qualified_name = _qualified_name(obj)
if inspect.isclass(obj):
# If this type is a `nn.Module` subclass, they probably meant to pass
# an instance instead of a Module
if issubclass(obj, torch.nn.Module):
raise RuntimeError(
"Type '{}' cannot be compiled since it inherits"
" from nn.Module,"
" pass an instance instead".format(obj)
)
# Enums are automatically usable in TorchScript, explicitly scripting
# is not necessary, but not harmful either.
if issubclass(obj, enum.Enum):
return obj
if not _is_new_style_class(obj):
raise RuntimeError(
"TorchScript classes must be new-style classes. "
"Please inherit from 'object'."
)
if len(obj.mro()) > 2:
raise RuntimeError(
"TorchScript classes does not support inheritance yet. "
"Please directly inherit from 'object'."
)
if _rcb is None:
_rcb = _jit_internal.createResolutionCallbackFromFrame(_frames_up + 1)
_compile_and_register_class(obj, _rcb, qualified_name)
return obj
else:
        # this is a decorated fn, and we need to get the underlying fn and its rcb
if hasattr(obj, "__script_if_tracing_wrapper"):
obj = obj.__original_fn
_rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
_check_directly_compile_overloaded(obj)
maybe_already_compiled_fn = _try_get_jit_cached_function(obj)
if maybe_already_compiled_fn:
return maybe_already_compiled_fn
ast = get_jit_def(obj, obj.__name__)
if _rcb is None:
_rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
fn = torch._C._jit_script_compile(
qualified_name, ast, _rcb, get_default_args(obj)
)
# Forward docstrings
fn.__doc__ = obj.__doc__
_set_jit_function_cache(obj, fn)
return fn
# overloads are registered in _jit_internal and compiled here so that _overload
# can be used in nn/functional.py without an import cycle
def _check_overload_defaults(impl_defaults, overload_defaults, loc):
for name, overload_value in overload_defaults.items():
if name not in impl_defaults or impl_defaults[name] != overload_value:
raise torch.jit.frontend.FrontendError(
loc,
"Default parameters on overloads do not affect the runtime so they "
"must equal to the default parameter on the implementation function. Found on "
"parameter {name}".format(name=name),
)
def _compile_function_with_overload(overload_fn, qual_name, impl_fn):
overload_decl = get_jit_def(overload_fn, overload_fn.__name__).decl()
overload_signature = torch.jit.annotations.get_signature(
overload_fn, None, None, inspect.ismethod(overload_fn)
)
impl_ast = get_jit_def(impl_fn, impl_fn.__name__)
overload_defaults = get_default_args(overload_fn)
implementation_defaults = get_default_args(impl_fn)
_rcb = _jit_internal.createResolutionCallbackFromClosure(impl_fn)
_check_overload_defaults(
implementation_defaults, overload_defaults, overload_decl.range()
)
fn = torch._C._jit_script_compile_overload(
qual_name,
overload_decl,
impl_ast,
_rcb,
implementation_defaults,
overload_signature,
)
return fn
def _get_overloads(obj):
# check for cached compiled fns
existing_compiled_fns = _try_get_jit_cached_overloads(obj)
qual_name = _qualified_name(obj)
uncompiled_overloads = _jit_internal._get_fn_overloads(qual_name)
if uncompiled_overloads is None:
return existing_compiled_fns
compiled_fns = []
for overload_fn in uncompiled_overloads:
compiled_fns.append(
_compile_function_with_overload(overload_fn, qual_name, obj)
)
if existing_compiled_fns:
compiled_fns = existing_compiled_fns + compiled_fns
# cache compilation, remove information stored to do compilation
_set_jit_overload_cache(obj, compiled_fns)
_jit_internal._clear_fn_overloads(qual_name)
return compiled_fns
def _check_directly_compile_overloaded(obj):
qual_name = _qualified_name(obj)
if _jit_internal._get_fn_overloads(qual_name) or _try_get_jit_cached_overloads(obj):
raise RuntimeError(
"Function {} cannot be directly compiled because it"
" is overloaded. It must be used in a context of a function"
" where its inputs can determine which overload to call.".format(qual_name)
)
def interface(obj):
if not inspect.isclass(obj):
raise RuntimeError("interface must be applied to a class")
if not _is_new_style_class(obj):
raise RuntimeError("TorchScript interfaces must inherit from 'object'")
# Expected MRO is:
# User module
# torch.nn.modules.module.Module
# object
is_module_interface = issubclass(obj, torch.nn.Module) and len(obj.mro()) == 3
if not is_module_interface and len(obj.mro()) > 2:
raise RuntimeError(
"TorchScript interface does not support inheritance yet. "
"Please directly inherit from 'object' or 'nn.Module'."
)
qualified_name = _qualified_name(obj)
rcb = _jit_internal.createResolutionCallbackFromFrame(1)
# if this type is a `nn.Module` subclass, generate a module interface type
# instead of a class interface type; a module interface type only compiles
# the user provided methods as part of the interface
ast = get_jit_class_def(obj, obj.__name__)
mangled_classname = torch._C._jit_script_interface_compile(
qualified_name, ast, rcb, is_module_interface
)
obj.__torch_script_interface__ = mangled_classname
return obj
def _recursive_compile_class(obj, loc):
_qual_name = _qualified_name(obj)
# We're starting a new compilation, so update the error call stack in
# case it fails
error_stack = torch._C.CallStack(_qual_name, loc)
rcb = _jit_internal.createResolutionCallbackForClassMethods(obj)
return _compile_and_register_class(obj, rcb, _qual_name)
CompilationUnit = torch._C.CompilationUnit
set_module(CompilationUnit, "torch.jit")
def _unwrap_optional(x):
assert x is not None, "Unwrapping null optional"
return x
_register_builtin(_unwrap_optional, "aten::_unwrap_optional")
_register_builtin(_jit_internal.is_scripting, "aten::is_scripting")
_register_builtin(has_torch_function, "aten::has_torch_function")
_register_builtin(has_torch_function_unary, "aten::has_torch_function")
_register_builtin(has_torch_function_variadic, "aten::has_torch_function")
|
the-stack_0_2766 | import os
import re
import yaml
from os.path import join as pjoin
def find_test_file(filename, module=None):
"""Looks for a test case or related file in the following order:
- test_cases/module/filename (if module)
- test_cases/module/filename.yml (if module)
- test_cases/filename
- test_cases/filename/filename
- test_cases/filename/filename.yml
"""
# keep track of all paths attempted, for debugging
tried = []
if module:
# try joining all args
path = pjoin('test_cases', module, filename)
tried.append(path)
# try joining all args + .yml
if os.path.isfile(path):
return path
else:
path += '.yml'
tried.append(path)
if os.path.isfile(path):
return path
# try omitting module
path = pjoin('test_cases', filename)
tried.append(path)
# one of the above should at least be a file or directory
if not os.path.exists(path):
raise FileNotFoundError("No such file or directory: " + repr(tried))
# try getting default file for this directory
if os.path.isdir(path):
path = pjoin(path, os.path.basename(path))
tried.append(path)
if os.path.isfile(path):
return path
else:
path += '.yml'
tried.append(path)
if not os.path.isfile(path):
raise FileNotFoundError("No such file: " + repr(tried))
return path
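# Illustrative lookups (hypothetical layout): with test_cases/shop/checkout.yml and
# test_cases/login/login.yml on disk, the search order above resolves
#     find_test_file('checkout', module='shop')  ->  'test_cases/shop/checkout.yml'
#     find_test_file('login')                    ->  'test_cases/login/login.yml'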
def setup_custom_options(test_case, module):
test_case = setup_test_inheritance(test_case, module)
map_filename = test_case.get('dict')
if map_filename:
map_filename = find_test_file(map_filename, module=module)
opt_map = read_yaml(map_filename)
for opt, settings in opt_map.items():
if opt in test_case:
value = str(test_case[opt])
pattern = r'\b' + opt + r'\b'
step = settings.get('step')
assert step, "Error: 'step' must be defined for custom options"
step = int(step) - 1
test_step = test_case['steps'][step]
presteps = settings.get('presteps')
if presteps:
for ind, step in enumerate(presteps):
presteps[ind] = re.sub(pattern, value, step)
test_presteps = test_step.setdefault('presteps', [])
test_presteps += presteps
elems = settings.get('elems')
if elems:
for ind, elem in enumerate(elems):
for elem_name, elem_value in elem.items():
elems[ind][elem_name] = re.sub(pattern, value, elem_value)
test_elems = test_step.setdefault('elems', [])
test_elems += elems
poststeps = settings.get('poststeps')
if poststeps:
for ind, step in enumerate(poststeps):
poststeps[ind] = re.sub(pattern, value, step)
test_poststeps = test_step.setdefault('poststeps', [])
test_poststeps += poststeps
# look for "module" in each step
# can't use for loop b/c iteration is nonlinear
ind = 0
while ind < len(test_case['steps']):
step = test_case['steps'][ind]
if 'module' in step:
module_info = step['module']
module_name = module_info['name']
module_template = find_test_file(module_name, module=module_name)
test_template = read_yaml(module_template)
# inherit options from test_template
test_copy = test_case.copy()
# default to test_copy's options except for steps/dict/parent
del test_copy['steps']
del test_copy['dict']
del test_copy['parent']
test_template.update(test_copy)
test_template = test_copy
# generate sub-case as though template were the main case
test_template = setup_custom_options(test_template, module=module_name)
# obtain user's desired slice of module's steps
index = module_info.get('index', None)
            if index is None:
start = module_info.get('start', None)
stop = module_info.get('stop', None)
step_slice = slice(start, stop)
module_steps = test_template['steps'][step_slice]
else:
module_steps = [test_template['steps'][index]]
# replace module entry with steps
before = test_case['steps'][:ind]
after = test_case['steps'][ind+1:]
test_template['steps'] = before + module_steps + after
test_case = test_template
ind += len(module_steps)
else:
ind += 1
return test_case
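# Sketch of the option map this expects (hypothetical option 'username'): the YAML
# named by the test case's 'dict' key would parse to something like
#     {'username': {'step': 1,
#                   'presteps': ['click login'],
#                   'elems': [{'field': 'enter username in field'}]}}
# so a test case containing `username: alice` has the word 'username' replaced by
# 'alice' in those presteps/elems before they are appended to step 1 of the test.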
def setup_test_inheritance(child_case, module):
if not 'parent' in child_case:
child_case['parent'] = module
lineage = [child_case]
filenames = [None]
parent = child_case.get('parent', module)
while parent != False:
# defaults to module name
        if parent is None:
            parent = module
# break if module has itself as parent
parent = find_test_file(parent, module=module)
if parent == filenames[-1]:
break
with open(parent) as parent_file:
parent_case = yaml.load(parent_file.read(), Loader=yaml.FullLoader)
lineage.append(parent_case)
if parent in filenames:
filenames.append(parent)
errmsg = "Multiple/circular inheritance not allowed; got: "
errmsg += repr(filenames)
raise NotImplementedError(errmsg)
filenames.append(parent)
child_case = parent_case
parent = child_case.get('parent', module)
parent_case = lineage.pop()
while lineage:
child_case = lineage.pop()
parent_case.update(child_case)
return parent_case
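# Example of the inheritance above (hypothetical files): if checkout.yml contains
# `parent: cart` and cart.yml has no parent of its own, the loop walks up through
# cart.yml (and the module's own default file), stops when the chain repeats, then
# merges each child's keys over its parent's, so checkout.yml overrides individual
# settings while inheriting the rest.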
def read_yaml(filename):
with open(filename) as fh:
return yaml.full_load(fh.read())
|
the-stack_0_2767 | def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Maritalstatus, obj[8]: Children, obj[9]: Education, obj[10]: Occupation, obj[11]: Income, obj[12]: Bar, obj[13]: Coffeehouse, obj[14]: Restaurant20to50, obj[15]: Direction_same, obj[16]: Distance
# {"feature": "Occupation", "instances": 23, "metric_value": 0.9877, "depth": 1}
if obj[10]<=9:
# {"feature": "Bar", "instances": 18, "metric_value": 0.8524, "depth": 2}
if obj[12]>0.0:
# {"feature": "Time", "instances": 10, "metric_value": 1.0, "depth": 3}
if obj[2]>0:
# {"feature": "Weather", "instances": 7, "metric_value": 0.8631, "depth": 4}
if obj[1]<=0:
# {"feature": "Age", "instances": 6, "metric_value": 0.65, "depth": 5}
if obj[6]>0:
return 'False'
elif obj[6]<=0:
return 'True'
else: return 'True'
elif obj[1]>0:
return 'True'
else: return 'True'
elif obj[2]<=0:
return 'True'
else: return 'True'
elif obj[12]<=0.0:
return 'True'
else: return 'True'
elif obj[10]>9:
return 'False'
else: return 'False'
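# Illustrative call (feature values below are made up; obj follows the column
# order documented in the comment at the top of findDecision).
if __name__ == "__main__":
    sample = [1, 0, 1, 2, 1, 0, 1, 0, 0, 2, 5, 3, 1.0, 2.0, 1.0, 0, 1]
    print(findDecision(sample))  # prints 'True' or 'False'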
|
the-stack_0_2768 | #!/usr/bin/python
import sys
import json
def tablevel(tbl):
ret = ""
if tbl < 0:
return ""
else:
        for i in range(tbl):
ret = ret + "\t"
return ret
def funcstrmkr(func, funcname, type):
tbl = 0
funcstr = ''
# funcstr += "var "+funcname+" = function("
funcstr += "Egg.prototype." + funcname + " = function("
flg = False;
for inp in func["inputs"]:
if(flg):
funcstr += ", "
funcstr += inp["name"]
flg = True
if (flg):
funcstr += ", "
funcstr += "cb){\n"
tbl += 1
# funcstr += tablevel(tbl) + "var GC = getContract(account);\n"
funcstr += tablevel(tbl) + "if (!this.egg) return cb(new Error(\'Egg contract has not been initialized\'));\n"
funcstr += tablevel(tbl) + "egg." + funcname
if type == "get":
funcstr = funcstr + ".call("
elif type == "post":
funcstr = funcstr + ".sendTransaction("
for inp in func["inputs"]:
funcstr += inp["name"] + ", "
funcstr += "function(err, result){\n"
tbl += 1
funcstr += tablevel(tbl) + "if(err) return cb(err, null);\n"
funcstr += tablevel(tbl) + "return cb(null"
if type == "get":
funcstr += ", result.values"
else:
for oup in func["outputs"]:
funcstr += ", result.values." + oup["name"]
funcstr += ");\n"
tbl -= 1
funcstr += tablevel(tbl) + "})\n"
tbl -= 1
funcstr += "}\n\n"
return funcstr
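# For a hypothetical constant ABI entry such as
#     {"name": "getOwner", "constant": true, "inputs": [], "outputs": [{"name": "owner"}]}
# the string built above comes out roughly as:
#     Egg.prototype.getOwner = function(cb){
#         if (!this.egg) return cb(new Error('Egg contract has not been initialized'));
#         egg.getOwner.call(function(err, result){
#             if(err) return cb(err, null);
#             return cb(null, result.values);
#         })
#     }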
#Read the abi
infile = sys.argv[1]
outfile = sys.argv[2]
inf = open(infile,'r')
jo = json.load(inf)
inf.close()
Magic = False
#One by One take each function of the abi and compose the rest endpoint
restfuncs = []
modstr = "\nmodule.exports = {\n"
for func in jo:
if (func["type"] == "function"):
modstr += tablevel(1) + func["name"] + ":" + func["name"] + ",\n"
if (func["constant"] == False):
restfuncs.append(funcstrmkr(func, func["name"], "post"))
else:
restfuncs.append(funcstrmkr(func, func["name"],"get"))
modstr += "}\n\n"
#Now print out to file
ouf = open(outfile,'w')
#ouf.write("//Don't forget to set the output formatter to json!\n")
#ouf.write("contract.setOutputFormatter(erisC.outputFormatter.jsonStrings)\n\n")
#ouf.write("//Restify endpoints. Copy into appropriate section\n\n")
ouf.write(modstr)
for rf in restfuncs:
ouf.write(rf)
ouf.close()
|
the-stack_0_2770 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tempfile
import crosscat.LocalEngine
import bayeslite
import bayeslite.core as core
from bayeslite import bql_quote_name
from bayeslite.metamodels.crosscat import CrosscatMetamodel
from bayeslite.metamodels.iid_gaussian import StdNormalMetamodel
examples = {
'crosscat': (
lambda: CrosscatMetamodel(crosscat.LocalEngine.LocalEngine(seed=0)),
't',
'CREATE TABLE t(x NUMERIC, y CYCLIC, z CATEGORICAL)',
'INSERT INTO t (x, y, z) VALUES (?, ?, ?)',
[
(0, 1.57, 'foo'),
(1.83, 3.141, 'bar'),
(1.82, 3.140, 'bar'),
(-1, 6.28, 'foo'),
],
'p',
'p_cc',
'CREATE POPULATION p FOR t'
'(x NUMERICAL; y CYCLIC; z CATEGORICAL)',
'CREATE GENERATOR p_cc FOR p USING crosscat()',
'CREATE GENERATOR p_cc FOR p USING crosscat(DEPENDENT)',
'CREATE GENERATOR p_cc FOR p USING crosscat(INDEPENDENT)',
),
'iid_gaussian': (
lambda: StdNormalMetamodel(seed=0),
't',
'CREATE TABLE t(x NUMERIC, y NUMERIC)',
'INSERT INTO t (x, y) VALUES (?, ?)',
[(0, 1), (1, float('nan')), (2, -1.2)],
'p',
'p_sn',
'CREATE POPULATION p FOR t(x NUMERICAL; y NUMERICAL)',
'CREATE GENERATOR p_sn FOR p USING std_normal()',
# XXX Should invent something that fails for
# metamodel-specific reasons here.
'CREATE GENERATOR p_sn FOR p USING std_normal ...',
'CREATE GENERATOR p_sn FOR p USING std_normal ...'
),
}
@pytest.mark.parametrize('persist,exname',
[(persist, key)
for persist in (True, False)
for key in sorted(examples.keys())])
def test_example(persist, exname):
if persist:
with tempfile.NamedTemporaryFile(prefix='bayeslite') as f:
with bayeslite.bayesdb_open(pathname=f.name,
builtin_metamodels=False) as bdb:
_test_example(bdb, exname)
with bayeslite.bayesdb_open(pathname=f.name,
builtin_metamodels=False) as bdb:
_retest_example(bdb, exname)
else:
with bayeslite.bayesdb_open(builtin_metamodels=False) as bdb:
_test_example(bdb, exname)
def _test_example(bdb, exname):
mm, t, t_sql, data_sql, data, p, g, p_bql, g_bql, g_bqlbad0, g_bqlbad1 = \
examples[exname]
qt = bql_quote_name(t)
qg = bql_quote_name(g)
bayeslite.bayesdb_register_metamodel(bdb, mm())
# Create a table.
assert not core.bayesdb_has_table(bdb, t)
with bdb.savepoint_rollback():
bdb.sql_execute(t_sql)
assert core.bayesdb_has_table(bdb, t)
assert not core.bayesdb_has_table(bdb, t)
bdb.sql_execute(t_sql)
assert core.bayesdb_has_table(bdb, t)
# Insert data into the table.
assert bdb.execute('SELECT COUNT(*) FROM %s' % (qt,)).fetchvalue() == 0
for row in data:
bdb.sql_execute(data_sql, row)
n = len(data)
assert bdb.execute('SELECT COUNT(*) FROM %s' % (qt,)).fetchvalue() == n
# Create a population.
assert not core.bayesdb_has_population(bdb, p)
bdb.execute(p_bql)
p_id = core.bayesdb_get_population(bdb, p)
# Create a generator. Make sure savepoints work for this.
assert not core.bayesdb_has_generator(bdb, p_id, g)
with pytest.raises(Exception):
with bdb.savepoint():
bdb.execute(g_bqlbad0)
assert not core.bayesdb_has_generator(bdb, p_id, g)
with pytest.raises(Exception):
with bdb.savepoint():
bdb.execute(g_bqlbad1)
assert not core.bayesdb_has_generator(bdb, p_id, g)
with bdb.savepoint_rollback():
bdb.execute(g_bql)
assert core.bayesdb_has_generator(bdb, p_id, g)
assert not core.bayesdb_has_generator(bdb, p_id, g)
bdb.execute(g_bql)
assert core.bayesdb_has_generator(bdb, p_id, g)
assert not core.bayesdb_has_generator(bdb, p_id+1, g)
with pytest.raises(Exception):
bdb.execute(g_bql)
assert core.bayesdb_has_generator(bdb, p_id, g)
gid = core.bayesdb_get_generator(bdb, p_id, g)
assert not core.bayesdb_generator_has_model(bdb, gid, 0)
assert [] == core.bayesdb_generator_modelnos(bdb, gid)
with bdb.savepoint_rollback():
bdb.execute('INITIALIZE 1 MODEL FOR %s' % (qg,))
assert core.bayesdb_generator_has_model(bdb, gid, 0)
assert [0] == core.bayesdb_generator_modelnos(bdb, gid)
with bdb.savepoint_rollback():
bdb.execute('INITIALIZE 10 MODELS FOR %s' % (qg,))
for i in range(10):
assert core.bayesdb_generator_has_model(bdb, gid, i)
assert range(10) == core.bayesdb_generator_modelnos(bdb, gid)
bdb.execute('INITIALIZE 2 MODELS FOR %s' % (qg,))
# Test dropping things.
with pytest.raises(bayeslite.BQLError):
bdb.execute('DROP TABLE %s' % (qt,))
with bdb.savepoint_rollback():
# Note that sql_execute does not protect us!
bdb.sql_execute('DROP TABLE %s' % (qt,))
assert not core.bayesdb_has_table(bdb, t)
assert core.bayesdb_has_table(bdb, t)
# XXX Should we reject dropping a generator when there remain
# models? Should we not reject dropping a table when there remain
# generators? A table can be dropped when there remain indices.
#
# with pytest.raises(bayeslite.BQLError):
# # Models remain.
# bdb.execute('DROP GENERATOR %s' % (qg,))
with bdb.savepoint_rollback():
bdb.execute('DROP GENERATOR %s' % (qg,))
assert not core.bayesdb_has_generator(bdb, None, g)
assert core.bayesdb_has_generator(bdb, p_id, g)
with bdb.savepoint_rollback():
bdb.execute('DROP GENERATOR %s' % (qg,))
assert not core.bayesdb_has_generator(bdb, None, g)
bdb.execute(g_bql)
assert core.bayesdb_has_generator(bdb, None, g)
assert core.bayesdb_has_generator(bdb, p_id, g)
assert core.bayesdb_has_generator(bdb, None, g)
assert gid == core.bayesdb_get_generator(bdb, p_id, g)
# Test dropping models.
with bdb.savepoint_rollback():
bdb.execute('DROP MODEL 1 FROM %s' % (qg,))
assert core.bayesdb_generator_has_model(bdb, gid, 0)
assert not core.bayesdb_generator_has_model(bdb, gid, 1)
assert [0] == core.bayesdb_generator_modelnos(bdb, gid)
# Test analyzing models.
bdb.execute('ANALYZE %s FOR 1 ITERATION WAIT' % (qg,))
bdb.execute('ANALYZE %s MODEL 0 FOR 1 ITERATION WAIT' % (qg,))
bdb.execute('ANALYZE %s MODEL 1 FOR 1 ITERATION WAIT' % (qg,))
def _retest_example(bdb, exname):
mm, t, t_sql, data_sql, data, p, g, p_bql, g_bql, g_bqlbad0, g_bqlbad1 = \
examples[exname]
qt = bql_quote_name(t)
qg = bql_quote_name(g)
bayeslite.bayesdb_register_metamodel(bdb, mm())
p_id = core.bayesdb_get_population(bdb, p)
assert core.bayesdb_has_table(bdb, t)
assert core.bayesdb_has_generator(bdb, p_id, g)
gid = core.bayesdb_get_generator(bdb, p_id, g)
assert core.bayesdb_generator_has_model(bdb, gid, 0)
assert core.bayesdb_generator_has_model(bdb, gid, 1)
bdb.execute('ANALYZE %s FOR 1 ITERATION WAIT' % (qg,))
bdb.execute('ANALYZE %s MODEL 0 FOR 1 ITERATION WAIT' % (qg,))
bdb.execute('ANALYZE %s MODEL 1 FOR 1 ITERATION WAIT' % (qg,))
|
the-stack_0_2771 | # not working, not sure why (as parts work separately
# outside of function)
# (User's) Problem
# We have:
# a string
# We need:
# is that string a palindrome? yes/no
# We must:
# boolean output
# name of function is
# checkPalindrome
# Solution (Product)
# Strategy 1:
# turn string into a list(array)
# Make a compare_list which is the reverse order of
# the original list
# compare the two, if they are the same: true, else false
def checkPalindrome(inputString):
# make input a list
input_as_list = list(inputString)
# make a reverse list
# (first make a copy)
    reverse_order = list(input_as_list)
# (this function has no input or output, it reverses in place)
reverse_order.reverse()
# compare two lists
if input_as_list == reverse_order:
return True
else:
return False
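# Quick illustrative checks (not in the original file):
if __name__ == "__main__":
    print(checkPalindrome("racecar"))  # True
    print(checkPalindrome("abc"))      # False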
|
the-stack_0_2772 | import numpy as np
import cv2
import time
import random
from Markov import Get_Markov
P = Get_Markov()
TILE_SIZE = 32
OFS = 50
MARKET = """
##################
##..............##
#R..HA..ME..IB..P#
#R..HA..ME..IB..P#
#R..HA..ME..IB..P#
#Y..HA..ME..IB..P#
#Y..HA..ME..IB..P#
##...............#
##..C#..C#..C#...#
##..##..##..##...#
##...............#
##############GG##
""".strip()
class SupermarketMap:
"""Visualizes the supermarket background"""
def __init__(self, layout, tiles):
"""
layout : a string with each character representing a tile
tile : a numpy array containing the tile image
"""
self.tiles = tiles
self.contents = [list(row) for row in layout.split("\n")]
self.xsize = len(self.contents[0])
self.ysize = len(self.contents)
self.image = np.zeros(
(self.ysize * TILE_SIZE, self.xsize * TILE_SIZE, 3), dtype=np.uint8
)
self.prepare_map()
def extract_tile(self, row, col):
y = (row-1)*32
x = (col-1)*32
return self.tiles[y:y+32, x:x+32]
def get_tile(self, char):
"""returns the array for a given tile character"""
if char == "#":
return self.extract_tile(1,1)
elif char == "G":
return self.extract_tile(8,4)
elif char == "C":
return self.extract_tile(3,9)
elif char == "B":
return self.extract_tile(1,5)
elif char == "E":
return self.extract_tile(8,12)
elif char == "A":
return self.extract_tile(7,14)
elif char == "R":
return self.extract_tile(4,9)
elif char == "Y":
return self.extract_tile(5,9)
elif char == "P":
return self.extract_tile(6,5)
elif char == "I":
return self.extract_tile(5,14)
elif char == "M":
return self.extract_tile(4,14)
elif char == "H":
return self.extract_tile(7,4)
else:
return self.extract_tile(1,3)
def prepare_map(self):
"""prepares the entire image as a big numpy array"""
for y, row in enumerate(self.contents):
for x, tile in enumerate(row):
bm = self.get_tile(tile)
self.image[
y * TILE_SIZE : (y + 1) * TILE_SIZE,
x * TILE_SIZE : (x + 1) * TILE_SIZE,
] = bm
def draw(self, frame, offset=OFS):
"""
draws the image into a frame
offset pixels from the top left corner
"""
frame[
OFS : OFS + self.image.shape[0], OFS : OFS + self.image.shape[1]
] = self.image
def write_image(self, filename):
"""writes the image into a file"""
cv2.imwrite(filename, self.image)
class Customer:
def __init__(self, terrain_map, image, customer_id, state, matrix = P):
self.terrain_map = terrain_map
self.image = image
self.customer_id = customer_id
self.state = state
self.matrix = matrix
def __repr__(self):
return f'the customer is now at {self.state}!'
def draw(self, frame):
location_pos = {'dairy':(10,2),'drinks':(6,2),'fruit':(14,2),
'spices':(2,2),'checkout':(11,8)}
xpos = OFS + location_pos[self.state][0] * TILE_SIZE
ypos = OFS + location_pos[self.state][1] * TILE_SIZE
frame[ypos:ypos+TILE_SIZE, xpos:xpos+TILE_SIZE] = self.image
# overlay the Customer image / sprite onto the frame
def next_state(self):
'''
Propagates the customer to the next state.
Returns nothing.
'''
self.state = random.choices(['checkout','dairy','drinks','fruit','spices'],
self.matrix.loc[self.state])
self.state = self.state[0]
# location_pos = {'dairy':(10,2),'drinks':(6,2),'fruit':(14,2),
# 'spices':(2,2),'checkout':(1,1)}
# self.state = location_pos[self.state]
return self.state
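
    # Note: move() below works on grid coordinates self.x / self.y, which are
    # never set in __init__; it is only reachable from the commented-out
    # keyboard controls in the __main__ block, so set x and y before using it.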
def move(self, direction):
newx = self.x
newy = self.y
if direction == 'up':
newy -= 1
if direction == 'down':
newy += 1
if direction == 'left':
newx -= 1
if direction == 'right':
newx += 1
if self.terrain_map.contents[newy][newx] == '.':
self.x = newx
self.y = newy
if __name__ == "__main__":
background = np.zeros((700, 1000, 3), np.uint8)
tiles = cv2.imread("tiles.png")
market = SupermarketMap(MARKET, tiles)
cust_image = market.extract_tile(5,1)
cust1 = Customer(market, cust_image, 1, state='dairy') # spice
# cust2 = Customer(market, cust_image, 6, 2) # drinks
# cust3 = Customer(market, cust_image, 10, 2) # dairy
# cust4 = Customer(market, cust_image, 14, 2) # fruit
count = 0
minutes = 0
while True: # this script will run forever
frame = background.copy()
        market.draw(frame)  # draw the supermarket layout into the frame
cust1.draw(frame)
# cust2.draw(frame)
# cust3.draw(frame)
# cust4.draw(frame)
cv2.imshow("frame", frame)
key = chr(cv2.waitKey(1) & 0xFF)
if key == "q":
break
# if key == 'w':
# cust1.move('up')
# if key == 'a':
# cust1.move('left')
# if key == 'd':
# cust1.move('right')
# if key == 'z':
# cust1.move('down')
if count == 48:
count = 0
minutes += 1
cust1.next_state()
count += 1
cv2.destroyAllWindows()
market.write_image("supermarket.png")
|
the-stack_0_2773 | from typing import Any, Dict, List, Optional
import httpx
from ...client import Client
from ...models.suggester import Suggester
from ...types import Response
def _get_kwargs(
project_name: str,
*,
client: Client,
) -> Dict[str, Any]:
url = "{}/projects/{projectName}/suggesters".format(client.base_url, projectName=project_name)
headers: Dict[str, Any] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
return {
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
}
def _parse_response(*, response: httpx.Response) -> Optional[List[Suggester]]:
if response.status_code == 200:
response_200 = []
_response_200 = response.json()
for componentsschemas_suggester_array_item_data in _response_200:
componentsschemas_suggester_array_item = Suggester.from_dict(componentsschemas_suggester_array_item_data)
response_200.append(componentsschemas_suggester_array_item)
return response_200
return None
def _build_response(*, response: httpx.Response) -> Response[List[Suggester]]:
return Response(
status_code=response.status_code,
content=response.content,
headers=response.headers,
parsed=_parse_response(response=response),
)
def sync_detailed(
project_name: str,
*,
client: Client,
) -> Response[List[Suggester]]:
kwargs = _get_kwargs(
project_name=project_name,
client=client,
)
response = httpx.get(
verify=client.verify_ssl,
**kwargs,
)
return _build_response(response=response)
def sync(
project_name: str,
*,
client: Client,
) -> Optional[List[Suggester]]:
""" """
return sync_detailed(
project_name=project_name,
client=client,
).parsed
async def asyncio_detailed(
project_name: str,
*,
client: Client,
) -> Response[List[Suggester]]:
kwargs = _get_kwargs(
project_name=project_name,
client=client,
)
async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
response = await _client.get(**kwargs)
return _build_response(response=response)
async def asyncio(
project_name: str,
*,
client: Client,
) -> Optional[List[Suggester]]:
""" """
return (
await asyncio_detailed(
project_name=project_name,
client=client,
)
).parsed
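
# A rough usage sketch for this generated endpoint module. It assumes the
# generated Client can be constructed with a base_url; the exact import path
# and constructor arguments depend on how the client package was generated.
#
#   client = Client(base_url="https://example.com/api")
#   suggesters = sync(project_name="my-project", client=client)
#   # or, from within an event loop:
#   # suggesters = await asyncio(project_name="my-project", client=client)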
|
the-stack_0_2775 | import re
import sys
import uuid
from collections import defaultdict
from contextlib import contextmanager
from io import BytesIO
from hashlib import sha1
from itertools import chain
from os.path import join
from corehq.blobs import get_blob_db, CODES # noqa: F401
from corehq.blobs.exceptions import AmbiguousBlobStorageError, NotFound
from corehq.blobs.util import (
classproperty,
document_method,
random_url_id,
SAFENAME,
)
from corehq.util.io import ClosingContextProxy
from couchdbkit.exceptions import InvalidAttachment, ResourceNotFound
from dimagi.ext.couchdbkit import (
Document,
DocumentSchema,
DictProperty,
IntegerProperty,
StringProperty,
)
from memoized import memoized
import six
class BlobMetaRef(DocumentSchema):
key = StringProperty()
blobmeta_id = IntegerProperty()
content_type = StringProperty()
content_length = IntegerProperty()
@classmethod
def _from_attachment(cls, data):
return cls(
content_type=data.get("content_type"),
content_length=data.get("length"),
)
@staticmethod
def _normalize_json(dbname, doc_id, data):
if "key" in data:
return data
return {
"key": join(dbname, safe_id(doc_id), data["id"]),
"content_length": data.get("content_length"),
"content_type": data.get("content_type"),
}
class BlobMixin(Document):
class Meta(object):
abstract = True
# TODO evaluate all uses of `external_blobs`
external_blobs = DictProperty(BlobMetaRef)
# When true, fallback to couch on fetch and delete if blob is not
# found in blobdb. Set this to True on subclasses that are in the
# process of being migrated. When this is false (the default) the
# methods on this mixin will not touch couchdb.
_migrating_blobs_from_couch = False
_atomic_blobs = None
@classmethod
def wrap(cls, data):
if data.get("external_blobs"):
doc_id = safe_id(data["_id"])
dbname = _get_couchdb_name(cls)
normalize = BlobMetaRef._normalize_json
blobs = {}
normalized = False
for key, value in data["external_blobs"].items():
if value["doc_type"] == "BlobMetaRef":
blobs[key] = value
else:
blobs[key] = normalize(dbname, data['_id'], value)
normalized = True
if normalized:
data = data.copy()
data["external_blobs"] = blobs
return super(BlobMixin, cls).wrap(data)
@classproperty
def _blobdb_type_code(cls):
"""Blob DB type code
This is an abstract attribute that must be set on non-abstract
subclasses of `BlobMixin`. Its value should be one of the codes
in `corehq.blobs.CODES`.
"""
raise NotImplementedError(
"abstract class attribute %s._blobdb_type_code is missing" %
cls.__name__
)
@property
def blobs(self):
"""Get a dictionary of BlobMetaRef objects keyed by attachment name
Includes CouchDB attachments if `_migrating_blobs_from_couch` is true.
The returned value should not be mutated.
"""
if not self._migrating_blobs_from_couch or not self._attachments:
return self.external_blobs
value = {name: BlobMetaRef._from_attachment(info)
for name, info in self._attachments.items()}
value.update(self.external_blobs)
return value
@document_method
def put_attachment(self, content, name=None, content_type=None,
content_length=None, domain=None, type_code=None):
"""Put attachment in blob database
See `get_short_identifier()` for restrictions on the upper bound
for number of attachments per object.
:param content: String or file object.
"""
db = get_blob_db()
if name is None:
name = getattr(content, "name", None)
if name is None:
raise InvalidAttachment("cannot save attachment without name")
if self._id is None:
raise ResourceNotFound("cannot put attachment on unidentified document")
if hasattr(self, "domain"):
if domain is not None and self.domain != domain:
raise ValueError("domain mismatch: %s != %s" % (self.domain, domain))
domain = self.domain
elif domain is None:
raise ValueError("domain attribute or argument is required")
old_meta = self.blobs.get(name)
if isinstance(content, str):
content = BytesIO(content.encode("utf-8"))
elif isinstance(content, bytes):
content = BytesIO(content)
# do we need to worry about BlobDB reading beyond content_length?
meta = db.put(
content,
domain=domain or self.domain,
parent_id=self._id,
name=name,
type_code=(self._blobdb_type_code if type_code is None else type_code),
content_type=content_type,
)
self.external_blobs[name] = BlobMetaRef(
key=meta.key,
blobmeta_id=meta.id,
content_type=content_type,
content_length=meta.content_length,
)
if self._migrating_blobs_from_couch and self._attachments:
self._attachments.pop(name, None)
if self._atomic_blobs is None:
self.save()
if old_meta and old_meta.key:
db.delete(key=old_meta.key)
elif old_meta and old_meta.key:
self._atomic_blobs[name].append(old_meta.key)
return True
@document_method
def fetch_attachment(self, name, stream=False):
"""Get named attachment
:param stream: When true, return a file-like object that can be
read at least once (streamers should not expect to seek within
or read the contents of the returned file more than once).
"""
db = get_blob_db()
try:
try:
key = self.external_blobs[name].key
except KeyError:
if self._migrating_blobs_from_couch:
return super(BlobMixin, self) \
.fetch_attachment(name, stream=stream)
raise NotFound(name)
meta = db.metadb.get(parent_id=self._id, key=key)
blob = meta.open()
except (NotFound, db.metadb.DoesNotExist):
raise ResourceNotFound(
"{model} {model_id} attachment: {name!r}".format(
model=type(self).__name__,
model_id=self._id,
name=name,
))
if stream:
return blob
with blob:
return blob.read()
def has_attachment(self, name):
return name in self.blobs
def delete_attachment(self, name):
if self._migrating_blobs_from_couch and self._attachments:
deleted = bool(self._attachments.pop(name, None))
else:
deleted = False
meta = self.external_blobs.pop(name, None)
if meta is not None:
if self._atomic_blobs is None:
deleted = get_blob_db().delete(key=meta.key) or deleted
else:
self._atomic_blobs[name].append(meta.key)
deleted = True
if self._atomic_blobs is None:
self.save()
return deleted
@document_method
def atomic_blobs(self, save=None):
"""Return a context manager to atomically save doc + blobs
Usage::
with doc.atomic_blobs():
doc.put_attachment(...)
# doc and blob are now saved
Blobs saved inside the context manager will be deleted if an
exception is raised inside the context body.
:param save: A function to be called instead of `self.save()`
"""
@contextmanager
def atomic_blobs_context():
if self._id is None:
self._id = uuid.uuid4().hex
old_external_blobs = dict(self.external_blobs)
if self._migrating_blobs_from_couch:
if self._attachments:
old_attachments = dict(self._attachments)
else:
old_attachments = None
atomicity = self._atomic_blobs
self._atomic_blobs = new_deleted = defaultdict(list)
db = get_blob_db()
success = False
try:
yield
(self.save if save is None else save)()
success = True
except:
typ, exc, tb = sys.exc_info()
# delete new blobs that were not saved
for name, meta in self.external_blobs.items():
old_meta = old_external_blobs.get(name)
if old_meta is None or meta.key != old_meta.key:
db.delete(key=meta.key)
self.external_blobs = old_external_blobs
if self._migrating_blobs_from_couch:
self._attachments = old_attachments
six.reraise(typ, exc, tb)
finally:
self._atomic_blobs = atomicity
if success:
# delete replaced blobs
deleted = set()
blobs = self.blobs
for name, meta in list(old_external_blobs.items()):
if name not in blobs or meta.key != blobs[name].key:
db.delete(key=meta.key)
deleted.add(meta.key)
# delete newly created blobs that were overwritten or deleted
for key in chain.from_iterable(new_deleted.values()):
if key not in deleted:
db.delete(key=key)
return atomic_blobs_context()
class BlobHelper(object):
"""Helper to get/set blobs given a document dict and couch database
NOTE: attachments will be stored in couch and will be inaccessible
using the normal attachments API if this is used to copy a document
having "_attachments" but not "external_blobs" to a database in
which the "doc_type" uses external blob storage and is not in
`_migrating_blobs_from_couch` mode. To work around this limitation,
put `"external_blobs": {}` in documents having a "doc_type" that
uses external blob storage. The same is true when copying a document
with "external_blobs" to a database that is not using an external
blob database. To work around that, remove the "external_blobs" item
from the document (after fetching all blobs) and be sure that the
document has an "_attachments" value that is not `None`.
Modifying "_attachments" or "external_blobs" values in a document is
not recommended while it is wrapped in this class.
"""
def __init__(self, doc, database, type_code):
if doc.get("_id") is None:
raise TypeError("BlobHelper requires a real _id")
self._id = doc["_id"]
self.doc = doc
self.doc_type = doc["doc_type"]
if "domain" in doc:
self.domain = doc["domain"]
elif self.doc_type == "Domain":
self.domain = doc["name"]
self._blobdb_type_code = type_code
self.database = database
self.couch_only = "external_blobs" not in doc
self._migrating_blobs_from_couch = bool(doc.get("_attachments")) \
and not self.couch_only
self._attachments = doc.get("_attachments")
self.external_blobs = {n: BlobMetaRef.wrap(
BlobMetaRef._normalize_json(database.dbname, self._id, m.copy())
) for n, m in doc.get("external_blobs", {}).items()}
def __repr__(self):
return "<%s %s domain=%s id=%s>" % (
type(self).__name__,
self.doc_type,
getattr(self, "domain", ""),
self._id,
)
_atomic_blobs = None
@property
def blobs(self):
return BlobMixin.blobs.fget(self)
def put_attachment(self, content, name=None, *args, **kw):
if self._attachments is None and self.couch_only:
raise AmbiguousBlobStorageError(" ".join("""
Ambiguous blob storage: doc has no _attachments and no
external_blobs. Put a dict (may be empty) in one or both
to indicate where blobs are located (_attachments ->
couch, external_blobs -> blob db). If both are present,
new blobs will be stored in the blob db, but existing
blobs will be fetched from couch if there is no
corresponding key in the external_blobs dict.
""".split()))
if self.couch_only:
self.database.put_attachment(self.doc, content, name, *args, **kw)
else:
BlobMixin.put_attachment(self, content, name, *args, **kw)
self._sync_doc()
return True
def fetch_attachment(self, name, *args, **kw):
if name in self.external_blobs:
return BlobMixin.fetch_attachment(self, name, *args, **kw)
return self.database.fetch_attachment(self._id, name, *args, **kw)
def delete_attachment(self, *args, **kw):
raise NotImplementedError
def atomic_blobs(self, save=None):
if save is not None:
original_save = save
def save():
self._sync_doc()
original_save()
if self.couch_only:
@contextmanager
def context():
(self.save if save is None else save)()
yield
else:
@contextmanager
def context():
try:
with BlobMixin.atomic_blobs(self, save):
yield
except:
self.doc["_attachments"] = self._attachments
self.doc["external_blobs"] = {name: meta.to_json()
for name, meta in self.external_blobs.items()}
raise
return context()
def _sync_doc(self):
if "_attachments" in self.doc:
assert self.doc["_attachments"] == self._attachments
if "external_blobs" in self.doc:
# because put_attachment calls self.save()
self.doc["external_blobs"] = {name: meta.to_json()
for name, meta in self.external_blobs.items()}
def save(self):
self._sync_doc()
self.database.save_doc(self.doc)
class DeferredBlobMixin(BlobMixin):
"""Similar to BlobMixin, but can defer attachment puts until save
This class is intended for backward compatibility with code that set
`_attachments` to a dict of attachments with content. It is not
recommended to use this in new code.
"""
class Meta(object):
abstract = True
_deferred_blobs = None
@property
def blobs(self):
value = super(DeferredBlobMixin, self).blobs
if self._deferred_blobs:
value = dict(value)
for name, info in self._deferred_blobs.items():
if info is not None:
value[name] = BlobMetaRef(
key=None,
content_type=info.get("content_type", None),
content_length=info.get("content_length", None),
)
else:
value.pop(name, None)
return value
@property
def persistent_blobs(self):
"""Get a dict like `blobs` containing only non-deferred items"""
value = super(DeferredBlobMixin, self).blobs
if self._deferred_blobs:
value = value.copy()
for name in self._deferred_blobs:
value.pop(name, None)
return value
def put_attachment(self, content, name=None, *args, **kw):
if self._deferred_blobs:
self._deferred_blobs.pop(name, None)
return super(DeferredBlobMixin, self).put_attachment(content, name,
*args, **kw)
def fetch_attachment(self, name, stream=False):
if self._deferred_blobs and name in self._deferred_blobs:
if self._deferred_blobs[name] is None:
raise ResourceNotFound(
"{model} {model_id} attachment: {name!r}".format(
model=type(self).__name__,
model_id=self._id,
name=name,
))
body = self._deferred_blobs[name]["content"]
if stream:
return ClosingContextProxy(BytesIO(body))
return body
return super(DeferredBlobMixin, self).fetch_attachment(name, stream)
def delete_attachment(self, name):
if self._deferred_blobs:
deleted = bool(self._deferred_blobs.pop(name, None))
else:
deleted = False
return super(DeferredBlobMixin, self).delete_attachment(name) or deleted
def deferred_put_attachment(self, content, name=None, content_type=None,
content_length=None, domain=None, type_code=None):
"""Queue attachment to be persisted on save
WARNING this loads the entire blob content into memory. Use of
this method is discouraged:
- Generally it is bad practice to load large blobs into memory
in their entirety. Ideally blobs should be streamed between
the client and the blob database.
- JSON serialization becomes less efficient because blobs are
base-64 encoded, requiring even more memory.
This method takes the same parameters as `put_attachment`.
"""
if isinstance(content, str):
content = content.encode('utf-8')
elif not isinstance(content, bytes):
content = content.read()
if self._deferred_blobs is None:
self._deferred_blobs = {}
length = len(content) if content_length is None else content_length
self._deferred_blobs[name] = {
"content": content,
"content_type": content_type,
"content_length": length,
"domain": domain or getattr(self, "domain", None),
"type_code": type_code,
}
def deferred_delete_attachment(self, name):
"""Mark attachment to be deleted on save"""
if self._deferred_blobs is None:
self._deferred_blobs = {}
self._deferred_blobs[name] = None
def save(self):
if self._deferred_blobs:
delete_names = []
with self.atomic_blobs(super(DeferredBlobMixin, self).save):
# list deferred blobs to avoid modification during iteration
for name, info in list(self._deferred_blobs.items()):
if info is not None:
self.put_attachment(name=name, **info)
else:
delete_names.append(name)
for name in delete_names:
self.delete_attachment(name)
assert not self._deferred_blobs, self._deferred_blobs
else:
super(DeferredBlobMixin, self).save()
def get_short_identifier():
"""Get a short random identifier
The identifier is chosen from a 64 bit key space, which is suitably
large for no likely collisions in 1000 concurrent keys but kept
small to minimize key length. 1000 is an arbitrary number chosen as
an upper bound of the number of attachments associated with any
given object. We may need to change this if we ever expect an object
to have significantly more than 1000 attachments. The probability of
a collision with a 64 bit ID is:
k = 1000
N = 2 ** 64
(k ** 2) / (2 * N) = 2.7e-14
which is somewhere near the probability of a meteor landing on
your house. For most objects the number of blobs present at any
moment in time will be far lower, and therefore the probability
of a collision will be much lower as well.
http://preshing.com/20110504/hash-collision-probabilities/
"""
return random_url_id(8)
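
# Worked check of the collision estimate quoted in the docstring above
# (illustrative arithmetic only, nothing here runs):
#   k = 1000 concurrent keys, N = 2 ** 64 possible 64-bit ids
#   k ** 2 / (2 * N) = 1_000_000 / 36_893_488_147_419_103_232 ≈ 2.7e-14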
@memoized
def _get_couchdb_name(doc_class):
return doc_class.get_db().dbname
def safe_id(identifier):
if not SAFENAME.match(identifier):
identifier = 'sha1-' + sha1(identifier.encode('utf-8')).hexdigest()
elif SHA1_ID.match(identifier):
# could collide with "safe" id and should never happen anyway
raise ValueError("illegal doc id: {!r}".format(identifier))
return identifier
SHA1_ID = re.compile("sha1-[0-9a-f]{40}$")
|
the-stack_0_2780 | import re
from models import Landmark
from utils import session_scope
NORTH = 0
EAST = 1
SOUTH = 2
WEST = 3
LEFT = -1
RIGHT = 1
SIDES_OF_WORLD = {'north': NORTH, 'east': EAST, 'south': SOUTH, 'west': WEST}
ALL_SIDES_OF_THE_WORLD = ['north', 'east', 'south', 'west']
LEFT_RIGHT = {'left': LEFT, 'right': RIGHT}
class RoutingPointObj:
def __init__(self, start_point='', end_point=''):
self.start_point = start_point
self.end_point = end_point
def __repr__(self):
return '"start_point": {start_point} ' \
'"end_point": {end_point}'.format(start_point=self.start_point,
end_point=self.end_point)
class RoutingException(Exception):
    def __init__(self, message='Routing mechanism can\'t handle this route, '
                               'please adhere to the established format'):
        self.message = message
        super().__init__(self.message)
class RouteParser:
    def __init__(self):
        # keep parser state per instance; class-level mutable defaults would
        # be shared between RouteParser instances
        self.routing_points = []
        self.looking_at = 0
def _parse_command(self, data):
original_command = next(data)
command = original_command.lower()
if 'start' in command:
return self._parse_start_command(command)
elif command.lower().startswith('turn'):
return self._parse_turn_command(command, data)
elif 'landmark' in command:
return self._get_landmark_point(original_command)
elif {'north', 'south', 'west', 'east'}.intersection(command.split(' ')):
return self._calc_distance_with_side(command)
else:
            # that means a command like 'go 3 blocks'
return self._calc_distance(command)
@staticmethod
def _get_landmark_point(command):
# search landmark by name
landmark_name = re.search(r"'(.*?)'", command, re.DOTALL).group(1)
with session_scope() as session:
landmark = session.query(Landmark).filter_by(name=landmark_name).scalar()
return landmark.coordinate
def parse_routing_points(self, route):
result = []
data = self._read_route_file(route)
try:
while True:
stop_point = self._parse_command(data)
self.routing_points.append(stop_point)
except StopIteration:
for idx, val in enumerate(self.routing_points):
try:
result.append([val, self.routing_points[idx + 1]])
except IndexError:
break
return result
@staticmethod
def _parse_start_command(command):
        pattern = r'\((.+?)\)'
result = re.search(pattern, command)
if result:
return result.group()
@staticmethod
def _read_route_file(file):
        # a context manager ensures the file handle is closed once the
        # generator finishes
        with open(file, 'r') as f:
            for line in f:
                data = line.rstrip()
                if not data:
                    break
                yield data
def _parse_turn_command(self, command, data):
# this method should parse the command like 'Turn right/left'
# return new side of the world
turn_command = command.lower()
side_str = 'right' if 'right' in turn_command else 'left'
side = int(LEFT_RIGHT[side_str])
if self.looking_at + side < 0:
self.looking_at = 3
elif self.looking_at + side > 3:
self.looking_at = 0
else:
self.looking_at = self.looking_at + side
        # according to the rules, after a turn we either move toward a landmark or go a number of blocks
next_original_command = next(data)
next_command = next_original_command.lower()
if 'landmark' in next_command:
landmark = self._get_landmark_point(next_original_command)
if self._is_landmark_valid(self._get_current_point(), self._convert_points(landmark)):
return landmark
else:
                # the unit would never reach that landmark
raise RoutingException
else:
return self._calc_distance(next_command)
def _get_current_point(self):
if self.routing_points:
current_point = self.routing_points[-1]
return self._convert_points(current_point)
else:
raise RoutingException
def _calc_distance_with_side(self, command):
next_view = set(ALL_SIDES_OF_THE_WORLD).intersection(command.split(' ')).pop()
self.looking_at = SIDES_OF_WORLD[next_view]
return self._calc_distance(command)
def _is_landmark_valid(self, current_point, landmark):
curr_x, curr_y = current_point
land_x, land_y = landmark
if (self.looking_at == NORTH and land_y < curr_y) or \
(self.looking_at == SOUTH and land_y > curr_y) or \
(self.looking_at == EAST and land_x < curr_x) or \
(self.looking_at == WEST and land_x > curr_x):
return False
return True
@staticmethod
def _convert_points(points):
'''
:param points: coordinate points like "(0,0)"
:return: tuple of int value (0,0)
'''
result = [int(s.strip('()')) for s in points.split(',')]
x, y = result
return x, y
def _calc_distance(self, command):
x, y = self._get_current_point()
value = [int(s) for s in command.split(' ') if s.isdigit()]
if len(value) > 1:
raise RoutingException
else:
value = value[0]
if self.looking_at == NORTH:
y += value
elif self.looking_at == EAST:
x += value
elif self.looking_at == SOUTH:
y -= value
elif self.looking_at == WEST:
x -= value
if x < 0: x = 0
if y < 0: y = 0
return '({x},{y})'.format(x=x, y=y)
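
# A hypothetical route file in the format this parser appears to expect,
# pieced together from the command handling above (the landmark name and the
# numbers are made up for illustration):
#
#   Start at (0,0)
#   Go north 3 blocks
#   Turn right
#   Go 2 blocks
#   Turn left
#   Go until you reach landmark 'Town Hall'
#
# RouteParser().parse_routing_points('route.txt') would then return a list of
# [start_point, end_point] coordinate pairs for consecutive stops.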
|
the-stack_0_2781 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Plotting terminal based histograms
"""
from __future__ import print_function
from __future__ import division
import os
import sys
import math
import optparse
from os.path import dirname
from .utils.helpers import *
from .utils.commandhelp import hist
def calc_bins(n, min_val, max_val, h=None, binwidth=None):
"""
Calculate number of bins for the histogram
"""
if not h:
h = max(10, math.log(n + 1, 2))
if binwidth == 0:
binwidth = 0.1
if binwidth is None:
binwidth = (max_val - min_val) / h
for b in drange(min_val, max_val, step=binwidth, include_stop=True):
if b.is_integer():
yield int(b)
else:
yield b
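
# Rough example of the edges calc_bins produces (assuming drange from
# utils.helpers yields evenly spaced values and include_stop adds the stop):
# calc_bins(100, 0, 10) -> h = max(10, log2(101)) = 10, binwidth = 1.0,
# giving edges of roughly 0, 1, 2, ..., 10.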
def read_numbers(numbers):
"""
Read the input data in the most optimal way
"""
if isiterable(numbers):
for number in numbers:
yield float(str(number).strip())
else:
with open(numbers) as fh:
for number in fh:
yield float(number.strip())
def run_demo():
"""
Run a demonstration
"""
module_dir = dirname(dirname(os.path.realpath(__file__)))
demo_file = os.path.join(module_dir, 'examples/data/exp.txt')
if not os.path.isfile(demo_file):
sys.stderr.write("demo input file not found!\n")
sys.stderr.write("run the downloaddata.sh script in the example first\n")
sys.exit(1)
# plotting a histogram
print("plotting a basic histogram")
print("plot_hist('%s')" % demo_file)
print("hist -f %s" % demo_file)
print("cat %s | hist" % demo_file)
plot_hist(demo_file)
print("*" * 80)
# with colours
print("histogram with colours")
print("plot_hist('%s', colour='blue')" % demo_file)
print("hist -f %s -c blue" % demo_file)
plot_hist(demo_file, colour='blue')
print("*" * 80)
# changing the shape of the point
print("changing the shape of the bars")
print("plot_hist('%s', pch='.')" % demo_file)
print("hist -f %s -p ." % demo_file)
plot_hist(demo_file, pch='.')
print("*" * 80)
# changing the size of the plot
print("changing the size of the plot")
print("plot_hist('%s', height=35.0, bincount=40)" % demo_file)
print("hist -f %s -s 35.0 -b 40" % demo_file)
plot_hist(demo_file, height=35.0, bincount=40)
def plot_hist(f, height=20.0, bincount=None, binwidth=None, pch="o", colour="default", title="", xlab=None, showSummary=False, regular=False, xtitle=None, ytitle=None):
"""
Make a histogram
Arguments:
height -- the height of the histogram in # of lines
bincount -- number of bins in the histogram
binwidth -- width of bins in the histogram
pch -- shape of the bars in the plot
colour -- colour of the bars in the terminal
title -- title at the top of the plot
    xlab -- boolean value for whether or not to display x-axis labels
showSummary -- boolean value for whether or not to display a summary
regular -- boolean value for whether or not to start y-labels at 0
"""
if pch is None:
pch = "o"
if isinstance(f, str):
with open(f) as fh:
f = fh.readlines()
min_val, max_val = None, None
n, mean, sd = 0.0, 0.0, 0.0
for number in read_numbers(f):
n += 1
if min_val is None or number < min_val:
min_val = number
if max_val is None or number > max_val:
max_val = number
mean += number
mean /= n
for number in read_numbers(f):
sd += (mean - number)**2
sd /= (n - 1)
sd **= 0.5
bins = list(calc_bins(n, min_val, max_val, bincount, binwidth))
hist = dict((i, 0) for i in range(len(bins)))
for number in read_numbers(f):
for i, b in enumerate(bins):
if number <= b:
hist[i] += 1
break
if number == max_val and max_val > bins[len(bins) - 1]:
hist[len(hist) - 1] += 1
min_y, max_y = min(hist.values()), max(hist.values())
start = max(min_y, 1)
stop = max_y + 1
if regular:
start = 1
if height is None:
height = stop - start
if height > 20:
height = 20
ys = list(drange(start, stop, float(stop - start) / height))
ys.reverse()
nlen = max(len(str(min_y)), len(str(max_y))) + 1
if title:
print(box_text([title], max(len(hist) * 2, len(title)), nlen))
print()
if ytitle:
print(" " + "y: "+ ytitle + "\n")
# return_string += "y: "+ ytitle + "\n"
used_labs = set()
for y in ys:
ylab = str(int(y))
if ylab in used_labs:
ylab = ""
else:
used_labs.add(ylab)
ylab = " " * (nlen - len(ylab)) + ylab + "|"
print(ylab, end=' ')
for i in range(len(hist)):
if int(y) <= hist[i]:
printcolour(pch, True, colour)
else:
printcolour(" ", True, colour)
print('')
xs = hist.keys()
print(" " * (nlen + 1) + "-" * len(xs))
if xlab:
labels = abbreviate([str(b) for b in bins])
xlen = len(labels[0])
for i in range(0, xlen):
printcolour(" " * (nlen + 1), True, colour)
for x in range(0, len(hist)):
num = labels[x]
if x % 2 != 0:
pass
elif i < len(num):
print(num[i], end=' ')
else:
print(" ", end=' ')
print('')
if xtitle:
full_title = "x: "+ xtitle
print(" " * ((nlen + 1) + len(xs) - len(full_title)) + full_title + "\n")
# return_string += " " * (xs - len(full_title)) + full_title + "\n"
center = max(map(len, map(str, [n, min_val, mean, max_val])))
center += 15
if showSummary:
print()
title = ["Summary"]
print(box_text(title, max(len(hist) * 2, len(title)), nlen))
stats = ["observations: %d" % n, "min value: %f" % min_val,
"mean : %f" % mean, "std dev : %f" % sd, "max value: %f" % max_val]
print(box_text(stats, max(len(hist) * 2, len(title)), nlen))
# print("-" * (2 + center))
# print("|" + "Summary".center(center) + "|")
# print("-" * (2 + center))
# summary = "|" + ("observations: %d" % n).center(center) + "|\n"
# summary += "|" + ("min value: %f" % min_val).center(center) + "|\n"
# summary += "|" + ("mean : %f" % mean).center(center) + "|\n"
# summary += "|" + ("std dev : %f" % sd).center(center) + "|\n"
# summary += "|" + ("max value: %f" % max_val).center(center) + "|\n"
# summary += "-" * (2 + center)
# print(summary)
def main():
parser = optparse.OptionParser(usage=hist['usage'])
parser.add_option(
'-f', '--file', help='a file containing a column of numbers', default=None, dest='f')
parser.add_option('-t', '--title', help='title for the chart', default="", dest='t')
parser.add_option(
'-b', '--bins', help='number of bins in the histogram', type='int', default=None, dest='b')
parser.add_option('-w', '--binwidth', help='width of bins in the histogram',
type='float', default=None, dest='binwidth')
parser.add_option('-s', '--height', help='height of the histogram (in lines)',
type='int', default=None, dest='h')
parser.add_option('-p', '--pch', help='shape of each bar', default='o', dest='p')
parser.add_option('-x', '--xlab', help='label bins on x-axis',
default=None, action="store_true", dest='x')
parser.add_option('-c', '--colour', help='colour of the plot (%s)' %
colour_help, default='default', dest='colour')
parser.add_option('-d', '--demo', help='run demos', action='store_true', dest='demo')
parser.add_option('-n', '--nosummary', help='hide summary',
action='store_false', dest='showSummary', default=True)
parser.add_option('-r', '--regular',
help='use regular y-scale (0 - maximum y value), instead of truncated y-scale (minimum y-value - maximum y-value)',
default=False, action="store_true", dest='regular')
opts, args = parser.parse_args()
if opts.f is None:
if len(args) > 0:
opts.f = args[0]
elif opts.demo is None or opts.demo is False:
opts.f = sys.stdin.readlines()
if opts.demo:
run_demo()
elif opts.f:
plot_hist(opts.f, opts.h, opts.b, opts.binwidth, opts.p, opts.colour,
opts.t, opts.x, opts.showSummary, opts.regular)
else:
print("nothing to plot!")
if __name__ == "__main__":
main()
|
the-stack_0_2785 | from flask import Flask
import pytest
import os
import importlib
import sys
import traceback
MODULE_NAMES = ['numpy']
modules = {}
for m in MODULE_NAMES:
try:
modules[m] = importlib.import_module(m)
except ImportError:
modules[m] = None
app = Flask(__name__)
@app.route('/<module_name>')
def in_module_tests(module_name):
if module_name not in modules:
return "This module is not listed"
try:
        result = modules[module_name].test()
        failures = result.failures
        result_string = "{}: number of failures={}".format(module_name, len(failures))
except (NameError, ImportError, AttributeError):
result_string = "{}: Error running test!".format(module_name)
return result_string
@app.route('/all')
def run_all():
results = "<br>\n".join([in_module_tests(m) for m in MODULE_NAMES])
return str(results)
def module_version(module_name):
m = modules[module_name]
if m is None:
version_string = "{}: unable to import".format(module_name)
else:
version_string = "{}: {}".format(module_name, m.__version__)
return version_string
@app.route('/')
def root():
versions = "<br>\n".join([module_version(m) for m in MODULE_NAMES])
    python_version = "\npython version: %s\n" % sys.version
r = """<br><br>
Imports Successful!<br>
To test each module go to /numpy
or test all at /all.<br>
Test suites can take up to 10 minutes to run, main output is in app logs."""
return python_version + versions + r
if __name__ == '__main__':
try:
port = int(os.getenv("PORT", 8080))
app.run(host='0.0.0.0', port=port, debug=True)
except Exception as e:
traceback.print_exc()
raise e
|
the-stack_0_2789 | from blueman.Functions import *
import gettext
from blueman.plugins.AppletPlugin import AppletPlugin
from blueman.main.SignalTracker import SignalTracker
from gi.repository import GObject
from gi.repository import Gtk
class DiscvManager(AppletPlugin):
__depends__ = ["Menu"]
__author__ = "Walmis"
__icon__ = "gtk-find"
__description__ = _(
"Provides a menu item for making the default adapter temporarily visible when it is set to hidden by default")
__options__ = {
"time": {
"type": int,
"default": 60,
"name": _("Discoverable timeout"),
"desc": _("Amount of time in seconds discoverable mode will last"),
"range": (60, 600)
}
}
def on_load(self, applet):
self.Signals = SignalTracker()
self.item = create_menuitem(_("_Make Discoverable"), get_icon("gtk-find", 16))
applet.Plugins.Menu.Register(self, self.item, 20, False)
self.Applet = applet
self.adapter = None
self.time_left = -1
self.Signals.Handle(self.item, "activate", self.on_set_discoverable)
self.item.props.tooltip_text = _("Make the default adapter temporarily visible")
self.timeout = None
def on_unload(self):
self.Applet.Plugins.Menu.Unregister(self)
del self.item
if self.timeout:
GObject.source_remove(self.timeout)
self.Signals.DisconnectAll()
def on_manager_state_changed(self, state):
if state:
self.init_adapter()
self.update_menuitems()
else:
self.Signals.Disconnect(0)
self.adapter = None
self.update_menuitems()
def on_update(self):
self.time_left -= 1
self.item.get_child().props.label = _("Discoverable... %ss") % self.time_left
self.item.props.sensitive = False
return True
def on_set_discoverable(self, item):
if self.adapter:
self.adapter.set("Discoverable", True)
self.adapter.set("DiscoverableTimeout", self.get_option("time"))
def init_adapter(self):
try:
self.adapter = self.Applet.Manager.get_adapter()
        except Exception:
self.adapter = None
def on_adapter_removed(self, path):
dprint(path)
if path == self.adapter.get_object_path():
self.init_adapter()
self.update_menuitems()
def on_adapter_property_changed(self, path, key, value):
if self.adapter and path == self.adapter.get_object_path():
dprint("prop", key, value)
if key == "DiscoverableTimeout":
if value == 0: #always visible
if self.timeout != None:
GObject.source_remove(self.timeout)
self.time_left = -1
self.timeout = None
else:
if self.time_left > -1:
if self.timeout != None:
GObject.source_remove(self.timeout)
self.time_left = value
self.timeout = GObject.timeout_add(1000, self.on_update)
return
elif (key == "Discoverable" and not value) or (key == "Powered" and not value):
dprint("Stop")
if self.timeout != None:
GObject.source_remove(self.timeout)
self.time_left = -1
self.timeout = None
self.update_menuitems()
def update_menuitems(self):
try:
props = self.adapter.get_properties()
except Exception as e:
dprint("warning: Adapter is None")
self.item.props.visible = False
else:
if (not props["Discoverable"] or props["DiscoverableTimeout"] > 0) and props["Powered"]:
self.item.props.visible = True
self.item.get_child().props.label = _("_Make Discoverable")
self.item.props.sensitive = True
else:
self.item.props.visible = False
|
the-stack_0_2790 | #!/usr/bin/env python
# *****************************************************************
# (C) Copyright IBM Corp. 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# *****************************************************************
import sys
import os
import pathlib
sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..'))
import open_ce.utils as utils # pylint: disable=wrong-import-position
import open_ce.inputs as inputs # pylint: disable=wrong-import-position
from common import get_configs, make_parser, check_recipes
def main(arg_strings=None):
'''
Entry function.
'''
parser = make_parser()
args = inputs.parse_args(parser, arg_strings)
variants = utils.make_variants(args.python_versions, args.build_types, args.mpi_types, args.cuda_versions)
check_result = True
for variant in variants:
main_build_config_data, main_config = get_configs(variant, args.conda_build_configs)
if main_build_config_data["recipes"] is None:
continue
if not check_recipes(main_build_config_data, main_config, variant):
check_result = False
print("Recipe validation failed for variant '{}'.".format(variant))
assert check_result, "All recipes must be valid."
if __name__ == '__main__':
try:
main()
print("RECIPE VALIDATION SUCCESS")
except Exception as exc: # pylint: disable=broad-except
print("RECIPE VALIDATION ERROR: ", exc)
sys.exit(1)
|
the-stack_0_2791 | import functools
import operator
import os
from collections import OrderedDict
from datetime import date, datetime, time
from operator import methodcaller
import numpy as np
import pandas as pd
import pytest
import toolz
import ibis
import ibis.common.exceptions as com
import ibis.expr.analysis as L
import ibis.expr.api as api
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.rules as rlz
import ibis.expr.types as ir
from ibis import literal
from ibis.common.exceptions import IbisTypeError
from ibis.expr.signature import Argument as Arg
from ibis.tests.util import assert_equal
def test_null():
expr = ibis.literal(None)
assert isinstance(expr, ir.NullScalar)
assert isinstance(expr.op(), ops.NullLiteral)
assert expr._arg.value is None
expr2 = ibis.null()
assert_equal(expr, expr2)
assert expr is expr2
assert expr.type() is dt.null
assert expr2.type() is dt.null
@pytest.mark.xfail(
raises=AssertionError,
reason='UTF-8 support in Impala non-existent at the moment?',
)
def test_unicode():
assert False
@pytest.mark.parametrize(
['value', 'expected_type'],
[
(5, 'int8'),
(127, 'int8'),
(128, 'int16'),
(32767, 'int16'),
(32768, 'int32'),
(2147483647, 'int32'),
(2147483648, 'int64'),
(-5, 'int8'),
(-128, 'int8'),
(-129, 'int16'),
(-32769, 'int32'),
(-2147483649, 'int64'),
(1.5, 'double'),
('foo', 'string'),
([1, 2, 3], 'array<int8>'),
],
)
def test_literal_with_implicit_type(value, expected_type):
expr = ibis.literal(value)
assert isinstance(expr, ir.ScalarExpr)
assert expr.type() == dt.dtype(expected_type)
assert isinstance(expr.op(), ops.Literal)
assert expr.op().value is value
pointA = (1, 2)
pointB = (-3, 4)
pointC = (5, 19)
lineAB = [pointA, pointB]
lineBC = [pointB, pointC]
lineCA = [pointC, pointA]
polygon1 = [lineAB, lineBC, lineCA]
polygon2 = [lineAB, lineBC, lineCA]
multilinestring = [lineAB, lineBC, lineCA]
multipoint = [pointA, pointB, pointC]
multipolygon1 = [polygon1, polygon2]
@pytest.mark.parametrize(
['value', 'expected_type'],
[
(5, 'int16'),
(127, 'double'),
(128, 'int64'),
(32767, 'double'),
(32768, 'float'),
(2147483647, 'int64'),
(-5, 'int16'),
(-128, 'int32'),
(-129, 'int64'),
(-32769, 'float'),
(-2147483649, 'double'),
(1.5, 'double'),
('foo', 'string'),
(list(pointA), 'point'),
(tuple(pointA), 'point'),
(list(lineAB), 'linestring'),
(tuple(lineAB), 'linestring'),
(list(polygon1), 'polygon'),
(tuple(polygon1), 'polygon'),
(list(multilinestring), 'multilinestring'),
(tuple(multilinestring), 'multilinestring'),
(list(multipoint), 'multipoint'),
(tuple(multipoint), 'multipoint'),
(list(multipolygon1), 'multipolygon'),
(tuple(multipolygon1), 'multipolygon'),
],
)
def test_literal_with_explicit_type(value, expected_type):
expr = ibis.literal(value, type=expected_type)
assert expr.type().equals(dt.validate_type(expected_type))
@pytest.mark.parametrize(
['value', 'expected_type', 'expected_class'],
[
(list('abc'), 'array<string>', ir.ArrayScalar),
([1, 2, 3], 'array<int8>', ir.ArrayScalar),
({'a': 1, 'b': 2, 'c': 3}, 'map<string, int8>', ir.MapScalar),
({1: 2, 3: 4, 5: 6}, 'map<int8, int8>', ir.MapScalar),
(
{'a': [1.0, 2.0], 'b': [], 'c': [3.0]},
'map<string, array<double>>',
ir.MapScalar,
),
(
OrderedDict(
[
('a', 1),
('b', list('abc')),
('c', OrderedDict([('foo', [1.0, 2.0])])),
]
),
'struct<a: int8, b: array<string>, c: struct<foo: array<double>>>',
ir.StructScalar,
),
],
)
def test_literal_complex_types(value, expected_type, expected_class):
expr = ibis.literal(value)
expr_type = expr.type()
assert expr_type.equals(dt.validate_type(expected_type))
assert isinstance(expr, expected_class)
assert isinstance(expr.op(), ops.Literal)
assert expr.op().value is value
def test_simple_map_operations():
value = {'a': [1.0, 2.0], 'b': [], 'c': [3.0]}
value2 = {'a': [1.0, 2.0], 'c': [3.0], 'd': [4.0, 5.0]}
expr = ibis.literal(value)
expr2 = ibis.literal(value2)
assert isinstance(expr, ir.MapValue)
assert isinstance(expr.length().op(), ops.MapLength)
assert isinstance((expr + expr2).op(), ops.MapConcat)
assert isinstance((expr2 + expr).op(), ops.MapConcat)
default = ibis.literal([0.0])
assert isinstance(expr.get('d', default).op(), ops.MapValueOrDefaultForKey)
# test for an invalid default type, nulls are ok
with pytest.raises(IbisTypeError):
expr.get('d', ibis.literal('foo'))
assert isinstance(
expr.get('d', ibis.literal(None)).op(), ops.MapValueOrDefaultForKey
)
assert isinstance(expr['b'].op(), ops.MapValueForKey)
assert isinstance(expr.keys().op(), ops.MapKeys)
assert isinstance(expr.values().op(), ops.MapValues)
@pytest.mark.parametrize(
['value', 'expected_type'],
[
(32767, 'int8'),
(32768, 'int16'),
(2147483647, 'int16'),
(2147483648, 'int32'),
('foo', 'double'),
],
)
def test_literal_with_non_coercible_type(value, expected_type):
expected_msg = 'Value .* cannot be safely coerced to .*'
with pytest.raises(TypeError, match=expected_msg):
ibis.literal(value, type=expected_type)
def test_non_inferrable_literal():
expected_msg = (
'The datatype of value .* cannot be inferred, try '
'passing it explicitly with the `type` keyword.'
)
value = tuple(pointA)
with pytest.raises(TypeError, match=expected_msg):
ibis.literal(value)
point = ibis.literal(value, type='point')
assert point.type() == dt.point
def test_literal_list():
what = [1, 2, 1000]
expr = api.literal(what)
assert isinstance(expr, ir.ArrayScalar)
# it works!
repr(expr)
def test_literal_array():
what = []
expr = api.literal(what)
assert isinstance(expr, ir.ArrayValue)
assert expr.type().equals(dt.Array(dt.null))
def test_mixed_arity(table):
what = ["bar", table.g, "foo"]
expr = api.as_value_expr(what)
values = expr.op().values
assert isinstance(values[1], ir.StringColumn)
# it works!
repr(expr)
@pytest.mark.parametrize('container', [list, tuple, set, frozenset])
def test_isin_notin_list(table, container):
values = container([1, 2, 3, 4])
expr = table.a.isin(values)
not_expr = table.a.notin(values)
assert isinstance(expr, ir.BooleanColumn)
assert isinstance(expr.op(), ops.Contains)
assert isinstance(not_expr, ir.BooleanColumn)
assert isinstance(not_expr.op(), ops.NotContains)
def test_value_counts(table, string_col):
bool_clause = table[string_col].notin(['1', '4', '7'])
expr = table[bool_clause][string_col].value_counts()
assert isinstance(expr, ir.TableExpr)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_isin_not_comparable():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_isin_array_expr():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_isin_invalid_cases():
# For example, array expression in a list of values, where the inner
# array values originate from some other table
assert False
def test_isin_notin_scalars():
a, b, c = [ibis.literal(x) for x in [1, 1, 2]]
result = a.isin([1, 2])
assert isinstance(result, ir.BooleanScalar)
result = a.notin([b, c, 3])
assert isinstance(result, ir.BooleanScalar)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_isin_null():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_negate_isin():
# Should yield a NotContains
assert False
def test_scalar_isin_list_with_array(table):
val = ibis.literal(2)
options = [table.a, table.b, table.c]
expr = val.isin(options)
assert isinstance(expr, ir.BooleanColumn)
not_expr = val.notin(options)
assert isinstance(not_expr, ir.BooleanColumn)
def test_distinct_basic(functional_alltypes):
expr = functional_alltypes.distinct()
assert isinstance(expr.op(), ops.Distinct)
assert isinstance(expr, ir.TableExpr)
assert expr.op().table is functional_alltypes
expr = functional_alltypes.string_col.distinct()
assert isinstance(expr.op(), ops.DistinctColumn)
assert isinstance(expr, ir.StringColumn)
@pytest.mark.xfail(reason='NYT')
def test_distinct_array_interactions(functional_alltypes):
# array cardinalities / shapes are likely to be different.
a = functional_alltypes.int_col.distinct()
b = functional_alltypes.bigint_col
with pytest.raises(ir.RelationError):
a + b
@pytest.mark.parametrize('where', [lambda t: None, lambda t: t.int_col != 0])
def test_distinct_count(functional_alltypes, where):
result = functional_alltypes.string_col.distinct().count(
where=where(functional_alltypes)
)
assert isinstance(result.op(), ops.CountDistinct)
expected = functional_alltypes.string_col.nunique(
where=where(functional_alltypes)
).name('count')
assert result.equals(expected)
def test_distinct_unnamed_array_expr():
table = ibis.table(
[('year', 'int32'), ('month', 'int32'), ('day', 'int32')], 'foo'
)
# it works!
expr = (
ibis.literal('-')
.join(
[
table.year.cast('string'),
table.month.cast('string'),
table.day.cast('string'),
]
)
.distinct()
)
repr(expr)
def test_distinct_count_numeric_types(functional_alltypes):
metric = (
functional_alltypes.bigint_col.distinct()
.count()
.name('unique_bigints')
)
functional_alltypes.group_by('string_col').aggregate(metric)
def test_nunique(functional_alltypes):
expr = functional_alltypes.string_col.nunique()
assert isinstance(expr.op(), ops.CountDistinct)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_project_with_distinct():
assert False
def test_isnull(table):
expr = table['g'].isnull()
assert isinstance(expr, ir.BooleanColumn)
assert isinstance(expr.op(), ops.IsNull)
expr = ibis.literal('foo').isnull()
assert isinstance(expr, ir.BooleanScalar)
assert isinstance(expr.op(), ops.IsNull)
def test_notnull(table):
expr = table['g'].notnull()
assert isinstance(expr, ir.BooleanColumn)
assert isinstance(expr.op(), ops.NotNull)
expr = ibis.literal('foo').notnull()
assert isinstance(expr, ir.BooleanScalar)
assert isinstance(expr.op(), ops.NotNull)
@pytest.mark.parametrize('column', ['e', 'f'], ids=['float', 'double'])
def test_isnan_isinf_column(table, column):
expr = table[column].isnan()
assert isinstance(expr, ir.BooleanColumn)
assert isinstance(expr.op(), ops.IsNan)
expr = table[column].isinf()
assert isinstance(expr, ir.BooleanColumn)
assert isinstance(expr.op(), ops.IsInf)
@pytest.mark.parametrize('value', [1.3, np.nan, np.inf, -np.inf])
def test_isnan_isinf_scalar(value):
expr = ibis.literal(value).isnan()
assert isinstance(expr, ir.BooleanScalar)
assert isinstance(expr.op(), ops.IsNan)
expr = ibis.literal(value).isinf()
assert isinstance(expr, ir.BooleanScalar)
assert isinstance(expr.op(), ops.IsInf)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_null_literal():
assert False
@pytest.mark.parametrize(
['column', 'operation'],
[
('d', 'cumsum'),
('d', 'cummean'),
('d', 'cummin'),
('d', 'cummax'),
('h', 'cumany'),
('h', 'cumall'),
],
)
def test_cumulative_yield_array_types(table, column, operation):
expr = getattr(getattr(table, column), operation)()
assert isinstance(expr, ir.ColumnExpr)
@pytest.fixture(params=['ln', 'log', 'log2', 'log10'])
def log(request):
return operator.methodcaller(request.param)
@pytest.mark.parametrize('column', list('abcdef'))
def test_log(table, log, column):
result = log(table[column])
assert isinstance(result, ir.FloatingColumn)
# is this what we want?
# assert result.get_name() == c
def test_log_string(table):
g = table.g
with pytest.raises(IbisTypeError):
ops.Log(g, None).to_expr()
@pytest.mark.parametrize('klass', [ops.Ln, ops.Log2, ops.Log10])
def test_log_variants_string(table, klass):
g = table.g
with pytest.raises(IbisTypeError):
klass(g).to_expr()
def test_log_boolean(table, log):
# boolean not implemented for these
h = table['h']
with pytest.raises(IbisTypeError):
log(h)
def test_log_literal(log):
assert isinstance(log(ibis.literal(5)), ir.FloatingScalar)
assert isinstance(log(ibis.literal(5.5)), ir.FloatingScalar)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_exp():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_sqrt():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_trig_functions():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_round():
assert False
def test_cast_same_type_noop(table):
c = table.g
assert c.cast('string') is c
i = ibis.literal(5)
assert i.cast('int8') is i
@pytest.mark.parametrize('type', ['int8', 'int32', 'double', 'float'])
def test_string_to_number(table, type):
casted = table.g.cast(type)
casted_literal = ibis.literal('5').cast(type).name('bar')
assert isinstance(casted, ir.ColumnExpr)
assert casted.type() == dt.dtype(type)
assert isinstance(casted_literal, ir.ScalarExpr)
assert casted_literal.type() == dt.dtype(type)
assert casted_literal.get_name() == 'bar'
@pytest.mark.parametrize('col', list('abcdefh'))
def test_number_to_string_column(table, col):
casted = table[col].cast('string')
assert isinstance(casted, ir.StringColumn)
def test_number_to_string_scalar():
casted_literal = ibis.literal(5).cast('string').name('bar')
assert isinstance(casted_literal, ir.StringScalar)
assert casted_literal.get_name() == 'bar'
def test_casted_exprs_are_named(table):
expr = table.f.cast('string')
assert expr.get_name() == 'cast(f, string)'
# it works! per GH #396
expr.value_counts()
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_nonzero():
assert False
@pytest.mark.parametrize('col', list('abcdefh'))
def test_negate(table, col):
c = table[col]
result = -c
assert isinstance(result, type(c))
assert isinstance(result.op(), ops.Negate)
def test_negate_boolean_scalar():
result = -(ibis.literal(False))
assert isinstance(result, ir.BooleanScalar)
assert isinstance(result.op(), ops.Negate)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_isnull_notnull():
assert False
@pytest.mark.parametrize('column', ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
@pytest.mark.parametrize('how', [None, 'first', 'last', 'heavy'])
@pytest.mark.parametrize('condition_fn', [lambda t: None, lambda t: t.a > 8])
def test_arbitrary(table, column, how, condition_fn):
col = table[column]
where = condition_fn(table)
expr = col.arbitrary(how=how, where=where)
assert expr.type() == col.type()
assert isinstance(expr, ir.ScalarExpr)
assert L.is_reduction(expr)
@pytest.mark.parametrize(
['column', 'operation'],
[
('h', lambda column: column.any()),
('h', lambda column: column.notany()),
('h', lambda column: column.all()),
('c', lambda column: (column == 0).any()),
('c', lambda column: (column == 0).all()),
],
)
def test_any_all_notany(table, column, operation):
expr = operation(table[column])
assert isinstance(expr, ir.BooleanScalar)
assert L.is_reduction(expr)
@pytest.mark.parametrize(
'operation',
[
operator.lt,
operator.gt,
operator.ge,
operator.le,
operator.eq,
operator.ne,
],
)
@pytest.mark.parametrize('column', list('abcdef'))
@pytest.mark.parametrize('case', [2, 2 ** 9, 2 ** 17, 2 ** 33, 1.5])
def test_numbers_compare_numeric_literal(table, operation, column, case):
ex_op_class = {
operator.eq: ops.Equals,
operator.ne: ops.NotEquals,
operator.le: ops.LessEqual,
operator.lt: ops.Less,
operator.ge: ops.GreaterEqual,
operator.gt: ops.Greater,
}
col = table[column]
result = operation(col, case)
assert isinstance(result, ir.BooleanColumn)
assert isinstance(result.op(), ex_op_class[operation])
def test_boolean_comparisons(table):
bool_col = table.h
result = bool_col == True # noqa
assert isinstance(result, ir.BooleanColumn)
result = bool_col == False # noqa
assert isinstance(result, ir.BooleanColumn)
@pytest.mark.parametrize(
'operation',
[
operator.lt,
operator.gt,
operator.ge,
operator.le,
operator.eq,
operator.ne,
],
)
def test_string_comparisons(table, operation):
string_col = table.g
result = operation(string_col, 'foo')
assert isinstance(result, ir.BooleanColumn)
@pytest.mark.parametrize(
'operation', [operator.xor, operator.or_, operator.and_]
)
def test_boolean_logical_ops(table, operation):
expr = table.a > 0
result = operation(expr, table.h)
assert isinstance(result, ir.BooleanColumn)
result = operation(expr, True)
refl_result = operation(True, expr)
assert isinstance(result, ir.BooleanColumn)
assert isinstance(refl_result, ir.BooleanColumn)
true = ibis.literal(True)
false = ibis.literal(False)
result = operation(true, false)
assert isinstance(result, ir.BooleanScalar)
def test_null_column():
t = ibis.table([('a', 'string')], name='t')
s = t.mutate(b=ibis.NA)
assert s.b.type() == dt.null
assert isinstance(s.b, ir.NullColumn)
def test_null_column_union():
s = ibis.table([('a', 'string'), ('b', 'double')])
t = ibis.table([('a', 'string')])
with pytest.raises(ibis.common.exceptions.RelationError):
s.union(t.mutate(b=ibis.NA)) # needs a type
assert s.union(t.mutate(b=ibis.NA.cast('double'))).schema() == s.schema()
def test_string_compare_numeric_array(table):
with pytest.raises(TypeError):
table.g == table.f
with pytest.raises(TypeError):
table.g == table.c
def test_string_compare_numeric_literal(table):
with pytest.raises(TypeError):
table.g == ibis.literal(1.5)
with pytest.raises(TypeError):
table.g == ibis.literal(5)
def test_between(table):
result = table.f.between(0, 1)
assert isinstance(result, ir.BooleanColumn)
assert isinstance(result.op(), ops.Between)
# it works!
result = table.g.between('a', 'f')
assert isinstance(result, ir.BooleanColumn)
result = ibis.literal(1).between(table.a, table.c)
assert isinstance(result, ir.BooleanColumn)
result = ibis.literal(7).between(5, 10)
assert isinstance(result, ir.BooleanScalar)
# Cases where between should immediately fail, e.g. incomparables
with pytest.raises(TypeError):
table.f.between('0', '1')
with pytest.raises(TypeError):
table.f.between(0, '1')
with pytest.raises(TypeError):
table.f.between('0', 1)
def test_chained_comparisons_not_allowed(table):
with pytest.raises(ValueError):
0 < table.f < 1
@pytest.mark.parametrize(
'operation', [operator.add, operator.mul, operator.truediv, operator.sub]
)
def test_binop_string_type_error(table, operation):
# Strings are not valid for any numeric arithmetic
ints = table.d
strs = table.g
with pytest.raises(TypeError):
operation(ints, strs)
with pytest.raises(TypeError):
operation(strs, ints)
@pytest.mark.parametrize(
['op', 'name', 'case', 'ex_type'],
[
(operator.add, 'a', 0, 'int8'),
(operator.add, 'a', 5, 'int16'),
(operator.add, 'a', 100000, 'int32'),
(operator.add, 'a', -100000, 'int32'),
(operator.add, 'a', 1.5, 'double'),
(operator.add, 'b', 0, 'int16'),
(operator.add, 'b', 5, 'int32'),
(operator.add, 'b', -5, 'int32'),
(operator.add, 'c', 0, 'int32'),
(operator.add, 'c', 5, 'int64'),
(operator.add, 'c', -5, 'int64'),
# technically this can overflow, but we allow it
(operator.add, 'd', 5, 'int64'),
(operator.mul, 'a', 0, 'int8'),
(operator.mul, 'a', 5, 'int16'),
(operator.mul, 'a', 2 ** 24, 'int32'),
(operator.mul, 'a', -(2 ** 24) + 1, 'int32'),
(operator.mul, 'a', 1.5, 'double'),
(operator.mul, 'b', 0, 'int16'),
(operator.mul, 'b', 5, 'int32'),
(operator.mul, 'b', -5, 'int32'),
(operator.mul, 'c', 0, 'int32'),
(operator.mul, 'c', 5, 'int64'),
(operator.mul, 'c', -5, 'int64'),
# technically this can overflow, but we allow it
(operator.mul, 'd', 5, 'int64'),
(operator.sub, 'a', 5, 'int16'),
(operator.sub, 'a', 100000, 'int32'),
(operator.sub, 'a', -100000, 'int32'),
(operator.sub, 'a', 1.5, 'double'),
(operator.sub, 'b', 5, 'int32'),
(operator.sub, 'b', -5, 'int32'),
(operator.sub, 'c', 5, 'int64'),
(operator.sub, 'c', -5, 'int64'),
# technically this can overflow, but we allow it
(operator.sub, 'd', 5, 'int64'),
(operator.truediv, 'a', 5, 'double'),
(operator.truediv, 'a', 1.5, 'double'),
(operator.truediv, 'b', 5, 'double'),
(operator.truediv, 'b', -5, 'double'),
(operator.truediv, 'c', 5, 'double'),
(operator.pow, 'a', 0, 'double'),
(operator.pow, 'b', 0, 'double'),
(operator.pow, 'c', 0, 'double'),
(operator.pow, 'd', 0, 'double'),
(operator.pow, 'e', 0, 'float'),
(operator.pow, 'f', 0, 'double'),
(operator.pow, 'a', 2, 'double'),
(operator.pow, 'b', 2, 'double'),
(operator.pow, 'c', 2, 'double'),
(operator.pow, 'd', 2, 'double'),
(operator.pow, 'a', 1.5, 'double'),
(operator.pow, 'b', 1.5, 'double'),
(operator.pow, 'c', 1.5, 'double'),
(operator.pow, 'd', 1.5, 'double'),
(operator.pow, 'e', 2, 'float'),
(operator.pow, 'f', 2, 'double'),
(operator.pow, 'a', -2, 'double'),
(operator.pow, 'b', -2, 'double'),
(operator.pow, 'c', -2, 'double'),
(operator.pow, 'd', -2, 'double'),
],
ids=lambda arg: str(getattr(arg, '__name__', arg)),
)
def test_literal_promotions(table, op, name, case, ex_type):
col = table[name]
result = op(col, case)
assert result.type() == dt.dtype(ex_type)
result = op(case, col)
assert result.type() == dt.dtype(ex_type)
@pytest.mark.parametrize(
('op', 'left_fn', 'right_fn', 'ex_type'),
[
(operator.sub, lambda t: t['a'], lambda t: 0, 'int8'),
(operator.sub, lambda t: 0, lambda t: t['a'], 'int16'),
(operator.sub, lambda t: t['b'], lambda t: 0, 'int16'),
(operator.sub, lambda t: 0, lambda t: t['b'], 'int32'),
(operator.sub, lambda t: t['c'], lambda t: 0, 'int32'),
(operator.sub, lambda t: 0, lambda t: t['c'], 'int64'),
],
ids=lambda arg: str(getattr(arg, '__name__', arg)),
)
def test_zero_subtract_literal_promotions(
table, op, left_fn, right_fn, ex_type
):
    # in the case of zero subtraction, the order of operands matters
left, right = left_fn(table), right_fn(table)
result = op(left, right)
assert result.type() == dt.dtype(ex_type)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_add_array_promotions():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_subtract_array_promotions():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_multiply_array_promotions():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_divide_array_promotions():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_string_add_concat():
assert False
@pytest.fixture
def expr():
exprs = [ibis.literal(1).name('a'), ibis.literal(2).name('b')]
return ibis.expr_list(exprs)
def test_names(expr):
assert expr.names() == ['a', 'b']
def test_prefix(expr):
prefixed = expr.prefix('foo_')
result = prefixed.names()
assert result == ['foo_a', 'foo_b']
def test_rename(expr):
renamed = expr.rename(lambda x: 'foo({0})'.format(x))
result = renamed.names()
assert result == ['foo(a)', 'foo(b)']
def test_suffix(expr):
suffixed = expr.suffix('.x')
result = suffixed.names()
assert result == ['a.x', 'b.x']
def test_concat():
exprs = [ibis.literal(1).name('a'), ibis.literal(2).name('b')]
exprs2 = [ibis.literal(3).name('c'), ibis.literal(4).name('d')]
list1 = ibis.expr_list(exprs)
list2 = ibis.expr_list(exprs2)
result = list1.concat(list2)
expected = ibis.expr_list(exprs + exprs2)
assert_equal(result, expected)
def test_substitute_dict():
table = ibis.table([('foo', 'string'), ('bar', 'string')], 't1')
subs = {'a': 'one', 'b': table.bar}
result = table.foo.substitute(subs)
expected = (
table.foo.case()
.when('a', 'one')
.when('b', table.bar)
.else_(table.foo)
.end()
)
assert_equal(result, expected)
result = table.foo.substitute(subs, else_=ibis.NA)
expected = (
table.foo.case()
.when('a', 'one')
.when('b', table.bar)
.else_(ibis.NA)
.end()
)
assert_equal(result, expected)
@pytest.mark.parametrize(
'typ',
[
'array<map<string, array<array<double>>>>',
'string',
'double',
'float',
'int64',
],
)
def test_not_without_boolean(typ):
t = ibis.table([('a', typ)], name='t')
c = t.a
with pytest.raises(TypeError):
~c
@pytest.mark.parametrize(
('position', 'names'),
[
(0, 'foo'),
(1, 'bar'),
([0], ['foo']),
([1], ['bar']),
([0, 1], ['foo', 'bar']),
([1, 0], ['bar', 'foo']),
],
)
@pytest.mark.parametrize(
'expr_func',
[
lambda t, args: t[args],
lambda t, args: t.sort_by(args),
lambda t, args: t.group_by(args).aggregate(bar_avg=t.bar.mean()),
],
)
def test_table_operations_with_integer_column(position, names, expr_func):
t = ibis.table([('foo', 'string'), ('bar', 'double')])
result = expr_func(t, position)
expected = expr_func(t, names)
assert result.equals(expected)
@pytest.mark.parametrize('value', ['abcdefg', ['a', 'b', 'c'], [1, 2, 3]])
@pytest.mark.parametrize(
'operation', ['pow', 'sub', 'truediv', 'floordiv', 'mod']
)
def test_generic_value_api_no_arithmetic(value, operation):
func = getattr(operator, operation)
expr = ibis.literal(value)
with pytest.raises(TypeError):
func(expr, expr)
@pytest.mark.parametrize(
('value', 'expected'), [(5, dt.int8), (5.4, dt.double), ('abc', dt.string)]
)
def test_fillna_null(value, expected):
assert ibis.NA.fillna(value).type().equals(expected)
@pytest.mark.parametrize(
('left', 'right'),
[
(literal('2017-04-01'), date(2017, 4, 2)),
(date(2017, 4, 2), literal('2017-04-01')),
(literal('2017-04-01 01:02:33'), datetime(2017, 4, 1, 1, 3, 34)),
(datetime(2017, 4, 1, 1, 3, 34), literal('2017-04-01 01:02:33')),
],
)
@pytest.mark.parametrize(
'op',
[
operator.eq,
operator.ne,
operator.lt,
operator.le,
operator.gt,
operator.ge,
lambda left, right: ibis.timestamp('2017-04-01 00:02:34').between(
left, right
),
lambda left, right: ibis.timestamp('2017-04-01')
.cast(dt.date)
.between(left, right),
],
)
def test_string_temporal_compare(op, left, right):
result = op(left, right)
assert result.type().equals(dt.boolean)
@pytest.mark.parametrize(
('value', 'type', 'expected_type_class'),
[
(2.21, 'decimal', dt.Decimal),
(3.14, 'double', dt.Double),
(4.2, 'int64', dt.Double),
(4, 'int64', dt.Int64),
],
)
def test_decimal_modulo_output_type(value, type, expected_type_class):
t = ibis.table([('a', type)])
expr = t.a % value
assert isinstance(expr.type(), expected_type_class)
@pytest.mark.parametrize(
('left', 'right'),
[(literal('10:00'), time(10, 0)), (time(10, 0), literal('10:00'))],
)
@pytest.mark.parametrize(
'op',
[
operator.eq,
operator.ne,
operator.lt,
operator.le,
operator.gt,
operator.ge,
],
)
def test_time_compare(op, left, right):
result = op(left, right)
assert result.type().equals(dt.boolean)
@pytest.mark.parametrize(
('left', 'right'),
[
(literal('10:00'), date(2017, 4, 2)),
(literal('10:00'), datetime(2017, 4, 2, 1, 1)),
(literal('10:00'), literal('2017-04-01')),
],
)
@pytest.mark.parametrize(
'op', [operator.eq, operator.lt, operator.le, operator.gt, operator.ge]
)
def test_time_timestamp_invalid_compare(op, left, right):
result = op(left, right)
assert result.type().equals(dt.boolean)
def test_scalar_parameter_set():
value = ibis.param({dt.int64})
assert isinstance(value.op(), ops.ScalarParameter)
assert value.type().equals(dt.Set(dt.int64))
def test_scalar_parameter_repr():
value = ibis.param(dt.timestamp).name('value')
assert repr(value) == 'value = ScalarParameter[timestamp]'
value_op = value.op()
assert repr(value_op) == "ScalarParameter(type=timestamp)"
@pytest.mark.parametrize(
('left', 'right', 'expected'),
[
(
# same value type, same name
ibis.param(dt.timestamp),
ibis.param(dt.timestamp),
False,
),
(
# different value type, same name
ibis.param(dt.date),
ibis.param(dt.timestamp),
False,
),
(
# same value type, different name
ibis.param(dt.timestamp),
ibis.param(dt.timestamp),
False,
),
(
# different value type, different name
ibis.param(dt.date),
ibis.param(dt.timestamp),
False,
),
(
# different Python class, left side is param
ibis.param(dt.timestamp),
dt.date,
False,
),
(
# different Python class, right side is param
dt.date,
ibis.param(dt.timestamp),
False,
),
],
)
def test_scalar_parameter_compare(left, right, expected):
assert left.equals(right) == expected
@pytest.mark.parametrize(
('case', 'creator'),
[
(datetime.now(), toolz.compose(methodcaller('time'), ibis.timestamp)),
('now', toolz.compose(methodcaller('time'), ibis.timestamp)),
(datetime.now().time(), ibis.time),
('10:37', ibis.time),
],
)
@pytest.mark.parametrize(
('left', 'right'), [(1, 'a'), ('a', 1), (1.0, 2.0), (['a'], [1])]
)
def test_between_time_failure_time(case, creator, left, right):
value = creator(case)
with pytest.raises(TypeError):
value.between(left, right)
def test_custom_type_binary_operations():
class Foo(ir.ValueExpr):
def __add__(self, other):
op = self.op()
return type(op)(op.value + other).to_expr()
__radd__ = __add__
class FooNode(ops.ValueOp):
value = Arg(rlz.integer)
def output_type(self):
return functools.partial(Foo, dtype=dt.int64)
left = ibis.literal(2)
right = FooNode(3).to_expr()
result = left + right
assert isinstance(result, Foo)
assert isinstance(result.op(), FooNode)
left = FooNode(3).to_expr()
right = ibis.literal(2)
result = left + right
assert isinstance(result, Foo)
assert isinstance(result.op(), FooNode)
def test_empty_array_as_argument():
class Foo(ir.Expr):
pass
class FooNode(ops.ValueOp):
value = Arg(rlz.value(dt.Array(dt.int64)))
def output_type(self):
return Foo
node = FooNode([])
value = node.value
expected = literal([]).cast(dt.Array(dt.int64))
assert value.type().equals(dt.Array(dt.null))
assert value.cast(dt.Array(dt.int64)).equals(expected)
def test_nullable_column_propagated():
t = ibis.table(
[
('a', dt.Int32(nullable=True)),
('b', dt.Int32(nullable=False)),
('c', dt.String(nullable=False)),
('d', dt.double), # nullable by default
('f', dt.Double(nullable=False)),
]
)
assert t.a.type().nullable is True
assert t.b.type().nullable is False
assert t.c.type().nullable is False
assert t.d.type().nullable is True
assert t.f.type().nullable is False
s = t.a + t.d
assert s.type().nullable is True
s = t.b + t.d
assert s.type().nullable is True
s = t.b + t.f
assert s.type().nullable is False
@pytest.mark.parametrize(
'base_expr',
[
ibis.table([('interval_col', dt.Interval(unit='D'))]).interval_col,
ibis.interval(seconds=42),
],
)
def test_interval_negate(base_expr):
expr = -base_expr
expr2 = base_expr.negate()
expr3 = ibis.negate(base_expr)
assert isinstance(expr.op(), ops.Negate)
assert expr.equals(expr2)
assert expr.equals(expr3)
def test_large_timestamp():
expr = ibis.timestamp('4567-02-03')
expected = datetime(year=4567, month=2, day=3)
result = expr.op().value
assert result == expected
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_timestamp_with_timezone(tz):
expr = ibis.timestamp('2017-01-01', timezone=tz)
expected = pd.Timestamp('2017-01-01', tz=tz)
result = expr.op().value
assert expected == result
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_timestamp_timezone_type(tz):
expr = ibis.timestamp('2017-01-01', timezone=tz)
expected = dt.Timestamp(timezone=tz)
assert expected == expr.op().dtype
def test_map_get_broadcast():
t = ibis.table([('a', 'string')], name='t')
lookup_table = ibis.literal({'a': 1, 'b': 2})
expr = lookup_table.get(t.a)
assert isinstance(expr, ir.IntegerColumn)
def test_map_getitem_broadcast():
t = ibis.table([('a', 'string')], name='t')
lookup_table = ibis.literal({'a': 1, 'b': 2})
expr = lookup_table[t.a]
assert isinstance(expr, ir.IntegerColumn)
def test_map_keys_output_type():
mapping = ibis.literal({'a': 1, 'b': 2})
assert mapping.keys().type() == dt.Array(dt.string)
def test_map_values_output_type():
mapping = ibis.literal({'a': 1, 'b': 2})
assert mapping.values().type() == dt.Array(dt.int8)
def test_scalar_isin_map_keys():
mapping = ibis.literal({'a': 1, 'b': 2})
key = ibis.literal('a')
expr = key.isin(mapping.keys())
assert isinstance(expr, ir.BooleanScalar)
def test_column_isin_map_keys():
t = ibis.table([('a', 'string')], name='t')
mapping = ibis.literal({'a': 1, 'b': 2})
expr = t.a.isin(mapping.keys())
assert isinstance(expr, ir.BooleanColumn)
def test_map_get_with_compatible_value_smaller():
value = ibis.literal({'A': 1000, 'B': 2000})
expr = value.get('C', 3)
assert value.type() == dt.Map(dt.string, dt.int16)
assert expr.type() == dt.int16
def test_map_get_with_compatible_value_bigger():
value = ibis.literal({'A': 1, 'B': 2})
expr = value.get('C', 3000)
assert value.type() == dt.Map(dt.string, dt.int8)
assert expr.type() == dt.int16
def test_map_get_with_incompatible_value_different_kind():
value = ibis.literal({'A': 1000, 'B': 2000})
with pytest.raises(IbisTypeError):
value.get('C', 3.0)
@pytest.mark.parametrize('null_value', [None, ibis.NA])
def test_map_get_with_null_on_not_nullable(null_value):
map_type = dt.Map(dt.string, dt.Int16(nullable=False))
value = ibis.literal({'A': 1000, 'B': 2000}).cast(map_type)
assert value.type() == map_type
with pytest.raises(IbisTypeError):
assert value.get('C', null_value)
@pytest.mark.parametrize('null_value', [None, ibis.NA])
def test_map_get_with_null_on_nullable(null_value):
value = ibis.literal({'A': 1000, 'B': None})
result = value.get('C', null_value)
assert result.type().nullable
@pytest.mark.parametrize('null_value', [None, ibis.NA])
def test_map_get_with_null_on_null_type_with_null(null_value):
value = ibis.literal({'A': None, 'B': None})
result = value.get('C', null_value)
assert result.type().nullable
def test_map_get_with_null_on_null_type_with_non_null():
value = ibis.literal({'A': None, 'B': None})
assert value.get('C', 1).type() == dt.int8
def test_map_get_with_incompatible_value():
value = ibis.literal({'A': 1000, 'B': 2000})
with pytest.raises(IbisTypeError):
value.get('C', ['A'])
@pytest.mark.parametrize(
('value', 'expected_type'),
[
(datetime.now(), dt.timestamp),
(datetime.now().date(), dt.date),
(datetime.now().time(), dt.time),
],
)
def test_invalid_negate(value, expected_type):
expr = ibis.literal(value)
assert expr.type() == expected_type
with pytest.raises(TypeError):
-expr
@pytest.mark.parametrize(
'type',
[
np.float16,
np.float32,
np.float64,
np.int16,
np.int32,
np.int64,
np.int64,
np.int8,
np.timedelta64,
np.uint16,
np.uint32,
np.uint64,
np.uint64,
np.uint8,
float,
int,
],
)
def test_valid_negate(type):
value = type(1)
expr = ibis.literal(value)
assert -expr is not None
@pytest.mark.xfail(
reason='Type not supported in most backends', raises=TypeError
)
@pytest.mark.skipif(
    os.name == 'nt', reason='np.float128 does not appear to exist on Windows'
)
def test_valid_negate_float128():
value = np.float128(1)
expr = ibis.literal(value)
assert -expr is not None
@pytest.mark.parametrize(
('kind', 'begin', 'end'),
[
('preceding', None, None),
('preceding', 1, None),
('preceding', -1, 1),
('preceding', 1, -1),
('preceding', -1, -1),
('following', None, None),
('following', None, 1),
('following', -1, 1),
('following', 1, -1),
('following', -1, -1),
],
)
def test_window_unbounded_invalid(kind, begin, end):
kwargs = {kind: (begin, end)}
with pytest.raises(com.IbisInputError):
ibis.window(**kwargs)
@pytest.mark.parametrize(
('left', 'right', 'expected'),
[
(ibis.literal(1), ibis.literal(1.0), dt.float64),
(ibis.literal('a'), ibis.literal('b'), dt.string),
(ibis.literal(1.0), ibis.literal(1), dt.float64),
(ibis.literal(1), ibis.literal(1), dt.int8),
(ibis.literal(1), ibis.literal(1000), dt.int16),
(ibis.literal(2 ** 16), ibis.literal(2 ** 17), dt.int32),
(ibis.literal(2 ** 50), ibis.literal(1000), dt.int64),
(ibis.literal([1, 2]), ibis.literal([1, 2]), dt.Array(dt.int8)),
(ibis.literal(['a']), ibis.literal([]), dt.Array(dt.string)),
(ibis.literal([]), ibis.literal(['a']), dt.Array(dt.string)),
(ibis.literal([]), ibis.literal([]), dt.Array(dt.null)),
],
)
def test_nullif_type(left, right, expected):
assert left.nullif(right).type() == expected
@pytest.mark.parametrize(
('left', 'right'), [(ibis.literal(1), ibis.literal('a'))]
)
def test_nullif_fail(left, right):
with pytest.raises(com.IbisTypeError):
left.nullif(right)
with pytest.raises(com.IbisTypeError):
right.nullif(left)
@pytest.mark.parametrize(
"join_method",
[
"left_join",
pytest.param(
"right_join",
marks=pytest.mark.xfail(
raises=AttributeError, reason="right_join is not an ibis API"
),
),
"inner_join",
"outer_join",
"asof_join",
pytest.param(
"semi_join",
marks=pytest.mark.xfail(
raises=com.IbisTypeError,
reason=(
"semi_join only gives access to the left table's "
"columns"
),
),
),
],
)
@pytest.mark.xfail(
raises=(com.IbisError, AttributeError),
reason="Select from unambiguous joins not implemented",
)
def test_select_on_unambiguous_join(join_method):
t = ibis.table([("a0", dt.int64), ("b1", dt.string)], name="t")
s = ibis.table([("a1", dt.int64), ("b2", dt.string)], name="s")
method = getattr(t, join_method)
join = method(s, t.b1 == s.b2)
expr1 = join["a0", "a1"]
expr2 = join[["a0", "a1"]]
expr3 = join.select(["a0", "a1"])
assert expr1.equals(expr2)
assert expr1.equals(expr3)
def test_chained_select_on_join():
t = ibis.table([("a", dt.int64)], name="t")
s = ibis.table([("a", dt.int64), ("b", dt.string)], name="s")
join = t.join(s)[t.a, s.b]
expr1 = join["a", "b"]
expr2 = join.select(["a", "b"])
assert expr1.equals(expr2)
def test_repr_list_of_lists():
lit = ibis.literal([[1]])
result = repr(lit)
expected = """\
Literal[array<array<int8>>]
[[1]]"""
assert result == expected
def test_repr_list_of_lists_in_table():
t = ibis.table([('a', 'int64')], name='t')
lit = ibis.literal([[1]])
expr = t[t, lit.name('array_of_array')]
result = repr(expr)
expected = """\
ref_0
UnboundTable[table]
name: t
schema:
a : int64
Selection[table]
table:
Table: ref_0
selections:
Table: ref_0
array_of_array = Literal[array<array<int8>>]
[[1]]"""
assert result == expected
|
the-stack_0_2792 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: et sw=4 ts=4
'''
Copyright (c) 2008, Yahoo! Inc. All rights reserved.
Code licensed under the BSD License:
http://developer.yahoo.net/yui/license.html
version: 1.0.0b1
'''
import yuidoc_parse, yuidoc_highlight, yuidoc_generate
def main():
from optparse import OptionParser
optparser = OptionParser("usage: %prog inputdir [options] inputdir")
optparser.set_defaults(extension=".js",
newext=".highlighted",
parseroutdir="/tmp",
outputdir="docs",
parserfile="parsed.json",
showprivate=False,
project="Yahoo! UI Library",
version="",
projecturl="http://developer.yahoo.com/yui/",
yuiversion=False,
ydn=False
)
optparser.add_option( "-p", "--parseroutdir",
action="store", dest="parseroutdir", type="string",
help="Directory to write the parser temp data" )
optparser.add_option( "-o", "--outputdir",
action="store", dest="outputdir", type="string",
help="Directory to write the html documentation" )
optparser.add_option( "-f", "--file",
action="store", dest="parserfile", type="string",
help="The name of the file that contains the JSON doc info" )
optparser.add_option( "-t", "--template",
action="store", dest="templatedir", type="string",
help="The directory containing the html tmplate" )
optparser.add_option( "-c", "--crosslink",
action="store", dest="crosslinkdir", type="string",
help="The directory containing json data for other modules to crosslink" )
optparser.add_option( "-s", "--showprivate",
action="store_true", dest="showprivate",
help="Should private properties/methods be in the docs?" )
optparser.add_option( "-e", "--extension",
action="store", dest="extension", type="string",
help="The extension to parse" )
optparser.add_option( "-n", "--newextension",
action="store", dest="newext", type="string",
help="The extension to append to the yuisyntax highlighted output file" )
optparser.add_option( "-m", "--project",
action="store", dest="project", type="string",
help="The name of the project" )
optparser.add_option( "-v", "--version",
action="store", dest="version", type="string",
help="The version of the project" )
optparser.add_option( "-u", "--projecturl",
action="store", dest="projecturl", type="string",
help="The project url" )
optparser.add_option( "-Y", "--yuiversion",
action="store", dest="yuiversion", type="string",
help="The version of YUI library used in the project. This parameter applies to the output for attributes, which differs between YUI2 and YUI3." )
optparser.add_option( "-y", "--ydn",
action="store_true", dest="ydn",
help="Add YDN MyBlogLog intrumentation?" )
(opts, inputdirs) = optparser.parse_args()
if len(inputdirs) > 0:
docparser = yuidoc_parse.DocParser( inputdirs,
opts.parseroutdir,
opts.parserfile,
opts.extension,
opts.version,
opts.yuiversion
)
highlighter = yuidoc_highlight.DocHighlighter( [opts.parseroutdir],
opts.parseroutdir,
opts.extension,
opts.newext )
gen = yuidoc_generate.DocGenerator( opts.parseroutdir,
opts.parserfile,
opts.outputdir,
opts.templatedir,
opts.newext,
opts.showprivate,
opts.project,
opts.version,
opts.projecturl,
opts.ydn
)
gen.process()
else:
optparser.error("Incorrect number of arguments")
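# An illustrative command line (the script name and paths below are hypothetical,
# chosen only to show how the options defined above fit together):
#
#   python main.py src/js -p /tmp/yuidoc -o docs -t template -m "My Project" -v 1.0.0
#
# This parses the JavaScript under src/js, writes intermediate parser data to
# /tmp/yuidoc, and generates the HTML documentation into docs/ using template/.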
if __name__ == '__main__':
main()
|
the-stack_0_2793 | import ipaddress
import os
import re
from urllib.parse import urlsplit, urlunsplit
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.functional import SimpleLazyObject
from django.utils.ipv6 import is_valid_ipv6_address
from django.utils.translation import gettext_lazy as _, ngettext_lazy
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
def _lazy_re_compile(regex, flags=0):
"""Lazily compile a regex with flags."""
def _compile():
# Compile the regex if it was not passed pre-compiled.
if isinstance(regex, str):
return re.compile(regex, flags)
else:
assert not flags, "flags must be empty if regex is passed pre-compiled"
return regex
return SimpleLazyObject(_compile)
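# A minimal illustration of the lazy compilation above (the name below is
# hypothetical, not part of this module's public API): the regex object is only
# built the first time the lazy wrapper is actually used.
_example_lazy_digits = _lazy_re_compile(r'^\d+\Z')
# _example_lazy_digits.match('123') triggers the deferred re.compile() call;
# until then no compilation work has been done.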
@deconstructible
class RegexValidator:
regex = ''
message = _('Enter a valid value.')
code = 'invalid'
inverse_match = False
flags = 0
def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None):
if regex is not None:
self.regex = regex
if message is not None:
self.message = message
if code is not None:
self.code = code
if inverse_match is not None:
self.inverse_match = inverse_match
if flags is not None:
self.flags = flags
if self.flags and not isinstance(self.regex, str):
raise TypeError("If the flags are set, regex must be a regular expression string.")
self.regex = _lazy_re_compile(self.regex, self.flags)
def __call__(self, value):
"""
Validate that the input contains (or does *not* contain, if
inverse_match is True) a match for the regular expression.
"""
regex_matches = bool(self.regex.search(str(value)))
invalid_input = regex_matches if self.inverse_match else not regex_matches
if invalid_input:
raise ValidationError(self.message, code=self.code)
def __eq__(self, other):
return (
isinstance(other, RegexValidator) and
self.regex.pattern == other.regex.pattern and
self.regex.flags == other.regex.flags and
(self.message == other.message) and
(self.code == other.code) and
(self.inverse_match == other.inverse_match)
)
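# A short usage sketch for RegexValidator (illustrative only; the validator and
# message here are examples, not validators shipped with this module):
_example_hex_color_validator = RegexValidator(
    r'^#[0-9a-fA-F]{6}\Z',
    message=_('Enter a valid hex color such as #1a2b3c.'),
    code='invalid_hex_color',
)
# _example_hex_color_validator('#00ff99') returns None; a non-matching value
# such as '#zzz' raises ValidationError with code 'invalid_hex_color'.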
@deconstructible
class URLValidator(RegexValidator):
ul = '\u00a1-\uffff' # unicode letters range (must not be a raw string)
# IP patterns
ipv4_re = r'(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)(?:\.(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}'
ipv6_re = r'\[[0-9a-f:\.]+\]' # (simple regex, validated later)
# Host patterns
hostname_re = r'[a-z' + ul + r'0-9](?:[a-z' + ul + r'0-9-]{0,61}[a-z' + ul + r'0-9])?'
# Max length for domain name labels is 63 characters per RFC 1034 sec. 3.1
domain_re = r'(?:\.(?!-)[a-z' + ul + r'0-9-]{1,63}(?<!-))*'
tld_re = (
r'\.' # dot
r'(?!-)' # can't start with a dash
r'(?:[a-z' + ul + '-]{2,63}' # domain label
r'|xn--[a-z0-9]{1,59})' # or punycode label
r'(?<!-)' # can't end with a dash
r'\.?' # may have a trailing dot
)
host_re = '(' + hostname_re + domain_re + tld_re + '|localhost)'
regex = _lazy_re_compile(
r'^(?:[a-z0-9\.\-\+]*)://' # scheme is validated separately
r'(?:\S+(?::\S*)?@)?' # user:pass authentication
r'(?:' + ipv4_re + '|' + ipv6_re + '|' + host_re + ')'
r'(?::\d{2,5})?' # port
r'(?:[/?#][^\s]*)?' # resource path
r'\Z', re.IGNORECASE)
message = _('Enter a valid URL.')
schemes = ['http', 'https', 'ftp', 'ftps']
def __init__(self, schemes=None, **kwargs):
super().__init__(**kwargs)
if schemes is not None:
self.schemes = schemes
def __call__(self, value):
# Check first if the scheme is valid
scheme = value.split('://')[0].lower()
if scheme not in self.schemes:
raise ValidationError(self.message, code=self.code)
# Then check full URL
try:
super().__call__(value)
except ValidationError as e:
# Trivial case failed. Try for possible IDN domain
if value:
try:
scheme, netloc, path, query, fragment = urlsplit(value)
except ValueError: # for example, "Invalid IPv6 URL"
raise ValidationError(self.message, code=self.code)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
raise e
url = urlunsplit((scheme, netloc, path, query, fragment))
super().__call__(url)
else:
raise
else:
# Now verify IPv6 in the netloc part
host_match = re.search(r'^\[(.+)\](?::\d{2,5})?$', urlsplit(value).netloc)
if host_match:
potential_ip = host_match.groups()[0]
try:
validate_ipv6_address(potential_ip)
except ValidationError:
raise ValidationError(self.message, code=self.code)
# The maximum length of a full host name is 253 characters per RFC 1034
# section 3.1. It's defined to be 255 bytes or less, but this includes
# one byte for the length of the name and one byte for the trailing dot
# that's used to indicate absolute names in DNS.
if len(urlsplit(value).netloc) > 253:
raise ValidationError(self.message, code=self.code)
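# A brief usage sketch for URLValidator (illustrative; the scheme list below is
# an example restriction, not a different default):
_example_url_validator = URLValidator(schemes=['http', 'https'])
# _example_url_validator('https://example.com/path?q=1') passes silently, while
# 'ftp://example.com' or 'not a url' raise ValidationError.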
integer_validator = RegexValidator(
_lazy_re_compile(r'^-?\d+\Z'),
message=_('Enter a valid integer.'),
code='invalid',
)
def validate_integer(value):
return integer_validator(value)
@deconstructible
class EmailValidator:
message = _('Enter a valid email address.')
code = 'invalid'
user_regex = _lazy_re_compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string
re.IGNORECASE)
domain_regex = _lazy_re_compile(
# max length for domain name labels is 63 characters per RFC 1034
r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z',
re.IGNORECASE)
literal_regex = _lazy_re_compile(
# literal form, ipv4 or ipv6 address (SMTP 4.1.3)
r'\[([A-f0-9:\.]+)\]\Z',
re.IGNORECASE)
domain_whitelist = ['localhost']
def __init__(self, message=None, code=None, whitelist=None):
if message is not None:
self.message = message
if code is not None:
self.code = code
if whitelist is not None:
self.domain_whitelist = whitelist
def __call__(self, value):
if not value or '@' not in value:
raise ValidationError(self.message, code=self.code)
user_part, domain_part = value.rsplit('@', 1)
if not self.user_regex.match(user_part):
raise ValidationError(self.message, code=self.code)
if (domain_part not in self.domain_whitelist and
not self.validate_domain_part(domain_part)):
# Try for possible IDN domain-part
try:
domain_part = domain_part.encode('idna').decode('ascii')
if self.validate_domain_part(domain_part):
return
except UnicodeError:
pass
raise ValidationError(self.message, code=self.code)
def validate_domain_part(self, domain_part):
if self.domain_regex.match(domain_part):
return True
literal_match = self.literal_regex.match(domain_part)
if literal_match:
ip_address = literal_match.group(1)
try:
validate_ipv46_address(ip_address)
return True
except ValidationError:
pass
return False
def __eq__(self, other):
return (
isinstance(other, EmailValidator) and
(self.domain_whitelist == other.domain_whitelist) and
(self.message == other.message) and
(self.code == other.code)
)
validate_email = EmailValidator()
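# Usage sketch for the module-level instance above (illustrative values only):
# validate_email('user@example.com') passes, validate_email('user@localhost')
# passes because 'localhost' is whitelisted, and validate_email('user@@x')
# raises ValidationError. A custom whitelist can be supplied, for example:
_example_email_validator = EmailValidator(whitelist=['localhost', 'intranet'])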
slug_re = _lazy_re_compile(r'^[-a-zA-Z0-9_]+\Z')
validate_slug = RegexValidator(
slug_re,
# Translators: "letters" means latin letters: a-z and A-Z.
_("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."),
'invalid'
)
slug_unicode_re = _lazy_re_compile(r'^[-\w]+\Z')
validate_unicode_slug = RegexValidator(
slug_unicode_re,
_("Enter a valid 'slug' consisting of Unicode letters, numbers, underscores, or hyphens."),
'invalid'
)
def validate_ipv4_address(value):
try:
ipaddress.IPv4Address(value)
except ValueError:
raise ValidationError(_('Enter a valid IPv4 address.'), code='invalid')
def validate_ipv6_address(value):
if not is_valid_ipv6_address(value):
raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid')
def validate_ipv46_address(value):
try:
validate_ipv4_address(value)
except ValidationError:
try:
validate_ipv6_address(value)
except ValidationError:
raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid')
ip_address_validator_map = {
'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')),
'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')),
'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')),
}
def ip_address_validators(protocol, unpack_ipv4):
"""
Depending on the given parameters, return the appropriate validators for
the GenericIPAddressField.
"""
if protocol != 'both' and unpack_ipv4:
raise ValueError(
"You can only use `unpack_ipv4` if `protocol` is set to 'both'")
try:
return ip_address_validator_map[protocol.lower()]
except KeyError:
raise ValueError("The protocol '%s' is unknown. Supported: %s"
% (protocol, list(ip_address_validator_map)))
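# For illustration (hypothetical names; this mirrors what GenericIPAddressField
# does internally):
_example_ipv4_validators, _example_ipv4_message = ip_address_validators('ipv4', False)
# _example_ipv4_validators is [validate_ipv4_address]; an unknown protocol such
# as 'ipv5' would raise ValueError instead.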
def int_list_validator(sep=',', message=None, code='invalid', allow_negative=False):
regexp = _lazy_re_compile(r'^%(neg)s\d+(?:%(sep)s%(neg)s\d+)*\Z' % {
'neg': '(-)?' if allow_negative else '',
'sep': re.escape(sep),
})
return RegexValidator(regexp, message=message, code=code)
validate_comma_separated_integer_list = int_list_validator(
message=_('Enter only digits separated by commas.'),
)
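# A usage sketch for the factory above (hypothetical validator, not one shipped
# with this module): semicolon-separated, possibly negative integers.
_example_int_list_validator = int_list_validator(
    sep=';',
    message=_('Enter only digits separated by semicolons.'),
    allow_negative=True,
)
# _example_int_list_validator('1;-2;3') passes; '1,2' or '1;;2' raise
# ValidationError.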
@deconstructible
class BaseValidator:
message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).')
code = 'limit_value'
def __init__(self, limit_value, message=None):
self.limit_value = limit_value
if message:
self.message = message
def __call__(self, value):
cleaned = self.clean(value)
params = {'limit_value': self.limit_value, 'show_value': cleaned, 'value': value}
if self.compare(cleaned, self.limit_value):
raise ValidationError(self.message, code=self.code, params=params)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.limit_value == other.limit_value and
self.message == other.message and
self.code == other.code
)
def compare(self, a, b):
return a is not b
def clean(self, x):
return x
@deconstructible
class MaxValueValidator(BaseValidator):
message = _('Ensure this value is less than or equal to %(limit_value)s.')
code = 'max_value'
def compare(self, a, b):
return a > b
@deconstructible
class MinValueValidator(BaseValidator):
message = _('Ensure this value is greater than or equal to %(limit_value)s.')
code = 'min_value'
def compare(self, a, b):
return a < b
@deconstructible
class MinLengthValidator(BaseValidator):
message = ngettext_lazy(
'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'min_length'
def compare(self, a, b):
return a < b
def clean(self, x):
return len(x)
@deconstructible
class MaxLengthValidator(BaseValidator):
message = ngettext_lazy(
'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'max_length'
def compare(self, a, b):
return a > b
def clean(self, x):
return len(x)
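# Usage sketch for the length validators (illustrative instances only):
_example_min_len = MinLengthValidator(3)
_example_max_len = MaxLengthValidator(10)
# _example_min_len('abc') and _example_max_len('abc') both pass;
# _example_min_len('ab') raises ValidationError with code 'min_length'.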
@deconstructible
class DecimalValidator:
"""
Validate that the input does not exceed the maximum number of digits
expected, otherwise raise ValidationError.
"""
messages = {
'max_digits': ngettext_lazy(
'Ensure that there are no more than %(max)s digit in total.',
'Ensure that there are no more than %(max)s digits in total.',
'max'
),
'max_decimal_places': ngettext_lazy(
'Ensure that there are no more than %(max)s decimal place.',
'Ensure that there are no more than %(max)s decimal places.',
'max'
),
'max_whole_digits': ngettext_lazy(
'Ensure that there are no more than %(max)s digit before the decimal point.',
'Ensure that there are no more than %(max)s digits before the decimal point.',
'max'
),
}
def __init__(self, max_digits, decimal_places):
self.max_digits = max_digits
self.decimal_places = decimal_places
def __call__(self, value):
digit_tuple, exponent = value.as_tuple()[1:]
decimals = abs(exponent)
# digit_tuple doesn't include any leading zeros.
digits = len(digit_tuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
whole_digits = digits - decimals
if self.max_digits is not None and digits > self.max_digits:
raise ValidationError(
self.messages['max_digits'],
code='max_digits',
params={'max': self.max_digits},
)
if self.decimal_places is not None and decimals > self.decimal_places:
raise ValidationError(
self.messages['max_decimal_places'],
code='max_decimal_places',
params={'max': self.decimal_places},
)
if (self.max_digits is not None and self.decimal_places is not None and
whole_digits > (self.max_digits - self.decimal_places)):
raise ValidationError(
self.messages['max_whole_digits'],
code='max_whole_digits',
params={'max': (self.max_digits - self.decimal_places)},
)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.max_digits == other.max_digits and
self.decimal_places == other.decimal_places
)
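# Usage sketch for DecimalValidator (illustrative names; this mirrors how a
# DecimalField wires it up with standard-library Decimal values):
from decimal import Decimal as _ExampleDecimal
_example_decimal_validator = DecimalValidator(max_digits=5, decimal_places=2)
# _example_decimal_validator(_ExampleDecimal('123.45')) passes;
# _ExampleDecimal('1234.5') raises with code 'max_whole_digits',
# _ExampleDecimal('1.234') with 'max_decimal_places', and
# _ExampleDecimal('123456') with 'max_digits'.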
@deconstructible
class FileExtensionValidator:
message = _(
"File extension '%(extension)s' is not allowed. "
"Allowed extensions are: '%(allowed_extensions)s'."
)
code = 'invalid_extension'
def __init__(self, allowed_extensions=None, message=None, code=None):
if allowed_extensions is not None:
allowed_extensions = [allowed_extension.lower() for allowed_extension in allowed_extensions]
self.allowed_extensions = allowed_extensions
if message is not None:
self.message = message
if code is not None:
self.code = code
def __call__(self, value):
extension = os.path.splitext(value.name)[1][1:].lower()
if self.allowed_extensions is not None and extension not in self.allowed_extensions:
raise ValidationError(
self.message,
code=self.code,
params={
'extension': extension,
'allowed_extensions': ', '.join(self.allowed_extensions)
}
)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.allowed_extensions == other.allowed_extensions and
self.message == other.message and
self.code == other.code
)
def get_available_image_extensions():
try:
from PIL import Image
except ImportError:
return []
else:
Image.init()
return [ext.lower()[1:] for ext in Image.EXTENSION]
validate_image_file_extension = FileExtensionValidator(
allowed_extensions=get_available_image_extensions(),
)
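# Usage sketch for FileExtensionValidator (hypothetical validator; the allowed
# extensions are only an example):
_example_upload_validator = FileExtensionValidator(
    allowed_extensions=['pdf', 'txt'],
    message=_("Files of type '%(extension)s' are not accepted."),
)
# Called with an uploaded file object whose .name ends in '.docx', it raises
# ValidationError; '.pdf' and '.PDF' both pass because extensions are
# lower-cased on both sides.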
|
the-stack_0_2794 | # -*- coding: utf-8 -*-
'''
The AWS Cloud Module
====================
The AWS cloud module is used to interact with the Amazon Web Services system.
This module has been replaced by the EC2 cloud module, and is no longer
supported. The documentation shown here is for reference only; it is highly
recommended to change all usages of this driver over to the EC2 driver.
If this driver is still needed, set up the cloud configuration at
``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/aws.conf``:
.. code-block:: yaml
my-aws-config:
# The AWS API authentication id
id: GKTADJGHEIQSXMKKRBJ08H
# The AWS API authentication key
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
# The ssh keyname to use
keyname: default
# The amazon security group
securitygroup: ssh_open
# The location of the private key which corresponds to the keyname
private_key: /root/default.pem
provider: aws
'''
# pylint: disable=E0102
# Import python libs
import os
import stat
import uuid
import pprint
import logging
# Import salt.cloud libs
import salt.utils.cloud
import salt.config as config
from salt.utils import namespaced_function
from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401
from salt.cloud.libcloudfuncs import destroy as libcloudfuncs_destroy
from salt.cloud.exceptions import (
SaltCloudException,
SaltCloudSystemExit,
SaltCloudConfigError,
SaltCloudExecutionTimeout,
SaltCloudExecutionFailure
)
# Get logging started
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'aws'
# Only load in this module if the AWS configurations are in place
def __virtual__():
'''
    Set up the libcloud functions and check for AWS configs
'''
try:
import botocore
# Since we have botocore, we won't load the libcloud AWS module
log.debug(
'The \'botocore\' library is installed. The libcloud AWS support '
'will not be loaded.'
)
return False
except ImportError:
pass
if get_configured_provider() is False:
log.debug(
'There is no AWS cloud provider configuration available. Not '
'loading module'
)
return False
for provider, details in __opts__['providers'].iteritems():
if 'provider' not in details or details['provider'] != 'aws':
continue
if not os.path.exists(details['private_key']):
raise SaltCloudException(
'The AWS key file {0!r} used in the {1!r} provider '
'configuration does not exist\n'.format(
details['private_key'],
provider
)
)
keymode = str(
oct(stat.S_IMODE(os.stat(details['private_key']).st_mode))
)
if keymode not in ('0400', '0600'):
raise SaltCloudException(
'The AWS key file {0!r} used in the {1!r} provider '
'configuration needs to be set to mode 0400 or 0600\n'.format(
details['private_key'],
provider
)
)
global avail_images, avail_sizes, script, list_nodes
global avail_locations, list_nodes_full, list_nodes_select, get_image
global get_size, libcloudfuncs_destroy, show_instance
# open a connection in a specific region
conn = get_conn(**{'location': get_location()})
# Init the libcloud functions
get_size = namespaced_function(get_size, globals(), (conn,))
get_image = namespaced_function(get_image, globals(), (conn,))
avail_locations = namespaced_function(avail_locations, globals(), (conn,))
avail_images = namespaced_function(avail_images, globals(), (conn,))
avail_sizes = namespaced_function(avail_sizes, globals(), (conn,))
script = namespaced_function(script, globals(), (conn,))
list_nodes = namespaced_function(list_nodes, globals(), (conn,))
list_nodes_full = namespaced_function(list_nodes_full, globals(), (conn,))
list_nodes_select = namespaced_function(
list_nodes_select, globals(), (conn,)
)
libcloudfuncs_destroy = namespaced_function(
libcloudfuncs_destroy, globals(), (conn,)
)
show_instance = namespaced_function(show_instance, globals())
log.debug('Loading Libcloud AWS cloud module')
return __virtualname__
EC2_LOCATIONS = {
'ap-northeast-1': Provider.EC2_AP_NORTHEAST,
'ap-southeast-1': Provider.EC2_AP_SOUTHEAST,
'eu-west-1': Provider.EC2_EU_WEST,
'sa-east-1': Provider.EC2_SA_EAST,
'us-east-1': Provider.EC2_US_EAST,
'us-west-1': Provider.EC2_US_WEST,
'us-west-2': Provider.EC2_US_WEST_OREGON
}
DEFAULT_LOCATION = 'us-east-1'
if hasattr(Provider, 'EC2_AP_SOUTHEAST2'):
EC2_LOCATIONS['ap-southeast-2'] = Provider.EC2_AP_SOUTHEAST2
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or 'aws',
('id', 'key', 'keyname', 'securitygroup', 'private_key')
)
def get_conn(**kwargs):
'''
Return a conn object for the passed VM data
'''
if 'location' in kwargs:
location = kwargs['location']
if location not in EC2_LOCATIONS:
raise SaltCloudException(
'The specified location does not seem to be valid: '
'{0}\n'.format(
location
)
)
else:
location = DEFAULT_LOCATION
driver = get_driver(EC2_LOCATIONS[location])
vm_ = get_configured_provider()
return driver(
config.get_cloud_config_value('id', vm_, __opts__, search_global=False),
config.get_cloud_config_value('key', vm_, __opts__, search_global=False)
)
def keyname(vm_):
'''
Return the keyname
'''
return config.get_cloud_config_value(
'keyname', vm_, __opts__, search_global=False
)
def securitygroup(vm_):
'''
Return the security group
'''
return config.get_cloud_config_value(
'securitygroup', vm_, __opts__, search_global=False
)
def iam_profile(vm_):
'''
Return the IAM role
'''
return config.get_cloud_config_value(
'iam_profile', vm_, __opts__, search_global=False
)
def block_device_mappings(vm_):
'''
Return the block device mapping:
::
[{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
{'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}]
'''
return config.get_cloud_config_value(
'block_device_mappings', vm_, __opts__, search_global=False
)
def ssh_username(vm_):
'''
Return the ssh_username. Defaults to 'ec2-user'.
'''
usernames = config.get_cloud_config_value(
'ssh_username', vm_, __opts__
)
if not isinstance(usernames, list):
usernames = [usernames]
# get rid of None's or empty names
usernames = filter(lambda x: x, usernames)
# Keep a copy of the usernames the user might have provided
initial = usernames[:]
# Add common usernames to the list to be tested
for name in ('ec2-user', 'ubuntu', 'admin', 'bitnami', 'root'):
if name not in usernames:
usernames.append(name)
    # Add the user-provided usernames to the end of the list, since enough time
    # might need to pass before the remote service is available for logins and
    # the proper username might already have passed its iteration.
    # This has been observed with a CentOS 5.7 EC2 image.
usernames.extend(initial)
return usernames
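# For illustration (hypothetical profile value): with ``ssh_username: centos``
# configured, the list returned above is
# ['centos', 'ec2-user', 'ubuntu', 'admin', 'bitnami', 'root', 'centos'];
# the user-supplied name is tried first and retried once more at the end,
# after the common defaults.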
def ssh_interface(vm_):
'''
Return the ssh_interface type to connect to. Either 'public_ips' (default)
or 'private_ips'.
'''
return config.get_cloud_config_value(
'ssh_interface', vm_, __opts__, default='public_ips',
search_global=False
)
def get_location(vm_=None):
'''
Return the AWS region to use, in this order:
- CLI parameter
- Cloud profile setting
- Global salt-cloud config
'''
return __opts__.get(
'location',
config.get_cloud_config_value(
'location',
vm_ or get_configured_provider(), __opts__,
default=DEFAULT_LOCATION
)
)
def get_availability_zone(conn, vm_):
'''
Return the availability zone to use
'''
avz = config.get_cloud_config_value(
'availability_zone', vm_, __opts__, search_global=False
)
locations = conn.list_locations()
if avz is None:
# Default to first zone
return locations[0]
for loc in locations:
if loc.availability_zone.name == avz:
return loc
def create(vm_):
'''
Create a single VM from a data dict
'''
key_filename = config.get_cloud_config_value(
'private_key', vm_, __opts__, search_global=False, default=None
)
if key_filename is not None and not os.path.isfile(key_filename):
raise SaltCloudConfigError(
'The defined key_filename {0!r} does not exist'.format(
key_filename
)
)
location = get_location(vm_)
log.info('Creating Cloud VM {0} in {1}'.format(vm_['name'], location))
conn = get_conn(location=location)
usernames = ssh_username(vm_)
kwargs = {
'ssh_key': config.get_cloud_config_value(
'private_key', vm_, __opts__, search_global=False
),
'name': vm_['name'],
'image': get_image(conn, vm_),
'size': get_size(conn, vm_),
'location': get_availability_zone(conn, vm_)
}
ex_keyname = keyname(vm_)
if ex_keyname:
kwargs['ex_keyname'] = ex_keyname
ex_securitygroup = securitygroup(vm_)
if ex_securitygroup:
kwargs['ex_securitygroup'] = ex_securitygroup
ex_blockdevicemappings = block_device_mappings(vm_)
if ex_blockdevicemappings:
kwargs['ex_blockdevicemappings'] = ex_blockdevicemappings
ex_iam_profile = iam_profile(vm_)
if ex_iam_profile:
# libcloud does not implement 'iam_profile' yet.
# A pull request has been suggested
# https://github.com/apache/libcloud/pull/150
raise SaltCloudConfigError(
'libcloud does not implement \'iam_profile\' yet. '
'Use EC2 driver instead.'
)
tags = config.get_cloud_config_value('tag', vm_, __opts__, {}, search_global=False)
if not isinstance(tags, dict):
raise SaltCloudConfigError(
'\'tag\' should be a dict.'
)
kwargs['ex_metadata'] = config.get_cloud_config_value('metadata', vm_, __opts__, default={}, search_global=False)
if not isinstance(kwargs['ex_metadata'], dict):
raise SaltCloudConfigError(
'\'metadata\' should be a dict.'
)
try:
data = conn.create_node(**kwargs)
except Exception as exc:
log.error(
'Error creating {0} on AWS\n\n'
'The following exception was thrown by libcloud when trying to '
'run the initial deployment: {1}\n'.format(
vm_['name'], exc
),
# Show the traceback if the debug logging level is enabled
exc_info=log.isEnabledFor(logging.DEBUG)
)
return False
log.info('Created node {0}'.format(vm_['name']))
def __get_node_data(conn, vm_name):
data = get_node(conn, vm_name)
if data is None:
# Trigger a failure in the waiting function
return False
if ssh_interface(vm_) == 'private_ips' and data.private_ips:
return data
if ssh_interface(vm_) == 'public_ips' and data.public_ips:
return data
try:
data = salt.utils.cloud.wait_for_ip(
__get_node_data,
update_args=(conn, vm_['name']),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=5 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=0.5),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(exc.message)
if tags:
set_tags(vm_['name'], tags, call='action')
if ssh_interface(vm_) == 'private_ips':
log.info('Salt node data. Private_ip: {0}'.format(data.private_ips[0]))
ip_address = data.private_ips[0]
else:
log.info('Salt node data. Public_ip: {0}'.format(data.public_ips[0]))
ip_address = data.public_ips[0]
username = 'ec2-user'
ssh_connect_timeout = config.get_cloud_config_value(
'ssh_connect_timeout', vm_, __opts__, 900 # 15 minutes
)
if salt.utils.cloud.wait_for_port(ip_address, timeout=ssh_connect_timeout):
for user in usernames:
if salt.utils.cloud.wait_for_passwd(
host=ip_address,
username=user,
ssh_timeout=config.get_cloud_config_value(
'wait_for_passwd_timeout', vm_, __opts__,
default=1 * 60),
key_filename=key_filename):
username = user
break
else:
raise SaltCloudSystemExit(
'Failed to authenticate against remote ssh'
)
ret = {}
if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
deploy_script = script(vm_)
deploy_kwargs = {
'host': ip_address,
'username': username,
'key_filename': key_filename,
'tmp_dir': config.get_cloud_config_value(
'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'
),
'deploy_command': config.get_cloud_config_value(
'deploy_command', vm_, __opts__,
default='/tmp/.saltcloud/deploy.sh',
),
'tty': config.get_cloud_config_value(
'tty', vm_, __opts__, default=True
),
'script': deploy_script.script,
'name': vm_['name'],
'sudo': config.get_cloud_config_value(
'sudo', vm_, __opts__, default=(username != 'root')
),
'sudo_password': config.get_cloud_config_value(
'sudo_password', vm_, __opts__, default=None
),
'start_action': __opts__['start_action'],
'parallel': __opts__['parallel'],
'conf_file': __opts__['conf_file'],
'sock_dir': __opts__['sock_dir'],
'minion_pem': vm_['priv_key'],
'minion_pub': vm_['pub_key'],
'keep_tmp': __opts__['keep_tmp'],
'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
'display_ssh_output': config.get_cloud_config_value(
'display_ssh_output', vm_, __opts__, default=True
),
'script_args': config.get_cloud_config_value(
'script_args', vm_, __opts__
),
'script_env': config.get_cloud_config_value('script_env', vm_, __opts__),
'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
}
# Deploy salt-master files, if necessary
if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
deploy_kwargs['make_master'] = True
deploy_kwargs['master_pub'] = vm_['master_pub']
deploy_kwargs['master_pem'] = vm_['master_pem']
master_conf = salt.utils.cloud.master_config(__opts__, vm_)
deploy_kwargs['master_conf'] = master_conf
if master_conf.get('syndic_master', None):
deploy_kwargs['make_syndic'] = True
deploy_kwargs['make_minion'] = config.get_cloud_config_value(
'make_minion', vm_, __opts__, default=True
)
# Check for Windows install params
win_installer = config.get_cloud_config_value('win_installer', vm_, __opts__)
if win_installer:
deploy_kwargs['win_installer'] = win_installer
minion = salt.utils.cloud.minion_config(__opts__, vm_)
deploy_kwargs['master'] = minion['master']
deploy_kwargs['username'] = config.get_cloud_config_value(
'win_username', vm_, __opts__, default='Administrator'
)
deploy_kwargs['password'] = config.get_cloud_config_value(
'win_password', vm_, __opts__, default=''
)
# Store what was used to the deploy the VM
ret['deploy_kwargs'] = deploy_kwargs
deployed = False
if win_installer:
deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
else:
deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)
if deployed:
log.info('Salt installed on {name}'.format(**vm_))
else:
log.error('Failed to start Salt on Cloud VM {name}'.format(**vm_))
log.info('Created Cloud VM {0[name]!r}'.format(vm_))
log.debug(
'{0[name]!r} VM creation details:\n{1}'.format(
vm_, pprint.pformat(data.__dict__)
)
)
volumes = config.get_cloud_config_value(
'volumes', vm_, __opts__, search_global=True
)
if volumes:
log.info('Create and attach volumes to node {0}'.format(data.name))
create_attach_volumes(volumes, location, data)
ret.update(data.__dict__)
return ret
def create_attach_volumes(volumes, location, data):
'''
Create and attach volumes to created node
'''
conn = get_conn(location=location)
node_avz = data.__dict__.get('extra').get('availability')
avz = None
for avz in conn.list_locations():
if avz.availability_zone.name == node_avz:
break
for volume in volumes:
volume_name = '{0} on {1}'.format(volume['device'], data.name)
created_volume = conn.create_volume(volume['size'], volume_name, avz)
attach = conn.attach_volume(data, created_volume, volume['device'])
if attach:
log.info(
'{0} attached to {1} (aka {2}) as device {3}'.format(
created_volume.id, data.id, data.name, volume['device']
)
)
def stop(name, call=None):
'''
Stop a node
'''
data = {}
if call != 'action':
raise SaltCloudSystemExit(
'This action must be called with -a or --action.'
)
location = get_location()
conn = get_conn(location=location)
node = get_node(conn, name)
try:
data = conn.ex_stop_node(node=node)
log.debug(data)
log.info('Stopped node {0}'.format(name))
except Exception:
log.error('Failed to stop node {0}\n'.format(name), exc_info=True)
return data
def start(name, call=None):
'''
Start a node
'''
data = {}
if call != 'action':
raise SaltCloudSystemExit(
'This action must be called with -a or --action.'
)
location = get_location()
conn = get_conn(location=location)
node = get_node(conn, name)
try:
data = conn.ex_start_node(node=node)
log.debug(data)
log.info('Started node {0}'.format(name))
except Exception:
log.error('Failed to start node {0}\n'.format(name), exc_info=True)
return data
def set_tags(name, tags, call=None):
'''
Set tags for a node
CLI Example::
salt-cloud -a set_tags mymachine tag1=somestuff tag2='Other stuff'
'''
if call != 'action':
raise SaltCloudSystemExit(
'This action must be called with -a or --action.'
)
location = get_location()
conn = get_conn(location=location)
node = get_node(conn, name)
try:
log.info('Setting tags for {0}'.format(name))
conn.ex_create_tags(resource=node, tags=tags)
        # Print the new tags, with special handling for renaming of a node
if 'Name' in tags:
return get_tags(tags['Name'])
return get_tags(name)
except Exception:
log.error('Failed to set tags for {0}\n'.format(name), exc_info=True)
def get_tags(name, call=None):
'''
Retrieve tags for a node
'''
data = {}
if call != 'action':
raise SaltCloudSystemExit(
'This action must be called with -a or --action.'
)
location = get_location()
conn = get_conn(location=location)
node = get_node(conn, name)
try:
log.info('Retrieving tags from {0}'.format(name))
data = conn.ex_describe_tags(resource=node)
log.info(data)
except Exception:
log.error(
'Failed to retrieve tags from {0}\n'.format(name),
exc_info=True
)
return data
def del_tags(name, kwargs, call=None):
'''
Delete tags for a node
CLI Example::
salt-cloud -a del_tags mymachine tag1,tag2,tag3
'''
ret = {}
if call != 'action':
raise SaltCloudSystemExit(
'This action must be called with -a or --action.'
)
location = get_location()
conn = get_conn(location=location)
node = get_node(conn, name)
current_tags = conn.ex_describe_tags(resource=node)
tags = {}
for tag in kwargs['tags'].split(','):
tags[tag] = current_tags[tag]
try:
conn.ex_delete_tags(resource=node, tags=tags)
log.info('Deleting tags from {0}'.format(name))
ret = get_tags(name)
except Exception:
log.error(
'Failed to delete tags from {0}\n'.format(name),
exc_info=True
)
return ret
def rename(name, kwargs, call=None):
'''
    Properly rename a node. Pass in the new name as "newname".
CLI Example::
salt-cloud -a rename mymachine newname=yourmachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'This action must be called with -a or --action.'
)
location = get_location()
conn = get_conn(location=location)
node = get_node(conn, name)
tags = {'Name': kwargs['newname']}
try:
log.info('Renaming {0} to {1}'.format(name, kwargs['newname']))
conn.ex_create_tags(resource=node, tags=tags)
salt.utils.cloud.rename_key(
__opts__['pki_dir'], name, kwargs['newname']
)
except Exception as exc:
log.error(
'Failed to rename {0} to {1}: {2}\n'.format(
name, kwargs['newname'], exc
),
# Show the traceback if the debug logging level is enabled
exc_info=log.isEnabledFor(logging.DEBUG)
)
return kwargs['newname']
def destroy(name):
'''
Wrap core libcloudfuncs destroy method, adding check for termination
protection
'''
ret = {}
newname = name
if config.get_cloud_config_value('rename_on_destroy',
get_configured_provider(),
__opts__, search_global=False) is True:
newname = '{0}-DEL{1}'.format(name, uuid.uuid4().hex)
rename(name, kwargs={'newname': newname}, call='action')
log.info(
'Machine will be identified as {0} until it has been '
'cleaned up by AWS.'.format(
newname
)
)
ret['newname'] = newname
try:
result = libcloudfuncs_destroy(newname, get_conn())
ret.update({'Destroyed': result})
except Exception as exc:
if not exc.message.startswith('OperationNotPermitted'):
log.exception(exc)
raise exc
log.info(
'Failed: termination protection is enabled on {0}'.format(
name
)
)
return ret
|
the-stack_0_2795 | # Copyright 2019 Ross Wightman
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" RMSProp modified to behave like Tensorflow impl
Originally cut & paste from PyTorch RMSProp
https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py
Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE
Modifications Copyright 2020 Ross Wightman
"""
import torch
from torch.optim import Optimizer
class RMSpropTF(Optimizer):
"""Implements RMSprop algorithm (TensorFlow style epsilon)
NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt
and a few other modifications to closer match Tensorflow for matching hyper-params.
Noteworthy changes include:
1. Epsilon applied inside square-root
2. square_avg initialized to ones
3. LR scaling of update accumulated in momentum buffer
Proposed by G. Hinton in his
`course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
The centered version first appears in `Generating Sequences
With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
momentum (float, optional): momentum factor (default: 0)
alpha (float, optional): smoothing (decay) constant (default: 0.9)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-10)
centered (bool, optional) : if ``True``, compute the centered RMSProp,
the gradient is normalized by an estimation of its variance
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101
lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer
update as per defaults in Tensorflow
"""
def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False,
decoupled_decay=False, lr_in_momentum=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= momentum:
raise ValueError("Invalid momentum value: {}".format(momentum))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= alpha:
raise ValueError("Invalid alpha value: {}".format(alpha))
defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay,
decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum)
super(RMSpropTF, self).__init__(params, defaults)
def __setstate__(self, state):
super(RMSpropTF, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('momentum', 0)
group.setdefault('centered', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('RMSprop does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.ones_like(p.data) # PyTorch inits to zero
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p.data)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p.data)
square_avg = state['square_avg']
one_minus_alpha = 1. - group['alpha']
state['step'] += 1
if group['weight_decay'] != 0:
if 'decoupled_decay' in group and group['decoupled_decay']:
p.data.add_(-group['weight_decay'], p.data)
else:
grad = grad.add(group['weight_decay'], p.data)
# Tensorflow order of ops for updating squared avg
square_avg.add_(one_minus_alpha, grad.pow(2) - square_avg)
# square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad) # PyTorch original
if group['centered']:
grad_avg = state['grad_avg']
grad_avg.add_(one_minus_alpha, grad - grad_avg)
# grad_avg.mul_(alpha).add_(1 - alpha, grad) # PyTorch original
avg = square_avg.addcmul(-1, grad_avg, grad_avg).add(group['eps']).sqrt_() # eps moved in sqrt
else:
avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt
if group['momentum'] > 0:
buf = state['momentum_buffer']
# Tensorflow accumulates the LR scaling in the momentum buffer
if 'lr_in_momentum' in group and group['lr_in_momentum']:
buf.mul_(group['momentum']).addcdiv_(group['lr'], grad, avg)
p.data.add_(-buf)
else:
# PyTorch scales the param update by LR
buf.mul_(group['momentum']).addcdiv_(grad, avg)
p.data.add_(-group['lr'], buf)
else:
p.data.addcdiv_(-group['lr'], grad, avg)
return loss
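# Minimal usage sketch (added for illustration; not part of the original module).
# The model and data are arbitrary, and it assumes a PyTorch version that still
# accepts the deprecated positional add_/addcdiv_ signatures used above.
if __name__ == "__main__":
    _model = torch.nn.Linear(10, 2)
    _opt = RMSpropTF(_model.parameters(), lr=1e-2, alpha=0.9, eps=1e-10, momentum=0.9)
    _loss = _model(torch.randn(4, 10)).pow(2).mean()
    _loss.backward()
    _opt.step()  # one TF-style RMSprop update (LR folded into the momentum buffer)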
|
the-stack_0_2796 | import random
import numpy as np
from xgboost.sklearn import XGBClassifier
action_list = []
observation_list = []
result_list = []
def i_win(me, you):
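    # Return +1 if `me` beats `you`, 0 for a tie, -1 for a loss (moves encoded 0/1/2).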
return int((me - you + 4) % 3) - 1
# for i in range(3):
# text = ""
# for j in range(3):
# text += f'{i_win(i, j)} '
# print(f'{text}')
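# Strategy sketch: keep a running history of (my action, opponent action, result),
# fit a small XGBoost classifier on a recent window (or the full history early on)
# to predict the opponent's next move, then usually play the counter; when losing
# badly, it deliberately deviates (mirroring the prediction or playing the move the
# prediction beats) to break predictability.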
def Agent(observation, configuration):
global action_list, observation_list, result_list
if observation.step == 0:
action = random.randint(0, 2)
action_list.append(action)
return action
if observation.step < 20:
observation_list.append(observation.lastOpponentAction)
result_list.append(
i_win(action_list[-1], observation.lastOpponentAction))
action = random.randint(0, 2)
action_list.append(action)
return action
observation_list.append(observation.lastOpponentAction)
result_list.append(i_win(action_list[-1], observation.lastOpponentAction))
if observation.step < 25:
start_from = 0
else:
start_from = -1*random.randint(16, 20)
X_train = np.array([action_list[start_from:-1],
observation_list[start_from:-1], result_list[start_from:-1]]).T
y_train = np.roll(observation_list[start_from:-1], -1).T
model = XGBClassifier(
learning_rate=0.01,
n_estimators=20,
nthread=4,
use_label_encoder=False)
model.fit(X_train, y_train)
last_data = np.array(
[action_list[-1], observation_list[-1], result_list[-1]])
expected_observation = model.predict(last_data.reshape(1, -1))
if sum(result_list) < -3 and observation.step > 30:
if random.randint(0, 1):
action = int((expected_observation - 1) % 3)
else:
            action = int(expected_observation)  # the environment expects a plain int move
else:
action = int((expected_observation + 1) % 3)
action_list.append(action)
return action
|
the-stack_0_2798 | import pytest
from hiku.executors.asyncio import AsyncIOExecutor
from hiku.federation.endpoint import (
FederatedGraphQLEndpoint,
AsyncFederatedGraphQLEndpoint,
)
from hiku.federation.engine import Engine
from hiku.executors.sync import SyncExecutor
from tests.test_federation.utils import (
GRAPH,
ASYNC_GRAPH,
)
def execute(graph, query_dict):
graphql_endpoint = FederatedGraphQLEndpoint(
Engine(SyncExecutor()),
graph,
)
return graphql_endpoint.dispatch(query_dict)
async def execute_async(graph, query_dict):
graphql_endpoint = AsyncFederatedGraphQLEndpoint(
Engine(AsyncIOExecutor()),
graph,
)
return await graphql_endpoint.dispatch(query_dict)
ENTITIES_QUERY = {
'query': """
query($representations:[_Any!]!) {
_entities(representations:$representations) {
...on Order {
cart {
id
status
items { id name }
}
}
}
}
""",
'variables': {
'representations': [
{'__typename': 'Order', 'cartId': 1},
{'__typename': 'Order', 'cartId': 2},
]
}
}
SDL_QUERY = {'query': '{_service {sdl}}'}
def test_execute_sdl():
result = execute(GRAPH, SDL_QUERY)
assert result['data']['_service']['sdl'] is not None
def test_execute_sync_executor():
result = execute(GRAPH, ENTITIES_QUERY)
expect = [
{'cart': {'id': 1, 'status': 'NEW',
'items': [{'id': 10, 'name': 'Ipad'}]}},
{'cart': {'id': 2, 'status': 'ORDERED',
'items': [{'id': 20, 'name': 'Book'},
{'id': 21, 'name': 'Pen'}]}}
]
assert expect == result['data']['_entities']
@pytest.mark.asyncio
async def test_execute_async_executor():
result = await execute_async(ASYNC_GRAPH, ENTITIES_QUERY)
expect = [
{'cart': {'id': 1, 'status': 'NEW',
'items': [{'id': 10, 'name': 'Ipad'}]}},
{'cart': {'id': 2, 'status': 'ORDERED',
'items': [{'id': 20, 'name': 'Book'},
{'id': 21, 'name': 'Pen'}]}}
]
assert expect == result['data']['_entities']
|
the-stack_0_2799 | from setuptools import setup
dependencies = ["numpy",
"scipy",
"numba"]
def readme():
with open('README.md') as f:
return f.read()
setup(name='PyRADS',
version='0.1.0',
description='PyRADS is the "Python line-by-line RADiation model for planetary atmosphereS"',
long_description=readme(),
url='',
author='Daniel D.B. Koll',
author_email='[email protected]',
license='MIT',
packages=['pyrads'],
install_requires=dependencies,
zip_safe=False)
|
the-stack_0_2800 | import os
import time
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Rectangle
from source import utils
from source.constants import Constants
class DataPlotBuilder(object):
@staticmethod
def timestamp_to_string(ts):
return time.strftime('%H:%M:%S', time.localtime(ts))
@staticmethod
def convert_labels_for_hypnogram(labels):
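        # Remap PSG stages for the hypnogram axis: unscored (-1) is drawn at the wake
        # level (0), REM (5) becomes 1, and stages 1-4 are negated to -1..-4 so deeper
        # sleep is plotted lower; wake (0) is unchanged.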
processed_labels = np.array([])
for epoch in labels:
if epoch == -1:
processed_labels = np.append(processed_labels, 0)
elif epoch == 5:
processed_labels = np.append(processed_labels, 1)
else:
processed_labels = np.append(processed_labels, -1 * epoch)
return processed_labels
@staticmethod
def tidy_data_plot(x_min, x_max, dt, ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
xticks = np.arange(x_min, x_max, dt)
plt.xticks(xticks)
labels = []
for xt in xticks:
labels.append(DataPlotBuilder.timestamp_to_string(xt))
ax.set_xticklabels(labels)
plt.xlim(x_min, x_max)
@staticmethod
def make_data_demo(subject_id="16", snippet=False):
hr_color = [0.8, 0.2, 0.1]
motion_color = [0.3, 0.2, 0.8]
circ_color = [0.9, 0.7, 0]
psg_color = [0.1, 0.7, 0.1]
font_size = 16
font_name = "Arial"
data_path = str(Constants.CROPPED_FILE_PATH) + '/'
circadian_data_path = str(utils.get_project_root().joinpath('data/circadian_predictions/')) + '/'
output_path = str(Constants.FIGURE_FILE_PATH) + '/'
if snippet is False:
fig = plt.figure(figsize=(10, 12))
else:
fig = plt.figure(figsize=(3, 12))
num_v_plots = 5
fig.patch.set_facecolor('white')
if (os.path.isfile(data_path + subject_id + '_cleaned_hr.out') and os.path.isfile(
data_path + subject_id + '_cleaned_motion.out') and os.path.isfile(
data_path + subject_id + '_cleaned_psg.out') and
os.path.isfile(data_path + subject_id + '_cleaned_counts.out') and
os.stat(data_path + subject_id + '_cleaned_motion.out').st_size > 0) and os.path.isfile(
circadian_data_path + subject_id + '_clock_proxy.txt'):
hr = np.genfromtxt(data_path + subject_id + '_cleaned_hr.out', delimiter=' ')
motion = np.genfromtxt(data_path + subject_id + '_cleaned_motion.out', delimiter=' ')
scores = np.genfromtxt(data_path + subject_id + '_cleaned_psg.out', delimiter=' ')
counts = np.genfromtxt(data_path + subject_id + '_cleaned_counts.out', delimiter=',')
circ_model = np.genfromtxt(circadian_data_path + subject_id + '_clock_proxy.txt', delimiter=',')
min_time = min(scores[:, 0])
max_time = max(scores[:, 0])
dt = 60 * 60
sample_point_fraction = 0.92
sample_point = sample_point_fraction * (max_time - min_time) + min_time
window_size = 10
if snippet:
min_time = sample_point
max_time = sample_point + window_size
ax = plt.subplot(num_v_plots, 1, 1)
ax.plot(motion[:, 0], motion[:, 1], color=motion_color)
ax.plot(motion[:, 0], motion[:, 2], color=[0.4, 0.2, 0.7])
ax.plot(motion[:, 0], motion[:, 3], color=[0.5, 0.2, 0.6])
plt.ylabel('Motion (g)', fontsize=font_size, fontname=font_name)
DataPlotBuilder.tidy_data_plot(min_time, max_time, dt, ax)
if snippet:
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
ax.spines['top'].set_visible(True)
ax.spines['right'].set_visible(True)
ax.yaxis.label.set_visible(False)
inds = np.intersect1d(np.where(motion[:, 0] > sample_point)[0],
np.where(motion[:, 0] <= sample_point + window_size)[0])
y_min = np.amin(motion[inds, 1:3])
plt.ylim(y_min - 0.005, y_min + 0.025)
# Get rid of the ticks
ax.set_xticks([])
ax.yaxis.set_ticks_position("right")
plt.ylabel('')
plt.xlabel(str(window_size) + ' sec window', fontsize=font_size, fontname=font_name)
else:
y_min = -3.2
y_max = 2.5
plt.ylim(y_min, y_max)
current_axis = plt.gca()
current_axis.add_patch(
Rectangle((sample_point, y_min), window_size, y_max - y_min, alpha=0.7, facecolor="gray"))
ax = plt.subplot(num_v_plots, 1, 2)
ax.plot(counts[:, 0], counts[:, 1], color=[0.2, 0.2, 0.7])
DataPlotBuilder.tidy_data_plot(min_time, max_time, dt, ax)
plt.ylabel('Counts', fontsize=font_size, fontname=font_name)
if snippet:
plt.axis('off')
plt.ylim(-1, -1)
ax = plt.subplot(num_v_plots, 1, 3)
ax.plot(hr[:, 0], hr[:, 1], color=hr_color)
plt.ylabel('Heart rate (bpm)', fontsize=font_size, fontname=font_name)
DataPlotBuilder.tidy_data_plot(min_time, max_time, dt, ax)
sample_point = sample_point_fraction * (max_time - min_time) + min_time
window_size = 1200
if snippet:
min_time = sample_point
max_time = sample_point + window_size
DataPlotBuilder.tidy_data_plot(min_time, max_time, dt, ax)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
ax.spines['top'].set_visible(True)
ax.spines['right'].set_visible(True)
ax.yaxis.label.set_visible(False)
ax.set_xticks([])
ax.yaxis.set_ticks_position("right")
plt.ylabel('')
plt.xlabel(str(window_size) + ' sec window', fontsize=font_size, fontname=font_name)
plt.ylim(35, 100)
else:
y_min = 40
y_max = 130
plt.ylim(y_min, y_max)
current_axis = plt.gca()
current_axis.add_patch(
Rectangle((sample_point, y_min), window_size, y_max - y_min, alpha=0.35, facecolor="gray"))
plt.ylim(40, 130)
ax = plt.subplot(num_v_plots, 1, 4)
ax.plot(circ_model[:, 0], -circ_model[:, 1], color=circ_color)
plt.ylabel('Clock Proxy', fontsize=font_size, fontname=font_name)
DataPlotBuilder.tidy_data_plot(min_time, max_time, dt, ax)
if snippet:
plt.axis('off')
plt.ylim(-1, -1)
else:
plt.ylim(.2, 1.2)
ax = plt.subplot(num_v_plots, 1, 5)
relabeled_scores = DataPlotBuilder.convert_labels_for_hypnogram(scores[:, 1])
ax.step(scores[:, 0], relabeled_scores, color=psg_color)
plt.ylabel('Stage', fontsize=font_size, fontname=font_name)
plt.xlabel('Time', fontsize=font_size, fontname=font_name)
DataPlotBuilder.tidy_data_plot(min_time, max_time, dt, ax)
ax.set_yticks([-4, -3, -2, -1, 0, 1])
ax.set_yticklabels(['N4', 'N3', 'N2', 'N1', 'Wake', 'REM'])
if snippet:
plt.axis('off')
plt.ylim(5, 5)
else:
plt.ylim(-5, 2)
if not snippet:
plt.savefig(output_path + 'data_validation_' + subject_id + '.png', bbox_inches='tight', pad_inches=0.1,
dpi=300)
else:
plt.savefig(output_path + 'data_validation_zoom_' + subject_id + '.png', bbox_inches='tight',
pad_inches=0.1, dpi=300)
plt.close()
|
the-stack_0_2801 | """Utility functions with no non-trivial dependencies."""
import os
import pathlib
import re
import subprocess
import sys
import hashlib
import io
import shutil
import time
from typing import (
TypeVar, List, Tuple, Optional, Dict, Sequence, Iterable, Container, IO, Callable
)
from typing_extensions import Final, Type, Literal
try:
import curses
import _curses # noqa
CURSES_ENABLED = True
except ImportError:
CURSES_ENABLED = False
T = TypeVar('T')
ENCODING_RE: Final = re.compile(br"([ \t\v]*#.*(\r\n?|\n))??[ \t\v]*#.*coding[:=][ \t]*([-\w.]+)")
DEFAULT_SOURCE_OFFSET: Final = 4
DEFAULT_COLUMNS: Final = 80
# At least this number of columns will be shown on each side of
# error location when printing source code snippet.
MINIMUM_WIDTH: Final = 20
# VT100 color code processing was added in Windows 10, but only the second major update,
# Threshold 2. Fortunately, everyone (even on LTSB, Long Term Support Branch) should
# have a version of Windows 10 newer than this. Note that Windows 8 and below are not
# supported, but are either going out of support, or make up only a few % of the market.
MINIMUM_WINDOWS_MAJOR_VT100: Final = 10
MINIMUM_WINDOWS_BUILD_VT100: Final = 10586
default_python2_interpreter: Final = [
"python2",
"python",
"/usr/bin/python",
"C:\\Python27\\python.exe",
]
SPECIAL_DUNDERS: Final = frozenset((
"__init__", "__new__", "__call__", "__init_subclass__", "__class_getitem__",
))
def is_dunder(name: str, exclude_special: bool = False) -> bool:
"""Returns whether name is a dunder name.
Args:
exclude_special: Whether to return False for a couple special dunder methods.
"""
if exclude_special and name in SPECIAL_DUNDERS:
return False
return name.startswith("__") and name.endswith("__")
def is_sunder(name: str) -> bool:
return not is_dunder(name) and name.startswith('_') and name.endswith('_')
def split_module_names(mod_name: str) -> List[str]:
"""Return the module and all parent module names.
So, if `mod_name` is 'a.b.c', this function will return
['a.b.c', 'a.b', and 'a'].
"""
out = [mod_name]
while '.' in mod_name:
mod_name = mod_name.rsplit('.', 1)[0]
out.append(mod_name)
return out
def module_prefix(modules: Iterable[str], target: str) -> Optional[str]:
result = split_target(modules, target)
if result is None:
return None
return result[0]
def split_target(modules: Iterable[str], target: str) -> Optional[Tuple[str, str]]:
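    """Split 'target' into (module, remaining attribute path).

    Strips trailing components off 'target' until a prefix found in 'modules'
    remains; returns None if no prefix of 'target' is a known module.
    """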
remaining: List[str] = []
while True:
if target in modules:
return target, '.'.join(remaining)
components = target.rsplit('.', 1)
if len(components) == 1:
return None
target = components[0]
remaining.insert(0, components[1])
def short_type(obj: object) -> str:
"""Return the last component of the type name of an object.
If obj is None, return 'nil'. For example, if obj is 1, return 'int'.
"""
if obj is None:
return 'nil'
t = str(type(obj))
return t.split('.')[-1].rstrip("'>")
def find_python_encoding(text: bytes, pyversion: Tuple[int, int]) -> Tuple[str, int]:
"""PEP-263 for detecting Python file encoding"""
result = ENCODING_RE.match(text)
if result:
line = 2 if result.group(1) else 1
encoding = result.group(3).decode('ascii')
# Handle some aliases that Python is happy to accept and that are used in the wild.
if encoding.startswith(('iso-latin-1-', 'latin-1-')) or encoding == 'iso-latin-1':
encoding = 'latin-1'
return encoding, line
else:
default_encoding = 'utf8' if pyversion[0] >= 3 else 'ascii'
return default_encoding, -1
def bytes_to_human_readable_repr(b: bytes) -> str:
"""Converts bytes into some human-readable representation. Unprintable
bytes such as the nul byte are escaped. For example:
>>> b = bytes([102, 111, 111, 10, 0])
>>> s = bytes_to_human_readable_repr(b)
>>> print(s)
foo\n\x00
>>> print(repr(s))
'foo\\n\\x00'
"""
return repr(b)[2:-1]
class DecodeError(Exception):
"""Exception raised when a file cannot be decoded due to an unknown encoding type.
Essentially a wrapper for the LookupError raised by `bytearray.decode`
"""
def decode_python_encoding(source: bytes, pyversion: Tuple[int, int]) -> str:
"""Read the Python file with while obeying PEP-263 encoding detection.
Returns the source as a string.
"""
# check for BOM UTF-8 encoding and strip it out if present
if source.startswith(b'\xef\xbb\xbf'):
encoding = 'utf8'
source = source[3:]
else:
# look at first two lines and check if PEP-263 coding is present
encoding, _ = find_python_encoding(source, pyversion)
try:
source_text = source.decode(encoding)
except LookupError as lookuperr:
raise DecodeError(str(lookuperr)) from lookuperr
return source_text
def read_py_file(path: str, read: Callable[[str], bytes],
pyversion: Tuple[int, int]) -> Optional[List[str]]:
"""Try reading a Python file as list of source lines.
Return None if something goes wrong.
"""
try:
source = read(path)
except OSError:
return None
else:
try:
source_lines = decode_python_encoding(source, pyversion).splitlines()
except DecodeError:
return None
return source_lines
def trim_source_line(line: str, max_len: int, col: int, min_width: int) -> Tuple[str, int]:
"""Trim a line of source code to fit into max_len.
Show 'min_width' characters on each side of 'col' (an error location). If either
start or end is trimmed, this is indicated by adding '...' there.
A typical result looks like this:
...some_variable = function_to_call(one_arg, other_arg) or...
    Return the trimmed string and the column offset to adjust the error location.
"""
if max_len < 2 * min_width + 1:
# In case the window is too tiny it is better to still show something.
max_len = 2 * min_width + 1
# Trivial case: line already fits in.
if len(line) <= max_len:
return line, 0
# If column is not too large so that there is still min_width after it,
# the line doesn't need to be trimmed at the start.
if col + min_width < max_len:
return line[:max_len] + '...', 0
# Otherwise, if the column is not too close to the end, trim both sides.
if col < len(line) - min_width - 1:
offset = col - max_len + min_width + 1
return '...' + line[offset:col + min_width + 1] + '...', offset - 3
# Finally, if the column is near the end, just trim the start.
return '...' + line[-max_len:], len(line) - max_len - 3
def get_mypy_comments(source: str) -> List[Tuple[int, str]]:
PREFIX = '# mypy: '
# Don't bother splitting up the lines unless we know it is useful
if PREFIX not in source:
return []
lines = source.split('\n')
results = []
for i, line in enumerate(lines):
if line.startswith(PREFIX):
results.append((i + 1, line[len(PREFIX):]))
return results
_python2_interpreter: Optional[str] = None
def try_find_python2_interpreter() -> Optional[str]:
global _python2_interpreter
if _python2_interpreter:
return _python2_interpreter
for interpreter in default_python2_interpreter:
try:
retcode = subprocess.Popen([
interpreter, '-c',
'import sys, typing; assert sys.version_info[:2] == (2, 7)'
]).wait()
if not retcode:
_python2_interpreter = interpreter
return interpreter
except OSError:
pass
return None
PASS_TEMPLATE: Final = """<?xml version="1.0" encoding="utf-8"?>
<testsuite errors="0" failures="0" name="mypy" skips="0" tests="1" time="{time:.3f}">
<testcase classname="mypy" file="mypy" line="1" name="mypy-py{ver}-{platform}" time="{time:.3f}">
</testcase>
</testsuite>
"""
FAIL_TEMPLATE: Final = """<?xml version="1.0" encoding="utf-8"?>
<testsuite errors="0" failures="1" name="mypy" skips="0" tests="1" time="{time:.3f}">
<testcase classname="mypy" file="mypy" line="1" name="mypy-py{ver}-{platform}" time="{time:.3f}">
<failure message="mypy produced messages">{text}</failure>
</testcase>
</testsuite>
"""
ERROR_TEMPLATE: Final = """<?xml version="1.0" encoding="utf-8"?>
<testsuite errors="1" failures="0" name="mypy" skips="0" tests="1" time="{time:.3f}">
<testcase classname="mypy" file="mypy" line="1" name="mypy-py{ver}-{platform}" time="{time:.3f}">
<error message="mypy produced errors">{text}</error>
</testcase>
</testsuite>
"""
def write_junit_xml(dt: float, serious: bool, messages: List[str], path: str,
version: str, platform: str) -> None:
from xml.sax.saxutils import escape
if not messages and not serious:
xml = PASS_TEMPLATE.format(time=dt, ver=version, platform=platform)
elif not serious:
xml = FAIL_TEMPLATE.format(text=escape('\n'.join(messages)), time=dt,
ver=version, platform=platform)
else:
xml = ERROR_TEMPLATE.format(text=escape('\n'.join(messages)), time=dt,
ver=version, platform=platform)
# checks for a directory structure in path and creates folders if needed
xml_dirs = os.path.dirname(os.path.abspath(path))
if not os.path.isdir(xml_dirs):
os.makedirs(xml_dirs)
with open(path, 'wb') as f:
f.write(xml.encode('utf-8'))
class IdMapper:
"""Generate integer ids for objects.
Unlike id(), these start from 0 and increment by 1, and ids won't
get reused across the life-time of IdMapper.
Assume objects don't redefine __eq__ or __hash__.
"""
def __init__(self) -> None:
self.id_map: Dict[object, int] = {}
self.next_id = 0
def id(self, o: object) -> int:
if o not in self.id_map:
self.id_map[o] = self.next_id
self.next_id += 1
return self.id_map[o]
def get_prefix(fullname: str) -> str:
"""Drop the final component of a qualified name (e.g. ('x.y' -> 'x')."""
return fullname.rsplit('.', 1)[0]
def get_top_two_prefixes(fullname: str) -> Tuple[str, str]:
"""Return one and two component prefixes of a fully qualified name.
Given 'a.b.c.d', return ('a', 'a.b').
If fullname has only one component, return (fullname, fullname).
"""
components = fullname.split('.', 3)
return components[0], '.'.join(components[:2])
def correct_relative_import(cur_mod_id: str,
relative: int,
target: str,
is_cur_package_init_file: bool) -> Tuple[str, bool]:
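    """Resolve a relative import of 'target' against 'cur_mod_id'.

    Returns the absolute module name plus a flag that is False when the import
    goes up through more parent packages than actually exist.
    """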
if relative == 0:
return target, True
parts = cur_mod_id.split(".")
rel = relative
if is_cur_package_init_file:
rel -= 1
ok = len(parts) >= rel
if rel != 0:
cur_mod_id = ".".join(parts[:-rel])
return cur_mod_id + (("." + target) if target else ""), ok
fields_cache: Final[Dict[Type[object], List[str]]] = {}
def get_class_descriptors(cls: 'Type[object]') -> Sequence[str]:
import inspect # Lazy import for minor startup speed win
# Maintain a cache of type -> attributes defined by descriptors in the class
# (that is, attributes from __slots__ and C extension classes)
if cls not in fields_cache:
members = inspect.getmembers(
cls,
lambda o: inspect.isgetsetdescriptor(o) or inspect.ismemberdescriptor(o))
fields_cache[cls] = [x for x, y in members if x != '__weakref__' and x != '__dict__']
return fields_cache[cls]
def replace_object_state(new: object, old: object, copy_dict: bool = False) -> None:
"""Copy state of old node to the new node.
This handles cases where there is __dict__ and/or attribute descriptors
(either from slots or because the type is defined in a C extension module).
Assume that both objects have the same __class__.
"""
if hasattr(old, '__dict__'):
if copy_dict:
new.__dict__ = dict(old.__dict__)
else:
new.__dict__ = old.__dict__
for attr in get_class_descriptors(old.__class__):
try:
if hasattr(old, attr):
setattr(new, attr, getattr(old, attr))
elif hasattr(new, attr):
delattr(new, attr)
# There is no way to distinguish getsetdescriptors that allow
# writes from ones that don't (I think?), so we just ignore
# AttributeErrors if we need to.
# TODO: What about getsetdescriptors that act like properties???
except AttributeError:
pass
def is_sub_path(path1: str, path2: str) -> bool:
"""Given two paths, return if path1 is a sub-path of path2."""
return pathlib.Path(path2) in pathlib.Path(path1).parents
def hard_exit(status: int = 0) -> None:
"""Kill the current process without fully cleaning up.
This can be quite a bit faster than a normal exit() since objects are not freed.
"""
sys.stdout.flush()
sys.stderr.flush()
os._exit(status)
def unmangle(name: str) -> str:
"""Remove internal suffixes from a short name."""
return name.rstrip("'")
def get_unique_redefinition_name(name: str, existing: Container[str]) -> str:
"""Get a simple redefinition name not present among existing.
For example, for name 'foo' we try 'foo-redefinition', 'foo-redefinition2',
'foo-redefinition3', etc. until we find one that is not in existing.
"""
r_name = name + '-redefinition'
if r_name not in existing:
return r_name
i = 2
while r_name + str(i) in existing:
i += 1
return r_name + str(i)
def check_python_version(program: str) -> None:
"""Report issues with the Python used to run mypy, dmypy, or stubgen"""
# Check for known bad Python versions.
if sys.version_info[:2] < (3, 6):
sys.exit("Running {name} with Python 3.5 or lower is not supported; "
"please upgrade to 3.6 or newer".format(name=program))
def count_stats(messages: List[str]) -> Tuple[int, int, int]:
"""Count total number of errors, notes and error_files in message list."""
errors = [e for e in messages if ': error:' in e]
error_files = {e.split(':')[0] for e in errors}
notes = [e for e in messages if ': note:' in e]
return len(errors), len(notes), len(error_files)
def split_words(msg: str) -> List[str]:
"""Split line of text into words (but not within quoted groups)."""
next_word = ''
res: List[str] = []
allow_break = True
for c in msg:
if c == ' ' and allow_break:
res.append(next_word)
next_word = ''
continue
if c == '"':
allow_break = not allow_break
next_word += c
res.append(next_word)
return res
def get_terminal_width() -> int:
"""Get current terminal width if possible, otherwise return the default one."""
return (int(os.getenv('MYPY_FORCE_TERMINAL_WIDTH', '0'))
or shutil.get_terminal_size().columns
or DEFAULT_COLUMNS)
def soft_wrap(msg: str, max_len: int, first_offset: int,
num_indent: int = 0) -> str:
"""Wrap a long error message into few lines.
Breaks will only happen between words, and never inside a quoted group
(to avoid breaking types such as "Union[int, str]"). The 'first_offset' is
the width before the start of first line.
Pad every next line with 'num_indent' spaces. Every line will be at most 'max_len'
characters, except if it is a single word or quoted group.
For example:
first_offset
------------------------
path/to/file: error: 58: Some very long error message
that needs to be split in separate lines.
"Long[Type, Names]" are never split.
^^^^--------------------------------------------------
num_indent max_len
"""
words = split_words(msg)
next_line = words.pop(0)
lines: List[str] = []
while words:
next_word = words.pop(0)
max_line_len = max_len - num_indent if lines else max_len - first_offset
# Add 1 to account for space between words.
if len(next_line) + len(next_word) + 1 <= max_line_len:
next_line += ' ' + next_word
else:
lines.append(next_line)
next_line = next_word
lines.append(next_line)
padding = '\n' + ' ' * num_indent
return padding.join(lines)
def hash_digest(data: bytes) -> str:
"""Compute a hash digest of some data.
We use a cryptographic hash because we want a low probability of
accidental collision, but we don't really care about any of the
cryptographic properties.
"""
# Once we drop Python 3.5 support, we should consider using
# blake2b, which is faster.
return hashlib.sha256(data).hexdigest()
def parse_gray_color(cup: bytes) -> str:
"""Reproduce a gray color in ANSI escape sequence"""
if sys.platform == "win32":
assert False, "curses is not available on Windows"
set_color = ''.join([cup[:-1].decode(), 'm'])
gray = curses.tparm(set_color.encode('utf-8'), 1, 89).decode()
return gray
class FancyFormatter:
"""Apply color and bold font to terminal output.
This currently only works on Linux and Mac.
"""
def __init__(self, f_out: IO[str], f_err: IO[str], show_error_codes: bool) -> None:
self.show_error_codes = show_error_codes
# Check if we are in a human-facing terminal on a supported platform.
if sys.platform not in ('linux', 'darwin', 'win32'):
self.dummy_term = True
return
force_color = int(os.getenv('MYPY_FORCE_COLOR', '0'))
if not force_color and (not f_out.isatty() or not f_err.isatty()):
self.dummy_term = True
return
if sys.platform == 'win32':
self.dummy_term = not self.initialize_win_colors()
else:
self.dummy_term = not self.initialize_unix_colors()
if not self.dummy_term:
self.colors = {'red': self.RED, 'green': self.GREEN,
'blue': self.BLUE, 'yellow': self.YELLOW,
'none': ''}
def initialize_win_colors(self) -> bool:
"""Return True if initialization was successful and we can use colors, False otherwise"""
# Windows ANSI escape sequences are only supported on Threshold 2 and above.
# we check with an assert at runtime and an if check for mypy, as asserts do not
# yet narrow platform
assert sys.platform == 'win32'
if sys.platform == 'win32':
winver = sys.getwindowsversion()
if (winver.major < MINIMUM_WINDOWS_MAJOR_VT100
or winver.build < MINIMUM_WINDOWS_BUILD_VT100):
return False
import ctypes
kernel32 = ctypes.windll.kernel32
ENABLE_PROCESSED_OUTPUT = 0x1
ENABLE_WRAP_AT_EOL_OUTPUT = 0x2
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
STD_OUTPUT_HANDLE = -11
kernel32.SetConsoleMode(kernel32.GetStdHandle(STD_OUTPUT_HANDLE),
ENABLE_PROCESSED_OUTPUT
| ENABLE_WRAP_AT_EOL_OUTPUT
| ENABLE_VIRTUAL_TERMINAL_PROCESSING)
self.BOLD = '\033[1m'
self.UNDER = '\033[4m'
self.BLUE = '\033[94m'
self.GREEN = '\033[92m'
self.RED = '\033[91m'
self.YELLOW = '\033[93m'
self.NORMAL = '\033[0m'
self.DIM = '\033[2m'
return True
return False
def initialize_unix_colors(self) -> bool:
"""Return True if initialization was successful and we can use colors, False otherwise"""
if sys.platform == "win32" or not CURSES_ENABLED:
return False
try:
# setupterm wants a fd to potentially write an "initialization sequence".
# We override sys.stdout for the daemon API so if stdout doesn't have an fd,
# just give it /dev/null.
try:
fd = sys.stdout.fileno()
except io.UnsupportedOperation:
with open("/dev/null", "rb") as f:
curses.setupterm(fd=f.fileno())
else:
curses.setupterm(fd=fd)
except curses.error:
# Most likely terminfo not found.
return False
bold = curses.tigetstr('bold')
under = curses.tigetstr('smul')
set_color = curses.tigetstr('setaf')
set_eseq = curses.tigetstr('cup')
normal = curses.tigetstr('sgr0')
if not (bold and under and set_color and set_eseq and normal):
return False
self.NORMAL = normal.decode()
self.BOLD = bold.decode()
self.UNDER = under.decode()
self.DIM = parse_gray_color(set_eseq)
self.BLUE = curses.tparm(set_color, curses.COLOR_BLUE).decode()
self.GREEN = curses.tparm(set_color, curses.COLOR_GREEN).decode()
self.RED = curses.tparm(set_color, curses.COLOR_RED).decode()
self.YELLOW = curses.tparm(set_color, curses.COLOR_YELLOW).decode()
return True
def style(self, text: str, color: Literal['red', 'green', 'blue', 'yellow', 'none'],
bold: bool = False, underline: bool = False, dim: bool = False) -> str:
"""Apply simple color and style (underlined or bold)."""
if self.dummy_term:
return text
if bold:
start = self.BOLD
else:
start = ''
if underline:
start += self.UNDER
if dim:
start += self.DIM
return start + self.colors[color] + text + self.NORMAL
def fit_in_terminal(self, messages: List[str],
fixed_terminal_width: Optional[int] = None) -> List[str]:
"""Improve readability by wrapping error messages and trimming source code."""
width = fixed_terminal_width or get_terminal_width()
new_messages = messages.copy()
for i, error in enumerate(messages):
if ': error:' in error:
loc, msg = error.split('error:', maxsplit=1)
msg = soft_wrap(msg, width, first_offset=len(loc) + len('error: '))
new_messages[i] = loc + 'error:' + msg
if error.startswith(' ' * DEFAULT_SOURCE_OFFSET) and '^' not in error:
# TODO: detecting source code highlights through an indent can be surprising.
# Restore original error message and error location.
error = error[DEFAULT_SOURCE_OFFSET:]
column = messages[i+1].index('^') - DEFAULT_SOURCE_OFFSET
# Let source have some space also on the right side, plus 6
# to accommodate ... on each side.
max_len = width - DEFAULT_SOURCE_OFFSET - 6
source_line, offset = trim_source_line(error, max_len, column, MINIMUM_WIDTH)
new_messages[i] = ' ' * DEFAULT_SOURCE_OFFSET + source_line
# Also adjust the error marker position.
new_messages[i+1] = ' ' * (DEFAULT_SOURCE_OFFSET + column - offset) + '^'
return new_messages
def colorize(self, error: str) -> str:
"""Colorize an output line by highlighting the status and error code."""
if ': error:' in error:
loc, msg = error.split('error:', maxsplit=1)
if not self.show_error_codes:
return (loc + self.style('error:', 'red', bold=True) +
self.highlight_quote_groups(msg))
codepos = msg.rfind('[')
if codepos != -1:
code = msg[codepos:]
msg = msg[:codepos]
else:
code = "" # no error code specified
return (loc + self.style('error:', 'red', bold=True) +
self.highlight_quote_groups(msg) + self.style(code, 'yellow'))
elif ': note:' in error:
loc, msg = error.split('note:', maxsplit=1)
formatted = self.highlight_quote_groups(self.underline_link(msg))
return loc + self.style('note:', 'blue') + formatted
elif error.startswith(' ' * DEFAULT_SOURCE_OFFSET):
# TODO: detecting source code highlights through an indent can be surprising.
if '^' not in error:
return self.style(error, 'none', dim=True)
return self.style(error, 'red')
else:
return error
def highlight_quote_groups(self, msg: str) -> str:
"""Make groups quoted with double quotes bold (including quotes).
This is used to highlight types, attribute names etc.
"""
if msg.count('"') % 2:
# Broken error message, don't do any formatting.
return msg
parts = msg.split('"')
out = ''
for i, part in enumerate(parts):
if i % 2 == 0:
out += self.style(part, 'none')
else:
out += self.style('"' + part + '"', 'none', bold=True)
return out
def underline_link(self, note: str) -> str:
"""Underline a link in a note message (if any).
This assumes there is at most one link in the message.
"""
match = re.search(r'https?://\S*', note)
if not match:
return note
start = match.start()
end = match.end()
return (note[:start] +
self.style(note[start:end], 'none', underline=True) +
note[end:])
def format_success(self, n_sources: int, use_color: bool = True) -> str:
"""Format short summary in case of success.
n_sources is total number of files passed directly on command line,
i.e. excluding stubs and followed imports.
"""
msg = 'Success: no issues found in {}' \
' source file{}'.format(n_sources, 's' if n_sources != 1 else '')
if not use_color:
return msg
return self.style(msg, 'green', bold=True)
def format_error(
self, n_errors: int, n_files: int, n_sources: int, *,
blockers: bool = False, use_color: bool = True
) -> str:
"""Format a short summary in case of errors."""
msg = 'Found {} error{} in {} file{}'.format(
n_errors, 's' if n_errors != 1 else '',
n_files, 's' if n_files != 1 else ''
)
if blockers:
msg += ' (errors prevented further checking)'
else:
msg += ' (checked {} source file{})'.format(n_sources, 's' if n_sources != 1 else '')
if not use_color:
return msg
return self.style(msg, 'red', bold=True)
def is_typeshed_file(file: str) -> bool:
# gross, but no other clear way to tell
return 'typeshed' in os.path.abspath(file).split(os.sep)
def is_stub_package_file(file: str) -> bool:
# Use hacky heuristics to check whether file is part of a PEP 561 stub package.
if not file.endswith('.pyi'):
return False
return any(component.endswith('-stubs')
for component in os.path.abspath(file).split(os.sep))
def unnamed_function(name: Optional[str]) -> bool:
return name is not None and name == "_"
# TODO: replace with uses of perf_counter_ns when support for py3.6 is dropped
# (or when mypy properly handles alternate definitions based on python version check)
time_ref = time.perf_counter
def time_spent_us(t0: float) -> int:
return int((time.perf_counter() - t0) * 1e6)
|
the-stack_0_2802 | """ Regression Template
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def main():
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Fitting the Regression Model to the dataset
# Create your regressor here
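    # One possible choice, purely as an illustration (the template itself does not
    # prescribe a model): a decision tree regressor fitted on the full dataset.
    from sklearn.tree import DecisionTreeRegressor
    regressor = DecisionTreeRegressor(random_state=0)
    regressor.fit(X, y)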
# Predicting a new result
    y_pred = regressor.predict([[6.5]])  # scikit-learn expects a 2-D array of samples
# Visualising the Regression results
plt.scatter(X, y, color='red')
plt.plot(X, regressor.predict(X), color='blue')
plt.title('Truth or Bluff (Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the Regression results (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.title('Truth or Bluff (Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
if __name__ == '__main__':
main()
|
the-stack_0_2805 | def graph_to_tree(N, edges, root):
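    """Root an undirected graph at `root` and return (children, parents).

    `edges` maps each vertex to an iterable of neighbours; the traversal is an
    iterative stack-based walk, so deep graphs do not hit Python's recursion limit.
    """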
from collections import defaultdict
children = defaultdict(list)
parents = [None] * N
root = 0
parents[root] = root
stack = [root]
while stack:
v = stack.pop()
for u in edges[v]:
if parents[u] is not None:
# already visited
continue
parents[u] = v
children[v].append(u)
stack.append(u)
return children, parents
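# Minimal usage sketch (illustrative; not part of the original file):
if __name__ == "__main__":
    adjacency = {0: [1, 2], 1: [0, 3], 2: [0], 3: [1]}
    children, parents = graph_to_tree(4, adjacency, root=0)
    print(dict(children))  # {0: [1, 2], 1: [3]}
    print(parents)         # [0, 0, 0, 1]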
|
the-stack_0_2806 |
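# Overview (added comment): read large whitespace-delimited files (the paths suggest
# Google Books 5-gram exports) in fixed-size dask blocks and write them out as
# per-year parquet datasets for 1995-2005.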
from dask import dataframe as dd
import datetime
def time_exe():
now = datetime.datetime.now()
print (now.strftime("%Y-%m-%d %H:%M:%S"))
def chunk_filtering_yearwise_data(data_):
return data_[(data_[5]>1994) & (data_[5] <2006)]
chunksize = 64000000 * 1  # 64000000 bytes is roughly 64 MB; increase the multiplier for larger blocks (e.g. *8 for ~512 MB)
original_files_dir = "E:/download/eng-all-5gram-2012-extracted/7/"
dataset_path = "E:/download/proj/n5grm/small_ds/yearwise/"
import os
start = 1995
end = 2005
step = 1
folderpath = dataset_path
def split_data_year_wise(startyear,stopyear,yearsetp,basepath,fname,dataset):
print("start time")
time_exe()
stopyear= stopyear+1
for i in range(startyear, stopyear, yearsetp):
year_dd = dataset[dataset[5]==i]
path = os.path.join(basepath,str(i),fname)
if not os.path.exists(path):
os.makedirs(path)
print("processing year "+str(i))
year_dd.to_parquet(path,engine='pyarrow')
#year_dd.to_csv(path)
print("finisheed time")
time_exe()
def process_start():
for filename in os.listdir(original_files_dir):
print("file processing started "+ filename)
from dask import dataframe as dd
df = dd.read_csv(os.path.join(original_files_dir,filename),
sep='\s+',
header=None, blocksize=chunksize,error_bad_lines=False,
encoding='utf-8',engine='python')
split_data_year_wise(start,end,step,folderpath,filename,df)
def main():
print("starting......")
process_start()
if __name__ == "__main__":
main()
|
the-stack_0_2808 | from pathlib import Path
import sys
from selenium.common.exceptions import TimeoutException
import re
import subprocess
import json
from typing import List, Dict
# pycharm complains that build_assets is an unresolved ref
# don't worry about it, the script still runs
from build_assets.selenium_runner.BuildSeleniumRunner import BuildSeleniumRunner
from build_assets import filehandler, arg_getters, util, api_handler
def main():
"""
Build the icons using Icomoon. Also optimize the svgs.
"""
runner = None
try:
args = arg_getters.get_selenium_runner_args()
new_icons = get_icons_for_building(args.icomoon_json_path, args.devicon_json_path, args.token)
if len(new_icons) == 0:
sys.exit("No files need to be uploaded. Ending script...")
print(f"There are {len(new_icons)} icons to be build. Here are they:", *new_icons, sep = "\n")
print("Begin optimizing files...")
optimize_svgs(new_icons, args.icons_folder_path)
print("Updating the icomoon json...")
update_icomoon_json(new_icons, args.icomoon_json_path)
print("Start the building icons process...")
icon_svgs = filehandler.get_svgs_paths(
new_icons, args.icons_folder_path, icon_versions_only=True)
zip_name = "devicon-v1.0.zip"
zip_path = Path(args.download_path, zip_name)
screenshot_folder = filehandler.create_screenshot_folder("./")
runner = BuildSeleniumRunner(args.download_path,
args.geckodriver_path, args.headless)
runner.build_icons(args.icomoon_json_path, zip_path,
icon_svgs, screenshot_folder)
filehandler.extract_files(str(zip_path), args.download_path)
filehandler.rename_extracted_files(args.download_path)
print("Creating the release message by querying the GitHub API...")
get_release_message(args.token)
print("Task completed.")
except TimeoutException as e:
util.exit_with_err("Selenium Time Out Error: \n" + str(e))
except Exception as e:
util.exit_with_err(e)
finally:
if runner is not None:
runner.close()
def get_icons_for_building(icomoon_json_path: str, devicon_json_path: str, token: str):
"""
Get the icons for building.
:param icomoon_json_path - the path to the `icomoon.json`.
:param devicon_json_path - the path to the `devicon.json`.
:param token - the token to access the GitHub API.
:return a list of dict containing info on the icons. These are
from the `devicon.json`.
"""
devicon_json = filehandler.get_json_file_content(devicon_json_path)
pull_reqs = api_handler.get_merged_pull_reqs_since_last_release(token)
new_icons = []
for pull_req in pull_reqs:
if api_handler.is_feature_icon(pull_req):
filtered_icon = util.find_object_added_in_pr(devicon_json, pull_req["title"])
if filtered_icon not in new_icons:
new_icons.append(filtered_icon)
# get any icons that might not have been found by the API
# sometimes happen due to the PR being opened before the latest build release
new_icons_from_devicon_json = filehandler.find_new_icons_in_devicon_json(
devicon_json_path, icomoon_json_path)
for icon in new_icons_from_devicon_json:
if icon not in new_icons:
new_icons.append(icon)
return new_icons
def optimize_svgs(new_icons: List[str], icons_folder_path: str):
"""
Optimize the newly added svgs. This is done in batches
since the command line has a limit on characters allowed.
:param new_icons - the new icons that need to be optimized.
:param icons_folder_path - the path to the /icons folder.
"""
svgs = filehandler.get_svgs_paths(new_icons, icons_folder_path, icon_versions_only=False)
start = 0
step = 10
for i in range(start, len(svgs), step):
batch = svgs[i:i + step]
subprocess.run(["npm", "run", "optimize-svg", "--", f"--svgFiles={json.dumps(batch)}"], shell=True)
def update_icomoon_json(new_icons: List[str], icomoon_json_path: str):
"""
Update the `icomoon.json` if it contains any icons
that needed to be updated. This will remove the icons
from the `icomoon.json` so the build script will reupload
it later.
"""
icomoon_json = filehandler.get_json_file_content(icomoon_json_path)
cur_len = len(icomoon_json["icons"])
messages = []
wrapper_function = lambda icomoon_icon : find_icomoon_icon_not_in_new_icons(
icomoon_icon, new_icons, messages)
icons_to_keep = filter(wrapper_function, icomoon_json["icons"])
icomoon_json["icons"] = list(icons_to_keep)
new_len = len(icomoon_json["icons"])
print(f"Update completed. Removed {cur_len - new_len} icons:", *messages, sep='\n')
filehandler.write_to_file(icomoon_json_path, json.dumps(icomoon_json))
def find_icomoon_icon_not_in_new_icons(icomoon_icon: Dict, new_icons: List, messages: List):
"""
Find all the icomoon icons that are not listed in the new icons.
This also add logging for which icons were removed.
:param icomoon_icon - a dict object from the icomoon.json's `icons` attribute.
:param new_icons - a list of new icons. Each element is an object from the `devicon.json`.
:param messages - an empty list where the function can attach logging on which
icon were removed.
"""
for new_icon in new_icons:
pattern = re.compile(f"^{new_icon['name']}-")
if pattern.search(icomoon_icon["properties"]["name"]):
message = f"-'{icomoon_icon['properties']['name']}' cause it matches '{new_icon['name']}'"
messages.append(message)
return False
return True
def get_release_message(token):
"""
Get the release message for the latest build and write
the result in a file.
:param token: the GitHub API token to access the API.
"""
# fetch first page by default
data = api_handler.get_merged_pull_reqs_since_last_release(token)
newIcons = []
features = []
print("Parsing through the pull requests...")
for pullData in data:
authors = api_handler.find_all_authors(pullData, token)
markdown = f"- [{pullData['title']}]({pullData['html_url']}) by {authors}."
if api_handler.is_feature_icon(pullData):
newIcons.append(markdown)
else:
features.append(markdown)
print("Constructing message...")
thankYou = "A huge thanks to all our maintainers and contributors for making this release possible!"
iconTitle = f"**{len(newIcons)} New Icons**"
featureTitle = f"**{len(features)} New Features**"
finalString = "{0}\n\n {1}\n{2}\n\n {3}\n{4}".format(thankYou,
iconTitle, "\n".join(newIcons),
featureTitle, "\n".join(features))
print("--------------Here is the build message--------------\n", finalString)
release_message_path = "./release_message.txt"
filehandler.write_to_file(release_message_path, finalString)
print("Script finished")
if __name__ == "__main__":
main()
|
the-stack_0_2809 | import rdkit
import rdkit.Chem as Chem
import numpy as np
import pandas as pd
import os
# import tensorflow as tf
elem_list = ['C', 'O', 'N', 'F', 'Br', 'Cl', 'S',
'Si', 'B', 'I', 'K', 'Na', 'P', 'Mg', 'Li', 'Al', 'H']
atom_fdim_geo = len(elem_list) + 6 + 6 + 6 + 1
bond_fdim_geo = 6
bond_fdim_qm = 25 + 40
max_nb = 10
qm_descriptors = None
def initialize_qm_descriptors(df=None, path=None):
global qm_descriptors
if path is not None:
qm_descriptors = pd.read_pickle(path).set_index('smiles')
elif df is not None:
qm_descriptors = df
def get_atom_classes():
atom_classes = {}
token = 0
for e in elem_list: #element
for d in [0, 1, 2, 3, 4, 5]: #degree
for ev in [1, 2, 3, 4, 5, 6]: #explicit valence
for iv in [0, 1, 2, 3, 4, 5]: #inexplicit valence
atom_classes[str((e, d, ev, iv))] = token
token += 1
return atom_classes
def rbf_expansion(expanded, mu=0, delta=0.01, kmax=8):
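    # Expand each scalar into `kmax` Gaussian radial-basis features with centres
    # mu, mu + delta, ..., mu + (kmax - 1) * delta and width controlled by delta.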
k = np.arange(0, kmax)
return np.exp(-(expanded - (mu + delta * k))**2 / delta)
def onek_encoding_unk(x, allowable_set):
if x not in allowable_set:
x = allowable_set[-1]
return list(map(lambda s: x == s, allowable_set))
def atom_features(atom):
return np.array(onek_encoding_unk(atom.GetSymbol(), elem_list)
+ onek_encoding_unk(atom.GetDegree(), [0, 1, 2, 3, 4, 5])
+ onek_encoding_unk(atom.GetExplicitValence(), [1, 2, 3, 4, 5, 6])
+ onek_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5])
+ [atom.GetIsAromatic()], dtype=np.float32)
def bond_features(bond):
bt = bond.GetBondType()
return np.array(
[bt == Chem.rdchem.BondType.SINGLE, bt == Chem.rdchem.BondType.DOUBLE, bt == Chem.rdchem.BondType.TRIPLE,
bt == Chem.rdchem.BondType.AROMATIC, bond.GetIsConjugated(), bond.IsInRing()], dtype=np.float32)
def _mol2graph(rs, selected_descriptors, core=[]):
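    # Build graph tensors for a (possibly multi-component) atom-mapped reactant SMILES:
    # geometric atom/bond features, QM-descriptor atom/bond features looked up from the
    # global `qm_descriptors` table, neighbour index arrays, and a reacting-core mask.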
atom_fdim_qm = 50 * len(selected_descriptors)
mol_rs = Chem.MolFromSmiles(rs)
if not mol_rs:
raise ValueError("Could not parse smiles string:", smiles)
fatom_index = {a.GetIntProp('molAtomMapNumber') - 1: a.GetIdx() for a in mol_rs.GetAtoms()}
fbond_index = {'{}-{}'.format(*sorted([b.GetBeginAtom().GetIntProp('molAtomMapNumber') - 1,
b.GetEndAtom().GetIntProp('molAtomMapNumber') - 1])): b.GetIdx()
for b in mol_rs.GetBonds()}
n_atoms = mol_rs.GetNumAtoms()
n_bonds = max(mol_rs.GetNumBonds(), 1)
fatoms_geo = np.zeros((n_atoms, atom_fdim_geo))
fatoms_qm = np.zeros((n_atoms, atom_fdim_qm))
fbonds_geo = np.zeros((n_bonds, bond_fdim_geo))
fbonds_qm = np.zeros((n_bonds, bond_fdim_qm))
atom_nb = np.zeros((n_atoms, max_nb), dtype=np.int32)
bond_nb = np.zeros((n_atoms, max_nb), dtype=np.int32)
num_nbs = np.zeros((n_atoms,), dtype=np.int32)
core_mask = np.zeros((n_atoms,), dtype=np.int32)
for smiles in rs.split('.'):
mol = Chem.MolFromSmiles(smiles)
fatom_index_mol = {a.GetIntProp('molAtomMapNumber') - 1: a.GetIdx() for a in mol.GetAtoms()}
qm_series = qm_descriptors.loc[smiles]
partial_charge = qm_series['partial_charge'].reshape(-1, 1)
partial_charge = np.apply_along_axis(rbf_expansion, -1, partial_charge, -2.0, 0.06, 50)
fukui_elec = qm_series['fukui_elec'].reshape(-1, 1)
fukui_elec = np.apply_along_axis(rbf_expansion, -1, fukui_elec, 0, 0.02, 50)
fukui_neu = qm_series['fukui_neu'].reshape(-1, 1)
fukui_neu = np.apply_along_axis(rbf_expansion, -1, fukui_neu, 0, 0.02, 50)
nmr = qm_series['NMR'].reshape(-1, 1)
nmr = np.apply_along_axis(rbf_expansion, -1, nmr, 0.0, 0.06, 50)
bond_index = np.expand_dims(qm_series['bond_order_matrix'], -1)
bond_index = np.apply_along_axis(rbf_expansion, -1, bond_index, 0.5, 0.1, 25)
bond_distance = np.expand_dims(qm_series['distance_matrix'], -1)
bond_distance = np.apply_along_axis(rbf_expansion, -1, bond_distance, 0.5, 0.05, 40)
selected_descriptors = set(selected_descriptors)
if selected_descriptors == {"partial_charge", "fukui_elec", "fukui_neu", "nmr"}:
atom_qm_descriptor = np.concatenate([partial_charge, fukui_elec, fukui_neu, nmr], axis=-1)
elif selected_descriptors == {"partial_charge", "nmr"}:
atom_qm_descriptor = np.concatenate([partial_charge, nmr], axis=-1)
elif selected_descriptors == {"fukui_elec", "fukui_neu"}:
atom_qm_descriptor = np.concatenate([fukui_elec, fukui_neu], axis=-1)
elif selected_descriptors == {"only_bonds"}:
atom_qm_descriptor = partial_charge
for map_idx in fatom_index_mol:
fatoms_geo[fatom_index[map_idx], :] = atom_features(mol_rs.GetAtomWithIdx(fatom_index[map_idx]))
fatoms_qm[fatom_index[map_idx], :] = atom_qm_descriptor[fatom_index_mol[map_idx], :]
if fatom_index[map_idx] in core:
core_mask[fatom_index[map_idx]] = 1
for bond in mol.GetBonds():
a1i, a2i = bond.GetBeginAtom().GetIntProp('molAtomMapNumber'), \
bond.GetEndAtom().GetIntProp('molAtomMapNumber')
idx = fbond_index['{}-{}'.format(*sorted([a1i-1, a2i-1]))]
a1 = fatom_index[a1i-1]
a2 = fatom_index[a2i-1]
a1i = fatom_index_mol[a1i-1]
a2i = fatom_index_mol[a2i-1]
if num_nbs[a1] == max_nb or num_nbs[a2] == max_nb:
raise Exception(smiles)
atom_nb[a1, num_nbs[a1]] = a2
atom_nb[a2, num_nbs[a2]] = a1
bond_nb[a1, num_nbs[a1]] = idx
bond_nb[a2, num_nbs[a2]] = idx
num_nbs[a1] += 1
num_nbs[a2] += 1
fbonds_geo[idx, :] = bond_features(bond)
fbonds_qm[idx, :25] = bond_index[a1i, a2i]
fbonds_qm[idx, 25:] = bond_distance[a1i, a2i]
return fatoms_geo, fatoms_qm, fbonds_qm, atom_nb, bond_nb, num_nbs, core_mask
def smiles2graph_pr(r_smiles, p_smiles, selected_descriptors=["partial_charge", "fukui_elec", "fukui_neu", "nmr"],
core_buffer=0):
rs, rs_core, p_core = _get_reacting_core(r_smiles, p_smiles, core_buffer)
rs_features = _mol2graph(r_smiles, selected_descriptors, core=rs_core)
return rs_features, r_smiles
def _get_reacting_core(rs, p, buffer):
'''
use molAtomMapNumber of molecules
buffer: neighbor to be cosidered as reacting center
return: atomidx of reacting core
'''
r_mols = Chem.MolFromSmiles(rs)
p_mol = Chem.MolFromSmiles(p)
rs_dict = {a.GetIntProp('molAtomMapNumber'): a for a in r_mols.GetAtoms()}
p_dict = {a.GetIntProp('molAtomMapNumber'): a for a in p_mol.GetAtoms()}
rs_reactants = []
for r_smiles in rs.split('.'):
for a in Chem.MolFromSmiles(r_smiles).GetAtoms():
if a.GetIntProp('molAtomMapNumber') in p_dict:
rs_reactants.append(r_smiles)
break
rs_reactants = '.'.join(rs_reactants)
core_mapnum = set()
for a_map in p_dict:
# FIXME chiral change
# if str(p_dict[a_map].GetChiralTag()) != str(rs_dict[a_map].GetChiralTag()):
# core_mapnum.add(a_map)
a_neighbor_in_p = set([a.GetIntProp('molAtomMapNumber') for a in p_dict[a_map].GetNeighbors()])
a_neighbor_in_rs = set([a.GetIntProp('molAtomMapNumber') for a in rs_dict[a_map].GetNeighbors()])
if a_neighbor_in_p != a_neighbor_in_rs:
core_mapnum.add(a_map)
else:
for a_neighbor in a_neighbor_in_p:
b_in_p = p_mol.GetBondBetweenAtoms(p_dict[a_neighbor].GetIdx(), p_dict[a_map].GetIdx())
b_in_r = r_mols.GetBondBetweenAtoms(rs_dict[a_neighbor].GetIdx(), rs_dict[a_map].GetIdx())
if b_in_p.GetBondType() != b_in_r.GetBondType():
core_mapnum.add(a_map)
core_rs = _get_buffer(r_mols, [rs_dict[a].GetIdx() for a in core_mapnum], buffer)
core_p = _get_buffer(p_mol, [p_dict[a].GetIdx() for a in core_mapnum], buffer)
fatom_index = \
{a.GetIntProp('molAtomMapNumber') - 1: a.GetIdx() for a in Chem.MolFromSmiles(rs_reactants).GetAtoms()}
core_rs = [fatom_index[x] for x in core_rs]
core_p = [fatom_index[x] for x in core_p]
return rs_reactants, core_rs, core_p
def _get_buffer(m, cores, buffer):
neighbors = set(cores)
for i in range(buffer):
neighbors_temp = list(neighbors)
for c in neighbors_temp:
neighbors.update([n.GetIdx() for n in m.GetAtomWithIdx(c).GetNeighbors()])
neighbors = [m.GetAtomWithIdx(x).GetIntProp('molAtomMapNumber') - 1 for x in neighbors]
return neighbors
def pack2D(arr_list):
N = max([x.shape[0] for x in arr_list])
M = max([x.shape[1] for x in arr_list])
a = np.zeros((len(arr_list), N, M))
for i, arr in enumerate(arr_list):
n = arr.shape[0]
m = arr.shape[1]
a[i, 0:n, 0:m] = arr
return a
def pack2D_withidx(arr_list):
N = max([x.shape[0] for x in arr_list])
M = max([x.shape[1] for x in arr_list])
a = np.zeros((len(arr_list), N, M, 2))
for i, arr in enumerate(arr_list):
n = arr.shape[0]
m = arr.shape[1]
a[i, 0:n, 0:m, 0] = i
a[i, 0:n, 0:m, 1] = arr
return a
def pack1D(arr_list):
N = max([x.shape[0] for x in arr_list])
a = np.zeros((len(arr_list), N))
for i, arr in enumerate(arr_list):
n = arr.shape[0]
a[i, 0:n] = arr
return a
def get_mask(arr_list):
N = max([x.shape[0] for x in arr_list])
a = np.zeros((len(arr_list), N))
for i, arr in enumerate(arr_list):
for j in range(arr.shape[0]):
a[i][j] = 1
return a
def smiles2graph_list(smiles_list, idxfunc=lambda x: x.GetIdx()):
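    # NOTE (added): this helper calls `smiles2graph`, which is not defined in this
    # module and is assumed to be provided by the surrounding package.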
res = list(map(lambda x: smiles2graph(x, idxfunc), smiles_list))
fatom_list, fbond_list, gatom_list, gbond_list, nb_list = zip(*res)
return pack2D(fatom_list), pack2D(fbond_list), pack2D_withidx(gatom_list), pack2D_withidx(gbond_list), pack1D(
nb_list), get_mask(fatom_list)
def get_bond_edits(reactant_smi, product_smi):
reactants = Chem.MolFromSmiles(reactant_smi)
products = Chem.MolFromSmiles(product_smi)
conserved_maps = [a.GetAtomMapNum() for a in reactants.GetAtoms() if a.GetAtomMapNum()]
bond_changes = set()
bonds_prev = {}
for bond in reactants.GetBonds():
nums = sorted(
[bond.GetBeginAtom().GetAtomMapNum(), bond.GetEndAtom().GetAtomMapNum()])
bonds_prev['{}~{}'.format(nums[0], nums[1])] = bond.GetBondTypeAsDouble()
bonds_new = {}
for bond in products.GetBonds():
nums = sorted(
[bond.GetBeginAtom().GetAtomMapNum(), bond.GetEndAtom().GetAtomMapNum()])
if (nums[0] not in conserved_maps) or (nums[1] not in conserved_maps): continue
bonds_new['{}~{}'.format(nums[0], nums[1])] = bond.GetBondTypeAsDouble()
for bond in bonds_prev:
if bond not in bonds_new:
bond_changes.add((bond.split('~')[0], bond.split('~')[1], 0.0)) # lost bond
else:
if bonds_prev[bond] != bonds_new[bond]:
bond_changes.add((bond.split('~')[0], bond.split('~')[1], bonds_new[bond])) # changed bond
for bond in bonds_new:
if bond not in bonds_prev:
bond_changes.add((bond.split('~')[0], bond.split('~')[1], bonds_new[bond])) # new bond
return bond_changes
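# Illustrative sketch (not part of the original module): get_bond_edits on a toy
# atom-mapped substitution, CH3-Cl + OH- -> CH3-OH + Cl-. The mapped SMILES are
# invented for demonstration; the lost C-Cl bond is reported with order 0.0 and the
# newly formed C-O bond with order 1.0.
def _demo_get_bond_edits():
    edits = get_bond_edits('[CH3:1][Cl:2].[OH-:3]', '[CH3:1][OH:3].[Cl-:2]')
    # expected: {('1', '2', 0.0), ('1', '3', 1.0)}
    return edits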
if __name__ == "__main__":
graph = smiles2graph_pr("[CH3:1][C@@H:2]([NH2:3])[CH2:4][Cl:5].[F-:6]", "[3, 4, 1]")
|
the-stack_0_2811 | #!/usr/bin/env python
# D. Jones - 1/10/14
"""This code is from the IDL Astronomy Users Library with
modifications from Dan Scolnic.
(adapted for IDL from DAOPHOT, then translated from IDL to Python).
Subroutine of GETPSF to perform a one-star least-squares fit,
part of the DAOPHOT PSF photometry sequence. This version requires
input noise and mask images.
CALLING SEQUENCE:
from PythonPhot import pkfit_noise as pkfit
pk = pkfit.pkfit_class(f, gauss, psf,
ronois, phpadu, noise_image, mask_image )
errmag,chi,sharp,niter,scale,xnew,ynew = pk.pkfit_noise(scale,x,y,sky,radius)
PKFIT CLASS INPUTS:
f - NX by NY array containing actual picture data.
ronois - readout noise per pixel, scalar
phpadu - photons per analog digital unit, scalar
gauss - vector containing the values of the five parameters defining
the analytic Gaussian which approximates the core of the PSF.
psf - an NPSF by NPSF look-up table containing corrections from
the Gaussian approximation of the PSF to the true PSF.
noise_image - the noise image corresponding to f
mask_image - the mask image corresponding to f. Masked pixels are not used.
PKFIT FUNCTION INPUTS:
x, y - the initial estimates of the centroid of the star relative
to the corner (0,0) of the subarray. Upon return, the
final computed values of X and Y will be passed back to the
calling routine.
sky - the local sky brightness value, as obtained from APER
radius - the fitting radius-- only pixels within RADIUS of the
instantaneous estimate of the star's centroid will be
included in the fit, scalar
OPTIONAL PKFIT FUNCTION INPUTS:
xyout - if True, return new x and y positions
maxiter - maximum iterations (default = 25)
INPUT-OUTPUT:
scale - the initial estimate of the brightness of the star,
expressed as a fraction of the brightness of the PSF.
Upon return, the final computed value of SCALE will be
passed back to the calling routine.
RETURNS:
errmag - the estimated standard error of the value of SCALE
returned by this routine.
chi - the estimated goodness-of-fit statistic: the ratio
of the observed pixel-to-pixel mean absolute deviation from
the profile fit, to the value expected on the basis of the
noise as determined from Poisson statistics and the
readout noise.
sharp - a goodness-of-fit statistic describing how much broader
the actual profile of the object appears than the
profile of the PSF.
niter - the number of iterations the solution required to achieve
convergence. If NITER = 25, the solution did not converge.
If for some reason a singular matrix occurs during the least-
squares solution, this will be flagged by setting NITER = -1.
EXAMPLE:
from astropy.io import fits as pyfits
from PythonPhot import pkfit_noise as pkfit
# read in the FITS images
image = pyfits.getdata(fits_filename)
noiseim = pyfits.getdata(fits_noise_filename)
          maskim = pyfits.getdata(fits_mask_filename)
# read in the PSF image
psf = pyfits.getdata(psf_filename)
hpsf = pyfits.getheader(psf_filename)
gauss = [hpsf['GAUSS1'],hpsf['GAUSS2'],hpsf['GAUSS3'],hpsf['GAUSS4'],hpsf['GAUSS5']]
# x and y points for PSF fitting
xpos,ypos = np.array([1450,1400]),np.array([1550,1600])
# run 'aper' on x,y coords to get sky values
mag,magerr,flux,fluxerr,sky,skyerr,badflag,outstr = \
aper.aper(image,xpos,ypos,phpadu=1,apr=5,zeropoint=25,
skyrad=[40,50],badpix=[-12000,60000],exact=True)
# load the pkfit class
pk = pkfit.pkfit_class(image,gauss,psf,1,1,noiseim,maskim)
# do the PSF fitting
for x,y,s in zip(xpos,ypos,sky):
errmag,chi,sharp,niter,scale = \
                    pk.pkfit_noise(1,x,y,s,5)
flux = scale*10**(0.4*(25.-hpsf['PSFMAG']))
dflux = errmag*10**(0.4*(25.-hpsf['PSFMAG']))
print('PSF fit to coords %.2f,%.2f gives flux %s +/- %s'%(x,y,flux,dflux))
RESTRICTIONS:
No parameter checking is performed
     REVISION HISTORY:
Adapted from the official DAO version of 1985 January 25
Version 2.0 W. Landsman STX November, 1988
Converted to IDL V5.0 W. Landsman September, 1997
Converted to Python D. Jones January, 2014
"""
import numpy as np
from numpy import sqrt
from scipy import linalg
from . import dao_value
sqrt,where,abs,shape,zeros,array,isnan,\
    arange,matrix,exp,sum,isinf,median,ones = \
    np.sqrt,np.where,np.abs,np.shape,\
    np.zeros,np.array,np.isnan,\
    np.arange,np.matrix,np.exp,\
    np.sum,np.isinf,np.median,np.ones
# note: the builtin bool is used for dtypes below; the old np.bool alias was removed in NumPy 1.24
class pkfit_class:
def __init__(self,image,gauss,psf,
ronois,phpadu,
noise_image,mask_image):
self.f = image
self.gauss = gauss
self.psf = psf
self.fnoise = noise_image
self.fmask = mask_image
self.ronois = ronois
self.phpadu = phpadu
def pkfit_noise(self,scale,x,y,sky,radius,
maxiter=25,
debug=False,debug2=False,
xyout = False):
f = self.f; gauss = self.gauss; psf = self.psf
fnoise = self.fnoise; fmask = self.fmask
if debug2:
import time
tstart = time.time()
if f.dtype != 'float64': f = f.astype('float64')
# psf1d = psf.reshape(shape(psf)[0]**2.)
s = shape(f) #Get array dimensions
nx = s[1] ; ny = s[0] #Initialize a few things for the solution
redo = 0
pkerr = 0.027/(gauss[3]*gauss[4])**2.
clamp = zeros(3) + 1.
dtold = zeros(3)
niter = 0
chiold = 1.
if debug:
print('PKFIT: ITER X Y SCALE ERRMAG CHI SHARP')
loop=True
while loop: #Begin the big least-squares loop
niter = niter+1
if isnan(x) or isnan(y):
scale=np.nan
errmag=np.nan
chi=np.nan
sharp=np.nan
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
ixlo = int(x-radius)
if ixlo < 0: ixlo = 0 #Choose boundaries of subarray containing
iylo = int(y-radius)
            if iylo < 0: iylo = 0 # points inside the fitting radius
ixhi = int(x+radius) +1
if ixhi > (nx-1): ixhi = nx-1
iyhi = int(y+radius) +1
if iyhi > ny-1: iyhi = ny-1
ixx = ixhi-ixlo+1
iyy = iyhi-iylo+1
            dy = arange(iyy) + iylo - y #Y distance vector from stellar centroid
dysq = dy**2
dx = arange(ixx) + ixlo - x
dxsq = dx**2
rsq = zeros([iyy,ixx]) #RSQ - array of squared
for j in range(iyy): rsq[j,:] = (dxsq+dysq[j])/radius**2
# The fitting equation is of the form
#
# Observed brightness =
# SCALE + delta(SCALE) * PSF + delta(Xcen)*d(PSF)/d(Xcen) +
# delta(Ycen)*d(PSF)/d(Ycen)
#
# and is solved for the unknowns delta(SCALE) ( = the correction to
# the brightness ratio between the program star and the PSF) and
# delta(Xcen) and delta(Ycen) ( = corrections to the program star's
# centroid).
#
# The point-spread function is equal to the sum of the integral under
# a two-dimensional Gaussian profile plus a value interpolated from
# a look-up table.
# D. Jones - noise edit from Scolnic
good = where((rsq < 1.) &
(fnoise[iylo:iyhi+1,ixlo:ixhi+1] > 0) &
(fmask[iylo:iyhi+1,ixlo:ixhi+1] == 0))
ngood = len(good[0])
if ngood < 1: ngood = 1
t = zeros([3,ngood])
if not len(good[0]):
scale=np.nan
errmag=np.nan
chi=np.nan
sharp=np.nan
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
dx = dx[good[1]]
dy = dy[good[0]]
model,dvdx,dvdy = dao_value.dao_value(dx, dy, gauss,
psf, #psf1d=psf1d,
deriv=True)#,ps1d=True)
if debug:
print('model created ')
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
t[0,:] = model
sa=shape(dvdx)
if sa[0] > ngood or len(sa) == 0:
scale=0
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
t[1,:] = -scale*dvdx
t[2,:] = -scale*dvdy
fsub = f[iylo:iyhi+1,ixlo:ixhi+1]
fsub = fsub[good[0],good[1]]
# D. Jones - added for noise version from Scolnic
fsubnoise=fnoise[iylo:iyhi+1,ixlo:ixhi+1]
rsq = rsq[good[0],good[1]]
# D. Jones - noise addition from Scolnic
fsubnoise = fsubnoise[good[0],good[1]]
sig=fsubnoise
sigsq = fsubnoise**2.
# D. Jones - added for noise version from Scolnic
# Scolnic Added!!!
#
yx=zeros(1)
yx[0]=sky
skys=yx[0]
sky=skys
df = fsub - scale*model - sky #Residual of the brightness from the PSF fit
# The expected random error in the pixel is the quadratic sum of
# the Poisson statistics, plus the readout noise, plus an estimated
# error of 0.75% of the total brightness for the difficulty of flat-
# fielding and bias-correcting the chip, plus an estimated error of
# of some fraction of the fourth derivative at the peak of the profile,
# to account for the difficulty of accurately interpolating within the
# point-spread function. The fourth derivative of the PSF is
# proportional to H/sigma**4 (sigma is the Gaussian width parameter for
# the stellar core); using the geometric mean of sigma(x) and sigma(y),
# this becomes H/ sigma(x)*sigma(y) **2. The ratio of the fitting
# error to this quantity is estimated from a good-seeing CTIO frame to
# be approximately 0.027 (see definition of PKERR above.)
fpos = (fsub-df) #Raw data - residual = model predicted intensity
fposrow = where(fpos < 0.)[0]
if len(fposrow): fpos[fposrow] = 0
# D. Jones - noise addition from Scolnic - but ronois is never referenced, so I've omitted this
# self.ronois=median(fsubnoise**2.-(fpos/self.phpadu + (0.0075*fpos)**2. + (pkerr*(fpos-skys))**2.))
# D. Jones - noise addition from Scolnic
sig=fsubnoise
sigsq = fsubnoise**2
relerr = df/sig
# SIG is the anticipated standard error of the intensity
# including readout noise, Poisson photon statistics, and an estimate
# of the standard error of interpolating within the PSF.
rhosq = zeros([iyy,ixx])
for j in range(iyy): rhosq[j,:] = (dxsq/gauss[3]**2+dysq[j]/gauss[4]**2)
rhosq = rhosq[good[0],good[1]]
badflag = False
if niter >= 2: #Reject any pixel with 10 sigma residual
badpix = where( abs(relerr/chiold) >= 10. )[0]
nbad = len(badpix)
# scolnic added
sbd=shape(badpix)
sdf=shape(df)
if sbd[0] == sdf[0]:
scale=np.nan
errmag=np.nan
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
if nbad > 0:
# D. Jones - to fix a bug in the original code
goodind = arange(len(rsq))
goodind = item_remove(badpix,goodind)
badflag = True
fsub = item_remove(badpix, fsub)
df = item_remove(badpix,df)
sigsq = item_remove(badpix,sigsq)
sig = item_remove(badpix,sig)
relerr = item_remove(badpix,relerr)
rsq = item_remove(badpix,rsq)
rhosq = item_remove(badpix,rhosq)
fsubnoise = item_remove(badpix,fsubnoise)
                    ngood = ngood - nbad  # subtract the count of rejected pixels, not the index array
wt = 5./(5.+rsq/(1.-rsq))
lilrho = where(rhosq <= 36.)[0] #Include only pixels within 6 sigma of centroid
if not len(lilrho):
scale=np.nan
errmag=np.nan
sharp=np.nan
chi=np.nan
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
rhosq[lilrho] = 0.5*rhosq[lilrho]
dfdsig = exp(-rhosq[lilrho])*(rhosq[lilrho]-1.)
fpos = fsub[lilrho]
fposrow = where(fsub[lilrho]-sky < 0.)[0]
fpos[fposrow] = sky
# FPOS-SKY = raw data minus sky = estimated value of the stellar
# intensity (which presumably is non-negative).
# sig = fpos/self.phpadu + self.ronois + (0.0075*fpos)**2 + (pkerr*(fpos-sky))**2
# D. Jones - noise addition from Scolnic
sig = fsubnoise[lilrho]**2
numer = sum(dfdsig*df[lilrho]/sig)
denom = sum(dfdsig**2/sig)
# Derive the weight of this pixel. First of all, the weight depends
# upon the distance of the pixel from the centroid of the star-- it
# is determined from a function which is very nearly unity for radii
# much smaller than the fitting radius, and which goes to zero for
# radii very near the fitting radius.
chi = sum(wt*abs(relerr))
sumwt = sum(wt)
wt = wt/sigsq #Scale weight to inverse square of expected mean error
if niter >= 2: #Reduce weight of a bad pixel
wt = wt/(1.+(0.4*relerr/chiold)**8)
v = zeros(3) #Compute vector of residuals and the normal matrix.
c = zeros([3,3])
if not badflag:
for kk in range(3):
v[kk] = sum(df*t[kk,:]*wt)
for ll in range(3): c[ll,kk] = sum(t[kk,:]*t[ll,:]*wt)
else:
for kk in range(3):
v[kk] = sum(df*t[kk,goodind]*wt)
for ll in range(3): c[ll,kk] = sum(t[kk,goodind]*t[ll,goodind]*wt)
# Compute the (robust) goodness-of-fit index CHI.
# CHI is pulled toward its expected value of unity before being stored
# in CHIOLD to keep the statistics of a small number of pixels from
# completely dominating the error analysis.
if sumwt > 3.0:
chi = 1.2533*chi*sqrt(1./(sumwt*(sumwt-3.)))
chiold = ((sumwt-3.)*chi+3.)/sumwt
if not isnan(sum(c)):
try:
c = linalg.inv(c) #Invert the normal matrix
except:
scale=np.nan
errmag=np.nan
chi=np.nan
sharp=np.nan
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
dt = matrix(v)*c #Compute parameter corrections
dt = array(dt)[0]
# In the beginning, the brightness of the star will not be permitted
# to change by more than two magnitudes per iteration (that is to say,
# if the estimate is getting brighter, it may not get brighter by
# more than 525% per iteration, and if it is getting fainter, it may
# not get fainter by more than 84% per iteration). The x and y
# coordinates of the centroid will be allowed to change by no more
# than one-half pixel per iteration. Any time that a parameter
# correction changes sign, the maximum permissible change in that
# parameter will be reduced by a factor of 2.
div = where( dtold*dt < -1.e-38)[0]
nbad = len(div)
if nbad > 0: clamp[div] = clamp[div]/2.
dtold = dt
adt = abs(dt)
denom2 = ( dt[0]/(5.25*scale))
if denom2 < (-1*dt[0]/(0.84*scale)): denom2 = (-1*dt[0]/(0.84*scale))
scale = scale+dt[0]/(1 + denom2/clamp[0])
x = x + dt[1]/(1.+adt[1]/(0.5*clamp[1]))
y = y + dt[2]/(1.+adt[2]/(0.5*clamp[2]))
redo = 0
# Convergence criteria: if the most recent computed correction to the
# brightness is larger than 0.1% or than 0.05 * sigma(brightness),
# whichever is larger, OR if the absolute change in X or Y is
# greater than 0.01 pixels, convergence has not been achieved.
sharp = 2.*gauss[3]*gauss[4]*numer/(gauss[0]*scale*denom)
errmag = chiold*sqrt(c[0,0])
if ( adt[0] > 0.05*errmag) or (adt[0] > 0.001*scale): redo = 1
if (adt[1] > 0.01) or (adt[2] > 0.01): redo = 1
if debug: print(niter,x,y,scale,errmag,chiold,sharp)
if niter >= 3: loop=False #At least 3 iterations required
# If the solution has gone 25 iterations, OR if the standard error of
# the brightness is greater than 200%, give up.
if (redo and (errmag <= 1.9995) and (niter < maxiter) ): loop=True
# if sharp < -99.999: sharp = -99.999
# elif sharp > 99.999: sharp = 99.999
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
def item_remove(index,array):
mask = ones(array.shape,dtype=bool)
mask[index] = False
smaller_array = array[mask]
return(smaller_array)
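# Illustrative sketch (not part of the original module): item_remove drops the entries
# at the given indices from a numpy array via a boolean mask.
def _demo_item_remove():
    trimmed = item_remove(np.array([0, 2]), np.array([10., 11., 12., 13.]))
    assert np.allclose(trimmed, [11., 13.])
    return trimmed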
|
the-stack_0_2812 | import tensorflow as tf
data_path = 'train.tfrecord'
with tf.Session() as sess:
feature = {"image_raw": tf.FixedLenFeature([], tf.string),
"label": tf.FixedLenFeature([], tf.int64)}
# Create a list of filenames and pass it to a queue
filename_queue = tf.train.string_input_producer([data_path], num_epochs=1)
# Define a reader and read the next record
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
# Decode the record read by the reader
features = tf.parse_single_example(serialized_example, features=feature)
# Convert the image data from string back to the numbers
image = tf.decode_raw(features["image_raw"], tf.float32)
# Cast label data into int32
label = tf.cast(features["label"], tf.int32)
# Reshape image data into the original shape
image = tf.reshape(image, [224, 224, 3])
# Any preprocessing here ...
# Creates batches by randomly shuffling tensors
images, labels = tf.train.shuffle_batch(
[image, label], batch_size=10, capacity=30, num_threads=1, min_after_dequeue=10)
    # Initialize variables (string_input_producer's num_epochs creates a local variable)
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    # Start the queue runners so the input pipeline actually produces data
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    img_batch, lbl_batch = sess.run([images, labels])
    coord.request_stop()
    coord.join(threads)
|
the-stack_0_2815 | import asyncio
import json
import logging
import time
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import traceback
import aiohttp
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
import chia.server.ws_connection as ws # lgtm [py/import-and-import-from]
from chia.consensus.coinbase import create_puzzlehash_for_pk
from chia.consensus.constants import ConsensusConstants
from chia.daemon.keychain_proxy import (
KeychainProxy,
KeychainProxyConnectionFailure,
connect_to_keychain_and_validate,
wrap_local_keychain,
)
from chia.pools.pool_config import PoolWalletConfig, load_pool_config
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.pool_protocol import (
ErrorResponse,
get_current_authentication_token,
GetFarmerResponse,
PoolErrorCode,
PostFarmerPayload,
PostFarmerRequest,
PutFarmerPayload,
PutFarmerRequest,
AuthenticationPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.server.ws_connection import WSChiaConnection
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.bech32m import decode_puzzle_hash
from chia.util.byte_types import hexstr_to_bytes
from chia.util.config import load_config, save_config, config_path_for_filename
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint16, uint32, uint64
from chia.util.keychain import Keychain
from chia.wallet.derive_keys import (
master_sk_to_farmer_sk,
master_sk_to_pool_sk,
master_sk_to_wallet_sk,
find_authentication_sk,
find_owner_sk,
)
from chia.wallet.puzzles.singleton_top_layer import SINGLETON_MOD
singleton_mod_hash = SINGLETON_MOD.get_tree_hash()
log = logging.getLogger(__name__)
UPDATE_POOL_INFO_INTERVAL: int = 3600
UPDATE_POOL_FARMER_INFO_INTERVAL: int = 300
UPDATE_HARVESTER_CACHE_INTERVAL: int = 90
"""
HARVESTER PROTOCOL (FARMER <-> HARVESTER)
"""
class HarvesterCacheEntry:
def __init__(self):
self.data: Optional[dict] = None
self.last_update: float = 0
def bump_last_update(self):
self.last_update = time.time()
def set_data(self, data):
self.data = data
self.bump_last_update()
def needs_update(self, update_interval: int):
return time.time() - self.last_update > update_interval
def expired(self, update_interval: int):
return time.time() - self.last_update > update_interval * 10
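# Illustrative sketch (not part of the original module): a HarvesterCacheEntry is
# considered stale once `update_interval` seconds have passed since the last update,
# and fully expired after ten times that interval.
def _demo_harvester_cache_entry():
    entry = HarvesterCacheEntry()
    entry.set_data({"plots": []})
    assert not entry.needs_update(60)   # freshly updated
    entry.last_update -= 120            # pretend two minutes have passed
    assert entry.needs_update(60)       # stale ...
    assert not entry.expired(60)        # ... but not yet expired (that takes 600 s)
    return entry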
class Farmer:
def __init__(
self,
root_path: Path,
farmer_config: Dict,
pool_config: Dict,
consensus_constants: ConsensusConstants,
local_keychain: Optional[Keychain] = None,
):
self.keychain_proxy: Optional[KeychainProxy] = None
self.local_keychain = local_keychain
self._root_path = root_path
self.config = farmer_config
self.pool_config = pool_config
# Keep track of all sps, keyed on challenge chain signage point hash
self.sps: Dict[bytes32, List[farmer_protocol.NewSignagePoint]] = {}
# Keep track of harvester plot identifier (str), target sp index, and PoSpace for each challenge
self.proofs_of_space: Dict[bytes32, List[Tuple[str, ProofOfSpace]]] = {}
# Quality string to plot identifier and challenge_hash, for use with harvester.RequestSignatures
self.quality_str_to_identifiers: Dict[bytes32, Tuple[str, bytes32, bytes32, bytes32]] = {}
# number of responses to each signage point
self.number_of_responses: Dict[bytes32, int] = {}
# A dictionary of keys to time added. These keys refer to keys in the above 4 dictionaries. This is used
# to periodically clear the memory
self.cache_add_time: Dict[bytes32, uint64] = {}
# Interval to request plots from connected harvesters
self.update_harvester_cache_interval = UPDATE_HARVESTER_CACHE_INTERVAL
self.cache_clear_task: asyncio.Task
self.update_pool_state_task: asyncio.Task
self.constants = consensus_constants
self._shut_down = False
self.server: Any = None
self.state_changed_callback: Optional[Callable] = None
self.log = log
async def ensure_keychain_proxy(self) -> KeychainProxy:
if not self.keychain_proxy:
if self.local_keychain:
self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=self.log)
else:
self.keychain_proxy = await connect_to_keychain_and_validate(self._root_path, self.log)
if not self.keychain_proxy:
raise KeychainProxyConnectionFailure("Failed to connect to keychain service")
return self.keychain_proxy
async def get_all_private_keys(self):
keychain_proxy = await self.ensure_keychain_proxy()
return await keychain_proxy.get_all_private_keys()
async def setup_keys(self):
self.all_root_sks: List[PrivateKey] = [sk for sk, _ in await self.get_all_private_keys()]
self._private_keys = [master_sk_to_farmer_sk(sk) for sk in self.all_root_sks] + [
master_sk_to_pool_sk(sk) for sk in self.all_root_sks
]
if len(self.get_public_keys()) == 0:
error_str = "No keys exist. Please run 'chia keys generate' or open the UI."
raise RuntimeError(error_str)
# This is the farmer configuration
self.farmer_target_encoded = self.config["xfl_target_address"]
self.farmer_target = decode_puzzle_hash(self.farmer_target_encoded)
self.pool_public_keys = [G1Element.from_bytes(bytes.fromhex(pk)) for pk in self.config["pool_public_keys"]]
# This is the self pooling configuration, which is only used for original self-pooled plots
self.pool_target_encoded = self.pool_config["xfl_target_address"]
self.pool_target = decode_puzzle_hash(self.pool_target_encoded)
self.pool_sks_map: Dict = {}
for key in self.get_private_keys():
self.pool_sks_map[bytes(key.get_g1())] = key
assert len(self.farmer_target) == 32
assert len(self.pool_target) == 32
if len(self.pool_sks_map) == 0:
error_str = "No keys exist. Please run 'chia keys generate' or open the UI."
raise RuntimeError(error_str)
# The variables below are for use with an actual pool
# From p2_singleton_puzzle_hash to pool state dict
self.pool_state: Dict[bytes32, Dict] = {}
# From public key bytes to PrivateKey
self.authentication_keys: Dict[bytes, PrivateKey] = {}
# Last time we updated pool_state based on the config file
self.last_config_access_time: uint64 = uint64(0)
self.harvester_cache: Dict[str, Dict[str, HarvesterCacheEntry]] = {}
async def _start(self):
await self.setup_keys()
self.update_pool_state_task = asyncio.create_task(self._periodically_update_pool_state_task())
self.cache_clear_task = asyncio.create_task(self._periodically_clear_cache_and_refresh_task())
def _close(self):
self._shut_down = True
async def _await_closed(self):
await self.cache_clear_task
await self.update_pool_state_task
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
async def on_connect(self, peer: WSChiaConnection):
# Sends a handshake to the harvester
self.state_changed("add_connection", {})
handshake = harvester_protocol.HarvesterHandshake(
self.get_public_keys(),
self.pool_public_keys,
)
if peer.connection_type is NodeType.HARVESTER:
msg = make_msg(ProtocolMessageTypes.harvester_handshake, handshake)
await peer.send_message(msg)
def set_server(self, server):
self.server = server
def state_changed(self, change: str, data: Dict[str, Any]):
if self.state_changed_callback is not None:
self.state_changed_callback(change, data)
def handle_failed_pool_response(self, p2_singleton_puzzle_hash: bytes32, error_message: str):
self.log.error(error_message)
self.pool_state[p2_singleton_puzzle_hash]["pool_errors_24h"].append(
ErrorResponse(uint16(PoolErrorCode.REQUEST_FAILED.value), error_message).to_json_dict()
)
def on_disconnect(self, connection: ws.WSChiaConnection):
self.log.info(f"peer disconnected {connection.get_peer_logging()}")
self.state_changed("close_connection", {})
async def _pool_get_pool_info(self, pool_config: PoolWalletConfig) -> Optional[Dict]:
try:
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(
f"{pool_config.pool_url}/pool_info", ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log)
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"GET /pool_info response: {response}")
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in GET /pool_info {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in GET /pool_info {pool_config.pool_url}, {e}"
)
return None
async def _pool_get_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, authentication_sk: PrivateKey
) -> Optional[Dict]:
assert authentication_sk.get_g1() == pool_config.authentication_public_key
authentication_token = get_current_authentication_token(authentication_token_timeout)
message: bytes32 = std_hash(
AuthenticationPayload(
"get_farmer", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
)
)
signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
get_farmer_params = {
"launcher_id": pool_config.launcher_id.hex(),
"authentication_token": authentication_token,
"signature": bytes(signature).hex(),
}
try:
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(
f"{pool_config.pool_url}/farmer",
params=get_farmer_params,
ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"GET /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in GET /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in GET /farmer {pool_config.pool_url}, {e}"
)
return None
async def _pool_post_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> Optional[Dict]:
post_farmer_payload: PostFarmerPayload = PostFarmerPayload(
pool_config.launcher_id,
get_current_authentication_token(authentication_token_timeout),
pool_config.authentication_public_key,
pool_config.payout_instructions,
None,
)
assert owner_sk.get_g1() == pool_config.owner_public_key
signature: G2Element = AugSchemeMPL.sign(owner_sk, post_farmer_payload.get_hash())
post_farmer_request = PostFarmerRequest(post_farmer_payload, signature)
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_config.pool_url}/farmer",
json=post_farmer_request.to_json_dict(),
ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"POST /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in POST /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in POST /farmer {pool_config.pool_url}, {e}"
)
return None
async def _pool_put_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> Optional[Dict]:
put_farmer_payload: PutFarmerPayload = PutFarmerPayload(
pool_config.launcher_id,
get_current_authentication_token(authentication_token_timeout),
pool_config.authentication_public_key,
pool_config.payout_instructions,
None,
)
assert owner_sk.get_g1() == pool_config.owner_public_key
signature: G2Element = AugSchemeMPL.sign(owner_sk, put_farmer_payload.get_hash())
put_farmer_request = PutFarmerRequest(put_farmer_payload, signature)
try:
async with aiohttp.ClientSession() as session:
async with session.put(
f"{pool_config.pool_url}/farmer",
json=put_farmer_request.to_json_dict(),
ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"PUT /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in PUT /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in PUT /farmer {pool_config.pool_url}, {e}"
)
return None
async def update_pool_state(self):
config = load_config(self._root_path, "config.yaml")
pool_config_list: List[PoolWalletConfig] = load_pool_config(self._root_path)
for pool_config in pool_config_list:
p2_singleton_puzzle_hash = pool_config.p2_singleton_puzzle_hash
try:
authentication_sk: Optional[PrivateKey] = await find_authentication_sk(
self.all_root_sks, pool_config.authentication_public_key
)
if authentication_sk is None:
self.log.error(f"Could not find authentication sk for pk: {pool_config.authentication_public_key}")
continue
if p2_singleton_puzzle_hash not in self.pool_state:
self.authentication_keys[bytes(pool_config.authentication_public_key)] = authentication_sk
self.pool_state[p2_singleton_puzzle_hash] = {
"points_found_since_start": 0,
"points_found_24h": [],
"points_acknowledged_since_start": 0,
"points_acknowledged_24h": [],
"next_farmer_update": 0,
"next_pool_info_update": 0,
"current_points": 0,
"current_difficulty": None,
"pool_errors_24h": [],
"authentication_token_timeout": None,
}
self.log.info(f"Added pool: {pool_config}")
pool_state = self.pool_state[p2_singleton_puzzle_hash]
pool_state["pool_config"] = pool_config
# Skip state update when self pooling
if pool_config.pool_url == "":
continue
enforce_https = config["full_node"]["selected_network"] == "mainnet"
if enforce_https and not pool_config.pool_url.startswith("https://"):
self.log.error(f"Pool URLs must be HTTPS on mainnet {pool_config.pool_url}")
continue
# TODO: Improve error handling below, inform about unexpected failures
if time.time() >= pool_state["next_pool_info_update"]:
# Makes a GET request to the pool to get the updated information
pool_info = await self._pool_get_pool_info(pool_config)
if pool_info is not None and "error_code" not in pool_info:
pool_state["authentication_token_timeout"] = pool_info["authentication_token_timeout"]
pool_state["next_pool_info_update"] = time.time() + UPDATE_POOL_INFO_INTERVAL
# Only update the first time from GET /pool_info, gets updated from GET /farmer later
if pool_state["current_difficulty"] is None:
pool_state["current_difficulty"] = pool_info["minimum_difficulty"]
if time.time() >= pool_state["next_farmer_update"]:
authentication_token_timeout = pool_state["authentication_token_timeout"]
async def update_pool_farmer_info() -> Tuple[Optional[GetFarmerResponse], Optional[bool]]:
# Run a GET /farmer to see if the farmer is already known by the pool
response = await self._pool_get_farmer(
pool_config, authentication_token_timeout, authentication_sk
)
farmer_response: Optional[GetFarmerResponse] = None
farmer_known: Optional[bool] = None
if response is not None:
if "error_code" not in response:
farmer_response = GetFarmerResponse.from_json_dict(response)
if farmer_response is not None:
pool_state["current_difficulty"] = farmer_response.current_difficulty
pool_state["current_points"] = farmer_response.current_points
pool_state["next_farmer_update"] = time.time() + UPDATE_POOL_FARMER_INFO_INTERVAL
else:
farmer_known = response["error_code"] != PoolErrorCode.FARMER_NOT_KNOWN.value
self.log.error(
"update_pool_farmer_info failed: "
f"{response['error_code']}, {response['error_message']}"
)
return farmer_response, farmer_known
if authentication_token_timeout is not None:
farmer_info, farmer_is_known = await update_pool_farmer_info()
if farmer_info is None and farmer_is_known is not None and not farmer_is_known:
# Make the farmer known on the pool with a POST /farmer
owner_sk = await find_owner_sk(self.all_root_sks, pool_config.owner_public_key)
post_response = await self._pool_post_farmer(
pool_config, authentication_token_timeout, owner_sk
)
if post_response is not None and "error_code" not in post_response:
self.log.info(
f"Welcome message from {pool_config.pool_url}: "
f"{post_response['welcome_message']}"
)
# Now we should be able to update the local farmer info
farmer_info, farmer_is_known = await update_pool_farmer_info()
if farmer_info is None and not farmer_is_known:
self.log.error("Failed to update farmer info after POST /farmer.")
# Update the payout instructions on the pool if required
if (
farmer_info is not None
and pool_config.payout_instructions.lower() != farmer_info.payout_instructions.lower()
):
owner_sk = await find_owner_sk(self.all_root_sks, pool_config.owner_public_key)
put_farmer_response_dict = await self._pool_put_farmer(
pool_config, authentication_token_timeout, owner_sk
)
try:
# put_farmer_response: PutFarmerResponse = PutFarmerResponse.from_json_dict(
# put_farmer_response_dict
# )
# if put_farmer_response.payout_instructions:
# self.log.info(
# f"Farmer information successfully updated on the pool {pool_config.pool_url}"
# )
# TODO: Fix Streamable implementation and recover the above.
if put_farmer_response_dict["payout_instructions"]:
self.log.info(
f"Farmer information successfully updated on the pool {pool_config.pool_url}"
)
else:
raise Exception
except Exception:
self.log.error(
f"Failed to update farmer information on the pool {pool_config.pool_url}"
)
else:
self.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Exception in update_pool_state for {pool_config.pool_url}, {e} {tb}")
def get_public_keys(self):
return [child_sk.get_g1() for child_sk in self._private_keys]
def get_private_keys(self):
return self._private_keys
async def get_reward_targets(self, search_for_private_key: bool) -> Dict:
if search_for_private_key:
all_sks = await self.get_all_private_keys()
stop_searching_for_farmer, stop_searching_for_pool = False, False
for i in range(500):
if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
break
for sk, _ in all_sks:
ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1())
if ph == self.farmer_target:
stop_searching_for_farmer = True
if ph == self.pool_target:
stop_searching_for_pool = True
return {
"farmer_target": self.farmer_target_encoded,
"pool_target": self.pool_target_encoded,
"have_farmer_sk": stop_searching_for_farmer,
"have_pool_sk": stop_searching_for_pool,
}
return {
"farmer_target": self.farmer_target_encoded,
"pool_target": self.pool_target_encoded,
}
def set_reward_targets(self, farmer_target_encoded: Optional[str], pool_target_encoded: Optional[str]):
config = load_config(self._root_path, "config.yaml")
if farmer_target_encoded is not None:
self.farmer_target_encoded = farmer_target_encoded
self.farmer_target = decode_puzzle_hash(farmer_target_encoded)
config["farmer"]["xfl_target_address"] = farmer_target_encoded
if pool_target_encoded is not None:
self.pool_target_encoded = pool_target_encoded
self.pool_target = decode_puzzle_hash(pool_target_encoded)
config["pool"]["xfl_target_address"] = pool_target_encoded
save_config(self._root_path, "config.yaml", config)
async def set_payout_instructions(self, launcher_id: bytes32, payout_instructions: str):
for p2_singleton_puzzle_hash, pool_state_dict in self.pool_state.items():
if launcher_id == pool_state_dict["pool_config"].launcher_id:
config = load_config(self._root_path, "config.yaml")
new_list = []
for list_element in config["pool"]["pool_list"]:
if hexstr_to_bytes(list_element["launcher_id"]) == bytes(launcher_id):
list_element["payout_instructions"] = payout_instructions
new_list.append(list_element)
config["pool"]["pool_list"] = new_list
save_config(self._root_path, "config.yaml", config)
# Force a GET /farmer which triggers the PUT /farmer if it detects the changed instructions
pool_state_dict["next_farmer_update"] = 0
return
self.log.warning(f"Launcher id: {launcher_id} not found")
async def generate_login_link(self, launcher_id: bytes32) -> Optional[str]:
for pool_state in self.pool_state.values():
pool_config: PoolWalletConfig = pool_state["pool_config"]
if pool_config.launcher_id == launcher_id:
authentication_sk: Optional[PrivateKey] = await find_authentication_sk(
self.all_root_sks, pool_config.authentication_public_key
)
if authentication_sk is None:
self.log.error(f"Could not find authentication sk for pk: {pool_config.authentication_public_key}")
continue
assert authentication_sk.get_g1() == pool_config.authentication_public_key
authentication_token_timeout = pool_state["authentication_token_timeout"]
authentication_token = get_current_authentication_token(authentication_token_timeout)
message: bytes32 = std_hash(
AuthenticationPayload(
"get_login", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
)
)
signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
return (
pool_config.pool_url
+ f"/login?launcher_id={launcher_id.hex()}&authentication_token={authentication_token}"
f"&signature={bytes(signature).hex()}"
)
return None
async def update_cached_harvesters(self) -> bool:
# First remove outdated cache entries
self.log.debug(f"update_cached_harvesters cache entries: {len(self.harvester_cache)}")
remove_hosts = []
for host, host_cache in self.harvester_cache.items():
remove_peers = []
for peer_id, peer_cache in host_cache.items():
# If the peer cache is expired it means the harvester didn't respond for too long
if peer_cache.expired(self.update_harvester_cache_interval):
remove_peers.append(peer_id)
for key in remove_peers:
del host_cache[key]
if len(host_cache) == 0:
self.log.debug(f"update_cached_harvesters remove host: {host}")
remove_hosts.append(host)
for key in remove_hosts:
del self.harvester_cache[key]
# Now query each harvester and update caches
updated = False
for connection in self.server.get_connections(NodeType.HARVESTER):
cache_entry = await self.get_cached_harvesters(connection)
if cache_entry.needs_update(self.update_harvester_cache_interval):
self.log.debug(f"update_cached_harvesters update harvester: {connection.peer_node_id}")
cache_entry.bump_last_update()
response = await connection.request_plots(
harvester_protocol.RequestPlots(), timeout=self.update_harvester_cache_interval
)
if response is not None:
if isinstance(response, harvester_protocol.RespondPlots):
new_data: Dict = response.to_json_dict()
if cache_entry.data != new_data:
updated = True
self.log.debug(f"update_cached_harvesters cache updated: {connection.peer_node_id}")
else:
self.log.debug(f"update_cached_harvesters no changes for: {connection.peer_node_id}")
cache_entry.set_data(new_data)
else:
self.log.error(
f"Invalid response from harvester:"
f"peer_host {connection.peer_host}, peer_node_id {connection.peer_node_id}"
)
else:
self.log.error(
f"Harvester '{connection.peer_host}/{connection.peer_node_id}' did not respond: "
f"(version mismatch or time out {UPDATE_HARVESTER_CACHE_INTERVAL}s)"
)
return updated
async def get_cached_harvesters(self, connection: WSChiaConnection) -> HarvesterCacheEntry:
host_cache = self.harvester_cache.get(connection.peer_host)
if host_cache is None:
host_cache = {}
self.harvester_cache[connection.peer_host] = host_cache
node_cache = host_cache.get(connection.peer_node_id.hex())
if node_cache is None:
node_cache = HarvesterCacheEntry()
host_cache[connection.peer_node_id.hex()] = node_cache
return node_cache
async def get_harvesters(self) -> Dict:
harvesters: List = []
for connection in self.server.get_connections(NodeType.HARVESTER):
self.log.debug(f"get_harvesters host: {connection.peer_host}, node_id: {connection.peer_node_id}")
cache_entry = await self.get_cached_harvesters(connection)
if cache_entry.data is not None:
harvester_object: dict = dict(cache_entry.data)
harvester_object["connection"] = {
"node_id": connection.peer_node_id.hex(),
"host": connection.peer_host,
"port": connection.peer_port,
}
harvesters.append(harvester_object)
else:
self.log.debug(f"get_harvesters no cache: {connection.peer_host}, node_id: {connection.peer_node_id}")
return {"harvesters": harvesters}
async def _periodically_update_pool_state_task(self):
time_slept: uint64 = uint64(0)
config_path: Path = config_path_for_filename(self._root_path, "config.yaml")
while not self._shut_down:
# Every time the config file changes, read it to check the pool state
stat_info = config_path.stat()
if stat_info.st_mtime > self.last_config_access_time:
# If we detect the config file changed, refresh private keys first just in case
self.all_root_sks: List[PrivateKey] = [sk for sk, _ in await self.get_all_private_keys()]
self.last_config_access_time = stat_info.st_mtime
await self.update_pool_state()
time_slept = uint64(0)
elif time_slept > 60:
await self.update_pool_state()
time_slept = uint64(0)
time_slept += 1
await asyncio.sleep(1)
async def _periodically_clear_cache_and_refresh_task(self):
time_slept: uint64 = uint64(0)
refresh_slept = 0
while not self._shut_down:
try:
if time_slept > self.constants.SUB_SLOT_TIME_TARGET:
now = time.time()
removed_keys: List[bytes32] = []
for key, add_time in self.cache_add_time.items():
if now - float(add_time) > self.constants.SUB_SLOT_TIME_TARGET * 3:
self.sps.pop(key, None)
self.proofs_of_space.pop(key, None)
self.quality_str_to_identifiers.pop(key, None)
self.number_of_responses.pop(key, None)
removed_keys.append(key)
for key in removed_keys:
self.cache_add_time.pop(key, None)
time_slept = uint64(0)
log.debug(
f"Cleared farmer cache. Num sps: {len(self.sps)} {len(self.proofs_of_space)} "
f"{len(self.quality_str_to_identifiers)} {len(self.number_of_responses)}"
)
time_slept += 1
refresh_slept += 1
# Periodically refresh GUI to show the correct download/upload rate.
if refresh_slept >= 30:
self.state_changed("add_connection", {})
refresh_slept = 0
# Handles harvester plots cache cleanup and updates
if await self.update_cached_harvesters():
self.state_changed("new_plots", await self.get_harvesters())
except Exception:
log.error(f"_periodically_clear_cache_and_refresh_task failed: {traceback.format_exc()}")
await asyncio.sleep(1)
|
the-stack_0_2816 | import os
import tensorflow as tf
from datetime import datetime
import sys
sys.path.append('')
import helper
# Load the dataset
(train_images, train_labels), (test_images, test_labels) = helper.load_data()
# Normalize pixel values to the [0, 1] range (flattening is done by the model's Flatten layer)
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define a model
def create_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation='softmax')
])
    # NOTE: 'categorical_crossentropy' assumes helper.load_data() returns one-hot labels;
    # use 'sparse_categorical_crossentropy' if the labels are integer class ids.
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
return model
# Create a model
model = create_model()
# Display the model's architecture
model.summary()
# Train
logdir="SimpleANN/logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
model.fit(train_images,
train_labels,
epochs=10,
batch_size=32,
validation_data=(test_images, test_labels),
callbacks=[tensorboard_callback])
# Evaluate the model
loss, acc = model.evaluate(test_images, test_labels, verbose=2)
print("Accuracy: {:5.2f}%".format(100*acc))
# Save the model
model.save('SimpleANN/simple_ann_model.model')
|
the-stack_0_2821 | #!/usr/bin/env python
import tensorflow as tf
import math
import os
import numpy as np
# Define parameters
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('epoch_number', None, 'Number of epochs to run trainer.')
flags.DEFINE_integer("batch_size", 1024,
"indicates batch size in a single gpu, default is 1024")
flags.DEFINE_integer("thread_number", 1, "Number of thread to read data")
flags.DEFINE_integer("min_after_dequeue", 100,
"indicates min_after_dequeue of shuffle queue")
flags.DEFINE_string("output_dir", "./tensorboard/",
"indicates training output")
flags.DEFINE_string("model", "deep",
"Model to train, option model: deep, linear")
flags.DEFINE_string("optimizer", "sgd", "optimizer to import")
flags.DEFINE_integer('hidden1', 10, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 20, 'Number of units in hidden layer 2.')
flags.DEFINE_integer('steps_to_validate', 10,
'Steps to validate and print loss')
flags.DEFINE_string("mode", "train",
"Option mode: train, train_from_scratch, inference")
# For distributed
tf.app.flags.DEFINE_string("ps_hosts", "",
"Comma-separated list of hostname:port pairs")
tf.app.flags.DEFINE_string("worker_hosts", "",
"Comma-separated list of hostname:port pairs")
tf.app.flags.DEFINE_string("job_name", "", "One of 'ps', 'worker'")
tf.app.flags.DEFINE_integer("task_index", 0, "Index of task within the job")
# Hyperparameters
learning_rate = FLAGS.learning_rate
epoch_number = FLAGS.epoch_number
thread_number = FLAGS.thread_number
batch_size = FLAGS.batch_size
min_after_dequeue = FLAGS.min_after_dequeue
capacity = thread_number * batch_size + min_after_dequeue
FEATURE_SIZE = 9
# Read serialized examples from filename queue
def read_and_decode(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
"label": tf.FixedLenFeature([], tf.float32),
"features": tf.FixedLenFeature([FEATURE_SIZE], tf.float32),
})
label = features["label"]
features = features["features"]
return label, features
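# Illustrative sketch (not part of the original script): writing one record in the
# schema expected by read_and_decode above, i.e. a scalar float "label" plus a
# 9-element float "features" vector. `writer` is assumed to be a
# tf.python_io.TFRecordWriter opened by the caller.
def _demo_write_example(writer, label, features):
    example = tf.train.Example(features=tf.train.Features(feature={
        "label": tf.train.Feature(float_list=tf.train.FloatList(value=[float(label)])),
        "features": tf.train.Feature(float_list=tf.train.FloatList(value=[float(v) for v in features])),
    }))
    writer.write(example.SerializeToString())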
def main(_):
ps_hosts = FLAGS.ps_hosts.split(",")
worker_hosts = FLAGS.worker_hosts.split(",")
cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
server = tf.train.Server(cluster,
job_name=FLAGS.job_name,
task_index=FLAGS.task_index)
if FLAGS.job_name == "ps":
server.join()
elif FLAGS.job_name == "worker":
with tf.device(tf.train.replica_device_setter(
worker_device="/job:worker/task:%d" % FLAGS.task_index,
cluster=cluster)):
# Read TFRecords files
filename_queue = tf.train.string_input_producer(
tf.train.match_filenames_once("../data/cancer/cancer_train.csv.tfrecords"),
num_epochs=epoch_number)
label, features = read_and_decode(filename_queue)
batch_labels, batch_features = tf.train.shuffle_batch(
[label, features],
batch_size=batch_size,
num_threads=thread_number,
capacity=capacity,
min_after_dequeue=min_after_dequeue)
validate_filename_queue = tf.train.string_input_producer(
tf.train.match_filenames_once(
"../data/cancer/cancer_test.csv.tfrecords"),
num_epochs=epoch_number)
validate_label, validate_features = read_and_decode(
validate_filename_queue)
validate_batch_labels, validate_batch_features = tf.train.shuffle_batch(
[validate_label, validate_features],
batch_size=batch_size,
num_threads=thread_number,
capacity=capacity,
min_after_dequeue=min_after_dequeue)
# Define the model
input_units = FEATURE_SIZE
hidden1_units = FLAGS.hidden1
hidden2_units = FLAGS.hidden2
output_units = 2
# Hidden 1
weights1 = tf.Variable(
tf.truncated_normal([input_units, hidden1_units]),
dtype=tf.float32,
name='weights')
biases1 = tf.Variable(
tf.truncated_normal([hidden1_units]),
name='biases',
dtype=tf.float32)
hidden1 = tf.nn.relu(tf.matmul(batch_features, weights1) + biases1)
# Hidden 2
weights2 = tf.Variable(
tf.truncated_normal([hidden1_units, hidden2_units]),
dtype=tf.float32,
name='weights')
biases2 = tf.Variable(
tf.truncated_normal([hidden2_units]),
name='biases',
dtype=tf.float32)
hidden2 = tf.nn.relu(tf.matmul(hidden1, weights2) + biases2)
# Linear
weights3 = tf.Variable(
tf.truncated_normal([hidden2_units, output_units]),
dtype=tf.float32,
name='weights')
biases3 = tf.Variable(
tf.truncated_normal([output_units]),
name='biases',
dtype=tf.float32)
logits = tf.matmul(hidden2, weights3) + biases3
batch_labels = tf.to_int64(batch_labels)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=batch_labels)
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
if FLAGS.optimizer == "sgd":
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
            else:
                # MomentumOptimizer needs an explicit momentum value; 0.9 here is an
                # assumed common default, not part of the original script
                optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = optimizer.minimize(loss, global_step=global_step)
# Compute accuracy
accuracy_hidden1 = tf.nn.relu(tf.matmul(validate_batch_features,
weights1) + biases1)
accuracy_hidden2 = tf.nn.relu(tf.matmul(accuracy_hidden1, weights2)
+ biases2)
accuracy_logits = tf.matmul(accuracy_hidden2, weights3) + biases3
validate_softmax = tf.nn.softmax(accuracy_logits)
validate_batch_labels = tf.to_int64(validate_batch_labels)
correct_prediction = tf.equal(
tf.argmax(validate_softmax, 1), validate_batch_labels)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Compute auc
validate_batch_labels = tf.cast(validate_batch_labels, tf.int32)
num_labels = 2
sparse_labels = tf.reshape(validate_batch_labels, [-1, 1])
derived_size = tf.shape(validate_batch_labels)[0]
indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
concated = tf.concat(axis=1, values=[indices, sparse_labels])
outshape = tf.stack([derived_size, num_labels])
new_validate_batch_labels = tf.sparse_to_dense(concated, outshape,
1.0, 0.0)
_, auc_op = tf.contrib.metrics.streaming_auc(
validate_softmax, new_validate_batch_labels)
# Define inference op
inference_features = tf.placeholder("float", [None, 9])
inference_hidden1 = tf.nn.relu(tf.matmul(inference_features,
weights1) + biases1)
inference_hidden2 = tf.nn.relu(tf.matmul(inference_hidden1,
weights2) + biases2)
inference_logits = tf.matmul(inference_hidden2, weights3) + biases3
inference_softmax = tf.nn.softmax(inference_logits)
inference_op = tf.argmax(inference_softmax, 1)
saver = tf.train.Saver()
steps_to_validate = FLAGS.steps_to_validate
init_op = tf.global_variables_initializer()
tf.summary.scalar('loss', loss)
tf.summary.scalar('accuracy', accuracy)
tf.summary.scalar('auc', auc_op)
summary_op = tf.summary.merge_all()
sv = tf.train.Supervisor(is_chief=(FLAGS.task_index == 0),
logdir="./checkpoint/",
init_op=init_op,
summary_op=summary_op,
saver=saver,
global_step=global_step,
save_model_secs=60)
with sv.managed_session(server.target) as sess:
step = 0
while not sv.should_stop() and step < 1000000:
# Get coordinator and run queues to read data
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
try:
while not coord.should_stop():
# Run train op
_, loss_value, step = sess.run([train_op, loss,
global_step])
if step % steps_to_validate == 0:
accuracy_value, auc_value, summary_value = sess.run(
[accuracy, auc_op, summary_op])
print(
"Step: {}, loss: {}, accuracy: {}, auc: {}".format(
step, loss_value, accuracy_value,
auc_value))
except tf.errors.OutOfRangeError:
print("Done training after reading all data")
finally:
coord.request_stop()
# Wait for threads to exit
coord.join(threads)
if __name__ == "__main__":
tf.app.run()
|
the-stack_0_2823 | import urllib
import requests
from appi.debugging.log_handling import setup_logger, close_log_handlers
class APIController:
def __init__(self, base_url, table_name, log_filename):
self.base_url = base_url
self.table_name = table_name
self.column_url = self.base_url + f"api/v1/resources/{self.table_name}/columns"
self.add_url = self.base_url + "add_animal"
self.filter_url = self.base_url + f"api/v1/resources/{self.table_name}"
self.delete_url = self.base_url + f"delete_animal"
self.log = setup_logger(log_filename)
self.columns = self.get_columns()
self.log.info(f"Available columns are {self.columns}")
def __del__(self):
close_log_handlers(self.log)
def get_columns(self):
columns, success = self.make_and_log_http_call(self.column_url, "Getting table columns")
return columns
def query_data(self, filter):
payload = urllib.parse.urlencode(filter)
data, success = self.make_and_log_http_call(self.filter_url, f"Getting data for {filter}", payload=payload)
return data
def add_data(self, data):
self.make_and_log_http_call(self.add_url, f"Adding data: {data}", json=False, payload=data)
def delete_data(self, name):
self.make_and_log_http_call(self.delete_url, f"Deleting data: {name}", json=False, payload=name)
def make_and_log_http_call(self, url, code_str, json=True, payload=None):
self.log.info("Calling: " + str(url))
try:
if payload:
response = requests.get(url, params=payload)
else:
response = requests.get(url)
self.log.info(code_str + " code: " + str(response.status_code))
self.log.debug(code_str + " text: " + response.text)
if json:
return response.json(), response.status_code == 200
else:
return response, response.status_code == 200
except Exception as e:
self.log.warning("Request failed")
self.log.debug(str(e))
return None, False
def main():
animals_controller = APIController("http://localhost/", "animals", "animals_controller.log")
data = {"name": "Bob", "animal_type": "Dog", "age": 1, "price": 30}
animals_controller.add_data(data)
data = {"name": "Lars", "animal_type": "Horse", "age": 2, "price": 10}
animals_controller.add_data(data)
data = {"name": "Helen", "animal_type": "Cat", "age": 3, "price": 20}
animals_controller.add_data(data)
data = {"name": "Max", "animal_type": "Fish", "age": 4, "price": 25}
animals_controller.add_data(data)
filter = {"price": {"gte": 20}}
print(animals_controller.query_data(filter))
filter = {"name": "Max", "price": {"gte": 20, "lt": 30}}
print(animals_controller.query_data(filter))
animals_controller.delete_data({"name": "Max"})
print(animals_controller.query_data(filter))
books_controller = APIController("http://localhost/", "books", "books_controller.log")
filter = {"title": "Ancillary Justice"}
print(books_controller.query_data(filter))
if __name__ == '__main__':
main()
|
the-stack_0_2826 | import numpy as np
from lazy import lazy
from .cec2013lsgo import CEC2013LSGO
class F7(CEC2013LSGO):
"""
    7-nonseparable, 1-separable Shifted Schwefel's Function
"""
def __init__(
self,
*,
rng_seed: int = 42,
use_shuffle: bool = False,
verbose: int = 0
):
super(F7, self).__init__(
rng_seed=rng_seed,
use_shuffle=use_shuffle,
verbose=verbose,
)
@property
    def genome_size(self) -> int:
        return 1_000
@lazy
def lower_bound(self) -> np.ndarray:
lower_bound = [-100] * self.genome_size
return np.array(lower_bound)
@lazy
def upper_bound(self) -> np.ndarray:
upper_bound = [100] * self.genome_size
return np.array(upper_bound)
def _evaluate(self, x: np.ndarray) -> np.ndarray:
out_of_bounds = self.check_bounds(x)
out_of_bounds = np.any(out_of_bounds, axis=1)
x = x - self.xopt
fitness = 0
ldim = 0
for i in range(len(self.s)):
f: np.ndarray
z = x[:, self.p[ldim:ldim + self.s[i]] - 1].T
ldim += self.s[i]
if self.s[i] == 25:
f = self.R25
elif self.s[i] == 50:
f = self.R50
elif self.s[i] == 100:
f = self.R100
f = f @ z
f = self._schwefel(f.T)
fitness += self.w[i] * f
fitness += self._sphere(x[:, self.p[ldim:] - 1])
fitness[out_of_bounds] = None
return fitness
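# Illustrative usage sketch (assumes the CEC2013 LSGO shift/rotation support data used
# by the CEC2013LSGO base class is available): evaluate a small random population on F7.
# The direct call to _evaluate here stands in for whatever public entry point the base
# class exposes.
def _demo_f7(population_size=2, seed=0):
    problem = F7(rng_seed=42)
    rng = np.random.default_rng(seed)
    population = rng.uniform(problem.lower_bound, problem.upper_bound,
                             size=(population_size, problem.genome_size))
    return problem._evaluate(population)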
|
the-stack_0_2827 | #CODE3---First concatenating the required files into one based on the specfic attribute columns from SMPDB database and protein network---
#Python 3.6.5 |Anaconda, Inc.
import sys
import glob
import errno
import csv
path = '/home/16AT72P01/Excelra/SMPDB/smpdb_proteins/*.csv'
files = glob.glob(path)
with open("/home/16AT72P01/Excelra/SMPDB/output/metabolic_proteins.csv" ,'w') as csv_file:
writer = csv.writer(csv_file,quotechar='"', delimiter='\t', quoting=csv.QUOTE_ALL, skipinitialspace=True)
writer.writerow(["SMPDB_ID","PATHWAY_NAME","PATHWAY_LABEL","PROTEIN_NAME","GENE_NAME","LOCUS","UNIPROT_ID","GENEBANK_ID"])
for name in files:
try:
with open(name) as f1:
#reader = csv.reader(f1)
print(name)
reader = csv.DictReader(f1, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)
print(reader)
for row in reader:
print(row)
writer.writerow([row["SMPDB ID"],row["Pathway Name"],row["Pathway Subject"],row["Protein Name"],row["Gene Name"],row["Locus"],row["Uniprot ID"],row["GenBank ID"]]) #writer.writerow([row[0],row[1],row[2],row[3],row[4],row[8],row[6]])
f1.close()
except IOError as exc:
if exc.errno != errno.EISDIR: # Do not fail if a directory is found, just ignore it.
raise # Propagate other kinds of IOError.
csv_file.close()
|
the-stack_0_2828 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
import crypten
import torch
from ..util import ConfigBase
__all__ = [
"exp",
"log",
"reciprocal",
"inv_sqrt",
"sqrt",
"_eix",
"cossin",
"cos",
"sin",
"sigmoid",
"tanh",
"erf",
"softmax",
"log_softmax",
]
@dataclass
class ApproxConfig:
"""
A configuration object for use by the MPCTensor.
"""
# exponential function
exp_iterations: int = 8
# reciprocal configuration
reciprocal_method: str = "NR"
reciprocal_nr_iters: int = 10
reciprocal_log_iters: int = 1
reciprocal_all_pos: bool = False
reciprocal_initial: any = None
# sqrt configuration
sqrt_nr_iters: int = 3
sqrt_nr_initial: any = None
# sigmoid / tanh configuration
sigmoid_tanh_method: str = "reciprocal"
sigmoid_tanh_terms: int = 32
# log configuration
log_iterations: int = 2
log_exp_iterations: int = 8
log_order: int = 8
# trigonometry configuration
trig_iterations: int = 10
# error function configuration:
erf_iterations: int = 8
# Global config
config = ApproxConfig()
def set_config(new_config):
global config
config = new_config
class ConfigManager(ConfigBase):
r"""
Use this to temporarily change a value in the `approximations.config` object. The
following sets `config.exp_iterations` to `10` for one function
invocation and then sets it back to the previous value::
with ConfigManager("exp_iterations", 10):
tensor.exp()
"""
def __init__(self, *args):
super().__init__(config, *args)
# Iterative methods:
def exp(self):
"""Approximates the exponential function using a limit approximation:
.. math::
exp(x) = \lim_{n \\rightarrow \\infty} (1 + x / n) ^ n
Here we compute exp by choosing n = 2 ** d for some large d equal to
`iterations`. We then compute (1 + x / n) once and square `d` times.
Set the number of iterations for the limit approximation with
config.exp_iterations.
""" # noqa: W605
result = 1 + self.div(2 ** config.exp_iterations)
for _ in range(config.exp_iterations):
result = result.square()
return result
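# Illustrative sanity check (not part of the library): exp() only uses tensor ops
# that plain torch tensors also provide (div, add, square), so the limit
# approximation can be eyeballed directly, e.g.
#   exp(torch.tensor([1.0]))  ->  (1 + 1/256) ** 256  ~= 2.713   (math.e ~= 2.718)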
def log(self, input_in_01=False):
r"""
Approximates the natural logarithm using 8th order modified
Householder iterations. This approximation is accurate within 2% relative
error on [0.0001, 250].
Iterations are computed by: :math:`h = 1 - x * exp(-y_n)`
.. math::
y_{n+1} = y_n - \sum_k^{order}\frac{h^k}{k}
Args:
input_in_01 (bool) : Allows a user to indicate that the input is in the domain [0, 1],
causing the function optimize for this domain. This is useful for computing
log-probabilities for entropy functions.
We shift the domain of convergence by a constant :math:`a` using the following identity:
.. math::
\ln{u} = \ln {au} - \ln{a}
Since the domain of convergence for CrypTen's log() function is approximately [1e-4, 1e2],
we can set :math:`a=100`.
Configuration parameters:
iterations (int): number of Householder iterations for the approximation
exp_iterations (int): number of iterations for limit approximation of exp
order (int): number of polynomial terms used (order of Householder approx)
"""
if input_in_01:
return log(self.mul(100)) - 4.605170
# Initialization to a decent estimate (found by qualitative inspection):
# ln(x) = x/120 - 20exp(-2x - 1.0) + 3.0
iterations = config.log_iterations
exp_iterations = config.log_exp_iterations
order = config.log_order
term1 = self.div(120)
term2 = exp(self.mul(2).add(1.0).neg()).mul(20)
y = term1 - term2 + 3.0
# 8th order Householder iterations
with ConfigManager("exp_iterations", exp_iterations):
for _ in range(iterations):
h = 1 - self * exp(-y)
y -= h.polynomial([1 / (i + 1) for i in range(order)])
return y
def reciprocal(self, input_in_01=False):
"""
Args:
input_in_01 (bool) : Allows a user to indicate that the input is in the range [0, 1],
causing the function optimize for this range. This is useful for improving
the accuracy of functions on probabilities (e.g. entropy functions).
Methods:
'NR' : `Newton-Raphson`_ method computes the reciprocal using iterations
of :math:`x_{i+1} = (2x_i - self * x_i^2)` and uses
:math:`3*exp(1 - 2x) + 0.003` as an initial guess by default
'log' : Computes the reciprocal of the input from the observation that:
:math:`x^{-1} = exp(-log(x))`
Configuration params:
reciprocal_method (str): One of 'NR' or 'log'.
reciprocal_nr_iters (int): determines the number of Newton-Raphson iterations to run
for the `NR` method
reciprocal_log_iters (int): determines the number of Householder
iterations to run when computing logarithms for the `log` method
reciprocal_all_pos (bool): determines whether all elements of the
input are known to be positive, which optimizes the step of
computing the sign of the input.
reciprocal_initial (tensor): sets the initial value for the
Newton-Raphson method. By default, this will be set to :math:
`3*exp(-(x-.5)) + 0.003` as this allows the method to converge over
a fairly large domain
.. _Newton-Raphson:
https://en.wikipedia.org/wiki/Newton%27s_method
"""
if input_in_01:
with ConfigManager("reciprocal_all_pos", True):
rec = reciprocal(self.mul(64)).mul(64)
return rec
method = config.reciprocal_method
if not config.reciprocal_all_pos:
sgn = self.sign()
pos = sgn * self
with ConfigManager("reciprocal_all_pos", True):
return sgn * reciprocal(pos)
if method == "NR":
if config.reciprocal_initial is None:
# Initialization to a decent estimate (found by qualitative inspection):
# 1/x = 3exp(1 - 2x) + 0.003
result = 3 * (1 - 2 * self).exp() + 0.003
else:
result = config.reciprocal_initial
for _ in range(config.reciprocal_nr_iters):
if hasattr(result, "square"):
result += result - result.square().mul_(self)
else:
result = 2 * result - result * result * self
return result
elif method == "log":
with ConfigManager("log_iters", config.reciprocal_log_iters):
return exp(-log(self))
else:
raise ValueError(f"Invalid method {method} given for reciprocal function")
def inv_sqrt(self):
"""
Computes the inverse square root of the input using the Newton-Raphson method.
Configuration params:
sqrt_nr_iters (int): determines the number of Newton-Raphson iterations to run.
sqrt_nr_initial (tensor): sets the initial value for the Newton-Raphson iterations.
By default, this will be set to allow the method to converge over a
fairly large domain.
.. _Newton-Raphson:
https://en.wikipedia.org/wiki/Fast_inverse_square_root#Newton's_method
"""
# Initialize using decent approximation
if config.sqrt_nr_initial is None:
y = exp(self.div(2).add(0.2).neg()).mul(2.2).add(0.2)
y -= self.div(1024)
else:
y = config.sqrt_nr_initial
# Newton Raphson iterations for inverse square root
for _ in range(config.sqrt_nr_iters):
y = y.mul_(3 - self * y.square()).div_(2)
return y
def sqrt(self):
"""
Computes the square root of the input by computing its inverse square root using
the Newton-Raphson method and multiplying by the input.
Configuration params:
sqrt_nr_iters (int): determines the number of Newton-Raphson iterations to run
sqrt_initial (tensor): sets the initial value for the inverse square root
Newton-Raphson iterations. By default, this will be set to allow convergence
over a fairly large domain.
.. _Newton-Raphson:
https://en.wikipedia.org/wiki/Fast_inverse_square_root#Newton's_method
"""
return inv_sqrt(self).mul_(self)
def _eix(self):
"""Computes e^(i * self) where i is the imaginary unit.
Returns (Re{e^(i * self)}, Im{e^(i * self)} = cos(self), sin(self)
"""
iterations = config.trig_iterations
re = 1
im = self.div(2 ** iterations)
# First iteration uses knowledge that `re` is public and = 1
re -= im.square()
im *= 2
# Compute (a + bi)^2 -> (a^2 - b^2) + (2ab)i `iterations` times
for _ in range(iterations - 1):
a2 = re.square()
b2 = im.square()
im = im.mul_(re)
im._tensor *= 2
re = a2 - b2
return re, im
def cossin(self):
"""Computes cosine and sine of input via exp(i * x).
Args:
iterations (int): for approximating exp(i * x)
"""
return self._eix()
def cos(self):
"""Computes the cosine of the input using cos(x) = Re{exp(i * x)}
Args:
iterations (int): for approximating exp(i * x)
"""
return cossin(self)[0]
def sin(self):
"""Computes the sine of the input using sin(x) = Im{exp(i * x)}
Args:
iterations (int): for approximating exp(i * x)
"""
return cossin(self)[1]
# Logistic Functions
def sigmoid(self):
"""Computes the sigmoid function using the following definition
.. math::
\sigma(x) = (1 + e^{-x})^{-1}
If a valid method is given, this function will compute sigmoid
using that method:
"chebyshev" - computes tanh via Chebyshev approximation with
truncation and uses the identity:
.. math::
\sigma(x) = \frac{1}{2}tanh(\frac{x}{2}) + \frac{1}{2}
"reciprocal" - computes sigmoid using :math:`1 + e^{-x}` and computing
the reciprocal
""" # noqa: W605
method = config.sigmoid_tanh_method
if method == "chebyshev":
tanh_approx = tanh(self.div(2))
return tanh_approx.div(2) + 0.5
elif method == "reciprocal":
ltz = self._ltz()
sign = 1 - 2 * ltz
pos_input = self.mul(sign)
denominator = pos_input.neg().exp().add(1)
with ConfigManager(
"exp_iterations",
9,
"reciprocal_nr_iters",
3,
"reciprocal_all_pos",
True,
"reciprocal_initial",
0.75,
):
pos_output = denominator.reciprocal()
result = pos_output.where(1 - ltz, 1 - pos_output)
# TODO: Support addition with different encoder scales
# result = pos_output + ltz - 2 * pos_output * ltz
return result
else:
raise ValueError(f"Unrecognized method {method} for sigmoid")
def tanh(self):
r"""Computes the hyperbolic tangent function using the identity
.. math::
tanh(x) = 2\sigma(2x) - 1
If a valid method is given, this function will compute tanh using that method:
"chebyshev" - computes tanh via Chebyshev approximation with truncation.
.. math::
tanh(x) = \sum_{j=1}^terms c_{2j - 1} P_{2j - 1} (x / maxval)
where c_i is the ith Chebyshev series coefficient and P_i is ith polynomial.
The approximation is truncated to +/-1 outside [-1, 1].
Args:
terms (int): highest degree of Chebyshev polynomials.
Must be even and at least 6.
"""
method = config.sigmoid_tanh_method
terms = config.sigmoid_tanh_terms
if method == "reciprocal":
return self.mul(2).sigmoid().mul(2).sub(1)
elif method == "chebyshev":
coeffs = crypten.common.util.chebyshev_series(torch.tanh, 1, terms)[1::2]
tanh_polys = _chebyshev_polynomials(self, terms)
tanh_polys_flipped = (
tanh_polys.unsqueeze(dim=-1).transpose(0, -1).squeeze(dim=0)
)
out = tanh_polys_flipped.matmul(coeffs)
# truncate outside [-maxval, maxval]
return out.hardtanh()
else:
raise ValueError(f"Unrecognized method {method} for tanh")
def _chebyshev_polynomials(self, terms):
r"""Evaluates odd degree Chebyshev polynomials at x
Chebyshev Polynomials of the first kind are defined as
.. math::
P_0(x) = 1, P_1(x) = x, P_n(x) = 2 P_{n - 1}(x) - P_{n-2}(x)
Args:
self (MPCTensor): input at which polynomials are evaluated
terms (int): highest degree of Chebyshev polynomials.
Must be even and at least 6.
Returns:
MPCTensor of polynomials evaluated at self of shape `(terms, *self)`
"""
if terms % 2 != 0 or terms < 6:
raise ValueError("Chebyshev terms must be even and >= 6")
polynomials = [self.clone()]
y = 4 * self.square() - 2
z = y - 1
polynomials.append(z.mul(self))
for k in range(2, terms // 2):
next_polynomial = y * polynomials[k - 1] - polynomials[k - 2]
polynomials.append(next_polynomial)
return crypten.stack(polynomials)
def erf(tensor):
"""
Approximates the error function of the input tensor using a Taylor approximation.
"""
output = tensor.clone()
for n in range(1, config.erf_iterations + 1):
multiplier = ((-1) ** n) / (math.factorial(n) * (2 * n + 1))
output = output.add(tensor.pos_pow(2 * n + 1).mul(multiplier))
return output.mul(2.0 / math.sqrt(math.pi))
# NOTE: This approximation is not unstable for large tensor values.
def softmax(self, dim, **kwargs):
"""Compute the softmax of a tensor's elements along a given dimension"""
# 0-d case
if self.dim() == 0:
assert dim == 0, "Improper dim argument"
return self.new(torch.ones_like((self.data)))
if self.size(dim) == 1:
return self.new(torch.ones_like(self.data))
maximum_value = self.max(dim, keepdim=True)[0]
logits = self - maximum_value
numerator = logits.exp()
with ConfigManager("reciprocal_all_pos", True):
inv_denominator = numerator.sum(dim, keepdim=True).reciprocal()
return numerator * inv_denominator
def log_softmax(self, dim, **kwargs):
"""Applies a softmax followed by a logarithm.
While mathematically equivalent to log(softmax(x)), doing these two
operations separately is slower, and numerically unstable. This function
uses an alternative formulation to compute the output and gradient correctly.
"""
# 0-d case
if self.dim() == 0:
assert dim == 0, "Improper dim argument"
return self.new(torch.zeros((), device=self.device))
if self.size(dim) == 1:
return self.new(torch.zeros_like(self.data))
maximum_value = self.max(dim, keepdim=True)[0]
logits = self - maximum_value
normalize_term = exp(logits).sum(dim, keepdim=True)
result = logits - normalize_term.log()
return result
|
the-stack_0_2831 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
import os
import shutil
import subprocess
"""Copy Special exercise
"""
# +++your code here+++
# Write functions and modify main() to call them
def is_special(filename):
    return bool(re.search(r'__\w+__', filename))
def sp_files_list(dir):
filenames = os.listdir(dir)
result = []
for filename in filenames:
if is_special(filename):
result.append(os.path.abspath(os.path.join(dir, filename)))
return result
def to_dir(orig, dst):
# copy all the special files located in the directories in "orig"
#to the "dst" directory
if not os.path.exists(dst): os.makedirs(dst)
for dir in orig:
filenames = os.listdir(dir)
for filename in filenames:
if is_special(filename):
shutil.copy(os.path.abspath(os.path.join(dir, filename)), dst)
def to_zip(zip_file, args):
files = []
for arg in args:
files += sp_files_list(arg)
cmd = '7z a ' + zip_file + ' ' + ' '.join(files)
print('Command I\'m about to do: ' + cmd)
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
shell=True)
except subprocess.CalledProcessError as exc:
print("Error: ", exc.returncode, exc.output)
else:
print(output)
def main():
# This basic command line argument parsing code is provided.
# Add code to call your functions below.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print("usage: [--todir dir][--tozip zipfile] dir [dir ...]")
sys.exit(1)
# todir and tozip are either set from command line
# or left as the empty string.
# The args array is left just containing the dirs.
    todir = ''
    if args[0] == '--todir':
        todir = args[1]
        del args[0:2]
    tozip = ''
    if args and args[0] == '--tozip':
        tozip = args[1]
        del args[0:2]
    if len(args) == 0:
        print("error: must specify one or more dirs")
        sys.exit(1)
    if todir:
        to_dir(args, todir)
    if tozip:
        to_zip(tozip, args)
    if not todir and not tozip:
        for arg in args:
            print('\n'.join(sp_files_list(arg)))
if __name__ == "__main__":
main()
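# Example invocations (illustrative paths):
#   python copyspecial.py dir1 dir2
#   python copyspecial.py --todir /tmp/special dir1
#   python copyspecial.py --tozip special.zip dir1 dir2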
|
the-stack_0_2833 | import asyncio
import logging
import os
from watchdog.events import FileModifiedEvent, PatternMatchingEventHandler
from watchdog.observers import Observer
from watchdog.utils.patterns import match_any_paths
class WatcherHandler(PatternMatchingEventHandler):
"""Watcher class to observe changes in all specified files in the folder"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.observed = {}
def match_file(self, path):
"""Check if the path matches the patterns and folder"""
return match_any_paths(
[path],
included_patterns=self.patterns,
excluded_patterns=self.ignore_patterns,
case_sensitive=self.case_sensitive,
)
def get_size(self, path):
return self.observed.get(path, 0)
def set_size(self, path, size):
self.observed[path] = size
def read_initial_size(self, path):
"""Read the initial size of the file to not send the entire file on start"""
if os.path.isfile(path):
if self.match_file(path):
self.observed[path] = os.path.getsize(path)
return
for dirname, _, files in os.walk(path):
for file in files:
path = os.path.join(dirname, file)
if self.match_file(path):
self.observed[path] = os.path.getsize(path)
def on_new_line(self, path, line):
"""Send the line to the logging"""
logging.getLogger(path).info(line)
def on_modified(self, event):
"""React on modified files and append the new lines"""
if not isinstance(event, FileModifiedEvent):
return
size = os.path.getsize(event.src_path)
# Get the already observed lines
current = self.get_size(event.src_path)
if current >= size:
self.set_size(event.src_path, size)
return
# Open the file and seek to the last position
with open(event.src_path) as fp:
fp.seek(current)
# Read line by line and only use full lines
for line in fp:
stripped = line.strip()
if line.endswith("\n") and stripped:
current += len(line)
self.on_new_line(event.src_path, stripped)
# Update the position
self.set_size(event.src_path, current)
async def watch(path, **kwargs):
"""Watch on files of in a directory and log new lines"""
handler = WatcherHandler(**kwargs)
handler.read_initial_size(path)
observer = Observer()
observer.schedule(handler, path=path, recursive=True)
observer.start()
try:
while observer.is_alive():
await asyncio.sleep(0.1)
finally:
observer.stop()
observer.join()
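if __name__ == "__main__":
    # Illustrative runner, not part of the original module: tail every *.log file
    # under ./logs (path and pattern are assumptions for this example) and echo
    # new lines through the root logger.
    logging.basicConfig(level=logging.INFO, format="%(name)s: %(message)s")
    asyncio.run(watch("./logs", patterns=["*.log"]))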
|
the-stack_0_2834 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) 2016, Tomoyuki Sakurai <[email protected]>
#
# This file is NOT part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import time
import os
import hashlib
import tempfile
DOCUMENTATION = '''
---
module: logrotate
short_description: Manage config files for logrotate
description:
    - Creates a config file for I(logrotate)
version_added: "1.0"
options:
name:
description:
- Unique name of the config
required: true
default: null
files:
description:
      - An array of paths to the files that the I(logrotate) program should rotate
required: true
default: null
state:
description:
- The state of the logrotate config
required: true
default: null
choices: [ "present", "absent" ]
frequency:
description:
- rotate frequency
required: false
choices: [ "daily", "weekly", "monthly", "yearly" ]
default: "daily
rotate:
description:
- number of times before being removed
required: false
default: 30
compress:
description:
- compress the rotated file if true
required: false
choices: [ "yes", "no" ]
default: true
compresscmd:
description:
- command to use to compress log files
required: false
default: False
uncompresscmd:
description:
- command to use to uncompress log files
required: false
default: False
  compressext:
description:
- extension to use on compressed logfiles, if compression is enabled
required: false
default: False
delaycompress:
description:
- delay compress
required: false
choices: [ "yes", "no" ]
default: true
copytruncate:
description:
- Truncate the original log file to zero size in place after creating a copy, instead of moving the old log file and optionally creating a new one.
required: false
choices: [ "yes", "no" ]
default: false
missingok:
description:
- proceed without a warning if the file to rotate is missing
required: false
choices: [ "yes", "no" ]
default: true
sharedscripts:
description:
- postrotate commands for multiple files are run only once
required: false
choices: [ "yes", "no" ]
default: false
notifempty:
description:
- do not rotate the log if it is empty
required: false
choices: [ "yes", "no" ]
default: no
postrotate:
description:
- an array of commands to run in postrotate
required: false
default: null
config_dir:
description:
- base directory of config files
required: false
default: /etc/logrotate.d
create:
description:
- Immediately after rotation (before the postrotate script is run) the log file is created
required: false
default: False
nocreate:
description:
- disable 'create' option
required: false
default: False
su:
description:
- Rotate log files set under this user and group instead of using default user/group
required: false
default: False
maxsize:
description:
- Log files are rotated when they grow bigger than size bytes even before the additionally specified time interval
required: false
default: False
minsize:
description:
- Log files are rotated when they grow bigger than size bytes, but not before the additionally specified time interval
required: false
default: False
size:
description:
- Log files are rotated only if they grow bigger then size bytes
required: false
default: False
requirements: [ ]
author: "Tomoyuki Sakurai <[email protected]>"
'''
EXAMPLES = '''
# rotate /var/log/messages and maillog daily, keep 30 files and restart syslog only once
- logrotate: name=syslog state=present frequency=daily rotate=30 files=[ "/var/log/messages", "/var/log/maillog" ] postrotate="kill -HUP `cat /var/run/syslog.pid`" sharedscripts=yes
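# remove a previously created config (illustrative; name, files and state are the required arguments)
- logrotate: name=syslog state=absent files=[ "/var/log/messages", "/var/log/maillog" ]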
'''
def validate_config(module):
"""Validate a file given with logrotate -d file"""
name = module.params.get('name')
contents = generate_config(module)
fd, temppath = tempfile.mkstemp(prefix='ansible-logrotate')
fh = os.fdopen(fd, 'w')
fh.write(contents)
fh.close()
LOGROTATE = module.get_bin_path('logrotate', True)
# read not only the file to validate but the default configuration because
# some defaults are needed to validate, notably `su` directive
default_config_path = get_default_config_path(module)
rc, out, err = module.run_command('%s -d %s %s' % (LOGROTATE, default_config_path, temppath), check_rc=True)
os.unlink(temppath)
if rc != 0:
module.fail_json(msg='failed to validate config for: %s' % (name), stdout=out, stderr=err)
def get_default_config_path(module):
"""Look for the default configuration and return the first one found"""
locations = [
# Linux
'/etc/logrotate.conf',
# FreeBSD
'/usr/local/etc/logrotate.conf'
]
found = ''
for path in locations:
if os.path.exists(path):
found = path
break
if not found:
module.fail_json(msg='cannot find logrotate.conf in default locations')
return found
def get_config_path(module):
return os.path.join(module.params.get('config_dir'), module.params.get('name'))
def create_config(module):
with open(get_config_path(module), 'w') as f:
f.write(generate_config(module))
def generate_config(module):
files = "\n".join(module.params.get('files'))
options = []
if module.params.get('compress'):
options += [ 'compress' ]
if module.params.get('compresscmd'):
options += [ 'compresscmd %s' % module.params.get('compresscmd') ]
if module.params.get('uncompresscmd'):
options += [ 'uncompresscmd %s' % module.params.get('uncompresscmd') ]
if module.params.get('compressext'):
options += [ 'compressext %s' % module.params.get('compressext') ]
if module.params.get('delaycompress'):
options += [ 'delaycompress' ]
if module.params.get('missingok'):
options += [ 'missingok' ]
if module.params.get('notifempty'):
options += [ 'notifempty' ]
if module.params.get('copytruncate'):
options += [ 'copytruncate' ]
if module.params.get('create'):
options += [ 'create %s' % module.params.get('create') ]
if module.params.get('nocreate'):
options += [ 'nocreate' ]
if module.params.get('su'):
options += [ 'su %s' % module.params.get('su') ]
if module.params.get('maxsize'):
options += [ 'maxsize %s' % module.params.get('maxsize') ]
if module.params.get('minsize'):
options += [ 'minsize %s' % module.params.get('minsize') ]
if module.params.get('size'):
options += [ 'size %s' % module.params.get('size') ]
options += [ '%s' % module.params.get('frequency') ]
options += [ 'rotate %s' % module.params.get('rotate') ]
if module.params.get('postrotate'):
if module.params.get('sharedscripts'):
options += [ 'sharedscripts' ]
options += [ 'postrotate' ]
options += map(lambda x: " %s" % x, module.params.get('postrotate'))
options += [ 'endscript' ]
TEMPLATE = """\
# Generated by ansible logrotate module
{files_text}
{{
{option_text}
}}
"""
return TEMPLATE.format(files_text=files, option_text='\n '.join(options))
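# For illustration, with the module defaults and files=['/var/log/messages'] the
# generated config looks roughly like:
#   # Generated by ansible logrotate module
#   /var/log/messages
#   {
#       compress
#       delaycompress
#       missingok
#       daily
#       rotate 30
#   }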
def is_identical(a, b):
a_hash = hashlib.sha1(a.encode('utf-8')).hexdigest()
b_hash = hashlib.sha1(b.encode('utf-8')).hexdigest()
return a_hash == b_hash
def create_if_absent(module):
# XXX disable validation. recent logrotate fails when duplicate log entry
# for a log file is found.
# validate_config(module)
path = get_config_path(module)
if os.path.isfile(path):
data = None
with open(path) as f:
data = f.read()
if is_identical(data, generate_config(module)):
module.exit_json(changed=False, result="Success")
else:
create_config(module)
module.exit_json(changed=True, result="Created")
else:
create_config(module)
module.exit_json(changed=True, result="Created")
def remove_if_present(module):
path = get_config_path(module)
if os.path.isfile(path):
os.remove(path)
module.exit_json(changed=True, result="Removed")
else:
module.exit_json(changed=False, result="Success")
def main():
arg_spec = dict(
name = dict(required=True),
files = dict(required=True, type='list'),
state = dict(required=True, choices=['present', 'absent']),
frequency = dict(required=False, default='daily', choices=['daily', 'weekly', 'monthly', 'yearly']),
rotate = dict(required=False, default=30, type='int'),
compress = dict(required=False, default='yes', type='bool'),
compresscmd = dict(required=False),
uncompresscmd = dict(required=False),
compressext = dict(required=False),
copytruncate = dict(required=False, default='no', type='bool'),
delaycompress = dict(required=False, default='yes', type='bool'),
missingok = dict(required=False, default='yes', type='bool'),
sharedscripts = dict(required=False, default='yes', type='bool'),
notifempty = dict(required=False, default='no', type='bool'),
postrotate = dict(required=False, type='list'),
config_dir = dict(required=False, default='/etc/logrotate.d'),
create = dict(required=False),
nocreate = dict(required=False, type='bool'),
su = dict(required=False),
maxsize = dict(required=False),
minsize = dict(required=False),
size = dict(required=False)
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=False)
if module.check_mode:
module.exit_json(changed=True)
else:
if module.params.get('state') == 'present':
create_if_absent(module)
elif module.params.get('state') == 'absent':
remove_if_present(module)
else:
            module.fail_json(msg='Unknown state: %s' % module.params.get('state'))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
the-stack_0_2835 | """
Author: Ce Li
Tool for generator
"""
import copy
import math
import numpy as np
from tensorflow.keras import utils as np_utils
EPSILON = 1e-7
class Generator(np_utils.Sequence):
def __init__(self, x, x_authors, y, b_size, max_papers, max_seq, max_authors):
self.x, self.x_authors, self.y = x, x_authors, y
self.batch_size = b_size
self.max_papers = max_papers
self.max_seq = max_seq
self.max_authors = max_authors
self.author_emb_dim = 128
self.paper_emb_dim = 256
def __len__(self):
        return math.ceil(len(self.x) / self.batch_size)  # ceil, so the final partial batch is included
def __getitem__(self, idx):
b_x = copy.deepcopy(
self.x[idx*self.batch_size:(idx+1)*self.batch_size])
b_x_authors = copy.deepcopy(
self.x_authors[idx * self.batch_size:(idx + 1) * self.batch_size])
b_y = copy.deepcopy(self.y[idx*self.batch_size:(idx+1)*self.batch_size])
for temp in b_x_authors:
for tem in temp:
for te in tem:
while len(te) < self.max_authors:
te.append(np.zeros(self.author_emb_dim))
while len(tem) < self.max_seq:
tem.append(np.zeros(shape=(self.max_authors, self.author_emb_dim)))
while len(temp) < self.max_papers:
temp.append(np.zeros(shape=(self.max_seq, self.max_authors, self.author_emb_dim)))
b_x_authors = np.array(b_x_authors)
for temp in b_x:
for tem in temp:
while len(tem) < self.max_seq:
tem.append(np.zeros(tem[0].shape))
while len(temp) < self.max_papers:
temp.append(np.zeros(shape=(self.max_seq, self.paper_emb_dim)))
b_x = np.array(b_x)
return (b_x, b_x_authors), np.array(b_y)
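if __name__ == "__main__":
    # Minimal smoke test with synthetic embeddings (illustrative only; real inputs
    # come from an upstream paper/author embedding pipeline).
    x = [[[np.zeros(256)]]]            # 1 sample -> 1 paper -> sequence of 1 paper embedding
    x_authors = [[[[np.zeros(128)]]]]  # 1 sample -> 1 paper -> sequence of 1 -> 1 author embedding
    y = [0]
    gen = Generator(x, x_authors, y, b_size=1, max_papers=2, max_seq=3, max_authors=4)
    (b_x, b_x_authors), b_y = gen[0]
    print(b_x.shape, b_x_authors.shape, b_y.shape)  # (1, 2, 3, 256) (1, 2, 3, 4, 128) (1,)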
|
the-stack_0_2836 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Reference circuits used by the tests."""
from qiskit.circuit import QuantumCircuit, QuantumRegister, ClassicalRegister
class ReferenceCircuits:
"""Container for reference circuits used by the tests."""
@staticmethod
def bell():
"""Return a Bell circuit."""
qr = QuantumRegister(2, name='qr')
cr = ClassicalRegister(2, name='qc')
qc = QuantumCircuit(qr, cr, name='bell')
qc.h(qr[0])
qc.cx(qr[0], qr[1])
qc.measure(qr, cr)
return qc
@staticmethod
def bell_no_measure():
"""Return a Bell circuit."""
qr = QuantumRegister(2, name='qr')
qc = QuantumCircuit(qr, name='bell_no_measure')
qc.h(qr[0])
qc.cx(qr[0], qr[1])
return qc
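if __name__ == "__main__":
    # Illustrative only: draw both reference circuits when the module is run directly.
    print(ReferenceCircuits.bell())
    print(ReferenceCircuits.bell_no_measure())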
|
the-stack_0_2837 | from argparse import Namespace
import asyncio
import logging
import signal
import sys
from typing import Type
from evm.chains.mainnet import (
MAINNET_NETWORK_ID,
)
from evm.chains.ropsten import (
ROPSTEN_NETWORK_ID,
)
from evm.db.backends.base import BaseDB
from evm.db.backends.level import LevelDB
from p2p.service import BaseService
from trinity.exceptions import (
AmbigiousFileSystem,
MissingPath,
)
from trinity.chains import (
initialize_data_dir,
is_data_dir_initialized,
serve_chaindb,
)
from trinity.console import (
console,
)
from trinity.cli_parser import (
parser,
)
from trinity.config import (
ChainConfig,
)
from trinity.extensibility import (
PluginManager,
)
from trinity.extensibility.events import (
TrinityStartupEvent
)
from trinity.plugins.registry import (
ENABLED_PLUGINS
)
from trinity.utils.ipc import (
wait_for_ipc,
kill_process_gracefully,
)
from trinity.utils.logging import (
setup_trinity_stdout_logging,
setup_trinity_file_and_queue_logging,
with_queued_logging,
)
from trinity.utils.mp import (
ctx,
)
from trinity.utils.profiling import (
setup_cprofiler,
)
from trinity.utils.version import (
construct_trinity_client_identifier,
)
PRECONFIGURED_NETWORKS = {MAINNET_NETWORK_ID, ROPSTEN_NETWORK_ID}
TRINITY_HEADER = (
"\n"
" ______ _ _ __ \n"
" /_ __/____(_)___ (_) /___ __\n"
" / / / ___/ / __ \/ / __/ / / /\n"
" / / / / / / / / / / /_/ /_/ / \n"
" /_/ /_/ /_/_/ /_/_/\__/\__, / \n"
" /____/ "
)
TRINITY_AMBIGIOUS_FILESYSTEM_INFO = (
"Could not initialize data directory\n\n"
" One of these conditions must be met:\n"
" * HOME environment variable set\n"
" * XDG_TRINITY_ROOT environment variable set\n"
" * TRINITY_DATA_DIR environment variable set\n"
" * --data-dir command line argument is passed\n"
"\n"
" In case the data directory is outside of the trinity root directory\n"
" Make sure all paths are pre-initialized as Trinity won't attempt\n"
" to create directories outside of the trinity root directory\n"
)
def main() -> None:
plugin_manager = setup_plugins()
plugin_manager.amend_argparser_config(parser)
args = parser.parse_args()
log_level = getattr(logging, args.log_level.upper())
if args.network_id not in PRECONFIGURED_NETWORKS:
raise NotImplementedError(
"Unsupported network id: {0}. Only the ropsten and mainnet "
"networks are supported.".format(args.network_id)
)
logger, formatter, handler_stream = setup_trinity_stdout_logging(log_level)
try:
chain_config = ChainConfig.from_parser_args(args)
except AmbigiousFileSystem:
exit_because_ambigious_filesystem(logger)
if not is_data_dir_initialized(chain_config):
# TODO: this will only work as is for chains with known genesis
# parameters. Need to flesh out how genesis parameters for custom
# chains are defined and passed around.
try:
initialize_data_dir(chain_config)
except AmbigiousFileSystem:
exit_because_ambigious_filesystem(logger)
except MissingPath as e:
msg = (
"\n"
"It appears that {} does not exist.\n"
"Trinity does not attempt to create directories outside of its root path\n"
"Either manually create the path or ensure you are using a data directory\n"
"inside the XDG_TRINITY_ROOT path"
).format(e.path)
logger.error(msg)
sys.exit(1)
logger, log_queue, listener = setup_trinity_file_and_queue_logging(
logger,
formatter,
handler_stream,
chain_config,
log_level
)
display_launch_logs(chain_config)
# if console command, run the trinity CLI
if args.subcommand == 'attach':
run_console(chain_config, not args.vanilla_shell)
sys.exit(0)
# start the listener thread to handle logs produced by other processes in
# the local logger.
listener.start()
extra_kwargs = {
'log_queue': log_queue,
'log_level': log_level,
'profile': args.profile,
}
# First initialize the database process.
database_server_process = ctx.Process(
target=run_database_process,
args=(
chain_config,
LevelDB,
),
kwargs=extra_kwargs,
)
networking_process = ctx.Process(
target=launch_node,
args=(args, chain_config, ),
kwargs=extra_kwargs,
)
# start the processes
database_server_process.start()
logger.info("Started DB server process (pid=%d)", database_server_process.pid)
wait_for_ipc(chain_config.database_ipc_path)
networking_process.start()
logger.info("Started networking process (pid=%d)", networking_process.pid)
try:
if args.subcommand == 'console':
run_console(chain_config, not args.vanilla_shell)
else:
networking_process.join()
except KeyboardInterrupt:
# When a user hits Ctrl+C in the terminal, the SIGINT is sent to all processes in the
# foreground *process group*, so both our networking and database processes will terminate
# at the same time and not sequentially as we'd like. That shouldn't be a problem but if
# we keep getting unhandled BrokenPipeErrors/ConnectionResetErrors like reported in
# https://github.com/ethereum/py-evm/issues/827, we might want to change the networking
# process' signal handler to wait until the DB process has terminated before doing its
# thing.
# Notice that we still need the kill_process_gracefully() calls here, for when the user
# simply uses 'kill' to send a signal to the main process, but also because they will
        # perform a non-graceful shutdown if the process takes too long to terminate.
logger.info('Keyboard Interrupt: Stopping')
kill_process_gracefully(database_server_process, logger)
logger.info('DB server process (pid=%d) terminated', database_server_process.pid)
# XXX: This short sleep here seems to avoid us hitting a deadlock when attempting to
# join() the networking subprocess: https://github.com/ethereum/py-evm/issues/940
import time; time.sleep(0.2) # noqa: E702
kill_process_gracefully(networking_process, logger)
logger.info('Networking process (pid=%d) terminated', networking_process.pid)
def run_console(chain_config: ChainConfig, vanilla_shell_args: bool) -> None:
logger = logging.getLogger("trinity")
try:
console(chain_config.jsonrpc_ipc_path, use_ipython=vanilla_shell_args)
except FileNotFoundError as err:
logger.error(str(err))
sys.exit(1)
@setup_cprofiler('run_database_process')
@with_queued_logging
def run_database_process(chain_config: ChainConfig, db_class: Type[BaseDB]) -> None:
base_db = db_class(db_path=chain_config.database_dir)
serve_chaindb(chain_config, base_db)
def exit_because_ambigious_filesystem(logger: logging.Logger) -> None:
logger.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)
sys.exit(1)
async def exit_on_signal(service_to_exit: BaseService) -> None:
loop = asyncio.get_event_loop()
sigint_received = asyncio.Event()
for sig in [signal.SIGINT, signal.SIGTERM]:
# TODO also support Windows
loop.add_signal_handler(sig, sigint_received.set)
await sigint_received.wait()
try:
await service_to_exit.cancel()
finally:
loop.stop()
@setup_cprofiler('launch_node')
@with_queued_logging
def launch_node(args: Namespace, chain_config: ChainConfig) -> None:
NodeClass = chain_config.node_class
# Temporary hack: We setup a second instance of the PluginManager.
# The first instance was only to configure the ArgumentParser whereas
# for now, the second instance that lives inside the networking process
# performs the bulk of the work. In the future, the PluginManager
# should probably live in its own process and manage whether plugins
# run in the shared plugin process or spawn their own.
plugin_manager = setup_plugins()
plugin_manager.broadcast(TrinityStartupEvent(
args,
chain_config
))
node = NodeClass(plugin_manager, chain_config)
run_service_until_quit(node)
def display_launch_logs(chain_config: ChainConfig) -> None:
logger = logging.getLogger('trinity')
logger.info(TRINITY_HEADER)
logger.info(construct_trinity_client_identifier())
logger.info("Trinity DEBUG log file is created at %s", str(chain_config.logfile_path))
def run_service_until_quit(service: BaseService) -> None:
loop = asyncio.get_event_loop()
asyncio.ensure_future(exit_on_signal(service))
asyncio.ensure_future(service.run())
loop.run_forever()
loop.close()
def setup_plugins() -> PluginManager:
plugin_manager = PluginManager()
# TODO: Implement auto-discovery of plugins based on some convention/configuration scheme
plugin_manager.register(ENABLED_PLUGINS)
return plugin_manager
|
the-stack_0_2838 | # Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ********************************************* AutodiffComposition *************************************************
"""
.. _AutodiffComposition_Overview:
Overview
--------
AutodiffComposition is a subclass of `Composition <Composition>` that trains models more quickly by integrating with
`PyTorch <https://pytorch.org/>`_, a popular machine learning library. In situations with training,
AutodiffComposition is used similarly to a Composition, but is much faster.
The `xor_in_psyneulink_and_pytorch.py` script (in the Scripts folder of the PsyNeuLink source code) is an example of
how to use AutodiffComposition. The script also gives a comparison of runtimes.
.. _AutodiffComposition_Creation:
Creating an AutodiffComposition
-------------------------------
An AutodiffComposition can be created by calling the constructor, and then adding `Components <Component>` using the
add methods of its parent class `Composition`. The most unusual argument in initialization is
**param_init_from_pnl**, which controls how parameters are set up for the internal PyTorch representation of the model.
If set to True:
* Only weight parameters that correspond to projections are created. No trainable bias parameters are created, as they
don’t exist for the autodiff composition’s mechanisms.
* The weight parameters are initialized to be perfectly identical to the autodiff composition’s projections - the
tensor of the parameter object corresponding to a particular projection not only has the same dimensionality as
the projection’s matrix, it has the same exact values.
* Pytorch functions representing mechanism functions incorporate their scalar, untrainable biases.
If set to False:
* Both weight parameters corresponding to projections and trainable bias parameters for mechanisms are created.
* Weight parameters have the same dimensionality as their corresponding projections. However, their values - and those
of the bias parameters - are sampled from a random distribution.
* Though trainable biases now exist, Pytorch functions representing mechanism functions still incorporate their scalar,
untrainable biases.
.. warning:: Do not add Mechanisms or Projections to, or remove them from, an AutodiffComposition after it has been run for the
first time. Unlike an ordinary Composition, AutodiffComposition does not support this functionality.
Two other initialization arguments are **patience** and **min_delta**, which allow the model to halt training early. The
model tracks how many consecutive 'bad' epochs of training have failed to significantly reduce the model's loss. Once
this number exceeds **patience**, the model stops training. By default, **patience** is ``None``, and the model
will train for the number of specified epochs and will not stop training early.
**min_delta** defines what threshold counts as a significant reduction in model loss. By default it is zero, in which
case any reduction in loss counts as a significant reduction. If **min_delta** is large and positive, the model tends to
stop earlier because it views fewer epochs as 'good'.
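For example (illustrative settings, not recommended defaults), the following AutodiffComposition stops training as soon
as three consecutive epochs each fail to reduce the average loss by at least 0.001:
>>> import psyneulink as pnl
>>> my_early_stopping_autodiff = pnl.AutodiffComposition(patience=3, min_delta=0.001)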
**learning_rate** specifies the learning rate for this run (default 0.001), which is passed to the **optimizer**
argument. **optimizer** specifies the kind of optimizer used in training. The current options are 'sgd' (the default)
or 'adam'.
**learning_enabled** specifies whether the AutodiffComposition should learn, and it defaults to True. When True, the
AutodiffComposition trains using PyTorch, as normal. When False, the AutodiffComposition acts like an ordinary
Composition, which does not change weights. `learning_enabled <AutodiffComposition.learning_enabled>` is also an
attribute, which can be toggled between runs.
**optimizer_type** specifies the kind of optimizer used in training. The current options are 'sgd' (which is the
default) or 'adam'.
**weight_decay** specifies the L2 penalty (which discourages large weights) used by the optimizer. This defaults to 0.
**loss_spec** specifies the loss function for training. It can be a string or a PyTorch loss function. The current
options for strings are 'mse' (the default), 'crossentropy', 'l1', 'nll', 'poissonnll', and 'kldiv'. These refer to
Mean Squared Error, Cross Entropy, L1 loss, Negative Log Likelihood loss, Poisson Negative Log Likelihood, and KL
Divergence respectively. The **loss_spec** can also be any PyTorch loss function, including a custom-written one. For a
list of PyTorch loss functions, see https://pytorch.org/docs/stable/nn.html#loss-functions. For information on writing
a custom loss function, see https://pytorch.org/docs/master/notes/extending.html and
https://discuss.pytorch.org/t/build-your-own-loss-function-in-pytorch/235
**randomize** specifies whether the order of inputs will be randomized in each epoch. (In each epoch, all inputs are
run, but if **randomize** is True then the order in which inputs are within an epoch is random.)
**refresh_losses** specifies whether the `losses` attribute is refreshed for each call to `run()`. If False, the losses
of each run are appended to the `losses` attribute. If True, the losses of each run overwrite `losses` instead.
**force_no_retain_graph** defaults to False. If True, the AutodiffComposition does not use the `retain_graph` option
when computing PyTorch gradient. This can reduce memory usage. However, it breaks recurrent networks, so it should only
be used when the network is not recurrent.
.. note::
    The AutodiffComposition detaches all gradients between epochs of training. For more information on why this is done,
see `here <bit.ly/2t2ZkyR>` or `here <bit.ly/2RGuMNg>`.
.. _AutodiffComposition_Structure:
Structure
---------
AutodiffComposition has all the attributes of its parent class `Composition`, in addition to several more.
The `target_CIM <AutodiffComposition.target_CIM>` attribute is analogous to the `input_CIM <Composition.input_CIM>` of
any Composition, but instead of providing inputs, provides targets for the AutodiffComposition.
The `pytorch_representation <AutodiffComposition.pytorch_representation>` attribute holds the PyTorch representation
of the PsyNeuLink model that AutodiffComposition contains.
The `losses <AutodiffComposition.losses>` attribute tracks the average loss for each training epoch.
As mentioned above, the `learning_enabled <AutodiffComposition.learning_enabled>` attribute can be toggled to determine
whether the AutodiffComposition learns or whether it executes like an ordinary Composition.
The `optimizer <AutodiffComposition.optimizer>` attribute contains the PyTorch optimizer function used for learning. It
is determined at initialization by the **optimizer_type**, **learning_rate**, and **weight_decay** arguments.
The `loss <AutodiffComposition.loss>` attribute contains the PyTorch loss function used for learning. It is determined
at initialization by the **loss_spec** argument.
.. _AutodiffComposition_Execution:
Execution
---------
Most arguments to AutodiffComposition's `run` or `execute` methods are the same as in a Composition. When
`learning_enabled <AutodiffComposition.learning_enabled>` is False, the arguments are the same, since in this
case the AutodiffComposition executes like a Composition.
However, if `learning_enabled <AutodiffComposition.learning_enabled>` is True, the **inputs** argument
format is different. If `learning_enabled <AutodiffComposition.learning_enabled>` is True, then **inputs** should be a
dictionary with required keys "inputs" and "targets", and optional key "epochs". The value at "inputs" should be a
dictionary relating origin mechanisms to their inputs. The value at "targets" should be a dictionary relating terminal
mechanisms to their inputs. The value at "epochs" is an integer stating the number of epochs of training (i.e. how many
times all inputs and targets are run). It defaults to 1. Here is an example of creating a simple AutodiffComposition
and specifying inputs and targets:
>>> import psyneulink as pnl
>>> # set up PsyNeuLink Components
>>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, size = 3)
>>> my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, size = 2)
>>> my_projection = pnl.MappingProjection(matrix=np.random.randn(3,2),
... sender=my_mech_1,
... receiver=my_mech_2)
>>> # create AutodiffComposition
>>> my_autodiff = pnl.AutodiffComposition()
>>> my_autodiff.add_node(my_mech_1)
>>> my_autodiff.add_node(my_mech_2)
>>> my_autodiff.add_projection(sender=my_mech_1, projection=my_projection, receiver=my_mech_2)
>>> # input specification
>>> my_inputs = {my_mech_1: [[1, 2, 3]]}
>>> my_targets = {my_mech_2: [[4, 5]]}
>>> input_dict = {"inputs": my_inputs, "targets": my_targets, "epochs": 2}
>>> my_autodiff.run(inputs = input_dict)
Logging
-------
Logging currently works differently in AutodiffComposition than in Composition. In an AutodiffComposition, no logging
is done by default, because logging substantially (roughly by 30%) slows down AutodiffComposition. If you wish for all
projection weights and mechanism values to be logged during execution or training of AutodiffComposition, you must
set the **do_logging** argument of the ``run()`` method to ``True``. Logging with AutodiffComposition is slightly hacked
together, so the time and context in the log are not meaningful, only the logged value is meaningful.
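For example, reusing ``my_autodiff`` and ``input_dict`` from the example above, weights and values can be logged with:
>>> my_autodiff.run(inputs=input_dict, do_logging=True)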
Nested Execution
----------------
COMMENT:
Need to add link to docs about nesting ordinary Compositions, once those docs are written.
COMMENT
In general, an AutodiffComposition may be nested inside another Composition, like ordinary Composition nesting. However,
there are a few differences. The input format of an AutodiffComposition with learning enabled is quite unusual. Thus,
when learning is enabled, the AutodiffComposition must be an origin mechanism of the Composition.
.. note::
Like with all nested Compositions, you must call an AutodiffComposition's ``_analyze_graph()`` method
(or execute the AutodiffComposition) before nesting it.
However, when learning is not enabled, AutodiffComposition works just like an ordinary Composition, in theory. Thus, an
AutodiffComposition with learning not enabled receives input in the same format as an ordinary Composition, and can
therefore be placed anywhere in a Composition.
.. note::
Using an AutodiffComposition not as an origin mechanism is currently buggy, and might produce unexpected results.
Below is an example script showing how to nest an AutodiffComposition with learning enabled.
>>> import psyneulink as pnl
>>> # set up PsyNeuLink Components
>>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, size = 3)
>>> my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, size = 2)
>>> my_projection = pnl.MappingProjection(matrix=np.random.randn(3,2),
... sender=my_mech_1,
... receiver=my_mech_2)
>>> # create AutodiffComposition
>>> my_autodiff = pnl.AutodiffComposition()
>>> my_autodiff.add_node(my_mech_1)
>>> my_autodiff.add_node(my_mech_2)
>>> my_autodiff.add_projection(sender=my_mech_1, projection=my_projection, receiver=my_mech_2)
>>> my_autodiff._analyze_graph() # alternatively, my_autodiff.run( ... )
>>>
>>> # input specification
>>> my_inputs = {my_mech_1: [[1, 2, 3]]}
>>> my_targets = {my_mech_2: [[4, 5]]}
>>> input_dict = {"inputs": my_inputs, "targets": my_targets, "epochs": 2}
>>>
>>> parentComposition = pnl.Composition()
>>> parentComposition.add_node(my_autodiff)
>>>
>>> training_input = {my_autodiff: input_dict}
>>> result1 = parentComposition.run(inputs=training_input)
>>>
>>> my_autodiff.learning_enabled = False
>>> no_training_input = {my_autodiff: my_inputs}
>>> result2 = parentComposition.run(inputs=no_training_input)
.. _Composition_Class_Reference:
Class Reference
---------------
"""
from psyneulink.core.components.functions.transferfunctions import Linear, Logistic, ReLU
from psyneulink.core.components.mechanisms.processing.compositioninterfacemechanism import CompositionInterfaceMechanism
from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection
from psyneulink.core.compositions.composition import Composition
from psyneulink.core.compositions.composition import CompositionError
from psyneulink.core.globals.context import ContextFlags
from psyneulink.core.globals.keywords import SOFT_CLAMP
from psyneulink.core.scheduling.scheduler import Scheduler
import numpy as np
import copy
from collections.abc import Iterable
from toposort import toposort
import logging
try:
import torch
from torch import nn
import torch.optim as optim
from psyneulink.library.compositions.pytorchmodelcreator import PytorchModelCreator
torch_available = True
except ImportError:
torch_available = False
logger = logging.getLogger(__name__)
__all__ = [
'AutodiffComposition', 'AutodiffCompositionError'
]
class AutodiffCompositionError(CompositionError):
def __init__(self, error_value):
self.error_value = error_value
def __str__(self):
return repr(self.error_value)
class AutodiffComposition(Composition):
"""
AutodiffComposition( \
param_init_from_pnl=True, \
patience=None, \
min_delta=0, \
learning_rate=0.001, \
learning_enabled=True, \
optimizer_type=None, \
loss_spec=None, \
randomize=False, \
refresh_losses=False, \
name="autodiff_composition")
Subclass of `Composition` that trains models more quickly by integrating with PyTorch.
Arguments
---------
param_init_from_pnl : boolean : default True
a Boolean specifying how parameters are initialized. (See
`Creating an AutodiffComposition <AutodiffComposition_Creation>` for details)
patience : int or None : default None
**patience** allows the model to stop training early, if training stops reducing loss. The model tracks how many
consecutive epochs of training have failed to reduce the model's loss. When this number exceeds **patience**,
the model stops training early. If **patience** is ``None``, the model will train for the number
of specified epochs and will not stop training early.
min_delta : float : default 0
the minimum reduction in average loss that an epoch must provide in order to qualify as a 'good' epoch.
Used for early stopping of training, in combination with **patience**.
learning_rate : float : default 0.001
the learning rate, which is passed to the optimizer.
learning_enabled : boolean : default True
specifies whether the AutodiffComposition should learn. When True, the AutodiffComposition trains using PyTorch.
When False, the AutodiffComposition executes just like an ordinary Composition
optimizer_type : str : default 'sgd'
the kind of optimizer used in training. The current options are 'sgd' or 'adam'.
weight_decay : float : default 0
specifies the L2 penalty (which discourages large weights) used by the optimizer.
loss_spec : str or PyTorch loss function : default 'mse'
specifies the loss function for training. The current string options are 'mse' (the default), 'crossentropy',
'l1', 'nll', 'poissonnll', and 'kldiv'. Any PyTorch loss function can work here, such as ones from
https://pytorch.org/docs/stable/nn.html#loss-functions
randomize: boolean : default False
specifies whether the order of inputs will be randomized in each epoch. (In each epoch, all inputs are run, but
if **randomize** is True then the order of inputs within an epoch is random.)
refresh_losses : boolean: default False
specifies whether the `losses` attribute is refreshed for each call to `run()`. If False, the losses of each run
are appended to the `losses` attribute. If True, the losses of each run overwrite `losses` instead.
Attributes
----------
pytorch_representation : PytorchModelCreator
the PyTorch representation of the PsyNeuLink model
losses : list of floats
tracks the average loss for each training epoch
patience : int or None : default None
allows the model to stop training early, if training stops reducing loss. The model tracks how many
consecutive epochs of training have failed to reduce the model's loss. When this number exceeds **patience**,
the model stops training early. If **patience** is ``None``, the model will train for the number
of specified epochs and will not stop training early.
min_delta : float : default 0
the minimum reduction in average loss that an epoch must provide in order to qualify as a 'good' epoch.
Used for early stopping of training, in combination with **patience**.
learning_enabled : boolean : default True
specifies whether the AutodiffComposition should learn. When True, the AutodiffComposition trains using PyTorch.
When False, the AutodiffComposition executes just like an ordinary Composition. This attribute can be toggled.
learning_rate : float: default 0.001
the learning rate for training. Currently only used to initialize the `optimizer` attribute.
optimizer : PyTorch optimizer function
the optimizer used for training. Depends on the **optimizer_type**, **learning_rate**, and **weight_decay**
arguments from initialization.
loss : PyTorch loss function
the loss function used for training. Depends on the **loss_spec** argument from initialization.
    name : str : default "autodiff_composition"
        the name of the Composition.
        Specified in the **name** argument of the constructor for the Composition;
        if not specified, a default is assigned by the registry for Compositions
        (see :doc:`Registry <LINK>` for conventions used in naming, including for default and duplicate names).
Returns
-------
instance of AutodiffComposition : AutodiffComposition
"""
class Parameters(Composition.Parameters):
"""
Attributes
----------
learning_rate
see `learning_rate <AutodiffComposition.learning_rate>`
:default value: 0.001
:type: float
losses
see `losses <AutodiffComposition.losses>`
:default value: None
:type:
min_delta
see `min_delta <AutodiffComposition.min_delta>`
:default value: 0
:type: int
optimizer
see `optimizer <AutodiffComposition.optimizer>`
:default value: None
:type:
patience
see `patience <AutodiffComposition.patience>`
:default value: None
:type:
pytorch_representation
see `pytorch_representation <AutodiffComposition.pytorch_representation>`
:default value: None
:type:
"""
optimizer = None
learning_rate = .001
losses = None
patience = None
min_delta = 0
pytorch_representation = None
# TODO (CW 9/28/18): add compositions to registry so default arg for name is no longer needed
def __init__(self,
param_init_from_pnl=True,
patience=None,
min_delta=0,
learning_rate=0.001,
learning_enabled=True,
optimizer_type='sgd',
weight_decay=0,
loss_spec='mse',
randomize=None,
refresh_losses=False,
disable_cuda=False,
cuda_index=None,
force_no_retain_graph=False,
name="autodiff_composition"):
self.learning_enabled = True
if not torch_available:
raise AutodiffCompositionError('Pytorch python module (torch) is not installed. Please install it with '
'`pip install torch` or `pip3 install torch`')
# params = self._assign_args_to_param_dicts(learning_rate=learning_rate)
# since this does not pass params argument, defaults will not be automatically set..
super(AutodiffComposition, self).__init__(name=name)
# super(AutodiffComposition, self).__init__(params=params, name=name)
self.learning_enabled = learning_enabled
self.optimizer_type = optimizer_type
self.loss_spec = loss_spec
self.randomize = randomize
self.refresh_losses = refresh_losses
# pytorch representation of model and associated training parameters
self.pytorch_representation = None
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.optimizer = None
self.loss = None
self.force_no_retain_graph = force_no_retain_graph
# user indication of how to initialize pytorch parameters
self.param_init_from_pnl = param_init_from_pnl
# keeps track of average loss per epoch
self.losses = []
# ordered execution sets for the pytorch model
self.execution_sets = None
        # patience is the number of "bad" epochs (with no progress in average loss) that the model
        # tolerates in one training session before ending training
self.patience = patience
self.min_delta = min_delta
# CW 11/1/18: maybe we should make scheduler a property, like in Composition
self.scheduler = None
if not disable_cuda and torch.cuda.is_available():
if cuda_index is None:
self.device = torch.device('cuda')
else:
self.device = torch.device('cuda:' + cuda_index)
else:
self.device = torch.device('cpu')
# CLEANUP: move some of what's done in the methods below to a "validate_params" type of method
def _build_pytorch_representation(self, execution_id = None):
if self.scheduler is None: # if learning_enabled has never been run yet
self.scheduler = Scheduler(graph=self.graph_processing)
if self.execution_sets is None:
self.execution_sets = list(self.scheduler.run())
if self.parameters.pytorch_representation.get(execution_id) is None:
model = PytorchModelCreator(self.graph_processing,
self.param_init_from_pnl,
self.execution_sets,
self.device,
execution_id)
self.parameters.pytorch_representation.set(model, execution_id)
# Set up optimizer function
old_opt = self.parameters.optimizer.get(execution_id)
if old_opt is not None:
logger.warning("Overwriting optimizer for AutodiffComposition {}! Old optimizer: {}".format(
self, old_opt))
opt = self._make_optimizer(self.optimizer_type, self.learning_rate, self.weight_decay, execution_id)
self.parameters.optimizer.set(opt, execution_id)
# Set up loss function
if self.loss is not None:
logger.warning("Overwriting loss function for AutodiffComposition {}! Old loss function: {}".format(
self, self.loss))
self.loss = self._get_loss(self.loss_spec)
def _make_optimizer(self, optimizer_type, learning_rate, weight_decay, execution_id):
if not isinstance(learning_rate, (int, float)):
raise AutodiffCompositionError("Learning rate must be an integer or float value.")
if optimizer_type not in ['sgd', 'adam']:
raise AutodiffCompositionError("Invalid optimizer specified. Optimizer argument must be a string. "
"Currently, Stochastic Gradient Descent and Adam are the only available "
"optimizers (specified as 'sgd' or 'adam').")
params = self.parameters.pytorch_representation.get(execution_id).parameters()
if optimizer_type == 'sgd':
return optim.SGD(params, lr=learning_rate, weight_decay=weight_decay)
else:
return optim.Adam(params, lr=learning_rate, weight_decay=weight_decay)
def _get_loss(self, loss_spec):
if not isinstance(self.loss_spec, str):
return self.loss_spec
elif loss_spec == 'mse':
return nn.MSELoss(reduction='sum')
elif loss_spec == 'crossentropy':
return nn.CrossEntropyLoss(reduction='sum')
elif loss_spec == 'l1':
return nn.L1Loss(reduction='sum')
elif loss_spec == 'nll':
return nn.NLLLoss(reduction='sum')
elif loss_spec == 'poissonnll':
return nn.PoissonNLLLoss(reduction='sum')
elif loss_spec == 'kldiv':
return nn.KLDivLoss(reduction='sum')
else:
raise AutodiffCompositionError("Loss type {} not recognized. Loss argument must be a string or function. "
"Currently, the recognized loss types are Mean Squared Error, Cross Entropy,"
" L1 loss, Negative Log Likelihood loss, Poisson Negative Log Likelihood, "
"and KL Divergence. These are specified as 'mse', 'crossentropy', 'l1', "
"'nll', 'poissonnll', and 'kldiv' respectively.".format(loss_spec))
def _has_required_keys(self, input_dict):
required_keys = {"inputs", "targets"}
return required_keys.issubset(set(input_dict.keys()))
def _adjust_stimulus_dict(self, inputs):
if self.learning_enabled:
if isinstance(inputs, dict):
if self._has_required_keys(inputs):
return [inputs]
raise AutodiffCompositionError("Invalid input specification.")
elif isinstance(inputs, list):
for input_dict in inputs:
if not self._has_required_keys(input_dict):
raise AutodiffCompositionError("Invalid input specification.")
return inputs
return super(AutodiffComposition, self)._adjust_stimulus_dict(inputs)
# performs forward computation for one input
def autodiff_processing(self, inputs, execution_id=None, do_logging=False):
pytorch_representation = self.parameters.pytorch_representation.get(execution_id)
# run the model on inputs - switch autograd off for this (we don't need it)
with torch.no_grad():
tensor_outputs = pytorch_representation.forward(inputs, execution_id=execution_id, do_logging=do_logging)
# get outputs back into numpy
outputs = []
for i in range(len(tensor_outputs)):
outputs.append(tensor_outputs[i].numpy().copy())
return outputs
    # performs learning/training on all input-target pairs it receives for the given number of epochs
def autodiff_training(self, inputs, targets, epochs, execution_id=None, do_logging=False):
# FIX CW 11/1/18: this value of num_inputs assumes all inputs have same length, and that the length of
# the input for an origin component equals the number of desired trials. We could clean this up
# by perhaps using modular arithmetic on t, or by being more explicit about number of desired trials
first_input_value = list(inputs.values())[0]
num_inputs = len(first_input_value)
patience = self.parameters.patience.get(execution_id)
if patience is not None:
# set up object for early stopping
early_stopper = EarlyStopping(patience=patience, min_delta=self.parameters.min_delta.get(execution_id))
# if training over trial sets in random order, set up array for mapping random order back to original order
if self.randomize:
rand_train_order_reverse = np.zeros(num_inputs)
# get total number of output neurons from the dimensionality of targets on the first trial
# (this is for computing average loss across neurons on each trial later)
out_size = 0
for target in targets.values():
out_size += len(target)
# iterate over epochs
for epoch in range(epochs):
# if training in random order, generate random order and set up mapping
# from random order back to original order
if self.randomize:
rand_train_order = np.random.permutation(num_inputs)
rand_train_order_reverse[rand_train_order] = np.arange(num_inputs)
# set up array to keep track of losses on epoch
curr_losses = np.zeros(num_inputs)
# reset temporary list to keep track of most recent outputs
outputs = []
self.parameters.pytorch_representation.get(execution_id).detach_all()
# self.parameters.pytorch_representation.get(execution_id).reset_all()
# iterate over inputs, targets
for t in range(num_inputs):
if self.randomize:
input_index = rand_train_order[t]
else:
input_index = t
curr_tensor_inputs = {}
curr_tensor_targets = {}
for component in inputs.keys():
input = inputs[component][input_index]
curr_tensor_inputs[component] = torch.tensor(input, device=self.device).double()
for component in targets.keys():
target = targets[component][input_index]
curr_tensor_targets[component] = torch.tensor(target, device=self.device).double()
# do forward computation on current inputs
curr_tensor_outputs = self.parameters.pytorch_representation.get(execution_id).forward(
curr_tensor_inputs,
execution_id,
do_logging
)
# compute total loss across output neurons for current trial
curr_loss = torch.zeros(1).double()
for component in curr_tensor_outputs.keys():
# possibly add custom loss option, which is a loss function that takes many args
# (outputs, targets, weights, and more) and returns a scalar
curr_loss += self.loss(curr_tensor_outputs[component], curr_tensor_targets[component])
# save average loss across all output neurons on current trial
curr_losses[t] = (curr_loss[0].item())/out_size
optimizer = self.parameters.optimizer.get(execution_id)
# backpropagate to compute gradients and perform learning update for parameters
optimizer.zero_grad()
curr_loss = curr_loss/2
if self.force_no_retain_graph:
curr_loss.backward(retain_graph=False)
else:
curr_loss.backward(retain_graph=True)
self.parameters.pytorch_representation.get(execution_id).copy_weights_to_psyneulink(execution_id)
optimizer.step()
# save outputs of model if this is final epoch
curr_output_list = []
for input_state in self.output_CIM.input_states:
assert(len(input_state.all_afferents) == 1) # CW 12/05/18, this assert may eventually be outdated
component = input_state.all_afferents[0].sender.owner
curr_output_list.append(curr_tensor_outputs[component].detach().numpy().copy())
# for component in curr_tensor_outputs.keys():
# curr_output_list.append(curr_tensor_outputs[component].detach().numpy().copy())
outputs.append(curr_output_list)
# save average loss on the current epoch
average_loss = np.mean(curr_losses)
self.parameters.losses.get(execution_id).append(average_loss)
# update early stopper with most recent average loss
if self.parameters.patience.get(execution_id) is not None:
should_stop = early_stopper.step(average_loss)
if should_stop:
logger.warning('Stopped training early after {} epochs'.format(epoch))
if self.randomize:
outputs_list = [None] * len(outputs)
for i in range(len(outputs)):
outputs_list[i] = outputs[int(rand_train_order_reverse[i])]
return outputs_list
else:
return outputs
if self.randomize: # save outputs in a list in correct order, return them
outputs_list = [None] * len(outputs)
for i in range(len(outputs)):
outputs_list[i] = outputs[int(rand_train_order_reverse[i])]
return outputs_list
else:
return outputs
def execute(self,
inputs=None,
autodiff_stimuli=None,
do_logging=False,
scheduler_processing=None,
termination_processing=None,
call_before_time_step=None,
call_before_pass=None,
call_after_time_step=None,
call_after_pass=None,
execution_id=None,
base_execution_id=None,
clamp_input=SOFT_CLAMP,
targets=None,
runtime_params=None,
skip_initialization=False,
bin_execute=False,
context=None
):
execution_id = self._assign_execution_ids(execution_id)
if self.learning_enabled:
# TBI: How are we supposed to use base_execution_id and statefulness here?
# TBI: can we call _build_pytorch_representation in _analyze_graph so that pytorch
# model may be modified between runs?
self._analyze_graph() # ADDED by CW 12/17/18: unsure if correct here
self._build_pytorch_representation(execution_id)
autodiff_inputs = inputs["inputs"]
autodiff_targets = inputs["targets"]
autodiff_epochs = 1
if "epochs" in inputs:
autodiff_epochs = inputs["epochs"]
output = self.autodiff_training(autodiff_inputs, autodiff_targets, autodiff_epochs, execution_id, do_logging)
ctx = self.output_CIM.parameters.context.get(execution_id)
# new_ctx = copy.deepcopy(ctx)
# new_ctx.execution_phase = ContextFlags.PROCESSING
# self.output_CIM.parameters.context.set(new_ctx, execution_id=execution_id)
if ctx is not None: # HACK: CW 12/18/18 for some reason context isn't set correctly
ctx.execution_phase = ContextFlags.PROCESSING
# note that output[-1] might not be the truly most recent value
# HACK CW 2/5/19: the line below is a hack. In general, the output_CIM of an AutodiffComposition
# is not having its parameters populated correctly, and this should be fixed in the long run.
self.output_CIM.execute(input=output[-1], execution_id=execution_id, context=ContextFlags.PROCESSING)
return output
# learning not enabled. execute as a normal composition
return super(AutodiffComposition, self).execute(inputs=inputs,
scheduler_processing=scheduler_processing,
termination_processing=termination_processing,
call_before_time_step=call_before_time_step,
call_before_pass=call_before_pass,
call_after_time_step=call_after_time_step,
call_after_pass=call_after_pass,
execution_id=execution_id,
base_execution_id=base_execution_id,
clamp_input=clamp_input,
runtime_params=runtime_params,
skip_initialization=skip_initialization,
bin_execute=bin_execute,
context=context)
# what the user calls for doing processing/training, similar to the run function of the normal composition
def run(
self,
inputs=None,
do_logging=False,
scheduler_processing=None,
termination_processing=None,
execution_id=None,
num_trials=1,
call_before_time_step=None,
call_after_time_step=None,
call_before_pass=None,
call_after_pass=None,
call_before_trial=None,
call_after_trial=None,
clamp_input=SOFT_CLAMP,
bin_execute=False,
initial_values=None,
reinitialize_values=None,
runtime_params=None,
context=None):
# TBI: Handle trials, timesteps, etc
execution_id = self._assign_execution_ids(execution_id)
if self.learning_enabled:
self._analyze_graph()
if self.refresh_losses or (self.parameters.losses.get(execution_id) is None):
self.parameters.losses.set([], execution_id)
adjusted_stimuli = self._adjust_stimulus_dict(inputs)
if num_trials is None:
num_trials = len(adjusted_stimuli)
results = []
for trial_num in range(num_trials):
stimulus_index = trial_num % len(adjusted_stimuli)
trial_output = self.execute(
inputs=adjusted_stimuli[stimulus_index],
execution_id=execution_id,
do_logging=do_logging,
)
results.append(trial_output)
return results
else:
return super(AutodiffComposition, self).run(inputs=inputs,
scheduler_processing=scheduler_processing,
termination_processing=termination_processing,
execution_id=execution_id,
num_trials=num_trials,
call_before_time_step=call_before_time_step,
call_after_time_step=call_after_time_step,
call_before_pass=call_before_pass,
call_after_pass=call_after_pass,
call_before_trial=call_before_trial,
call_after_trial=call_after_trial,
clamp_input=clamp_input,
bin_execute=bin_execute,
initial_values=initial_values,
reinitialize_values=reinitialize_values,
runtime_params=runtime_params,
context=context)
# validates properties of the autodiff composition, and arguments to run, when run is called
def _validate_params(self, targets, epochs):
# set up processing graph and dictionary (for checking if recurrence is present later)
processing_graph = self.graph_processing
topo_dict = {}
# raise error if composition is empty
if len([vert.component for vert in self.graph.vertices]) == 0:
raise AutodiffCompositionError("{0} has no mechanisms or projections to execute."
.format(self.name))
# iterate over nodes in processing graph
for node in processing_graph.vertices:
# raise error if a node is a composition
if isinstance(node.component, Composition):
raise AutodiffCompositionError("{0} was added as a node to {1}. Compositions cannot be "
"added as nodes to Autodiff Compositions."
.format(node.component, self.name))
# raise error if a node's mechanism doesn't have a Linear, Logistic, or ReLU function
if not isinstance(node.component.function, (Linear, Logistic, ReLU)):
                raise AutodiffCompositionError("Function {0} of mechanism {1} in {2} is not a valid function "
                                               "for an Autodiff Composition. Functions of mechanisms in "
                                               "Autodiff Compositions can only be Linear, Logistic, or ReLU."
                                               .format(node.component.function, node.component, self.name))
# raise error if a node has more than one input state
if len(node.component.input_states) > 1:
raise AutodiffCompositionError("Mechanism {0} of {1} has more than one input state. Autodiff "
"Compositions only allow mechanisms to have one input state. The "
"dimensionality of this state's value will become the dimensionality of "
"the tensor representing the state's mechanism in the underlying "
"Pytorch model."
.format(node.component, self.name))
# raise error if any parent of current node creates a cycle in the composition (ie. if there's recurrence)
topo_dict[node.component] = set()
for parent in processing_graph.get_parents_from_component(node.component):
topo_dict[node.component].add(parent.component)
try:
list(toposort(topo_dict))
except ValueError:
raise AutodiffCompositionError("Mechanisms {0} and {1} are part of a recurrent path in {2}. "
"Autodiff Compositions currently do not support recurrence."
.format(node.component, parent.component, self.name))
# raise errors if arguments to run are not consistent or we're doing training but there are
# no trainable parameters
if targets is None:
if epochs is not None:
raise AutodiffCompositionError("Number of training epochs specified for {0} but no targets given."
.format(self.name))
else:
if epochs is None:
raise AutodiffCompositionError("Targets specified for {0}, but no number of training epochs given."
.format(self.name))
if len([vert.component for vert in self.graph.vertices if isinstance(vert.component, MappingProjection)]) == 0:
raise AutodiffCompositionError("Targets specified for {0}, but {0} has no trainable parameters."
.format(self.name))
# gives user weights and biases of the model (from the pytorch representation)
def get_parameters(self, execution_id=NotImplemented):
if execution_id is NotImplemented:
execution_id = self.default_execution_id
pytorch_representation = self.parameters.pytorch_representation.get(execution_id)
if pytorch_representation is None:
raise AutodiffCompositionError("{0} has not been run yet so parameters have not been created "
"in Pytorch."
.format(self.name))
weights = pytorch_representation.get_weights_for_projections()
biases = pytorch_representation.get_biases_for_mechanisms()
return weights, biases
class EarlyStopping(object):
def __init__(self, mode='min', min_delta=0, patience=10):
self.mode = mode
self.min_delta = min_delta
self.patience = patience
self.best = None
self.num_bad_epochs = 0
self.is_better = None
self._init_is_better(mode, min_delta)
if patience == 0:
self.is_better = lambda a, b: True
def step(self, metrics):
if self.best is None:
self.best = metrics
return False
if np.isnan(metrics):
return True
if self.is_better(metrics, self.best):
self.num_bad_epochs = 0
self.best = metrics
else:
self.num_bad_epochs += 1
if self.num_bad_epochs >= self.patience:
return True
return False
def _init_is_better(self, mode, min_delta):
if mode not in {'min', 'max'}:
raise ValueError('mode ' + mode + ' is unknown!')
if mode == 'min':
self.is_better = lambda a, best: a < best - min_delta
if mode == 'max':
self.is_better = lambda a, best: a > best + min_delta
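# Illustrative sketch (not used elsewhere in this module) of how EarlyStopping is
# meant to be driven by a training loop; the loss values below are made up.
def _early_stopping_example():
    stopper = EarlyStopping(mode='min', min_delta=0.01, patience=2)
    for epoch, loss in enumerate([1.0, 0.8, 0.79, 0.795, 0.80]):
        if stopper.step(loss):
            # two consecutive epochs failed to beat best - min_delta, so training stops here
            return epoch
    return None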
|
the-stack_0_2840 | # coding: utf-8
import pprint
import re
import six
class Tag:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'key': 'str',
'value': 'str'
}
attribute_map = {
'key': 'key',
'value': 'value'
}
def __init__(self, key=None, value=None):
"""Tag - a model defined in huaweicloud sdk"""
self._key = None
self._value = None
self.discriminator = None
if key is not None:
self.key = key
if value is not None:
self.value = value
@property
def key(self):
"""Gets the key of this Tag.
        Function description: tag key
:return: The key of this Tag.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this Tag.
        Function description: tag key
:param key: The key of this Tag.
:type: str
"""
self._key = key
@property
def value(self):
"""Gets the value of this Tag.
        Function description: tag value
:return: The value of this Tag.
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this Tag.
        Function description: tag value
:param value: The value of this Tag.
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Tag):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
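# Illustrative sketch (not part of the generated SDK code): constructing a Tag and
# serializing it with the helpers defined above.
def _tag_example():
    tag = Tag(key='env', value='prod')
    # to_dict() walks openapi_types and would mask any attribute listed in sensitive_list
    return tag.to_dict()  # -> {'key': 'env', 'value': 'prod'}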
|
the-stack_0_2841 | # coding: utf-8
"""Dumb VPR model development"""
import matplotlib.pyplot as plt
def vpr_median(cc_r, km_above_ml=1100):
"""vpr diffs based on median ze above ml"""
z = cc_r.data.zh.iloc[0, :]
zt = cc_r.cl_data.zh.loc[:, km_above_ml]
cl = cc_r.classes()
mz = z.groupby(cl).median()
mzt = zt.groupby(cl).median()
return mz-mzt
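# NOTE: ``cc_r`` used below is assumed to be provided by the surrounding interactive
# session (an object exposing ``data.zh``, ``cl_data.zh`` and ``classes()``); it is
# not defined in this module.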
if __name__ == '__main__':
plt.ion()
vpr = vpr_median(cc_r)
|
the-stack_0_2842 | """
# Copyright 2021 21CN Corporation Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import unittest
from unittest import mock
from pony.orm import db_session, commit
import utils
from config import base_dir
from core.models import AppInsMapper, VmImageInfoMapper, AppPkgMapper
from task.app_instance_task import do_check_stack_status
from task.app_package_task import do_check_package_status
from task.image_task import do_check_image_status, do_download_then_compress_image, do_check_compress_status, \
do_push_image
from tests.resources.test_data import mock_heat_client, mock_glance_client, MockResponse
class TasksTest(unittest.TestCase):
"""
    Unit tests for the scheduled background tasks
"""
@mock.patch("task.app_instance_task.create_heat_client")
def test_do_check_stack_status(self, create_heat_client):
"""
        Test the task that checks application instance (stack) status
Returns:
"""
create_heat_client.return_value = mock_heat_client
with db_session:
AppInsMapper(
app_instance_id='appIns01',
host_ip='10.10.10.10',
tenant_id='tenant001',
stack_id='stack001',
operational_status=utils.INSTANTIATING
)
commit()
do_check_stack_status('appIns01')
with db_session:
app_ins_info = AppInsMapper.get(app_instance_id='appIns01')
self.assertEqual(utils.FAILURE, app_ins_info.operational_status)
@mock.patch('task.image_task.add_download_then_compress_image_task')
@mock.patch('task.image_task.create_glance_client')
def test_do_check_image_status(self, create_glance_client, add_download_then_compress_image_task):
"""
Args:
create_glance_client:
Returns:
"""
create_glance_client.return_value = mock_glance_client
add_download_then_compress_image_task.return_value = None
with db_session:
VmImageInfoMapper(
image_id='test_image',
host_ip='10.10.10.10',
image_name='test_image',
status='queued',
tenant_id='test_tenant',
app_package_id='test_package'
)
commit()
do_check_image_status('test_image', '10.10.10.10')
with db_session:
image_info = VmImageInfoMapper.get(image_id='test_image', host_ip='10.10.10.10')
self.assertEqual(utils.ACTIVE, image_info.status)
@mock.patch('task.image_task.add_check_compress_image_task')
@mock.patch('task.image_task.requests')
@mock.patch('task.image_task.create_glance_client')
def test_do_download_then_compress_image(self, create_glance_client, requests, add_check_compress_image_task):
"""
Args:
create_glance_client:
Returns:
"""
create_glance_client.return_value = mock_glance_client
requests.post.return_value = MockResponse({
'status_code': 200,
'json': {
'requestId': 'abcabcabcabc'
}
})
add_check_compress_image_task.return_value = None
with db_session:
VmImageInfoMapper(
image_id='test_image1',
host_ip='10.10.10.11',
image_name='test_image1',
status='active',
tenant_id='test_tenant',
compress_task_status='waiting'
)
commit()
do_download_then_compress_image('test_image1', '10.10.10.11')
with db_session:
image_info = VmImageInfoMapper.get(image_id='test_image1', host_ip='10.10.10.11')
self.assertEqual(utils.COMPRESSING, image_info.compress_task_status)
@mock.patch('task.image_task.add_push_image_task')
@mock.patch('task.image_task.requests')
def test_do_check_compress_status(self, requests, add_push_image_task):
"""
Returns:
"""
requests.get.return_value = MockResponse({
'status_code': 200,
'json': {
'status': 0
}
})
add_push_image_task.return_value = None
with db_session:
VmImageInfoMapper(
image_id='test_image2',
host_ip='10.10.10.10',
image_name='test_image2',
status='active',
tenant_id='test_tenant',
compress_task_status='compressing'
)
commit()
do_check_compress_status('test_image2', '10.10.10.10')
utils.delete_dir(f'{base_dir}/vmImage')
with db_session:
image_info = VmImageInfoMapper.get(image_id='test_image2', host_ip='10.10.10.10')
self.assertEqual(utils.PUSHING, image_info.compress_task_status)
@mock.patch('task.image_task.requests')
def test_do_push_image(self, requests):
requests.post.return_value = MockResponse({
'status_code': 200,
'json': {
'imageId': 'mock_image_id'
}
})
with db_session:
VmImageInfoMapper(
image_id='test_image3',
host_ip='10.10.10.10',
image_name='test_image3',
status='active',
tenant_id='test_tenant',
compress_task_status='pushing'
)
commit()
utils.create_dir(f'{base_dir}/vmImage/10.10.10.10')
with open(f'{base_dir}/vmImage/10.10.10.10/test_image3.qcow2', 'w') as image_file:
image_file.writelines('abcabcabc')
do_push_image('test_image3', '10.10.10.10')
utils.delete_dir(f'{base_dir}/vmImage')
with db_session:
image_info = VmImageInfoMapper.get(image_id='test_image3', host_ip='10.10.10.10')
self.assertEqual(utils.SUCCESS, image_info.compress_task_status)
@mock.patch('task.app_package_task.start_check_package_status')
def test_do_check_package_status(self, start_check_package_status):
start_check_package_status.return_value = None
with db_session:
AppPkgMapper(
app_package_id='app_package_id1',
host_ip='10.10.10.10',
status='uploading'
)
VmImageInfoMapper(
image_id='image_id1',
image_name='image_name1',
tenant_id='tenant001',
app_package_id='app_package_id1',
host_ip='10.10.10.10',
status='active'
)
VmImageInfoMapper(
image_id='image_id2',
image_name='image_name2',
tenant_id='tenant001',
app_package_id='app_package_id1',
host_ip='10.10.10.10',
status='active'
)
commit()
do_check_package_status('app_package_id1', '10.10.10.10')
with db_session:
app_package_info = AppPkgMapper.get(app_package_id='app_package_id1', host_ip='10.10.10.10')
self.assertEqual(utils.UPLOADED, app_package_info.status)
|
the-stack_0_2845 | #!/usr/bin/env python
###############################################################################
# $Id: gdal2grd.py 18195 2009-12-06 20:24:39Z rouault $
#
# Project: GDAL Python samples
# Purpose: Script to write out ASCII GRD rasters (used in Golden Software
# Surfer)
# from any source supported by GDAL.
# Author: Andrey Kiselev, [email protected]
#
###############################################################################
# Copyright (c) 2003, Andrey Kiselev <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
try:
from osgeo import gdal
from osgeo.gdalconst import *
gdal.TermProgress = gdal.TermProgress_nocb
except ImportError:
import gdal
from gdalconst import *
try:
import numpy as Numeric
Numeric.arrayrange = Numeric.arange
except ImportError:
import Numeric
import sys
# =============================================================================
def Usage():
print('Usage: gdal2grd.py [-b band] [-quiet] infile outfile')
print('Write out ASCII GRD rasters (used in Golden Software Surfer)')
print('')
print(' -b band Select a band number to convert (1 based)')
print(' -quiet Do not report any diagnostic information')
print(' infile Name of the input GDAL supported file')
print(' outfile Name of the output GRD file')
print('')
sys.exit(1)
# =============================================================================
infile = None
outfile = None
iBand = 1
quiet = 0
# Parse command line arguments.
i = 1
while i < len(sys.argv):
arg = sys.argv[i]
if arg == '-b':
i = i + 1
iBand = int(sys.argv[i])
elif arg == '-quiet':
quiet = 1
elif infile is None:
infile = arg
elif outfile is None:
outfile = arg
else:
Usage()
i = i + 1
if infile is None:
Usage()
if outfile is None:
Usage()
indataset = gdal.Open(infile, GA_ReadOnly)
if indataset is None:
print('Cannot open', infile)
sys.exit(2)
geotransform = indataset.GetGeoTransform()
band = indataset.GetRasterBand(iBand)
if band is None:
print('Cannot load band', iBand, 'from the', infile)
sys.exit(2)
if not quiet:
print('Size is ',indataset.RasterXSize,'x',indataset.RasterYSize,'x',indataset.RasterCount)
print('Projection is ',indataset.GetProjection())
print('Origin = (',geotransform[0], ',',geotransform[3],')')
print('Pixel Size = (',geotransform[1], ',',geotransform[5],')')
print('Converting band number',iBand,'with type',gdal.GetDataTypeName(band.DataType))
# Header printing
fpout = open(outfile, "wt")
fpout.write("DSAA\n")
fpout.write(str(band.XSize) + " " + str(band.YSize) + "\n")
fpout.write(str(geotransform[0] + geotransform[1] / 2) + " " +
str(geotransform[0] + geotransform[1] * (band.XSize - 0.5)) + "\n")
if geotransform[5] < 0:
fpout.write(str(geotransform[3] + geotransform[5] * (band.YSize - 0.5)) + " " +
str(geotransform[3] + geotransform[5] / 2) + "\n")
else:
fpout.write(str(geotransform[3] + geotransform[5] / 2) + " " +
str(geotransform[3] + geotransform[5] * (band.YSize - 0.5)) + "\n")
fpout.write(str(band.ComputeRasterMinMax(0)[0]) + " " +
str(band.ComputeRasterMinMax(0)[1]) + "\n")
for i in range(band.YSize - 1, -1, -1):
scanline = band.ReadAsArray(0, i, band.XSize, 1, band.XSize, 1)
j = 0
while j < band.XSize:
fpout.write(str(scanline[0, j]))
j = j + 1
if j % 10: # Print no more than 10 values per line
fpout.write(" ")
else:
fpout.write("\n")
fpout.write("\n")
# Display progress report on terminal
if not quiet:
gdal.TermProgress(float(band.YSize - i) / band.YSize)
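# Example invocation (illustrative; file names are placeholders):
#   python gdal2grd.py -b 1 input.tif output.grd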
|
the-stack_0_2846 | """Test cases for AST merge (used for fine-grained incremental checking)"""
import os
import shutil
from typing import List, Tuple, Dict, Optional
from mypy import build
from mypy.build import BuildResult
from mypy.modulefinder import BuildSource
from mypy.defaults import PYTHON3_VERSION
from mypy.errors import CompileError
from mypy.nodes import (
Node, MypyFile, SymbolTable, SymbolTableNode, TypeInfo, Expression, Var, TypeVarExpr,
UNBOUND_IMPORTED
)
from mypy.options import Options
from mypy.server.subexpr import get_subexpressions
from mypy.server.update import FineGrainedBuildManager
from mypy.strconv import StrConv
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.test.helpers import assert_string_arrays_equal, normalize_error_messages
from mypy.types import TypeStrVisitor, Type
from mypy.util import short_type, IdMapper
# Which data structures to dump in a test case?
SYMTABLE = 'SYMTABLE'
TYPEINFO = ' TYPEINFO'
TYPES = 'TYPES'
AST = 'AST'
NOT_DUMPED_MODULES = (
'builtins',
'typing',
'abc',
'contextlib',
'sys',
'mypy_extensions',
'enum',
)
class ASTMergeSuite(DataSuite):
files = ['merge.test']
def setup(self) -> None:
super().setup()
self.str_conv = StrConv(show_ids=True)
assert self.str_conv.id_mapper is not None
self.id_mapper = self.str_conv.id_mapper # type: IdMapper
self.type_str_conv = TypeStrVisitor(self.id_mapper)
def run_case(self, testcase: DataDrivenTestCase) -> None:
name = testcase.name
# We use the test case name to decide which data structures to dump.
# Dumping everything would result in very verbose test cases.
if name.endswith('_symtable'):
kind = SYMTABLE
elif name.endswith('_typeinfo'):
kind = TYPEINFO
elif name.endswith('_types'):
kind = TYPES
else:
kind = AST
main_src = '\n'.join(testcase.input)
result = self.build(main_src)
assert result is not None, 'cases where CompileError occurred should not be run'
result.manager.fscache.flush()
fine_grained_manager = FineGrainedBuildManager(result)
a = []
if result.errors:
a.extend(result.errors)
target_path = os.path.join(test_temp_dir, 'target.py')
shutil.copy(os.path.join(test_temp_dir, 'target.py.next'), target_path)
a.extend(self.dump(fine_grained_manager, kind))
old_subexpr = get_subexpressions(result.manager.modules['target'])
a.append('==>')
new_file, new_types = self.build_increment(fine_grained_manager, 'target', target_path)
a.extend(self.dump(fine_grained_manager, kind))
for expr in old_subexpr:
if isinstance(expr, TypeVarExpr):
# These are merged so we can't perform the check.
continue
# Verify that old AST nodes are removed from the expression type map.
assert expr not in new_types
a = normalize_error_messages(a)
assert_string_arrays_equal(
testcase.output, a,
'Invalid output ({}, line {})'.format(testcase.file,
testcase.line))
def build(self, source: str) -> Optional[BuildResult]:
options = Options()
options.incremental = True
options.fine_grained_incremental = True
options.use_builtins_fixtures = True
options.show_traceback = True
options.python_version = PYTHON3_VERSION
main_path = os.path.join(test_temp_dir, 'main')
with open(main_path, 'w') as f:
f.write(source)
try:
result = build.build(sources=[BuildSource(main_path, None, None)],
options=options,
alt_lib_path=test_temp_dir)
except CompileError:
# TODO: Is it okay to return None?
return None
return result
def build_increment(self, manager: FineGrainedBuildManager,
module_id: str, path: str) -> Tuple[MypyFile,
Dict[Expression, Type]]:
manager.update([(module_id, path)], [])
module = manager.manager.modules[module_id]
type_map = manager.graph[module_id].type_map()
return module, type_map
def dump(self,
manager: FineGrainedBuildManager,
kind: str) -> List[str]:
modules = manager.manager.modules
if kind == AST:
return self.dump_asts(modules)
elif kind == TYPEINFO:
return self.dump_typeinfos(modules)
elif kind == SYMTABLE:
return self.dump_symbol_tables(modules)
elif kind == TYPES:
return self.dump_types(manager)
assert False, 'Invalid kind %s' % kind
def dump_asts(self, modules: Dict[str, MypyFile]) -> List[str]:
a = []
for m in sorted(modules):
if m in NOT_DUMPED_MODULES:
# We don't support incremental checking of changes to builtins, etc.
continue
s = modules[m].accept(self.str_conv)
a.extend(s.splitlines())
return a
def dump_symbol_tables(self, modules: Dict[str, MypyFile]) -> List[str]:
a = []
for id in sorted(modules):
if not is_dumped_module(id):
# We don't support incremental checking of changes to builtins, etc.
continue
a.extend(self.dump_symbol_table(id, modules[id].names))
return a
def dump_symbol_table(self, module_id: str, symtable: SymbolTable) -> List[str]:
a = ['{}:'.format(module_id)]
for name in sorted(symtable):
if name.startswith('__'):
continue
a.append(' {}: {}'.format(name, self.format_symbol_table_node(symtable[name])))
return a
def format_symbol_table_node(self, node: SymbolTableNode) -> str:
if node.node is None:
if node.kind == UNBOUND_IMPORTED:
return 'UNBOUND_IMPORTED'
return 'None'
if isinstance(node.node, Node):
s = '{}<{}>'.format(str(type(node.node).__name__),
self.id_mapper.id(node.node))
else:
s = '? ({})'.format(type(node.node))
if (isinstance(node.node, Var) and node.node.type and
not node.node.fullname().startswith('typing.')):
typestr = self.format_type(node.node.type)
s += '({})'.format(typestr)
return s
def dump_typeinfos(self, modules: Dict[str, MypyFile]) -> List[str]:
a = []
for id in sorted(modules):
if not is_dumped_module(id):
continue
a.extend(self.dump_typeinfos_recursive(modules[id].names))
return a
def dump_typeinfos_recursive(self, names: SymbolTable) -> List[str]:
a = []
for name, node in sorted(names.items(), key=lambda x: x[0]):
if isinstance(node.node, TypeInfo):
a.extend(self.dump_typeinfo(node.node))
a.extend(self.dump_typeinfos_recursive(node.node.names))
return a
def dump_typeinfo(self, info: TypeInfo) -> List[str]:
if info.fullname() == 'enum.Enum':
# Avoid noise
return []
s = info.dump(str_conv=self.str_conv,
type_str_conv=self.type_str_conv)
return s.splitlines()
def dump_types(self, manager: FineGrainedBuildManager) -> List[str]:
a = []
# To make the results repeatable, we try to generate unique and
# deterministic sort keys.
for module_id in sorted(manager.manager.modules):
if not is_dumped_module(module_id):
continue
type_map = manager.graph[module_id].type_map()
if type_map:
a.append('## {}'.format(module_id))
for expr in sorted(type_map, key=lambda n: (n.line, short_type(n),
str(n) + str(type_map[n]))):
typ = type_map[expr]
a.append('{}:{}: {}'.format(short_type(expr),
expr.line,
self.format_type(typ)))
return a
def format_type(self, typ: Type) -> str:
return typ.accept(self.type_str_conv)
def is_dumped_module(id: str) -> bool:
return id not in NOT_DUMPED_MODULES and (not id.startswith('_') or id == '__main__')
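# Naming convention recap (illustrative): a case named "testFoo_symtable" dumps symbol
# tables, "testFoo_typeinfo" dumps TypeInfos, "testFoo_types" dumps the type map, and
# any other name dumps the AST (see run_case above).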
|
the-stack_0_2848 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import shade
from ospurge.resources import cinder
from ospurge.tests import mock
class TestBackups(unittest.TestCase):
def setUp(self):
self.cloud = mock.Mock(spec_set=shade.openstackcloud.OpenStackCloud)
self.creds_manager = mock.Mock(cloud=self.cloud)
def test_list(self):
self.assertIs(self.cloud.list_volume_backups.return_value,
cinder.Backups(self.creds_manager).list())
self.cloud.list_volume_backups.assert_called_once_with()
def test_delete(self):
backup = mock.MagicMock()
self.assertIsNone(cinder.Backups(self.creds_manager).delete(backup))
self.cloud.delete_volume_backup.assert_called_once_with(backup['id'])
def test_to_string(self):
backup = mock.MagicMock()
self.assertIn("Volume Backup",
cinder.Backups(self.creds_manager).to_str(backup))
class TestSnapshots(unittest.TestCase):
def setUp(self):
self.cloud = mock.Mock(spec_set=shade.openstackcloud.OpenStackCloud)
self.creds_manager = mock.Mock(cloud=self.cloud)
def test_list(self):
self.assertIs(self.cloud.list_volume_snapshots.return_value,
cinder.Snapshots(self.creds_manager).list())
self.cloud.list_volume_snapshots.assert_called_once_with()
def test_delete(self):
snapshot = mock.MagicMock()
self.assertIsNone(
cinder.Snapshots(self.creds_manager).delete(snapshot))
self.cloud.delete_volume_snapshot.assert_called_once_with(
snapshot['id'])
def test_to_string(self):
snapshot = mock.MagicMock()
self.assertIn("Volume Snapshot ",
cinder.Snapshots(self.creds_manager).to_str(snapshot))
class TestVolumes(unittest.TestCase):
def setUp(self):
self.cloud = mock.Mock(spec_set=shade.openstackcloud.OpenStackCloud)
self.creds_manager = mock.Mock(cloud=self.cloud, project_id=42)
def test_check_prerequisite(self):
self.cloud.list_volume_snapshots.return_value = []
self.assertEqual(
False,
cinder.Volumes(self.creds_manager).check_prerequisite()
)
self.cloud.list_volume_snapshots.assert_called_once_with()
self.cloud.list_servers.assert_called_once_with()
def test_list(self):
self.assertIs(self.cloud.list_volumes.return_value,
cinder.Volumes(self.creds_manager).list())
self.cloud.list_volumes.assert_called_once_with()
def test_should_delete(self):
self.assertEqual(
False,
cinder.Volumes(self.creds_manager).should_delete(
{'os-vol-tenant-attr:tenant_id': 84})
)
self.assertEqual(
True,
cinder.Volumes(self.creds_manager).should_delete(
{'os-vol-tenant-attr:tenant_id': 42})
)
def test_delete(self):
volume = mock.MagicMock()
self.assertIsNone(cinder.Volumes(self.creds_manager).delete(volume))
self.cloud.delete_volume.assert_called_once_with(volume['id'])
def test_to_string(self):
volume = mock.MagicMock()
self.assertIn("Volume ",
cinder.Volumes(self.creds_manager).to_str(volume))
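# Standard unittest entry point, added for convenience when running this file directly;
# the upstream test runner normally discovers these cases itself.
if __name__ == '__main__':
    unittest.main()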
|