content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
#!/usr/bin/env python3
# Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
import opendb as odb
parser = argparse.ArgumentParser(
    description='Creates obstructions in def files.')

parser.add_argument('--lef', '-l',
                    nargs='+',
                    type=str,
                    default=None,
                    required=True,
                    help='LEF file needed to have a proper view of the DEF files.')

parser.add_argument('--input-def', '-id', required=True,
                    help='DEF view of the design that needs to be obstructed.')

parser.add_argument('--obstructions', '-obs', required=True,
                    help='Format: layer llx lly urx ury, ... (in microns)')

parser.add_argument('--output', '-o', required=True,
                    help='Output DEF file.')
args = parser.parse_args()
input_lef_file_names = args.lef
input_def_file_name = args.input_def
obs_args = args.obstructions
output_def_file_name = args.output
RE_NUMBER = r'[\-]?[0-9]+(\.[0-9]+)?'
RE_OBS = r'(?P<layer>\S+)\s+' r'(?P<bbox>' + RE_NUMBER + r'\s+' + RE_NUMBER + r'\s+' + RE_NUMBER + r'\s+' + RE_NUMBER + r')'
obses = obs_args.split(',')
obs_list = []
for obs in obses:
    obs = obs.strip()
    m = re.match(RE_OBS, obs)
    assert m,\
        "Incorrectly formatted input (%s).\n Format: layer llx lly urx ury, ..." % (obs)
    layer = m.group('layer')
    bbox = [float(x) for x in m.group('bbox').split()]
    obs_list.append((layer, bbox))
design_db = odb.dbDatabase.create()
for lef in input_lef_file_names:
    odb.read_lef(design_db, lef)
odb.read_def(design_db, input_def_file_name)
design_chip = design_db.getChip()
design_block = design_chip.getBlock()
design_insts = design_block.getInsts()
design_tech = design_db.getTech()
for obs in obs_list:
    layer = obs[0]
    bbox = obs[1]
    dbu = design_tech.getDbUnitsPerMicron()
    bbox = [int(x*dbu) for x in bbox]
    print("Creating an obstruction on", layer, "at", *bbox, "(DBU)")
    odb.dbObstruction_create(design_block, design_tech.findLayer(layer), *bbox)
odb.write_def(design_block, output_def_file_name)
| 33.3625 | 125 | 0.665043 | ["Apache-2.0"] | donns-sandbox/openlane | scripts/add_def_obstructions.py | 2,669 | Python |
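For reference, the `--obstructions` value accepted by the script above is a comma-separated list of `layer llx lly urx ury` entries in microns. Below is a minimal sketch of just the parsing step in isolation; the layer names `met1` and `li1` are made-up placeholders, and the `opendb` dependency is not needed for this part.

```python
import re

RE_NUMBER = r'[\-]?[0-9]+(\.[0-9]+)?'
RE_OBS = (r'(?P<layer>\S+)\s+'
          r'(?P<bbox>' + RE_NUMBER + r'\s+' + RE_NUMBER + r'\s+'
          + RE_NUMBER + r'\s+' + RE_NUMBER + r')')

# Hypothetical input; real layer names depend on the technology LEF.
obs_args = "met1 0 0 100.5 100.5, li1 10 10 50 50"

obs_list = []
for obs in obs_args.split(','):
    m = re.match(RE_OBS, obs.strip())
    assert m, "Incorrectly formatted input (%s)." % obs
    # The script above later converts these micron values to DBU via getDbUnitsPerMicron().
    obs_list.append((m.group('layer'), [float(x) for x in m.group('bbox').split()]))

print(obs_list)
# [('met1', [0.0, 0.0, 100.5, 100.5]), ('li1', [10.0, 10.0, 50.0, 50.0])]
```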
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1.schema.predict.instance",
manifest={
"TextSentimentPredictionInstance",
},
)
class TextSentimentPredictionInstance(proto.Message):
r"""Prediction input format for Text Sentiment.
Attributes:
content (str):
The text snippet to make the predictions on.
mime_type (str):
The MIME type of the text snippet. The
supported MIME types are listed below.
- text/plain
"""
content = proto.Field(
proto.STRING,
number=1,
)
mime_type = proto.Field(
proto.STRING,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| 27.12 | 74 | 0.676254 | ["Apache-2.0"] | TheMichaelHu/python-aiplatform | google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py | 1,356 | Python |
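A short usage sketch for the message above. It assumes the module is importable as `text_sentiment` (the real install path is the one shown in the row's `path` column), and the sample text is made up.

```python
# Assumes the module above is saved/importable as `text_sentiment`.
from text_sentiment import TextSentimentPredictionInstance

instance = TextSentimentPredictionInstance(
    content="I really enjoyed this product!",  # made-up sample snippet
    mime_type="text/plain",                    # the only MIME type listed as supported
)

print(instance.content)
print(instance.mime_type)
```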
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python import keras
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
class TrainingTest(test.TestCase):
def test_fit_on_arrays(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
# Test fit at different verbosity
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=1)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=2)
# Test with validation data
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=2,
batch_size=5,
verbose=1)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=2,
batch_size=5,
verbose=2)
model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
# Test with validation split
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=0,
validation_split=0.2)
# Test with dictionary inputs
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
epochs=1,
batch_size=5,
verbose=0)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
epochs=1,
batch_size=5,
verbose=1)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
validation_data=({'input_a': input_a_np,
'input_b': input_b_np
},
{
'dense': output_d_np,
'dropout': output_e_np
}),
epochs=1,
batch_size=5,
verbose=0)
model.train_on_batch({
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np})
# Test with lists for loss, metrics
loss = ['mae', 'mse']
metrics = ['acc', 'mae']
model.compile(optimizer, loss, metrics=metrics)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
# Test with dictionaries for loss, metrics, loss weights
loss = {'dense': 'mse', 'dropout': 'mae'}
loss_weights = {'dense': 1., 'dropout': 0.5}
metrics = {'dense': 'mse', 'dropout': 'mae'}
model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
# Invalid use cases
with self.assertRaises(AttributeError):
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
validation_data=([input_a_np, input_b_np], 0, 0),
verbose=0)
with self.assertRaises(ValueError):
model.train_on_batch({'input_a': input_a_np},
[output_d_np, output_e_np])
with self.assertRaises(ValueError):
model.train_on_batch([input_a_np], [output_d_np, output_e_np])
with self.assertRaises(AttributeError):
model.train_on_batch(1, [output_d_np, output_e_np])
with self.assertRaises(ValueError):
model.train_on_batch(input_a_np, [output_d_np, output_e_np])
with self.assertRaises(ValueError):
bad_input = np.random.random((11, 3))
model.train_on_batch([bad_input, input_b_np],
[output_d_np, output_e_np])
with self.assertRaises(ValueError):
bad_target = np.random.random((11, 4))
model.train_on_batch([input_a_np, input_b_np],
[bad_target, output_e_np])
# Build single-input model
x = keras.layers.Input(shape=(3,), name='input_a')
y = keras.layers.Dense(4)(x)
model = keras.models.Model(x, y)
model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')
# This will work
model.fit([input_a_np], output_d_np, epochs=1)
with self.assertRaises(ValueError):
model.fit([input_a_np, input_a_np], output_d_np, epochs=1)
def test_evaluate_predict_on_arrays(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['acc', 'mae']
model.compile(
optimizer,
loss,
metrics=metrics,
loss_weights=loss_weights,
sample_weight_mode=None)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
# Test evaluate at different verbosity
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=0)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=1)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=2)
self.assertEqual(len(out), 7)
out = model.test_on_batch([input_a_np, input_b_np],
[output_d_np, output_e_np])
self.assertEqual(len(out), 7)
# Test evaluate with dictionary inputs
model.evaluate(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
batch_size=5,
verbose=0)
model.evaluate(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
batch_size=5,
verbose=1)
# Test predict
out = model.predict([input_a_np, input_b_np], batch_size=5)
self.assertEqual(len(out), 2)
out = model.predict({'input_a': input_a_np, 'input_b': input_b_np})
self.assertEqual(len(out), 2)
out = model.predict_on_batch({
'input_a': input_a_np,
'input_b': input_b_np
})
self.assertEqual(len(out), 2)
def test_invalid_loss_or_metrics(self):
num_classes = 5
train_samples = 1000
test_samples = 1000
input_dim = 5
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(num_classes))
model.add(keras.layers.Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(1337)
(x_train, y_train), (_, _) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
with self.assertRaises(ValueError):
model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))
with self.assertRaises(TypeError):
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=set(0))
with self.assertRaises(ValueError):
model.compile(loss=None,
optimizer='rms')
def test_model_methods_with_eager_tensors_multi_io(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['mae']
model.compile(
optimizer,
loss,
metrics=metrics,
loss_weights=loss_weights,
sample_weight_mode=None)
input_a = keras.backend.zeros(shape=(10, 3))
input_b = keras.backend.zeros(shape=(10, 3))
target_d = keras.backend.zeros(shape=(10, 4))
target_e = keras.backend.zeros(shape=(10, 4))
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0)
# Test: no shuffle.
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0,
shuffle=False)
# Test: validation data.
model.fit([input_a, input_b], [target_d, target_e],
epochs=1, batch_size=2, verbose=0,
validation_data=([input_a, input_b], [target_d, target_e]))
model.train_on_batch([input_a, input_b], [target_d, target_e])
model.predict([input_a, input_b], batch_size=5)
model.evaluate([input_a, input_b], [target_d, target_e],
batch_size=2, verbose=0)
model.test_on_batch([input_a, input_b], [target_d, target_e])
# Test: mix np and tensors.
input_b = np.zeros(shape=(10, 3)).astype('float32')
target_e = np.zeros(shape=(10, 4)).astype('float32')
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0)
model.fit([input_a, input_b], [target_d, target_e],
epochs=1, batch_size=2, verbose=0,
validation_data=([input_a, input_b], [target_d, target_e]))
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0,
shuffle=False)
model.train_on_batch([input_a, input_b], [target_d, target_e])
model.predict([input_a, input_b], batch_size=5)
model.evaluate([input_a, input_b], [target_d, target_e],
batch_size=2, verbose=0)
model.test_on_batch([input_a, input_b], [target_d, target_e])
def test_model_methods_with_eager_tensors_single_io(self):
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
inputs = keras.backend.zeros(shape=(10, 3))
targets = keras.backend.zeros(shape=(10, 4))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
model.fit(inputs, targets, epochs=1, batch_size=3, verbose=0, shuffle=False)
model.fit(inputs, targets, epochs=1, batch_size=4, verbose=0,
validation_data=(inputs, targets))
model.evaluate(inputs, targets, batch_size=2, verbose=0)
model.predict(inputs, batch_size=2)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
class LossWeightingTest(test.TestCase):
def test_class_weights(self):
num_classes = 5
batch_size = 5
weighted_class = 3
train_samples = 300
test_samples = 300
input_dim = 5
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(num_classes))
model.add(keras.layers.Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_test = y_test.copy()
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = 4.
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 4.
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
class_weight=class_weight,
validation_data=(x_train, y_train, sample_weight))
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
class_weight=class_weight)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
class_weight=class_weight,
validation_split=0.1)
model.train_on_batch(
x_train[:batch_size], y_train[:batch_size], class_weight=class_weight)
ref_score = model.evaluate(x_test, y_test, verbose=0)
score = model.evaluate(
x_test[test_ids, :], y_test[test_ids, :], verbose=0)
self.assertLess(score, ref_score)
def test_sample_weights(self):
num_classes = 5
batch_size = 5
weighted_class = 3
train_samples = 300
test_samples = 300
input_dim = 5
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(num_classes))
model.add(keras.layers.Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(43)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_train = y_train.copy()
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = 4.
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 4.
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
sample_weight=sample_weight)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
sample_weight=sample_weight,
validation_split=0.1)
model.train_on_batch(
x_train[:batch_size],
y_train[:batch_size],
sample_weight=sample_weight[:batch_size])
model.test_on_batch(
x_train[:batch_size],
y_train[:batch_size],
sample_weight=sample_weight[:batch_size])
def test_temporal_sample_weights(self):
num_classes = 5
weighted_class = 3
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
model.add(keras.layers.Activation('softmax'))
np.random.seed(1337)
(_, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = 2.
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 2.
with self.assertRaises(ValueError):
model.compile(
loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
sample_weight_mode='temporal')
def test_class_weight_invalid_use_case(self):
num_classes = 5
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
model.add(keras.layers.Activation('softmax'))
model.compile(
loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
del class_weight[1]
with self.assertRaises(ValueError):
model.fit(x_train, y_train,
epochs=0, verbose=0, class_weight=class_weight)
with self.assertRaises(ValueError):
model.compile(
loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
sample_weight_mode=[])
# Build multi-output model
x = keras.Input((3,))
y1 = keras.layers.Dense(4, name='1')(x)
y2 = keras.layers.Dense(4, name='2')(x)
model = keras.models.Model(x, [y1, y2])
model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')
x_np = np.random.random((10, 3))
y_np = np.random.random((10, 4))
w_np = np.random.random((10,))
# This will work
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': w_np})
# These will not
with self.assertRaises(ValueError):
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=[w_np])
with self.assertRaises(TypeError):
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=w_np)
with self.assertRaises(ValueError):
bad_w_np = np.random.random((11,))
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
with self.assertRaises(ValueError):
bad_w_np = np.random.random((10, 2))
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
with self.assertRaises(ValueError):
bad_w_np = np.random.random((10, 2, 2))
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
class CorrectnessTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
def test_loss_correctness(self):
# Test that training loss is the same in eager and graph
# (by comparing it to a reference value in a deterministic case)
model = keras.Sequential()
model.add(keras.layers.Dense(3,
activation='relu',
input_dim=4,
kernel_initializer='ones'))
model.add(keras.layers.Dense(2,
activation='softmax',
kernel_initializer='ones'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
x = np.ones((100, 4))
np.random.seed(123)
y = np.random.randint(0, 1, size=(100, 1))
history = model.fit(x, y, epochs=1, batch_size=10)
self.assertEqual(
np.around(history.history['loss'][-1], decimals=4), 0.6173)
@tf_test_util.run_in_graph_and_eager_modes()
def test_metrics_correctness(self):
model = keras.Sequential()
model.add(keras.layers.Dense(3,
activation='relu',
input_dim=4,
kernel_initializer='ones'))
model.add(keras.layers.Dense(1,
activation='sigmoid',
kernel_initializer='ones'))
model.compile(loss='mae',
metrics=['acc'],
optimizer=RMSPropOptimizer(learning_rate=0.001))
x = np.ones((100, 4))
y = np.ones((100, 1))
outs = model.evaluate(x, y)
self.assertEqual(outs[1], 1.)
y = np.zeros((100, 1))
outs = model.evaluate(x, y)
self.assertEqual(outs[1], 0.)
@tf_test_util.run_in_graph_and_eager_modes()
def test_loss_correctness_with_iterator(self):
# Test that training loss is the same in eager and graph
# (by comparing it to a reference value in a deterministic case)
model = keras.Sequential()
model.add(
keras.layers.Dense(
3, activation='relu', input_dim=4, kernel_initializer='ones'))
model.add(
keras.layers.Dense(2, activation='softmax', kernel_initializer='ones'))
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
x = np.ones((100, 4), dtype=np.float32)
np.random.seed(123)
y = np.random.randint(0, 1, size=(100, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset.make_one_shot_iterator()
history = model.fit(iterator, epochs=1, steps_per_epoch=10)
self.assertEqual(np.around(history.history['loss'][-1], decimals=4), 0.6173)
@tf_test_util.run_in_graph_and_eager_modes()
def test_metrics_correctness_with_iterator(self):
model = keras.Sequential()
model.add(
keras.layers.Dense(
8, activation='relu', input_dim=4, kernel_initializer='ones'))
model.add(
keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones'))
model.compile(
loss='binary_crossentropy',
metrics=['accuracy'],
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(123)
x = np.random.randint(10, size=(100, 4)).astype(np.float32)
y = np.random.randint(2, size=(100, 1)).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.batch(10)
iterator = dataset.make_one_shot_iterator()
outs = model.evaluate(iterator, steps=10)
self.assertEqual(np.around(outs[1], decimals=1), 0.5)
y = np.zeros((100, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset.make_one_shot_iterator()
outs = model.evaluate(iterator, steps=10)
self.assertEqual(outs[1], 0.)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| 34.512329 | 80 | 0.629237 | ["Apache-2.0"] | Caesar-github/tensorflow | tensorflow/python/keras/engine/training_eager_test.py | 25,194 | Python |
import unittest
class SampleTest(unittest.TestCase):
    def setUp(self):
        return

    def tearDown(self):
        return

    def test_non_unique_name(self):
        pass

    def test_asdf2(self):
        pass

    def test_i_am_a_unique_test_name(self):
        pass


if __name__ == '__main__':
    unittest.main()
| 14.26087 | 43 | 0.625 | ["Apache-2.0"] | mrknow/filmkodi | plugin.video.mrknow/mylib/tests_pydevd_runfiles/samples/nested_dir/nested2/deep_nest_test.py | 328 | Python |
### Package Import ###
from bson import ObjectId
from pydantic import BaseModel
from pydantic import fields
from pydantic.fields import Field
from typing import Optional
### AppCode Import ###
from Server.Model.POID import PyObjectId
###############################################################################
class User(BaseModel):
    Id: PyObjectId = Field(default_factory=PyObjectId, alias='_id')
    FirstName: str = Field(alias='FirstName')
    LastName: str = Field(alias='LastName')
    Email: str = Field(alias='Email')
    PhoneNumber: str = Field(alias='PhoneNumber')
    Password: str = Field(alias='Password')
    About: Optional[str] = Field(alias='About')
    ProfileUrl: Optional[str] = Field(alias='ProfileUrl')

    class Config:
        allow_population_by_field_name = True
        arbitrary_types_allowed = True
        json_encoders = {ObjectId: str}
        schema_extra = {
            "example": {
                "FirstName": "Jane",
                "LastName": "Doe",
                "Email": "[email protected]",
                "PhoneNumber": "6285588974456",
                "Password": "jdoee"
            }
        }
###############################################################################
class UserUpdateModel(BaseModel):
    FirstName: Optional[str] = Field(alias='FirstName')
    LastName: Optional[str] = Field(alias='LastName')
    Email: Optional[str] = Field(alias='Email')
    PhoneNumber: Optional[str] = Field(alias='PhoneNumber')
    Password: Optional[str] = Field(alias='Password')
    About: Optional[str] = Field(alias='About')
    ProfileUrl: Optional[str] = Field(alias='ProfileUrl')

    class Config:
        arbitrary_types_allowed = True
        json_encoders = {ObjectId: str}
        schema_extra = {
            "example": {
                "FirstName": "Jane",
                "LastName": "Doe",
                "Email": "[email protected]",
                "PhoneNumber": "6285588974456",
                "Password": "jdoee",
                "About": "About jane doe",
                "ProfileUrl": "https://profileurlembed.com/file/janedoe"
            }
        }
###############################################################################
| 35.854839 | 79 | 0.530364 | ["MIT"] | CorneliusTantius/TCON-API-V2 | Server/Model/ModelUser.py | 2,223 | Python |
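A brief sketch of how the `User` model above might be populated from a MongoDB-style document under pydantic v1. The field values are made up (reusing the placeholder email from the schema example), and the import path simply mirrors the repository layout shown in the row's `path` column.

```python
from bson import ObjectId
from Server.Model.ModelUser import User  # assumed import path

doc = {
    "_id": ObjectId(),                 # matched through the '_id' alias on Id
    "FirstName": "Jane",
    "LastName": "Doe",
    "Email": "[email protected]",
    "PhoneNumber": "6285588974456",
    "Password": "jdoee",
}

user = User(**doc)
print(user.dict(by_alias=True))        # emits '_id' again thanks to the Field alias
```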
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.compute as pc
import matplotlib.pyplot as plt
import seaborn as sns
from pr3d.nonbayesian import ConditionalGammaEVM
# load dataset first
file_addresses = ['dataset_onehop_processed.parquet']
table = pa.concat_tables(
pq.read_table(
file_address,columns=None,
) for file_address in file_addresses
)
df = table.to_pandas()
print(df)
# load the trained model
dtype = 'float64'
conditional_delay_model = ConditionalGammaEVM(
h5_addr = "onehop_tis_model.h5",
)
# find n most common queue_length occurances
n = 3
values_count = df[['queue_length']].value_counts()[:n].index.tolist()
print("{0} most common queue states: {1}".format(n,values_count))
# divide the service delay into n segments based on quantiles
m = 5
service_delays = np.squeeze(df[['service_delay']].to_numpy())
quants = np.linspace(0, 1, num=m+1)
intervals = [ (quant,quants[idx+1]) for idx, quant in enumerate(quants) if (idx+1)<len(quants) ]
print("{0} longer_delay_prob intervals: {1}".format(n,intervals))
#sns.set_palette("rocket")
# plot the conditional distributions of them
fig, axes = plt.subplots(nrows=n, ncols=m, figsize=(m*4,n*4))
for i in range(n):
    for j in range(m):
        ax = axes[i,j]

        # take the empirical samples
        conditional_df = df[
            (df.queue_length==values_count[i][0]) &
            (df.longer_delay_prob>=intervals[j][0]) &
            (df.longer_delay_prob<intervals[j][1])
        ]

        # sample the predictor with x (conditions) from the empirical data
        X = np.squeeze(conditional_df[['queue_length','longer_delay_prob']].to_numpy())
        conditional_samples = conditional_delay_model.sample_n(
            x = X,
            random_generator=np.random.default_rng(0),
        )

        # insert it to the dataset
        conditional_df['predicted distribution'] = conditional_samples
        conditional_df.rename(columns = {'end2end_delay':'empirical distribution'}, inplace = True)

        # plot
        sns.histplot(
            conditional_df[['empirical distribution','predicted distribution']],
            kde=True,
            ax=ax,
            stat="density",
        ).set(title="x={}, interval={}, count={}".format(
            values_count[i],
            ["{:0.2f}".format(inter) for inter in intervals[j]],
            len(conditional_df))
        )
        ax.title.set_size(10)
fig.tight_layout()
plt.savefig('conditional_delay_tis.png')
| 31.049383 | 99 | 0.662425 | ["MIT"] | samiemostafavi/conditional-latency-probability-prediction | plot_conditionals_with_tis.py | 2,515 | Python |
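To make the interval construction in the script above concrete: with `m = 5`, `np.linspace` produces six equally spaced quantile edges and the comprehension pairs them into five half-open probability bins. A standalone check:

```python
import numpy as np

m = 5
quants = np.linspace(0, 1, num=m + 1)   # six edges: 0.0, 0.2, 0.4, 0.6, 0.8, 1.0
intervals = [(quant, quants[idx + 1])
             for idx, quant in enumerate(quants)
             if (idx + 1) < len(quants)]
print(intervals)  # five bins: (0.0, 0.2), (0.2, 0.4), ..., (0.8, 1.0), up to float rounding
```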
import glob
import os
import shutil
from tests import get_device_id, get_tests_output_path, run_cli
from TTS.config.shared_configs import BaseAudioConfig
from TTS.speaker_encoder.speaker_encoder_config import SpeakerEncoderConfig
def run_test_train():
    command = (
        f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_encoder.py --config_path {config_path} "
        f"--coqpit.output_path {output_path} "
        "--coqpit.datasets.0.name ljspeech "
        "--coqpit.datasets.0.meta_file_train metadata.csv "
        "--coqpit.datasets.0.meta_file_val metadata.csv "
        "--coqpit.datasets.0.path tests/data/ljspeech "
    )
    run_cli(command)
config_path = os.path.join(get_tests_output_path(), "test_speaker_encoder_config.json")
output_path = os.path.join(get_tests_output_path(), "train_outputs")
config = SpeakerEncoderConfig(
batch_size=4,
num_speakers_in_batch=1,
num_utters_per_speaker=10,
num_loader_workers=0,
max_train_step=2,
print_step=1,
save_step=1,
print_eval=True,
audio=BaseAudioConfig(num_mels=80),
)
config.audio.do_trim_silence = True
config.audio.trim_db = 60
config.save_json(config_path)
print(config)
# train the model for one epoch
run_test_train()
# Find latest folder
continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)
# restore the model and continue training for one more epoch
command_train = (
f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_encoder.py --continue_path {continue_path} "
)
run_cli(command_train)
shutil.rmtree(continue_path)
# test resnet speaker encoder
config.model_params["model_name"] = "resnet"
config.save_json(config_path)
# train the model for one epoch
run_test_train()
# Find latest folder
continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)
# restore the model and continue training for one more epoch
command_train = (
f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_encoder.py --continue_path {continue_path} "
)
run_cli(command_train)
shutil.rmtree(continue_path)
# test model with ge2e loss function
config.loss = "ge2e"
config.save_json(config_path)
run_test_train()
# test model with angleproto loss function
config.loss = "angleproto"
config.save_json(config_path)
run_test_train()
# test model with softmaxproto loss function
config.loss = "softmaxproto"
config.save_json(config_path)
run_test_train()
| 28.941176 | 112 | 0.762195 | ["MPL-2.0"] | AI-Unicamp/TTS | tests/aux_tests/test_speaker_encoder_train.py | 2,460 | Python |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of generators to generate the final detections."""
import contextlib
from typing import List, Optional, Mapping
# Import libraries
import tensorflow as tf
from official.vision.beta.ops import box_ops
from official.vision.beta.ops import nms
from official.vision.beta.ops import preprocess_ops
def _generate_detections_v1(boxes: tf.Tensor,
scores: tf.Tensor,
attributes: Optional[Mapping[str,
tf.Tensor]] = None,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
soft_nms_sigma: Optional[float] = None):
"""Generates the final detections given the model outputs.
The implementation unrolls the batch dimension and processes images one by one.
It requires the batch dimension to be statically known and it is TPU
compatible.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
`[batch_size, N, 1, 4]` for box predictions on all feature levels. The
N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
attributes: None or a dict of (attribute_name, attributes) pairs. Each
attributes is a `tf.Tensor` with shape
`[batch_size, N, num_classes, attribute_size]` or
`[batch_size, N, 1, attribute_size]` for attribute predictions on all
feature levels. The N is the number of total anchors on all levels. Can
be None if no attribute learning is required.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A scalar representing maximum number of boxes retained
over all classes.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0 (which is default), we fall back to standard NMS.
Returns:
nms_boxes: A `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections, 4]` representing top detected boxes in
`[y1, x1, y2, x2]`.
nms_scores: A `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections]` representing sorted confidence scores
for detected boxes. The values are between `[0, 1]`.
nms_classes: An `int` type `tf.Tensor` of shape
`[batch_size, max_num_detections]` representing classes for detected
boxes.
valid_detections: An `int` type `tf.Tensor` of shape `[batch_size]` only the
top `valid_detections` boxes are valid detections.
nms_attributes: None or a dict of (attribute_name, attributes). Each
attribute is a `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections, attribute_size]` representing attribute
predictions for detected boxes. Can be an empty dict if no attribute
learning is required.
"""
with tf.name_scope('generate_detections'):
batch_size = scores.get_shape().as_list()[0]
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(batch_size):
(nmsed_boxes_i, nmsed_scores_i, nmsed_classes_i, valid_detections_i,
nmsed_att_i) = _generate_detections_per_image(
boxes[i],
scores[i],
attributes={
att_name: att[i] for att_name, att in attributes.items()
} if attributes else {},
pre_nms_top_k=pre_nms_top_k,
pre_nms_score_threshold=pre_nms_score_threshold,
nms_iou_threshold=nms_iou_threshold,
max_num_detections=max_num_detections,
soft_nms_sigma=soft_nms_sigma)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
valid_detections.append(valid_detections_i)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name].append(nmsed_att_i[att_name])
nmsed_boxes = tf.stack(nmsed_boxes, axis=0)
nmsed_scores = tf.stack(nmsed_scores, axis=0)
nmsed_classes = tf.stack(nmsed_classes, axis=0)
valid_detections = tf.stack(valid_detections, axis=0)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.stack(nmsed_attributes[att_name], axis=0)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes
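# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a minimal
# example of the tensor shapes _generate_detections_v1 consumes and produces,
# using random inputs. All concrete sizes below are assumptions for the sketch.
def _example_generate_detections_v1():
  batch_size, num_anchors, num_classes = 2, 1000, 4
  # Class-agnostic boxes: [batch, N, 1, 4]; scores: [batch, N, num_classes].
  boxes = tf.random.uniform([batch_size, num_anchors, 1, 4], maxval=100.0)
  scores = tf.random.uniform([batch_size, num_anchors, num_classes])
  nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, _ = (
      _generate_detections_v1(boxes, scores, max_num_detections=10))
  # nmsed_boxes: [2, 10, 4]; nmsed_scores, nmsed_classes: [2, 10]; valid_detections: [2].
  return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections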
def _generate_detections_per_image(
boxes: tf.Tensor,
scores: tf.Tensor,
attributes: Optional[Mapping[str, tf.Tensor]] = None,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
soft_nms_sigma: Optional[float] = None):
"""Generates the final detections per image given the model outputs.
Args:
boxes: A `tf.Tensor` with shape `[N, num_classes, 4]` or `[N, 1, 4]`, which
box predictions on all feature levels. The N is the number of total
anchors on all levels.
scores: A `tf.Tensor` with shape `[N, num_classes]`, which stacks class
probability on all feature levels. The N is the number of total anchors on
all levels. The num_classes is the number of classes predicted by the
model. Note that the class_outputs here is the raw score.
attributes: If not None, a dict of `tf.Tensor`. Each value is in shape
`[N, num_classes, attribute_size]` or `[N, 1, attribute_size]` of
attribute predictions on all feature levels. The N is the number of total
anchors on all levels.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
If set to None, `tf.image.non_max_suppression_padded` is called instead.
Returns:
nms_boxes: A `float` tf.Tensor of shape `[max_num_detections, 4]`
representing top detected boxes in `[y1, x1, y2, x2]`.
nms_scores: A `float` tf.Tensor of shape `[max_num_detections]` representing
sorted confidence scores for detected boxes. The values are between [0,
1].
nms_classes: An `int` tf.Tensor of shape `[max_num_detections]` representing
classes for detected boxes.
valid_detections: An `int` tf.Tensor of shape [1] only the top
`valid_detections` boxes are valid detections.
nms_attributes: None or a dict. Each value is a `float` tf.Tensor of shape
`[max_num_detections, attribute_size]` representing attribute predictions
for detected boxes. Can be an empty dict if `attributes` is None.
"""
nmsed_boxes = []
nmsed_scores = []
nmsed_classes = []
num_classes_for_box = boxes.get_shape().as_list()[1]
num_classes = scores.get_shape().as_list()[1]
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(num_classes):
boxes_i = boxes[:, min(num_classes_for_box - 1, i)]
scores_i = scores[:, i]
# Obtains pre_nms_top_k before running NMS.
scores_i, indices = tf.nn.top_k(
scores_i, k=tf.minimum(tf.shape(scores_i)[-1], pre_nms_top_k))
boxes_i = tf.gather(boxes_i, indices)
if soft_nms_sigma is not None:
(nmsed_indices_i,
nmsed_scores_i) = tf.image.non_max_suppression_with_scores(
tf.cast(boxes_i, tf.float32),
tf.cast(scores_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
soft_nms_sigma=soft_nms_sigma,
name='nms_detections_' + str(i))
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_boxes_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_boxes_i, max_num_detections, 0.0)
nmsed_scores_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_scores_i, max_num_detections, -1.0)
else:
(nmsed_indices_i,
nmsed_num_valid_i) = tf.image.non_max_suppression_padded(
tf.cast(boxes_i, tf.float32),
tf.cast(scores_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
pad_to_max_output_size=True,
name='nms_detections_' + str(i))
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_scores_i = tf.gather(scores_i, nmsed_indices_i)
# Sets scores of invalid boxes to -1.
nmsed_scores_i = tf.where(
tf.less(tf.range(max_num_detections), [nmsed_num_valid_i]),
nmsed_scores_i, -tf.ones_like(nmsed_scores_i))
nmsed_classes_i = tf.fill([max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
if attributes:
for att_name, att in attributes.items():
num_classes_for_attr = att.get_shape().as_list()[1]
att_i = att[:, min(num_classes_for_attr - 1, i)]
att_i = tf.gather(att_i, indices)
nmsed_att_i = tf.gather(att_i, nmsed_indices_i)
nmsed_att_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_att_i, max_num_detections, 0.0)
nmsed_attributes[att_name].append(nmsed_att_i)
# Concats results from all classes and sort them.
nmsed_boxes = tf.concat(nmsed_boxes, axis=0)
nmsed_scores = tf.concat(nmsed_scores, axis=0)
nmsed_classes = tf.concat(nmsed_classes, axis=0)
nmsed_scores, indices = tf.nn.top_k(
nmsed_scores, k=max_num_detections, sorted=True)
nmsed_boxes = tf.gather(nmsed_boxes, indices)
nmsed_classes = tf.gather(nmsed_classes, indices)
valid_detections = tf.reduce_sum(
tf.cast(tf.greater(nmsed_scores, -1), tf.int32))
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.concat(nmsed_attributes[att_name], axis=0)
nmsed_attributes[att_name] = tf.gather(nmsed_attributes[att_name],
indices)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes
def _select_top_k_scores(scores_in: tf.Tensor, pre_nms_num_detections: int):
"""Selects top_k scores and indices for each class.
Args:
scores_in: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class logit outputs on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model.
pre_nms_num_detections: Number of candidates before NMS.
Returns:
scores and indices: A `tf.Tensor` with shape
`[batch_size, pre_nms_num_detections, num_classes]`.
"""
batch_size, num_anchors, num_class = scores_in.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(scores_in)[0]
scores_trans = tf.transpose(scores_in, perm=[0, 2, 1])
scores_trans = tf.reshape(scores_trans, [-1, num_anchors])
top_k_scores, top_k_indices = tf.nn.top_k(
scores_trans, k=pre_nms_num_detections, sorted=True)
top_k_scores = tf.reshape(top_k_scores,
[batch_size, num_class, pre_nms_num_detections])
top_k_indices = tf.reshape(top_k_indices,
[batch_size, num_class, pre_nms_num_detections])
return tf.transpose(top_k_scores,
[0, 2, 1]), tf.transpose(top_k_indices, [0, 2, 1])
def _generate_detections_v2(boxes: tf.Tensor,
scores: tf.Tensor,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100):
"""Generates the final detections given the model outputs.
This implementation unrolls the classes dimension while using tf.while_loop
to implement the batched NMS, so that it can be parallelized at the batch
dimension. It should give better performance compared to the v1 implementation.
It is TPU compatible.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
`[batch_size, N, 1, 4]`, which box predictions on all feature levels. The
N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
valid_detections: An `int` tf.Tensor of shape [batch_size] only the top
`valid_detections` boxes are valid detections.
"""
with tf.name_scope('generate_detections'):
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
batch_size, _, num_classes_for_box, _ = boxes.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(boxes)[0]
_, total_anchors, num_classes = scores.get_shape().as_list()
# Selects top pre_nms_num scores and indices before NMS.
scores, indices = _select_top_k_scores(
scores, min(total_anchors, pre_nms_top_k))
for i in range(num_classes):
boxes_i = boxes[:, :, min(num_classes_for_box - 1, i), :]
scores_i = scores[:, :, i]
# Obtains pre_nms_top_k before running NMS.
boxes_i = tf.gather(boxes_i, indices[:, :, i], batch_dims=1, axis=1)
# Filter out scores.
boxes_i, scores_i = box_ops.filter_boxes_by_scores(
boxes_i, scores_i, min_score_threshold=pre_nms_score_threshold)
(nmsed_scores_i, nmsed_boxes_i) = nms.sorted_non_max_suppression_padded(
tf.cast(scores_i, tf.float32),
tf.cast(boxes_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold)
nmsed_classes_i = tf.fill([batch_size, max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
nmsed_boxes = tf.concat(nmsed_boxes, axis=1)
nmsed_scores = tf.concat(nmsed_scores, axis=1)
nmsed_classes = tf.concat(nmsed_classes, axis=1)
nmsed_scores, indices = tf.nn.top_k(
nmsed_scores, k=max_num_detections, sorted=True)
nmsed_boxes = tf.gather(nmsed_boxes, indices, batch_dims=1, axis=1)
nmsed_classes = tf.gather(nmsed_classes, indices, batch_dims=1)
valid_detections = tf.reduce_sum(
input_tensor=tf.cast(tf.greater(nmsed_scores, -1), tf.int32), axis=1)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
def _generate_detections_batched(boxes: tf.Tensor, scores: tf.Tensor,
pre_nms_score_threshold: float,
nms_iou_threshold: float,
max_num_detections: int):
"""Generates detected boxes with scores and classes for one-stage detector.
The function takes output of multi-level ConvNets and anchor boxes and
generates detected boxes. Note that this uses batched NMS, which is not
supported on TPU currently.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
`[batch_size, N, 1, 4]`, which box predictions on all feature levels. The
N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
valid_detections: An `int` tf.Tensor of shape [batch_size] only the top
`valid_detections` boxes are valid detections.
"""
with tf.name_scope('generate_detections'):
nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (
tf.image.combined_non_max_suppression(
boxes,
scores,
max_output_size_per_class=max_num_detections,
max_total_size=max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
pad_per_class=False,
clip_boxes=False))
nmsed_classes = tf.cast(nmsed_classes, tf.int32)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
@tf.keras.utils.register_keras_serializable(package='Vision')
class DetectionGenerator(tf.keras.layers.Layer):
"""Generates the final detected boxes with scores and classes."""
def __init__(self,
apply_nms: bool = True,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
nms_version: str = 'v2',
use_cpu_nms: bool = False,
soft_nms_sigma: Optional[float] = None,
**kwargs):
"""Initializes a detection generator.
Args:
apply_nms: A `bool` of whether or not to apply non-maximum suppression.
If False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are
thrown away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
max_num_detections: An `int` of the final number of total detections to
generate.
nms_version: A string of `batched`, `v1` or `v2` specifies NMS version.
use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
**kwargs: Additional keyword arguments passed to Layer.
"""
self._config_dict = {
'apply_nms': apply_nms,
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'nms_iou_threshold': nms_iou_threshold,
'max_num_detections': max_num_detections,
'nms_version': nms_version,
'use_cpu_nms': use_cpu_nms,
'soft_nms_sigma': soft_nms_sigma,
}
super(DetectionGenerator, self).__init__(**kwargs)
def __call__(self,
raw_boxes: tf.Tensor,
raw_scores: tf.Tensor,
anchor_boxes: tf.Tensor,
image_shape: tf.Tensor,
regression_weights: Optional[List[float]] = None,
bbox_per_class: bool = True):
"""Generates final detections.
Args:
raw_boxes: A `tf.Tensor` of shape of `[batch_size, K, num_classes * 4]`
representing the class-specific box coordinates relative to anchors.
raw_scores: A `tf.Tensor` of shape of `[batch_size, K, num_classes]`
representing the class logits before applying score activation.
anchor_boxes: A `tf.Tensor` of shape of `[batch_size, K, 4]` representing
the corresponding anchor boxes w.r.t `box_outputs`.
image_shape: A `tf.Tensor` of shape of `[batch_size, 2]` storing the image
height and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
regression_weights: A list of four float numbers to scale coordinates.
bbox_per_class: A `bool`. If True, perform per-class box regression.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` `tf.Tensor` of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
`num_detections`: An `int` tf.Tensor of shape [batch] only the first
`num_detections` boxes are valid detections
If `apply_nms` = False, the return is a dictionary with keys:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
[batch, num_raw_boxes] representing socres of all the decoded boxes.
"""
box_scores = tf.nn.softmax(raw_scores, axis=-1)
# Removes the background class.
box_scores_shape = tf.shape(box_scores)
box_scores_shape_list = box_scores.get_shape().as_list()
batch_size = box_scores_shape[0]
num_locations = box_scores_shape_list[1]
num_classes = box_scores_shape_list[-1]
box_scores = tf.slice(box_scores, [0, 0, 1], [-1, -1, -1])
if bbox_per_class:
num_detections = num_locations * (num_classes - 1)
raw_boxes = tf.reshape(raw_boxes,
[batch_size, num_locations, num_classes, 4])
raw_boxes = tf.slice(raw_boxes, [0, 0, 1, 0], [-1, -1, -1, -1])
anchor_boxes = tf.tile(
tf.expand_dims(anchor_boxes, axis=2), [1, 1, num_classes - 1, 1])
raw_boxes = tf.reshape(raw_boxes, [batch_size, num_detections, 4])
anchor_boxes = tf.reshape(anchor_boxes, [batch_size, num_detections, 4])
# Box decoding.
decoded_boxes = box_ops.decode_boxes(
raw_boxes, anchor_boxes, weights=regression_weights)
# Box clipping
decoded_boxes = box_ops.clip_boxes(
decoded_boxes, tf.expand_dims(image_shape, axis=1))
if bbox_per_class:
decoded_boxes = tf.reshape(
decoded_boxes, [batch_size, num_locations, num_classes - 1, 4])
else:
decoded_boxes = tf.expand_dims(decoded_boxes, axis=2)
if not self._config_dict['apply_nms']:
return {
'decoded_boxes': decoded_boxes,
'decoded_box_scores': box_scores,
}
# Optionally force the NMS be run on CPU.
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if self._config_dict['nms_version'] == 'batched':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_batched(
decoded_boxes, box_scores,
self._config_dict['pre_nms_score_threshold'],
self._config_dict['nms_iou_threshold'],
self._config_dict['max_num_detections']))
elif self._config_dict['nms_version'] == 'v1':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, _) = (
_generate_detections_v1(
decoded_boxes,
box_scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
soft_nms_sigma=self._config_dict['soft_nms_sigma']))
elif self._config_dict['nms_version'] == 'v2':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_v2(
decoded_boxes,
box_scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections']))
else:
raise ValueError('NMS version {} not supported.'.format(
self._config_dict['nms_version']))
# Adds 1 to offset the background class which has index 0.
nmsed_classes += 1
return {
'num_detections': valid_detections,
'detection_boxes': nmsed_boxes,
'detection_classes': nmsed_classes,
'detection_scores': nmsed_scores,
}
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
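# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): DetectionGenerator is
# a Keras layer, so the NMS settings described above round-trip through
# get_config()/from_config(). The argument values are arbitrary examples.
def _example_detection_generator_config_roundtrip():
  generator = DetectionGenerator(nms_version='v2', max_num_detections=50)
  config = generator.get_config()
  rebuilt = DetectionGenerator.from_config(config)
  return rebuilt.get_config() == config  # True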
@tf.keras.utils.register_keras_serializable(package='Vision')
class MultilevelDetectionGenerator(tf.keras.layers.Layer):
"""Generates detected boxes with scores and classes for one-stage detector."""
def __init__(self,
apply_nms: bool = True,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
nms_version: str = 'v1',
use_cpu_nms: bool = False,
soft_nms_sigma: Optional[float] = None,
**kwargs):
"""Initializes a multi-level detection generator.
Args:
      apply_nms: A `bool` of whether or not to apply non-maximum suppression.
        If False, the decoded boxes and their scores are returned.
      pre_nms_top_k: An `int` of the number of top-scoring proposals to be kept
        before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are thrown
away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
max_num_detections: An `int` of the final number of total detections to
generate.
      nms_version: A string of `batched`, `v1` or `v2` that specifies the NMS
        version.
      use_cpu_nms: A `bool` of whether or not to enforce NMS to run on CPU.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
**kwargs: Additional keyword arguments passed to Layer.
"""
self._config_dict = {
'apply_nms': apply_nms,
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'nms_iou_threshold': nms_iou_threshold,
'max_num_detections': max_num_detections,
'nms_version': nms_version,
'use_cpu_nms': use_cpu_nms,
'soft_nms_sigma': soft_nms_sigma,
}
super(MultilevelDetectionGenerator, self).__init__(**kwargs)
def _decode_multilevel_outputs(
self,
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
anchor_boxes: tf.Tensor,
image_shape: tf.Tensor,
raw_attributes: Optional[Mapping[str, tf.Tensor]] = None):
"""Collects dict of multilevel boxes, scores, attributes into lists."""
boxes = []
scores = []
if raw_attributes:
attributes = {att_name: [] for att_name in raw_attributes.keys()}
else:
attributes = {}
levels = list(raw_boxes.keys())
min_level = int(min(levels))
max_level = int(max(levels))
for i in range(min_level, max_level + 1):
raw_boxes_i = raw_boxes[str(i)]
raw_scores_i = raw_scores[str(i)]
batch_size = tf.shape(raw_boxes_i)[0]
(_, feature_h_i, feature_w_i,
num_anchors_per_locations_times_4) = raw_boxes_i.get_shape().as_list()
num_locations = feature_h_i * feature_w_i
num_anchors_per_locations = num_anchors_per_locations_times_4 // 4
num_classes = raw_scores_i.get_shape().as_list(
)[-1] // num_anchors_per_locations
      # Applies score transformation and removes the implicit background class.
scores_i = tf.sigmoid(
tf.reshape(raw_scores_i, [
batch_size, num_locations * num_anchors_per_locations, num_classes
]))
scores_i = tf.slice(scores_i, [0, 0, 1], [-1, -1, -1])
# Box decoding.
# The anchor boxes are shared for all data in a batch.
# One stage detector only supports class agnostic box regression.
anchor_boxes_i = tf.reshape(
anchor_boxes[str(i)],
[batch_size, num_locations * num_anchors_per_locations, 4])
raw_boxes_i = tf.reshape(
raw_boxes_i,
[batch_size, num_locations * num_anchors_per_locations, 4])
boxes_i = box_ops.decode_boxes(raw_boxes_i, anchor_boxes_i)
# Box clipping.
boxes_i = box_ops.clip_boxes(
boxes_i, tf.expand_dims(image_shape, axis=1))
boxes.append(boxes_i)
scores.append(scores_i)
if raw_attributes:
for att_name, raw_att in raw_attributes.items():
attribute_size = raw_att[str(
i)].get_shape().as_list()[-1] // num_anchors_per_locations
att_i = tf.reshape(raw_att[str(i)], [
batch_size, num_locations * num_anchors_per_locations,
attribute_size
])
attributes[att_name].append(att_i)
boxes = tf.concat(boxes, axis=1)
boxes = tf.expand_dims(boxes, axis=2)
scores = tf.concat(scores, axis=1)
if raw_attributes:
for att_name in raw_attributes.keys():
attributes[att_name] = tf.concat(attributes[att_name], axis=1)
attributes[att_name] = tf.expand_dims(attributes[att_name], axis=2)
return boxes, scores, attributes
def __call__(self,
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
anchor_boxes: tf.Tensor,
image_shape: tf.Tensor,
raw_attributes: Optional[Mapping[str, tf.Tensor]] = None):
"""Generates final detections.
Args:
      raw_boxes: A `dict` with keys representing FPN levels and values
        representing box tensors of shape `[batch, feature_h, feature_w,
        num_anchors * 4]`.
      raw_scores: A `dict` with keys representing FPN levels and values
        representing logit tensors of shape `[batch, feature_h, feature_w,
        num_anchors * num_classes]`.
anchor_boxes: A `tf.Tensor` of shape of [batch_size, K, 4] representing
the corresponding anchor boxes w.r.t `box_outputs`.
image_shape: A `tf.Tensor` of shape of [batch_size, 2] storing the image
height and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
raw_attributes: If not None, a `dict` of (attribute_name,
attribute_prediction) pairs. `attribute_prediction` is a dict that
        contains keys representing FPN levels and values representing tensors of
shape `[batch, feature_h, feature_w, num_anchors * attribute_size]`.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` tf.Tensor of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
        `num_detections`: An `int` tf.Tensor of shape [batch]; only the first
          `num_detections` boxes are valid detections.
`detection_attributes`: A dict. Values of the dict is a `float`
tf.Tensor of shape [batch, max_num_detections, attribute_size]
representing attribute predictions for detected boxes.
If `apply_nms` = False, the return is a dictionary with keys:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
          [batch, num_raw_boxes] representing scores of all the decoded boxes.
`decoded_box_attributes`: A dict. Values in the dict is a
`float` tf.Tensor of shape [batch, num_raw_boxes, attribute_size]
representing attribute predictions of all the decoded boxes.
"""
boxes, scores, attributes = self._decode_multilevel_outputs(
raw_boxes, raw_scores, anchor_boxes, image_shape, raw_attributes)
if not self._config_dict['apply_nms']:
return {
'decoded_boxes': boxes,
'decoded_box_scores': scores,
'decoded_box_attributes': attributes,
}
# Optionally force the NMS to run on CPU.
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if raw_attributes and (self._config_dict['nms_version'] != 'v1'):
raise ValueError(
'Attribute learning is only supported for NMSv1 but NMS {} is used.'
.format(self._config_dict['nms_version']))
if self._config_dict['nms_version'] == 'batched':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_batched(
boxes, scores, self._config_dict['pre_nms_score_threshold'],
self._config_dict['nms_iou_threshold'],
self._config_dict['max_num_detections']))
        # Set `nmsed_attributes` to an empty dict for batched NMS.
nmsed_attributes = {}
elif self._config_dict['nms_version'] == 'v1':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections,
nmsed_attributes) = (
_generate_detections_v1(
boxes,
scores,
attributes=attributes if raw_attributes else None,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
soft_nms_sigma=self._config_dict['soft_nms_sigma']))
elif self._config_dict['nms_version'] == 'v2':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_v2(
boxes,
scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections']))
        # Set `nmsed_attributes` to an empty dict for v2.
nmsed_attributes = {}
else:
raise ValueError('NMS version {} not supported.'.format(
self._config_dict['nms_version']))
# Adds 1 to offset the background class which has index 0.
nmsed_classes += 1
return {
'num_detections': valid_detections,
'detection_boxes': nmsed_boxes,
'detection_classes': nmsed_classes,
'detection_scores': nmsed_scores,
'detection_attributes': nmsed_attributes,
}
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
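# Construction sketch (illustrative; the input tensors and their FPN level keys
# are assumptions, not values defined in this file):
#
#   generator = MultilevelDetectionGenerator(
#       apply_nms=True, nms_version='v2', max_num_detections=100)
#   outputs = generator(raw_boxes, raw_scores, anchor_boxes, image_shape)
#   # outputs['detection_boxes'] has shape [batch, max_num_detections, 4].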
| 45.275498 | 85 | 0.674029 | [
"Apache-2.0"
] | 915067906/models | official/vision/beta/modeling/layers/detection_generator.py | 38,620 | Python |
class Script(object):
START_MSG = """<b>Hello {} How are you🌹,
I'm an advanced filter bot with many capabilities!
Edit By @Yash_607
See <i>/help</i> for commands and more details.</b>
"""
HELP_MSG = """
<i>Add me as admin in your group and start filtering :)</i>
<b>Basic Commands;</b>
/start - Check if I'm alive!
/help - Command help
/about - Something about me!
<b>Filter Commands;</b>
<code>/add name reply</code> - Add filter for name
<code>/del name</code> - Delete filter
<code>/delall</code> - Delete all filters (Group Owner Only!)
<code>/viewfilters</code> - List all filters in chat
<b>Connection Commands;</b>
<code>/connect groupid</code> - Connect your group to my PM. You can also simply use
<code>/connect</code> in groups.
<code>/connections</code> - Manage your connections.
<b>Extras;</b>
/status - Shows current status of your bot (Auth User Only)
/id - Shows ID information
<code>/info userid</code> - Shows user information. Use <code>/info</code> as a reply to a message to get that user's details!
<b>© @RJMALLU </b>
"""
ABOUT_MSG = """⭕️<b>My Name :</b> <a href='http://t.me/Poli_ano_bot/'UNLIMITED FILTER BOT RJ</a>
⭕️<b>Creater :</b> <a href= 'https://t.me/RJMALLU/'RJ</a>
⭕️<b>Language :</b> <code>Python3</code>
⭕️<b>Library :</b> <a href='https://docs.pyrogram.org/'>Pyrogram 1.0.7</a>
"""
| 21.53125 | 120 | 0.650943 | [
"MIT"
] | Apnamovi/Unlimited-filter-bot-RJ | script.py | 1,398 | Python |
from django.contrib import admin
from .models import Car, CarShop, RepairStation, RepairWork, Reapir, Person, Component
# Register your models here.
admin.site.register(Car)
admin.site.register(CarShop)
admin.site.register(Reapir)
admin.site.register(RepairWork)
admin.site.register(RepairStation)
admin.site.register(Person)
admin.site.register(Component)
| 29.833333 | 86 | 0.818436 | [
"MIT"
] | mav10/dataVisualization | term_project/backend/api/admin.py | 358 | Python |
# -*- coding: utf-8 -*-
"""Launchd plist plugin."""
from __future__ import unicode_literals
from dfdatetime import semantic_time as dfdatetime_semantic_time
from plaso.containers import plist_event
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import plist
from plaso.parsers.plist_plugins import interface
class LaunchdPlugin(interface.PlistPlugin):
"""Basic plugin to extract launchd configuration information.
Further details about fields within the key:
Label:
the required key for uniquely identifying the launchd service.
Program:
absolute path to the executable. required in the absence of the
ProgramArguments key.
ProgramArguments:
command-line flags for the executable. required in the absence of the
Program key.
UserName:
the job run as the specified user.
GroupName:
the job run as the specified group.
"""
NAME = 'launchd_plist'
DESCRIPTION = 'Parser for Launchd plist files.'
# The PLIST_PATH is dynamic, the prefix filename is, by default, named using
# reverse-domain notation. For example, Chrome is com.google.chrome.plist.
# /System/Library/LaunchDaemons/*.plist
# /System/Library/LaunchAgents/*.plist
# /Library/LaunchDaemons/*.plist
# /Library/LaunchAgents/*.plist
# ~/Library/LaunchAgents
PLIST_KEYS = frozenset([
'Label',
'Program',
'ProgramArguments',
'UserName',
'GroupName',
])
# pylint: disable=arguments-differ
def Process(self, parser_mediator, plist_name, top_level, **kwargs):
"""Check if it is a valid MacOS plist file name.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
plist_name (str): name of the plist.
top_level (dict[str, object]): plist top-level key.
"""
super(LaunchdPlugin, self).Process(
parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)
# pylint: disable=arguments-differ
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
"""Extracts launchd information from the plist.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
top_level (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
"""
label = top_level.get('Label')
command = top_level.get('Program', '')
program_arguments = top_level.get('ProgramArguments')
for argument in program_arguments:
command += " %s" % argument
user_name = top_level.get('UserName')
group_name = top_level.get('GroupName')
event_data = plist_event.PlistTimeEventData()
event_data.desc = ('Launchd service config {0:s} points to {1:s} with '
'user:{2:s} group:{3:s}').format(label, command,
user_name, group_name)
event_data.key = 'launchdServiceConfig'
event_data.root = '/'
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data)
plist.PlistParser.RegisterPlugin(LaunchdPlugin)
| 33.777778 | 78 | 0.704246 | [
"Apache-2.0"
] | ddm1004/plaso | plaso/parsers/plist_plugins/launchd.py | 3,344 | Python |
import atexit
from .MecanumRover_MotorDriver import MecanumRover_MotorDriver
import traitlets
from traitlets.config.configurable import Configurable
class Motor(Configurable):
value = traitlets.Float()
# config
alpha = traitlets.Float(default_value=1.0).tag(config=True)
beta = traitlets.Float(default_value=0.0).tag(config=True)
def __init__(self, driver, channel, *args, **kwargs):
super(Motor, self).__init__(*args, **kwargs) # initializes traitlets
self._driver = driver
self._motor = self._driver.getMotor(channel)
atexit.register(self._release)
@traitlets.observe('value')
def _observe_value(self, change):
self._write_value(change['new'])
def _write_value(self, value):
"""Sets motor value between [-1, 1]"""
        # Filter out small value jitter from joysticks and similar inputs
if abs(value) <= 0.05:
value = 0.0
        # Convert to the motor's target speed in mm/s (max 1300 mm/s)
mapped_value = int(1300.0 * (self.alpha * value + self.beta))
speed = min(max(mapped_value, -1300), 1300)
self._motor.setSpeed(speed)
def _release(self):
"""Stops motor by releasing control"""
self._motor.setSpeed(0)
| 29.7 | 77 | 0.655724 | [
"MIT"
] | vstoneofficial/jetbot-mecanum | jetbot/motor.py | 1,248 | Python |
from krgram.tl.core_types.native import TL_string
from krgram.tl.base import *
class getConfig(TLFunction):
ID = 0xc4f9186b
TLRegister.register(getConfig)
class getNearestDc(TLFunction):
ID = 0x1fb33026
TLRegister.register(getNearestDc)
class getAppUpdate(TLFunction):
ID = 0xc812ac7e
def get_structure(self):
return ("device_model", TL_string()), ("system_version", TL_string()), \
("app_version", TL_string()), ("lang_code", TL_string()),
TLRegister.register(getAppUpdate)
class saveAppLog(TLFunction):
ID = 0x6f02f748
def get_structure(self):
return ("events", Vector()),
TLRegister.register(saveAppLog)
class getInviteText(TLFunction):
ID = 0xa4a95186
def get_structure(self):
return ("lang_code", TL_string()),
TLRegister.register(getInviteText)
class getSupport(TLFunction):
ID = 0x9cdf08cd
TLRegister.register(getSupport)
| 15.945455 | 74 | 0.749145 | [
"MIT"
] | krow89/krgram | krgram/tl/api/functions/help.py | 877 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
函数
在python中函数默认的返回对象是None
"""
# The default return value is None
def hello():
print("Hello World!")
print(type(hello()))
# Multiple objects can be returned; by default they are packed into a tuple
def foo():
return ['xyz', 1000, -98.6]
x, y, z = foo()
print(x, y, z)
# Keyword arguments
def foo1(x):
print(x)
foo1(x='abc')
"""
创建函数
def function_name(arguments):
"function documentation string"
function body suite
"""
def helloSomeOne(who):
"""hello to someone"""
print("hello" + who)
print(helloSomeOne.__doc__)
"""
内部/内嵌函数
如果内部函数的定义包含了在外部函数里定义的对象的引用,内部函数被称为闭包
"""
def fo():
def ba():
print("ba called")
print("fo called")
ba()
fo()
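# A closure example (added for illustration): the inner function captures the
# variable n from the enclosing scope and keeps it alive after make_adder returns.
def make_adder(n):
    def add(x):
        return x + n
    return add
add5 = make_adder(5)
print(add5(10))  # 15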
"""
传递函数
函数是可以被引用的(访问或者以其他变量作为别名)
对对象是函数,这个对象的所有别名都是可以调用的
"""
def foo():
print("in foo()")
bar = foo
bar()
def convert(func, seq):
return [func(eachNum) for eachNum in seq]
myseq = (123, 45.67, -6.2e8, 999999)
print(convert(int, myseq))
print(convert(float, myseq))
| 10.896552 | 45 | 0.613924 | [
"Apache-2.0"
] | technonac/studycode | python/python_function/func.py | 1,216 | Python |
# -*- coding: utf-8 -*-
"""
Disaster Victim Identification, Controllers
@author: nursix
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def s3_menu_postp():
# @todo: rewrite this for new framework
menu_selected = []
body_id = s3mgr.get_session("dvi", "body")
if body_id:
body = s3db.dvi_body
query = (body.id == body_id)
record = db(query).select(body.id, body.pe_label,
limitby=(0,1)).first()
if record:
label = record.pe_label
response.menu_options[-3][-1].append(
[T("Candidate Matches for Body %s" % label),
False, URL(f="person",
vars=dict(match=record.id))]
)
menu_selected.append(
["%s: %s" % (T("Body"), label),
False, URL(f="body", args=[record.id])]
)
person_id = s3mgr.get_session("pr", "person")
if person_id:
person = s3db.pr_person
query = (person.id == person_id)
record = db(query).select(person.id, limitby=(0, 1)).first()
if record:
name = s3db.pr_person_represent(record.id)
menu_selected.append(
["%s: %s" % (T("Person"), name),
False, URL(f="person", args=[record.id])]
)
if menu_selected:
menu_selected = [T("Open recent"), True, None, menu_selected]
response.menu_options.append(menu_selected)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
try:
module_name = settings.modules[module].name_nice
except:
module_name = T("Disaster Victim Identification")
table = s3db.dvi_body
total = db(table.deleted == False).count()
itable = s3db.dvi_identification
query = (table.deleted == False) & \
(itable.pe_id == table.pe_id) & \
(itable.deleted == False) & \
(itable.status == 3)
identified = db(query).count()
status = [[str(T("identified")), int(identified)],
[str(T("unidentified")), int(total-identified)]]
response.title = module_name
return dict(module_name=module_name,
total=total,
status=json.dumps(status))
# -----------------------------------------------------------------------------
def recreq():
""" Recovery Requests List """
table = s3db.dvi_recreq
table.person_id.default = s3_logged_in_person()
def prep(r):
if r.interactive and not r.record:
table.status.readable = False
table.status.writable = False
table.bodies_recovered.readable = False
table.bodies_recovered.writable = False
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def morgue():
""" Morgue Registry """
morgue_tabs = [(T("Morgue Details"), ""),
(T("Bodies"), "body")]
rheader = S3ResourceHeader([
[(T("Morgue"), "name")]
], tabs=morgue_tabs)
# Pre-processor
def prep(r):
# Location Filter
s3db.gis_location_filter(r)
if r.interactive and r.id and not r.component:
field = r.table.obsolete
field.readable = field.writable = True
return True
s3.prep = prep
output = s3_rest_controller(rheader=rheader)
return output
# -----------------------------------------------------------------------------
def body():
""" Dead Bodies Registry """
gender_opts = s3db.pr_gender_opts
gender_opts[1] = T("unknown")
btable = s3db.dvi_body
itable = s3db.dvi_identification
status = request.get_vars.get("status", None)
if status == "unidentified":
query = (itable.deleted == False) & \
(itable.status == 3)
ids = db(query).select(itable.pe_id)
ids = [i.pe_id for i in ids]
if ids:
s3.filter = (~(btable.pe_id.belongs(ids)))
s3db.configure("dvi_body", main="pe_label", extra="gender")
ntable = s3db.pr_note
ntable.status.readable = False
ntable.status.writable = False
dvi_tabs = [(T("Recovery"), ""),
(T("Checklist"), "checklist"),
(T("Images"), "image"),
(T("Physical Description"), "physical_description"),
(T("Effects Inventory"), "effects"),
(T("Journal"), "note"),
(T("Identification"), "identification")]
rheader = S3ResourceHeader([
[(T("ID Tag Number"), "pe_label")],
["gender"],
["age_group"],
], tabs=dvi_tabs)
output = s3_rest_controller(rheader=rheader)
return output
# -----------------------------------------------------------------------------
def person():
""" Missing Persons Registry (Match Finder) """
table = s3db.pr_person
s3.crud_strings["pr_person"].update(
title_display = T("Missing Person Details"),
title_list = T("Missing Persons"),
label_list_button = T("List Missing Persons"),
msg_list_empty = T("No Persons found"),
msg_no_match = T("No Persons currently reported missing"))
s3db.configure("pr_group_membership",
list_fields=["id",
"group_id",
"group_head",
"description"
])
s3db.configure("pr_person",
listadd=False,
editable=False,
deletable=False,
list_fields=["id",
"first_name",
"middle_name",
"last_name",
"picture",
"gender",
"age_group"
])
def prep(r):
if not r.id and not r.method and not r.component:
body_id = r.get_vars.get("match", None)
body = db(db.dvi_body.id == body_id).select(
db.dvi_body.pe_label, limitby=(0, 1)).first()
label = body and body.pe_label or "#%s" % body_id
if body_id:
query = dvi_match_query(body_id)
r.resource.add_filter(query)
s3.crud_strings["pr_person"].update(
#subtitle_list = T("Candidate Matches for Body %s" % label),
msg_no_match = T("No matching records found"))
return True
s3.prep = prep
field = table.missing
field.readable = False
field.writable = False
field.default = True
table.age_group.readable = True
table.age_group.writable = True
# Show only missing persons in list views
if len(request.args) == 0:
s3.filter = (db.pr_person.missing == True)
mpr_tabs = [
(T("Missing Report"), "missing_report"),
(T("Person Details"), None),
(T("Physical Description"), "physical_description"),
(T("Images"), "image"),
(T("Identity"), "identity"),
(T("Address"), "address"),
(T("Contact Data"), "contact"),
(T("Journal"), "note"),
]
rheader = lambda r: s3db.pr_rheader(r, tabs=mpr_tabs)
output = s3_rest_controller("pr", "person",
main="first_name",
extra="last_name",
rheader=rheader)
return output
# -------------------------------------------------------------------------
def dvi_match_query(body_id):
"""
Get a query for candidate matches between the missing
persons registry and a dead body
@param body_id: the dvi_body record ID
"""
ptable = s3db.pr_person
ntable = s3db.pr_note
btable = s3db.dvi_body
query = ((ptable.deleted == False) &
(ptable.missing == True) &
(ntable.pe_id == ptable.pe_id) &
(ntable.status == 1))
body = btable[body_id]
if not body:
return query
# last seen should be before date of recovery
if body.date_of_recovery:
q = ((ntable.timestmp <= body.date_of_recovery) |
(ntable.timestmp == None))
query = query & q
# age group should match
if body.age_group and body.age_group != 1:
q = ((ptable.age_group == None) |
(ptable.age_group == 1) |
(ptable.age_group == body.age_group))
query = query & q
# gender should match
    if body.gender and body.gender != 1:
        q = ((ptable.gender == None) |
             (ptable.gender == 1) |
             (ptable.gender == body.gender))
        query = query & q
    return query
# -----------------------------------------------------------------------------
def tooltip():
""" Ajax Tooltips """
formfield = request.vars.get("formfield", None)
if formfield:
response.view = "pr/ajaxtips/%s.html" % formfield
return dict()
# END =========================================================================
| 32.571429 | 80 | 0.487573 | [
"MIT"
] | andygimma/eden | controllers/dvi.py | 9,576 | Python |
# -*- coding: utf-8 -*-
# Authors: Mainak Jas <[email protected]>
# Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
import copy
import glob
import os
import os.path as op
import shutil
import numpy as np
from numpy.testing import assert_equal
import pytest
from matplotlib import pyplot as plt
from mne import Epochs, read_events, read_evokeds
from mne.io import read_raw_fif
from mne.datasets import testing
from mne.report import Report, open_report, _ReportScraper
from mne.utils import (_TempDir, requires_mayavi, requires_nibabel, Bunch,
run_tests_if_main, traits_test, requires_h5py)
from mne.viz import plot_alignment
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
report_dir = op.join(data_dir, 'MEG', 'sample')
raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')
ms_fname = op.join(data_dir, 'SSS', 'test_move_anon_raw.fif')
event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')
cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')
fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')
inv_fname = op.join(report_dir,
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
'data'))
evoked_fname = op.join(base_dir, 'test-ave.fif')
def _get_example_figures():
"""Create two example figures."""
fig1 = plt.plot([1, 2], [1, 2])[0].figure
fig2 = plt.plot([3, 4], [3, 4])[0].figure
return [fig1, fig2]
@pytest.mark.slowtest
@testing.requires_testing_data
def test_render_report():
"""Test rendering -*.fif files for mne report."""
tempdir = _TempDir()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
for a, b in [[raw_fname, raw_fname_new],
[ms_fname, ms_fname_new],
[event_fname, event_fname_new],
[cov_fname, cov_fname_new],
[fwd_fname, fwd_fname_new],
[inv_fname, inv_fname_new]]:
shutil.copyfile(a, b)
# create and add -epo.fif and -ave.fif files
epochs_fname = op.join(tempdir, 'temp-epo.fif')
evoked_fname = op.join(tempdir, 'temp-ave.fif')
# Speed it up by picking channels
raw = read_raw_fif(raw_fname_new, preload=True)
raw.pick_channels(['MEG 0111', 'MEG 0121'])
raw.del_proj()
epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
epochs.save(epochs_fname, overwrite=True)
# This can take forever (stall Travis), so let's make it fast
# Also, make sure crop range is wide enough to avoid rendering bug
epochs.average().crop(0.1, 0.2).save(evoked_fname)
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, on_error='raise')
assert repr(report)
# Check correct paths and filenames
fnames = glob.glob(op.join(tempdir, '*.fif'))
for fname in fnames:
assert (op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert (''.join(report.html).find(op.basename(fname)) != -1)
assert_equal(len(report.fnames), len(fnames))
assert_equal(len(report.html), len(report.fnames))
assert_equal(len(report.fnames), len(report))
# Check saving functionality
report.data_path = tempdir
fname = op.join(tempdir, 'report.html')
report.save(fname=fname, open_browser=False)
assert (op.isfile(fname))
with open(fname, 'rb') as fid:
html = fid.read().decode('utf-8')
assert '(MaxShield on)' in html
assert_equal(len(report.html), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving same report to new filename
report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
assert (op.isfile(op.join(tempdir, 'report2.html')))
# Check overwriting file
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
overwrite=True)
assert (op.isfile(op.join(tempdir, 'report.html')))
# Check pattern matching with multiple patterns
pattern = ['*raw.fif', '*eve.fif']
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, pattern=pattern)
assert (repr(report))
    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
        glob.glob(op.join(tempdir, '*eve.fif'))
for fname in fnames:
assert (op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert (''.join(report.html).find(op.basename(fname)) != -1)
pytest.raises(ValueError, Report, image_format='foo')
pytest.raises(ValueError, Report, image_format=None)
# SVG rendering
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
image_format='svg')
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, on_error='raise')
# ndarray support smoke test
report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')
with pytest.raises(TypeError, match='Each fig must be a'):
report.add_figs_to_section('foo', 'caption', 'section')
with pytest.raises(TypeError, match='Each fig must be a'):
report.add_figs_to_section(['foo'], 'caption', 'section')
@testing.requires_testing_data
def test_report_raw_psd_and_date():
"""Test report raw PSD and DATE_NONE functionality."""
with pytest.raises(TypeError, match='dict'):
Report(raw_psd='foo')
tempdir = _TempDir()
raw = read_raw_fif(raw_fname).crop(0, 1.).load_data()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
raw.save(raw_fname_new)
report = Report(raw_psd=True)
report.parse_folder(data_path=tempdir, render_bem=False,
on_error='raise')
assert isinstance(report.html, list)
assert 'PSD' in ''.join(report.html)
assert 'GMT' in ''.join(report.html)
# DATE_NONE functionality
report = Report()
raw.anonymize()
raw.save(raw_fname_new, overwrite=True)
report.parse_folder(data_path=tempdir, render_bem=False,
on_error='raise')
assert isinstance(report.html, list)
assert 'GMT' not in ''.join(report.html)
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_render_add_sections():
"""Test adding figures/images to section."""
tempdir = _TempDir()
report = Report(subjects_dir=subjects_dir)
# Check add_figs_to_section functionality
fig = plt.plot([1, 2], [1, 2])[0].figure
report.add_figs_to_section(figs=fig, # test non-list input
captions=['evoked response'], scale=1.2,
image_format='svg')
pytest.raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
captions='H')
pytest.raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=0, image_format='svg')
pytest.raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=1e-10, image_format='svg')
# need to recreate because calls above change size
fig = plt.plot([1, 2], [1, 2])[0].figure
# Check add_images_to_section with png
img_fname = op.join(tempdir, 'testimage.png')
fig.savefig(img_fname)
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
pytest.raises(ValueError, report.add_images_to_section,
fnames=[img_fname, img_fname], captions='H')
pytest.raises(ValueError, report.add_images_to_section,
fnames=['foobar.xxx'], captions='H')
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(-0.2, 0.0))
fig = plot_alignment(evoked.info, trans_fname, subject='sample',
subjects_dir=subjects_dir)
report.add_figs_to_section(figs=fig, # test non-list input
captions='random image', scale=1.2)
assert (repr(report))
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_mayavi
@traits_test
@requires_nibabel()
def test_render_mri():
"""Test rendering MRI for mne report."""
tempdir = _TempDir()
trans_fname_new = op.join(tempdir, 'temp-trans.fif')
for a, b in [[trans_fname, trans_fname_new]]:
shutil.copyfile(a, b)
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*')
report.save(op.join(tempdir, 'report.html'), open_browser=False)
assert repr(report)
report.add_bem_to_section('sample', caption='extra', section='foo',
subjects_dir=subjects_dir, decim=30)
report.save(op.join(tempdir, 'report.html'), open_browser=False,
overwrite=True)
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem():
"""Test rendering MRI without BEM for mne report."""
tempdir = _TempDir()
os.mkdir(op.join(tempdir, 'sample'))
os.mkdir(op.join(tempdir, 'sample', 'mri'))
shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=tempdir)
report.parse_folder(tempdir, render_bem=False)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
"""Test adding html str to mne report."""
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
html = '<b>MNE-Python is AWESOME</b>'
caption, section = 'html', 'html_section'
report.add_htmls_to_section(html, caption, section)
idx = report._sectionlabels.index('report_' + section)
html_compare = report.html[idx]
assert (html in html_compare)
assert (repr(report))
def test_add_slider_to_section():
"""Test adding a slider with a series of images to mne report."""
tempdir = _TempDir()
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
section = 'slider_section'
figs = _get_example_figures()
report.add_slider_to_section(figs, section=section, title='my title')
assert report.fnames[0] == 'my title-#-report_slider_section-#-custom'
report.save(op.join(tempdir, 'report.html'), open_browser=False)
pytest.raises(NotImplementedError, report.add_slider_to_section,
[figs, figs])
pytest.raises(ValueError, report.add_slider_to_section, figs, ['wug'])
pytest.raises(TypeError, report.add_slider_to_section, figs, 'wug')
# need at least 2
pytest.raises(ValueError, report.add_slider_to_section, figs[:1], 'wug')
# Smoke test that SVG w/unicode can be added
report = Report()
fig, ax = plt.subplots()
ax.set_xlabel(u'μ')
report.add_slider_to_section([fig] * 2, image_format='svg')
def test_validate_input():
"""Test Report input validation."""
report = Report()
items = ['a', 'b', 'c']
captions = ['Letter A', 'Letter B', 'Letter C']
section = 'ABCs'
comments = ['First letter of the alphabet.',
'Second letter of the alphabet',
'Third letter of the alphabet']
pytest.raises(ValueError, report._validate_input, items, captions[:-1],
section, comments=None)
pytest.raises(ValueError, report._validate_input, items, captions, section,
comments=comments[:-1])
values = report._validate_input(items, captions, section, comments=None)
items_new, captions_new, comments_new = values
assert_equal(len(comments_new), len(items))
@requires_h5py
def test_open_report():
"""Test the open_report function."""
tempdir = _TempDir()
hdf5 = op.join(tempdir, 'report.h5')
# Test creating a new report through the open_report function
fig1 = _get_example_figures()[0]
with open_report(hdf5, subjects_dir=subjects_dir) as report:
assert report.subjects_dir == subjects_dir
assert report._fname == hdf5
report.add_figs_to_section(figs=fig1, captions=['evoked response'])
# Exiting the context block should have triggered saving to HDF5
assert op.exists(hdf5)
# Load the HDF5 version of the report and check equivalence
report2 = open_report(hdf5)
assert report2._fname == hdf5
assert report2.subjects_dir == report.subjects_dir
assert report2.html == report.html
assert report2.__getstate__() == report.__getstate__()
assert '_fname' not in report2.__getstate__()
# Check parameters when loading a report
pytest.raises(ValueError, open_report, hdf5, foo='bar') # non-existing
pytest.raises(ValueError, open_report, hdf5, subjects_dir='foo')
open_report(hdf5, subjects_dir=subjects_dir) # This should work
# Check that the context manager doesn't swallow exceptions
with pytest.raises(ZeroDivisionError):
with open_report(hdf5, subjects_dir=subjects_dir) as report:
1 / 0
def test_remove():
"""Test removing figures from a report."""
r = Report()
fig1, fig2 = _get_example_figures()
r.add_figs_to_section(fig1, 'figure1', 'mysection')
r.add_slider_to_section([fig1, fig2], title='figure1',
section='othersection')
r.add_figs_to_section(fig2, 'figure1', 'mysection')
r.add_figs_to_section(fig2, 'figure2', 'mysection')
# Test removal by caption
r2 = copy.deepcopy(r)
removed_index = r2.remove(caption='figure1')
assert removed_index == 2
assert len(r2.html) == 3
assert r2.html[0] == r.html[0]
assert r2.html[1] == r.html[1]
assert r2.html[2] == r.html[3]
# Test restricting to section
r2 = copy.deepcopy(r)
removed_index = r2.remove(caption='figure1', section='othersection')
assert removed_index == 1
assert len(r2.html) == 3
assert r2.html[0] == r.html[0]
assert r2.html[1] == r.html[2]
assert r2.html[2] == r.html[3]
# Test removal of empty sections
r2 = copy.deepcopy(r)
r2.remove(caption='figure1', section='othersection')
assert r2.sections == ['mysection']
assert r2._sectionvars == {'mysection': 'report_mysection'}
def test_add_or_replace():
"""Test replacing existing figures in a report."""
r = Report()
fig1, fig2 = _get_example_figures()
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'othersection')
r.add_figs_to_section(fig2, 'nonduplicate', 'mysection')
# By default, replace=False, so all figures should be there
assert len(r.html) == 4
old_r = copy.deepcopy(r)
# Re-add fig1 with replace=True, it should overwrite the last occurrence of
# fig1 in section 'mysection'.
r.add_figs_to_section(fig2, 'duplicate', 'mysection', replace=True)
assert len(r.html) == 4
assert r.html[1] != old_r.html[1] # This figure should have changed
# All other figures should be the same
assert r.html[0] == old_r.html[0]
assert r.html[2] == old_r.html[2]
assert r.html[3] == old_r.html[3]
def test_scraper(tmpdir):
"""Test report scraping."""
r = Report()
fig1, fig2 = _get_example_figures()
r.add_figs_to_section(fig1, 'a', 'mysection')
r.add_figs_to_section(fig2, 'b', 'mysection')
# Mock a Sphinx + sphinx_gallery config
app = Bunch(builder=Bunch(srcdir=str(tmpdir),
outdir=op.join(str(tmpdir), '_build', 'html')))
scraper = _ReportScraper()
scraper.app = app
gallery_conf = dict(src_dir=app.builder.srcdir, builder_name='html')
img_fname = op.join(app.builder.srcdir, 'auto_examples', 'images',
'sg_img.png')
target_file = op.join(app.builder.srcdir, 'auto_examples', 'sg.py')
os.makedirs(op.dirname(img_fname))
os.makedirs(app.builder.outdir)
block_vars = dict(image_path_iterator=(img for img in [img_fname]),
example_globals=dict(a=1), target_file=target_file)
# Nothing yet
block = None
rst = scraper(block, block_vars, gallery_conf)
assert rst == ''
# Still nothing
block_vars['example_globals']['r'] = r
rst = scraper(block, block_vars, gallery_conf)
# Once it's saved, add it
assert rst == ''
fname = op.join(str(tmpdir), 'my_html.html')
r.save(fname, open_browser=False)
rst = scraper(block, block_vars, gallery_conf)
out_html = op.join(app.builder.outdir, 'auto_examples', 'my_html.html')
assert not op.isfile(out_html)
os.makedirs(op.join(app.builder.outdir, 'auto_examples'))
scraper.copyfiles()
assert op.isfile(out_html)
assert rst.count('"') == 6
assert "<iframe" in rst
assert op.isfile(img_fname.replace('png', 'svg'))
run_tests_if_main()
| 39.116998 | 79 | 0.66772 | [
"BSD-3-Clause"
] | NataKozh/mne-python | mne/tests/test_report.py | 17,721 | Python |
from django.apps import AppConfig
class UbfCoreConfig(AppConfig):
name = 'ubfcore'
| 14.833333 | 33 | 0.752809 | [
"MIT"
] | himasnhu1/example | ubfcore/apps.py | 89 | Python |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.31
#
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _sequencer_osx
import new
new_instancemethod = new.instancemethod
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if name == "thisown":
return self.this.own(value)
if name == "this":
if type(value).__name__ == "PySwigObject":
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static) or hasattr(self, name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if name == "thisown":
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError(name)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except:
strthis = ""
return "<%s.%s; %s >" % (
self.__class__.__module__,
self.__class__.__name__,
strthis,
)
import types
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
del types
_MIDIGetNumberOfDevices = _sequencer_osx._MIDIGetNumberOfDevices
_MIDIClientCreate = _sequencer_osx._MIDIClientCreate
_MIDIClientDispose = _sequencer_osx._MIDIClientDispose
_MIDISourceCreate = _sequencer_osx._MIDISourceCreate
_MIDIOutputPortCreate = _sequencer_osx._MIDIOutputPortCreate
_MIDIPortConnectSource = _sequencer_osx._MIDIPortConnectSource
| 26.025641 | 70 | 0.698522 | [
"MIT"
] | NFJones/python-midi | src/sequencer_osx/sequencer_osx.py | 2,030 | Python |
import argparse
import binascii
import os
from enum import Enum
from stor.plotters.bladebit import get_bladebit_install_info, plot_bladebit
from stor.plotters.chiapos import get_chiapos_install_info, plot_stor
from stor.plotters.madmax import get_madmax_install_info, plot_madmax
from stor.plotters.install_plotter import install_plotter
from pathlib import Path
from typing import Any, Dict, Optional
class Options(Enum):
TMP_DIR = 1
TMP_DIR2 = 2
FINAL_DIR = 3
K = 4
MEMO = 5
ID = 6
BUFF = 7
NUM_BUCKETS = 8
STRIPE_SIZE = 9
NUM_THREADS = 10
NOBITFIELD = 11
PLOT_COUNT = 12
MADMAX_NUM_BUCKETS_PHRASE3 = 13
MADMAX_WAITFORCOPY = 14
POOLKEY = 15
FARMERKEY = 16
MADMAX_TMPTOGGLE = 17
POOLCONTRACT = 18
MADMAX_RMULTI2 = 19
BLADEBIT_WARMSTART = 20
BLADEBIT_NONUMA = 21
VERBOSE = 22
OVERRIDE_K = 23
ALT_FINGERPRINT = 24
EXCLUDE_FINAL_DIR = 25
CONNECT_TO_DAEMON = 26
stor_plotter = [
Options.TMP_DIR,
Options.TMP_DIR2,
Options.FINAL_DIR,
Options.K,
Options.MEMO,
Options.ID,
Options.BUFF,
Options.NUM_BUCKETS,
Options.STRIPE_SIZE,
Options.NUM_THREADS,
Options.NOBITFIELD,
Options.OVERRIDE_K,
Options.ALT_FINGERPRINT,
Options.POOLCONTRACT,
Options.FARMERKEY,
Options.POOLKEY,
Options.PLOT_COUNT,
Options.EXCLUDE_FINAL_DIR,
Options.CONNECT_TO_DAEMON,
]
madmax_plotter = [
Options.K,
Options.PLOT_COUNT,
Options.NUM_THREADS,
Options.NUM_BUCKETS,
Options.MADMAX_NUM_BUCKETS_PHRASE3,
Options.TMP_DIR,
Options.TMP_DIR2,
Options.FINAL_DIR,
Options.MADMAX_WAITFORCOPY,
Options.POOLKEY,
Options.FARMERKEY,
Options.POOLCONTRACT,
Options.MADMAX_TMPTOGGLE,
Options.MADMAX_RMULTI2,
Options.CONNECT_TO_DAEMON,
]
bladebit_plotter = [
Options.NUM_THREADS,
Options.PLOT_COUNT,
Options.FARMERKEY,
Options.POOLKEY,
Options.POOLCONTRACT,
Options.ID,
Options.BLADEBIT_WARMSTART,
Options.BLADEBIT_NONUMA,
Options.FINAL_DIR,
Options.VERBOSE,
Options.CONNECT_TO_DAEMON,
]
def get_plotters_root_path(root_path: Path) -> Path:
return root_path / "plotters"
def build_parser(subparsers, root_path, option_list, name, plotter_desc):
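    # Adds only the options listed in `option_list` to this plotter's
    # sub-parser, so each plotter exposes just the flags it understands.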
parser = subparsers.add_parser(name, description=plotter_desc)
for option in option_list:
if option is Options.K:
parser.add_argument(
"-k",
"--size",
type=int,
help="K value.",
default=32,
)
u_default = 0 if name == "chiapos" else 256
if option is Options.NUM_BUCKETS:
parser.add_argument(
"-u",
"--buckets",
type=int,
help="Number of buckets.",
default=u_default,
)
if option is Options.STRIPE_SIZE:
parser.add_argument(
"-s",
"--stripes",
type=int,
help="Stripe size.",
default=0,
)
if option is Options.TMP_DIR:
parser.add_argument(
"-t",
"--tmp_dir",
type=str,
dest="tmpdir",
help="Temporary directory 1.",
default=str(root_path) + "/",
)
if option is Options.TMP_DIR2:
parser.add_argument(
"-2",
"--tmp_dir2",
type=str,
dest="tmpdir2",
help="Temporary directory 2.",
default=str(root_path) + "/",
)
if option is Options.FINAL_DIR:
parser.add_argument(
"-d",
"--final_dir",
type=str,
dest="finaldir",
help="Final directory.",
default=str(root_path) + "/",
)
if option is Options.BUFF:
parser.add_argument(
"-b",
"--buffer",
type=int,
help="Size of the buffer, in MB.",
default=0,
)
r_default = 4 if name == "madmax" else 0
if option is Options.NUM_THREADS:
parser.add_argument(
"-r",
"--threads",
type=int,
help="Num threads.",
default=r_default,
)
if option is Options.NOBITFIELD:
parser.add_argument(
"-e",
"--nobitfield",
action="store_true",
help="Disable bitfield.",
default=False,
)
if option is Options.MEMO:
parser.add_argument(
"-m",
"--memo",
type=binascii.unhexlify,
help="Memo variable.",
)
if option is Options.ID:
parser.add_argument(
"-i",
"--id",
type=binascii.unhexlify,
help="Plot id",
)
if option is Options.PLOT_COUNT:
parser.add_argument(
"-n",
"--count",
type=int,
help="Number of plots to create (default = 1)",
default=1,
)
if option is Options.MADMAX_NUM_BUCKETS_PHRASE3:
parser.add_argument(
"-v",
"--buckets3",
type=int,
help="Number of buckets for phase 3+4 (default = 256)",
default=256,
)
if option is Options.MADMAX_WAITFORCOPY:
parser.add_argument(
"-w",
"--waitforcopy",
action="store_true",
help="Wait for copy to start next plot",
default=False,
)
if option is Options.MADMAX_TMPTOGGLE:
parser.add_argument(
"-G",
"--tmptoggle",
action="store_true",
help="Alternate tmpdir/tmpdir2 (default = false)",
default=False,
)
if option is Options.POOLCONTRACT:
parser.add_argument(
"-c",
"--contract",
type=str,
help="Pool Contract Address (64 chars)",
default="",
)
if option is Options.MADMAX_RMULTI2:
parser.add_argument(
"-K",
"--rmulti2",
type=int,
help="Thread multiplier for P2 (default = 1)",
default=1,
)
if option is Options.POOLKEY:
parser.add_argument(
"-p",
"--pool-key",
type=binascii.unhexlify,
help="Pool Public Key (48 bytes)",
default="",
)
if option is Options.FARMERKEY:
parser.add_argument(
"-f",
"--farmerkey",
type=binascii.unhexlify,
help="Farmer Public Key (48 bytes)",
default="",
)
if option is Options.BLADEBIT_WARMSTART:
parser.add_argument(
"-w",
"--warmstart",
action="store_true",
help="Warm start",
default=False,
)
if option is Options.BLADEBIT_NONUMA:
parser.add_argument(
"-m",
"--nonuma",
action="store_true",
help="Disable numa",
default=False,
)
if option is Options.VERBOSE:
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Set verbose",
default=False,
)
if option is Options.OVERRIDE_K:
parser.add_argument(
"--override-k",
dest="override",
action="store_true",
help="Force size smaller than 32",
default=False,
)
if option is Options.ALT_FINGERPRINT:
parser.add_argument(
"-a",
"--alt_fingerprint",
type=int,
default=None,
help="Enter the alternative fingerprint of the key you want to use",
)
if option is Options.EXCLUDE_FINAL_DIR:
parser.add_argument(
"-x",
"--exclude_final_dir",
action="store_true",
help="Skips adding [final dir] to harvester for farming",
default=False,
)
if option is Options.CONNECT_TO_DAEMON:
parser.add_argument(
"-D",
"--connect-to-daemon",
action="store_true",
help=argparse.SUPPRESS,
default=False,
)
def call_plotters(root_path: Path, args):
# Add `plotters` section in STOR_ROOT.
stor_root_path = root_path
root_path = get_plotters_root_path(root_path)
if not root_path.is_dir():
if os.path.exists(root_path):
try:
os.remove(root_path)
except Exception as e:
print(f"Exception deleting old root path: {type(e)} {e}.")
if not os.path.exists(root_path):
print(f"Creating plotters folder within STOR_ROOT: {root_path}")
try:
os.mkdir(root_path)
except Exception as e:
print(f"Cannot create plotters root path {root_path} {type(e)} {e}.")
plotters = argparse.ArgumentParser(description="Available options.")
subparsers = plotters.add_subparsers(help="Available options", dest="plotter")
build_parser(subparsers, root_path, stor_plotter, "chiapos", "Storpos Plotter")
build_parser(subparsers, root_path, madmax_plotter, "madmax", "Madmax Plotter")
build_parser(subparsers, root_path, bladebit_plotter, "bladebit", "Bladebit Plotter")
install_parser = subparsers.add_parser("install", description="Install custom plotters.")
install_parser.add_argument(
"install_plotter", type=str, help="The plotters available for installing. Choose from madmax or bladebit."
)
args = plotters.parse_args(args)
if args.plotter == "chiapos":
plot_stor(args, stor_root_path)
if args.plotter == "madmax":
plot_madmax(args, stor_root_path, root_path)
if args.plotter == "bladebit":
plot_bladebit(args, stor_root_path, root_path)
if args.plotter == "install":
install_plotter(args.install_plotter, root_path)
def get_available_plotters(root_path) -> Dict[str, Any]:
plotters_root_path: Path = get_plotters_root_path(root_path)
plotters: Dict[str, Any] = {}
chiapos: Optional[Dict[str, Any]] = get_chiapos_install_info()
bladebit: Optional[Dict[str, Any]] = get_bladebit_install_info(plotters_root_path)
madmax: Optional[Dict[str, Any]] = get_madmax_install_info(plotters_root_path)
if chiapos is not None:
plotters["chiapos"] = chiapos
if bladebit is not None:
plotters["bladebit"] = bladebit
if madmax is not None:
plotters["madmax"] = madmax
return plotters
| 31.075676 | 114 | 0.529483 | [
"Apache-2.0"
] | Stor-Network/stor-blockchain | stor/plotters/plotters.py | 11,498 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
requirements = [
'tweepy>=2.1',
'pymongo>=2.8.0',
'tendo>=0.0.18',
'boto>=0.0.1',
'nltk>=0.0.1',
'zc.lockfile>=0.0.1',
'flask>=0.0.1',
'flask-bootstrap>=0.0.1'
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='chattersum',
version='0.1.0',
description='test',
author='Shane Eller',
author_email='[email protected]',
url='https://github.com/ellerrs/chattersum',
packages=[
'chattersum',
],
package_dir={'chattersum':
'chattersum'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='chattersum',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests'
)
| 23.694915 | 49 | 0.584406 | [
"MIT"
] | Chattersum/tweetProcessor | setup.py | 1,398 | Python |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class IoXK8sClusterV1alpha4MachineSpecBootstrap(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'config_ref': 'IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef',
'data_secret_name': 'str'
}
attribute_map = {
'config_ref': 'configRef',
'data_secret_name': 'dataSecretName'
}
def __init__(self, config_ref=None, data_secret_name=None, local_vars_configuration=None): # noqa: E501
"""IoXK8sClusterV1alpha4MachineSpecBootstrap - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._config_ref = None
self._data_secret_name = None
self.discriminator = None
if config_ref is not None:
self.config_ref = config_ref
if data_secret_name is not None:
self.data_secret_name = data_secret_name
@property
def config_ref(self):
"""Gets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:return: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:rtype: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef
"""
return self._config_ref
@config_ref.setter
def config_ref(self, config_ref):
"""Sets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap.
:param config_ref: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:type: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef
"""
self._config_ref = config_ref
@property
def data_secret_name(self):
"""Gets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501
:return: The data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:rtype: str
"""
return self._data_secret_name
@data_secret_name.setter
def data_secret_name(self, data_secret_name):
"""Sets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap.
DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501
:param data_secret_name: The data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:type: str
"""
self._data_secret_name = data_secret_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IoXK8sClusterV1alpha4MachineSpecBootstrap):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IoXK8sClusterV1alpha4MachineSpecBootstrap):
return True
return self.to_dict() != other.to_dict()
| 33.57047 | 157 | 0.645942 | [
"Apache-2.0"
] | mariusgheorghies/python | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | 5,002 | Python |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import os
import abc
import six
import time
import queue
import bisect
import logging
import importlib
import traceback
import numpy as np
import pandas as pd
from multiprocessing import Pool
from .cache import H
from ..config import C
from .ops import *
from ..log import get_module_logger
from ..utils import parse_field, read_bin, hash_args, normalize_cache_fields
from .base import Feature
from .cache import DiskDatasetCache, DiskExpressionCache
@six.add_metaclass(abc.ABCMeta)
class CalendarProvider(object):
"""Calendar provider base class
Provide calendar data.
"""
@abc.abstractmethod
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
"""Get calendar of certain market in given time range.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
----------
list
calendar list
"""
raise NotImplementedError("Subclass of CalendarProvider must implement `calendar` method")
def locate_index(self, start_time, end_time, freq, future):
"""Locate the start time index and end time index in a calendar under certain frequency.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
-------
pd.Timestamp
the real start time
pd.Timestamp
the real end time
int
the index of start time
int
the index of end time
"""
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
calendar, calendar_index = self._get_calendar(freq=freq, future=future)
if start_time not in calendar_index:
try:
start_time = calendar[bisect.bisect_left(calendar, start_time)]
except IndexError:
raise IndexError(
"`start_time` uses a future date, if you want to get future trading days, you can use: `future=True`"
)
start_index = calendar_index[start_time]
if end_time not in calendar_index:
end_time = calendar[bisect.bisect_right(calendar, end_time) - 1]
end_index = calendar_index[end_time]
return start_time, end_time, start_index, end_index
def _get_calendar(self, freq, future):
"""Load calendar using memcache.
Parameters
----------
freq : str
frequency of read calendar file
future : bool
whether including future trading day
Returns
-------
list
list of timestamps
dict
dict composed by timestamp as key and index as value for fast search
"""
flag = f"{freq}_future_{future}"
if flag in H["c"]:
_calendar, _calendar_index = H["c"][flag]
else:
_calendar = np.array(self._load_calendar(freq, future))
_calendar_index = {x: i for i, x in enumerate(_calendar)} # for fast search
H["c"][flag] = _calendar, _calendar_index
return _calendar, _calendar_index
def _uri(self, start_time, end_time, freq, future=False):
"""Get the uri of calendar generation task."""
return hash_args(start_time, end_time, freq, future)
@six.add_metaclass(abc.ABCMeta)
class InstrumentProvider(object):
"""Instrument provider base class
Provide instrument data.
"""
@staticmethod
def instruments(market="all", filter_pipe=None):
"""Get the general config dictionary for a base market adding several dynamic filters.
Parameters
----------
market : str
market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500
filter_pipe : list
the list of dynamic filters
Returns
----------
dict
dict of stockpool config
{`market`=>base market name, `filter_pipe`=>list of filters}
example :
{'market': 'csi500',
'filter_pipe': [{'filter_type': 'ExpressionDFilter',
'rule_expression': '$open<40',
'filter_start_time': None,
'filter_end_time': None,
'keep': False},
{'filter_type': 'NameDFilter',
'name_rule_re': 'SH[0-9]{4}55',
'filter_start_time': None,
'filter_end_time': None}]}
"""
if filter_pipe is None:
filter_pipe = []
config = {"market": market, "filter_pipe": []}
# the order of the filters will affect the result, so we need to keep
# the order
for filter_t in filter_pipe:
config["filter_pipe"].append(filter_t.to_config())
return config
@abc.abstractmethod
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
"""List the instruments based on a certain stockpool config.
Parameters
----------
instruments : dict
stockpool config
start_time : str
start of the time range
end_time : str
end of the time range
as_list : bool
return instruments as list or dict
Returns
-------
dict or list
instruments list or dictionary with time spans
"""
raise NotImplementedError("Subclass of InstrumentProvider must implement `list_instruments` method")
def _uri(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return hash_args(instruments, start_time, end_time, freq, as_list)
# instruments type
LIST = "LIST"
DICT = "DICT"
CONF = "CONF"
@classmethod
def get_inst_type(cls, inst):
if "market" in inst:
return cls.CONF
if isinstance(inst, dict):
return cls.DICT
if isinstance(inst, (list, tuple, pd.Index, np.ndarray)):
return cls.LIST
raise ValueError(f"Unknown instrument type {inst}")
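# Editor-added illustration (not part of qlib): a small sketch of how the static
# `instruments` helper above builds a stockpool config. The market name is only an
# example; dynamic filters would be appended through their `to_config()` output.
def _example_stockpool_config():
    config = InstrumentProvider.instruments(market="csi300")
    # -> {"market": "csi300", "filter_pipe": []}
    return config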
@six.add_metaclass(abc.ABCMeta)
class FeatureProvider(object):
"""Feature provider class
Provide feature data.
"""
@abc.abstractmethod
def feature(self, instrument, field, start_time, end_time, freq):
"""Get feature data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain feature
"""
raise NotImplementedError("Subclass of FeatureProvider must implement `feature` method")
@six.add_metaclass(abc.ABCMeta)
class ExpressionProvider(object):
"""Expression provider class
Provide Expression data.
"""
def __init__(self):
self.expression_instance_cache = {}
def get_expression_instance(self, field):
try:
if field in self.expression_instance_cache:
expression = self.expression_instance_cache[field]
else:
expression = eval(parse_field(field))
self.expression_instance_cache[field] = expression
except NameError as e:
get_module_logger("data").exception(
"ERROR: field [%s] contains invalid operator/variable [%s]" % (str(field), str(e).split()[1])
)
raise
except SyntaxError:
get_module_logger("data").exception("ERROR: field [%s] contains invalid syntax" % str(field))
raise
return expression
@abc.abstractmethod
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
"""Get Expression data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain expression
"""
raise NotImplementedError("Subclass of ExpressionProvider must implement `Expression` method")
@six.add_metaclass(abc.ABCMeta)
class DatasetProvider(object):
"""Dataset provider class
Provide Dataset data.
"""
@abc.abstractmethod
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
"""Get dataset data.
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
Returns
----------
pd.DataFrame
a pandas dataframe with <instrument, datetime> index
"""
raise NotImplementedError("Subclass of DatasetProvider must implement `Dataset` method")
def _uri(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=1,
**kwargs,
):
"""Get task uri, used when generating rabbitmq task in qlib_server
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
"""
return DiskDatasetCache._uri(instruments, fields, start_time, end_time, freq, disk_cache)
@staticmethod
def get_instruments_d(instruments, freq):
"""
Parse different types of input instruments to output instruments_d
        Wrongly formatted input instruments will raise an exception.
"""
if isinstance(instruments, dict):
if "market" in instruments:
# dict of stockpool config
instruments_d = Inst.list_instruments(instruments=instruments, freq=freq, as_list=False)
else:
# dict of instruments and timestamp
instruments_d = instruments
elif isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
# list or tuple of a group of instruments
instruments_d = list(instruments)
else:
raise ValueError("Unsupported input type for param `instrument`")
return instruments_d
@staticmethod
def get_column_names(fields):
"""
Get column names from input fields
"""
if len(fields) == 0:
raise ValueError("fields cannot be empty")
fields = fields.copy()
column_names = [str(f) for f in fields]
return column_names
@staticmethod
def parse_fields(fields):
# parse and check the input fields
return [ExpressionD.get_expression_instance(f) for f in fields]
@staticmethod
def dataset_processor(instruments_d, column_names, start_time, end_time, freq):
"""
Load and process the data, return the data set.
- default using multi-kernel method.
"""
normalize_column_names = normalize_cache_fields(column_names)
data = dict()
# One process for one task, so that the memory will be freed quicker.
if C.maxtasksperchild is None:
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
if isinstance(instruments_d, dict):
for inst, spans in instruments_d.items():
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
spans,
C,
),
)
else:
for inst in instruments_d:
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
None,
C,
),
)
p.close()
p.join()
new_data = dict()
for inst in sorted(data.keys()):
if len(data[inst].get()) > 0:
# NOTE: Python version >= 3.6; in versions after python3.6, dict will always guarantee the insertion order
new_data[inst] = data[inst].get()
if len(new_data) > 0:
data = pd.concat(new_data, names=["instrument"], sort=False)
data = DiskDatasetCache.cache_to_origin_data(data, column_names)
else:
data = pd.DataFrame(columns=column_names)
return data
@staticmethod
def expression_calculator(inst, start_time, end_time, freq, column_names, spans=None, C=None):
"""
Calculate the expressions for one instrument, return a df result.
If the expression has been calculated before, load from cache.
return value: A data frame with index 'datetime' and other data columns.
"""
        # NOTE: This is needed for Windows compatibility, since Windows multiprocessing uses "spawn"
if getattr(ExpressionD, "_provider", None) is None:
register_all_wrappers()
obj = dict()
for field in column_names:
            # The client does not have an expression provider; the data will be loaded from the cache via the static method.
obj[field] = ExpressionD.expression(inst, field, start_time, end_time, freq)
data = pd.DataFrame(obj)
_calendar = Cal.calendar(freq=freq)
        data.index = _calendar[data.index.values.astype(int)]
data.index.names = ["datetime"]
if spans is None:
return data
else:
            mask = np.zeros(len(data), dtype=bool)
for begin, end in spans:
mask |= (data.index >= begin) & (data.index <= end)
return data[mask]
class LocalCalendarProvider(CalendarProvider):
"""Local calendar data provider class
Provide calendar data from local data source.
"""
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_cal(self):
"""Calendar file uri."""
if self.remote:
return os.path.join(C.mount_path, "calendars", "{}.txt")
else:
return os.path.join(C.provider_uri, "calendars", "{}.txt")
def _load_calendar(self, freq, future):
"""Load original calendar timestamp from file.
Parameters
----------
freq : str
frequency of read calendar file
Returns
----------
list
list of timestamps
"""
if future:
fname = self._uri_cal.format(freq + "_future")
            # if the future calendar does not exist, fall back to the current calendar
if not os.path.exists(fname):
                get_module_logger("data").warning(f"{freq}_future.txt does not exist, returning the current calendar!")
fname = self._uri_cal.format(freq)
else:
fname = self._uri_cal.format(freq)
if not os.path.exists(fname):
            raise ValueError("calendar does not exist for freq " + freq)
with open(fname) as f:
return [pd.Timestamp(x.strip()) for x in f]
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
_calendar, _calendar_index = self._get_calendar(freq, future)
if start_time == "None":
start_time = None
if end_time == "None":
end_time = None
# strip
if start_time:
start_time = pd.Timestamp(start_time)
if start_time > _calendar[-1]:
return np.array([])
else:
start_time = _calendar[0]
if end_time:
end_time = pd.Timestamp(end_time)
if end_time < _calendar[0]:
return np.array([])
else:
end_time = _calendar[-1]
_, _, si, ei = self.locate_index(start_time, end_time, freq, future)
return _calendar[si : ei + 1]
class LocalInstrumentProvider(InstrumentProvider):
"""Local instrument data provider class
Provide instrument data from local data source.
"""
def __init__(self):
pass
@property
def _uri_inst(self):
"""Instrument file uri."""
return os.path.join(C.provider_uri, "instruments", "{}.txt")
def _load_instruments(self, market):
fname = self._uri_inst.format(market)
if not os.path.exists(fname):
            raise ValueError("instruments do not exist for market " + market)
_instruments = dict()
with open(fname) as f:
for line in f:
inst_time = line.strip().split()
inst = inst_time[0]
if len(inst_time) == 3:
# `day`
begin = inst_time[1]
end = inst_time[2]
elif len(inst_time) == 5:
# `1min`
begin = inst_time[1] + " " + inst_time[2]
end = inst_time[3] + " " + inst_time[4]
_instruments.setdefault(inst, []).append((pd.Timestamp(begin), pd.Timestamp(end)))
return _instruments
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
market = instruments["market"]
if market in H["i"]:
_instruments = H["i"][market]
else:
_instruments = self._load_instruments(market)
H["i"][market] = _instruments
# strip
# use calendar boundary
cal = Cal.calendar(freq=freq)
start_time = pd.Timestamp(start_time or cal[0])
end_time = pd.Timestamp(end_time or cal[-1])
_instruments_filtered = {
inst: list(
filter(
lambda x: x[0] <= x[1],
[(max(start_time, x[0]), min(end_time, x[1])) for x in spans],
)
)
for inst, spans in _instruments.items()
}
_instruments_filtered = {key: value for key, value in _instruments_filtered.items() if value}
# filter
filter_pipe = instruments["filter_pipe"]
for filter_config in filter_pipe:
from . import filter as F
filter_t = getattr(F, filter_config["filter_type"]).from_config(filter_config)
_instruments_filtered = filter_t(_instruments_filtered, start_time, end_time, freq)
# as list
if as_list:
return list(_instruments_filtered)
return _instruments_filtered
class LocalFeatureProvider(FeatureProvider):
"""Local feature data provider class
Provide feature data from local data source.
"""
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_data(self):
"""Static feature file uri."""
if self.remote:
return os.path.join(C.mount_path, "features", "{}", "{}.{}.bin")
else:
return os.path.join(C.provider_uri, "features", "{}", "{}.{}.bin")
def feature(self, instrument, field, start_index, end_index, freq):
# validate
field = str(field).lower()[1:]
uri_data = self._uri_data.format(instrument.lower(), field, freq)
if not os.path.exists(uri_data):
get_module_logger("data").warning("WARN: data not found for %s.%s" % (instrument, field))
return pd.Series()
# raise ValueError('uri_data not found: ' + uri_data)
# load
series = read_bin(uri_data, start_index, end_index)
return series
class LocalExpressionProvider(ExpressionProvider):
"""Local expression data provider class
Provide expression data from local data source.
"""
def __init__(self):
super().__init__()
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
expression = self.get_expression_instance(field)
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
_, _, start_index, end_index = Cal.locate_index(start_time, end_time, freq, future=False)
lft_etd, rght_etd = expression.get_extended_window_size()
series = expression.load(instrument, max(0, start_index - lft_etd), end_index + rght_etd, freq)
# Ensure that each column type is consistent
        # FIXME: The stock data is currently float. If there are other types of data, this part needs to be re-implemented.
try:
series = series.astype(float)
except ValueError:
pass
if not series.empty:
series = series.loc[start_index:end_index]
return series
class LocalDatasetProvider(DatasetProvider):
"""Local dataset data provider class
Provide dataset data from local data source.
"""
def __init__(self):
pass
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
instruments_d = self.get_instruments_d(instruments, freq)
column_names = self.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return pd.DataFrame(columns=column_names)
start_time = cal[0]
end_time = cal[-1]
data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
return data
@staticmethod
def multi_cache_walker(instruments, fields, start_time=None, end_time=None, freq="day"):
"""
This method is used to prepare the expression cache for the client.
Then the client will load the data from expression cache by itself.
"""
instruments_d = DatasetProvider.get_instruments_d(instruments, freq)
column_names = DatasetProvider.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return
start_time = cal[0]
end_time = cal[-1]
if C.maxtasksperchild is None:
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
for inst in instruments_d:
p.apply_async(
LocalDatasetProvider.cache_walker,
args=(
inst,
start_time,
end_time,
freq,
column_names,
),
)
p.close()
p.join()
@staticmethod
def cache_walker(inst, start_time, end_time, freq, column_names):
"""
If the expressions of one instrument haven't been calculated before,
        calculate them and write them into the expression cache.
"""
for field in column_names:
ExpressionD.expression(inst, field, start_time, end_time, freq)
class ClientCalendarProvider(CalendarProvider):
"""Client calendar data provider class
Provide calendar data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
self.queue = queue.Queue()
def set_conn(self, conn):
self.conn = conn
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
self.conn.send_request(
request_type="calendar",
request_content={
"start_time": str(start_time),
"end_time": str(end_time),
"freq": freq,
"future": future,
},
msg_queue=self.queue,
msg_proc_func=lambda response_content: [pd.Timestamp(c) for c in response_content],
)
result = self.queue.get(timeout=C["timeout"])
return result
class ClientInstrumentProvider(InstrumentProvider):
"""Client instrument data provider class
Provide instrument data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
self.queue = queue.Queue()
def set_conn(self, conn):
self.conn = conn
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
def inst_msg_proc_func(response_content):
if isinstance(response_content, dict):
instrument = {
i: [(pd.Timestamp(s), pd.Timestamp(e)) for s, e in t] for i, t in response_content.items()
}
else:
instrument = response_content
return instrument
self.conn.send_request(
request_type="instrument",
request_content={
"instruments": instruments,
"start_time": str(start_time),
"end_time": str(end_time),
"freq": freq,
"as_list": as_list,
},
msg_queue=self.queue,
msg_proc_func=inst_msg_proc_func,
)
result = self.queue.get(timeout=C["timeout"])
if isinstance(result, Exception):
raise result
get_module_logger("data").debug("get result")
return result
class ClientDatasetProvider(DatasetProvider):
"""Client dataset data provider class
Provide dataset data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
def set_conn(self, conn):
self.conn = conn
self.queue = queue.Queue()
def dataset(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=0,
return_uri=False,
):
if Inst.get_inst_type(instruments) == Inst.DICT:
get_module_logger("data").warning(
"Getting features from a dict of instruments is not recommended because the features will not be "
"cached! "
"The dict of instruments will be cleaned every day."
)
if disk_cache == 0:
"""
Call the server to generate the expression cache.
Then load the data from the expression cache directly.
- default using multi-kernel method.
"""
self.conn.send_request(
request_type="feature",
request_content={
"instruments": instruments,
"fields": fields,
"start_time": start_time,
"end_time": end_time,
"freq": freq,
"disk_cache": 0,
},
msg_queue=self.queue,
)
feature_uri = self.queue.get(timeout=C["timeout"])
if isinstance(feature_uri, Exception):
raise feature_uri
else:
instruments_d = self.get_instruments_d(instruments, freq)
column_names = self.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return pd.DataFrame(columns=column_names)
start_time = cal[0]
end_time = cal[-1]
data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
if return_uri:
return data, feature_uri
else:
return data
else:
"""
Call the server to generate the data-set cache, get the uri of the cache file.
Then load the data from the file on NFS directly.
- using single-process implementation.
"""
self.conn.send_request(
request_type="feature",
request_content={
"instruments": instruments,
"fields": fields,
"start_time": start_time,
"end_time": end_time,
"freq": freq,
"disk_cache": 1,
},
msg_queue=self.queue,
)
# - Done in callback
feature_uri = self.queue.get(timeout=C["timeout"])
if isinstance(feature_uri, Exception):
raise feature_uri
get_module_logger("data").debug("get result")
try:
                # pre-mount nfs, used for demo
mnt_feature_uri = os.path.join(C.mount_path, C.dataset_cache_dir_name, feature_uri)
df = DiskDatasetCache.read_data_from_cache(mnt_feature_uri, start_time, end_time, fields)
get_module_logger("data").debug("finish slicing data")
if return_uri:
return df, feature_uri
return df
except AttributeError:
raise IOError("Unable to fetch instruments from remote server!")
class BaseProvider:
"""Local provider class
    To keep compatibility with the old qlib provider.
"""
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
return Cal.calendar(start_time, end_time, freq, future=future)
def instruments(self, market="all", filter_pipe=None, start_time=None, end_time=None):
if start_time is not None or end_time is not None:
            get_module_logger("Provider").warning(
                "The instruments correspond to a stock pool. "
                "Parameters `start_time` and `end_time` do not take effect now."
)
return InstrumentProvider.instruments(market, filter_pipe)
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return Inst.list_instruments(instruments, start_time, end_time, freq, as_list)
def features(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=None,
):
"""
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
        This function will first try the cache method, which accepts a `disk_cache` keyword,
        and will fall back to the provider method if a TypeError is raised because the
        DatasetD instance is a plain provider class.
"""
disk_cache = C.default_disk_cache if disk_cache is None else disk_cache
if C.disable_disk_cache:
disk_cache = False
try:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq, disk_cache)
except TypeError:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq)
class LocalProvider(BaseProvider):
def _uri(self, type, **kwargs):
"""_uri
        The server hopes to get the uri of the request. The uri will be decided
        by the data provider. For example, different cache layers have different uris.
:param type: The type of resource for the uri
:param **kwargs:
"""
if type == "calendar":
return Cal._uri(**kwargs)
elif type == "instrument":
return Inst._uri(**kwargs)
elif type == "feature":
return DatasetD._uri(**kwargs)
def features_uri(self, instruments, fields, start_time, end_time, freq, disk_cache=1):
"""features_uri
Return the uri of the generated cache of features/dataset
:param disk_cache:
:param instruments:
:param fields:
:param start_time:
:param end_time:
:param freq:
"""
return DatasetD._dataset_uri(instruments, fields, start_time, end_time, freq, disk_cache)
class ClientProvider(BaseProvider):
"""Client Provider
    Requesting data from server as a client. Can propose requests:
    - Calendar : Directly responds with a list of calendars
    - Instruments (without filter): Directly responds with a list/dict of instruments
    - Instruments (with filters): Responds with a list/dict of instruments
    - Features : Responds with a cache uri
    The general workflow is described as follows:
    When the user uses the client provider to propose a request, the client provider will connect to the server and send the request. The client will start to wait for the response. The response will be made instantly, indicating whether the cache is available. The waiting procedure terminates only when the client gets a response saying `feature_available` is true.
    `BUG` : Every time we make a request for certain data we need to connect to the server, wait for the response and disconnect from it. We can't make a sequence of requests within one connection. You can refer to https://python-socketio.readthedocs.io/en/latest/client.html for documentation of the python-socketIO client.
"""
def __init__(self):
from .client import Client
self.client = Client(C.flask_server, C.flask_port)
self.logger = get_module_logger(self.__class__.__name__)
if isinstance(Cal, ClientCalendarProvider):
Cal.set_conn(self.client)
Inst.set_conn(self.client)
if hasattr(DatasetD, "provider"):
DatasetD.provider.set_conn(self.client)
else:
DatasetD.set_conn(self.client)
class Wrapper(object):
"""Data Provider Wrapper"""
def __init__(self):
self._provider = None
def register(self, provider):
self._provider = provider
def __getattr__(self, key):
if self._provider is None:
            raise AttributeError("Please run qlib.init() first before using qlib")
return getattr(self._provider, key)
def get_cls_from_name(cls_name):
return getattr(importlib.import_module(".data", package="qlib"), cls_name)
def get_provider_obj(config, **params):
if isinstance(config, dict):
params.update(config["kwargs"])
config = config["class"]
return get_cls_from_name(config)(**params)
def register_wrapper(wrapper, cls_or_obj):
"""register_wrapper
:param wrapper: A wrapper of all kinds of providers
:param cls_or_obj: A class or class name or object instance in data/data.py
"""
if isinstance(cls_or_obj, str):
cls_or_obj = get_cls_from_name(cls_or_obj)
obj = cls_or_obj() if isinstance(cls_or_obj, type) else cls_or_obj
wrapper.register(obj)
Cal = Wrapper()
Inst = Wrapper()
FeatureD = Wrapper()
ExpressionD = Wrapper()
DatasetD = Wrapper()
D = Wrapper()
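# Editor-added note (not part of qlib): the module-level wrappers above stay empty until
# a provider object is registered into them, normally via register_all_wrappers() below.
# A minimal manual registration sketch (it assumes C.provider_uri points at prepared data):
def _example_manual_registration():
    register_wrapper(Cal, LocalCalendarProvider)
    return Cal.calendar(freq="day")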
def register_all_wrappers():
"""register_all_wrappers"""
logger = get_module_logger("data")
_calendar_provider = get_provider_obj(C.calendar_provider)
if getattr(C, "calendar_cache", None) is not None:
_calendar_provider = get_provider_obj(C.calendar_cache, provider=_calendar_provider)
register_wrapper(Cal, _calendar_provider)
    logger.debug(f"registering Cal {C.calendar_provider}-{C.calendar_cache}")
register_wrapper(Inst, C.instrument_provider)
logger.debug(f"registering Inst {C.instrument_provider}")
if getattr(C, "feature_provider", None) is not None:
feature_provider = get_provider_obj(C.feature_provider)
register_wrapper(FeatureD, feature_provider)
logger.debug(f"registering FeatureD {C.feature_provider}")
if getattr(C, "expression_provider", None) is not None:
# This provider is unnecessary in client provider
_eprovider = get_provider_obj(C.expression_provider)
if getattr(C, "expression_cache", None) is not None:
_eprovider = get_provider_obj(C.expression_cache, provider=_eprovider)
register_wrapper(ExpressionD, _eprovider)
        logger.debug(f"registering ExpressionD {C.expression_provider}-{C.expression_cache}")
_dprovider = get_provider_obj(C.dataset_provider)
if getattr(C, "dataset_cache", None) is not None:
_dprovider = get_provider_obj(C.dataset_cache, provider=_dprovider)
register_wrapper(DatasetD, _dprovider)
    logger.debug(f"registering DatasetD {C.dataset_provider}-{C.dataset_cache}")
register_wrapper(D, C.provider)
logger.debug(f"registering D {C.provider}")
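# Editor-added usage sketch, not part of qlib itself. It assumes `qlib.init()` has been
# called with a valid `provider_uri` pointing at prepared data (which registers the
# wrappers above); the market name, fields and date range are purely illustrative.
def _example_basic_usage():
    cal = D.calendar(start_time="2019-01-01", end_time="2019-01-31", freq="day")
    instruments = D.instruments(market="all")
    df = D.features(instruments, ["$close", "$volume"],
                    start_time="2019-01-01", end_time="2019-01-31", freq="day")
    return cal, df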
| 34.017102 | 362 | 0.594872 | [
"MIT"
] | Tirbo06/qlib | qlib/data/data.py | 37,793 | Python |
from flask_unchained.bundles.sqlalchemy import SessionManager, SQLAlchemyUnchained
def setup(db: SQLAlchemyUnchained):
session_manager = SessionManager(db)
class Foo(db.Model):
class Meta:
lazy_mapped = False
name = db.Column(db.String)
db.create_all()
return Foo, session_manager
class TestSessionManager:
def test_save(self, db: SQLAlchemyUnchained):
Foo, session_manager = setup(db)
foo = Foo(name='foo')
session_manager.save(foo)
# check it's added to the session but not committed
assert foo in db.session
with db.session.no_autoflush:
assert Foo.q.get_by(name='foo') is None
# check the commit kwarg works
session_manager.save(foo, commit=True)
assert Foo.q.get_by(name='foo') == foo
def test_save_all(self, db: SQLAlchemyUnchained):
Foo, session_manager = setup(db)
foo1 = Foo(name='one')
foo2 = Foo(name='two')
foo3 = Foo(name='three')
all_ = [foo1, foo2, foo3]
session_manager.save_all(all_)
with db.session.no_autoflush:
for foo in all_:
assert foo in db.session
assert Foo.q.get_by(name=foo.name) is None
session_manager.save_all(all_, commit=True)
for foo in all_:
assert Foo.q.get_by(name=foo.name) == foo
def test_delete(self, db: SQLAlchemyUnchained):
Foo, session_manager = setup(db)
foo1 = Foo(name='one')
foo2 = Foo(name='two')
all_ = [foo1, foo2]
session_manager.save_all(all_, commit=True)
for foo in all_:
assert foo in db.session
assert Foo.q.get_by(name=foo.name) == foo
session_manager.delete(foo1, commit=True)
assert foo1 not in db.session
assert Foo.q.get_by(name='one') is None
assert foo2 in db.session
assert Foo.q.get_by(name='two') == foo2
| 28.970588 | 82 | 0.613198 | [
"MIT"
] | achiang/flask-unchained | tests/bundles/sqlalchemy/services/test_session_manager.py | 1,970 | Python |
# corresponds to video 6 of the course
# First steps
n = input('Type something: ')
print(n.isnumeric()) # whether it is numeric
print(n.isalpha()) # whether it is alphabetic
print(n.isalnum()) # whether it is alphanumeric
print(n.isupper()) # whether it is in uppercase
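# A couple of extra checks in the same family, added here for illustration (not part of the original lesson):
print(n.islower()) # whether it is all lowercase
print(n.istitle()) # whether each word starts with an uppercase letter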
"MIT"
] | Carlosouzavalle/Python | Aula 1/aula2.py | 235 | Python |
from service import Service
class ICQ(Service):
async def run(self):
await self.client.post('https://www.icq.com/smsreg/requestPhoneValidation.php',
data={'msisdn': self.formatted_phone, "locale": 'en', 'countryCode': 'ru',
'version': '1', "k": "ic1rtwz1s1Hj1O0r", "r": "46763"})
| 41.111111 | 106 | 0.537838 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | spam-sms/b0mb3r-beta | b0mb3r/services/icq.py | 370 | Python |
#!/usr/bin/env python3
import numpy as np
import qiskit
num_params = 2 # make sure you set this correctly to the number of parameters used by the ansatz
## Previously used for Helium VQE in Rigetti implementation
#
def tiny_ansatz_2(current_params):
q = qiskit.QuantumRegister(2, "q")
qc = qiskit.QuantumCircuit(q, qiskit.ClassicalRegister(2, "c"))
qc.x(q[0])
qc.x(q[1])
qc.rx( np.pi/2, q[0])
qc.h(q[1])
qc.cx(q[0], q[1])
qc.rz(current_params[0], q[1])
qc.cx(q[0], q[1])
qc.rx(-np.pi/2, q[0])
qc.h(q[1])
qc.h(q[0])
qc.rx( np.pi/2, q[1])
qc.cx(q[0], q[1])
qc.rz(current_params[1], q[1])
qc.cx(q[0], q[1])
qc.h(q[0])
qc.rx(-np.pi/2, q[1])
return qc
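# Editor-added smoke test (not part of the original ansatz file): builds the circuit
# with two illustrative parameter values and prints its text drawing.
if __name__ == "__main__":
    demo_circuit = tiny_ansatz_2([0.5, -0.3])
    print(demo_circuit.draw())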
| 22.90625 | 101 | 0.581173 | [
"BSD-3-Clause"
] | ctuning/ck-qisk | soft/template.qiskit.ansatz/python_code/tiny2/custom_ansatz.py | 733 | Python |
import os
import time
import traceback
from conans.client.tools.files import human_size
from conans.errors import AuthenticationException, ConanConnectionError, ConanException, \
    ForbiddenException, NotFoundException
from conans.util.files import mkdir, save_append, sha1sum, to_file_bytes
from conans.util.log import logger
from conans.util.tracer import log_download
class Uploader(object):
def __init__(self, requester, output, verify, chunk_size=1000):
self.chunk_size = chunk_size
self.output = output
self.requester = requester
self.verify = verify
def upload(self, url, abs_path, auth=None, dedup=False, retry=1, retry_wait=0, headers=None):
# Send always the header with the Sha1
headers = headers or {}
headers["X-Checksum-Sha1"] = sha1sum(abs_path)
if dedup:
dedup_headers = {"X-Checksum-Deploy": "true"}
if headers:
dedup_headers.update(headers)
response = self.requester.put(url, data="", verify=self.verify, headers=dedup_headers,
auth=auth)
if response.status_code == 403:
if auth.token is None:
raise AuthenticationException(response.content)
raise ForbiddenException(response.content)
if response.status_code == 201: # Artifactory returns 201 if the file is there
return response
self.output.info("")
# Actual transfer of the real content
it = load_in_chunks(abs_path, self.chunk_size)
# Now it is a chunked read file
file_size = os.stat(abs_path).st_size
it = upload_with_progress(file_size, it, self.chunk_size, self.output)
# Now it will print progress in each iteration
iterable_to_file = IterableToFileAdapter(it, file_size)
# Now it is prepared to work with request
ret = call_with_retry(self.output, retry, retry_wait, self._upload_file, url,
data=iterable_to_file, headers=headers, auth=auth)
return ret
def _upload_file(self, url, data, headers, auth):
try:
response = self.requester.put(url, data=data, verify=self.verify,
headers=headers, auth=auth)
if response.status_code == 403:
if auth.token is None:
raise AuthenticationException(response.content)
raise ForbiddenException(response.content)
except ConanException:
raise
except Exception as exc:
raise ConanException(exc)
return response
class IterableToFileAdapter(object):
def __init__(self, iterable, total_size):
self.iterator = iter(iterable)
self.total_size = total_size
def read(self, size=-1): # @UnusedVariable
return next(self.iterator, b'')
def __len__(self):
return self.total_size
def __iter__(self):
return self.iterator.__iter__()
class upload_with_progress(object):
def __init__(self, totalsize, iterator, chunk_size, output):
self.totalsize = totalsize
self.output = output
self.chunk_size = chunk_size
self.aprox_chunks = self.totalsize * 1.0 / chunk_size
self.groups = iterator
def __iter__(self):
last_progress = None
for index, chunk in enumerate(self.groups):
if self.aprox_chunks == 0:
index = self.aprox_chunks
units = progress_units(index, self.aprox_chunks)
progress = human_readable_progress(index * self.chunk_size, self.totalsize)
            if last_progress != units: # Avoid screen refresh if nothing has changed
print_progress(self.output, units, progress)
last_progress = units
yield chunk
progress = human_readable_progress(self.totalsize, self.totalsize)
print_progress(self.output, progress_units(100, 100), progress)
def __len__(self):
return self.totalsize
def load_in_chunks(path, chunk_size=1024):
"""Lazy function (generator) to read a file piece by piece.
Default chunk size: 1k."""
with open(path, 'rb') as file_object:
while True:
data = file_object.read(chunk_size)
if not data:
break
yield data
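# Editor-added illustration (not part of conan): `load_in_chunks` yields fixed-size
# pieces lazily, so arbitrarily large files can be streamed without loading them fully.
# The path and chunk size below are hypothetical.
def _example_total_size(path="/tmp/example.bin"):
    return sum(len(piece) for piece in load_in_chunks(path, chunk_size=4096))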
class Downloader(object):
def __init__(self, requester, output, verify, chunk_size=1000):
self.chunk_size = chunk_size
self.output = output
self.requester = requester
self.verify = verify
def download(self, url, file_path=None, auth=None, retry=3, retry_wait=0, overwrite=False,
headers=None):
if file_path and not os.path.isabs(file_path):
file_path = os.path.abspath(file_path)
if file_path and os.path.exists(file_path):
if overwrite:
if self.output:
self.output.warn("file '%s' already exists, overwriting" % file_path)
else:
# Should not happen, better to raise, probably we had to remove
# the dest folder before
raise ConanException("Error, the file to download already exists: '%s'" % file_path)
return call_with_retry(self.output, retry, retry_wait, self._download_file, url, auth,
headers, file_path)
def _download_file(self, url, auth, headers, file_path):
t1 = time.time()
try:
response = self.requester.get(url, stream=True, verify=self.verify, auth=auth,
headers=headers)
except Exception as exc:
raise ConanException("Error downloading file %s: '%s'" % (url, exc))
if not response.ok:
if response.status_code == 404:
raise NotFoundException("Not found: %s" % url)
elif response.status_code == 401:
raise AuthenticationException()
raise ConanException("Error %d downloading file %s" % (response.status_code, url))
try:
logger.debug("DOWNLOAD: %s" % url)
data = self._download_data(response, file_path)
duration = time.time() - t1
log_download(url, duration)
return data
except Exception as e:
logger.debug(e.__class__)
logger.debug(traceback.format_exc())
# If this part failed, it means problems with the connection to server
raise ConanConnectionError("Download failed, check server, possibly try again\n%s"
% str(e))
def _download_data(self, response, file_path):
ret = bytearray()
total_length = response.headers.get('content-length')
if total_length is None: # no content length header
if not file_path:
ret += response.content
else:
if self.output:
total_length = len(response.content)
progress = human_readable_progress(total_length, total_length)
print_progress(self.output, 50, progress)
save_append(file_path, response.content)
else:
total_length = int(total_length)
encoding = response.headers.get('content-encoding')
gzip = (encoding == "gzip")
# chunked can be a problem: https://www.greenbytes.de/tech/webdav/rfc2616.html#rfc.section.4.4
# It will not send content-length or should be ignored
def download_chunks(file_handler=None, ret_buffer=None):
"""Write to a buffer or to a file handler"""
chunk_size = 1024 if not file_path else 1024 * 100
download_size = 0
last_progress = None
for data in response.iter_content(chunk_size):
download_size += len(data)
if ret_buffer is not None:
ret_buffer.extend(data)
if file_handler is not None:
file_handler.write(to_file_bytes(data))
if self.output:
units = progress_units(download_size, total_length)
progress = human_readable_progress(download_size, total_length)
                        if last_progress != units: # Avoid screen refresh if nothing has changed
print_progress(self.output, units, progress)
last_progress = units
return download_size
if file_path:
mkdir(os.path.dirname(file_path))
with open(file_path, 'wb') as handle:
dl_size = download_chunks(file_handler=handle)
else:
dl_size = download_chunks(ret_buffer=ret)
response.close()
if dl_size != total_length and not gzip:
raise ConanException("Transfer interrupted before "
"complete: %s < %s" % (dl_size, total_length))
if not file_path:
return bytes(ret)
else:
return
def progress_units(progress, total):
if total == 0:
return 0
return min(50, int(50 * progress / total))
def human_readable_progress(bytes_transferred, total_bytes):
return "%s/%s" % (human_size(bytes_transferred), human_size(total_bytes))
def print_progress(output, units, progress=""):
if output.is_terminal:
output.rewrite_line("[%s%s] %s" % ('=' * units, ' ' * (50 - units), progress))
def call_with_retry(out, retry, retry_wait, method, *args, **kwargs):
for counter in range(retry):
try:
return method(*args, **kwargs)
except NotFoundException:
raise
except ConanException as exc:
if counter == (retry - 1):
raise
else:
if out:
out.error(exc)
out.info("Waiting %d seconds to retry..." % retry_wait)
time.sleep(retry_wait)
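if __name__ == "__main__":
    # Editor-added sketch (not part of conan): exercises Downloader, and through it
    # call_with_retry, using the plain `requests` module in place of conan's requester.
    # The URL is illustrative; no authentication or output object is used.
    import requests
    downloader = Downloader(requests, output=None, verify=True)
    content = downloader.download("https://example.com/", retry=2, retry_wait=1)
    print("downloaded %s" % human_size(len(content)))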
| 38.655303 | 106 | 0.590691 | [
"MIT"
] | AKhranovskiy/conan | conans/client/rest/uploader_downloader.py | 10,205 | Python |
"""example URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path("polls/", include("polls.urls")),
path("admin/", admin.site.urls),
]
| 34.826087 | 77 | 0.702871 | [
"MIT"
] | lambdalisue/django-codemirror-widget | example/example/urls.py | 801 | Python |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test privcyd with different proxy configuration.
Test plan:
- Start privcyd instances with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on privcyd side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create privcyds that connect to them
- Manipulate the privcyds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: privcyd's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: privcyd's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| 41.282178 | 120 | 0.624535 | [
"MIT"
] | Bob-bit-PRIV/PRiVCY | test/functional/proxy_test.py | 8,339 | Python |
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as OriginalUserAdmin
from django.contrib.auth.models import User as OriginalUser
from cms.utils.compat.dj import get_user_model
if getattr(OriginalUser._meta, 'swapped', False):
class UserAdmin(OriginalUserAdmin):
list_display = ('username', 'email', 'get_full_name', 'is_staff')
search_fields = ('username', 'email',)
admin.site.register(get_user_model(), UserAdmin)
| 35.714286 | 73 | 0.74 | [
"BSD-3-Clause"
] | MagicSolutions/django-cms | cms/test_utils/project/customuserapp/admin.py | 500 | Python |
"""Author: Brandon Trabucco
Calculate the part-of-speech tagger using the Brown corpus.
"""
import glove.configuration
import glove.tagger
config = glove.configuration.TaggerConfiguration(
tagger_dir="./")
glove.tagger.dump(config)
| 15.1875 | 59 | 0.761317 | [
"MIT"
] | brandontrabucco/glove | tagger/calculate_tagger.py | 243 | Python |
import torch
from transformers import *
import pdb
import operator
from collections import OrderedDict
import sys
# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows
import logging
logging.basicConfig(level=logging.INFO)
PATH='bert-base-cased'
# Load pre-trained model tokenizer (vocabulary)
tokenizer = BertTokenizer.from_pretrained(PATH,do_lower_case=False)
model = BertForMaskedLM.from_pretrained(PATH)
model.eval()
def get_sent():
print("Enter sentence:")
sent = input()
if (not sent.endswith(".")):
print("Appending period to do dummy masking")
sent = sent + " ."
return '[CLS] ' + sent + '[SEP]'
def print_tokens(tokenized_text):
dstr = ""
for i in range(len(tokenized_text)):
dstr += " " + str(i) + ":"+tokenized_text[i]
print(dstr)
print()
def get_pos():
    """Prompt until a valid integer position is entered; entering 0 quits."""
    while True:
        try:
            masked_index = int(input())
        except ValueError:
            print("Enter valid number: (0 to quit)")
            continue
        if masked_index == 0:
            print("Quitting")
            sys.exit()
        return masked_index
while (True):
text = get_sent()
tokenized_text = tokenizer.tokenize(text)
print_tokens(tokenized_text)
#pdb.set_trace()
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
# Create the segments tensors.
segments_ids = [0] * len(tokenized_text)
    # Mask the token just before the final [SEP] (the appended period) as a dummy mask;
    # 103 is the id of the [MASK] token in the BERT vocabulary.
    masked_index = len(tokenized_text) - 2
    tokenized_text[masked_index] = "[MASK]"
    indexed_tokens[masked_index] = 103
results_dict = {}
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
with torch.no_grad():
predictions = model(tokens_tensor, segments_tensors)
while True:
print_tokens(tokenized_text)
print("Enter any term position neighbor:")
masked_index = get_pos()
results_dict = {}
for i in range(len(predictions[0][0,masked_index])):
tok = tokenizer.convert_ids_to_tokens([i])[0]
results_dict[tok] = float(predictions[0][0,masked_index][i].tolist())
k = 0
hist_d = {}
sorted_d = OrderedDict(sorted(results_dict.items(), key=lambda kv: kv[1], reverse=True))
first = True
max_val = 0
for i in sorted_d:
if (first):
max_val = sorted_d[i]
first = False
val = round(float(sorted_d[i])/max_val,1)
if (val in hist_d):
hist_d[val] += 1
else:
hist_d[val] = 1
k += 1
if (k <= 20):
print(i,sorted_d[i])
fp = open("top_k.txt","w")
hist_d_sorted = OrderedDict(sorted(hist_d.items(), key=lambda kv: kv[0], reverse=False))
for i in hist_d_sorted:
fp.write(str(i) + " " + str(hist_d_sorted[i]) + "\n")
fp.close()
| 30.147059 | 100 | 0.59935 | [
"MIT"
] | ajitrajasekharan/bert_mask | examine_vectors.py | 3,075 | Python |
# -*- coding: utf-8 -*-
"""
slicr.resources.links
~~~~~~~~~~~~~~~~~~~~~
Slicr link resource.
:copyright: © 2018
"""
from flask import current_app
from flask_restful import Resource
from webargs import fields
from webargs.flaskparser import use_args
from slicr.models import Link, LinkSchema
from slicr.utils import convert_args
link_args = {
'url': fields.Str(required=True),
'domain_id': fields.Int(missing=None)
}
# pylint: disable=R0201
class LinkResource(Resource):
"""Link resource."""
endpoints = ['/links', '/links/<int:link_id>']
schema = LinkSchema()
def get(self, link_id):
"""Get link resource.
.. :quickref: Link collection.
**Example request**:
.. sourcecode:: http
GET /links/1 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"data": {
"clicks": 0,
"created": "2018-08-21T19:13:34.157470+00:00",
"short_link": "b",
"updated": null,
"url": "https://www.google.com"
},
"id": 1,
"type": "links",
"url": "/links"
}
        :param int link_id: identifier of the link to retrieve.
:reqheader Accept: The response content type depends on
:mailheader:`Accept` header
:reqheader Authorization: Optional authentication token.
:resheader Content-Type: this depends on :mailheader:`Accept`
header of request
        :statuscode 200: Link found
"""
link = Link.query.filter_by(id=link_id).first()
link_data, errors = self.schema.dump(link)
if errors:
current_app.logger.warning(errors)
response_out = {
'id': link.id,
'data': link_data,
'url': '/links',
'type': 'link'
}
return response_out, 200
@use_args(link_args)
def post(self, args):
"""Create shortened link.
.. :quickref: Link collection.
**Example request**:
.. sourcecode:: http
POST /links HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
{
"url": "https://www.google.com"
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 201 OK
Vary: Accept
Content-Type: text/javascript
{
"data": {
"clicks": 0,
"created": "2018-08-21T19:13:34.157470+00:00",
"short_link": "b",
"updated": null,
"url": "https://www.google.com"
},
"id": 1,
"type": "links",
"url": "/links"
}
:jsonparam string url: url for which to create short link.
:reqheader Accept: The response content type depends on
:mailheader:`Accept` header
:reqheader Authorization: Optional authentication token.
:resheader Content-Type: this depends on :mailheader:`Accept`
header of request
:statuscode 201: Link created
"""
args = convert_args(args)
link = Link(
url=args.url,
domain_id=args.domain_id,
salt=int(current_app.config.get('ENCODER_SALT'))
).save()
link_data, errors = self.schema.dump(link)
if errors:
current_app.logger.warning(errors)
response_out = {
'id': link.id,
'data': link_data,
'url': '/links',
'type': 'link'
}
return response_out, 201
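# Editor-added wiring sketch (hypothetical, not part of slicr): shows how this resource
# might be registered with a Flask-RESTful Api. The bare Flask app below is illustrative
# only; the real application factory would also configure the database and ENCODER_SALT.
if __name__ == "__main__":
    from flask import Flask
    from flask_restful import Api
    app = Flask(__name__)
    api = Api(app)
    api.add_resource(LinkResource, *LinkResource.endpoints)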
| 24.732919 | 69 | 0.506781 | [
"MIT"
] | travisbyrum/slicr | slicr/resources/links.py | 3,983 | Python |
import requests
import json
from .config import auth_token, base_url
from .recommendation_client import RecommendationClient
from .json_parser import json_parser
class ExperimentClient(object):
"""Experiment Client Class
This object defines a Thor experiment within the Python environment. In
particular, an experiment is defined by its name, the date at which it was
created, and the dimensions of the machine learning model. Moreover, an
authentication token is required for requesting new parameter
configurations, for submitting observations of parameters, for viewing
pending parameter configurations and for obtaining the best configuration
of parameters that has been evaluated so far.
Parameters:
identifier (int): A unique identifier that indicates which experiment
on the server-side is being interacted with by the client.
name (str): A name for the machine learning experiment. Consumers of the
Thor service must have unique experiment names, so make sure all of
your experiments are named different things!
date (datetime): The datetime at which the experiment was created on the
server side.
dims (list of dictionaries): A list of dictionaries describing the
parameter space of the optimization problem. Each dimension is given
a name, a maximum value, a minimum value, and a dimension type that
roughly describes how points are spaced.
auth_token (str): String containing a user's specific API key provided
by the Thor server. This is used to authenticate with the Thor
server as a handshake that these experiments belong to a user and
can be viewed and edited by them.
base_url (str): String indicating the URL template for API calls.
"""
def __init__(self, identifier, name, date, dims, auth_token=auth_token,
base_url=base_url):
"""Initialize parameters of the experiment client object."""
self.experiment_id = identifier
self.name = name
self.date = date
self.dims = dims
self.auth_token = auth_token
self.base_url = base_url
def submit_observation(self, config, target):
"""Upload a pairing of a configuration alongside an observed target
variable.
Parameters:
config (dictionary): A dictionary mapping dimension names to values
indicating the configuration of parameters.
target (float): A number indicating the performance of this
configuration of model parameters.
Examples:
This utility is helpful in the event that a machine learning
practitioner already has a few existing evaluations of the system at
given inputs. For instance, the consumer may have already performed
a grid search to obtain parameter values.
Suppose that a particular experiment has two dimensions named "x"
and "y". Then to upload a configuration to the Thor server, we
proceed as follows:
>>> d = {"x": 1.5, "y": 3.1}
>>> v = f(d["x"], d["y"])
>>> exp.submit_observation(d, v)
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id,
"configuration": json.dumps(config),
"target": target
}
result = requests.post(
url=self.base_url.format("submit_observation"),
json=post_data
)
return json_parser(result, self.auth_token)
def create_recommendation(
self,
rand_prob=0.,
n_models=5,
description="",
acq_func="expected_improvement",
integrate_acq=True
):
"""Get a recommendation for a point to evaluate next.
The create recommendation utility represents the core of the Thor
Bayesian optimization software. This function will contact the Thor
server and request a new configuration of machine learning parameters
that serve the object of maximizing the metric of interest.
Parameters:
rand_prob (optional, float): This parameter represents that a random
point in the input space is chosen instead of selecting a
configuration of parameters using Bayesian optimization. As
such, this parameter can be used to benchmark against random
search and otherwise to perform pure exploration of the
parameter space.
n_models (optional, int): The number of Gaussian process models to
sample using elliptical slice sampling. Setting this to a large
number will produce a better characterization of uncertainty in
the acquisition function.
description (optional, str): An optional per-observation
descriptor, potentially useful for identifying one observation
among many others in a large experiment. Defaults to "".
acq_func (optional, str): A string specifying which acquisition
function should be used to construct the newest recommendation.
It can be useful to sometimes vary the acquisition function to
enable exploitation towards the end of an experiment.
integrate_acq (optional, bool): An indicator for whether or not we
should construct an integrated acquisition function using models
sampled from the posterior. The alternative is to not integrate
and to return a single recommendation for each of the sampled
models, of which there are `n_models`.
Returns:
RecommendationClient: A recommendation client object
corresponding to the recommended set of parameters. If the
acquisition function is not integrated, a list of
RecommendationClient objects may be returned instead, one for
each sampled model.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id,
"n_models": n_models,
"rand_prob": rand_prob,
"description": description,
"acq_func": acq_func,
"integrate_acq": integrate_acq
}
result = requests.post(
url=self.base_url.format("create_recommendation"),
json=post_data
)
recs = json_parser(result, self.auth_token, RecommendationClient)
return recs[0] if len(recs) == 1 else recs
def best_configuration(self):
"""Get the configuration of parameters that produced the best value of
the objective function.
Returns:
dictionary: A dictionary containing a detailed view of the
configuration of model parameters that produced the maximal
value of the metric. This includes the date the observation was
created, the value of the metric, and the configuration itself.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id
}
result = requests.post(
url=self.base_url.format("best_configuration"),
json=post_data
)
return json_parser(result, self.auth_token)
def pending_recommendations(self):
"""Query for pending recommendations that have yet to be evaluated.
Sometimes client-side computations may fail for a given input
configuration of model parameters, leaving the recommendation in a kind
of "limbo" state in which is not being evaluated but still exists. In
this case, it can be advantageous for the client to query for such
pending observations and to evaluate them. This function returns a list
of pending recommendations which can then be evaluated by the client.
Returns:
list of RecommendationClient: A list of
recommendation client objects, where each element in the list
corresponds to a pending observation.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id
}
result = requests.post(
url=self.base_url.format("pending_recommendations"),
json=post_data
)
return json_parser(result, self.auth_token, RecommendationClient)
@classmethod
def from_dict(cls, dictionary, auth_token):
"""Create an experiment object from a dictionary representation. Pass
the authentication token as an additional parameter.
TODO:
Can the authentication token be a return parameter?
"""
return cls(
identifier=dictionary["id"],
name=dictionary["name"],
date=dictionary["date"],
dims=dictionary["dimensions"],
auth_token=auth_token
)
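# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): how the methods above
# are meant to be combined. `exp` is assumed to be an ExperimentClient for an
# existing server-side experiment, `f` is a hypothetical objective, and the
# dimension names "x" and "y" simply mirror the submit_observation docstring.
#
#   >>> exp.submit_observation({"x": 1.5, "y": 3.1}, f(1.5, 3.1))  # seed data
#   >>> rec = exp.create_recommendation()  # RecommendationClient to evaluate
#   >>> exp.best_configuration()           # best configuration observed so far
#   >>> exp.pending_recommendations()      # recommendations not yet evaluated
# ---------------------------------------------------------------------------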
| 44.912195 | 80 | 0.641686 | [
"MIT"
] | JamesBrofos/Thor-Python-Client | thor_client/experiment_client.py | 9,207 | Python |
# -*- coding: utf-8 -*-
"""
pygments.lexers.math
~~~~~~~~~~~~~~~~~~~~
Lexers for math languages.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
try:
set
except NameError:
from sets import Set as set
from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions
from pygments.token import Comment, String, Punctuation, Keyword, Name, \
Operator, Number, Text, Generic
from pygments.lexers.agile import PythonLexer
__all__ = ['MuPADLexer', 'MatlabLexer', 'MatlabSessionLexer', 'NumPyLexer',
'SLexer']
class MuPADLexer(RegexLexer):
"""
A `MuPAD <http://www.mupad.com>`_ lexer.
Contributed by Christopher Creutzig <[email protected]>.
*New in Pygments 0.8.*
"""
name = 'MuPAD'
aliases = ['mupad']
filenames = ['*.mu']
tokens = {
'root' : [
(r'//.*?$', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'''(?x)\b(?:
next|break|end|
axiom|end_axiom|category|end_category|domain|end_domain|inherits|
if|%if|then|elif|else|end_if|
case|of|do|otherwise|end_case|
while|end_while|
repeat|until|end_repeat|
for|from|to|downto|step|end_for|
proc|local|option|save|begin|end_proc|
delete|frame
)\b''', Keyword),
(r'''(?x)\b(?:
DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
)\b''', Name.Class),
(r'''(?x)\b(?:
PI|EULER|E|CATALAN|
NIL|FAIL|undefined|infinity|
TRUE|FALSE|UNKNOWN
)\b''',
Name.Constant),
(r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
(r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
(r'''(?x)\b(?:
and|or|not|xor|
assuming|
div|mod|
union|minus|intersect|in|subset
)\b''',
Operator.Word),
(r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
#(r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
(r'''(?x)
((?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
(?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*)\s*([(])''',
bygroups(Name.Function, Punctuation)),
(r'''(?x)
(?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
(?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*''', Name.Variable),
(r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
(r'\.[0-9]+(?:e[0-9]+)?', Number),
(r'.', Text)
],
'comment' : [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
]
}
class MatlabLexer(RegexLexer):
"""
For Matlab (or GNU Octave) source code.
Contributed by Ken Schutte <[email protected]>.
*New in Pygments 0.10.*
"""
name = 'Matlab'
aliases = ['matlab', 'octave']
filenames = ['*.m']
mimetypes = ['text/matlab']
#
# These lists are generated automatically.
# Run the following in bash shell:
#
# for f in elfun specfun elmat; do
# echo -n "$f = "
# matlab -nojvm -r "help $f;exit;" | perl -ne \
# 'push(@c,$1) if /^ (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}'
# done
#
# elfun: Elementary math functions
# specfun: Special Math functions
# elmat: Elementary matrices and matrix manipulation
#
# taken from Matlab version 7.4.0.336 (R2007a)
#
elfun = ["sin","sind","sinh","asin","asind","asinh","cos","cosd","cosh",
"acos","acosd","acosh","tan","tand","tanh","atan","atand","atan2",
"atanh","sec","secd","sech","asec","asecd","asech","csc","cscd",
"csch","acsc","acscd","acsch","cot","cotd","coth","acot","acotd",
"acoth","hypot","exp","expm1","log","log1p","log10","log2","pow2",
"realpow","reallog","realsqrt","sqrt","nthroot","nextpow2","abs",
"angle","complex","conj","imag","real","unwrap","isreal","cplxpair",
"fix","floor","ceil","round","mod","rem","sign"]
specfun = ["airy","besselj","bessely","besselh","besseli","besselk","beta",
"betainc","betaln","ellipj","ellipke","erf","erfc","erfcx",
"erfinv","expint","gamma","gammainc","gammaln","psi","legendre",
"cross","dot","factor","isprime","primes","gcd","lcm","rat",
"rats","perms","nchoosek","factorial","cart2sph","cart2pol",
"pol2cart","sph2cart","hsv2rgb","rgb2hsv"]
elmat = ["zeros","ones","eye","repmat","rand","randn","linspace","logspace",
"freqspace","meshgrid","accumarray","size","length","ndims","numel",
"disp","isempty","isequal","isequalwithequalnans","cat","reshape",
"diag","blkdiag","tril","triu","fliplr","flipud","flipdim","rot90",
"find","end","sub2ind","ind2sub","bsxfun","ndgrid","permute",
"ipermute","shiftdim","circshift","squeeze","isscalar","isvector",
"ans","eps","realmax","realmin","pi","i","inf","nan","isnan",
"isinf","isfinite","j","why","compan","gallery","hadamard","hankel",
"hilb","invhilb","magic","pascal","rosser","toeplitz","vander",
"wilkinson"]
tokens = {
'root': [
# line starting with '!' is sent as a system command. not sure what
# label to use...
(r'^!.*', String.Other),
(r'%.*$', Comment),
(r'^\s*function', Keyword, 'deffunc'),
# from 'iskeyword' on version 7.4.0.336 (R2007a):
(r'(break|case|catch|classdef|continue|else|elseif|end|for|function|'
r'global|if|otherwise|parfor|persistent|return|switch|try|while)\b',
Keyword),
("(" + "|".join(elfun+specfun+elmat) + r')\b', Name.Builtin),
# operators:
(r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
# operators requiring escape for re:
(r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
# punctuation:
(r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
(r'=|:|;', Punctuation),
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
(r'(?<=[\w\)\]])\'', Operator),
(r'(?<![\w\)\]])\'', String, 'string'),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'.', Text),
],
'string': [
(r'[^\']*\'', String, '#pop')
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
Text.Whitespace, Name.Function, Punctuation, Text,
Punctuation, Text.Whitespace), '#pop'),
],
}
def analyse_text(text):
if re.match('^\s*%', text, re.M): # comment
return 0.9
elif re.match('^!\w+', text, re.M): # system cmd
return 0.9
return 0.1
line_re = re.compile('.*?\n')
class MatlabSessionLexer(Lexer):
"""
For Matlab (or GNU Octave) sessions. Modeled after PythonConsoleLexer.
Contributed by Ken Schutte <[email protected]>.
*New in Pygments 0.10.*
"""
name = 'Matlab session'
aliases = ['matlabsession']
def get_tokens_unprocessed(self, text):
mlexer = MatlabLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>>'):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:3])]))
curcode += line[3:]
elif line.startswith('???'):
idx = len(curcode)
                # without this newline, the error is shown on the same line as the previous output
line = "\n" + line
token = (0, Generic.Traceback, line)
insertions.append( (idx, [token,]) )
else:
if curcode:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode: # or item:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
yield item
class NumPyLexer(PythonLexer):
'''
A Python lexer recognizing Numerical Python builtins.
*New in Pygments 0.10.*
'''
name = 'NumPy'
aliases = ['numpy']
# override the mimetypes to not inherit them from python
mimetypes = []
filenames = []
EXTRA_KEYWORDS = set([
'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
'set_numeric_ops', 'set_printoptions', 'set_string_function',
'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
])
def get_tokens_unprocessed(self, text):
for index, token, value in \
PythonLexer.get_tokens_unprocessed(self, text):
if token is Name and value in self.EXTRA_KEYWORDS:
yield index, Keyword.Pseudo, value
else:
yield index, token, value
class SLexer(RegexLexer):
"""
For S, S-plus, and R source code.
*New in Pygments 0.10.*
"""
name = 'S'
aliases = ['splus', 's', 'r']
filenames = ['*.S', '*.R']
mimetypes = ['text/S-plus', 'text/S', 'text/R']
tokens = {
'comments': [
(r'#.*$', Comment.Single),
],
'valid_name': [
(r'[a-zA-Z][0-9a-zA-Z\._]+', Text),
(r'`.+`', String.Backtick),
],
'punctuation': [
(r'\[|\]|\[\[|\]\]|\$|\(|\)|@|:::?|;|,', Punctuation),
],
'keywords': [
(r'for(?=\s*\()|while(?=\s*\()|if(?=\s*\()|(?<=\s)else|'
r'(?<=\s)break(?=;|$)|return(?=\s*\()|function(?=\s*\()',
Keyword.Reserved)
],
'operators': [
(r'<-|-|==|<=|>=|<|>|&&|&|!=|\|\|?', Operator),
(r'\*|\+|\^|/|%%|%/%|=', Operator),
(r'%in%|%*%', Operator)
],
'builtin_symbols': [
(r'(NULL|NA|TRUE|FALSE|NaN)\b', Keyword.Constant),
(r'(T|F)\b', Keyword.Variable),
],
'numbers': [
(r'(?<![0-9a-zA-Z\)\}\]`\"])(?=\s*)[-\+]?[0-9]+'
r'(\.[0-9]*)?(E[0-9][-\+]?(\.[0-9]*)?)?', Number),
(r'\.[0-9]*(E[0-9][-\+]?(\.[0-9]*)?)?', Number),
],
'statements': [
include('comments'),
# whitespaces
(r'\s+', Text),
(r'\'', String, 'string_squote'),
(r'\"', String, 'string_dquote'),
include('builtin_symbols'),
include('numbers'),
include('keywords'),
include('punctuation'),
include('operators'),
include('valid_name'),
],
'root': [
include('statements'),
# blocks:
(r'\{|\}', Punctuation),
#(r'\{', Punctuation, 'block'),
(r'.', Text),
],
#'block': [
# include('statements'),
# ('\{', Punctuation, '#push'),
# ('\}', Punctuation, '#pop')
#],
'string_squote': [
(r'[^\']*\'', String, '#pop'),
],
'string_dquote': [
(r'[^\"]*\"', String, '#pop'),
],
}
def analyse_text(text):
return '<-' in text
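# A minimal, hedged demo (not part of Pygments itself): it tokenizes a small
# Matlab snippet with the MatlabLexer defined above so the builtin/comment/
# operator rules can be inspected. The sample source string is made up purely
# for illustration.
if __name__ == '__main__':
    _sample = "x = linspace(0, 1, 100); % evenly spaced points\ny = sin(2*pi*x);"
    for _token, _value in MatlabLexer().get_tokens(_sample):
        # each item is a (token type, matched text) pair
        print('%s %r' % (_token, _value))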
| 40.07381 | 86 | 0.503297 | [
"Apache-2.0"
] | TheDleo/backplanejs | tools/yuidoc/bin/pygments/lexers/math.py | 16,831 | Python |
def get_strings(city):
| 12 | 22 | 0.75 | [
"Apache-2.0"
] | vijaykumawat256/Prompt-Summarization | data/studio21_generated/introductory/3210/starter_code.py | 24 | Python |
# -*- coding: utf-8 -*-
"""
Indices library
===============
This module describes climate indicator functions. Functions are listed in alphabetical order and describe the raw
computation performed over xarray.DataArrays. DataArrays should carry unit information to allow for any needed
unit conversions. The output's attributes (CF-Convention) are not modified. Validation checks and output attributes
are handled by indicator classes described in files named by the physical variable (temperature, precip, streamflow).
Notes for docstring
-------------------
The docstrings adhere to the `NumPy`_ style convention and are meant as a way to store CF-Convention metadata as
well as information relevant to third party libraries (such as a WPS server).
The first line of the docstring (the short summary) will be assigned to the output's `long_name` attribute. The
`long_name` attribute is defined by the NetCDF User Guide to contain a long descriptive name which may, for example,
be used for labeling plots.
The second paragraph will be considered as the "*abstract*", or the CF global "*comment*" (miscellaneous information
about the data or methods used to produce it).
The third and fourth sections are the **Parameters** and **Returns** sections describing the input and output values
respectively.
.. code-block:: python
Parameters
----------
<standard_name> : xarray.DataArray
<Long_name> of variable [acceptable units].
threshold : string
Description of the threshold / units.
e.g. The 10th percentile of historical temperature [K].
freq : str, optional
Resampling frequency.
Returns
-------
xarray.DataArray
Output's <long_name> [units]
The next sections would be **Notes** and **References**:
.. code-block:: python
Notes
-----
This is where the mathematical equation is described.
At the end of the description, convention suggests
to add a reference [example]_:
.. math::
            3987^{12} + 4365^{12} = 4472^{12}
References
----------
.. [example] Smith, T.J. and Huard, D. (2018). "CF Docstrings:
A manifesto on conventions and the metaphysical nature
of ontological python documentation." Climate Aesthetics,
vol. 1, pp. 121-155.
Indice descriptions
===================
.. _`NumPy`: https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard
"""
from ._simple import *
from ._threshold import *
from ._multivariate import *
# TODO: Define a unit conversion system for temperature [K, C, F] and precipitation [mm h-1, Kg m-2 s-1] metrics
# TODO: Move utility functions to another file.
# TODO: Should we reference the standard vocabulary we're using ?
# E.g. http://vocab.nerc.ac.uk/collection/P07/current/BHMHISG2/
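# ---------------------------------------------------------------------------
# Hedged illustration (not part of xclim's public API): a toy indice laid out
# with the docstring sections described in the module docstring above. The
# name `_toy_tx_max` and its exact behaviour are assumptions for demonstration.
# ---------------------------------------------------------------------------
def _toy_tx_max(tasmax, freq="YS"):
    """Highest daily maximum temperature.

    The maximum of daily maximum temperature over the requested resampling
    period.

    Parameters
    ----------
    tasmax : xarray.DataArray
        Maximum daily temperature [K].
    freq : str, optional
        Resampling frequency.

    Returns
    -------
    xarray.DataArray
        Maximum of daily maximum temperature [K].
    """
    # Plain xarray resample-reduce; unit handling is deliberately left out here.
    return tasmax.resample(time=freq).max(dim="time", keep_attrs=True)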
| 35.088608 | 117 | 0.712843 | [
"Apache-2.0"
] | gacou54/xclim | xclim/indices/__init__.py | 2,772 | Python |
import asyncio
import logging
from typing import List, Optional, Set, Tuple
import aiosqlite
from blspy import G1Element
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.db_wrapper import DBWrapper
from chia.util.ints import uint32
from chia.wallet.derivation_record import DerivationRecord
from chia.wallet.util.wallet_types import WalletType
log = logging.getLogger(__name__)
class WalletPuzzleStore:
"""
WalletPuzzleStore keeps track of all generated puzzle_hashes and their derivation path / wallet.
"""
db_connection: aiosqlite.Connection
lock: asyncio.Lock
cache_size: uint32
all_puzzle_hashes: Set[bytes32]
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(600000)):
self = cls()
self.cache_size = cache_size
self.db_wrapper = db_wrapper
self.db_connection = self.db_wrapper.db
await self.db_connection.execute("pragma journal_mode=wal")
await self.db_connection.execute("pragma synchronous=2")
await self.db_connection.execute(
(
"CREATE TABLE IF NOT EXISTS derivation_paths("
"derivation_index int,"
" pubkey text,"
" puzzle_hash text PRIMARY_KEY,"
" wallet_type int,"
" wallet_id int,"
" used tinyint)"
)
)
await self.db_connection.execute(
"CREATE INDEX IF NOT EXISTS derivation_index_index on derivation_paths(derivation_index)"
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS ph on derivation_paths(puzzle_hash)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS pubkey on derivation_paths(pubkey)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_type on derivation_paths(wallet_type)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_id on derivation_paths(wallet_id)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS used on derivation_paths(wallet_type)")
await self.db_connection.commit()
# Lock
self.lock = asyncio.Lock() # external
await self._init_cache()
return self
async def close(self):
await self.db_connection.close()
async def _init_cache(self):
self.all_puzzle_hashes = await self.get_all_puzzle_hashes()
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM derivation_paths")
await cursor.close()
await self.db_connection.commit()
async def add_derivation_paths(self, records: List[DerivationRecord]) -> None:
"""
Insert many derivation paths into the database.
"""
async with self.db_wrapper.lock:
sql_records = []
for record in records:
self.all_puzzle_hashes.add(record.puzzle_hash)
sql_records.append(
(
record.index,
bytes(record.pubkey).hex(),
record.puzzle_hash.hex(),
record.wallet_type,
record.wallet_id,
0,
),
)
cursor = await self.db_connection.executemany(
"INSERT OR REPLACE INTO derivation_paths VALUES(?, ?, ?, ?, ?, ?)",
sql_records,
)
await cursor.close()
await self.db_connection.commit()
async def get_derivation_record(self, index: uint32, wallet_id: uint32) -> Optional[DerivationRecord]:
"""
Returns the derivation record by index and wallet id.
"""
cursor = await self.db_connection.execute(
"SELECT * FROM derivation_paths WHERE derivation_index=? and wallet_id=?;",
(
index,
wallet_id,
),
)
row = await cursor.fetchone()
await cursor.close()
if row is not None and row[0] is not None:
return DerivationRecord(
uint32(row[0]),
bytes32.fromhex(row[2]),
G1Element.from_bytes(bytes.fromhex(row[1])),
WalletType(row[3]),
uint32(row[4]),
)
return None
async def get_derivation_record_for_puzzle_hash(self, puzzle_hash: str) -> Optional[DerivationRecord]:
"""
        Returns the derivation record for the given puzzle hash.
"""
cursor = await self.db_connection.execute(
"SELECT * FROM derivation_paths WHERE puzzle_hash=?;",
(puzzle_hash,),
)
row = await cursor.fetchone()
await cursor.close()
if row is not None and row[0] is not None:
return DerivationRecord(
uint32(row[0]),
bytes32.fromhex(row[2]),
G1Element.from_bytes(bytes.fromhex(row[1])),
WalletType(row[3]),
uint32(row[4]),
)
return None
async def set_used_up_to(self, index: uint32, in_transaction=False) -> None:
"""
        Marks all derivation paths up to and including the given index as used so they are not reused.
"""
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"UPDATE derivation_paths SET used=1 WHERE derivation_index<=?",
(index,),
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def puzzle_hash_exists(self, puzzle_hash: bytes32) -> bool:
"""
Checks if passed puzzle_hash is present in the db.
"""
cursor = await self.db_connection.execute(
"SELECT * from derivation_paths WHERE puzzle_hash=?", (puzzle_hash.hex(),)
)
row = await cursor.fetchone()
await cursor.close()
return row is not None
async def one_of_puzzle_hashes_exists(self, puzzle_hashes: List[bytes32]) -> bool:
"""
Checks if one of the passed puzzle_hashes is present in the db.
"""
if len(puzzle_hashes) < 1:
return False
for ph in puzzle_hashes:
if ph in self.all_puzzle_hashes:
return True
return False
async def index_for_pubkey(self, pubkey: G1Element) -> Optional[uint32]:
"""
        Returns the derivation index for the given pubkey.
Returns None if not present.
"""
cursor = await self.db_connection.execute(
"SELECT * from derivation_paths WHERE pubkey=?", (bytes(pubkey).hex(),)
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return uint32(row[0])
return None
async def index_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[uint32]:
"""
        Returns the derivation index for the given puzzle_hash.
Returns None if not present.
"""
cursor = await self.db_connection.execute(
"SELECT * from derivation_paths WHERE puzzle_hash=?", (puzzle_hash.hex(),)
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return uint32(row[0])
return None
async def index_for_puzzle_hash_and_wallet(self, puzzle_hash: bytes32, wallet_id: uint32) -> Optional[uint32]:
"""
        Returns the derivation index for the given puzzle_hash and wallet id.
Returns None if not present.
"""
cursor = await self.db_connection.execute(
"SELECT * from derivation_paths WHERE puzzle_hash=? and wallet_id=?;",
(
puzzle_hash.hex(),
wallet_id,
),
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return uint32(row[0])
return None
async def wallet_info_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[Tuple[uint32, WalletType]]:
"""
        Returns the wallet id and wallet type for the given puzzle_hash.
Returns None if not present.
"""
cursor = await self.db_connection.execute(
"SELECT * from derivation_paths WHERE puzzle_hash=?", (puzzle_hash.hex(),)
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return row[4], WalletType(row[3])
return None
async def get_all_puzzle_hashes(self) -> Set[bytes32]:
"""
Return a set containing all puzzle_hashes we generated.
"""
cursor = await self.db_connection.execute("SELECT * from derivation_paths")
rows = await cursor.fetchall()
await cursor.close()
result: Set[bytes32] = set()
for row in rows:
result.add(bytes32(bytes.fromhex(row[2])))
return result
async def get_last_derivation_path(self) -> Optional[uint32]:
"""
Returns the last derivation path by derivation_index.
"""
cursor = await self.db_connection.execute("SELECT MAX(derivation_index) FROM derivation_paths;")
row = await cursor.fetchone()
await cursor.close()
if row is not None and row[0] is not None:
return uint32(row[0])
return None
async def get_last_derivation_path_for_wallet(self, wallet_id: int) -> Optional[uint32]:
"""
Returns the last derivation path by derivation_index.
"""
cursor = await self.db_connection.execute(
f"SELECT MAX(derivation_index) FROM derivation_paths WHERE wallet_id={wallet_id};"
)
row = await cursor.fetchone()
await cursor.close()
if row is not None and row[0] is not None:
return uint32(row[0])
return None
async def get_current_derivation_record_for_wallet(self, wallet_id: uint32) -> Optional[DerivationRecord]:
"""
Returns the current derivation record by derivation_index.
"""
cursor = await self.db_connection.execute(
f"SELECT MAX(derivation_index) FROM derivation_paths WHERE wallet_id={wallet_id} and used=1;"
)
row = await cursor.fetchone()
await cursor.close()
if row is not None and row[0] is not None:
index = uint32(row[0])
return await self.get_derivation_record(index, wallet_id)
return None
async def get_unused_derivation_path(self) -> Optional[uint32]:
"""
Returns the first unused derivation path by derivation_index.
"""
cursor = await self.db_connection.execute("SELECT MIN(derivation_index) FROM derivation_paths WHERE used=0;")
row = await cursor.fetchone()
await cursor.close()
if row is not None and row[0] is not None:
return uint32(row[0])
return None
| 32.74269 | 117 | 0.59841 | [
"Apache-2.0"
] | 1SecureANDROID/chia-blockchain | chia/wallet/wallet_puzzle_store.py | 11,198 | Python |
solution = 132Pattern()
assert X == solution.find132pattern( ) | 31 | 38 | 0.758065 | [
"Apache-2.0"
] | spencercjh/sync-leetcode-today-problem-python3-example | test/test_132_pattern.py | 62 | Python |
# Copyright (C) 2019 by Landmark Acoustics LLC
r"""A class to write a WAV-formatted file."""
import wave
class WaveFile:
'''A wrapper for `Wave_write` from Python STL's `wave` module.
Parameters
----------
name : str
The name to save the file as. It should include path and extension.
sample_rate : int
The number of samples per second that the file will use.
bit_rate : int
The number of bits the file will use per sample.
channels : int
The number of channels that the file has.
See Also
--------
wave : the Python STL module
'''
def __init__(self,
name: str,
sample_rate: int,
bit_rate: int,
channels: int) -> None:
self._channels = channels
self._sample_rate = sample_rate
self._byte_rate = bit_rate // 8
self._filehandle = wave.open(name, 'wb')
self._filehandle.setnchannels(self.channels)
self._filehandle.setsampwidth(self.byte_rate)
self._filehandle.setframerate(self.sample_rate)
@property
def channels(self) -> int:
'''The number of channels the file has.'''
return self._channels
@property
def sample_rate(self) -> int:
'''The number of samples per second.'''
return self._sample_rate
@property
def byte_rate(self) -> int:
'''The number of bytes per sample.'''
return self._byte_rate
@property
def bit_rate(self) -> int:
'''The number of bits per sample.'''
return self.byte_rate * 8
def write_frames(self, data) -> int:
'''Add some data to the file.
Parameters
----------
data : bytes-like object
The user must ensure that the data's format matches the file's!
Returns
-------
int : the number of frames written
'''
pos = self._filehandle.tell()
self._filehandle.writeframes(data)
return self._filehandle.tell() - pos
@property
def frame_size(self) -> int:
'''The number of bytes per frame.'''
return self.byte_rate * self.channels
def __enter__(self):
self._filehandle.__enter__()
return self
def __exit__(self, *args, **kwargs):
return self._filehandle.__exit__(*args, **kwargs)
if __name__ == '__main__':
import array
import sys
wvf = WaveFile(sys.argv[1], 44100, 28, 3)
a = array.array('b')
a.extend([0 for i in range(12000 * wvf.frame_size)])
N = wvf.write_frames(a)
print(f'Wrote {N} frames in {wvf.channels} {wvf.bit_rate}-bit channels.')
| 25.932039 | 77 | 0.592662 | [
"MIT"
] | landmarkacoustics/lac-audio-files | lacaudiofiles/wave/wavefile.py | 2,671 | Python |
# Student's average grade
n1 = float(input('Primeira nota do aluno: '))
n2 = float(input('Segundo nota do aluno: '))
média = (n1 + n2) / 2
print('A média entre {:.1f} e {:.1f} é igual a {:.1f}'.format(n1, n2, média))
| 36.166667 | 78 | 0.608295 | [
"MIT"
] | ErosMLima/python-server-connection | ex007.1.py | 222 | Python |
# Generated by Django 3.1.2 on 2020-10-08 05:13
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('notes', '0003_auto_20201006_0607'),
]
operations = [
migrations.AlterUniqueTogether(
name='publicsharednote',
unique_together={('user', 'note')},
),
]
| 22.95 | 66 | 0.649237 | [
"MIT"
] | Namnetsy/simple-notes-django-app | simple_notes/notes/migrations/0004_auto_20201008_0513.py | 459 | Python |
"""
Deployment helpers
==================
"""
import os
import logging
from ..definitions import ROOT_DIR
from .docker import Docker
from .ecr import ECR
from .s3 import S3
from .sagemaker import Sagemaker
logger = logging.getLogger(__name__)
def build(run, project, model_type):
docker = Docker()
docker_path = os.path.join(ROOT_DIR, 'sagemaker', model_type)
image_name = get_image_name(run, project)
docker.build(docker_path, image_name)
def push(run, project, model_type):
docker = Docker()
s3 = S3()
image_name = get_image_name(run, project)
docker.push(image_name)
s3.upload_model(run, image_name, model_type=model_type)
def build_and_push(run, project, model_type):
build(run, project, model_type)
push(run, project, model_type)
def run_local(run, project, model_type):
# build image
build(run, project, model_type)
# run it
docker = Docker()
image_name = get_image_name(run, project)
docker.run(image_name, run, model_type)
def create_model_and_configuration(run, project, question_tag, model_type, instance_type):
# init helpers
ecr = ECR()
s3 = S3()
sm = Sagemaker()
# build deploy arguments
image_name = get_image_name(run, project)
ecr_image_name = ecr.get_ecr_image_name(image_name)
s3_model_path = s3.get_model_s3_path(image_name)
tags = [{'Key': 'project_name', 'Value': project},
{'Key': 'question_tag', 'Value': question_tag},
{'Key': 'run_name', 'Value': run},
{'Key': 'model_type', 'Value': model_type}]
# create model and endpoint configuration
sm.create_model_and_configuration(ecr_image_name, s3_model_path, tags=tags, instance_type=instance_type)
def deploy(run, project, question_tag, model_type, instance_type):
# initialize stuff
# build image and push to ECR
build_and_push(run, project, model_type)
# create model and endpoint configuration
create_model_and_configuration(run, project, question_tag, model_type, instance_type)
def get_image_name(run, project):
return f'crowdbreaks_{project}_{run}'
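# Hedged example (all argument values below are placeholders, not real project
# names): a full deployment chains the helpers above, e.g.
#   deploy(run='run_2021_01', project='sentiment', question_tag='sentiment',
#          model_type='fasttext', instance_type='ml.t2.medium')
# which builds the Docker image, pushes it to ECR and S3, and creates the
# SageMaker model and endpoint configuration.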
| 31.462687 | 108 | 0.70778 | [
"MIT"
] | crowdbreaks/text-classification | txcl/utils/deploy_helpers.py | 2,108 | Python |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'goodshare.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.913043 | 73 | 0.679699 | [
"MIT"
] | nikhilchaudhary0126/goodshare | manage.py | 665 | Python |
import json
import os
import time

import httpx
from flickrapi import FlickrAPI
from flickrapi.exceptions import FlickrError
from tqdm import tqdm

# get_secret, est_area and the PRIVACY_FILTER, CONTENT_TYPE, HAS_GEO, GEO_CTX
# constants are referenced below; they are assumed to be provided by a sibling
# module of this package.
def get_cities(cfg):
return cfg['cities'].keys()
def get_usable_bounding_boxes(nominal_boxes, cfg):
FLICKR_PUBLIC = get_secret('flickr_api_key')
FLICKR_SECRET = get_secret('flickr_api_secret')
flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
boxes = []
working = nominal_boxes.copy()
license = "1,2,3,4,5,6,7,8,9,10"
extras ='description,license,date_upload,date_taken,original_format,'
extras+='last_update,geo,tags, machine_tags, o_dims, media,'
extras+='url_m,url_n,url_z,url_c,url_l,url_o'
city_total=0
# print(' area_km2 count type bounding_box')
while len(working) > 0:
box = working.pop()
temp = list(map(str, box))
str_box = ",".join(temp)
box_area = est_area(box)
divide_flag = False
if box_area > cfg["max_area"]:
total_imgs = -1
divide_flag = True
else:
time.sleep(cfg["time_delay"])
try:
box_pics = flickr.photos.search(
privacy_filter=PRIVACY_FILTER, bbox=str_box,
content_type=CONTENT_TYPE,
has_geo=HAS_GEO, geo_context=GEO_CTX,
license=license, extras=extras, per_page=cfg["page_size"])
total_imgs = int(box_pics['photos']['total'])
divide_flag = (total_imgs >= cfg["density_limit"] and box_area > cfg["min_area"])
except FlickrError as err:
                print(f'Error retrieving initial page for bounding box {str_box}')
print(f'{err}')
# print('%10.4f %5i %s %s' % (box_area/1.E6, total_imgs, 'branch'
# if divide_flag else 'leaf ', box))
if divide_flag:
new_box_1 = box.copy()
new_box_2 = box.copy()
if box[2] - box[0] > box[3] - box[1]: #wide
border = (box[0] + box[2])/2
new_box_1[2] = border
new_box_2[0] = border
else: #tall
border = (box[1] + box[3])/2
new_box_1[3] = border
new_box_2[1] = border
working.append(new_box_1)
working.append(new_box_2)
elif total_imgs == 0:
continue
else:
city_total += total_imgs
boxes.append(box)
print(city_total)
return boxes
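# Worked example of the split step above (illustrative numbers only): a box
# [0.0, 0.0, 0.4, 0.2] (min_lon, min_lat, max_lon, max_lat) is wider than it
# is tall, so it is halved at longitude 0.2 into [0.0, 0.0, 0.2, 0.2] and
# [0.2, 0.0, 0.4, 0.2]; splitting recurses until a box holds fewer than
# cfg["density_limit"] photos or its area drops to cfg["min_area"].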
def read_metadata(file_root, cities, url_field):
metadata = {}
urls = {}
# for key in cfg['cities']:
# city=key.replace(" ", "_")
for city in cities:
urls[city]=set()
file_path=f'{file_root}/{city}/metadata.json'
if os.path.exists(file_path):
with open(file_path, 'r') as f:
loaded = json.load(f)
for img in loaded['images']:
                    if url_field in img and img[url_field] not in urls[city]:
urls[city].add(img[url_field])
metadata[city]= loaded
return metadata, urls
def get_known_urls(file_root, cities):
urls = {}
for key in cities:
city=key.replace(" ", "_")
file_path=f'{file_root}/{city}/urls.txt'
city_urls=set()
if os.path.exists(file_path):
with open(file_path, 'r') as f:
lines = f.readlines()
for line in lines:
city_urls.add(line.strip())
urls[key] = city_urls
return urls
def write_urls(urls, cfg):
for key in cfg['cities']:
city=key.replace(" ", "_")
directory=os.path.join('/data', city)
if not os.path.exists(directory):
os.mkdir(directory)
file_path=os.path.join(directory, 'urls')
if cfg['cities'][key]['download'] != 'photos':
print(f"printing {len(urls[city])} urls for city {city} at {file_path}")
try:
with open(file_path, 'w') as f:
for url in urls[city]:
f.write(f'{url}\n')
f.flush()
f.close()
except Exception as err:
print(f"error: {err} opening file {file_path}")
def get_metadata(cfg, file_root):
metadata = None
cities = get_cities(cfg)
url_field = cfg['url_field']
urls = get_known_urls(file_root, cities)
metadata, urls = read_metadata(file_root, cities, url_field)
if cfg['refresh_metadata']:
print('fetching metadata')
metadata,urls = fetch_metadata(cfg, metadata, urls)
print('writing metadata')
write_metadata(metadata, cfg, file_root)
print('writing url list')
write_urls(urls, cfg)
return metadata
def fetch_metadata(cfg, metadata, urls):
FLICKR_PUBLIC = get_secret('flickr_api_key')
FLICKR_SECRET = get_secret('flickr_api_secret')
flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
license = "1,2,3,4,5,6,7,8,9,10"
extras ='description,license,date_upload,date_taken,original_format,'
extras+='last_update,geo,tags, machine_tags, o_dims, media,'
extras+='url_m,url_n,url_z,url_c,url_l,url_o'
inserted_ids=[]
for key in cfg['cities']:
count=0
dl_limit = cfg['cities'][key]['download_limit']
if dl_limit != -1 and dl_limit > 1000:
boxes = get_usable_bounding_boxes(list(cfg['cities'][key]['bounding_boxes']), cfg)
else:
boxes = list(cfg['cities'][key]['bounding_boxes'])
city_urls = urls[key]
if not key in metadata:
metadata[key]={}
metadata[key]['image_count'] = 0
metadata[key]['images'] = []
total = 0
for bbox in tqdm(boxes, desc=key):
temp = list(map(str, bbox))
bbox_str = ",".join(temp)
time.sleep(cfg["time_delay"])
total_pages=0
try:
city_pics = flickr.photos.search(
privacy_filter=PRIVACY_FILTER, bbox=bbox_str,
content_type=CONTENT_TYPE,
has_geo=HAS_GEO, geo_context=GEO_CTX,
license=license, extras=extras, per_page=cfg["page_size"])
total_pages = city_pics['photos']['pages']
total += int(city_pics['photos']['total'])
except FlickrError as err:
                print(f'Error retrieving initial page for bounding box {bbox}')
print(f'{err}')
for p in range(1, total_pages):
try:
time.sleep(cfg["time_delay"])
city_pics = flickr.photos.search(
privacy_filter=PRIVACY_FILTER, bbox=bbox_str,
content_type=CONTENT_TYPE,
has_geo=HAS_GEO, geo_context=GEO_CTX,
license=license, extras=extras, per_page=cfg["page_size"],
page=p)
for ph in city_pics['photos']['photo']:
# metadata[key]['images'].append(ph)
if dl_limit != -1 and count > dl_limit:
break
if cfg["url_field"] in ph and not ph[cfg["url_field"]] in city_urls:
metadata[key]['images'].append(ph)
city_urls.add(ph[cfg["url_field"]])
metadata[key]['image_count']+=1
count += 1
except FlickrError as err:
print(f'Error retrieving page {p} for bounding box {bbox}')
print(f'{err}')
# metadata[key]['image_count'] = total
# print(f"length of inserted ids for {key}: {len(inserted_ids)}")
# print(f"total for {key}: {len(metadata[key]['images'])}")
return metadata, urls
def write_metadata(metadata, cfg, file_root):
for key in metadata:
city=key.replace(" ", "_")
directory=os.path.join(file_root,city)
if not os.path.exists(directory):
os.mkdir(directory)
file_path=os.path.join(directory,'metadata.json')
dl_flag =cfg['cities'][key]['download']
if cfg['cities'][key]['download'] != 'photos':
with open(file_path, 'w') as f:
json.dump(metadata[key], f, indent=2)
| 37.168142 | 97 | 0.542143 | [
"Apache-2.0"
] | IQTLabs/WITW | tools/download/flickr/src/metadata.py | 8,400 | Python |
# -*- coding: utf-8 -*-
#
# ColorHandPose3DNetwork - Network for estimating 3D Hand Pose from a single RGB Image
# Copyright (C) 2017 Christian Zimmermann
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, unicode_literals
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
import numpy as np
import math
import cv2
class NetworkOps(object):
""" Operations that are frequently used within networks. """
neg_slope_of_relu = 0.01
@classmethod
def leaky_relu(cls, tensor, name='relu'):
out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu*tensor, name=name)
return out_tensor
@classmethod
def conv(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
strides = [1, stride, stride, 1]
kernel_shape = [kernel_size, kernel_size, in_size[3], out_chan]
# conv
kernel = tf.get_variable('weights', kernel_shape, tf.float32,
tf.contrib.layers.xavier_initializer_conv2d(), trainable=trainable, collections=['wd', 'variables', 'filters'])
tmp_result = tf.nn.conv2d(in_tensor, kernel, strides, padding='SAME')
# bias
biases = tf.get_variable('biases', [kernel_shape[3]], tf.float32,
tf.constant_initializer(0.0001), trainable=trainable, collections=['wd', 'variables', 'biases'])
out_tensor = tf.nn.bias_add(tmp_result, biases, name='out')
return out_tensor
@classmethod
def conv_relu(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):
tensor = cls.conv(in_tensor, layer_name, kernel_size, stride, out_chan, trainable)
out_tensor = cls.leaky_relu(tensor, name='out')
return out_tensor
@classmethod
def max_pool(cls, bottom, name='pool'):
pooled = tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='VALID', name=name)
return pooled
@classmethod
def upconv(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
kernel_shape = [kernel_size, kernel_size, in_size[3], in_size[3]]
strides = [1, stride, stride, 1]
# conv
kernel = cls.get_deconv_filter(kernel_shape, trainable)
tmp_result = tf.nn.conv2d_transpose(value=in_tensor, filter=kernel, output_shape=output_shape,
strides=strides, padding='SAME')
# bias
biases = tf.get_variable('biases', [kernel_shape[2]], tf.float32,
tf.constant_initializer(0.0), trainable=trainable, collections=['wd', 'variables', 'biases'])
out_tensor = tf.nn.bias_add(tmp_result, biases)
return out_tensor
@classmethod
def upconv_relu(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):
tensor = cls.upconv(in_tensor, layer_name, output_shape, kernel_size, stride, trainable)
out_tensor = cls.leaky_relu(tensor, name='out')
return out_tensor
@staticmethod
def get_deconv_filter(f_shape, trainable):
width = f_shape[0]
height = f_shape[1]
f = math.ceil(width/2.0)
c = (2 * f - 1 - f % 2) / (2.0 * f)
bilinear = np.zeros([f_shape[0], f_shape[1]])
for x in range(width):
for y in range(height):
value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
bilinear[x, y] = value
weights = np.zeros(f_shape)
for i in range(f_shape[2]):
weights[:, :, i, i] = bilinear
init = tf.constant_initializer(value=weights,
dtype=tf.float32)
return tf.get_variable(name="weights", initializer=init,
shape=weights.shape, trainable=trainable, collections=['wd', 'variables', 'filters'])
@staticmethod
def fully_connected(in_tensor, layer_name, out_chan, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
assert len(in_size) == 2, 'Input to a fully connected layer must be a vector.'
weights_shape = [in_size[1], out_chan]
# weight matrix
weights = tf.get_variable('weights', weights_shape, tf.float32,
tf.contrib.layers.xavier_initializer(), trainable=trainable)
weights = tf.check_numerics(weights, 'weights: %s' % layer_name)
# bias
biases = tf.get_variable('biases', [out_chan], tf.float32,
tf.constant_initializer(0.0001), trainable=trainable)
biases = tf.check_numerics(biases, 'biases: %s' % layer_name)
out_tensor = tf.matmul(in_tensor, weights) + biases
return out_tensor
@classmethod
def fully_connected_relu(cls, in_tensor, layer_name, out_chan, trainable=True):
tensor = cls.fully_connected(in_tensor, layer_name, out_chan, trainable)
out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu*tensor, name='out')
return out_tensor
@staticmethod
def dropout(in_tensor, keep_prob, evaluation):
""" Dropout: Each neuron is dropped independently. """
with tf.variable_scope('dropout'):
tensor_shape = in_tensor.get_shape().as_list()
out_tensor = tf.cond(evaluation,
lambda: tf.nn.dropout(in_tensor, 1.0,
noise_shape=tensor_shape),
lambda: tf.nn.dropout(in_tensor, keep_prob,
noise_shape=tensor_shape))
return out_tensor
@staticmethod
def spatial_dropout(in_tensor, keep_prob, evaluation):
""" Spatial dropout: Not each neuron is dropped independently, but feature map wise. """
with tf.variable_scope('spatial_dropout'):
tensor_shape = in_tensor.get_shape().as_list()
out_tensor = tf.cond(evaluation,
lambda: tf.nn.dropout(in_tensor, 1.0,
noise_shape=tensor_shape),
lambda: tf.nn.dropout(in_tensor, keep_prob,
noise_shape=[tensor_shape[0], 1, 1, tensor_shape[3]]))
return out_tensor
def crop_image_from_xy(image, crop_location, crop_size, scale=1.0):
"""
Crops an image. When factor is not given does an central crop.
Inputs:
image: 4D tensor, [batch, height, width, channels] which will be cropped in height and width dimension
crop_location: tensor, [batch, 2] which represent the height and width location of the crop
crop_size: int, describes the extension of the crop
Outputs:
image_crop: 4D tensor, [batch, crop_size, crop_size, channels]
"""
with tf.name_scope('crop_image_from_xy'):
s = image.get_shape().as_list()
assert len(s) == 4, "Image needs to be of shape [batch, width, height, channel]"
scale = tf.reshape(scale, [-1])
crop_location = tf.cast(crop_location, tf.float32)
crop_location = tf.reshape(crop_location, [s[0], 2])
crop_size = tf.cast(crop_size, tf.float32)
crop_size_scaled = crop_size / scale
y1 = crop_location[:, 0] - crop_size_scaled//2
y2 = y1 + crop_size_scaled
x1 = crop_location[:, 1] - crop_size_scaled//2
x2 = x1 + crop_size_scaled
y1 /= s[1]
y2 /= s[1]
x1 /= s[2]
x2 /= s[2]
boxes = tf.stack([y1, x1, y2, x2], -1)
crop_size = tf.cast(tf.stack([crop_size, crop_size]), tf.int32)
box_ind = tf.range(s[0])
image_c = tf.image.crop_and_resize(tf.cast(image, tf.float32), boxes, box_ind, crop_size, name='crop')
return image_c
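# e.g. (illustrative shapes): crop_image_from_xy(images, centers, 64, scale=1.0)
# with images of shape [8, 240, 320, 3] and centers of shape [8, 2] returns a
# tensor of shape [8, 64, 64, 3].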
def find_max_location(scoremap):
""" Returns the coordinates of the given scoremap with maximum value. """
with tf.variable_scope('find_max_location'):
s = scoremap.get_shape().as_list()
if len(s) == 4:
scoremap = tf.squeeze(scoremap, [3])
if len(s) == 2:
scoremap = tf.expand_dims(scoremap, 0)
s = scoremap.get_shape().as_list()
assert len(s) == 3, "Scoremap must be 3D."
assert (s[0] < s[1]) and (s[0] < s[2]), "Scoremap must be [Batch, Width, Height]"
# my meshgrid
x_range = tf.expand_dims(tf.range(s[1]), 1)
y_range = tf.expand_dims(tf.range(s[2]), 0)
X = tf.tile(x_range, [1, s[2]])
Y = tf.tile(y_range, [s[1], 1])
x_vec = tf.reshape(X, [-1])
y_vec = tf.reshape(Y, [-1])
scoremap_vec = tf.reshape(scoremap, [s[0], -1])
max_ind_vec = tf.cast(tf.argmax(scoremap_vec, dimension=1), tf.int32)
xy_loc = list()
for i in range(s[0]):
x_loc = tf.reshape(x_vec[max_ind_vec[i]], [1])
y_loc = tf.reshape(y_vec[max_ind_vec[i]], [1])
xy_loc.append(tf.concat([x_loc, y_loc], 0))
xy_loc = tf.stack(xy_loc, 0)
return xy_loc
def single_obj_scoremap(scoremap):
""" Applies my algorithm to figure out the most likely object from a given segmentation scoremap. """
with tf.variable_scope('single_obj_scoremap'):
filter_size = 21
s = scoremap.get_shape().as_list()
assert len(s) == 4, "Scoremap must be 4D."
scoremap_softmax = tf.nn.softmax(scoremap) #B, H, W, C --> normalizes across last dimension
scoremap_fg = tf.reduce_max(scoremap_softmax[:, :, :, 1:], 3) # B, H, W
detmap_fg = tf.round(scoremap_fg) # B, H, W
# find maximum in the fg scoremap
max_loc = find_max_location(scoremap_fg)
# use maximum to start "growing" our objectmap
objectmap_list = list()
kernel_dil = tf.ones((filter_size, filter_size, 1)) / float(filter_size*filter_size)
for i in range(s[0]):
# create initial objectmap (put a one at the maximum)
sparse_ind = tf.reshape(max_loc[i, :], [1, 2]) # reshape that its one point with 2dim)
objectmap = tf.sparse_to_dense(sparse_ind, [s[1], s[2]], 1.0)
# grow the map by dilation and pixelwise and
            num_passes = max(s[1], s[2]) // (filter_size//2) # number of passes needed to make sure the map can spread over the whole image
for j in range(num_passes):
objectmap = tf.reshape(objectmap, [1, s[1], s[2], 1])
objectmap_dil = tf.nn.dilation2d(objectmap, kernel_dil, [1, 1, 1, 1], [1, 1, 1, 1], 'SAME')
objectmap_dil = tf.reshape(objectmap_dil, [s[1], s[2]])
objectmap = tf.round(tf.multiply(detmap_fg[i, :, :], objectmap_dil))
objectmap = tf.reshape(objectmap, [s[1], s[2], 1])
objectmap_list.append(objectmap)
objectmap = tf.stack(objectmap_list)
return objectmap
def calc_center_bb(binary_class_mask):
""" Returns the center of mass coordinates for the given binary_class_mask. """
with tf.variable_scope('calc_center_bb'):
binary_class_mask = tf.cast(binary_class_mask, tf.int32)
binary_class_mask = tf.equal(binary_class_mask, 1)
s = binary_class_mask.get_shape().as_list()
if len(s) == 4:
binary_class_mask = tf.squeeze(binary_class_mask, [3])
s = binary_class_mask.get_shape().as_list()
assert len(s) == 3, "binary_class_mask must be 3D."
assert (s[0] < s[1]) and (s[0] < s[2]), "binary_class_mask must be [Batch, Width, Height]"
# my meshgrid
x_range = tf.expand_dims(tf.range(s[1]), 1)
y_range = tf.expand_dims(tf.range(s[2]), 0)
X = tf.tile(x_range, [1, s[2]])
Y = tf.tile(y_range, [s[1], 1])
bb_list = list()
center_list = list()
crop_size_list = list()
for i in range(s[0]):
X_masked = tf.cast(tf.boolean_mask(X, binary_class_mask[i, :, :]), tf.float32)
Y_masked = tf.cast(tf.boolean_mask(Y, binary_class_mask[i, :, :]), tf.float32)
x_min = tf.reduce_min(X_masked)
x_max = tf.reduce_max(X_masked)
y_min = tf.reduce_min(Y_masked)
y_max = tf.reduce_max(Y_masked)
start = tf.stack([x_min, y_min])
end = tf.stack([x_max, y_max])
bb = tf.stack([start, end], 1)
bb_list.append(bb)
center_x = 0.5*(x_max + x_min)
center_y = 0.5*(y_max + y_min)
center = tf.stack([center_x, center_y], 0)
center = tf.cond(tf.reduce_all(tf.is_finite(center)), lambda: center,
lambda: tf.constant([160.0, 160.0]))
center.set_shape([2])
center_list.append(center)
crop_size_x = x_max - x_min
crop_size_y = y_max - y_min
crop_size = tf.expand_dims(tf.maximum(crop_size_x, crop_size_y), 0)
crop_size = tf.cond(tf.reduce_all(tf.is_finite(crop_size)), lambda: crop_size,
lambda: tf.constant([100.0]))
crop_size.set_shape([1])
crop_size_list.append(crop_size)
bb = tf.stack(bb_list)
center = tf.stack(center_list)
crop_size = tf.stack(crop_size_list)
return center, bb, crop_size
def detect_keypoints(scoremaps):
""" Performs detection per scoremap for the hands keypoints. """
if len(scoremaps.shape) == 4:
scoremaps = np.squeeze(scoremaps)
s = scoremaps.shape
assert len(s) == 3, "This function was only designed for 3D Scoremaps."
assert (s[2] < s[1]) and (s[2] < s[0]), "Probably the input is not correct, because [H, W, C] is expected."
keypoint_coords = np.zeros((s[2], 2))
for i in range(s[2]):
v, u = np.unravel_index(np.argmax(scoremaps[:, :, i]), (s[0], s[1]))
keypoint_coords[i, 0] = v
keypoint_coords[i, 1] = u
return keypoint_coords
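# e.g. (illustrative): detect_keypoints(np.random.rand(64, 64, 21)) returns a
# (21, 2) array holding the (row, col) position of the maximum of each of the
# 21 scoremaps.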
def trafo_coords(keypoints_crop_coords, centers, scale, crop_size):
""" Transforms coords into global image coordinates. """
keypoints_coords = np.copy(keypoints_crop_coords)
keypoints_coords -= crop_size // 2
keypoints_coords /= scale
keypoints_coords += centers
return keypoints_coords
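# Worked example (made-up numbers): with crop_size=256, scale=2.0 and
# centers=(160, 160), a crop-space keypoint at (128, 128) maps back to
# (128 - 128) / 2.0 + 160 = 160 in each coordinate, i.e. (160, 160) in the
# full image.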
def plot_hand(coords_hw, axis, color_fixed=None, linewidth='1'):
""" Plots a hand stick figure into a matplotlib figure. """
colors = np.array([[0., 0., 0.5],
[0., 0., 0.73172906],
[0., 0., 0.96345811],
[0., 0.12745098, 1.],
[0., 0.33137255, 1.],
[0., 0.55098039, 1.],
[0., 0.75490196, 1.],
[0.06008855, 0.9745098, 0.90765338],
[0.22454143, 1., 0.74320051],
[0.40164453, 1., 0.56609741],
[0.56609741, 1., 0.40164453],
[0.74320051, 1., 0.22454143],
[0.90765338, 1., 0.06008855],
[1., 0.82861293, 0.],
[1., 0.63979666, 0.],
[1., 0.43645606, 0.],
[1., 0.2476398, 0.],
[0.96345811, 0.0442992, 0.],
[0.73172906, 0., 0.],
[0.5, 0., 0.]])
# define connections and colors of the bones
bones = [((0, 4), colors[0, :]),
((4, 3), colors[1, :]),
((3, 2), colors[2, :]),
((2, 1), colors[3, :]),
((0, 8), colors[4, :]),
((8, 7), colors[5, :]),
((7, 6), colors[6, :]),
((6, 5), colors[7, :]),
((0, 12), colors[8, :]),
((12, 11), colors[9, :]),
((11, 10), colors[10, :]),
((10, 9), colors[11, :]),
((0, 16), colors[12, :]),
((16, 15), colors[13, :]),
((15, 14), colors[14, :]),
((14, 13), colors[15, :]),
((0, 20), colors[16, :]),
((20, 19), colors[17, :]),
((19, 18), colors[18, :]),
((18, 17), colors[19, :])]
for connection, color in bones:
coord1 = coords_hw[connection[0], :]
coord2 = coords_hw[connection[1], :]
coords = np.stack([coord1, coord2])
if color_fixed is None:
axis.plot(coords[:, 1], coords[:, 0], color=color, linewidth=linewidth)
else:
axis.plot(coords[:, 1], coords[:, 0], color_fixed, linewidth=linewidth)
def plot_hand_3d(coords_xyz, axis, color_fixed=None, linewidth='1'):
""" Plots a hand stick figure into a matplotlib figure. """
colors = np.array([[0., 0., 0.5],
[0., 0., 0.73172906],
[0., 0., 0.96345811],
[0., 0.12745098, 1.],
[0., 0.33137255, 1.],
[0., 0.55098039, 1.],
[0., 0.75490196, 1.],
[0.06008855, 0.9745098, 0.90765338],
[0.22454143, 1., 0.74320051],
[0.40164453, 1., 0.56609741],
[0.56609741, 1., 0.40164453],
[0.74320051, 1., 0.22454143],
[0.90765338, 1., 0.06008855],
[1., 0.82861293, 0.],
[1., 0.63979666, 0.],
[1., 0.43645606, 0.],
[1., 0.2476398, 0.],
[0.96345811, 0.0442992, 0.],
[0.73172906, 0., 0.],
[0.5, 0., 0.]])
# define connections and colors of the bones
bones = [((0, 4), colors[0, :]),
((4, 3), colors[1, :]),
((3, 2), colors[2, :]),
((2, 1), colors[3, :]),
((0, 8), colors[4, :]),
((8, 7), colors[5, :]),
((7, 6), colors[6, :]),
((6, 5), colors[7, :]),
((0, 12), colors[8, :]),
((12, 11), colors[9, :]),
((11, 10), colors[10, :]),
((10, 9), colors[11, :]),
((0, 16), colors[12, :]),
((16, 15), colors[13, :]),
((15, 14), colors[14, :]),
((14, 13), colors[15, :]),
((0, 20), colors[16, :]),
((20, 19), colors[17, :]),
((19, 18), colors[18, :]),
((18, 17), colors[19, :])]
for connection, color in bones:
coord1 = coords_xyz[connection[0], :]
coord2 = coords_xyz[connection[1], :]
coords = np.stack([coord1, coord2])
if color_fixed is None:
axis.plot(coords[:, 0], coords[:, 1], coords[:, 2], color=color, linewidth=linewidth)
else:
axis.plot(coords[:, 0], coords[:, 1], coords[:, 2], color_fixed, linewidth=linewidth)
axis.view_init(azim=-90., elev=90.)
def plot_hand_2d(coords_hw, image, color_fixed=None, linewidth=2):
""" Plots a hand stick figure into a matplotlib figure. """
colors = [(0, 0, 127),
(0, 0, 187),
(0, 0, 246),
(0, 32, 255),
(0, 85, 255),
(0, 140, 255),
(0, 192, 255),
(15, 248, 231),
(57, 255, 190),
(102, 1, 144),
(144, 1, 102),
(190, 1, 57),
(231, 1, 15),
(1, 211, 0),
(1, 163, 0),
(1, 111, 0),
(1, 63, 0),
(246, 11, 0),
(187, 0, 0),
(127, 0, 0)]
# define connections and colors of the bones
bones = [((0, 4), colors[0]),
((4, 3), colors[1]),
((3, 2), colors[2]),
((2, 1), colors[3]),
((0, 8), colors[4]),
((8, 7), colors[5]),
((7, 6), colors[6]),
((6, 5), colors[7]),
((0, 12), colors[8]),
((12, 11), colors[9]),
((11, 10), colors[10]),
((10, 9), colors[11]),
((0, 16), colors[12]),
((16, 15), colors[13]),
((15, 14), colors[14]),
((14, 13), colors[15]),
((0, 20), colors[16]),
((20, 19), colors[17]),
((19, 18), colors[18]),
((18, 17), colors[19])]
for connection, color in bones:
coord1 = coords_hw[connection[0], :]
coord2 = coords_hw[connection[1], :]
coords = np.stack([coord1, coord2])
coord1_t = (int(coord1[1]), int(coord1[0]))
coord2_t = (int(coord2[1]), int(coord2[0]))
if color_fixed is None:
cv2.line(image, coord2_t, coord1_t, color, linewidth)
else:
cv2.line(image, coord1_t, coord2_t, color_fixed, linewidth)
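# Example usage: a minimal sketch of the plotting helpers above. They expect 21
# hand keypoints; plot_hand takes (row, col) image coordinates and a matplotlib
# axis, plot_hand_3d takes (x, y, z) coordinates on a 3-D axis, and plot_hand_2d
# draws directly onto an OpenCV image. The figure setup below is an assumption.
def _example_plot_hand(coords_hw):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    plot_hand(coords_hw, ax)
    plt.show()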
class LearningRateScheduler:
"""
    Provides scalar tensors at certain iterations, as needed for a multistep learning rate schedule.
"""
def __init__(self, steps, values):
self.steps = steps
self.values = values
        assert len(steps)+1 == len(values), "There must be one more element in values than in steps."
def get_lr(self, global_step):
with tf.name_scope('lr_scheduler'):
if len(self.values) == 1: #1 value -> no step
learning_rate = tf.constant(self.values[0])
elif len(self.values) == 2: #2 values -> one step
cond = tf.greater(global_step, self.steps[0])
learning_rate = tf.where(cond, self.values[1], self.values[0])
else: # n values -> n-1 steps
cond_first = tf.less(global_step, self.steps[0])
cond_between = list()
for ind, step in enumerate(range(0, len(self.steps)-1)):
cond_between.append(tf.logical_and(tf.less(global_step, self.steps[ind+1]),
tf.greater_equal(global_step, self.steps[ind])))
cond_last = tf.greater_equal(global_step, self.steps[-1])
cond_full = [cond_first]
cond_full.extend(cond_between)
cond_full.append(cond_last)
cond_vec = tf.stack(cond_full)
lr_vec = tf.stack(self.values)
learning_rate = tf.where(cond_vec, lr_vec, tf.zeros_like(lr_vec))
learning_rate = tf.reduce_sum(learning_rate)
return learning_rate
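# Example usage: a minimal sketch of LearningRateScheduler, assuming a TF1-style
# graph in which `global_step` is an integer tensor maintained by the training
# loop. The step boundaries and learning-rate values below are assumptions.
def _example_lr_schedule(global_step):
    lr_scheduler = LearningRateScheduler(steps=[10000, 20000],
                                         values=[1e-4, 1e-5, 1e-6])
    return lr_scheduler.get_lr(global_step)  # scalar learning-rate tensor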
class EvalUtil:
""" Util class for evaluation networks.
"""
def __init__(self, num_kp=21):
# init empty data storage
self.data = list()
self.num_kp = num_kp
for _ in range(num_kp):
self.data.append(list())
def feed(self, keypoint_gt, keypoint_vis, keypoint_pred):
""" Used to feed data to the class. Stores the euclidean distance between gt and pred, when it is visible. """
keypoint_gt = np.squeeze(keypoint_gt)
keypoint_pred = np.squeeze(keypoint_pred)
keypoint_vis = np.squeeze(keypoint_vis).astype('bool')
assert len(keypoint_gt.shape) == 2
assert len(keypoint_pred.shape) == 2
assert len(keypoint_vis.shape) == 1
# calc euclidean distance
diff = keypoint_gt - keypoint_pred
euclidean_dist = np.sqrt(np.sum(np.square(diff), axis=1))
num_kp = keypoint_gt.shape[0]
for i in range(num_kp):
if keypoint_vis[i]:
self.data[i].append(euclidean_dist[i])
def _get_pck(self, kp_id, threshold):
""" Returns pck for one keypoint for the given threshold. """
if len(self.data[kp_id]) == 0:
return None
data = np.array(self.data[kp_id])
pck = np.mean((data <= threshold).astype('float'))
return pck
def _get_epe(self, kp_id):
""" Returns end point error for one keypoint. """
if len(self.data[kp_id]) == 0:
return None, None
data = np.array(self.data[kp_id])
epe_mean = np.mean(data)
epe_median = np.median(data)
return epe_mean, epe_median
def get_measures(self, val_min, val_max, steps):
""" Outputs the average mean and median error as well as the pck score. """
thresholds = np.linspace(val_min, val_max, steps)
thresholds = np.array(thresholds)
norm_factor = np.trapz(np.ones_like(thresholds), thresholds)
# init mean measures
epe_mean_all = list()
epe_median_all = list()
auc_all = list()
pck_curve_all = list()
# Create one plot for each part
for part_id in range(self.num_kp):
# mean/median error
mean, median = self._get_epe(part_id)
if mean is None:
# there was no valid measurement for this keypoint
continue
epe_mean_all.append(mean)
epe_median_all.append(median)
# pck/auc
pck_curve = list()
for t in thresholds:
pck = self._get_pck(part_id, t)
pck_curve.append(pck)
pck_curve = np.array(pck_curve)
pck_curve_all.append(pck_curve)
auc = np.trapz(pck_curve, thresholds)
auc /= norm_factor
auc_all.append(auc)
epe_mean_all = np.mean(np.array(epe_mean_all))
epe_median_all = np.mean(np.array(epe_median_all))
auc_all = np.mean(np.array(auc_all))
pck_curve_all = np.mean(np.array(pck_curve_all), 0) # mean only over keypoints
return epe_mean_all, epe_median_all, auc_all, pck_curve_all, thresholds
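# Example usage: a minimal sketch of a typical evaluation loop with EvalUtil.
# Ground truth, visibility and prediction are fed per frame, then summarized.
# The threshold range 0.0-0.05 (in the keypoints' units) and the iterable
# `samples` are assumptions for illustration.
def _example_evaluation(samples):
    util = EvalUtil()
    for kp_gt, kp_vis, kp_pred in samples:
        util.feed(kp_gt, kp_vis, kp_pred)
    epe_mean, epe_median, auc, pck_curve, thresholds = util.get_measures(0.0, 0.05, 20)
    return epe_mean, epe_median, auc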
def load_weights_from_snapshot(session, checkpoint_path, discard_list=None, rename_dict=None):
""" Loads weights from a snapshot except the ones indicated with discard_list. Others are possibly renamed. """
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
# Remove everything from the discard list
if discard_list is not None:
num_disc = 0
var_to_shape_map_new = dict()
for k, v in var_to_shape_map.items():
good = True
for dis_str in discard_list:
if dis_str in k:
good = False
if good:
var_to_shape_map_new[k] = v
else:
num_disc += 1
var_to_shape_map = dict(var_to_shape_map_new)
print('Discarded %d items' % num_disc)
# rename everything according to rename_dict
num_rename = 0
var_to_shape_map_new = dict()
for name in var_to_shape_map.keys():
new_name = name
if rename_dict is not None:
for rename_str in rename_dict.keys():
if rename_str in name:
new_name = new_name.replace(rename_str, rename_dict[rename_str])
num_rename += 1
var_to_shape_map_new[new_name] = reader.get_tensor(name)
var_to_shape_map = dict(var_to_shape_map_new)
init_op, init_feed = tf.contrib.framework.assign_from_values(var_to_shape_map)
session.run(init_op, init_feed)
print('Initialized %d variables from %s.' % (len(var_to_shape_map), checkpoint_path))
def calc_auc(x, y):
""" Given x and y values it calculates the approx. integral and normalizes it: area under curve"""
integral = np.trapz(y, x)
norm = np.trapz(np.ones_like(y), x)
return integral / norm
def get_stb_ref_curves():
"""
Returns results of various baseline methods on the Stereo Tracking Benchmark Dataset reported by:
Zhang et al., ‘3d Hand Pose Tracking and Estimation Using Stereo Matching’, 2016
"""
curve_list = list()
thresh_mm = np.array([20.0, 25, 30, 35, 40, 45, 50])
pso_b1 = np.array([0.32236842, 0.53947368, 0.67434211, 0.75657895, 0.80921053, 0.86513158, 0.89473684])
curve_list.append((thresh_mm, pso_b1, 'PSO (AUC=%.3f)' % calc_auc(thresh_mm, pso_b1)))
icppso_b1 = np.array([ 0.51973684, 0.64473684, 0.71710526, 0.77302632, 0.80921053, 0.84868421, 0.86842105])
curve_list.append((thresh_mm, icppso_b1, 'ICPPSO (AUC=%.3f)' % calc_auc(thresh_mm, icppso_b1)))
chpr_b1 = np.array([ 0.56578947, 0.71710526, 0.82236842, 0.88157895, 0.91447368, 0.9375, 0.96052632])
curve_list.append((thresh_mm, chpr_b1, 'CHPR (AUC=%.3f)' % calc_auc(thresh_mm, chpr_b1)))
return curve_list
| 40.05668 | 148 | 0.552018 | ["MIT"] | vivekkhurana/handsign | utils/general.py | 29,686 | Python |
# -*- coding: utf-8 -*-
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, average_precision_score, precision_score, recall_score, f1_score, roc_auc_score, matthews_corrcoef
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
from bionev.utils import *
def LinkPrediction(embedding_look_up, original_graph, train_graph, test_pos_edges, seed):
random.seed(seed)
train_neg_edges = generate_neg_edges(original_graph, len(train_graph.edges()), seed)
    # create an auxiliary graph to ensure that testing negative edges will not be used in training
G_aux = copy.deepcopy(original_graph)
G_aux.add_edges_from(train_neg_edges)
test_neg_edges = generate_neg_edges(G_aux, len(test_pos_edges), seed)
# construct X_train, y_train, X_test, y_test
X_train = []
y_train = []
for edge in train_graph.edges():
node_u_emb = embedding_look_up[edge[0]]
node_v_emb = embedding_look_up[edge[1]]
feature_vector = np.append(node_u_emb, node_v_emb)
X_train.append(feature_vector)
y_train.append(1)
for edge in train_neg_edges:
node_u_emb = embedding_look_up[edge[0]]
node_v_emb = embedding_look_up[edge[1]]
feature_vector = np.append(node_u_emb, node_v_emb)
X_train.append(feature_vector)
y_train.append(0)
X_test = []
y_test = []
for edge in test_pos_edges:
node_u_emb = embedding_look_up[edge[0]]
node_v_emb = embedding_look_up[edge[1]]
feature_vector = np.append(node_u_emb, node_v_emb)
X_test.append(feature_vector)
y_test.append(1)
for edge in test_neg_edges:
node_u_emb = embedding_look_up[edge[0]]
node_v_emb = embedding_look_up[edge[1]]
feature_vector = np.append(node_u_emb, node_v_emb)
X_test.append(feature_vector)
y_test.append(0)
# shuffle for training and testing
c = list(zip(X_train, y_train))
random.shuffle(c)
X_train, y_train = zip(*c)
c = list(zip(X_test, y_test))
random.shuffle(c)
X_test, y_test = zip(*c)
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
clf1 = LogisticRegression(random_state=seed, max_iter=1000, solver='lbfgs')
clf1.fit(X_train, y_train)
y_pred_proba = clf1.predict_proba(X_test)[:, 1]
y_pred = clf1.predict(X_test)
auc_roc = roc_auc_score(y_test, y_pred_proba)
avg_pr = average_precision_score(y_test, y_pred_proba)
precision = precision_score(y_test, y_pred, average='binary')
recall = recall_score(y_test, y_pred, average='binary')
accuracy = accuracy_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
mcc = matthews_corrcoef(y_test, y_pred)
top_1, top_3 = predHits(y_test, y_pred, clf1.predict(X_test), clf1.predict(X_test))
print('#' * 35 + ' Link Prediction Performance ' + '#' * 35)
print(f'AUC-ROC: {auc_roc:.3f}, AVG-PR: {avg_pr:.3f}, Precision: {precision:.3f}, Recall: {recall:.3f}, Accuracy: {accuracy:.3f}, F1: {f1:.3f}, MCC: {mcc:.3f}, Top_1: {top_1:.3f}, Top_3: {top_3:.3f}')
print('#' * 100)
return auc_roc, avg_pr, precision, recall, accuracy, f1, mcc, top_1, top_3
def NodeClassification(embedding_look_up, node_list, labels, testing_ratio, seed):
X_train, y_train, X_test, y_test = split_train_test_classify(embedding_look_up, node_list, labels,
testing_ratio=testing_ratio,seed=seed)
binarizer = MultiLabelBinarizer(sparse_output=True)
y_all = np.append(y_train, y_test)
binarizer.fit(y_all)
y_train = binarizer.transform(y_train).todense()
y_test = binarizer.transform(y_test).todense()
model = OneVsRestClassifier(LogisticRegression(random_state=seed, max_iter=1000, solver='lbfgs'))
model.fit(X_train, y_train)
y_pred_prob = model.predict_proba(X_test)
    ## small trick: we assume that we know how many labels to predict
y_pred = get_y_pred(y_test, y_pred_prob)
accuracy = accuracy_score(y_test, y_pred)
micro_f1 = f1_score(y_test, y_pred, average="micro")
macro_f1 = f1_score(y_test, y_pred, average="macro")
print('#' * 9 + ' Node Classification Performance ' + '#' * 9)
print(f'Accuracy: {accuracy:.3f}, Micro-F1: {micro_f1:.3f}, Macro-F1: {macro_f1:.3f}')
print('#' * 50)
return accuracy, micro_f1, macro_f1
def predHits(truth, pred1, pred2, pred3):
hits_1 = 0
hits_3 = 0
pred1 = np.rint(pred1).astype(np.int32)
pred2 = np.rint(pred2).astype(np.int32)
pred3 = np.rint(pred3).astype(np.int32)
for i in range(len(truth)):
if truth[i] == pred1[i]:
hits_1 = hits_1 + 1
if (truth[i] == pred1[i]) or (truth[i] == pred2[i]) or (truth[i] == pred3[i]):
hits_3 = hits_3 + 1
top_1 = hits_1/len(truth)
top_3 = hits_3/len(truth)
return top_1, top_3
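# Example usage: a minimal sketch of how LinkPrediction is typically driven.
# `embeddings` maps node ids to 1-D numpy vectors; each edge is scored from the
# concatenation of its two endpoint vectors. The argument names below are
# illustrative assumptions.
def _example_link_prediction(embeddings, G_original, G_train, test_pos_edges):
    return LinkPrediction(embeddings, G_original, G_train, test_pos_edges, seed=42)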
| 39.15625 | 204 | 0.67937 | ["MIT"] | bhevencious/BioNEV | src/bionev/evaluation.py | 5,012 | Python |
'''
A command library that helps users upload their results to the dashboard.
'''
#!/usr/bin/env python
import json
import argparse
from .._utils import file_utils
from . import main
def import_local_resources(args):
'''Entrance of importing local resources'''
parser = argparse.ArgumentParser(prog="cotk import", \
description="Import local resources")
parser.add_argument("file_id", type=str, help="Name of resource")
parser.add_argument("file_path", type=str, help="Path to resource")
cargs = parser.parse_args(args)
file_utils.import_local_resources(cargs.file_id, cargs.file_path)
main.LOGGER.info("Successfully import local resource {}.".format(cargs.file_id))
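# Example usage: this entry point is reached through the cotk command line as
#   cotk import <file_id> <file_path>
# where the two positional arguments map onto the parser defined above.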
| 34.3 | 82 | 0.749271 | ["Apache-2.0"] | JianGuanTHU/cotk | cotk/scripts/import_local_resources.py | 686 | Python |
# coding: utf-8
import numpy as np
from frequent_direction import FrequentDirection
from sklearn.preprocessing import normalize
from sklearn.metrics.pairwise import pairwise_kernels
def laplacian_sketch(X,ell,k,do_normalize_feature,normed,callback,**args):
fd = FrequentDirection(ell,k)
D = np.array([np.sum(callback(X,i,**args)) for i in range(len(X))])
if normed:
D = np.sqrt(D)
isolation_mask = D==0
if do_normalize_feature:
# normalize original feature (for cosine distance)
        X[~isolation_mask] = normalize(X[~isolation_mask],norm='l2', axis=1, copy=False)
D[:] = 1 # set 1 even to D==0 samples to avoid 0 division.
for i,isolation in enumerate(isolation_mask):
A_i = -1 * callback(X,i,**args)
if normed:
A_i /= D[i]
A_i /= D
A_i[i] = 1 - isolation # set 0 to isolated node.
else:
A_i[i] = D[i]
fd.add_sample(-A_i)
return fd.get_result().T, D
def laplacian_sketch_rbf_kernel(X,ell,k,normed=True,gamma=None):
    return laplacian_sketch(X,ell,k,False,normed,one_row_rbf_kernel,gamma=gamma)
def laplacian_sketch_cosine_similarity(X,ell,k,normed=True):
return laplacian_sketch(X,ell,k,True,normed,one_row_cosine_similarity)
def one_row_rbf_kernel(X,i,gamma=None):
"""
X : array of shape (n_samples_X, n_features)
i : target sample in X (X[i])
gamma : float, default None
If None, defaults to 1.0 / n_samples_X
K(x, y) = exp(-gamma ||x-xi||^2)
Returns
-------
    kernel_row : array of shape (n_samples_X,)
"""
if gamma is None:
gamma = 1.0 / X.shape[0]
d = np.sum(np.power(X-X[i],2),axis=1)
return np.array(np.exp(-gamma * d))
def one_row_cosine_similarity(X,i):
"""
X : normalized matrix
i : target sample in X
"""
a = (np.dot(X,X[i].T)+1)/2
a[a<0]=0
return a
def debug_one_row_rbf_kernel(X,gamma=None):
W = np.zeros((X.shape[0],X.shape[0]))
W_gt = pairwise_kernels(X, metric='rbf',
filter_params=True,
gamma=gamma)
for i,row in enumerate(X):
W[i] = one_row_rbf_kernel(X,i,gamma=gamma)
#print(W)
#print(W_gt)
#print(np.sum(W-W_gt))
def debug_one_row_cosine_similarity(X):
W = np.zeros((X.shape[0],X.shape[0]))
W_gt = pairwise_kernels(X, metric='cosine',
filter_params=True)
for i,row in enumerate(X):
W[i] = one_row_cosine_similarity(X,i)
print(W)
print(W_gt)
print(np.sum(W-W_gt))
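# Example usage: a minimal sketch of building a frequent-directions sketch of
# the graph Laplacian from cosine similarity between rows of a feature matrix X.
# The sketch size ell=64 and target rank k=32 below are placeholder assumptions.
def _example_sketch(X):
    # B is the frequent-directions sketch (transposed); D is the per-row
    # normalization vector returned alongside it.
    B, D = laplacian_sketch_cosine_similarity(X, 64, 32, normed=True)
    return B, D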
| 30.482353 | 88 | 0.618294 | ["BSD-2-Clause"] | AtsushiHashimoto/SpectralClusteringFD | spectral_clustering_fd/laplacian_sketch.py | 2,591 | Python |
#!/usr/bin/env python
import os
import xmltodict # sudo easy_install xmltodict
import subprocess
import zipfile
class PackAndroid(object):
def __init__(self, root, project_folder, project, input_apk, destination, keystore, keystore_alias, apk_name=None, zipalign=None, jarsigner=None, configuration='Release', keystore_password=None):
self.name = project_folder
self.proj_folder = project_folder
self.project = project
self.input_apk = input_apk
self.destination = os.path.expanduser(destination)
self.configuration = configuration
self.keystore = keystore
self.keystore_alias = keystore_alias
self.keystore_password = keystore_password
# Name of the final apk
self.apk_name = apk_name
if self.apk_name is None and self.keystore_alias is not None:
self.apk_name = self.keystore_alias.lower()
if self.apk_name is None:
projf = os.path.basename(project)
self.apk_name = projf.replace('.csproj', '')
self.final_apk = os.path.join(self.destination, "%s-" % self.apk_name)
self.signed_apk = os.path.join(self.destination, "%s-signed.apk" % self.apk_name)
self.zipalign = zipalign
if self.zipalign is None:
self.zipalign = '/usr/bin/zipalign'
self.jarsigner = jarsigner
if self.jarsigner is None:
self.jarsigner = "/usr/bin/jarsigner"
self.keystore = os.path.join(root, self.keystore)
self.project = os.path.join(root, self.project)
self.proj_folder = os.path.join(root, self.proj_folder)
self.input_apk = os.path.join(self.proj_folder, self.input_apk)
if not os.path.exists(self.keystore):
exit("Failed to locate keystore - " + self.keystore)
if not os.path.exists(self.zipalign):
exit("Failed to locate zipalign - " + self.zipalign)
if not os.path.exists(self.jarsigner):
exit("Failed to locate jarsigner - " + self.jarsigner)
def clean(self):
bin_folder = os.path.join(self.proj_folder, 'bin')
obj_folder = os.path.join(self.proj_folder, 'obj')
if os.path.exists(bin_folder):
print 'Clearing away ' + bin_folder
os.system('rm -fdr ' + bin_folder)
if os.path.exists(obj_folder):
print 'Clearing away ' + obj_folder
os.system('rm -fdr ' + obj_folder)
def get_manifest_dictionary(self):
manifest = os.path.join(self.proj_folder, 'Properties/AndroidManifest.xml')
if not os.path.exists(manifest):
exit("Failed to locate AndroidManifest.xml - " + manifest)
f = file(manifest)
xml = f.read()
f.close()
doc = xmltodict.parse(xml)
return doc
def get_build_number(self):
doc = self.get_manifest_dictionary()
return doc['manifest']['@android:versionCode']
def get_version_number(self):
doc = self.get_manifest_dictionary()
return doc['manifest']['@android:versionName']
def set_build_number(self, build_num):
doc = self.get_manifest_dictionary()
doc['manifest']['@android:versionCode'] = build_num
xml = xmltodict.unparse(doc, pretty=True)
manifest = os.path.join(self.proj_folder, 'Properties/AndroidManifest.xml')
if not os.path.exists(manifest):
exit("Failed to locate AndroidManifest.xml - " + manifest)
f = file(manifest, 'w')
f.write(xml)
f.close()
def increment_build_number(self):
build_number = self.get_build_number()
if build_number is None:
build_number = "1"
else:
build_number = str(int(build_number)+1)
self.set_build_number(build_number)
def decrement_build_number(self):
build_number = self.get_build_number()
if build_number is None:
build_number = "1"
else:
build_number = str(int(build_number)-1)
self.set_build_number(build_number)
def set_version_number(self, version):
doc = self.get_manifest_dictionary()
doc['manifest']['@android:versionName'] = version
xml = xmltodict.unparse(doc, pretty=True)
manifest = os.path.join(self.proj_folder, 'Properties/AndroidManifest.xml')
if not os.path.exists(manifest):
exit("Failed to locate AndroidManifest.xml - " + manifest)
f = file(manifest, 'w')
f.write(xml)
f.close()
def build(self):
cmd_update = "msbuild %s /t:UpdateAndroidResources /p:Configuration=%s" % (self.project, self.configuration)
os.system(cmd_update)
cmd = "msbuild %s /t:SignAndroidPackage /p:Configuration=%s" % (self.project, self.configuration)
os.system(cmd)
if not os.path.exists(self.input_apk):
exit("Failed to build raw apk, i.e. its missing - " + self.input_apk)
@staticmethod
def convert_windows_path(any_path):
chars = []
for i in range(len(any_path)):
char = any_path[i]
if char == '\\':
chars.append('/')
else:
chars.append(char)
return ''.join(chars)
@staticmethod
def update_solution_resources(solution,configuration):
if not os.path.exists(solution):
exit("Failed to locate %s - " % os.path.basename(solution))
f = file(solution)
sln = f.read()
f.close()
projects = []
lines = sln.split('\n')
for line in lines:
if line.startswith("Project("):
start = line.find(",")
rest = line[start+3:len(line)]
end = rest.find(",")
projects.append(os.path.abspath(os.path.join(os.path.dirname(solution),PackAndroid.convert_windows_path(rest[0:end-1]))))
# print projects
for project in projects:
cmd_update = "msbuild %s /t:UpdateAndroidResources /p:Configuration=%s" % (project, configuration)
os.system(cmd_update)
def sign(self):
sign_cmd = [self.jarsigner, "-verbose", "-sigalg", "MD5withRSA", "-digestalg", "SHA1", "-keystore", self.keystore]
if not self.keystore_password is None:
sign_cmd.extend(["-storepass",self.keystore_password])
sign_cmd.extend(["-signedjar", self.signed_apk, self.input_apk, self.keystore_alias])
subprocess.call(sign_cmd)
subprocess.call([self.zipalign, "-f", "-v", "4", self.signed_apk, self.final_apk])
if os.path.exists(self.final_apk):
if os.path.exists(self.signed_apk):
os.system('rm ' + self.signed_apk)
def update_version(self):
build_number = self.get_build_number()
print build_number
q = raw_input("Would you like to increment the build number for %s? y/n\n> " % self.apk_name)
if q == "y":
build_number = str(int(build_number)+1)
self.set_build_number(build_number)
version_number = self.get_version_number()
print version_number
q = raw_input("Would you like to change the version number for %s? y/n\n> " % self.apk_name)
if q == "y":
version_number = raw_input("What to?> ")
self.set_version_number(version_number)
def copy_symbols(self):
artifacts_folder = os.path.join(self.proj_folder, 'bin', 'Release')
stuff = os.listdir(artifacts_folder)
msym_folder = None
for name in stuff:
if name.endswith(".mSYM"):
msym_folder = os.path.join(artifacts_folder, name)
break
if msym_folder is not None:
def zipdir(path, ziph):
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file),os.path.relpath(os.path.join(root, file), os.path.join(path, '..')))
msym_destination = os.path.join(os.path.expanduser("~/Desktop/"), os.path.basename(self.final_apk)) + ".mSYM.zip"
zipf = zipfile.ZipFile(msym_destination, 'w', zipfile.ZIP_DEFLATED)
zipdir(msym_folder, zipf)
zipf.close()
def run(self, update_versions=True, confirm_build=True):
self.clean()
self.final_apk = os.path.join(self.destination, "%s-" % self.apk_name)
if update_versions:
self.update_version()
build_number = self.get_build_number()
version_number = self.get_version_number()
if confirm_build:
            print "So that's version " + version_number + " build " + build_number
q = raw_input("Would you like to continue? y/n\n> ")
if q != "y":
print "Ok, not doing the build, suit yourself..."
return None
self.final_apk = self.final_apk + build_number + '-' + version_number + '.apk'
print self.final_apk
self.build()
self.sign()
self.copy_symbols()
return self.final_apk
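# Example usage: a minimal sketch of a typical PackAndroid call. Every path,
# keystore file and alias below is a placeholder assumption rather than a value
# from this project.
def _example_pack():
    packer = PackAndroid(root='/path/to/checkout',
                         project_folder='Droid',
                         project='Droid/Droid.csproj',
                         input_apk='bin/Release/com.example.app.apk',
                         destination='~/Desktop/builds',
                         keystore='keys/release.keystore',
                         keystore_alias='example')
    return packer.run(update_versions=False, confirm_build=False)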
| 38.443038 | 199 | 0.609044 | ["MIT"] | skela/r | packandroid.py | 9,111 | Python |
import pytest
from MidiCompose.logic.rhythm.beat import Beat
from MidiCompose.logic.rhythm.measure import Measure
from MidiCompose.logic.rhythm.part import Part
@pytest.fixture
def part_1():
m1 = Measure([Beat([1,2,1,2]),
Beat([1,0,0,1])])
m2 = Measure([Beat([2,2,1,1]),
Beat([2,2,2,2])])
part = Part([m1,m2])
return part
def test_empty_constructor():
p = Part()
assert p.n_measures == 1
assert p.n_beats == 1
assert p.n_note_on == 0
def test_n_note_on(part_1):
assert part_1.n_note_on == 6
def test_iterator(part_1):
for m in part_1:
assert type(m) == Measure
| 19.25 | 53 | 0.590188 | ["MIT"] | aParthemer/MidiCompose | tests/test_logic/test_rhythm/test_Part.py | 693 | Python |
from abaqusConstants import *
from .AnalysisStep import AnalysisStep
from ..Adaptivity.AdaptiveMeshConstraintState import AdaptiveMeshConstraintState
from ..Adaptivity.AdaptiveMeshDomain import AdaptiveMeshDomain
from ..BoundaryCondition.BoundaryConditionState import BoundaryConditionState
from ..Load.LoadCase import LoadCase
from ..Load.LoadState import LoadState
from ..PredefinedField.PredefinedFieldState import PredefinedFieldState
from ..StepMiscellaneous.Control import Control
from ..StepMiscellaneous.SolverControl import SolverControl
from ..StepOutput.DiagnosticPrint import DiagnosticPrint
from ..StepOutput.FieldOutputRequestState import FieldOutputRequestState
from ..StepOutput.HistoryOutputRequestState import HistoryOutputRequestState
from ..StepOutput.Monitor import Monitor
from ..StepOutput.Restart import Restart
class AnnealStep(AnalysisStep):
"""The AnnealStep object anneals a structure by setting the velocities and all appropriate
state variables to zero.
The AnnealStep object is derived from the AnalysisStep object.
Attributes
----------
name: str
A String specifying the repository key.
refTemp: float
A Float specifying the post-anneal reference temperature. The default value is the
current temperature at all nodes in the model after the annealing has completed.
previous: str
A String specifying the name of the previous step. The new step appears after this step
in the list of analysis steps.
description: str
A String specifying a description of the new step. The default value is an empty string.
explicit: SymbolicConstant
A SymbolicConstant specifying whether the step has an explicit procedure type
(**procedureType=ANNEAL**, DYNAMIC_EXPLICIT, or DYNAMIC_TEMP_DISPLACEMENT).
perturbation: Boolean
A Boolean specifying whether the step has a perturbation procedure type.
nonmechanical: Boolean
A Boolean specifying whether the step has a mechanical procedure type.
procedureType: SymbolicConstant
A SymbolicConstant specifying the Abaqus procedure. Possible values are:
- ANNEAL
- BUCKLE
- COMPLEX_FREQUENCY
- COUPLED_TEMP_DISPLACEMENT
- COUPLED_THERMAL_ELECTRIC
- DIRECT_CYCLIC
- DYNAMIC_IMPLICIT
- DYNAMIC_EXPLICIT
- DYNAMIC_SUBSPACE
- DYNAMIC_TEMP_DISPLACEMENT
- COUPLED_THERMAL_ELECTRICAL_STRUCTURAL
- FREQUENCY
- GEOSTATIC
- HEAT_TRANSFER
- MASS_DIFFUSION
- MODAL_DYNAMICS
- RANDOM_RESPONSE
- RESPONSE_SPECTRUM
- SOILS
- STATIC_GENERAL
- STATIC_LINEAR_PERTURBATION
- STATIC_RIKS
- STEADY_STATE_DIRECT
- STEADY_STATE_MODAL
- STEADY_STATE_SUBSPACE
- VISCO
suppressed: Boolean
A Boolean specifying whether the step is suppressed or not. The default value is OFF.
fieldOutputRequestState: dict[str, FieldOutputRequestState]
A repository of :py:class:`~abaqus.StepOutput.FieldOutputRequestState.FieldOutputRequestState` objects.
historyOutputRequestState: dict[str, HistoryOutputRequestState]
A repository of :py:class:`~abaqus.StepOutput.HistoryOutputRequestState.HistoryOutputRequestState` objects.
diagnosticPrint: DiagnosticPrint
A :py:class:`~abaqus.StepOutput.DiagnosticPrint.DiagnosticPrint` object.
monitor: Monitor
A :py:class:`~abaqus.StepOutput.Monitor.Monitor` object.
restart: Restart
A :py:class:`~abaqus.StepOutput.Restart.Restart` object.
adaptiveMeshConstraintStates: dict[str, AdaptiveMeshConstraintState]
A repository of :py:class:`~abaqus.Adaptivity.AdaptiveMeshConstraintState.AdaptiveMeshConstraintState` objects.
adaptiveMeshDomains: dict[str, AdaptiveMeshDomain]
A repository of :py:class:`~abaqus.Adaptivity.AdaptiveMeshDomain.AdaptiveMeshDomain` objects.
control: Control
A :py:class:`~abaqus.StepMiscellaneous.Control.Control` object.
solverControl: SolverControl
A :py:class:`~abaqus.StepMiscellaneous.SolverControl.SolverControl` object.
boundaryConditionStates: dict[str, BoundaryConditionState]
A repository of :py:class:`~abaqus.BoundaryCondition.BoundaryConditionState.BoundaryConditionState` objects.
interactionStates: int
A repository of :py:class:`~abaqus.Interaction.InteractionState.InteractionState` objects.
loadStates: dict[str, LoadState]
A repository of :py:class:`~abaqus.Load.LoadState.LoadState` objects.
loadCases: dict[str, LoadCase]
A repository of :py:class:`~abaqus.Load.LoadCase.LoadCase` objects.
predefinedFieldStates: dict[str, PredefinedFieldState]
A repository of :py:class:`~abaqus.PredefinedField.PredefinedFieldState.PredefinedFieldState` objects.
Notes
-----
This object can be accessed by:
.. code-block:: python
import step
mdb.models[name].steps[name]
The corresponding analysis keywords are:
- ANNEAL
- STEP
"""
# A String specifying the repository key.
name: str = ''
# A Float specifying the post-anneal reference temperature. The default value is the
# current temperature at all nodes in the model after the annealing has completed.
refTemp: float = None
# A String specifying the name of the previous step. The new step appears after this step
# in the list of analysis steps.
previous: str = ''
# A String specifying a description of the new step. The default value is an empty string.
description: str = ''
# A SymbolicConstant specifying whether the step has an explicit procedure type
# (*procedureType*=ANNEAL, DYNAMIC_EXPLICIT, or DYNAMIC_TEMP_DISPLACEMENT).
explicit: SymbolicConstant = None
# A Boolean specifying whether the step has a perturbation procedure type.
perturbation: Boolean = OFF
# A Boolean specifying whether the step has a mechanical procedure type.
nonmechanical: Boolean = OFF
# A SymbolicConstant specifying the Abaqus procedure. Possible values are:
# - ANNEAL
# - BUCKLE
# - COMPLEX_FREQUENCY
# - COUPLED_TEMP_DISPLACEMENT
# - COUPLED_THERMAL_ELECTRIC
# - DIRECT_CYCLIC
# - DYNAMIC_IMPLICIT
# - DYNAMIC_EXPLICIT
# - DYNAMIC_SUBSPACE
# - DYNAMIC_TEMP_DISPLACEMENT
# - COUPLED_THERMAL_ELECTRICAL_STRUCTURAL
# - FREQUENCY
# - GEOSTATIC
# - HEAT_TRANSFER
# - MASS_DIFFUSION
# - MODAL_DYNAMICS
# - RANDOM_RESPONSE
# - RESPONSE_SPECTRUM
# - SOILS
# - STATIC_GENERAL
# - STATIC_LINEAR_PERTURBATION
# - STATIC_RIKS
# - STEADY_STATE_DIRECT
# - STEADY_STATE_MODAL
# - STEADY_STATE_SUBSPACE
# - VISCO
procedureType: SymbolicConstant = None
# A Boolean specifying whether the step is suppressed or not. The default value is OFF.
suppressed: Boolean = OFF
# A repository of FieldOutputRequestState objects.
fieldOutputRequestState: dict[str, FieldOutputRequestState] = dict[str, FieldOutputRequestState]()
# A repository of HistoryOutputRequestState objects.
historyOutputRequestState: dict[str, HistoryOutputRequestState] = dict[str, HistoryOutputRequestState]()
# A DiagnosticPrint object.
diagnosticPrint: DiagnosticPrint = DiagnosticPrint()
# A Monitor object.
monitor: Monitor = None
# A Restart object.
restart: Restart = Restart()
# A repository of AdaptiveMeshConstraintState objects.
adaptiveMeshConstraintStates: dict[str, AdaptiveMeshConstraintState] = dict[
str, AdaptiveMeshConstraintState]()
# A repository of AdaptiveMeshDomain objects.
adaptiveMeshDomains: dict[str, AdaptiveMeshDomain] = dict[str, AdaptiveMeshDomain]()
# A Control object.
control: Control = Control()
# A SolverControl object.
solverControl: SolverControl = SolverControl()
# A repository of BoundaryConditionState objects.
boundaryConditionStates: dict[str, BoundaryConditionState] = dict[str, BoundaryConditionState]()
# A repository of InteractionState objects.
interactionStates: int = None
# A repository of LoadState objects.
loadStates: dict[str, LoadState] = dict[str, LoadState]()
# A repository of LoadCase objects.
loadCases: dict[str, LoadCase] = dict[str, LoadCase]()
# A repository of PredefinedFieldState objects.
predefinedFieldStates: dict[str, PredefinedFieldState] = dict[str, PredefinedFieldState]()
def __init__(self, name: str, previous: str, description: str = '', refTemp: float = None,
maintainAttributes: Boolean = False):
"""This method creates an AnnealStep object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].AnnealStep
Parameters
----------
name
A String specifying the repository key.
previous
A String specifying the name of the previous step. The new step appears after this step
in the list of analysis steps.
description
A String specifying a description of the new step. The default value is an empty string.
refTemp
A Float specifying the post-anneal reference temperature. The default value is the
current temperature at all nodes in the model after the annealing has completed.
maintainAttributes
A Boolean specifying whether to retain attributes from an existing step with the same
name. The default value is False.
Returns
-------
An AnnealStep object.
Raises
------
RangeError
"""
super().__init__()
pass
def setValues(self, description: str = '', refTemp: float = None):
"""This method modifies the AnnealStep object.
Parameters
----------
description
A String specifying a description of the new step. The default value is an empty string.
refTemp
A Float specifying the post-anneal reference temperature. The default value is the
current temperature at all nodes in the model after the annealing has completed.
Raises
------
RangeError
"""
pass
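# Example usage: following the access pattern documented above, an anneal step
# would be created from an existing model, e.g.
#   mdb.models['Model-1'].AnnealStep(name='Anneal-1', previous='Initial')
# The model and step names here are illustrative assumptions.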
| 39.066421 | 119 | 0.694248 | ["MIT"] | Haiiliin/PyAbaqus | src/abaqus/Step/AnnealStep.py | 10,587 | Python |
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
def load_data(cell_line, cross_cell_line, label_rate, k_mer):
"""
Load input data from data/cell_line directory.
| x_20.index | the indices (IDs) of labeled train instances as list object (for label_rate = 20%) |
| ux_20.index | the indices (IDs) of unlabeled train instances as list object (for label_rate = 20%) |
| vx_20.index | the indices (IDs) of validation instances as list object (for label_rate = 20%) |
| tx_20.index | the indices (IDs) of test instances as list object (for label_rate = 20%) |
| features_5mer | the feature vectors of all instances as scipy.sparse.csr.csr_matrix object (for k_mer = 5) |
| nodes | a dict in the format {chromosome_name: ID} as collections.defaultdict object |
| labels | the one-hot labels of all instances as numpy.ndarray object |
| graph | a dict in the format {ID: [IDs_of_neighbor_nodes]} as collections.defaultdict object |
All objects above must be saved using python pickle module.
:param cell_line: Name of the cell line to which the datasets belong
:return: All data input files loaded (as well the training/test data).
"""
if (cross_cell_line != None) and (cross_cell_line != cell_line):
read_dir = 'data/{}_{}/'.format(cell_line, cross_cell_line)
else:
read_dir = 'data/{}/'.format(cell_line)
# STEP 1: Load all feature vectors, class labels and graph
features_file = open('{}/features_{}mer'.format(read_dir, k_mer), "rb")
features = pkl.load(features_file)
features_file.close()
labels_file = open('{}/labels'.format(read_dir), "rb")
labels = pkl.load(labels_file)
labels_file.close()
graph_file = open('{}/graph'.format(read_dir), "rb")
graph = pkl.load(graph_file)
graph_file.close()
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
# STEP 2: Load IDs of labeled_train/unlabeled_train/validation/test nodes
    lr = '{:.2f}'.format(label_rate).split('.')[1]
idx_x_file = open('{}/x_{}.index'.format(read_dir, lr), "rb")
idx_x = pkl.load(idx_x_file)
idx_x_file.close()
idx_ux_file = open('{}/ux_{}.index'.format(read_dir, lr), "rb")
idx_ux = pkl.load(idx_ux_file)
idx_ux_file.close()
idx_vx_file = open('{}/vx_{}.index'.format(read_dir, lr), "rb")
idx_vx = pkl.load(idx_vx_file)
idx_vx_file.close()
idx_tx_file = open('{}/tx_{}.index'.format(read_dir, lr), "rb")
idx_tx = pkl.load(idx_tx_file)
idx_tx_file.close()
# STEP 3: Take subsets from loaded features and class labels using loaded IDs
x = features[idx_x]
y = labels[idx_x]
ux = features[idx_ux]
uy = labels[idx_ux]
vx = features[idx_vx]
vy = labels[idx_vx]
tx = features[idx_tx]
ty = labels[idx_tx]
print("x={} ux={} vx={} tx={}".format(x.shape[0], ux.shape[0], vx.shape[0], tx.shape[0]))
# STEP 4: Mask labels
train_mask = sample_mask(idx_x, labels.shape[0])
val_mask = sample_mask(idx_vx, labels.shape[0])
test_mask = sample_mask(idx_tx, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def preprocess_features(features):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return sparse_to_tuple(features)
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def preprocess_adj(adj):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
return sparse_to_tuple(adj_normalized)
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
"""Construct feed dictionary."""
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
return feed_dict
def chebyshev_polynomials(adj, k):
"""Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
print("Calculating Chebyshev polynomials up to order {}...".format(k))
adj_normalized = normalize_adj(adj)
laplacian = sp.eye(adj.shape[0]) - adj_normalized
largest_eigval, _ = eigsh(laplacian, 1, which='LM')
scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(scaled_laplacian)
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
for i in range(2, k+1):
t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
return sparse_to_tuple(t_k)
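# Example usage: a minimal sketch of the preprocessing pipeline built from the
# helpers above: load one cell line, row-normalize the features and build the
# GCN support matrices. The cell line name, label rate and k-mer size are
# placeholder assumptions.
def _example_pipeline():
    adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = \
        load_data('GM12878', None, 0.2, 5)
    features = preprocess_features(features)
    support = [preprocess_adj(adj)]  # plain GCN: a single support matrix
    # For a Chebyshev model of order K, use chebyshev_polynomials(adj, K) instead.
    return features, support, (y_train, y_val, y_test)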
| 36.144444 | 114 | 0.67553 | ["MIT"] | smtnkc/gcn4epi | utils.py | 6,506 | Python |
class FittingAngleUsage(Enum, IComparable, IFormattable, IConvertible):
"""
An enumerated type representing the options for how to limit the angle values applicable to fitting content.
enum FittingAngleUsage,values: UseAnAngleIncrement (1),UseAnyAngle (0),UseSpecificAngles (2)
"""
def __eq__(self, *args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
UseAnAngleIncrement = None
UseAnyAngle = None
UseSpecificAngles = None
value__ = None
| 26.808511 | 221 | 0.600794 | ["MIT"] | YKato521/ironpython-stubs | release/stubs.min/Autodesk/Revit/DB/__init___parts/FittingAngleUsage.py | 1,260 | Python |
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlFileListing(PerlPackage):
"""Parse directory listing"""
homepage = "http://search.cpan.org/~gaas/File-Listing-6.04/lib/File/Listing.pm"
url = "http://search.cpan.org/CPAN/authors/id/G/GA/GAAS/File-Listing-6.04.tar.gz"
version('6.04', '83f636b477741f3a014585bb9cc079a6')
depends_on('perl-http-date', type=('build', 'run'))
| 32.222222 | 90 | 0.715517 | ["ECL-2.0", "Apache-2.0", "MIT"] | HaochengLIU/spack | var/spack/repos/builtin/packages/perl-file-listing/package.py | 580 | Python |
"""
This plugin is for recording test results in the Testcase Database.
"""
import getpass
import time
import uuid
from nose.plugins import Plugin
from nose.exc import SkipTest
from seleniumbase.core.application_manager import ApplicationManager
from seleniumbase.core.testcase_manager import ExecutionQueryPayload
from seleniumbase.core.testcase_manager import TestcaseDataPayload
from seleniumbase.core.testcase_manager import TestcaseManager
from seleniumbase.fixtures import constants
from seleniumbase.fixtures import errors
class DBReporting(Plugin):
"""
This plugin records test results in the Testcase Database.
"""
name = "db_reporting" # Usage: --with-db_reporting
def __init__(self):
Plugin.__init__(self)
self.execution_guid = str(uuid.uuid4())
self.testcase_guid = None
self.execution_start_time = 0
self.case_start_time = 0
self.testcase_manager = None
self._result_set = False
self._test = None
def options(self, parser, env):
super(DBReporting, self).options(parser, env=env)
parser.add_option(
"--database_env",
"--database-env",
action="store",
dest="database_env",
choices=(
constants.Environment.QA,
constants.Environment.STAGING,
constants.Environment.DEVELOP,
constants.Environment.PRODUCTION,
constants.Environment.MASTER,
constants.Environment.REMOTE,
constants.Environment.LOCAL,
constants.Environment.ALPHA,
constants.Environment.BETA,
constants.Environment.MAIN,
constants.Environment.TEST,
),
default=constants.Environment.TEST,
help="The database environment to run the tests in.",
)
def configure(self, options, conf):
super(DBReporting, self).configure(options, conf)
self.options = options
self.testcase_manager = TestcaseManager(self.options.database_env)
def begin(self):
"""At the start of the run, we want to record the test
execution information in the database."""
exec_payload = ExecutionQueryPayload()
exec_payload.execution_start_time = int(time.time() * 1000)
self.execution_start_time = exec_payload.execution_start_time
exec_payload.guid = self.execution_guid
exec_payload.username = getpass.getuser()
self.testcase_manager.insert_execution_data(exec_payload)
def startTest(self, test):
"""At the start of the test, set the testcase details."""
data_payload = TestcaseDataPayload()
self.testcase_guid = str(uuid.uuid4())
data_payload.guid = self.testcase_guid
data_payload.execution_guid = self.execution_guid
if hasattr(test, "browser"):
data_payload.browser = test.browser
else:
data_payload.browser = "N/A"
data_payload.test_address = test.id()
application = ApplicationManager.generate_application_string(test)
data_payload.env = application.split(".")[0]
data_payload.start_time = application.split(".")[1]
data_payload.state = constants.State.UNTESTED
self.testcase_manager.insert_testcase_data(data_payload)
self.case_start_time = int(time.time() * 1000)
# Make the testcase guid available to other plugins
test.testcase_guid = self.testcase_guid
self._test = test
self._test._nose_skip_reason = None
def finalize(self, result):
"""At the end of the test run, we want to
update the DB row with the total execution time."""
runtime = int(time.time() * 1000) - self.execution_start_time
self.testcase_manager.update_execution_data(
self.execution_guid, runtime
)
def afterTest(self, test):
if not self._result_set:
err = None
try:
err = self._test._nose_skip_reason
if err:
err = "Skipped: " + str(err)
err = (err, err)
except Exception:
pass
if not err:
err = "Skipped: (no reason given)"
err = (err, err)
self.__insert_test_result(constants.State.SKIPPED, self._test, err)
def addSuccess(self, test, capt):
"""
After each test success, record testcase run information.
"""
self.__insert_test_result(constants.State.PASSED, test)
self._result_set = True
def addFailure(self, test, err, capt=None, tbinfo=None):
"""
After each test failure, record testcase run information.
"""
self.__insert_test_result(constants.State.FAILED, test, err)
self._result_set = True
def addError(self, test, err, capt=None):
"""
After each test error, record testcase run information.
(Test errors should be treated the same as test failures.)
"""
self.__insert_test_result(constants.State.FAILED, test, err)
self._result_set = True
def handleError(self, test, err, capt=None):
"""
After each test error, record testcase run information.
"Error" also encompasses any states other than Pass or Fail, so we
check for those first.
"""
if err[0] == errors.BlockedTest:
self.__insert_test_result(constants.State.BLOCKED, test, err)
self._result_set = True
raise SkipTest(err[1])
return True
elif err[0] == errors.DeprecatedTest:
self.__insert_test_result(constants.State.DEPRECATED, test, err)
self._result_set = True
raise SkipTest(err[1])
return True
elif err[0] == errors.SkipTest:
self.__insert_test_result(constants.State.SKIPPED, test, err)
self._result_set = True
raise SkipTest(err[1])
return True
def __insert_test_result(self, state, test, err=None):
data_payload = TestcaseDataPayload()
data_payload.runtime = int(time.time() * 1000) - self.case_start_time
data_payload.guid = self.testcase_guid
data_payload.execution_guid = self.execution_guid
data_payload.state = state
if err is not None:
data_payload.message = (
err[1]
.__str__()
.split(
"""-------------------- >> """
"""begin captured logging"""
""" << --------------------""",
1,
)[0]
)
self.testcase_manager.update_testcase_data(data_payload)
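# Example usage: the plugin is enabled through nose's plugin mechanism, as the
# `name` attribute above indicates, e.g.
#   nosetests my_test.py --with-db_reporting --database_env=test
# The test module name here is an illustrative assumption.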
| 37.382514 | 79 | 0.612045 | ["MIT"] | Mu-L/SeleniumBase | seleniumbase/plugins/db_reporting_plugin.py | 6,841 | Python |
# -*- encoding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from grappelli.dashboard import modules, Dashboard
from grappelli.dashboard.utils import get_admin_site_name
class DjangoPagesDashboard(Dashboard):
"""
Custom index dashboard for Django-pages
"""
def init_with_context(self, context):
site_name = get_admin_site_name(context)
self.children.append(
modules.ModelList(
_('General'),
column=1,
collapsible=True,
models=(
'django_pages.site.models.Site',
'django_pages.site.models.Script',
'django_pages.language.models.*',
'django_pages.looks.models.*',
'django_pages.feed.models.*'
),
)
)
self.children.append(
modules.ModelList(
_('Pages'),
column=1,
collapsible=True,
models=('django_pages.pages.models.*', )
)
)
self.children.append(
modules.ModelList(
_('Menu'),
column=2,
collapsible=True,
models=('django_pages.menu.models.*', )
)
)
self.children.append(
modules.ModelList(
_('Comments'),
column=2,
collapsible=True,
models=('django_pages.comments.models.*', )
)
)
self.children.append(
modules.ModelList(
_('SEO'),
column=2,
collapsible=True,
models=('django_pages.metadata.models.*', )
)
)
self.children.append(
modules.AppList(
_('Administration'),
column=1,
collapsible=False,
models=('django.contrib.*', )
)
)
self.children.append(modules.LinkList(
_('File Management'),
column=3,
children=[
{
'title': _('File Browser'),
'url': '/admin/filebrowser/browse/',
'external': False,
},
]
))
self.children.append(modules.RecentActions(
_('Recent Actions'),
limit=5,
collapsible=False,
column=3,
))
| 26.621053 | 59 | 0.451562 | ["BSD-3-Clause"] | lunemec/django-pages | django_pages/dashboard.py | 2,529 | Python |
import h5py
import numpy as np
import os
from plyfile import PlyData, PlyElement
HDF5_DATA = 'hdf5_data'
print('Generating .h5 files...', '\n')
if not os.path.exists(HDF5_DATA):
os.mkdir(HDF5_DATA)
filenames_training = [line.rstrip() for line in open("filelist_training.txt", 'r')]
filenames_testing = [line.rstrip() for line in open("filelist_testing.txt", 'r')]
print((len(filenames_training)))
print((len(filenames_testing)))
f_training = h5py.File("./hdf5_data/data_training.h5", 'w')
f_testing = h5py.File("./hdf5_data/data_testing.h5", 'w')
a_data_training = np.zeros((len(filenames_training), 2048, 3))
a_pid_training = np.zeros((len(filenames_training), 2048), dtype = np.uint8)
labeldata_training = []
a_label_training = np.zeros((len(filenames_training), 1), dtype = np.uint8)
a_data_testing = np.zeros((len(filenames_testing), 2048, 3))
a_pid_testing = np.zeros((len(filenames_testing), 2048), dtype = np.uint8)
labeldata_testing = []
a_label_testing = np.zeros((len(filenames_testing), 1), dtype = np.uint8)
# ====== GENERATING TRAINING FILES ======
#========================================
for i in range(0, len(filenames_training)):
print(filenames_training[i])
plydata = PlyData.read("./ply_dir/" + filenames_training[i] + ".ply")
piddata = [line.rstrip() for line in open("./seg_dir/" + filenames_training[i] + ".seg", 'r')]
# labeldata = [line.rstrip() for line in open("./label_dir/" + filenames_training[i] + ".seg", 'r')]
for j in range(0, 2048):
a_data_training[i, j] = [plydata['vertex']['x'][j], plydata['vertex']['y'][j], plydata['vertex']['z'][j]]
a_pid_training[i, j] = piddata[j]
# a_label_training[i, j] = labeldata[j]
for i in range(0, len(filenames_training)):
labeldata = [line.rstrip() for line in open("./label_dir/" + filenames_training[i] + ".seg", 'r')]
a_label_training[i] = labeldata[0]
data = f_training.create_dataset("data", data = a_data_training)
pid = f_training.create_dataset("pid", data = a_pid_training)
label = f_training.create_dataset("label", data = a_label_training)
# ====== GENERATING TRAINING FILES ======
#========================================
# ====== GENERATING TESTING FILES ======
#========================================
for i in range(0, len(filenames_testing)):
plydata = PlyData.read("./ply_dir/" + filenames_testing[i] + ".ply")
piddata = [line.rstrip() for line in open("./seg_dir/" + filenames_testing[i] + ".seg", 'r')]
# labeldata = [line.rstrip() for line in open("./label_dir/" + filenames_testing[i] + ".seg", 'r')]
for j in range(0, 2048):
a_data_testing[i, j] = [plydata['vertex']['x'][j], plydata['vertex']['y'][j], plydata['vertex']['z'][j]]
a_pid_testing[i, j] = piddata[j]
# a_label_testing[i, j] = labeldata[j]
for i in range(0, len(filenames_testing)):
labeldata = [line.rstrip() for line in open("./label_dir/" + filenames_testing[i] + ".seg", 'r')]
a_label_testing[i] = labeldata[0]
data = f_testing.create_dataset("data", data = a_data_testing)
pid = f_testing.create_dataset("pid", data = a_pid_testing)
label = f_testing.create_dataset("label", data = a_label_testing)
#========================================
#========================================
print('HDF5 files generated.')
| 41.759494 | 113 | 0.631403 | ["MIT"] | pisalore/pointnet_shrec17-classificator | data/make_hdf5_files.py | 3,299 | Python |
#!/usr/bin/env python
"""
Object-oriented implementation of backup reporting code.
Defines a class called 'Backup' that records all backups of a device
"""
import os, sys, argparse
import glob
from configparser import ConfigParser
from atlassian import Confluence
class Backup:
def __init__(self, device, backup_root):
self.device = device
self.root = backup_root
config_pattern = "{}/*/{}".format(self.root, device)
configs = glob.glob(config_pattern, recursive=True)
# Remove the full pathname, we only want the directory and the filename
bkps = [dir[len(backup_root)+1:] for dir in configs]
self.backups = bkps
def name(self):
return self.device
def latest(self):
if len(self.backups) >= 1:
return self.backups[-1].split('/')[0]
else:
return "NotFound"
def main():
parser = ConfigParser()
parser.read('config-demo.ini')
device_list_file = parser['backups']['device_list']
apikey = parser['confluence']['apikey']
username = parser['confluence']['username']
url = parser['confluence']['url']
page_ID = parser['confluence']['page_ID']
confluence = Confluence(url=url, username=username, password=apikey)
# Read in all the devices from the nominated file
with open(device_list_file) as file:
lines = file.readlines()
devices = [line.rstrip() for line in lines]
wiki_table = "||Device||Date||"
for device in devices:
device_bkp = Backup(device, parser['backups']['path'])
latest_bkp_date = device_bkp.latest()
print(f"Latest backup for {device_bkp.name()} is {latest_bkp_date}")
wiki_table += "\n" + f"|{device}|{latest_bkp_date}|"
print("Wiki text for table is:")
print(wiki_table)
result = confluence.update_page(
page_id=page_ID,
title='Config Retrievals',
representation="wiki",
body=wiki_table)
#pprint(result)
print(f"Title of page set to '{result['title']}'")
print(f"Confluence revision for page is now {result['version']['confRev']}")
if __name__ == "__main__":
main()
| 32.630137 | 82 | 0.585642 | ["CC0-1.0"] | oldD0g/code-snippets | python/atlassian/config-report.py | 2,382 | Python |
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMatrixstats(RPackage):
"""High-performing functions operating on rows and columns of matrices,
e.g. col / rowMedians(), col / rowRanks(), and col / rowSds(). Functions
optimized per data type and for subsetted calculations such that both
memory usage and processing time is minimized. There are also optimized
vector-based methods, e.g. binMeans(), madDiff() and
weightedMedian()."""
homepage = "https://cran.rstudio.com/web/packages/matrixStats/index.html"
url = "https://cran.rstudio.com/src/contrib/matrixStats_0.52.2.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/matrixStats"
version('0.52.2', '41b987d3ae96ee6895875c413adcba3c')
| 42.909091 | 79 | 0.720339 | ["ECL-2.0", "Apache-2.0", "MIT"] | HaochengLIU/spack | var/spack/repos/builtin/packages/r-matrixstats/package.py | 944 | Python |
import pandas as pd
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import linear_kernel
def email_mapper(df):
coded_dict = dict()
cter = 1
email_encoded = []
for val in df['email']:
if val not in coded_dict:
coded_dict[val] = cter
cter+=1
email_encoded.append(coded_dict[val])
return email_encoded
def create_user_item_matrix(df):
'''
INPUT:
df - pandas dataframe with article_id, title, user_id columns
OUTPUT:
user_item - user item matrix
Description:
Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with
an article and a 0 otherwise
'''
# Fill in the function here
user_item = df.groupby('user_id')['article_id'].value_counts().unstack()
user_item[user_item.isna() == False] = 1
return user_item # return the user_item matrix
def get_top_articles(n, df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
article_id_grouped_df = df.groupby(['title'])
top_articles = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()
return top_articles # Return the top article titles from df (not df_content)
def get_top_article_ids(n, df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
article_id_grouped_df = df.groupby(['article_id'])
top_articles_ids = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()
return top_articles_ids # Return the top article ids
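# Example usage: a minimal sketch of rank-based recommendations from the
# interactions dataframe `df` (columns include article_id, title, user_id).
# `df` and `n` here are illustrative assumptions.
def _example_rank_based(df, n=10):
    return get_top_article_ids(n, df), get_top_articles(n, df)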
def user_user_recs(user_id, user_item, df, m=10):
'''
INPUT:
user_id - (int) a user id
m - (int) the number of recommendations you want for the user
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
Description:
Loops through the users based on closeness to the input user_id
For each user - finds articles the user hasn't seen before and provides them as recs
Does this until m recommendations are found
Notes:
* Choose the users that have the most total article interactions
before choosing those with fewer article interactions.
* Choose articles with the articles with the most total interactions
before choosing those with fewer total interactions.
'''
def get_user_articles_names_ids(user_id):
'''
INPUT:
user_id
OUTPUT:
article_ids - (list) a list of the article ids seen by the user
article_names - (list) a list of article names associated with the list of article ids
(this is identified by the doc_full_name column in df_content)
Description:
Provides a list of the article_ids and article titles that have been seen by a user
'''
# Your code here
article_ids = user_item.loc[user_id][user_item.loc[user_id] ==1].index.tolist()
article_names = []
for i in article_ids:
try:
title = df[df['article_id'] == i]['title'].unique()[0]
except IndexError:
title ="None"
article_names.append(title)
article_ids = list(map(str, article_ids))
return article_ids, article_names # return the ids and names
def find_similar_users():
'''
OUTPUT:
similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
Description:
Computes the similarity of every pair of users based on the dot product
        Returns an ordered list of user ids, from most to least similar
'''
# compute similarity of each user to the provided user
user_item_tmp = user_item.copy()
        user_item_tmp[user_item_tmp.isna()] = 0 # 1. Replace NaN with 0
row = user_item_tmp.loc[user_id] # 2. Select a row
result_dot = row@user_item_tmp.T # 3. Dot product of each of row of the matrix
result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id
most_similar_users = result_dot.sort_values(ascending=False).index.tolist() # sort by similarity # create list of just the ids
return most_similar_users # return a list of the users in order from most to least similar
def get_top_sorted_users(most_similar_users):
'''
INPUT:
most_similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
OUTPUT:
neighbors_df - (pandas dataframe) a dataframe with:
neighbor_id - is a neighbor user_id
similarity - measure of the similarity of each user to the provided user_id
                        num_interactions - the number of articles viewed by the user
Other Details - sort the neighbors_df by the similarity and then by number of interactions where
highest of each is higher in the dataframe
'''
# Make neighbor_id column
df_user_id_grouped =df.groupby("user_id")
df_user_id_grouped['article_id'].count().sort_values(ascending=False)
neighbors_df = pd.DataFrame()
neighbors_df['neighbor_id'] = most_similar_users
# make similarity column
user_item_tmp = user_item.copy()
        user_item_tmp[user_item_tmp.isna()] = 0 # 1. Replace NaN with 0
row = user_item_tmp.loc[user_id] # Select a row
result_dot = row@user_item_tmp.T # Dot product of each of row of the matrix
result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id
similarity = result_dot.sort_values(ascending=False).values.tolist()[0:10]
neighbors_df['similarity'] = similarity
# Make num_interactions column
num_interactions = []
for i in neighbors_df['neighbor_id']:
counted_interaction = df_user_id_grouped['article_id'].count().loc[i]
num_interactions.append(counted_interaction)
neighbors_df['num_interactions'] = num_interactions
neighbors_df = neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False)
return neighbors_df # Return the dataframe specified in the doc_string
recs = []
rec_names =[]
counter = 0
# Get seen article ids and names from selected user id
article_ids, article_names = get_user_articles_names_ids(user_id)
# Make set to find unseen articles
seen_ids_set = set(article_ids)
most_similar_users = find_similar_users()[0:10]
neighbors_df = get_top_sorted_users(most_similar_users)
# Find similar users of the selected user
similar_users_list = neighbors_df['neighbor_id'] # Get neighbor_df
# Make recommendation list
for sim_user in similar_users_list:
if counter < m:
# Get seen article ids and names from similar users
sim_article_ids, sim_article_names = get_user_articles_names_ids(sim_user)
# Make dict (key: article_ids, value:article_names)
sim_user_dict = dict(zip(sim_article_ids, sim_article_names))
# Make set to find unseen articles
sim_seen_ids_set = set(sim_article_ids)
# Create set of unseen articles_ids
unseen_ids_set = sim_seen_ids_set.difference(seen_ids_set)
for i in unseen_ids_set:
if counter < m:
recs.append(i)
rec_names.append(sim_user_dict[i])
counter += 1
return recs, rec_names
###
def make_Tfidf_array(df_content):
def tokenize(text):
'''
        Splits text into separate words, lowercases each word and strips surrounding whitespace.
        The function also removes irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words
'''
        # Get rid of special characters
text = re.sub(r"[^a-zA-Z0-9]", " ", text)
# Tokenize
tokens = word_tokenize(text)
# Lemmatize
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()
clean_tokens.append(clean_tok)
# Remove stop words
stopwords = nltk.corpus.stopwords.words('english')
clean_tokens = [token for token in clean_tokens if token not in stopwords]
return clean_tokens
    # Fill missing descriptions before building the corpus so the vectorizer never sees NaN
    df_content['doc_description'].fillna(df_content['doc_full_name'], inplace=True)
    corpus = df_content['doc_description']
stop_words = stopwords.words("english")
lemmatizer = WordNetLemmatizer()
# Text Processing, Feature Extraction
vect = TfidfVectorizer(tokenizer=tokenize)
# get counts of each token (word) in text data
X = vect.fit_transform(corpus)
X = X.toarray()
return vect, X
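# A hedged usage sketch: the fitted vectorizer can embed new text into the same
# TF-IDF space, e.g. to score an ad-hoc query against every article
# (df_content is assumed to be loaded elsewhere in the project):
#
#   vect, X = make_Tfidf_array(df_content)
#   query_vec = vect.transform(['machine learning with apache spark'])
#   scores = linear_kernel(query_vec, X).flatten()  # cosine scores, since rows are l2-normalised
#   best_matches = scores.argsort()[::-1][:10]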
def make_content_recs(article_id, df_content, df, m=10):
'''
INPUT:
article_id = (int) a article id in df_content
m - (int) the number of recommendations you want for the user
df_content - (pandas dataframe) df_content as defined at the top of the notebook
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
'''
def tokenize(text):
'''
        Splits text into separate words, lowercases each word and strips surrounding whitespace.
        The function also removes irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words
'''
        # Get rid of special characters
text = re.sub(r"[^a-zA-Z0-9]", " ", text)
# Tokenize
tokens = word_tokenize(text)
# Lemmatize
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()
clean_tokens.append(clean_tok)
# Remove stop words
stopwords = nltk.corpus.stopwords.words('english')
clean_tokens = [token for token in clean_tokens if token not in stopwords]
return clean_tokens
vect, X = make_Tfidf_array(df_content)
    if article_id in df_content.article_id.values:  # membership test against the values, not the index
cosine_similarity = linear_kernel(X, X)
df_similarity = pd.DataFrame(cosine_similarity[article_id], columns=['similarity'])
df_similarity_modified = df_similarity.drop(article_id)
recs = df_similarity_modified.similarity.sort_values(ascending=False).index[0:10].tolist()
rec_names = []
for i in recs:
name = df_content[df_content['article_id'] == i]['doc_full_name'].values[0]
rec_names.append(name)
else:
tfidf_feature_name = vect.get_feature_names()
# Get title of the document of interest
booktitle = df[df['article_id'] == article_id]['title'].values[0]
# Tokenize the title
booktitle_tokenized = tokenize(booktitle)
X_slice_list = []
for i in booktitle_tokenized:
if i in tfidf_feature_name:
X_slice_list.append(tfidf_feature_name.index(i))
X_slice_list.sort()
X_sliced = X[:,X_slice_list]
check_df = pd.DataFrame(X_sliced, columns=X_slice_list)
check_df['sum'] = check_df.sum(axis=1)
recs = check_df.sort_values("sum", ascending=False)[0:10].index.tolist()
rec_names = []
for i in recs:
name = df_content[df_content['article_id'] == i]['doc_full_name'].values[0]
rec_names.append(name)
return recs, rec_names
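# End-to-end usage sketch (hedged: the CSV paths are placeholders for the project's
# data files and are not defined in this module):
#
#   df = pd.read_csv('user-item-interactions.csv')        # placeholder path
#   df_content = pd.read_csv('articles_community.csv')    # placeholder path
#   df['user_id'] = email_mapper(df)
#   user_item = create_user_item_matrix(df)
#   print(get_top_articles(10, df))
#   recs, rec_names = user_user_recs(1, user_item, df, m=10)
#   content_recs, content_names = make_content_recs(100, df_content, df, m=10)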
| 37.400585 | 135 | 0.642718 | [
"MIT"
] | dalpengholic/Udacity_Recommendations_with_IBM | model/recommendation_functions.py | 12,791 | Python |
#!/usr/bin/env python
import rospy
import math
from std_msgs.msg import Float64
from geometry_msgs.msg import Twist
class SimpleRoverController:
def __init__(self):
self.namespace = rospy.get_param("name_space", "scout_1")
        self.w_s = rospy.get_param("wheel_separation", 1.7680)  # wheel separation
        self.w_r = rospy.get_param("wheel_radius", 0.3048)  # wheel radius
if "/" in self.namespace:
rospy.logerr("[rover_motion_controller] invalid namespace. namespace can not contain /")
exit(1)
self.lf_steering_pub = rospy.Publisher("/" + self.namespace + "/fl_steering_arm_controller/command", Float64, queue_size=2)
self.rf_steering_pub = rospy.Publisher("/" + self.namespace + "/fr_steering_arm_controller/command", Float64, queue_size=2)
self.lr_steering_pub = rospy.Publisher("/" + self.namespace + "/bl_steering_arm_controller/command", Float64, queue_size=2)
self.rr_steering_pub = rospy.Publisher("/" + self.namespace + "/br_steering_arm_controller/command", Float64, queue_size=2)
self.lf_axle_pub = rospy.Publisher("/" + self.namespace + "/fl_wheel_controller/command", Float64, queue_size=2)
self.rf_axle_pub = rospy.Publisher("/" + self.namespace + "/fr_wheel_controller/command", Float64, queue_size=2)
self.lr_axle_pub = rospy.Publisher("/" + self.namespace + "/bl_wheel_controller/command", Float64, queue_size=2)
self.rr_axle_pub = rospy.Publisher("/" + self.namespace + "/br_wheel_controller/command", Float64, queue_size=2)
self.steering_cmd = 0
self.linear_vel = 0
self.linear_x = 0
self.angular_z = 0
rospy.Subscriber("/csi_rover/cmd_vel", Twist, callback=self.directional_movement)
rospy.init_node('rover_motion_controller', anonymous=True)
        rate = rospy.Rate(30)  # 30hz
while not rospy.is_shutdown():
# check to see if there's an explicit yaw command
if self.angular_z != 0:
self.rf_axle_pub.publish((self.linear_x + self.angular_z * self.w_s / 2.0) / self.w_r)
self.rr_axle_pub.publish((self.linear_x + self.angular_z * self.w_s / 2.0) / self.w_r)
self.lf_axle_pub.publish((self.linear_x - self.angular_z * self.w_s / 2.0) / self.w_r)
self.lr_axle_pub.publish((self.linear_x - self.angular_z * self.w_s / 2.0) / self.w_r)
# lock all steering joints to be zero
self.synchronized_steering(0)
# else use crab steering
else:
self.lf_axle_pub.publish(self.linear_vel)
self.lr_axle_pub.publish(self.linear_vel)
self.rf_axle_pub.publish(self.linear_vel)
self.rr_axle_pub.publish(self.linear_vel)
self.synchronized_steering(self.steering_cmd)
rate.sleep()
# move all of the steering joints to a position.
# the parameter is an angle value in radians
def synchronized_steering(self, angle):
self.lf_steering_pub.publish(angle)
self.rf_steering_pub.publish(angle)
self.lr_steering_pub.publish(angle)
self.rr_steering_pub.publish(angle)
    # Compute the crab-steering angle from the commanded (x, y) direction and use
    # the vector magnitude as linear_vel; the angle stays in the range -pi/2 to pi/2.
    # A non-zero angular.z instead selects skid steering in the main loop above.
def directional_movement(self, data):
# data comes in as ( x , y )
# https://answers.ros.org/question/29706/twist-message-example-and-cmd_vel/
# rospy.loginfo("Received a /cmd_vel message!")
# rospy.loginfo("Linear Components: [%f, %f, %f]"%(data.linear.x, data.linear.y, data.linear.z))
# rospy.loginfo("Angular Components: [%f, %f, %f]"%(data.angular.x, data.angular.y, data.angular.z))
theta = math.atan2(data.linear.x, data.linear.y)
self.steering_cmd = theta
self.linear_vel = math.sqrt(math.pow(data.linear.x, 2) + math.pow(data.linear.y, 2))
self.angular_z = data.angular.z
self.linear_x = data.linear.x
if __name__ == '__main__':
try:
SimpleRoverController()
    except rospy.ROSInterruptException:
pass
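# A minimal sketch of driving this controller from another node (hedged: the node
# name and velocity values are illustrative; /csi_rover/cmd_vel is the topic this
# controller subscribes to above):
#
#   import rospy
#   from geometry_msgs.msg import Twist
#
#   rospy.init_node('rover_teleop_example')
#   pub = rospy.Publisher('/csi_rover/cmd_vel', Twist, queue_size=1)
#   rate = rospy.Rate(10)
#   while not rospy.is_shutdown():
#       cmd = Twist()
#       cmd.linear.x = 1.0   # crab-drive forward; set cmd.angular.z for skid steering
#       pub.publish(cmd)
#       rate.sleep()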
| 43.822917 | 131 | 0.651058 | [
"MIT"
] | BhargavRE25/Rover-Machine-Learning | src/csi_rover_controls/deprecated/simple_rover_controller.py | 4,207 | Python |
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import networkx as nx
from extensions.middle.ConstSwitchResolver import ConstSwitchEraser
from mo.graph.graph import erase_node
from mo.middle.replacement import MiddleReplacementPattern
class UselessMergeEraser(MiddleReplacementPattern):
enabled = True
def run_after(self):
return [ConstSwitchEraser]
def pattern(self):
return dict(
nodes=[('merge', dict(kind='op', op='Merge')),
('merge_data', dict(kind='data'))],
edges=[('merge', 'merge_data')]
)
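    # Hedged reading of the pattern above: each entry in `nodes` is a (name,
    # attribute-filter) pair that a graph node must match, `edges` lists directed
    # (producer, consumer) pairs between those names, and the matcher passes
    # `replace_pattern` a `match` dict keyed by the same names.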
def replace_pattern(self, graph: nx.MultiDiGraph, match: dict):
if len(graph.in_edges(match['merge'].id)) <= 1:
erase_node(match['merge'])
erase_node(match['merge_data'])
log.info("Useles Merge op and data nodes was deleted op='{}' data='{}'"
"".format(match['merge'].id, match['merge_data'].id))
| 33.488889 | 83 | 0.68215 | [
"Apache-2.0"
] | ArutyunovG/dldt | model-optimizer/extensions/middle/UselessMerge.py | 1,507 | Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
sys.dont_write_bytecode=True
from distutils.core import setup
from pyxsltp import __version__
setup(
name = "pyxsltp",
version = __version__,
py_modules = ['pyxsltp'],
scripts = ['pyxsltp'],
)
| 16.5625 | 32 | 0.671698 | [
"MIT"
] | hisashim/pyxsltp | setup.py | 265 | Python |
# Generated by Django 2.0.6 on 2018-07-05 16:13
from django.db import migrations, models
import posts.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=120)),
('slug', models.SlugField(allow_unicode=True, unique=True)),
('image', models.ImageField(blank=True, height_field='height_field', null=True, upload_to=posts.models.upload_location, width_field='width_field')),
('height_field', models.IntegerField(default=0)),
('width_field', models.IntegerField(default=0)),
('content', models.TextField()),
('updated', models.DateTimeField(auto_now=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['-timestamp', '-updated'],
},
),
]
| 35.090909 | 164 | 0.578584 | [
"MIT"
] | AmrMKayid/django-blog | posts/migrations/0001_initial.py | 1,158 | Python |
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
__version__ = '2.3.0'
| 27.4 | 59 | 0.722628 | [
"BSD-3-Clause"
] | Barbayar/integrations-core | presto/datadog_checks/presto/__about__.py | 137 | Python |
from enum import IntEnum
from typing import Dict, Union, Callable, List, Optional
from cereal import log, car
import cereal.messaging as messaging
from common.realtime import DT_CTRL
from selfdrive.config import Conversions as CV
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
# Alert priorities
class Priority(IntEnum):
LOWEST = 0
LOWER = 1
LOW = 2
MID = 3
HIGH = 4
HIGHEST = 5
# Event types
class ET:
ENABLE = 'enable'
PRE_ENABLE = 'preEnable'
NO_ENTRY = 'noEntry'
WARNING = 'warning'
USER_DISABLE = 'userDisable'
SOFT_DISABLE = 'softDisable'
IMMEDIATE_DISABLE = 'immediateDisable'
PERMANENT = 'permanent'
# get event name from enum
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
def __init__(self):
self.events: List[int] = []
self.static_events: List[int] = []
self.events_prev = dict.fromkeys(EVENTS.keys(), 0)
@property
def names(self) -> List[int]:
return self.events
def __len__(self) -> int:
return len(self.events)
def add(self, event_name: int, static: bool=False) -> None:
if static:
self.static_events.append(event_name)
self.events.append(event_name)
def clear(self) -> None:
self.events_prev = {k: (v + 1 if k in self.events else 0) for k, v in self.events_prev.items()}
self.events = self.static_events.copy()
def any(self, event_type: str) -> bool:
for e in self.events:
if event_type in EVENTS.get(e, {}).keys():
return True
return False
def create_alerts(self, event_types: List[str], callback_args=None):
if callback_args is None:
callback_args = []
ret = []
for e in self.events:
types = EVENTS[e].keys()
for et in event_types:
if et in types:
alert = EVENTS[e][et]
if not isinstance(alert, Alert):
alert = alert(*callback_args)
if DT_CTRL * (self.events_prev[e] + 1) >= alert.creation_delay:
alert.alert_type = f"{EVENT_NAME[e]}/{et}"
alert.event_type = et
ret.append(alert)
return ret
def add_from_msg(self, events):
for e in events:
self.events.append(e.name.raw)
def to_msg(self):
ret = []
for event_name in self.events:
event = car.CarEvent.new_message()
event.name = event_name
for event_type in EVENTS.get(event_name, {}).keys():
setattr(event, event_type, True)
ret.append(event)
return ret
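# A hedged usage sketch of the Events container (the surrounding flow and the
# CP/sm/metric objects are illustrative, not the real controlsd wiring):
#
#   events = Events()
#   events.add(EventName.startup, static=True)   # survives clear()
#   events.add(EventName.doorOpen)
#   if events.any(ET.NO_ENTRY):
#       alerts = events.create_alerts([ET.NO_ENTRY, ET.PERMANENT], [CP, sm, metric, 0])
#   events.clear()   # keeps static events, resets the per-frame ones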
class Alert:
def __init__(self,
alert_text_1: str,
alert_text_2: str,
alert_status: log.ControlsState.AlertStatus,
alert_size: log.ControlsState.AlertSize,
priority: Priority,
visual_alert: car.CarControl.HUDControl.VisualAlert,
audible_alert: car.CarControl.HUDControl.AudibleAlert,
duration: float,
alert_rate: float = 0.,
creation_delay: float = 0.):
self.alert_text_1 = alert_text_1
self.alert_text_2 = alert_text_2
self.alert_status = alert_status
self.alert_size = alert_size
self.priority = priority
self.visual_alert = visual_alert
self.audible_alert = audible_alert
self.duration = int(duration / DT_CTRL)
self.alert_rate = alert_rate
self.creation_delay = creation_delay
self.alert_type = ""
self.event_type: Optional[str] = None
def __str__(self) -> str:
return f"{self.alert_text_1}/{self.alert_text_2} {self.priority} {self.visual_alert} {self.audible_alert}"
def __gt__(self, alert2) -> bool:
return self.priority > alert2.priority
class NoEntryAlert(Alert):
def __init__(self, alert_text_2: str, visual_alert: car.CarControl.HUDControl.VisualAlert=VisualAlert.none):
super().__init__("오픈파일럿을 사용할 수 없음", alert_text_2, AlertStatus.normal,
AlertSize.mid, Priority.LOW, visual_alert,
AudibleAlert.refuse, 3.)
class SoftDisableAlert(Alert):
def __init__(self, alert_text_2: str):
super().__init__("핸들을 즉시 잡아주세요", alert_text_2,
AlertStatus.userPrompt, AlertSize.full,
Priority.MID, VisualAlert.steerRequired,
AudibleAlert.warningSoft, 2.),
# less harsh version of SoftDisable, where the condition is user-triggered
class UserSoftDisableAlert(SoftDisableAlert):
def __init__(self, alert_text_2: str):
super().__init__(alert_text_2),
self.alert_text_1 = "오픈파일럿이 해제됩니다."
class ImmediateDisableAlert(Alert):
def __init__(self, alert_text_2: str):
super().__init__("핸들을 즉시 잡아주세요", alert_text_2,
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.steerRequired,
AudibleAlert.warningImmediate, 4.),
class EngagementAlert(Alert):
def __init__(self, audible_alert: car.CarControl.HUDControl.AudibleAlert):
super().__init__("", "",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none,
audible_alert, .2),
class NormalPermanentAlert(Alert):
def __init__(self, alert_text_1: str, alert_text_2: str = "", duration: float = 0.2, priority: Priority = Priority.LOWER, creation_delay: float = 0.):
super().__init__(alert_text_1, alert_text_2,
AlertStatus.normal, AlertSize.mid if len(alert_text_2) else AlertSize.small,
priority, VisualAlert.none, AudibleAlert.none, duration, creation_delay=creation_delay),
class StartupAlert(Alert):
def __init__(self, alert_text_1: str, alert_text_2: str = "항상 핸들을 잡고 도로를 주시하세요", alert_status=AlertStatus.normal):
super().__init__(alert_text_1, alert_text_2,
alert_status, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 5.),
# ********** helper functions **********
def get_display_speed(speed_ms: float, metric: bool) -> str:
speed = int(round(speed_ms * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH)))
unit = 'km/h' if metric else 'mph'
return f"{speed} {unit}"
# ********** alert callback functions **********
AlertCallbackType = Callable[[car.CarParams, messaging.SubMaster, bool, int], Alert]
def soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
def func(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
#if soft_disable_time < int(0.5 / DT_CTRL):
# return ImmediateDisableAlert(alert_text_2)
return SoftDisableAlert(alert_text_2)
return func
def user_soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
def func(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
#if soft_disable_time < int(0.5 / DT_CTRL):
# return ImmediateDisableAlert(alert_text_2)
return UserSoftDisableAlert(alert_text_2)
return func
def below_engage_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return NoEntryAlert(f"Speed Below {get_display_speed(CP.minEnableSpeed, metric)}")
def below_steer_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return Alert(
f"Steer Unavailable Below {get_display_speed(CP.minSteerSpeed, metric)}",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.prompt, 0.4)
def calibration_incomplete_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return Alert(
"Calibration in Progress: %d%%" % sm['liveCalibration'].calPerc,
f"Drive Above {get_display_speed(MIN_SPEED_FILTER, metric)}",
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2)
def no_gps_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
gps_integrated = sm['peripheralState'].pandaType in [log.PandaState.PandaType.uno, log.PandaState.PandaType.dos]
return Alert(
"Poor GPS reception",
"If sky is visible, contact support" if gps_integrated else "Check GPS antenna placement",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=300.)
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
text = "Cruise Mode Disabled"
if CP.carName == "honda":
text = "Main Switch Off"
return NoEntryAlert(text)
def joystick_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
axes = sm['testJoystick'].axes
gb, steer = list(axes)[:2] if len(axes) else (0., 0.)
vals = f"Gas: {round(gb * 100.)}%, Steer: {round(steer * 100.)}%"
return NormalPermanentAlert("Joystick Mode", vals)
def auto_lane_change_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
alc_timer = sm['lateralPlan'].autoLaneChangeTimer
return Alert(
"차선 변경을 시작합니다 in (%d)" % alc_timer,
"차선의 차량을 확인하세요",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75)
EVENTS: Dict[int, Dict[str, Union[Alert, AlertCallbackType]]] = {
# ********** events with no alerts **********
EventName.stockFcw: {},
EventName.lkasDisabled: {},
# ********** events only containing alerts displayed in all states **********
EventName.joystickDebug: {
ET.WARNING: joystick_alert,
ET.PERMANENT: NormalPermanentAlert("Joystick Mode"),
},
EventName.controlsInitializing: {
ET.NO_ENTRY: NoEntryAlert("System Initializing"),
},
EventName.startup: {
ET.PERMANENT: StartupAlert("Be ready to take over at any time")
},
EventName.startupMaster: {
ET.PERMANENT: StartupAlert("WARNING: This branch is not tested",
alert_status=AlertStatus.userPrompt),
},
# Car is recognized, but marked as dashcam only
EventName.startupNoControl: {
ET.PERMANENT: StartupAlert("Dashcam mode"),
},
# Car is not recognized
EventName.startupNoCar: {
ET.PERMANENT: StartupAlert("Dashcam mode for unsupported car"),
},
EventName.startupNoFw: {
ET.PERMANENT: StartupAlert("Car Unrecognized",
"Check comma power connections",
alert_status=AlertStatus.userPrompt),
},
EventName.dashcamMode: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
priority=Priority.LOWEST),
},
EventName.invalidLkasSetting: {
ET.PERMANENT: NormalPermanentAlert("Stock LKAS is on",
"Turn off stock LKAS to engage"),
},
EventName.cruiseMismatch: {
#ET.PERMANENT: ImmediateDisableAlert("openpilot failed to cancel cruise"),
},
# Some features or cars are marked as community features. If openpilot
# detects the use of a community feature it switches to dashcam mode
# until these features are allowed using a toggle in settings.
EventName.communityFeatureDisallowed: {
ET.PERMANENT: NormalPermanentAlert("openpilot Unavailable",
"Enable Community Features in Settings"),
},
# openpilot doesn't recognize the car. This switches openpilot into a
# read-only mode. This can be solved by adding your fingerprint.
# See https://github.com/commaai/openpilot/wiki/Fingerprinting for more information
EventName.carUnrecognized: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
"Car Unrecognized",
priority=Priority.LOWEST),
},
EventName.stockAeb: {
ET.PERMANENT: Alert(
"브레이크!",
"추돌 위험",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 2.),
ET.NO_ENTRY: NoEntryAlert("Stock AEB: Risk of Collision"),
},
EventName.fcw: {
ET.PERMANENT: Alert(
"브레이크!",
"추돌 위험",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.warningSoft, 2.),
},
EventName.ldw: {
ET.PERMANENT: Alert(
"핸들을 잡아주세요",
"차선이탈 감지됨",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.ldw, AudibleAlert.prompt, 3.),
},
# ********** events only containing alerts that display while engaged **********
EventName.gasPressed: {
ET.PRE_ENABLE: Alert(
"Release Gas Pedal to Engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .1, creation_delay=1.),
},
# openpilot tries to learn certain parameters about your car by observing
# how the car behaves to steering inputs from both human and openpilot driving.
# This includes:
# - steer ratio: gear ratio of the steering rack. Steering angle divided by tire angle
# - tire stiffness: how much grip your tires have
# - angle offset: most steering angle sensors are offset and measure a non zero angle when driving straight
# This alert is thrown when any of these values exceed a sanity check. This can be caused by
# bad alignment or bad sensor data. If this happens consistently consider creating an issue on GitHub
EventName.vehicleModelInvalid: {
ET.NO_ENTRY: NoEntryAlert("Vehicle Parameter Identification Failed"),
ET.SOFT_DISABLE: soft_disable_alert("Vehicle Parameter Identification Failed"),
},
EventName.steerTempUnavailableSilent: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"조향제어 일시적으로 사용불가",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.prompt, 1.),
},
EventName.preDriverDistracted: {
ET.WARNING: Alert(
"도로를 주시하세요 : 운전자 도로주시 불안",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.promptDriverDistracted: {
ET.WARNING: Alert(
"도로를 주시하세요",
"운전자 도로주시 불안",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverDistracted: {
ET.WARNING: Alert(
"조향제어가 강제로 해제됩니다",
"운전자 도로주시 불안",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.preDriverUnresponsive: {
ET.WARNING: Alert(
"핸들을 잡아주세요 : 운전자 인식 불가",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.promptDriverUnresponsive: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"운전자 응답하지않음",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverUnresponsive: {
ET.WARNING: Alert(
"조향제어가 강제로 해제됩니다",
"운전자 응답하지않음",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.manualRestart: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"수동으로 재활성화하세요",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.resumeRequired: {
ET.WARNING: Alert(
"앞차량 멈춤",
"앞차가 출발하면 자동 재출발",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.belowSteerSpeed: {
ET.WARNING: below_steer_speed_alert,
},
EventName.preLaneChangeLeft: {
ET.WARNING: Alert(
"차선을 변경합니다",
"좌측차선의 차량을 확인하세요",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.preLaneChangeRight: {
ET.WARNING: Alert(
"차선을 변경합니다",
"우측차선의 차량을 확인하세요",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.laneChangeBlocked: {
ET.WARNING: Alert(
"후측방 차량감지",
"차선에 차량이 감지되니 대기하세요",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.prompt, .1),
},
EventName.laneChange: {
ET.WARNING: Alert(
"차선을 변경합니다",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.steerSaturated: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"조향제어 제한을 초과함",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 1.),
},
# Thrown when the fan is driven at >50% but is not rotating
EventName.fanMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Fan Malfunction", "Contact Support"),
},
# Camera is not outputting frames at a constant framerate
EventName.cameraMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Camera Malfunction", "Contact Support"),
},
# Unused
EventName.gpsMalfunction: {
ET.PERMANENT: NormalPermanentAlert("GPS Malfunction", "Contact Support"),
},
# When the GPS position and localizer diverge the localizer is reset to the
# current GPS position. This alert is thrown when the localizer is reset
# more often than expected.
EventName.localizerMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Sensor Malfunction", "Contact Support"),
},
# ********** events that affect controls state transitions **********
EventName.pcmEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.buttonEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.pcmDisable: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.buttonCancel: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.brakeHold: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.parkBrake: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Parking Brake Engaged"),
},
EventName.pedalPressed: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Pedal Pressed",
visual_alert=VisualAlert.brakePressed),
},
EventName.wrongCarMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: wrong_car_mode_alert,
},
EventName.wrongCruiseMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Adaptive Cruise Disabled"),
},
EventName.steerTempUnavailable: {
ET.SOFT_DISABLE: soft_disable_alert("Steering Temporarily Unavailable"),
ET.NO_ENTRY: NoEntryAlert("Steering Temporarily Unavailable"),
},
EventName.outOfSpace: {
ET.PERMANENT: NormalPermanentAlert("Out of Storage"),
ET.NO_ENTRY: NoEntryAlert("Out of Storage"),
},
EventName.belowEngageSpeed: {
ET.NO_ENTRY: below_engage_speed_alert,
},
EventName.sensorDataInvalid: {
ET.PERMANENT: Alert(
"No Data from Device Sensors",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("No Data from Device Sensors"),
},
EventName.noGps: {
ET.PERMANENT: no_gps_alert,
},
EventName.soundsUnavailable: {
ET.PERMANENT: NormalPermanentAlert("Speaker not found", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Speaker not found"),
},
EventName.tooDistracted: {
ET.NO_ENTRY: NoEntryAlert("Distraction Level Too High"),
},
EventName.overheat: {
ET.PERMANENT: NormalPermanentAlert("System Overheated"),
ET.SOFT_DISABLE: soft_disable_alert("System Overheated"),
ET.NO_ENTRY: NoEntryAlert("System Overheated"),
},
EventName.wrongGear: {
ET.SOFT_DISABLE: user_soft_disable_alert("Gear not D"),
ET.NO_ENTRY: NoEntryAlert("Gear not D"),
},
# This alert is thrown when the calibration angles are outside of the acceptable range.
# For example if the device is pointed too much to the left or the right.
# Usually this can only be solved by removing the mount from the windshield completely,
# and attaching while making sure the device is pointed straight forward and is level.
# See https://comma.ai/setup for more information
EventName.calibrationInvalid: {
ET.PERMANENT: NormalPermanentAlert("Calibration Invalid", "Remount Device and Recalibrate"),
ET.SOFT_DISABLE: soft_disable_alert("Calibration Invalid: Remount Device & Recalibrate"),
ET.NO_ENTRY: NoEntryAlert("Calibration Invalid: Remount Device & Recalibrate"),
},
EventName.calibrationIncomplete: {
ET.PERMANENT: calibration_incomplete_alert,
ET.SOFT_DISABLE: soft_disable_alert("Calibration in Progress"),
ET.NO_ENTRY: NoEntryAlert("Calibration in Progress"),
},
EventName.doorOpen: {
ET.SOFT_DISABLE: user_soft_disable_alert("Door Open"),
ET.NO_ENTRY: NoEntryAlert("Door Open"),
},
EventName.seatbeltNotLatched: {
ET.SOFT_DISABLE: user_soft_disable_alert("Seatbelt Unlatched"),
ET.NO_ENTRY: NoEntryAlert("Seatbelt Unlatched"),
},
EventName.espDisabled: {
ET.SOFT_DISABLE: soft_disable_alert("ESP Off"),
ET.NO_ENTRY: NoEntryAlert("ESP Off"),
},
EventName.lowBattery: {
ET.SOFT_DISABLE: soft_disable_alert("Low Battery"),
ET.NO_ENTRY: NoEntryAlert("Low Battery"),
},
# Different openpilot services communicate between each other at a certain
# interval. If communication does not follow the regular schedule this alert
# is thrown. This can mean a service crashed, did not broadcast a message for
# ten times the regular interval, or the average interval is more than 10% too high.
EventName.commIssue: {
ET.SOFT_DISABLE: soft_disable_alert("Communication Issue between Processes"),
ET.NO_ENTRY: NoEntryAlert("Communication Issue between Processes"),
},
# Thrown when manager detects a service exited unexpectedly while driving
EventName.processNotRunning: {
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.radarFault: {
ET.SOFT_DISABLE: soft_disable_alert("Radar Error: Restart the Car"),
ET.NO_ENTRY: NoEntryAlert("Radar Error: Restart the Car"),
},
# Every frame from the camera should be processed by the model. If modeld
# is not processing frames fast enough they have to be dropped. This alert is
# thrown when over 20% of frames are dropped.
EventName.modeldLagging: {
ET.SOFT_DISABLE: soft_disable_alert("Driving model lagging"),
ET.NO_ENTRY: NoEntryAlert("Driving model lagging"),
},
# Besides predicting the path, lane lines and lead car data the model also
# predicts the current velocity and rotation speed of the car. If the model is
# very uncertain about the current velocity while the car is moving, this
# usually means the model has trouble understanding the scene. This is used
# as a heuristic to warn the driver.
EventName.posenetInvalid: {
ET.SOFT_DISABLE: soft_disable_alert("Model Output Uncertain"),
ET.NO_ENTRY: NoEntryAlert("Model Output Uncertain"),
},
# When the localizer detects an acceleration of more than 40 m/s^2 (~4G) we
# alert the driver the device might have fallen from the windshield.
EventName.deviceFalling: {
ET.SOFT_DISABLE: soft_disable_alert("Device Fell Off Mount"),
ET.NO_ENTRY: NoEntryAlert("Device Fell Off Mount"),
},
EventName.lowMemory: {
ET.SOFT_DISABLE: soft_disable_alert("Low Memory: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("Low Memory", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Low Memory: Reboot Your Device"),
},
EventName.highCpuUsage: {
#ET.SOFT_DISABLE: soft_disable_alert("System Malfunction: Reboot Your Device"),
#ET.PERMANENT: NormalPermanentAlert("System Malfunction", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.accFaulted: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Faulted"),
ET.PERMANENT: NormalPermanentAlert("Cruise Faulted", ""),
ET.NO_ENTRY: NoEntryAlert("Cruise Faulted"),
},
EventName.controlsMismatch: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Mismatch"),
},
EventName.roadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.driverCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.wideRoadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
# Sometimes the USB stack on the device can get into a bad state
# causing the connection to the panda to be lost
EventName.usbError: {
ET.SOFT_DISABLE: soft_disable_alert("USB Error: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("USB Error: Reboot Your Device", ""),
ET.NO_ENTRY: NoEntryAlert("USB Error: Reboot Your Device"),
},
# This alert can be thrown for the following reasons:
# - No CAN data received at all
# - CAN data is received, but some message are not received at the right frequency
# If you're not writing a new car port, this is usually cause by faulty wiring
EventName.canError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN Error: Check Connections"),
ET.PERMANENT: Alert(
"CAN Error: Check Connections",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 1., creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN Error: Check Connections"),
},
EventName.steerUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("LKAS Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("LKAS Fault: Restart the Car"),
},
EventName.brakeUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.reverseGear: {
ET.PERMANENT: Alert(
"Reverse\nGear",
"",
AlertStatus.normal, AlertSize.full,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2, creation_delay=0.5),
ET.SOFT_DISABLE: SoftDisableAlert("Reverse Gear"),
ET.NO_ENTRY: NoEntryAlert("Reverse Gear"),
},
# On cars that use stock ACC the car can decide to cancel ACC for various reasons.
  # When this happens we can no longer control the car so the user needs to be warned immediately.
EventName.cruiseDisabled: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Is Off"),
},
# For planning the trajectory Model Predictive Control (MPC) is used. This is
# an optimization algorithm that is not guaranteed to find a feasible solution.
# If no solution is found or the solution has a very high cost this alert is thrown.
EventName.plannerError: {
ET.SOFT_DISABLE: SoftDisableAlert("Planner Solution Error"),
ET.NO_ENTRY: NoEntryAlert("Planner Solution Error"),
},
# When the relay in the harness box opens the CAN bus between the LKAS camera
# and the rest of the car is separated. When messages from the LKAS camera
# are received on the car side this usually means the relay hasn't opened correctly
# and this alert is thrown.
EventName.relayMalfunction: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Harness Malfunction"),
ET.PERMANENT: NormalPermanentAlert("Harness Malfunction", "Check Hardware"),
ET.NO_ENTRY: NoEntryAlert("Harness Malfunction"),
},
EventName.noTarget: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"No close lead car",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
ET.NO_ENTRY: NoEntryAlert("No Close Lead Car"),
},
EventName.speedTooLow: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"Speed too low",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
},
# When the car is driving faster than most cars in the training data, the model outputs can be unpredictable.
EventName.speedTooHigh: {
ET.WARNING: Alert(
"Speed Too High",
"Model uncertain at this speed",
AlertStatus.userPrompt, AlertSize.mid,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 4.),
ET.NO_ENTRY: NoEntryAlert("Slow down to engage"),
},
EventName.lowSpeedLockout: {
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.turningIndicatorOn: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Steer Unavailable while Turning",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.autoLaneChange: {
ET.WARNING: auto_lane_change_alert,
},
EventName.slowingDownSpeed: {
ET.PERMANENT: Alert("Slowing down","", AlertStatus.normal, AlertSize.small,
Priority.MID, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.slowingDownSpeedSound: {
ET.PERMANENT: Alert("Slowing down","", AlertStatus.normal, AlertSize.small,
Priority.HIGH, VisualAlert.none, AudibleAlert.slowingDownSpeed, 2.),
},
}
| 34.815668 | 152 | 0.691264 | [
"MIT"
] | fallen8angel/forNEXO | selfdrive/controls/lib/events.py | 30,954 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class SsdataDataserviceDtevalIdentitycheckQueryResponse(AlipayResponse):
def __init__(self):
super(SsdataDataserviceDtevalIdentitycheckQueryResponse, self).__init__()
self._evidence = None
self._ext_map = None
self._id_card_no_match_flag = None
self._name_match_flag = None
self._push_ant_data_flag = None
@property
def evidence(self):
return self._evidence
@evidence.setter
def evidence(self, value):
self._evidence = value
@property
def ext_map(self):
return self._ext_map
@ext_map.setter
def ext_map(self, value):
self._ext_map = value
@property
def id_card_no_match_flag(self):
return self._id_card_no_match_flag
@id_card_no_match_flag.setter
def id_card_no_match_flag(self, value):
self._id_card_no_match_flag = value
@property
def name_match_flag(self):
return self._name_match_flag
@name_match_flag.setter
def name_match_flag(self, value):
self._name_match_flag = value
@property
def push_ant_data_flag(self):
return self._push_ant_data_flag
@push_ant_data_flag.setter
def push_ant_data_flag(self, value):
self._push_ant_data_flag = value
def parse_response_content(self, response_content):
response = super(SsdataDataserviceDtevalIdentitycheckQueryResponse, self).parse_response_content(response_content)
if 'evidence' in response:
self.evidence = response['evidence']
if 'ext_map' in response:
self.ext_map = response['ext_map']
if 'id_card_no_match_flag' in response:
self.id_card_no_match_flag = response['id_card_no_match_flag']
if 'name_match_flag' in response:
self.name_match_flag = response['name_match_flag']
if 'push_ant_data_flag' in response:
self.push_ant_data_flag = response['push_ant_data_flag']
| 31.712121 | 122 | 0.697086 | [
"Apache-2.0"
] | articuly/alipay-sdk-python-all | alipay/aop/api/response/SsdataDataserviceDtevalIdentitycheckQueryResponse.py | 2,093 | Python |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class SourceProviderAttributes(Model):
"""SourceProviderAttributes.
:param name: The name of the source provider.
:type name: str
:param supported_capabilities: The capabilities supported by this source provider.
:type supported_capabilities: dict
:param supported_triggers: The types of triggers supported by this source provider.
:type supported_triggers: list of :class:`SupportedTrigger <build.v4_1.models.SupportedTrigger>`
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'supported_capabilities': {'key': 'supportedCapabilities', 'type': '{bool}'},
'supported_triggers': {'key': 'supportedTriggers', 'type': '[SupportedTrigger]'}
}
def __init__(self, name=None, supported_capabilities=None, supported_triggers=None):
super(SourceProviderAttributes, self).__init__()
self.name = name
self.supported_capabilities = supported_capabilities
self.supported_triggers = supported_triggers
| 48.058824 | 101 | 0.585679 | [
"Unlicense",
"MIT"
] | amcclead7336/Enterprise_Data_Science_Final | venv/lib/python3.8/site-packages/vsts/build/v4_1/models/source_provider_attributes.py | 1,634 | Python |
""" Tests the creation of tables, and the methods of the sql class
"""
from pyrate.repositories.sql import Table
from utilities import setup_database
class TestSql:
""" Tests the Sql class
"""
def test_get_list_of_columns(self, setup_database):
db = setup_database
rows = [{'unit': 'days',
'description': 'At berth/anchor',
'name': 's_berth_day'},
{'unit': 'SOG / kts',
'description': 'Average at sea',
'name': 's_av_sea'}]
with db:
actual = db.clean._get_list_of_columns(rows[0])
assert isinstance(actual, str)
assert actual.endswith(')')
assert actual[0] == '('
actual_contents = actual.strip('()').split(',')
expected = ['description','name','unit']
for expected_column in expected:
assert expected_column in actual_contents
def test_get_list_of_columns_lowerconversion(self, setup_database):
db = setup_database
rows = [{'uNit': 'days',
'Description': 'At berth/anchor',
'namE': 's_berth_day'},
{'unit': 'SOG / kts',
'description': 'Average at sea',
'name': 's_av_sea'}]
with db:
actual = db.clean._get_list_of_columns(rows[0])
assert isinstance(actual, str)
assert actual.endswith(')')
assert actual[0] == '('
actual_contents = actual.strip('()').split(',')
expected = ['description','name','unit']
for expected_column in expected:
assert expected_column in actual_contents
| 33.959184 | 71 | 0.558293 | [
"MIT"
] | UCL-ShippingGroup/pyrate | tests/test_sql.py | 1,664 | Python |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# pylint: disable-all
# flake8: noqa
"""Factory method for easily getting imdbs by name."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__sets = {}
from datasets.pascal_voc import pascal_voc
from datasets.coco import coco
from datasets.imagenet import imagenet
from datasets.vg import vg
from datasets.ads import ads
import numpy as np
# Set up ads dataset
for split in ['train', 'val']:
name = 'pitt_ads_{}'.format(split)
__sets[name] = (lambda split=split : ads(split))
# Set up voc_<year>_<split>
for year in ['2007', '2012']:
for split in ['train', 'val', 'trainval', 'test']:
name = 'voc_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: pascal_voc(split, year))
# Set up coco_2014_<split>
for year in ['2014']:
for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up coco_2014_cap_<split>
for year in ['2014']:
for split in ['train', 'val', 'capval', 'valminuscapval', 'trainval']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up coco_2015_<split>
for year in ['2015']:
for split in ['test', 'test-dev']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up vg_<split>
# for version in ['1600-400-20']:
# for split in ['minitrain', 'train', 'minival', 'val', 'test']:
# name = 'vg_{}_{}'.format(version,split)
# __sets[name] = (lambda split=split, version=version: vg(version, split))
for version in ['150-50-20', '150-50-50', '500-150-80', '750-250-150', '1750-700-450', '1600-400-20']:
for split in ['minitrain', 'smalltrain', 'train', 'minival', 'smallval', 'val', 'test']:
name = 'vg_{}_{}'.format(version,split)
__sets[name] = (lambda split=split, version=version: vg(version, split))
# set up imagenet.
for split in ['train', 'val', 'val1', 'val2', 'test']:
name = 'imagenet_{}'.format(split)
devkit_path = 'data/imagenet/ILSVRC/devkit'
data_path = 'data/imagenet/ILSVRC'
__sets[name] = (lambda split=split, devkit_path=devkit_path, data_path=data_path: imagenet(split,devkit_path,data_path))
def get_imdb(name):
"""Get an imdb (image database) by name."""
if name not in __sets:
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
def list_imdbs():
"""List all registered imdbs."""
return list(__sets.keys())
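# Hedged usage sketch (the dataset name is built by the loops above; the devkit
# paths and data are assumed to be present on disk):
#
#   imdb = get_imdb('voc_2007_trainval')
#   print(imdb.name, imdb.num_classes)
#   print(list_imdbs()[:5])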
| 36 | 124 | 0.638889 | [
"MIT"
] | hinthornw/faster_rcnn_symbols | lib/datasets/factory.py | 2,880 | Python |
import math
def is_prime(num):
if num < 2:
return False
    for i in range(2, num):
        if num % i == 0:
            return False
    return True
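# A hedged alternative (not part of the original solution): trial division only
# needs to test divisors up to sqrt(num), which matters when hunting the 10001st prime.
def is_prime_fast(num):
    if num < 2:
        return False
    for i in range(2, int(math.sqrt(num)) + 1):
        if num % i == 0:
            return False
    return True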
def get_nth_prime(n):
cnt = 0
i = 0
while cnt < n:
i += 1
if is_prime(i):
cnt += 1
return i
if __name__ == '__main__':
#print get_nth_prime(6)
print get_nth_prime(10001)
| 13.740741 | 28 | 0.568733 | [
"MIT"
] | birdchan/project_euler | problems/007/run.v1.py | 371 | Python |
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class CreateConfigurationResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'configuration': 'ConfigurationSummaryForCreate'
}
attribute_map = {
'configuration': 'configuration'
}
def __init__(self, configuration=None):
"""CreateConfigurationResponse - a model defined in huaweicloud sdk"""
super(CreateConfigurationResponse, self).__init__()
self._configuration = None
self.discriminator = None
if configuration is not None:
self.configuration = configuration
@property
def configuration(self):
"""Gets the configuration of this CreateConfigurationResponse.
:return: The configuration of this CreateConfigurationResponse.
:rtype: ConfigurationSummaryForCreate
"""
return self._configuration
@configuration.setter
def configuration(self, configuration):
"""Sets the configuration of this CreateConfigurationResponse.
:param configuration: The configuration of this CreateConfigurationResponse.
:type: ConfigurationSummaryForCreate
"""
self._configuration = configuration
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateConfigurationResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.247706 | 84 | 0.583956 | [
"Apache-2.0"
] | JeffreyDin/huaweicloud-sdk-python-v3 | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/create_configuration_response.py | 3,079 | Python |
from tkinter import *
from tkinter import ttk
import time
window = Tk()
mygreen = "lightblue"
myred = "blue"
style = ttk.Style()
style.theme_create( "dedoff", parent="alt", settings={
"TNotebook": {"configure": {"tabmargins": [2, 5, 2, 0] } },
"TNotebook.Tab": {
"configure": {"padding": [5, 1], "background": mygreen },
"map": {"background": [("selected", myred)],
"expand": [("selected", [1, 1, 1, 0])] } } } )
style.theme_use("dedoff")
window.title("Электронный учебник tkinter")
window.geometry('1920x1080')
tab_control = ttk.Notebook(window)
#панели
tab1 = ttk.Frame(tab_control, width=1920, height=1080)
tab2 = ttk.Frame(tab_control, width=1920, height=1080)
tab3 = ttk.Frame(tab_control, width=1080, height=600)
tab4 = ttk.Frame(tab_control, width=1080, height=600)
tab5 = ttk.Frame(tab_control, width=1080, height=600)
tab6 = ttk.Frame(tab_control, width=1080, height=600)
tab7 = ttk.Frame(tab_control, width=1080, height=600)
tab8 = ttk.Frame(tab_control, width=1080, height=600)
tab9 = ttk.Frame(tab_control, width=1080, height=600)
tab10 = ttk.Frame(tab_control, width=1080, height=600)
tab_control.add(tab1, text='Начало')
background_image = PhotoImage(file='background.ppm')
background_label = Label(tab1, image=background_image)
background_label.place(relwidth=1, relheight=1)
lower_frame = Frame(tab1, bg="lightblue", bd=10)
lower_frame.place(relx=0.5, rely=0.10, relwidth=0.75, relheight=0.75, anchor='n')
labeltext1 = Label(lower_frame, text="Tkinter – это кроссплатформенная библиотека для разработки графического интерфейса на "
"языке Python\n (начиная с Python 3.0 переименована в tkinter). Tkinter расшифровывается "
"как Tk interface \nНачиная с версии python-3.0 библиотека переименована в соответствии с "
"PEP 8 в tkinter (с маленькой буквы). \nИмпортируется она как и любая другая библиотека "
"абсолютно весь код в этом учебнике написан для python версии 3.x \nПодключить модуль "
"можно с помощью инструкции import. После ключевого слова import указывается название "
"модуля.\n Одной инструкцией можно подключить несколько модулей. Для подключения всех \n"
"функций модуля используем:\n"
"import tkinter \n"
"или \n"
"from tkinter import * \n"
"Чтобы убедиться, что Tkinter установлен и работает, воспользуемся стандартной "
"функцией Tkinter: test():"
"\n"
"import tkinter \n"
"tkinter._test() \n"
,
font=("Times New Roman", 13), bg="white")
labeltext1.place(relwidth=1, relheight=0.6)
photo = PhotoImage(file='edu54img.pgm')
labelimage = Label(lower_frame,bg='white', image=photo)
labelimage.place(relx=0.5, rely=0.6, relwidth=1, relheight=0.4, anchor='n')
#ОГО ВТОРООООООООООЙ ТААААААААААААААААААБ
tab_control.add(tab2, text='Canvas')
background_image2 = PhotoImage(file='background.ppm')
background_label1 = Label(tab2, image=background_image2)
background_label1.place(relwidth=1, relheight=1)
lower_frame1 = Frame(tab2, bg="lightblue", bd=10)
lower_frame1.place(relx=0.5, rely=0.02, relwidth=0.75, relheight=0.95, anchor='n')
labeltext2 = Label(lower_frame1, text=u"Привет, это второй раздел учебника.\n В tkinter от класса Canvas создаются объекты-холсты, на которых можно рисовать,\n"
"размещая различные фигуры и объекты. Делается это с помощью вызовов соответствующих \n"
"методов. При создании экземпляра Canvas необходимо указать его ширину и высоту. При \n"
"размещении геометрических примитивов и других объектов указываются их координаты на \n "
"холсте. Точкой отсчета является верхний левый угол.", font=("Times New Roman", 12), bg="white")
labeltext2.place(relwidth=1, relheight=0.3)
photo2 = PhotoImage(file='edu54img2.pgm')
labelimage1 = Label(lower_frame1, bg='white', image=photo2)
labelimage1.place(relx=0.5, rely=0.30, relwidth=1, relheight=0.49, anchor='n')
labeltext2 = Label(lower_frame1, text="В программе ниже создается холст.\n"
"from tkinter import *\n"
"window = Tk()\n"
"c = Canvas(root, width=200, height=200, bg='white')\n"
"c.pack()\n"
"window.mainloop()\n"
"в следующей главе мы разберем как рисовать на этом холсте", font=("Times New Roman", 12), bg="white")
labeltext2.place(relx=0.5, rely=0.75, relwidth=1, relheight=0.3, anchor='n')
tab_control.add(tab3, text='Примитивы')
background_image3 = PhotoImage(file='background.ppm')
background_label2 = Label(tab3, image=background_image3)
background_label2.place(relwidth=1, relheight=1)
lower_frame2 = Frame(tab3, bg="lightblue", bd=10)
lower_frame2.place(relx=0.5, rely=0.02, relwidth=0.8, relheight=0.95, anchor='n')
labeltext3 = Label(lower_frame2, text="В tkinter уже есть графические примитивы, для рисования, их нужно всего лишь правильно "
"указать.\n В программе ниже создается холст. На нем с помощью метода create_line() "
"рисуются отрезки. \n Сначала указываются координаты начала (x1, y1), затем – конца (x2, "
"y2) В программе ниже создаётся и рисуется линия на холсте.", font=("Times New Roman", 12), bg="white")
labeltext3.place(relwidth=1, relheight=0.12)
codeimg = PhotoImage(file='code.pgm')
labelimg = Label(lower_frame2, bg='white', image=codeimg)
labelimg.place(relx=0.5, rely=0.11, relwidth=1, relheight=0.5, anchor='n')
labelgotext = Label(lower_frame2, text="Собственно сами примитивы. Указываем координаты примитива всегда следующим образом – \n "
"верхний левый угол(x1, y1), вторые – правый нижний(x2, y2).", font=("Times New "
"Roman", 11),
bg='white')
labelgotext.place(relx=0.5, rely=0.52, relwidth=1, relheight=0.07, anchor='n')
rectangle = PhotoImage(file='rectangle.ppm')
rectanglelabel = Label(lower_frame2, bg='white', image=rectangle)
rectanglelabel.place(relx=0.5, rely=0.60, relwidth=1, relheight=0.45, anchor='n')
labelgotext2 = Label(lower_frame2, text="Далее о других примитивах в следующей вкладке", font=("Times New "
"Roman", 11),
bg='white')
labelgotext2.place(relx=0.5, rely=0.97, relwidth=1, relheight=0.05, anchor='n')
tab_control.add(tab4, text='Примитивы 2')
background_image4 = PhotoImage(file='background.ppm')
background_label3 = Label(tab4, image=background_image4)
background_label3.place(relwidth=1, relheight=1)
lower_frame3 = Frame(tab4, bg="lightblue", bd=10)
lower_frame3.place(relx=0.5, rely=0, relwidth=0.9, relheight=1, anchor='n')
oval = PhotoImage(file='oval_1.ppm')
ovallabel = Label(lower_frame3,bg='white', image=oval)
ovallabel.place(relx=0.5, rely=0, relwidth=1, relheight=0.55, anchor='n')
elipsoid = PhotoImage(file='ellipssmall.ppm')
elabel = Label(lower_frame3, bg='white', image=elipsoid)
elabel.place(relx=0.5, rely=0.5, relwidth=1, relheight=0.25, anchor='n')
labeltext4 = Label(lower_frame3, text="Метод create_oval(x1, y1, x2, y2) создает эллипсы. При этом задаются координаты гипотетического "
"прямоугольника, описывающего эллипс. \nЕсли нужно получить круг, то соответственно "
"описываемый прямоугольник должен быть квадратом.\n"
"Методом create_polygon(x1, x2...xn, yn) рисуется произвольный многоугольник путем задания координат каждой его точки\n"
"Создание прямоугольников методом create_rectangle(x1, y1, x2, y2)\n"
"Опции: \nwidth=число - ширина обводки, fill='color' - цвет заливки,\n outline='color' - цвет "
"обводки,\n activefill определяет цвет при наведении на него курсора мыши.\n"
"activeoutline определяет цвет обводки при наведении курсор", font=("Times New Roman", 11),
bg="white")
labeltext4.place(relx=0.5, rely=0.74, relwidth=1, relheight=0.26, anchor='n')
tab_control.add(tab5, text='Примитивы 3')
background_image5 = PhotoImage(file='background.ppm')
background_label4 = Label(tab5, image=background_image5)
background_label4.place(relwidth=1, relheight=1)
lower_frame4 = Frame(tab5, bg="lightblue", bd=10)
lower_frame4.place(relx=0.5, rely=0.05, relwidth=0.75, relheight=0.9, anchor='n')
labeltext5 = Label(lower_frame4, text="Более сложные для понимания фигуры получаются при использовании метода create_arc(). В \n"
"зависимости от значения опции style можно получить сектор (по умолчанию), \n"
"сегмент (CHORD) или дугу (ARC). Также как в случае create_oval() координаты задают \n"
"прямоугольник, в который вписана окружность (или эллипс), из которой вырезают сектор, \n"
"сегмент или дугу. Опции start присваивается градус начала фигуры, extent определяет "
"угол поворота.",
font=("Times New Roman", 11), bg="white")
labeltext5.place(relwidth=1, relheight=0.2)
arc = PhotoImage(file='arc.ppm')
arclabel = Label(lower_frame4,bg='white', image=arc)
arclabel.place(relx=0.5, rely=0.15, relwidth=1, relheight=0.4, anchor='n')
arc2 = PhotoImage(file='arc2.ppm')
arclabel2 = Label(lower_frame4,bg='white', image=arc2)
arclabel2.place(relx=0.5, rely=0.55, relwidth=1, relheight=0.5, anchor='n')
tab_control.add(tab6, text='Полезное')
background_image6 = PhotoImage(file='background.ppm')
background_label6 = Label(tab6, image=background_image6)
background_label6.place(relwidth=1, relheight=1)
table = PhotoImage(file='colortable.ppm')
tablelabel = Label(tab6,bg='lightblue', image=table)
tablelabel.place(relx=0.5, rely=0, relwidth=0.82, relheight=1, anchor='n')
tab_control.add(tab7, text='Практикум')
background_image7 = PhotoImage(file='background.ppm')
background_label7 = Label(tab7, bg='white', image=background_image7)
background_label7.place(relwidth=1, relheight=1)
lower_frame7 = Frame(tab7, bg="lightblue", bd=10)
lower_frame7.place(relx=0.5, rely=0.001, relwidth=0.65, relheight=1, anchor='n')
labelTASK1 = Label(lower_frame7, text="1) Пропеллер"
":Нарисуйте пропеллер, как это показано ниже\n"
"'Кто мечтает быть пилотом, очень смелый видно тот. От-от-от вин-та!'", font=("Georgia", 12,), bg='white')
labelTASK1.place(relx=0.5, rely=0, relwidth=1, relheight=0.06, anchor='n')
propeller = PhotoImage(file='propellersmall.ppm')
propelabel = Label(lower_frame7, bg='white', image=propeller)
propelabel.place(relx=0.5, rely=0.06, relwidth=1, relheight=0.55, anchor='n')
labelTASK2 = Label(lower_frame7, text="2) Торт"
":Нарисуйте торт для учителя информатики.\n'Треугольник' должен пропадать при наведении курсора.'\n"
"'Кто сьел мой двумерный массив?!'", font=("Georgia", 12, ), bg='white')
labelTASK2.place(relx=0.5, rely=0.6, relwidth=1, relheight=0.1, anchor='n')
tort = PhotoImage(file='tortsmall.ppm')
tortlabel = Label(lower_frame7, bg='white', image=tort)
tortlabel.place(relx=0.5, rely=0.69, relwidth=1, relheight=0.35, anchor='n')
tab_control.add(tab8, text='Анимации')
background_image8 = PhotoImage(file='background.ppm')
background_label8 = Label(tab8, image=background_image8)
background_label8.place(relwidth=1, relheight=1)
lower_frame8 = Frame(tab8, bg="lightblue", bd=10)
lower_frame8.place(relx=0.5, rely=0.5, relwidth=0.59, relheight=0.5, anchor='n')
labelanimation = Label(lower_frame8, text='Методы, создающие фигуры на холсте, возвращают численные идентификаторы \n'
'этих объектов, которые можно присвоить переменным,\n через которые позднее '
'обращаться к созданным фигурам. \n Основной шаблон для анимации с Tkinter – написать функцию, которая рисует один кадр. \n Затем используйте что-то подобное, чтобы называть его через регулярные интервалы: \n'
" def animate(self): self.draw_one_frame() self.after(100, self.animate) \n"
"Как только вы вызываете эту функцию один раз,\n она будет продолжать "
'рисовать кадры со скоростью десять в секунду – один раз каждые 100 '
"миллисекунд.\n В следующей вкладке разберём это подробно", font=("Times New Roman", 11),
bg="white")
labelanimation.place(relwidth=1, relheight=1)
WIDTH = 350
HEIGHT = 300
SIZE = 50
canvas = Canvas(tab8, width=WIDTH, height=HEIGHT, bg="blue")
canvas.pack()
color = '#6098cd'
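# Descriptive note for the animation demo below: Ball wraps a canvas oval that
# bounces off the canvas edges by flipping its speed, and cycle() re-schedules
# itself every 40 ms, raising the white "bg" rectangle over both balls, moving
# one of them two steps and then raising the other above the background, so the
# two balls appear to take turns on screen.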
class Ball:
def __init__(self, tag):
self.shape = canvas.create_oval(0, 0, SIZE, SIZE, fill=color, tags=tag)
self.speedx = 10
self.speedy = 15
self.active = True
def ball_update(self):
canvas.move(self.shape, self.speedx, self.speedy)
pos = canvas.coords(self.shape)
if pos[2] >= WIDTH or pos[0] <= 0:
self.speedx *= -1
if pos[3] >= HEIGHT or pos[1] <= 0:
self.speedy *= -1
global switcher
switcher = True
def cycle():
global switcher
canvas.tag_raise("bg")
if switcher:
ball2.ball_update()
ball2.ball_update()
canvas.tag_raise("ball")
else:
ball.ball_update()
ball.ball_update()
canvas.tag_raise("ball2")
tab8.update_idletasks()
switcher = not switcher
tab8.after(40, cycle)
bg = canvas.create_rectangle(0, 0, WIDTH+1, HEIGHT+1, fill="white", tags="bg")
ball = Ball("ball")
ball.ball_update()
ball2 = Ball("ball2")
tab8.after(0, cycle)
tab_control.add(tab9, text='Анимации 2')
background_image9 = PhotoImage(file='background.ppm')
background_label9 = Label(tab9, image=background_image9)
background_label9.place(relwidth=1, relheight=1)
lower_frame9 = Frame(tab9, bg="lightblue", bd=10)
lower_frame9.place(relx=0.5, rely=0.10, relwidth=0.75, relheight=0.75, anchor='n')
labelanimation2 = Label(lower_frame9, text='Рассмотрим следующий код, отвечающий за создание анимации и после этого попрактикуемся. Собственно сам код: \n', font=("Times New Roman", 11),
bg="white")
labelanimation2.place(relx=0.5, rely=0, relwidth=1, relheight=0.06, anchor='n')
code_image8 = PhotoImage(file='sharcode.ppm')
code_label8 = Label(lower_frame9, bg='white', image=code_image8)
code_label8.place(relx=0.5, rely=0.06, relwidth=1, relheight=0.6, anchor='n')
labelanimation3 = Label(lower_frame9, text='В данном коде создаётся шар, который двигается. Вначале происходит '
'создание холста Canvas и его "упаковка"\n, а также объекта ball, '
'с помощью примитива круг. После всего этого создаётся функция, которая '
'анимирует данный объект, рассмотрим её очень подробно \n '
'def motion (): - создание функции с названием motion \n'
'c.move(ball, 1, 0) - движение объекта на c. В самом начале при создании \n '
'холста мы назвали его c, следовательно при указании движения на нём мы \n'
'пишем c. move - декоратор, который указывает, что делать. В нашем случае \n'
'двигаться. Но чему? В скобках указываем объект движения и его координаты \n'
'движения x, y. if c.coords(ball)[2] < 300, отвечает за то, чтобы шар \n'
'двигался по координате X меньше 300. root.after(10, motion) - Частота обновлений окна в милисекундах. \n'
'После чего с помощью motion(), запускаем нашу функцию и само окно tkinter.', font=("Times New Roman", 10),
bg="white")
labelanimation3.place(relx=0.5, rely=0.65, relwidth=1, relheight=0.35, anchor='n')
tab_control.add(tab10, text='Практикум 2')
background_image10 = PhotoImage(file='background.ppm')
background_label10 = Label(tab10, image=background_image10)
background_label10.place(relwidth=1, relheight=1)
# Practicum 2: the train exercise
c = Canvas(tab10, width=300, height=200, bg="white")
c.place(relx=0.5, rely=0.65, relwidth=0.15, relheight=0.2, anchor='n')
vagon1 = c.create_rectangle(0, 50, 60, 90, fill='blue')
line = c.create_line(60, 70, 70, 70, fill='brown', width=6)
vagon2 = c.create_rectangle(70, 50, 130, 90, fill='blue')
relsa = c.create_line(0, 90, 300, 90, fill='gray', width=3)
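# Descriptive note: motion() nudges the two wagons and their coupling one pixel
# to the right every 20 ms and keeps re-scheduling itself until the first
# wagon's left edge reaches x = 50, at which point the train stops.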
def motion():
c.move(vagon1, 1, 0)
c.move(vagon2, 1, 0)
c.move(line, 1, 0)
if c.coords(vagon1)[0] < 50:
tab10.after(20, motion)
motion()
tab_control.pack(expand=10, fill='both', padx=5, pady=5)
lower_frame9 = Frame(tab10, bg="lightblue", bd=10)
lower_frame9.place(relx=0.5, rely=0.35, relwidth=0.45, relheight=0.25, anchor='n')
labelpractic2 = Label(lower_frame9, text="Анимируйте данный скетч поезда! Исходный код создания самого скетча без холста: \n vagon1 = c.create_rectangle(0, 50, 60, 90, fill='blue'\n"
"line = c.create_line(60, 70, 70, 70, fill='brown', width=6) \n"
"vagon2 = c.create_rectangle(70, 50, 130, 90, fill='blue') \n"
"relsa = c.create_line(0, 90, 300, 90, fill='gray', width=3) \n", bg='white', font=("Times New Roman", 11))
labelpractic2.place(relwidth=1, relheight=1)
Button(window, text='© Dedov Georgiy 2019').pack(fill='x')
window.resizable(True, True)
window.mainloop()
| 51.31405 | 251 | 0.628872 | [
"Unlicense"
] | Clonexy700/edu54book | edu54book/edu54bookSizeAuto.py | 22,153 | Python |
from onconet.models.factory import load_model, RegisterModel, get_model_by_name
import math
import torch
import torch.nn as nn
import pdb
import numpy as np
@RegisterModel("mirai_full")
class MiraiFull(nn.Module):
def __init__(self, args):
super(MiraiFull, self).__init__()
self.args = args
if args.img_encoder_snapshot is not None:
self.image_encoder = load_model(args.img_encoder_snapshot, args, do_wrap_model=False)
else:
self.image_encoder = get_model_by_name('custom_resnet', False, args)
if hasattr(self.args, "freeze_image_encoder") and self.args.freeze_image_encoder:
for param in self.image_encoder.parameters():
param.requires_grad = False
self.image_repr_dim = self.image_encoder._model.args.img_only_dim
if args.transformer_snapshot is not None:
self.transformer = load_model(args.transformer_snapshot, args, do_wrap_model=False)
else:
args.precomputed_hidden_dim = self.image_repr_dim
self.transformer = get_model_by_name('transformer', False, args)
args.img_only_dim = self.transformer.args.transfomer_hidden_dim
def forward(self, x, risk_factors=None, batch=None):
B, C, N, H, W = x.size()
x = x.transpose(1,2).contiguous().view(B*N, C, H, W)
risk_factors_per_img = (lambda N, risk_factors: [factor.expand( [N, *factor.size()]).contiguous().view([-1, factor.size()[-1]]).contiguous() for factor in risk_factors])(N, risk_factors) if risk_factors is not None else None
_, img_x, _ = self.image_encoder(x, risk_factors_per_img, batch)
img_x = img_x.view(B, N, -1)
img_x = img_x[:,:,: self.image_repr_dim]
logit, transformer_hidden, activ_dict = self.transformer(img_x, risk_factors, batch)
return logit, transformer_hidden, activ_dict
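
# A minimal, hypothetical sketch (not part of the original model) of the tensor
# reshaping done at the top of forward(): a batch of B exams with N images each
# is flattened so the image encoder sees one image per row. Only torch is needed.
if __name__ == "__main__":
    B, C, N, H, W = 2, 3, 4, 8, 8  # tiny dummy sizes
    x = torch.zeros(B, C, N, H, W)
    flat = x.transpose(1, 2).contiguous().view(B * N, C, H, W)
    print(flat.shape)  # torch.Size([8, 3, 8, 8])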
| 47.15 | 233 | 0.68929 | [
"MIT"
] | NkwamPhilip/Mirai | onconet/models/mirai_full.py | 1,886 | Python |
import unittest
from facial_recog.app import *
from .test_config import test_run_count, seed, success_perc
from .test_util import *
class TestFR(unittest.TestCase):
subject_names = dict()
subject_classes = dict()
def setUp(self):
random.seed(seed)
create_app_dirs()
setup_logger()
logging.debug('Seed is %s', seed)
# only for super strict testing
# clear_fdb()
prepare_fdb()
self.subject_names, self.subject_classes = create_sample()
logging.info('Subject names: %s', self.subject_names)
logging.info('Subject classes are: %s', self.subject_classes)
recreate_db()
populate_db(self.subject_classes)
logging.info('New db created')
clear_dataset()
copy_dataset(subject_names=self.subject_names)
logging.info('Training Dataset created')
clear_recognizers()
for class_id in get_all_classes():
train(class_id=class_id)
logging.info('Classifiers trained')
def test_fr(self):
success = 0
for _ in range(test_run_count):
random_class = random.choice(get_all_classes())
random_subject = random.choice(get_class_subjects(random_class))
random_image = random.choice(
get_images_for_subject(subject_name=self.subject_names[random_subject]))
logging.info('Testing subject %s in class %s with image %s', random_subject, random_class, random_image)
if predict(img=path_to_img(random_image), class_id=random_class) == random_subject:
success += 1
logging.info('Test success')
else:
logging.warning('Test failed')
self.assertGreaterEqual(success, int(success_perc * test_run_count))
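
# Not part of the original file: a conventional entry point so this TestCase can
# be run directly (e.g. `python -m facial_recog.tests.test_app`, module path
# assumed from the repository layout).
if __name__ == '__main__':
    unittest.main()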
| 30.864407 | 116 | 0.645799 | [
"MIT"
] | MePsyDuck/amfr | facial_recog/tests/test_app.py | 1,821 | Python |
version_info = (1, 5, 0, 'dev0')
__version__ = '.'.join(map(str, version_info))
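
# Quick sanity check (not part of the original module): the tuple above renders
# as the version string "1.5.0.dev0".
if __name__ == "__main__":
    print(__version__)  # -> 1.5.0.dev0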
| 26.666667 | 46 | 0.65 | [
"MIT"
] | hwansysgit/qtpy | qtpy/_version.py | 80 | Python |
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# Revisions copyright 2008-2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" CircularDrawer module
Provides:
o CircularDrawer - Drawing object for circular diagrams
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.graphics.shapes import *
from reportlab.lib import colors
from reportlab.pdfbase import _fontdata
from reportlab.graphics.shapes import ArcPath
# GenomeDiagram imports
from _AbstractDrawer import AbstractDrawer, draw_polygon, intermediate_points
from _FeatureSet import FeatureSet
from _GraphSet import GraphSet
from math import ceil, pi, cos, sin, asin
class CircularDrawer(AbstractDrawer):
""" CircularDrawer(AbstractDrawer)
Inherits from:
o AbstractDrawer
Provides:
Methods:
o __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1) Called on instantiation
o set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
o set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
o set_bounds(self, start, end) Set the bounds for the elements to be
drawn
o is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
o __len__(self) Returns the length of sequence that will be drawn
o draw(self) Place the drawing elements on the diagram
o init_fragments(self) Calculate information
about sequence fragment locations on the drawing
o set_track_heights(self) Calculate information about the offset of
each track from the fragment base
o draw_test_tracks(self) Add lines demarcating each track to the
drawing
o draw_track(self, track) Return the contents of the passed track as
drawing elements
o draw_scale(self, track) Return a scale for the passed track as
drawing elements
o draw_greytrack(self, track) Return a grey background and superposed
label for the passed track as drawing
elements
o draw_feature_set(self, set) Return the features in the passed set as
drawing elements
o draw_feature(self, feature) Return a single feature as drawing
elements
o get_feature_sigil(self, feature, x0, x1, fragment) Return a single
feature as its sigil in drawing elements
o draw_graph_set(self, set) Return the data in a set of graphs as
drawing elements
o draw_line_graph(self, graph) Return the data in a graph as a line
graph in drawing elements
o draw_heat_graph(self, graph) Return the data in a graph as a heat
graph in drawing elements
o draw_bar_graph(self, graph) Return the data in a graph as a bar
graph in drawing elements
o canvas_angle(self, base) Return the angle, and cos and sin of
that angle, subtended by the passed
base position at the diagram center
o draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color) Return a drawable element describing an arc
Attributes:
    o tracklines        Boolean for whether to draw lines delineating tracks
o pagesize Tuple describing the size of the page in pixels
o x0 Float X co-ord for leftmost point of drawable area
o xlim Float X co-ord for rightmost point of drawable area
o y0 Float Y co-ord for lowest point of drawable area
o ylim Float Y co-ord for topmost point of drawable area
o pagewidth Float pixel width of drawable area
o pageheight Float pixel height of drawable area
o xcenter Float X co-ord of center of drawable area
o ycenter Float Y co-ord of center of drawable area
o start Int, base to start drawing from
o end Int, base to stop drawing at
o length Size of sequence to be drawn
o track_size Float (0->1) the proportion of the track height to
draw in
o drawing Drawing canvas
o drawn_tracks List of ints denoting which tracks are to be drawn
o current_track_level Int denoting which track is currently being
drawn
o track_offsets Dictionary of number of pixels that each track top,
center and bottom is offset from the base of a
fragment, keyed by track
o sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
"""
def __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1):
""" __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
            o circular      Boolean flag to show whether the passed sequence is
                            circular or not
"""
# Use the superclass' instantiation method
AbstractDrawer.__init__(self, parent, pagesize, orientation,
x, y, xl, xr, yt, yb, start, end,
tracklines)
# Useful measurements on the page
self.track_size = track_size
if circular == False: # Determine the proportion of the circumference
self.sweep = 0.9 # around which information will be drawn
else:
self.sweep = 1
def set_track_heights(self):
""" set_track_heights(self)
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number
"""
top_track = max(self.drawn_tracks) # The 'highest' track to draw
trackunit_sum = 0 # Holds total number of 'units' taken up by all tracks
trackunits = {} # Holds start and end units for each track keyed by track number
heightholder = 0 # placeholder variable
for track in range(1, top_track+1): # track numbers to 'draw'
try:
trackheight = self._parent[track].height # Get track height
except:
trackheight = 1 # ...or default to 1
trackunit_sum += trackheight # increment total track unit height
trackunits[track] = (heightholder, heightholder+trackheight)
heightholder += trackheight # move to next height
trackunit_height = 0.5*min(self.pagewidth, self.pageheight)/trackunit_sum
# Calculate top and bottom radii for each track
self.track_radii = {} # The inner, outer and center radii for each track
track_crop = trackunit_height*(1-self.track_size)/2. # 'step back' in pixels
for track in trackunits:
top = trackunits[track][1]*trackunit_height-track_crop
btm = trackunits[track][0]*trackunit_height+track_crop
ctr = btm+(top-btm)/2.
self.track_radii[track] = (btm, ctr, top)
def draw(self):
""" draw(self)
Draw a circular diagram of the stored data
"""
# Instantiate the drawing canvas
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = [] # holds feature elements
feature_labels = [] # holds feature labels
greytrack_bgs = [] # holds track background
greytrack_labels = [] # holds track foreground labels
scale_axes = [] # holds scale axes
scale_labels = [] # holds scale axis labels
# Get tracks to be drawn and set track sizes
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
# Go through each track in the parent (if it is to be drawn) one by
# one and collate the data as drawing elements
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
gbgs, glabels = self.draw_greytrack(track) # Greytracks
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
features, flabels = self.draw_track(track) # Features and graphs
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
axes, slabels = self.draw_scale(track) # Scale axes
scale_axes.append(axes)
scale_labels.append(slabels)
# Groups listed in order of addition to page (from back to front)
# Draw track backgrounds
# Draw features and graphs
# Draw scale axes
# Draw scale labels
# Draw feature labels
# Draw track labels
element_groups = [greytrack_bgs, feature_elements,
scale_axes, scale_labels,
feature_labels, greytrack_labels
]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines: # Draw test tracks over top of diagram
self.draw_test_tracks()
def draw_track(self, track):
""" draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels)
"""
track_elements = [] # Holds elements for features and graphs
track_labels = [] # Holds labels for features and graphs
# Distribution dictionary for dealing with different set types
set_methods = {FeatureSet: self.draw_feature_set,
GraphSet: self.draw_graph_set
}
for set in track.get_sets(): # Draw the feature or graph sets
elements, labels = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return track_elements, track_labels
def draw_feature_set(self, set):
""" draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements)
"""
#print 'draw feature set'
feature_elements = [] # Holds diagram elements belonging to the features
label_elements = [] # Holds diagram elements belonging to feature labels
# Collect all the elements for the feature set
for feature in set.get_features():
if self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end):
features, labels = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return feature_elements, label_elements
def draw_feature(self, feature):
""" draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements)
"""
feature_elements = [] # Holds drawable elements for a single feature
label_elements = [] # Holds labels for a single feature
if feature.hide: # Don't show feature: return early
return feature_elements, label_elements
# A single feature may be split into subfeatures, so loop over them
for locstart, locend in feature.locations:
# Get sigil for the feature/ each subfeature
feature_sigil, label = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if label is not None: # If there's a label
label_elements.append(label)
return feature_elements, label_elements
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
""" get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)
o feature Feature object
o locstart The start position of the feature
o locend The end position of the feature
Returns a drawable indicator of the feature, and any required label
for it
"""
# Establish the co-ordinates for the sigil
btm, ctr, top = self.track_radii[self.current_track_level]
startangle, startcos, startsin = self.canvas_angle(locstart)
endangle, endcos, endsin = self.canvas_angle(locend)
midangle, midcos, midsin = self.canvas_angle(float(locend+locstart)/2)
# Distribution dictionary for various ways of drawing the feature
# Each method takes the inner and outer radii, the start and end angle
# subtended at the diagram center, and the color as arguments
draw_methods = {'BOX': self._draw_arc,
'ARROW': self._draw_arc_arrow,
}
# Get sigil for the feature, location dependent on the feature strand
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
#Support for clickable links... needs ReportLab 2.4 or later
#which added support for links in SVG output.
if hasattr(feature, "url") :
kwargs["hrefURL"] = feature.url
kwargs["hrefTitle"] = feature.name
if feature.color == colors.white:
border = colors.black
else:
border = feature.color
if feature.strand == 1:
sigil = method(ctr, top, startangle, endangle, feature.color,
border, orientation='right', **kwargs)
elif feature.strand == -1:
sigil = method(btm, ctr, startangle, endangle, feature.color,
border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color,
border, **kwargs)
if feature.label: # Feature needs a label
label = String(0, 0, feature.name.strip(),
fontName=feature.label_font,
fontSize=feature.label_size,
fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = startangle + 0.5 * pi # Make text radial
sinval, cosval = startsin, startcos
if feature.strand != -1:
# Feature is on top, or covers both strands
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+top*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+top*cosval)
else:
# Feature on bottom strand
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
else:
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+btm*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+btm*cosval)
else:
labelgroup = None
#if locstart > locend:
# print locstart, locend, feature.strand, sigil, feature.name
#print locstart, locend, feature.name
return sigil, labelgroup
def draw_graph_set(self, set):
""" draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels)
"""
#print 'draw graph set'
elements = [] # Holds graph elements
# Distribution dictionary for how to draw the graph
style_methods = {'line': self.draw_line_graph,
'heat': self.draw_heat_graph,
'bar': self.draw_bar_graph
}
for graph in set.get_graphs():
#print graph.name
elements += style_methods[graph.style](graph)
return elements, []
def draw_line_graph(self, graph):
""" draw_line_graph(self, graph, center) -> [element, element,...]
o graph GraphData object
Returns a line graph as a list of drawable elements
"""
#print '\tdraw_line_graph'
line_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
# Start from first data point
pos, val = data[0]
lastangle, lastcos, lastsin = self.canvas_angle(pos)
# We calculate the track height
posheight = trackheight*(val-midval)/resolution + ctr
lastx = self.xcenter+posheight*lastsin # start xy coords
lasty = self.ycenter+posheight*lastcos
for pos, val in data:
posangle, poscos, possin = self.canvas_angle(pos)
posheight = trackheight*(val-midval)/resolution + ctr
x = self.xcenter+posheight*possin # next xy coords
y = self.ycenter+posheight*poscos
line_elements.append(Line(lastx, lasty, x, y,
strokeColor = graph.poscolor,
strokeWidth = graph.linewidth))
lastx, lasty, = x, y
return line_elements
def draw_bar_graph(self, graph):
""" draw_bar_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for a bar graph of the passed
Graph object
"""
#print '\tdraw_bar_graph'
# At each point contained in the graph data, we draw a vertical bar
# from the track center to the height of the datapoint value (positive
# values go up in one color, negative go down in the alternative
# color).
bar_elements = []
# Set the number of pixels per unit for the data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Convert data into 'binned' blocks, covering half the distance to the
# next data point on either side, accounting for the ends of fragments
# and tracks
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
if resolution == 0:
resolution = trackheight
# Create elements for the bar graph based on newdata
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
barval = trackheight*(val-midval)/resolution
if barval >=0:
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
# Draw bar
bar_elements.append(self._draw_arc(ctr, ctr+barval, pos0angle,
pos1angle, barcolor))
return bar_elements
def draw_heat_graph(self, graph):
""" draw_heat_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for the heat graph
"""
#print '\tdraw_heat_graph'
# At each point contained in the graph data, we draw a box that is the
# full height of the track, extending from the midpoint between the
# previous and current data points to the midpoint between the current
# and next data points
heat_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
midval = (maxval + minval)/2. # mid is the value at the X-axis
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-btm)
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Create elements on the graph, indicating a large positive value by
# the graph's poscolor, and a large negative value by the graph's
# negcolor attributes
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
# Calculate the heat color, based on the differential between
# the value and the median value
heat = colors.linearlyInterpolatedColor(graph.poscolor,
graph.negcolor,
maxval, minval, val)
# Draw heat box
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle,
heat, border=heat))
return heat_elements
def draw_scale(self, track):
""" draw_scale(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Returns a tuple of (list of elements in the scale, list of labels
in the scale)
"""
scale_elements = [] # holds axes and ticks
scale_labels = [] # holds labels
if not track.scale: # no scale required, exit early
return [], []
# Get track locations
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-ctr)
# X-axis
if self.sweep < 1:
#Draw an arc, leaving out the wedge
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#in degrees.
p.addArc(self.xcenter, self.ycenter, ctr,
startangledegrees=90-360*self.sweep,
endangledegrees=90)
scale_elements.append(p)
del p
else:
#Draw a full circle
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=track.scale_color,
fillColor=None))
if track.scale_ticks: # Ticks are required on the scale
# Draw large ticks
#I want the ticks to be consistently positioned relative to
#the start of the sequence (position 0), not relative to the
#current viewpoint (self.start and self.end)
ticklen = track.scale_largeticks * trackheight
tickiterval = int(track.scale_largetick_interval)
#Note that we could just start the list of ticks using
#range(0,self.end,tickinterval) and the filter out the
#ones before self.start - but this seems wasteful.
#Using tickiterval * (self.start/tickiterval) is a shortcut.
largeticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in largeticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_largetick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Draw small ticks
ticklen = track.scale_smallticks * trackheight
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in smallticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_smalltick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Check to see if the track contains a graph - if it does, get the
# minimum and maximum values, and put them on the scale Y-axis
# at 60 degree intervals, ordering the labels by graph_id
if track.axis_labels:
for set in track.get_sets():
if set.__class__ is GraphSet:
# Y-axis
for n in xrange(7):
angle = n * 1.0471975511965976
ticksin, tickcos = sin(angle), cos(angle)
x0, y0 = self.xcenter+btm*ticksin, self.ycenter+btm*tickcos
x1, y1 = self.xcenter+top*ticksin, self.ycenter+top*tickcos
scale_elements.append(Line(x0, y0, x1, y1,
strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
minval, maxval = quartiles[0], quartiles[4]
if graph.center is None:
midval = (maxval + minval)/2.
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
graph_label_mid.append("%.3f" % midval)
else:
diff = max((graph.center-minval),
(maxval-graph.center))
minval = graph.center-diff
maxval = graph.center+diff
midval = graph.center
graph_label_mid.append("%.3f" % midval)
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
xmid, ymid = (x0+x1)/2., (y0+y1)/2.
for limit, x, y, in [(graph_label_min, x0, y0),
(graph_label_max, x1, y1),
(graph_label_mid, xmid, ymid)]:
label = String(0, 0, ";".join(limit),
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, -ticksin,
ticksin, tickcos,
x, y)
scale_labels.append(labelgroup)
return scale_elements, scale_labels
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
""" draw_tick(self, tickpos, ctr, ticklen) -> (element, element)
o tickpos Int, position of the tick on the sequence
o ctr Float, Y co-ord of the center of the track
o ticklen How long to draw the tick
o track Track, the track the tick is drawn on
o draw_label Boolean, write the tick label?
Returns a drawing element that is the tick on the scale
"""
# Calculate tick co-ordinates
tickangle, tickcos, ticksin = self.canvas_angle(tickpos)
x0, y0 = self.xcenter+ctr*ticksin, self.ycenter+ctr*tickcos
x1, y1 = self.xcenter+(ctr+ticklen)*ticksin, self.ycenter+(ctr+ticklen)*tickcos
# Calculate height of text label so it can be offset on lower half
# of diagram
# LP: not used, as not all fonts have ascent_descent data in reportlab.pdfbase._fontdata
#label_offset = _fontdata.ascent_descent[track.scale_font][0]*\
# track.scale_fontsize/1000.
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label: # Put tick position on as label
if track.scale_format == 'SInt':
if tickpos >= 1000000:
tickstring = str(tickpos//1000000) + " Mbp"
elif tickpos >= 1000:
tickstring = str(tickpos//1000) + " Kbp"
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
label = String(0, 0, tickstring, # Make label string
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
if tickangle > pi:
label.textAnchor = 'end'
# LP: This label_offset depends on ascent_descent data, which is not available for all
# fonts, so has been deprecated.
#if 0.5*pi < tickangle < 1.5*pi:
# y1 -= label_offset
labelgroup = Group(label)
labelgroup.transform = (1,0,0,1, x1, y1)
else:
labelgroup = None
return tick, labelgroup
def draw_test_tracks(self):
""" draw_test_tracks(self)
            Draw blue lines indicating tracks to be drawn, with a green line
            down the center.
"""
#print 'drawing test tracks'
# Add lines only for drawn tracks
for track in self.drawn_tracks:
btm, ctr, top = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top,
strokeColor=colors.blue,
fillColor=None)) # top line
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=colors.green,
fillColor=None)) # middle line
self.drawing.add(Circle(self.xcenter, self.ycenter, btm,
strokeColor=colors.blue,
fillColor=None)) # bottom line
def draw_greytrack(self, track):
""" draw_greytrack(self)
o track Track object
Put in a grey background to the current track, if the track
specifies that we should
"""
greytrack_bgs = [] # Holds track backgrounds
greytrack_labels = [] # Holds track foreground labels
if not track.greytrack: # No greytrack required, return early
return [], []
# Get track location
btm, ctr, top = self.track_radii[self.current_track_level]
# Make background
if self.sweep < 1:
#Make a partial circle, a large arc box
#This method assumes the correct center for us.
bg = self._draw_arc(btm, top, 0, 2*pi*self.sweep,
colors.Color(0.96, 0.96, 0.96))
else:
#Make a full circle (using a VERY thick linewidth)
bg = Circle(self.xcenter, self.ycenter, ctr,
strokeColor = colors.Color(0.96, 0.96, 0.96),
fillColor=None, strokeWidth=top-btm)
greytrack_bgs.append(bg)
if track.greytrack_labels: # Labels are required for this track
labelstep = self.length//track.greytrack_labels # label interval
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, # Add a new label at
fontName=track.greytrack_font, # each interval
fontSize=track.greytrack_fontsize,
fillColor=track.greytrack_fontcolor)
theta, costheta, sintheta = self.canvas_angle(pos)
x,y = self.xcenter+btm*sintheta, self.ycenter+btm*costheta # start text halfway up marker
labelgroup = Group(label)
labelangle = self.sweep*2*pi*(pos-self.start)/self.length - pi/2
if theta > pi:
label.textAnchor = 'end' # Anchor end of text to inner radius
labelangle += pi # and reorient it
cosA, sinA = cos(labelangle), sin(labelangle)
labelgroup.transform = (cosA, -sinA, sinA,
cosA, x, y)
if not self.length-x <= labelstep: # Don't overrun the circle
greytrack_labels.append(labelgroup)
return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
""" canvas_angle(self, base) -> (float, float, float)
"""
angle = self.sweep*2*pi*(base-self.start)/self.length
return (angle, cos(angle), sin(angle))
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color, border=None, colour=None, **kwargs):
""" draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)
-> Group
o inner_radius Float distance of inside of arc from drawing center
o outer_radius Float distance of outside of arc from drawing center
o startangle Float angle subtended by start of arc at drawing center
(in radians)
o endangle Float angle subtended by end of arc at drawing center
(in radians)
o color colors.Color object for arc (overridden by backwards
compatible argument with UK spelling, colour).
Returns a closed path object describing an arced box corresponding to
the passed values. For very small angles, a simple four sided
polygon is used.
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
if abs(float(endangle - startangle))>.01:
# Wide arc, must use full curves
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
strokewidth=0)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, inner_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.closePath()
return p
else:
#Cheat and just use a four sided polygon.
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x3,y3 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x4,y4 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color, border)
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
color, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
"""Draw an arrow along an arc."""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
#if orientation == 'right':
# startangle, endangle = min(startangle, endangle), max(startangle, endangle)
#elif orientation == 'left':
# startangle, endangle = max(startangle, endangle), min(startangle, endangle)
#else:
startangle, endangle = min(startangle, endangle), max(startangle, endangle)
if orientation != "left" and orientation != "right":
raise ValueError("Invalid orientation %s, should be 'left' or 'right'" \
% repr(orientation))
angle = float(endangle - startangle) # angle subtended by arc
middle_radius = 0.5*(inner_radius+outer_radius)
boxheight = outer_radius - inner_radius
shaft_height = boxheight*shaft_height_ratio
shaft_inner_radius = middle_radius - 0.5*shaft_height
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
if angle < 0:
headangle_delta *= -1 #reverse it
if orientation=="right":
headangle = endangle-headangle_delta
else:
headangle = startangle+headangle_delta
if startangle <= endangle:
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert startangle <= headangle <= endangle \
or endangle <= headangle <= startangle, \
(startangle, headangle, endangle, angle)
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
headcos, headsin = cos(headangle), sin(headangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
if 0.5 >= abs(angle) and abs(headangle_delta) >= abs(angle):
#If the angle is small, and the arrow is all head,
#cheat and just use a triangle.
if orientation=="right":
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
x3,y3 = (x0+middle_radius*endsin, y0+middle_radius*endcos)
else:
x1,y1 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x2,y2 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x3,y3 = (x0+middle_radius*startsin, y0+middle_radius*startcos)
#return draw_polygon([(x1,y1),(x2,y2),(x3,y3)], color, border,
# stroke_line_join=1)
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
strokeLineJoin=1, #1=round, not mitre!
strokewidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
reverse=False)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
            #TODO - two straight lines are only a good approximation for a small
            #head angle; in general curved lines will be needed here:
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
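
# A minimal sketch (not part of Biopython) of the angle convention implemented by
# canvas_angle(): positions are mapped clockwise from the vertical, so with a full
# sweep a base halfway along the sequence subtends pi radians at the centre. It
# only exercises the arithmetic; the guard keeps it from running on import.
if __name__ == "__main__":
    length, start, sweep = 1000, 0, 1
    base = 500
    angle = sweep * 2 * pi * (base - start) / length
    print(angle, cos(angle), sin(angle))  # ~3.14159, -1.0, ~0.0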
| 44.79615 | 112 | 0.547479 | [
"BSD-2-Clause"
] | LyonsLab/coge | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | 51,202 | Python |
import plotly.graph_objects as go
import pandas as pd
from .Colors import COLOR_DISCRETE_MAP
from Classification import CATEGORIES
def all_categories_grouping(row: pd.Series) -> str:
"""
Merge Category, Fuel and segment to a single string for unique categorization
"""
if row['Fuel'] == 'Battery Electric':
return row['Category'] + ' / ' + row['Fuel']
else:
try:
result = row['Fuel'] + ' / ' + row['Segment'] + ' / ' + row['Euro Standard']
except: # For Off Road type with no Segment nor Euro Standard
result = row['Fuel']
return result
def activity_horizontal_bar_chart(stock_and_mileage_df: pd.DataFrame, output_folder):
"""
Horizontal bar chart representing mean activity and other activities per unique categorization
:param stock_and_mileage_df: Dataframe of the vehicles registration list
:param output_folder: output folder name where to store resulting chart
:return: an html file containing the horizontal bar chart of the mean activity
"""
data = stock_and_mileage_df.copy()
# Delete off road data
data = data[data['Category'] != 'Off Road']
# Create single column classification
data['segmentation'] = data.apply(lambda row: all_categories_grouping(row), axis=1)
horizontal_plot = go.Figure()
# Add Activity statistics and stock traces
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Max_Activity'], mode='markers',
name='Activitat màxima', marker_color='rgb(288, 26, 28)'
))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Min_Activity'], mode='markers',
name='Activitat mínima', marker_color='rgb(229, 196, 148)'
))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Std_Activity'], mode='markers',
name="Desviació standard de l'activitat", marker=dict(
color='rgb(800, 800, 800)',
opacity=0)
))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Stock'], mode='markers',
name="Estoc", marker=dict(
color='rgb(800, 800, 800)',
opacity=0)
))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Mean_Lifetime_Activity'], mode='markers',
name="Lifetime cumulative activity mitja", marker=dict(
color='rgb(800, 800, 800)',
opacity=0)
))
    # For each category, add the mean activity bar chart (to differentiate by the same colors as the stock distribution pie chart)
for category in CATEGORIES:
horizontal_plot.add_trace(go.Bar(
y=data[data['Category'] == category]['segmentation'], x=data[data['Category'] == category]['Mean_Activity'],
orientation='h', marker_color=COLOR_DISCRETE_MAP[category],
name=f'Activitat mitjana {category}'
))
# Update plot information
horizontal_plot.update_layout(
title="Activitat mitjana anual segons classificació del parc de vehicles d'Andorra",
title_x=0.5,
height=4000,
width=1500,
template='plotly_white',
xaxis_title='Activitat mitja (km/any)',
yaxis_title='Tipologia de vehicle',
hovermode="y unified",
hoverlabel=dict(namelength=100),
xaxis_range=[0, stock_and_mileage_df['Max_Activity'].max()*1.05],
xaxis=dict(
tickmode='array',
tickvals=[0, 5000, 15000, 25000, 50000, 100000, 150000, 200000],
ticktext=['0', '5k', '15k', '25k', '50k', '100k', '150k', '200k'])
)
horizontal_plot.update_xaxes(showgrid=True, zeroline=True)
horizontal_plot.show()
# Save plot to html file
filename = output_folder + "Activitat mitjana anual segons classificació del parc de vehicles d'Andorra.html"
horizontal_plot.write_html(filename) | 44.882979 | 120 | 0.605357 | [
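
# A minimal, hypothetical sketch of how all_categories_grouping() labels a single
# registration row; the column values below are invented examples. The guard keeps
# it from running when the module is imported by the rest of the pipeline (the
# relative imports above mean it is normally not executed as a script anyway).
if __name__ == "__main__":
    example = pd.Series({
        'Category': 'Passenger Cars',
        'Fuel': 'Petrol',
        'Segment': 'Medium',
        'Euro Standard': 'Euro 6',
    })
    print(all_categories_grouping(example))  # -> 'Petrol / Medium / Euro 6'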
"MIT"
] | actuatech/fuel-tourism | Graphing/MeanActivityHorizontalBarChart.py | 4,224 | Python |
# Make a JadenCase string
def solution(s):
s = s.lower()
changed_words = []
print(s.split(" "))
for word in s.split(" "):
if len(word) == 0:
changed_words.append(word)
continue
elif len(word) == 1:
word = word[0].upper()
else:
word = word[0].upper() + word[1:]
changed_words.append(word)
print(changed_words)
answer = ' '.join(changed_words)
return answer
'''
Grading started.
Accuracy tests
Test 1  > passed (0.02ms, 10.3MB)
Test 2  > passed (0.02ms, 10.1MB)
Test 3  > passed (0.02ms, 10.2MB)
Test 4  > passed (0.02ms, 10.1MB)
Test 5  > passed (0.03ms, 10.2MB)
Test 6  > passed (0.02ms, 10.1MB)
Test 7  > passed (0.03ms, 10.2MB)
Test 8  > passed (0.01ms, 10.2MB)
Test 9  > passed (0.02ms, 10.2MB)
Test 10 > passed (0.01ms, 10.1MB)
Test 11 > passed (0.03ms, 10.2MB)
Test 12 > passed (0.02ms, 10.2MB)
Test 13 > passed (0.02ms, 10.2MB)
Test 14 > passed (0.02ms, 10.2MB)
Test 15 > passed (0.03ms, 10.2MB)
Test 16 > passed (0.01ms, 10.2MB)
Grading result
Accuracy: 100.0
Total: 100.0 / 100.0
'''
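
# Hand-checked examples (inputs assumed, not taken from the grader); note the
# function also prints its intermediate lists as a side effect:
if __name__ == "__main__":
    print(solution("3people unFollowed me"))  # -> 3people Unfollowed Me
    print(solution("for the last week"))      # -> For The Last Week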
| 22.837209 | 45 | 0.546843 | [
"MIT"
] | StudyForCoding/ProgrammersLevel | Level2/Lessons12951/gamjapark2.py | 1,232 | Python |
class Pessoa:
def __init__(self,nome,idade,cpf,salario):
self.nome = nome
self.idade = idade
self.cpf = cpf
self.salario = salario
def Aumento(self):
        return self.salario * 0.05
class Gerente(Pessoa):
def __init__(self,nome,idade,cpf,salario,senha):
super().__init__(nome,idade,cpf,salario)
self.senha = senha
def Aumento(self):
return self.salario * 0.01 + 1000
p = Gerente('Fabio',25,41075570816,21000,456578)
print(p.nome)
print(p.idade)
print(p.cpf)
print(p.senha)
print(p.salario)
print(p.Aumento())
print('='*30)
class Animal:
def __init__(self,nome,raca,cor,peso,comportamento = True):
self.nome = nome
self.raca = raca
self.cor = cor
self.peso = peso
self.comportamento = comportamento
def Comportamento(self):
if(self.comportamento == False):
return self.peso + 500
print('Ta Gordo por sem ruim')
class Pitbull(Animal):
pass
#def Comportamento(self):
#return False
dog = Pitbull('Luci','Pitbull','Preta',53,False)
print(dog.nome)
print(dog.raca)
print(dog.cor)
print(dog.peso)
print(dog.Comportamento())
| 20.949153 | 63 | 0.61165 | [
"Apache-2.0"
] | Madara701/Pythob_OO | Python_OO/Exercicio.py | 1,236 | Python |
import os
import re
import wx
import wx.grid
from . import dialog_base
def pop_error(msg):
wx.MessageBox(msg, 'Error', wx.OK | wx.ICON_ERROR)
class SettingsDialog(dialog_base.SettingsDialogBase):
def __init__(self, extra_data_func, extra_data_wildcard, config_save_func,
file_name_format_hint, version):
dialog_base.SettingsDialogBase.__init__(self, None)
self.panel = SettingsDialogPanel(
self, extra_data_func, extra_data_wildcard, config_save_func,
file_name_format_hint)
best_size = self.panel.BestSize
# hack for some gtk themes that incorrectly calculate best size
best_size.IncBy(dx=0, dy=30)
self.SetClientSize(best_size)
self.SetTitle('InteractiveHtmlBom %s' % version)
# hack for new wxFormBuilder generating code incompatible with old wxPython
# noinspection PyMethodOverriding
def SetSizeHints(self, sz1, sz2):
try:
# wxPython 4
super(SettingsDialog, self).SetSizeHints(sz1, sz2)
except TypeError:
# wxPython 3
self.SetSizeHintsSz(sz1, sz2)
def set_extra_data_path(self, extra_data_file):
self.panel.fields.extraDataFilePicker.Path = extra_data_file
self.panel.fields.OnExtraDataFileChanged(None)
# Implementing settings_dialog
class SettingsDialogPanel(dialog_base.SettingsDialogPanel):
def __init__(self, parent, extra_data_func, extra_data_wildcard,
config_save_func, file_name_format_hint):
self.config_save_func = config_save_func
dialog_base.SettingsDialogPanel.__init__(self, parent)
self.general = GeneralSettingsPanel(self.notebook,
file_name_format_hint)
self.html = HtmlSettingsPanel(self.notebook)
self.fields = FieldsPanel(self.notebook, extra_data_func,
extra_data_wildcard)
self.notebook.AddPage(self.general, "General")
self.notebook.AddPage(self.html, "Html defaults")
self.notebook.AddPage(self.fields, "Fields")
def OnExit(self, event):
self.GetParent().EndModal(wx.ID_CANCEL)
def OnSaveSettings(self, event):
self.config_save_func(self)
def OnGenerateBom(self, event):
self.GetParent().EndModal(wx.ID_OK)
def finish_init(self):
self.html.OnBoardRotationSlider(None)
# Implementing HtmlSettingsPanelBase
class HtmlSettingsPanel(dialog_base.HtmlSettingsPanelBase):
def __init__(self, parent):
dialog_base.HtmlSettingsPanelBase.__init__(self, parent)
# Handlers for HtmlSettingsPanelBase events.
def OnBoardRotationSlider(self, event):
degrees = self.boardRotationSlider.Value * 5
self.rotationDegreeLabel.LabelText = u"{}\u00B0".format(degrees)
# Implementing GeneralSettingsPanelBase
class GeneralSettingsPanel(dialog_base.GeneralSettingsPanelBase):
def __init__(self, parent, file_name_format_hint):
dialog_base.GeneralSettingsPanelBase.__init__(self, parent)
self.file_name_format_hint = file_name_format_hint
bitmaps = os.path.join(os.path.dirname(__file__), "bitmaps")
self.m_btnSortUp.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-arrow-up.png"), wx.BITMAP_TYPE_PNG))
self.m_btnSortDown.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-arrow-down.png"), wx.BITMAP_TYPE_PNG))
self.m_btnSortAdd.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-plus.png"), wx.BITMAP_TYPE_PNG))
self.m_btnSortRemove.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-minus.png"), wx.BITMAP_TYPE_PNG))
self.m_bpButton5.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-question.png"), wx.BITMAP_TYPE_PNG))
self.m_btnBlacklistAdd.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-plus.png"), wx.BITMAP_TYPE_PNG))
self.m_btnBlacklistRemove.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-minus.png"), wx.BITMAP_TYPE_PNG))
# Handlers for GeneralSettingsPanelBase events.
def OnComponentSortOrderUp(self, event):
selection = self.componentSortOrderBox.Selection
if selection != wx.NOT_FOUND and selection > 0:
item = self.componentSortOrderBox.GetString(selection)
self.componentSortOrderBox.Delete(selection)
self.componentSortOrderBox.Insert(item, selection - 1)
self.componentSortOrderBox.SetSelection(selection - 1)
def OnComponentSortOrderDown(self, event):
selection = self.componentSortOrderBox.Selection
size = self.componentSortOrderBox.Count
if selection != wx.NOT_FOUND and selection < size - 1:
item = self.componentSortOrderBox.GetString(selection)
self.componentSortOrderBox.Delete(selection)
self.componentSortOrderBox.Insert(item, selection + 1)
self.componentSortOrderBox.SetSelection(selection + 1)
def OnComponentSortOrderAdd(self, event):
item = wx.GetTextFromUser(
"Characters other than A-Z will be ignored.",
"Add sort order item")
item = re.sub('[^A-Z]', '', item.upper())
if item == '':
return
found = self.componentSortOrderBox.FindString(item)
if found != wx.NOT_FOUND:
self.componentSortOrderBox.SetSelection(found)
return
self.componentSortOrderBox.Append(item)
self.componentSortOrderBox.SetSelection(
self.componentSortOrderBox.Count - 1)
def OnComponentSortOrderRemove(self, event):
selection = self.componentSortOrderBox.Selection
if selection != wx.NOT_FOUND:
item = self.componentSortOrderBox.GetString(selection)
if item == '~':
pop_error("You can not delete '~' item")
return
self.componentSortOrderBox.Delete(selection)
if self.componentSortOrderBox.Count > 0:
self.componentSortOrderBox.SetSelection(max(selection - 1, 0))
def OnComponentBlacklistAdd(self, event):
item = wx.GetTextFromUser(
"Characters other than A-Z 0-9 and * will be ignored.",
"Add blacklist item")
item = re.sub('[^A-Z0-9*]', '', item.upper())
if item == '':
return
found = self.blacklistBox.FindString(item)
if found != wx.NOT_FOUND:
self.blacklistBox.SetSelection(found)
return
self.blacklistBox.Append(item)
self.blacklistBox.SetSelection(self.blacklistBox.Count - 1)
def OnComponentBlacklistRemove(self, event):
selection = self.blacklistBox.Selection
if selection != wx.NOT_FOUND:
self.blacklistBox.Delete(selection)
if self.blacklistBox.Count > 0:
self.blacklistBox.SetSelection(max(selection - 1, 0))
def OnNameFormatHintClick(self, event):
wx.MessageBox(self.file_name_format_hint, 'File name format help',
style=wx.ICON_NONE | wx.OK)
def OnSize(self, event):
# Trick the listCheckBox best size calculations
tmp = self.componentSortOrderBox.GetStrings()
self.componentSortOrderBox.SetItems([])
self.Layout()
self.componentSortOrderBox.SetItems(tmp)
# Implementing FieldsPanelBase
class FieldsPanel(dialog_base.FieldsPanelBase):
NONE_STRING = '<none>'
FIELDS_GRID_COLUMNS = 3
def __init__(self, parent, extra_data_func, extra_data_wildcard):
dialog_base.FieldsPanelBase.__init__(self, parent)
self.extra_data_func = extra_data_func
self.extra_field_data = None
bitmaps = os.path.join(os.path.dirname(__file__), "bitmaps")
self.m_btnUp.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-arrow-up.png"), wx.BITMAP_TYPE_PNG))
self.m_btnDown.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-arrow-down.png"), wx.BITMAP_TYPE_PNG))
self.set_file_picker_wildcard(extra_data_wildcard)
self._setFieldsList([])
for i in range(2):
box = self.GetTextExtent(self.fieldsGrid.GetColLabelValue(i))
if hasattr(box, "x"):
width = box.x
else:
width = box[0]
width = int(width * 1.1 + 5)
self.fieldsGrid.SetColMinimalWidth(i, width)
self.fieldsGrid.SetColSize(i, width)
def set_file_picker_wildcard(self, extra_data_wildcard):
if extra_data_wildcard is None:
self.extraDataFilePicker.Disable()
return
# wxFilePickerCtrl doesn't support changing wildcard at runtime
# so we have to replace it
picker_parent = self.extraDataFilePicker.GetParent()
new_picker = wx.FilePickerCtrl(
picker_parent, wx.ID_ANY, wx.EmptyString,
u"Select a file",
extra_data_wildcard,
wx.DefaultPosition, wx.DefaultSize,
(wx.FLP_DEFAULT_STYLE | wx.FLP_FILE_MUST_EXIST | wx.FLP_OPEN |
wx.FLP_SMALL | wx.FLP_USE_TEXTCTRL | wx.BORDER_SIMPLE))
self.GetSizer().Replace(self.extraDataFilePicker, new_picker,
recursive=True)
self.extraDataFilePicker.Destroy()
self.extraDataFilePicker = new_picker
self.Layout()
def _swapRows(self, a, b):
for i in range(self.FIELDS_GRID_COLUMNS):
va = self.fieldsGrid.GetCellValue(a, i)
vb = self.fieldsGrid.GetCellValue(b, i)
self.fieldsGrid.SetCellValue(a, i, vb)
self.fieldsGrid.SetCellValue(b, i, va)
# Handlers for FieldsPanelBase events.
def OnGridCellClicked(self, event):
self.fieldsGrid.ClearSelection()
self.fieldsGrid.SelectRow(event.Row)
if event.Col < 2:
# toggle checkbox
val = self.fieldsGrid.GetCellValue(event.Row, event.Col)
val = "" if val else "1"
self.fieldsGrid.SetCellValue(event.Row, event.Col, val)
# group shouldn't be enabled without show
if event.Col == 0 and val == "":
self.fieldsGrid.SetCellValue(event.Row, 1, val)
if event.Col == 1 and val == "1":
self.fieldsGrid.SetCellValue(event.Row, 0, val)
def OnFieldsUp(self, event):
selection = self.fieldsGrid.SelectedRows
if len(selection) == 1 and selection[0] > 0:
self._swapRows(selection[0], selection[0] - 1)
self.fieldsGrid.ClearSelection()
self.fieldsGrid.SelectRow(selection[0] - 1)
def OnFieldsDown(self, event):
selection = self.fieldsGrid.SelectedRows
size = self.fieldsGrid.NumberRows
if len(selection) == 1 and selection[0] < size - 1:
self._swapRows(selection[0], selection[0] + 1)
self.fieldsGrid.ClearSelection()
self.fieldsGrid.SelectRow(selection[0] + 1)
def _setFieldsList(self, fields):
if self.fieldsGrid.NumberRows:
self.fieldsGrid.DeleteRows(0, self.fieldsGrid.NumberRows)
self.fieldsGrid.AppendRows(len(fields))
row = 0
for f in fields:
self.fieldsGrid.SetCellValue(row, 0, "1")
self.fieldsGrid.SetCellValue(row, 1, "1")
self.fieldsGrid.SetCellRenderer(
row, 0, wx.grid.GridCellBoolRenderer())
self.fieldsGrid.SetCellRenderer(
row, 1, wx.grid.GridCellBoolRenderer())
self.fieldsGrid.SetCellValue(row, 2, f)
self.fieldsGrid.SetCellAlignment(
row, 2, wx.ALIGN_LEFT, wx.ALIGN_TOP)
self.fieldsGrid.SetReadOnly(row, 2)
row += 1
def OnExtraDataFileChanged(self, event):
extra_data_file = self.extraDataFilePicker.Path
if not os.path.isfile(extra_data_file):
return
self.extra_field_data = None
try:
self.extra_field_data = self.extra_data_func(
extra_data_file, self.normalizeCaseCheckbox.Value)
except Exception as e:
pop_error(
"Failed to parse file %s\n\n%s" % (extra_data_file, e))
self.extraDataFilePicker.Path = ''
if self.extra_field_data is not None:
field_list = list(self.extra_field_data[0])
self._setFieldsList(["Value", "Footprint"] + field_list)
field_list.append(self.NONE_STRING)
self.boardVariantFieldBox.SetItems(field_list)
self.boardVariantFieldBox.SetStringSelection(self.NONE_STRING)
self.boardVariantWhitelist.Clear()
self.boardVariantBlacklist.Clear()
self.dnpFieldBox.SetItems(field_list)
self.dnpFieldBox.SetStringSelection(self.NONE_STRING)
def OnBoardVariantFieldChange(self, event):
selection = self.boardVariantFieldBox.Value
if not selection or selection == self.NONE_STRING \
or self.extra_field_data is None:
self.boardVariantWhitelist.Clear()
self.boardVariantBlacklist.Clear()
return
variant_set = set()
for _, field_dict in self.extra_field_data[1].items():
if selection in field_dict:
variant_set.add(field_dict[selection])
self.boardVariantWhitelist.SetItems(list(variant_set))
self.boardVariantBlacklist.SetItems(list(variant_set))
def OnSize(self, event):
self.Layout()
g = self.fieldsGrid
g.SetColSize(
2, g.GetClientSize().x - g.GetColSize(0) - g.GetColSize(1) - 30)
def GetShowFields(self):
result = []
for row in range(self.fieldsGrid.NumberRows):
if self.fieldsGrid.GetCellValue(row, 0) == "1":
result.append(self.fieldsGrid.GetCellValue(row, 2))
return result
def GetGroupFields(self):
result = []
for row in range(self.fieldsGrid.NumberRows):
if self.fieldsGrid.GetCellValue(row, 1) == "1":
result.append(self.fieldsGrid.GetCellValue(row, 2))
return result
def SetCheckedFields(self, show, group):
group = [s for s in group if s in show]
current = []
for row in range(self.fieldsGrid.NumberRows):
current.append(self.fieldsGrid.GetCellValue(row, 2))
new = [s for s in current if s not in show]
self._setFieldsList(show + new)
for row in range(self.fieldsGrid.NumberRows):
field = self.fieldsGrid.GetCellValue(row, 2)
self.fieldsGrid.SetCellValue(row, 0, "1" if field in show else "")
self.fieldsGrid.SetCellValue(row, 1, "1" if field in group else "")
| 41.957386 | 79 | 0.644458 | [
"MIT"
] | INTI-CMNB/InteractiveHtmlBom | InteractiveHtmlBom/dialog/settings_dialog.py | 14,769 | Python |
from __future__ import unicode_literals, print_function
from libraries.lambda_handlers.register_module_handler import RegisterModuleHandler
def handle(event, context):
"""
Called by a module when it is deployed to register it
:param dict event:
:param context:
:return dict:
"""
return RegisterModuleHandler().handle(event, context)
| 27.923077 | 83 | 0.752066 | [
"MIT"
] | WycliffeAssociates/tx-manager | functions/register_module/main.py | 363 | Python |
"""
Problem Statement:
Let the function f(s) be the frequency of the lexicographically smallest character in a non-empty string s. For example, if s = "dcce" then f(s) = 2 because the lexicographically smallest character is 'c', which has a frequency of 2.
You are given an array of strings words and another array of query strings queries. For each query queries[i], count the number of words in words such that f(queries[i]) < f(W) for each W in words.
Return an integer array answer, where each answer[i] is the answer to the ith query.
Example 1:
Input: queries = ["cbd"], words = ["zaaaz"]
Output: [1]
"""
from collections import Counter
def numSmallerByFrequency(queries, words):
# Calculate the frequency of smallest character for each word of query array
fre_queries = fre(queries)
# Calculate the frequency of smallest character for each word of words array & sort it in reverse order.
fre_words = sorted(fre(words))[::-1]
res = []
    # compare each query frequency in fre_queries with each element of fre_words & increase count accordingly
for q in fre_queries:
count = 0
for w in fre_words:
if w <= q:
break
else:
count += 1
res.append(count)
return res
# A function to find the frequency of smallest character.
def fre(arrs):
# Sort the array
sorted_arrs = [sorted(arr) for arr in arrs]
fre = []
for arr in sorted_arrs:
fre.append(list(Counter(arr).items())[0][1])
return fre
# Main begins here
input_queries = input('Enter elements of a queries separated by space: ')
print("\n")
# This would split the input string separated by spaces into string array
queries_list = input_queries.split()
input_words = input('Enter elements of a words separated by space: ')
print("\n")
# This would split the input string separated by spaces into string array
words_list = input_words.split()
# print(queries_list)
# print(words_list)
ans = numSmallerByFrequency(queries_list,words_list)
print("Output: ",ans)
| 36.677966 | 233 | 0.66451 | [
"MIT"
] | Ayonijakaushik19/DSA-guide | Arrays/python/compareStringByFrequencyOfSmallestCharacter.py | 2,164 | Python |
class MetricHandler:
"""
Object meant to be used in the training loop to handle metrics logs
"""
def __init__(self):
pass
def add(self, outputs, targets):
"""
Adding metric for each batch
:param outputs: outputs of the model
:param targets: targets of the model
"""
raise NotImplementedError()
def compute(self, phase):
"""
Aggregate accumulated metrics over batches at the end of the epoch
:param phase: either 'train' or 'val'
"""
raise NotImplementedError()
def description(self, phase):
"""
Description of the current metrics
:param phase: either 'train' or 'val'
:return: str
"""
raise NotImplementedError()
def scalar_infos(self, phase):
"""
Return list of tuple to use with tensorboard writer object 'add_scalar' function
:param phase: either 'train' or 'val'
:return: [tuple(str, number)]
"""
raise NotImplementedError()
def description_best(self):
"""
Description of the best metrics
:return: str
"""
raise NotImplementedError()
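# Minimal illustrative handler (a sketch, assuming torch-style tensors for
# `outputs`/`targets`; not part of the original module): it accumulates
# classification accuracy over the batches seen since the last compute() call
# and remembers the best value observed so far.
class AccuracyMetricHandler(MetricHandler):
    def __init__(self):
        super().__init__()
        self.correct, self.total, self.best = 0, 0, 0.0
        self.last = {'train': 0.0, 'val': 0.0}
    def add(self, outputs, targets):
        # outputs: (batch, n_classes) logits, targets: (batch,) class indices
        preds = outputs.argmax(dim=1)
        self.correct += (preds == targets).sum().item()
        self.total += targets.size(0)
    def compute(self, phase):
        # aggregate over the epoch, then reset the running counters
        self.last[phase] = self.correct / max(self.total, 1)
        self.best = max(self.best, self.last[phase])
        self.correct, self.total = 0, 0
        return self.last[phase]
    def description(self, phase):
        return 'acc={:.3f}'.format(self.last[phase])
    def scalar_infos(self, phase):
        return [('accuracy/{}'.format(phase), self.last[phase])]
    def description_best(self):
        return 'best acc={:.3f}'.format(self.best)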
class Epocher:
"""
An object which is used to print information about training without spamming the console. (WIP)
"""
def __init__(self, n_epoch, epoch_offset=1):
# epoch_offset += 1 # starting at 1 and not zero
self.n_epoch = n_epoch
self.epoch_offset = epoch_offset
self.s_more = ''
self.stats_string = ''
self.ls_string = ''
def __iter__(self):
self.n = self.epoch_offset - 1
self.stats_string = ''
self.ls_string = ''
self.s_more = ''
self.__update_stdout__()
return self
def __next__(self):
self.n += 1
if self.n >= self.n_epoch + self.epoch_offset:
raise StopIteration
self.__update_stdout__()
self.s_more = ''
return self.n
def update_stats(self, s):
self.stats_string = s
self.__update_stdout__()
def update_last_saved(self, s):
self.ls_string = s
self.__update_stdout__()
def print(self, s, sep=' '):
self.s_more = sep + s.replace('\n', '')
self.__update_stdout__()
def __update_stdout__(self):
s0 = 'Epoch [{}/{}]'.format(self.n, self.n_epoch + self.epoch_offset - 1)
s1, s2 = '', ''
if self.stats_string != '':
s1 = ' Stats [{}]'.format(self.stats_string).replace('\n', '')
if self.ls_string != '':
s2 = ' Last Saved [{}]'.format(self.ls_string).replace('\n', '')
print('\r{}'.format(s0), s1, s2, self.s_more, end='', sep='')
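# Minimal illustrative use of Epocher (a toy loop, not part of the original module):
# the object is iterated like range(), while the stats / last-saved strings refresh
# a single console line instead of spamming new ones.
if __name__ == '__main__':
    import time
    epocher = Epocher(n_epoch=3)
    for epoch in epocher:
        time.sleep(0.1)  # stand-in for one training epoch
        epocher.update_stats('loss={:.2f}'.format(1.0 / epoch))
        epocher.update_last_saved('epoch {}'.format(epoch))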
| 27.356436 | 99 | 0.565328 | [
"MIT"
] | gregunz/FacadeParsing | facade_project/utils/ml_utils.py | 2,763 | Python |
from __future__ import absolute_import
import pytest
import logging
import mock
from sentry.logging.handlers import StructLogHandler
@pytest.fixture
def handler():
return StructLogHandler()
@pytest.fixture
def logger():
return mock.MagicMock()
def make_logrecord(**extra):
kwargs = dict(
name="name",
level=logging.INFO,
pathname="pathname",
lineno=10,
msg="msg",
args=None,
exc_info=None,
)
kwargs.update(extra or {})
return logging.LogRecord(**kwargs)
@pytest.mark.parametrize(
"record,out",
(
({}, {}),
({"msg": "%s", "args": (1,)}, {"event": "%s", "positional_args": (1,)}),
({"args": ({"a": 1},)}, {"positional_args": ({"a": 1},)}),
({"exc_info": True}, {"exc_info": True}),
),
)
def test_emit(record, out, handler, logger):
record = make_logrecord(**record)
handler.emit(record, logger=logger)
expected = dict(level=logging.INFO, event="msg", name="name")
expected.update(out)
logger.log.assert_called_once_with(**expected)
@mock.patch("sentry.logging.handlers.metrics")
def test_log_to_metric(metrics):
logger = logging.getLogger("django.request")
logger.warn("CSRF problem")
metrics.incr.assert_called_once_with("django.request.csrf_problem", skip_internal=False)
metrics.reset_mock()
logger.warn("Some other problem we don't care about")
assert metrics.incr.call_count == 0
| 24.016393 | 92 | 0.640273 | [
"BSD-3-Clause"
] | Ali-Tahir/sentry | tests/sentry/logging/test_handler.py | 1,465 | Python |
"""
Optimizers
----------
.. autosummary::
:template: template.rst
:toctree:
Solver
ScipySolver
CandidateSolver
GridSolver
"""
from .solver import Solver
from .scipy import ScipySolver
from .candidate import CandidateSolver, GridSolver, FiniteDomainSolver
| 13.95 | 70 | 0.713262 | [
"MIT"
] | eric-vader/HD-BO-Additive-Models | hdbo/febo/solvers/__init__.py | 279 | Python |
import turtle
tortuguita= turtle.Turtle()
tortuguita.speed(100)
tortuguita.dot(30,"black")
tortuguita.forward(15)
tortuguita.left(90)
tortuguita.circle(50)
tortuguita.circle(70)
tortuguita.circle(90)
tortuguita.right(90)
tortuguita.up()
tortuguita.forward(15)
tortuguita.down()
tortuguita.dot(30,"black")
turtle.done() | 16.947368 | 27 | 0.785714 | [
"MIT"
] | Aleff13/poo-ufsc | exercicios-turtle/.history/clown_20210623230605.py | 322 | Python |
import grpc
from functools import wraps
class WalletEncryptedError(Exception):
def __init__(self, message=None):
message = message or 'Wallet is encrypted. Please unlock or set ' \
'password if this is the first time starting lnd. '
super().__init__(message)
def handle_rpc_errors(fnc):
"""Decorator to add more context to RPC errors"""
@wraps(fnc)
def wrapper(*args, **kwargs):
try:
return fnc(*args, **kwargs)
except grpc.RpcError as exc:
# lnd might be active, but not possible to contact
# using RPC if the wallet is encrypted. If we get
# an rpc error code Unimplemented, it means that lnd is
# running, but the RPC server is not active yet (only
# WalletUnlocker server active) and most likely this
# is because of an encrypted wallet.
exc.code().value
exc.details()
if exc.code() == grpc.StatusCode.UNIMPLEMENTED:
# raise WalletEncryptedError from None
print("unimplemented")
raise exc
elif exc.code() == grpc.StatusCode.UNAVAILABLE:
print("UNAVAILABLE")
print(f"ERROR MESSAGE: {exc.details()}")
elif exc.code() == grpc.StatusCode.UNKNOWN and exc.details() == "wallet locked, unlock it to enable full RPC access":
print("WALLET IS LOCKED!")
raise exc
elif exc.code() == grpc.StatusCode.UNKNOWN:
print("unknown")
print(f"ERROR MESSAGE: {exc.details()}")
elif exc.code() == grpc.StatusCode.NOT_FOUND:
print("NOT FOUND")
print(f"ERROR MESSAGE: {exc.details()}")
elif exc.code() == grpc.StatusCode.PERMISSION_DENIED:
print("PERMISSION_DENIED")
print(f"ERROR MESSAGE: {exc.details()}")
else:
raise exc
return exc
except Exception as exc:
print("unknown exception")
print(exc)
return wrapper
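# Illustrative usage sketch: any client method that performs a gRPC call can be
# wrapped so grpc.RpcError codes are reported with the extra context above.
# The stub and RPC name below are placeholders/assumptions, not part of this module.
@handle_rpc_errors
def get_info(stub, request):
    return stub.GetInfo(request)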
| 38.303571 | 129 | 0.558042 | [
"MIT"
] | ibz/lnd-grpc-client | lndgrpc/errors.py | 2,145 | Python |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch
import torch.nn as nn
from mmcls.models.builder import BACKBONES
from mmcv.cnn import build_activation_layer, build_norm_layer
from ...utils import Placeholder
class FactorizedReduce(nn.Module):
"""Reduce feature map size by factorized pointwise (stride=2)."""
def __init__(self,
in_channels,
out_channels,
act_cfg=dict(type='ReLU'),
norm_cfg=dict(type='BN')):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.relu = build_activation_layer(self.act_cfg)
self.conv1 = nn.Conv2d(
self.in_channels,
self.out_channels // 2,
1,
stride=2,
padding=0,
bias=False)
self.conv2 = nn.Conv2d(
self.in_channels,
self.out_channels // 2,
1,
stride=2,
padding=0,
bias=False)
self.bn = build_norm_layer(self.norm_cfg, self.out_channels)[1]
def forward(self, x):
x = self.relu(x)
out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1)
out = self.bn(out)
return out
class StandardConv(nn.Module):
"""
Standard conv: ReLU - Conv - BN
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
act_cfg=dict(type='ReLU'),
norm_cfg=dict(type='BN')):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.net = nn.Sequential(
build_activation_layer(self.act_cfg),
nn.Conv2d(
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.padding,
bias=False),
build_norm_layer(self.norm_cfg, self.out_channels)[1])
def forward(self, x):
return self.net(x)
class Node(nn.Module):
def __init__(self, node_id, num_prev_nodes, channels,
num_downsample_nodes):
super().__init__()
edges = nn.ModuleDict()
for i in range(num_prev_nodes):
if i < num_downsample_nodes:
stride = 2
else:
stride = 1
edge_id = '{}_p{}'.format(node_id, i)
edges.add_module(
edge_id,
nn.Sequential(
Placeholder(
group='node',
space_id=edge_id,
choice_args=dict(
stride=stride,
in_channels=channels,
out_channels=channels)), ))
self.edges = Placeholder(
group='node_edge', space_id=node_id, choices=edges)
def forward(self, prev_nodes):
return self.edges(prev_nodes)
class Cell(nn.Module):
def __init__(self,
num_nodes,
channels,
prev_channels,
prev_prev_channels,
reduction,
prev_reduction,
act_cfg=dict(type='ReLU'),
norm_cfg=dict(type='BN')):
super().__init__()
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.reduction = reduction
self.num_nodes = num_nodes
# If previous cell is reduction cell, current input size does not match
# with output size of cell[k-2]. So the output[k-2] should be reduced
# by preprocessing.
if prev_reduction:
self.preproc0 = FactorizedReduce(prev_prev_channels, channels,
self.act_cfg, self.norm_cfg)
else:
self.preproc0 = StandardConv(prev_prev_channels, channels, 1, 1, 0,
self.act_cfg, self.norm_cfg)
self.preproc1 = StandardConv(prev_channels, channels, 1, 1, 0,
self.act_cfg, self.norm_cfg)
# generate dag
self.nodes = nn.ModuleList()
for depth in range(2, self.num_nodes + 2):
if reduction:
node_id = f'reduce_n{depth}'
num_downsample_nodes = 2
else:
node_id = f'normal_n{depth}'
num_downsample_nodes = 0
self.nodes.append(
Node(node_id, depth, channels, num_downsample_nodes))
def forward(self, s0, s1):
# s0, s1 are the outputs of previous previous cell and previous cell,
# respectively.
tensors = [self.preproc0(s0), self.preproc1(s1)]
for node in self.nodes:
cur_tensor = node(tensors)
tensors.append(cur_tensor)
output = torch.cat(tensors[2:], dim=1)
return output
class AuxiliaryModule(nn.Module):
"""Auxiliary head in 2/3 place of network to let the gradient flow well."""
def __init__(self,
in_channels,
base_channels,
out_channels,
norm_cfg=dict(type='BN')):
super().__init__()
self.norm_cfg = norm_cfg
self.net = nn.Sequential(
nn.ReLU(),
nn.AvgPool2d(5, stride=2, padding=0,
count_include_pad=False), # 2x2 out
nn.Conv2d(in_channels, base_channels, kernel_size=1, bias=False),
build_norm_layer(self.norm_cfg, base_channels)[1],
nn.ReLU(inplace=True),
nn.Conv2d(base_channels, out_channels, kernel_size=2,
bias=False), # 1x1 out
build_norm_layer(self.norm_cfg, out_channels)[1],
nn.ReLU(inplace=True))
def forward(self, x):
return self.net(x)
@BACKBONES.register_module()
class DartsBackbone(nn.Module):
def __init__(self,
in_channels,
base_channels,
num_layers=8,
num_nodes=4,
stem_multiplier=3,
out_indices=(7, ),
auxliary=False,
aux_channels=None,
aux_out_channels=None,
act_cfg=dict(type='ReLU'),
norm_cfg=dict(type='BN')):
super().__init__()
self.in_channels = in_channels
self.base_channels = base_channels
self.num_layers = num_layers
self.num_nodes = num_nodes
self.stem_multiplier = stem_multiplier
self.out_indices = out_indices
assert self.out_indices[-1] == self.num_layers - 1
if auxliary:
assert aux_channels is not None
assert aux_out_channels is not None
self.aux_channels = aux_channels
self.aux_out_channels = aux_out_channels
self.auxliary_indice = 2 * self.num_layers // 3
else:
self.auxliary_indice = -1
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.out_channels = self.stem_multiplier * self.base_channels
stem_norm_cfg = copy.deepcopy(self.norm_cfg)
stem_norm_cfg.update(dict(affine=True))
self.stem = nn.Sequential(
nn.Conv2d(
self.in_channels, self.out_channels, 3, 1, 1, bias=False),
build_norm_layer(self.norm_cfg, self.out_channels)[1])
# for the first cell, stem is used for both s0 and s1
# [!] prev_prev_channels and prev_channels is output channel size,
# but c_cur is input channel size.
prev_prev_channels = self.out_channels
prev_channels = self.out_channels
self.out_channels = self.base_channels
self.cells = nn.ModuleList()
prev_reduction, reduction = False, False
for i in range(self.num_layers):
prev_reduction, reduction = reduction, False
# Reduce featuremap size and double channels in 1/3
# and 2/3 layer.
if i == self.num_layers // 3 or i == 2 * self.num_layers // 3:
self.out_channels *= 2
reduction = True
cell = Cell(self.num_nodes, self.out_channels, prev_channels,
prev_prev_channels, reduction, prev_reduction,
self.act_cfg, self.norm_cfg)
self.cells.append(cell)
prev_prev_channels = prev_channels
prev_channels = self.out_channels * self.num_nodes
if i == self.auxliary_indice:
self.auxliary_module = AuxiliaryModule(prev_channels,
self.aux_channels,
self.aux_out_channels,
self.norm_cfg)
def forward(self, x):
outs = []
s0 = s1 = self.stem(x)
for i, cell in enumerate(self.cells):
s0, s1 = s1, cell(s0, s1)
if i in self.out_indices:
outs.append(s1)
if i == self.auxliary_indice and self.training:
aux_feature = self.auxliary_module(s1)
outs.insert(0, aux_feature)
return tuple(outs)
| 34.028269 | 79 | 0.538733 | [
"Apache-2.0"
] | David-19940718/mmclassification | mmcls/models/architectures/components/backbones/darts_backbone.py | 9,630 | Python |
from random import randint
from time import sleep
def sorteio(lista):
print('-=' * 30)
print('Sorteando 5 valores da lista: ', end='')
for i in range(0, 5):
lista.append(randint(1, 10))
print(f'{lista[i]} ', end='', flush=True)
sleep(0.3)
print('PRONTO!')
def somaPar(lista):
print('-=' * 30)
pares = 0
for num in lista:
if num % 2 == 0:
pares += num
print(f'Somando os valores pares de {lista}, temos {pares}')
# Main program
numeros = []
sorteio(numeros)
somaPar(numeros)
print('-=' * 30)
| 22.153846 | 64 | 0.574653 | [
"MIT"
] | juniorpedroso/Exercicios-CEV-Python | ex100.py | 576 | Python |
"""Project signals"""
import logging
import django.dispatch
from django.contrib import messages
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from readthedocs.oauth.services import registry
before_vcs = django.dispatch.Signal(providing_args=["version"])
after_vcs = django.dispatch.Signal(providing_args=["version"])
before_build = django.dispatch.Signal(providing_args=["version"])
after_build = django.dispatch.Signal(providing_args=["version"])
project_import = django.dispatch.Signal(providing_args=["project"])
log = logging.getLogger(__name__)
@receiver(project_import)
def handle_project_import(sender, **kwargs):
"""Add post-commit hook on project import"""
project = sender
request = kwargs.get('request')
for service_cls in registry:
if service_cls.is_project_service(project):
service = service_cls.for_user(request.user)
if service is not None:
if service.setup_webhook(project):
messages.success(request, _('Webhook activated'))
else:
messages.error(request, _('Webhook configuration failed'))
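@receiver(before_build)
def log_build_start(sender, **kwargs):
    """Illustrative sketch (an assumed receiver, not part of the original module):
    any app can hook into the signals above; ``before_build`` provides ``version``."""
    log.info('Build starting for version %s', kwargs.get('version'))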
| 30.410256 | 78 | 0.716695 | [
"MIT"
] | ank-forked/readthedocs.org | readthedocs/projects/signals.py | 1,186 | Python |
#Author: Sepehr Roudini.
#Date: 02/05/2018.
#University of Iowa.
#Department of Chemical Engineering.
#Purpose: Calculating mean and Std
#--------------------------------------------------------------------------------------------#
#Defining function and importing necessary libraries.
#--------------------------------------------------------------------------------------------#
##############################################################################################
#Libraries used in this function are: numpy and math.
##############################################################################################
#Data: A 1d array of data.
##############################################################################################
#This function returns the mean and standard
#deviation of the data.
##############################################################################################
def Calculate_Mean_Std(Data):
    #numpy is used for data manipulation
import numpy as np
#--------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------#
#Preparing data and quantile calculation
#--------------------------------------------------------------------------------------------#
#Calculating mean
mean = np.sum(Data)/len(Data)
#Calculating standard deviation
std = np.sqrt(np.sum(((Data-mean)**2))/(len(Data)-1))
return mean, std
#--------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------#
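#--------------------------------------------------------------------------------------------#
#Example usage (illustrative): the standard deviation above uses the sample (n-1)
#denominator, so it matches numpy.std with ddof=1.
#--------------------------------------------------------------------------------------------#
if __name__ == '__main__':
    import numpy as np
    sample = np.array([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
    mean, std = Calculate_Mean_Std(sample)
    print(mean, std)
    print(np.mean(sample), np.std(sample, ddof=1))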
| 48.513514 | 95 | 0.262953 | [
"MIT"
] | SkyRd1/Statistical_Functions | Mean_Std_Calculation.py | 1,795 | Python |
import pandas as pd
import sys
from os import path
import numpy
from sys import exit
def main():
if len(sys.argv)!=5:
print("Incorrect no. of parameters passed.")
exit(0)
i=sys.argv[1]
w=sys.argv[2]
im=sys.argv[3]
result=sys.argv[4]
if not i.endswith('.csv'):
print("Input file is not in .csv format.")
exit(0)
if not path.exists(i):
print("No such file exists!!")
exit(0)
f = pd.read_csv(i)
c = f.shape[-1]
if c<3:
print("File should have at least 3 or more columns.")
exit(0)
k=0
for i in f.columns:
k=k+1
for j in f.index:
if k!=1:
v=isinstance(f[i][j],numpy.int64)
v1=isinstance(f[i][j],float)
if not v and not v1:
                    print(f'Column {k} contains a non-numeric value.')
exit(0)
    try:
        weights = [int(x) for x in w.split(',')]
    except ValueError:
        print("Weights should be numeric values separated by commas(,).")
        exit(0)
    impacts = im.split(',')
    if len(weights) != len(impacts) or len(weights) != f.iloc[:, 1:].shape[1]:
        print("No. of input Impacts, Weights and columns (from second to last) should be the same.")
        exit(0)
    for j in impacts:
        if j != '+' and j != '-':
            print("Impacts must be '+' or '-'.")
            exit(0)
a=f.iloc[:,1:]
vp=[]
vn=[]
sp=[]
sn=[]
skn=[]
p=[]
for col in range(a.shape[1]):
total=0
for row in range(a.shape[0]):
total=total+a.iloc[row,col]**2
total=total**0.5
for i in range(a.shape[0]):
a.iloc[i,col]=a.iloc[i,col]/total
for j in range(a.shape[0]):
a.iloc[j,col]=a.iloc[j,col]*weights[col]
if impacts[col]=='+':
vp.append(a.iloc[:,col].max())
vn.append(a.iloc[:,col].min())
else:
vp.append(a.iloc[:,col].min())
vn.append(a.iloc[:,col].max())
for m in range(a.shape[0]):
temp=0
ans=0
for n in range(a.shape[1]):
temp=temp+(a.iloc[m,n]-vp[n])**2
temp=temp**0.5
sp.append(temp)
for q in range(a.shape[1]):
ans=ans+(a.iloc[m,q]-vn[q])**2
ans=ans**0.5
sn.append(ans)
for w in range(0,len(sp)):
skn.append(sp[w]+sn[w])
for y in range(0,len(skn)):
p.append(sn[y]/skn[y])
f.insert(5,"Topsis Score",p)
f.insert(6,"Rank",f["Topsis Score"].rank(ascending=False))
f.to_csv(result)
if __name__ == "__main__":
main() | 30.281553 | 99 | 0.435075 | [
"MIT"
] | diptikaushal/TOPSIS-Dipti-101803601 | topsis.py | 3,119 | Python |
"""Users serializers"""
# Django
from django.conf import settings
from django.contrib.auth import password_validation, authenticate
from django.core.validators import RegexValidator
# Serializers
from cride.users.serializers.profiles import ProfileModelSerializer
# Django REST Framework
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from rest_framework.validators import UniqueValidator
# Models
from cride.users.models import User, Profile
# Task
from cride.taskapp.task import send_confirmation_email
# Utilities
import jwt
class UserModelSerializer(serializers.ModelSerializer):
"""User model serializer"""
profile = ProfileModelSerializer(read_only=True)
class Meta:
"""Meta class."""
model = User
fields = (
'username',
'first_name',
'last_name',
'email',
'phone_number',
'profile'
)
class UserSignUpSerializer(serializers.Serializer):
"""User sign up serializer.
Handle sign up data validation and user/profile creation.
"""
email = serializers.EmailField(
validators=[UniqueValidator(queryset=User.objects.all())]
)
username = serializers.CharField(
min_length=4,
max_length=20,
validators=[UniqueValidator(queryset=User.objects.all())]
)
# Phone number
phone_regex = RegexValidator(
regex=r'\+?1?\d{9,15}$',
message="Phone number must be entered in the format: +999999999. Up to 15 digits allowed."
)
phone_number = serializers.CharField(validators=[phone_regex])
# Password
password = serializers.CharField(min_length=8, max_length=64)
password_confirmation = serializers.CharField(min_length=8, max_length=64)
# Name
first_name = serializers.CharField(min_length=2, max_length=30)
last_name = serializers.CharField(min_length=2, max_length=30)
def validate(self, data):
"""Verify passwords match."""
passwd = data['password']
passwd_conf = data['password_confirmation']
if passwd != passwd_conf:
raise serializers.ValidationError("Passwords don't match.")
password_validation.validate_password(passwd)
return data
def create(self, data):
"""Handle user and profile creation."""
data.pop('password_confirmation')
user = User.objects.create_user(**data, is_verified=False, is_client=True)
profile = Profile.objects.create(user=user)
send_confirmation_email.delay(user_pk=user.pk)
return user
class UserLoginSerializer(serializers.Serializer):
"""User Login serializer
Handle the login request data.
"""
email = serializers.EmailField()
password = serializers.CharField(min_length=8, max_length=64)
def validate(self, data):
"""Check credentials"""
user = authenticate(username=data['email'], password=data['password'])
if not user:
raise serializers.ValidationError('Invalid credentials')
if not user.is_verified:
raise serializers.ValidationError('Account is not active yet')
self.context['user'] = user
return data
def create(self, data):
"""Generate or retrieve new token"""
token, created = Token.objects.get_or_create(user=self.context['user'])
return self.context['user'], token.key
class AccountVerificationSerializer(serializers.Serializer):
"""Account verification serializer"""
token = serializers.CharField()
def validate_token(self, data):
"""Verify token is valid"""
try:
payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])
except jwt.ExpiredSignatureError:
raise serializers.ValidationError('Verification link has expired.')
except jwt.exceptions.PyJWTError:
raise serializers.ValidationError('Invalid token')
if payload['type'] != 'email_confirmation':
raise serializers.ValidationError('Invalid token')
self.context['payload'] = payload
return data
def save(self):
"""Update user's verified status"""
payload = self.context['payload']
user = User.objects.get(username=payload['user'])
user.is_verified = True
user.save()
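# Illustrative usage sketch (assumed to run inside a DRF view; not part of this
# module). Note that UserLoginSerializer.save() returns a (user, token) pair,
# as produced by its create() method above:
#
#   serializer = UserLoginSerializer(data=request.data)
#   serializer.is_valid(raise_exception=True)
#   user, token = serializer.save()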
| 30.566434 | 98 | 0.672615 | [
"MIT"
] | eocode/Rider-App | cride/users/serializers/users.py | 4,371 | Python |
import pyomo.environ as pe
import romodel as ro
feeds = range(5)
products = range(4)
pools = range(2)
qualities = range(4)
con_feed_pool = [(0, 0), (1, 0), (2, 0), (3, 1), (4, 1)]
con_pool_prod = [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3)]
con_feed_prod = []
price_product = [16, 25, 15, 10]
price_feed = [7, 3, 2, 10, 5]
max_flow = [float('inf'), float('inf'), float('inf'), float('inf'), float('inf')]
min_flow = [0, 0, 0, 0, 0]
pool_size = [float('inf'), float('inf')]
max_demand = [10, 25, 30, 10]
min_demand = [0, 0, 0, 0]
feed_cons = [[1.0, 6.0, 4.0, 0.5],
[4.0, 1.0, 3.0, 2.0],
[4.0, 5.5, 3.0, 0.9],
[3.0, 3.0, 3.0, 1.0],
[1.0, 2.7, 4.0, 1.6]]
max_cons = [[3.00, 3.00, 3.25, 0.75],
[4.00, 2.50, 3.50, 1.50],
[1.50, 5.50, 3.90, 0.80],
[3.00, 4.00, 4.00, 1.80]]
min_cons = [[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0]]
m = pe.ConcreteModel()
m.q = pe.Var(con_feed_pool, bounds=(0, 1))
m.y = pe.Var(con_pool_prod, within=pe.NonNegativeReals)
m.z = pe.Var(con_feed_prod, within=pe.NonNegativeReals)
m.U = ro.UncSet()
m.price_product = ro.UncParam(products, nominal=price_product, uncset=m.U)
expr = 0
for j in products:
expr += (m.price_product[j] - price_product[j])**2
m.U.c = pe.Constraint(expr=expr <= 0.1)
price_product = m.price_product
obj = 0
for i, l in con_feed_pool:
for j in [jj for ll, jj in con_pool_prod if ll == l]:
obj += price_feed[j]*m.y[(l, j)]*m.q[i, l]
for l, j in con_pool_prod:
obj -= price_product[j]*m.y[(l, j)]
for i, j in con_feed_prod:
obj -= (price_product[j] - price_feed[i])*m.z[(i, j)]
m.obj = pe.Objective(expr=obj, sense=pe.minimize)
# Feed availability
def feed_availability_rule(m, i):
expr = 0
for l in [ll for ii, ll in con_feed_pool if ii == i]:
for j in [jj for ll, jj in con_pool_prod if ll == l]:
expr += m.q[(i, l)]*m.y[(l, j)]
for j in [jj for ii, jj in con_feed_prod if ii == i]:
expr += m.z[(i, l)]
return min_flow[i], expr, max_flow[i]
m.feed_availability = pe.Constraint(feeds, rule=feed_availability_rule)
# Pool capacity
def pool_capacity_rule(m, l):
expr = 0
for j in [jj for ll, jj in con_pool_prod if ll == l]:
expr += m.y[(l, j)]
return None, expr, pool_size[l]
m.pool_capacity = pe.Constraint(pools, rule=pool_capacity_rule)
# Product demand
def prod_demand_rule(m, j):
expr = 0
for l in [ll for ll, jj in con_pool_prod if jj == j]:
expr += m.y[(l, j)]
for i in [ii for ii, jj in con_feed_prod if jj == j]:
expr += m.z[(i, j)]
return min_demand[j], expr, max_demand[j]
m.product_demand = pe.Constraint(products, rule=prod_demand_rule)
# Simplex
def simplex_rule(m, l):
return pe.quicksum(m.q[(i, l)] for i, ll in m.q if ll == l) == 1
m.simplex = pe.Constraint(pools, rule=simplex_rule)
# Product quality
def prod_quality_rule_upper(m, j, k):
expr = 0
flow = 0
for l in [ll for ll, jj in con_pool_prod if jj == j]:
flow += m.y[l, j]
for i in [ii for ii, ll in con_feed_pool if ll == l]:
expr += feed_cons[i][k]*m.q[(i, l)]*m.y[(l, j)]
for i in [ii for ii, jj in con_feed_prod if jj == j]:
flow += m.z[i, j]
expr += feed_cons[i][k]*m.z[(i, j)]
return expr <= max_cons[j][k]*flow
def prod_quality_rule_lower(m, j, k):
expr = 0
flow = 0
for l in [ll for ll, jj in con_pool_prod if jj == j]:
flow += m.y[l, j]
for i in [ii for ii, ll in con_feed_pool if ll == l]:
expr += feed_cons[i][k]*m.q[(i, l)]*m.y[(l, j)]
for i in [ii for ii, jj in con_feed_prod if jj == j]:
flow += m.z[i, j]
expr += feed_cons[i][k]*m.z[(i, j)]
return min_cons[j][k]*flow <= expr
m.prod_quality_upper = pe.Constraint(products, qualities,
rule=prod_quality_rule_upper)
m.prod_quality_lower = pe.Constraint(products, qualities,
rule=prod_quality_rule_lower)
solver = pe.SolverFactory('romodel.cuts')
solver.options['NonConvex'] = 2
solver.solve(m, tee=True)
| 29.601399 | 81 | 0.566501 | [
"MIT"
] | tsaycal/romodel | examples/pooling.py | 4,233 | Python |
from sqlalchemy.orm import Session
from .base import CRUDBase
from app.models import Group
from app.schemas import GroupCreate, GroupUpdate
class CRUDGroup(CRUDBase[Group, GroupCreate, GroupUpdate]):
def count(self, db: Session) -> int:
return db.query(Group).count()
group = CRUDGroup(Group)
| 22.142857 | 59 | 0.751613 | [
"MIT"
] | LukasPatzke/ambientHUE | api/app/crud/crud_group.py | 310 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "custom_scripts"
app_title = "Custom Scripts"
app_publisher = "C.R.I.O"
app_description = "For custom scripts"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "[email protected]"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/custom_scripts/css/custom_scripts.css"
# app_include_js = "/assets/custom_scripts/js/custom_scripts.js"
# include js, css files in header of web template
# web_include_css = "/assets/custom_scripts/css/custom_scripts.css"
# web_include_js = "/assets/custom_scripts/js/custom_scripts.js"
# include custom scss in every website theme (without file extension ".scss")
# website_theme_scss = "custom_scripts/public/scss/website"
# include js, css files in header of web form
# webform_include_js = {"doctype": "public/js/doctype.js"}
# webform_include_css = {"doctype": "public/css/doctype.css"}
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
doctype_js = {"Sales Invoice" : "custom_scripts/custom/js/sales_invoice.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "custom_scripts.install.before_install"
# after_install = "custom_scripts.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "custom_scripts.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# DocType Class
# ---------------
# Override standard doctype classes
override_doctype_class = {
#"Employee Advance": "custom_scripts.custom_scripts.custom.auto_additional_salary.ERPNextEmployeeAdvance",
"POS Invoice Merge Log": "custom_scripts.custom_scripts.custom.sales_invoice.ERPNextPOSInvoiceMergeLog"
}
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "custom_scripts.tasks.all"
# ],
# "daily": [
# "custom_scripts.tasks.daily"
# ],
# "hourly": [
# "custom_scripts.tasks.hourly"
# ],
# "weekly": [
# "custom_scripts.tasks.weekly"
# ]
# "monthly": [
# "custom_scripts.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "custom_scripts.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "custom_scripts.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "custom_scripts.task.get_dashboard_data"
# }
# exempt linked doctypes from being automatically cancelled
#
# auto_cancel_exempted_doctypes = ["Auto Repeat"]
# User Data Protection
# --------------------
user_data_fields = [
{
"doctype": "{doctype_1}",
"filter_by": "{filter_by}",
"redact_fields": ["{field_1}", "{field_2}"],
"partial": 1,
},
{
"doctype": "{doctype_2}",
"filter_by": "{filter_by}",
"partial": 1,
},
{
"doctype": "{doctype_3}",
"strict": False,
},
{
"doctype": "{doctype_4}"
}
]
| 24.635294 | 107 | 0.685769 | [
"MIT"
] | VPS-Consultancy/custom_scripts | custom_scripts/hooks.py | 4,188 | Python |
from django.urls import path
from .views import DetailView
app_name = 'comments'
urlpatterns = [
path('<slug:model>/<slug:slug>', DetailView.as_view(), name='detail')
]
| 17.6 | 73 | 0.704545 | [
"MIT"
] | Aabhusan/Koora | src/comments/urls.py | 176 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_list_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops
class AreaRelatedTest(tf.test.TestCase):
def setUp(self):
boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]], dtype=float)
boxes2 = np.array(
[[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],
dtype=float,
)
self.boxlist1 = np_box_list.BoxList(boxes1)
self.boxlist2 = np_box_list.BoxList(boxes2)
def test_area(self):
areas = np_box_list_ops.area(self.boxlist1)
expected_areas = np.array([6.0, 5.0], dtype=float)
self.assertAllClose(expected_areas, areas)
def test_intersection(self):
intersection = np_box_list_ops.intersection(self.boxlist1, self.boxlist2)
expected_intersection = np.array(
[[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]], dtype=float
)
self.assertAllClose(intersection, expected_intersection)
def test_iou(self):
iou = np_box_list_ops.iou(self.boxlist1, self.boxlist2)
expected_iou = np.array(
[[2.0 / 16.0, 0.0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]],
dtype=float,
)
self.assertAllClose(iou, expected_iou)
def test_ioa(self):
boxlist1 = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
)
)
boxlist2 = np_box_list.BoxList(
np.array([[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32)
)
ioa21 = np_box_list_ops.ioa(boxlist2, boxlist1)
expected_ioa21 = np.array([[0.5, 0.0], [1.0, 1.0]], dtype=np.float32)
self.assertAllClose(ioa21, expected_ioa21)
def test_scale(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
)
)
boxlist_scaled = np_box_list_ops.scale(boxlist, 2.0, 3.0)
expected_boxlist_scaled = np_box_list.BoxList(
np.array([[0.5, 0.75, 1.5, 2.25], [0.0, 0.0, 1.0, 2.25]], dtype=np.float32)
)
self.assertAllClose(expected_boxlist_scaled.get(), boxlist_scaled.get())
def test_clip_to_window(self):
boxlist = np_box_list.BoxList(
np.array(
[
[0.25, 0.25, 0.75, 0.75],
[0.0, 0.0, 0.5, 0.75],
[-0.2, -0.3, 0.7, 1.5],
],
dtype=np.float32,
)
)
boxlist_clipped = np_box_list_ops.clip_to_window(boxlist, [0.0, 0.0, 1.0, 1.0])
expected_boxlist_clipped = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75], [0.0, 0.0, 0.7, 1.0]],
dtype=np.float32,
)
)
self.assertAllClose(expected_boxlist_clipped.get(), boxlist_clipped.get())
def test_prune_outside_window(self):
boxlist = np_box_list.BoxList(
np.array(
[
[0.25, 0.25, 0.75, 0.75],
[0.0, 0.0, 0.5, 0.75],
[-0.2, -0.3, 0.7, 1.5],
],
dtype=np.float32,
)
)
boxlist_pruned, _ = np_box_list_ops.prune_outside_window(
boxlist, [0.0, 0.0, 1.0, 1.0]
)
expected_boxlist_pruned = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
)
)
self.assertAllClose(expected_boxlist_pruned.get(), boxlist_pruned.get())
def test_concatenate(self):
boxlist1 = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
)
)
boxlist2 = np_box_list.BoxList(
np.array([[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32)
)
boxlists = [boxlist1, boxlist2]
boxlist_concatenated = np_box_list_ops.concatenate(boxlists)
boxlist_concatenated_expected = np_box_list.BoxList(
np.array(
[
[0.25, 0.25, 0.75, 0.75],
[0.0, 0.0, 0.5, 0.75],
[0.5, 0.25, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
],
dtype=np.float32,
)
)
self.assertAllClose(
boxlist_concatenated_expected.get(), boxlist_concatenated.get()
)
def test_change_coordinate_frame(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
)
)
boxlist_coord = np_box_list_ops.change_coordinate_frame(
boxlist, np.array([0, 0, 0.5, 0.5], dtype=np.float32)
)
expected_boxlist_coord = np_box_list.BoxList(
np.array([[0.5, 0.5, 1.5, 1.5], [0, 0, 1.0, 1.5]], dtype=np.float32)
)
self.assertAllClose(boxlist_coord.get(), expected_boxlist_coord.get())
def test_filter_scores_greater_than(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
)
)
boxlist.add_field("scores", np.array([0.8, 0.2], np.float32))
boxlist_greater = np_box_list_ops.filter_scores_greater_than(boxlist, 0.5)
expected_boxlist_greater = np_box_list.BoxList(
np.array([[0.25, 0.25, 0.75, 0.75]], dtype=np.float32)
)
self.assertAllClose(boxlist_greater.get(), expected_boxlist_greater.get())
class GatherOpsTest(tf.test.TestCase):
def setUp(self):
boxes = np.array(
[[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],
dtype=float,
)
self.boxlist = np_box_list.BoxList(boxes)
self.boxlist.add_field("scores", np.array([0.5, 0.7, 0.9], dtype=float))
self.boxlist.add_field(
"labels",
np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=int),
)
def test_gather_with_out_of_range_indices(self):
indices = np.array([3, 1], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices)
def test_gather_with_invalid_multidimensional_indices(self):
indices = np.array([[0, 1], [1, 2]], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices)
def test_gather_without_fields_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
subboxlist = np_box_list_ops.gather(boxlist, indices)
expected_scores = np.array([0.9, 0.5, 0.7], dtype=float)
self.assertAllClose(expected_scores, subboxlist.get_field("scores"))
expected_boxes = np.array(
[[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]],
dtype=float,
)
self.assertAllClose(expected_boxes, subboxlist.get())
expected_labels = np.array(
[[0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0]], dtype=int
)
self.assertAllClose(expected_labels, subboxlist.get_field("labels"))
def test_gather_with_invalid_field_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices, "labels")
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices, ["objectness"])
def test_gather_with_fields_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
subboxlist = np_box_list_ops.gather(boxlist, indices, ["labels"])
self.assertFalse(subboxlist.has_field("scores"))
expected_boxes = np.array(
[[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]],
dtype=float,
)
self.assertAllClose(expected_boxes, subboxlist.get())
expected_labels = np.array(
[[0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0]], dtype=int
)
self.assertAllClose(expected_labels, subboxlist.get_field("labels"))
class SortByFieldTest(tf.test.TestCase):
def setUp(self):
boxes = np.array(
[[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],
dtype=float,
)
self.boxlist = np_box_list.BoxList(boxes)
self.boxlist.add_field("scores", np.array([0.5, 0.9, 0.4], dtype=float))
self.boxlist.add_field(
"labels",
np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=int),
)
def test_with_invalid_field(self):
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, "objectness")
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, "labels")
def test_with_invalid_sorting_order(self):
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, "scores", "Descending")
def test_with_descending_sorting(self):
sorted_boxlist = np_box_list_ops.sort_by_field(self.boxlist, "scores")
expected_boxes = np.array(
[[14.0, 14.0, 15.0, 15.0], [3.0, 4.0, 6.0, 8.0], [0.0, 0.0, 20.0, 20.0]],
dtype=float,
)
self.assertAllClose(expected_boxes, sorted_boxlist.get())
expected_scores = np.array([0.9, 0.5, 0.4], dtype=float)
self.assertAllClose(expected_scores, sorted_boxlist.get_field("scores"))
def test_with_ascending_sorting(self):
sorted_boxlist = np_box_list_ops.sort_by_field(
self.boxlist, "scores", np_box_list_ops.SortOrder.ASCEND
)
expected_boxes = np.array(
[[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],],
dtype=float,
)
self.assertAllClose(expected_boxes, sorted_boxlist.get())
expected_scores = np.array([0.4, 0.5, 0.9], dtype=float)
self.assertAllClose(expected_scores, sorted_boxlist.get_field("scores"))
class NonMaximumSuppressionTest(tf.test.TestCase):
def setUp(self):
self._boxes = np.array(
[
[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101],
],
dtype=float,
)
self._boxlist = np_box_list.BoxList(self._boxes)
def test_with_no_scores_field(self):
boxlist = np_box_list.BoxList(self._boxes)
max_output_size = 3
iou_threshold = 0.5
with self.assertRaises(ValueError):
np_box_list_ops.non_max_suppression(boxlist, max_output_size, iou_threshold)
def test_nms_disabled_max_output_size_equals_three(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field(
"scores", np.array([0.9, 0.75, 0.6, 0.95, 0.2, 0.3], dtype=float)
)
max_output_size = 3
iou_threshold = 1.0 # No NMS
expected_boxes = np.array(
[[0, 10, 1, 11], [0, 0, 1, 1], [0, 0.1, 1, 1.1]], dtype=float
)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field(
"scores", np.array([0.9, 0.75, 0.6, 0.95, 0.2, 0.3], dtype=float)
)
max_output_size = 3
iou_threshold = 0.5
expected_boxes = np.array(
[[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]], dtype=float
)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_at_most_two_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field(
"scores", np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3], dtype=float)
)
max_output_size = 2
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_at_most_thirty_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field(
"scores", np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3], dtype=float)
)
max_output_size = 30
iou_threshold = 0.5
expected_boxes = np.array(
[[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]], dtype=float
)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_from_ten_indentical_boxes(self):
boxes = np.array(10 * [[0, 0, 1, 1]], dtype=float)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field("scores", np.array(10 * [0.8]))
iou_threshold = 0.5
max_output_size = 3
expected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_different_iou_threshold(self):
boxes = np.array(
[
[0, 0, 20, 100],
[0, 0, 20, 80],
[200, 200, 210, 300],
[200, 200, 210, 250],
],
dtype=float,
)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field("scores", np.array([0.9, 0.8, 0.7, 0.6]))
max_output_size = 4
iou_threshold = 0.4
expected_boxes = np.array([[0, 0, 20, 100], [200, 200, 210, 300],], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
iou_threshold = 0.5
expected_boxes = np.array(
[[0, 0, 20, 100], [200, 200, 210, 300], [200, 200, 210, 250]], dtype=float
)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
iou_threshold = 0.8
expected_boxes = np.array(
[
[0, 0, 20, 100],
[0, 0, 20, 80],
[200, 200, 210, 300],
[200, 200, 210, 250],
],
dtype=float,
)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_multiclass_nms(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.2, 0.4, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0]],
dtype=np.float32,
)
)
scores = np.array(
[
[-0.2, 0.1, 0.5, -0.4, 0.3],
[0.7, -0.7, 0.6, 0.2, -0.9],
[0.4, 0.34, -0.9, 0.2, 0.31],
],
dtype=np.float32,
)
boxlist.add_field("scores", scores)
boxlist_clean = np_box_list_ops.multi_class_non_max_suppression(
boxlist, score_thresh=0.25, iou_thresh=0.1, max_output_size=3
)
scores_clean = boxlist_clean.get_field("scores")
classes_clean = boxlist_clean.get_field("classes")
boxes = boxlist_clean.get()
expected_scores = np.array([0.7, 0.6, 0.34, 0.31])
expected_classes = np.array([0, 2, 1, 4])
expected_boxes = np.array(
[
[0.4, 0.2, 0.8, 0.8],
[0.4, 0.2, 0.8, 0.8],
[0.6, 0.0, 1.0, 1.0],
[0.6, 0.0, 1.0, 1.0],
],
dtype=np.float32,
)
self.assertAllClose(scores_clean, expected_scores)
self.assertAllClose(classes_clean, expected_classes)
self.assertAllClose(boxes, expected_boxes)
if __name__ == "__main__":
tf.test.main()
| 36.743326 | 88 | 0.555941 | [
"MIT"
] | sethusaim/Automatic-Number-Plate-Recognition | base2designs/utils/np_box_list_ops_test.py | 17,894 | Python |