blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 3–616) | content_id (string, length 40) | detected_licenses (sequence, length 0–112) | license_type (string, 2 classes) | repo_name (string, length 5–115) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable ⌀) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable ⌀) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, length 3 to 10.2M) | authors (sequence, length 1) | author_id (string, length 1–132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c57c61d86c5572b1948a7a0502578e0809ad170e | e36472948f74fd5ed35fc64801a59db4efa27070 | /part_1/04_6_test.py | 174139579a5653b3edaa5a336c79a87ac1f5214f | [] | no_license | anton1k/python_crash_course | 051aad7c5a043830d8cc9e5fd314f568bf0f4a53 | 80f302074e5fef48fb40e72f7d79ab4b8658b38a | refs/heads/master | 2020-07-18T23:28:00.871466 | 2019-09-04T14:06:12 | 2019-09-04T14:06:12 | 206,333,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | square = list(range(1, 21, 2))
print(square)
for i in square:
print(i)
| [
"[email protected]"
] | |
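The rows in this table are individual records, one source file per row, with the file text in the `content` column. As a minimal illustrative sketch (not part of the original dump), assuming the rows have been exported to a local Parquet shard with exactly these column names, they could be filtered and previewed as below; the file name and size threshold are hypothetical:

```python
# Illustrative sketch only: the Parquet path and the thresholds are assumptions.
import pandas as pd

df = pd.read_parquet("rows-00000-of-00001.parquet")  # hypothetical local shard

# Keep permissively licensed, reasonably small files.
mask = (df["license_type"] == "permissive") & (df["length_bytes"] < 100_000)
subset = df.loc[mask, ["repo_name", "path", "star_events_count", "content"]]

for row in subset.itertuples(index=False):
    print(f"{row.repo_name}/{row.path} ({row.star_events_count} stars)")
    print(row.content[:200])  # preview the first 200 characters of the file
```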
3351932d3d3a75e35b6b1fcbd967fa8b054bd65b | 13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab | /home--tommy--mypy/mypy/lib/python2.7/site-packages/theano/sandbox/cuda/tests/test_tensor_op.py | cb9162354ac7fa9120cf4dd3b05d616e784e0f36 | [
"Unlicense"
] | permissive | tommybutler/mlearnpy2 | 8ec52bcd03208c9771d8d02ede8eaa91a95bda30 | 9e5d377d0242ac5eb1e82a357e6701095a8ca1ff | refs/heads/master | 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 | Unlicense | 2022-10-15T23:32:18 | 2018-01-22T23:27:10 | Python | UTF-8 | Python | false | false | 5,283 | py | """
This file tests tensor ops that should also operate on CudaNdarray.
"""
from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest
from nose_parameterized import parameterized
import numpy
import theano
from theano import tensor
import theano.tensor as T
import theano.tests.unittest_tools as utt
# Skip test if cuda_ndarray is not available.
import theano.sandbox.cuda as cuda
from theano.tensor.nnet.tests import test_conv3d2d
if cuda.cuda_available is False:
raise SkipTest('Optional package cuda disabled')
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
def test_shape_i():
x = cuda.ftensor3()
v = cuda.CudaNdarray(numpy.zeros((3, 4, 5), dtype='float32'))
f = theano.function([x], x.shape[1])
topo = f.maker.fgraph.toposort()
assert f(v) == 4
if theano.config.mode != 'FAST_COMPILE':
assert len(topo) == 1
assert isinstance(topo[0].op, T.opt.Shape_i)
def test_shape():
x = cuda.ftensor3()
v = cuda.CudaNdarray(numpy.zeros((3, 4, 5), dtype='float32'))
f = theano.function([x], x.shape)
topo = f.maker.fgraph.toposort()
assert numpy.all(f(v) == (3, 4, 5))
if theano.config.mode != 'FAST_COMPILE':
assert len(topo) == 4
assert isinstance(topo[0].op, T.opt.Shape_i)
assert isinstance(topo[1].op, T.opt.Shape_i)
assert isinstance(topo[2].op, T.opt.Shape_i)
assert isinstance(topo[3].op, T.opt.MakeVector)
def test_softmax_optimizations():
from theano.tensor.nnet.nnet import softmax, crossentropy_categorical_1hot
x = tensor.fmatrix('x')
one_of_n = tensor.lvector('one_of_n')
op = crossentropy_categorical_1hot
op(x, one_of_n)
fgraph = theano.gof.FunctionGraph(
[x, one_of_n],
[op(softmax(x), one_of_n)])
assert fgraph.outputs[0].owner.op == op
mode_with_gpu.optimizer.optimize(fgraph)
assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
assert fgraph.outputs[0].owner.inputs[0].owner.op == cuda.host_from_gpu
assert fgraph.outputs[0].owner.inputs[0].owner.inputs[0].owner.op == cuda.nnet.gpu_crossentropy_softmax_argmax_1hot_with_bias
def test_may_share_memory_cuda():
from theano.misc.may_share_memory import may_share_memory
a = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))
b = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))
na = numpy.zeros((3, 4))
nb = numpy.zeros((3, 4))
va = a.view()
vb = b.view()
ra = a.reshape((4, 3))
rb = b.reshape((4, 3))
    # can't test the transpose as setting ta._strides is not implemented
# manual transpose of a
# ta = a.reshape((4,3))
# ta._strides = (ta._strides[1],ta._strides[0])#not implemented
# elem_size=elem_size = numpy.zeros(0,dtype=a.dtype).dtype.itemsize
# ta.gpudata += ta.size*elem_size
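    # Each (a_, b_, rep) triple lists two objects and the expected result of may_share_memory for them, checked in both argument orders.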
for a_, b_, rep in [(a, a, True), (b, b, True), (a, b, False),
(a, na, False), (b, nb, False),
(na, b, False), (nb, a, False),
(a, va, True), (b, vb, True),
(va, b, False), (a, vb, False),
(a, ra, True), (b, rb, True),
(ra, b, False), (a, rb, False), ]:
assert may_share_memory(a_, b_) == rep
assert may_share_memory(b_, a_) == rep
    # Test that it raises an error when needed.
for a_, b_, rep in [(a, (0,), False), (a, 1, False), (a, None, False)]:
assert may_share_memory(a_, b_, False) == rep
assert may_share_memory(b_, a_, False) == rep
try:
may_share_memory(a_, b_)
raise Exception("An error was expected")
except TypeError:
pass
try:
may_share_memory(b_, a_)
raise Exception("An error was expected")
except TypeError:
pass
def test_deepcopy():
a = cuda.fmatrix()
a_v = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))
  # We force the C linker to check that we generate C code
mode = theano.Mode("c", mode_with_gpu.optimizer)
f = theano.function([a], a, mode=mode)
theano.printing.debugprint(f)
out = f(a_v)
assert out is not a_v
assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))
# We force the python linker as the default code should work for this op
mode = theano.Mode("py", mode_with_gpu.optimizer)
f = theano.function([a], a, mode=mode)
theano.printing.debugprint(f)
out = f(a_v)
assert out is not a_v
assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))
def test_get_diagonal_subtensor_view():
test_conv3d2d.test_get_diagonal_subtensor_view(wrap=cuda.CudaNdarray)
@parameterized.expand(('valid', 'full'), utt.custom_name_func)
def test_conv3d(border_mode):
test_conv3d2d.check_conv3d(border_mode=border_mode,
mode=mode_with_gpu,
shared=cuda.shared_constructor)
| [
"[email protected]"
] | |
1f54af48b0de5de3deb1326d6dfc2e3b9b08012e | 7246faf9a222269ce2612613f58dc5ff19091f10 | /baekjoon/3000~5999/4949_균형잡힌세상.py | 69e300ec26003ff839d8917a542427b2e7f68cc4 | [] | no_license | gusdn3477/Algorithm_Study | 87a2eb72a8488d9263a86db70dadc7944434d41d | 3fefe1dcb40122157845ffc542f41cb097711cc8 | refs/heads/main | 2023-08-30T12:18:21.412945 | 2021-09-28T13:00:11 | 2021-09-28T13:00:11 | 308,364,230 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | a = input()
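# Balanced-brackets check: read lines until a single '.', using the stack poc to verify '()' and '[]' are properly matched.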
while a != '.':
poc = []
for i in range(len(a)):
if a[i] == '(' or a[i] == '[':
poc.append(a[i])
if a[i] == ')':
if not poc or poc[-1] != '(':
poc.append(a[i])
break
if poc[-1] == '(':
poc.pop()
if a[i] == ']':
if not poc or poc[-1] != '[':
poc.append(a[i])
break
if poc[-1] == '[':
poc.pop()
if not poc:
print("yes")
else:
print("no")
poc.clear()
a = input() | [
"[email protected]"
] | |
170a083c957c7be6132d27953ebb3e394bf8b3e5 | 130a98632d2ab4c171503b79e455b7aa27a1dda4 | /models/research/object_detection/inputs_test.py | 10dd078873c538661d0f57fd9154cb10f2b0c150 | [
"Apache-2.0",
"MIT"
] | permissive | aboerzel/German_License_Plate_Recognition | d7fc0314295f5cf0c9d7ae9c93a795e3ef1c5787 | 6fc53292b1d3ce3c0340ce724c2c11c77e663d27 | refs/heads/master | 2023-01-30T18:08:37.339542 | 2023-01-07T07:41:36 | 2023-01-07T07:41:36 | 245,586,430 | 34 | 12 | MIT | 2023-01-07T07:41:37 | 2020-03-07T07:16:51 | Python | UTF-8 | Python | false | false | 75,857 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.tflearn.inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import unittest
from absl import logging
from absl.testing import parameterized
import numpy as np
import six
import tensorflow.compat.v1 as tf
from object_detection import inputs
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import test_case
from object_detection.utils import test_utils
from object_detection.utils import tf_version
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-import-not-at-top, g-importing-member
FLAGS = tf.flags.FLAGS
def _get_configs_for_model(model_name):
"""Returns configurations for model."""
fname = os.path.join(tf.resource_loader.get_data_files_path(),
'samples/configs/' + model_name + '.config')
label_map_path = os.path.join(tf.resource_loader.get_data_files_path(),
'data/pet_label_map.pbtxt')
data_path = os.path.join(tf.resource_loader.get_data_files_path(),
'test_data/pets_examples.record')
configs = config_util.get_configs_from_pipeline_file(fname)
override_dict = {
'train_input_path': data_path,
'eval_input_path': data_path,
'label_map_path': label_map_path
}
return config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
def _get_configs_for_model_sequence_example(model_name, frame_index=-1):
"""Returns configurations for model."""
fname = os.path.join(tf.resource_loader.get_data_files_path(),
'test_data/' + model_name + '.config')
label_map_path = os.path.join(tf.resource_loader.get_data_files_path(),
'data/snapshot_serengeti_label_map.pbtxt')
data_path = os.path.join(
tf.resource_loader.get_data_files_path(),
'test_data/snapshot_serengeti_sequence_examples.record')
configs = config_util.get_configs_from_pipeline_file(fname)
override_dict = {
'train_input_path': data_path,
'eval_input_path': data_path,
'label_map_path': label_map_path,
'frame_index': frame_index
}
return config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
def _make_initializable_iterator(dataset):
"""Creates an iterator, and initializes tables.
Args:
dataset: A `tf.data.Dataset` object.
Returns:
A `tf.data.Iterator`.
"""
iterator = tf.data.make_initializable_iterator(dataset)
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
return iterator
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only tests under TF2.X.')
class InputFnTest(test_case.TestCase, parameterized.TestCase):
def test_faster_rcnn_resnet50_train_input(self):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([1, None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([1],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
def test_faster_rcnn_resnet50_train_input_with_additional_channels(self):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
configs['train_input_config'].num_additional_channels = 2
configs['train_config'].retain_original_images = True
model_config.faster_rcnn.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([1, None, None, 5],
features[fields.InputDataFields.image].shape.as_list())
self.assertAllEqual(
[1, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([1],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
@parameterized.parameters(
{'eval_batch_size': 1},
{'eval_batch_size': 8}
)
def test_faster_rcnn_resnet50_eval_input(self, eval_batch_size=1):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_context_rcnn_resnet50_train_input_with_sequence_example(
self, train_batch_size=8):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model_sequence_example(
'context_rcnn_camera_trap')
model_config = configs['model']
train_config = configs['train_config']
train_config.batch_size = train_batch_size
train_input_fn = inputs.create_train_input_fn(
train_config, configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([train_batch_size, 640, 640, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([train_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[train_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[train_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[train_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[train_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
def test_context_rcnn_resnet50_eval_input_with_sequence_example(
self, eval_batch_size=8):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model_sequence_example(
'context_rcnn_camera_trap')
model_config = configs['model']
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 640, 640, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 640, 640, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
def test_context_rcnn_resnet50_eval_input_with_sequence_example_image_id_list(
self, eval_batch_size=8):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model_sequence_example(
'context_rcnn_camera_trap')
model_config = configs['model']
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_config = configs['eval_input_configs'][0]
eval_input_config.load_context_image_ids = True
eval_input_fn = inputs.create_eval_input_fn(
eval_config, eval_input_config, model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 640, 640, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 640, 640, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
def test_context_rcnn_resnet50_train_input_with_sequence_example_frame_index(
self, train_batch_size=8):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model_sequence_example(
'context_rcnn_camera_trap', frame_index=2)
model_config = configs['model']
train_config = configs['train_config']
train_config.batch_size = train_batch_size
train_input_fn = inputs.create_train_input_fn(
train_config, configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([train_batch_size, 640, 640, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([train_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[train_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[train_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[train_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[train_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
def test_ssd_inceptionV2_train_input(self):
"""Tests the training input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
batch_size = configs['train_config'].batch_size
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([batch_size, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[batch_size],
labels[fields.InputDataFields.num_groundtruth_boxes].shape.as_list())
self.assertEqual(tf.int32,
labels[fields.InputDataFields.num_groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 100, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[batch_size, 100],
labels[
fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
@parameterized.parameters(
{'eval_batch_size': 1},
{'eval_batch_size': 8}
)
def test_ssd_inceptionV2_eval_input(self, eval_batch_size=1):
"""Tests the eval input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 300, 300, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[
fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_ssd_inceptionV2_eval_input_with_additional_channels(
self, eval_batch_size=1):
"""Tests the eval input function for SSDInceptionV2 with additional channel.
Args:
eval_batch_size: Batch size for eval set.
"""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
configs['eval_input_configs'][0].num_additional_channels = 1
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_config.retain_original_image_additional_channels = True
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 300, 300, 4],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 300, 300, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size, 300, 300, 1], features[
fields.InputDataFields.image_additional_channels].shape.as_list())
self.assertEqual(
tf.uint8,
features[fields.InputDataFields.image_additional_channels].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(tf.bool,
labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(tf.int32,
labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_predict_input(self):
"""Tests the predict input function."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
predict_input_fn = inputs.create_predict_input_fn(
model_config=configs['model'],
predict_input_config=configs['eval_input_configs'][0])
serving_input_receiver = predict_input_fn()
image = serving_input_receiver.features[fields.InputDataFields.image]
receiver_tensors = serving_input_receiver.receiver_tensors[
inputs.SERVING_FED_EXAMPLE_KEY]
self.assertEqual([1, 300, 300, 3], image.shape.as_list())
self.assertEqual(tf.float32, image.dtype)
self.assertEqual(tf.string, receiver_tensors.dtype)
def test_predict_input_with_additional_channels(self):
"""Tests the predict input function with additional channels."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['eval_input_configs'][0].num_additional_channels = 2
predict_input_fn = inputs.create_predict_input_fn(
model_config=configs['model'],
predict_input_config=configs['eval_input_configs'][0])
serving_input_receiver = predict_input_fn()
image = serving_input_receiver.features[fields.InputDataFields.image]
receiver_tensors = serving_input_receiver.receiver_tensors[
inputs.SERVING_FED_EXAMPLE_KEY]
# RGB + 2 additional channels = 5 channels.
self.assertEqual([1, 300, 300, 5], image.shape.as_list())
self.assertEqual(tf.float32, image.dtype)
self.assertEqual(tf.string, receiver_tensors.dtype)
def test_error_with_bad_train_config(self):
"""Tests that a TypeError is raised with improper train config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['eval_config'], # Expecting `TrainConfig`.
train_input_config=configs['train_input_config'],
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_input_config(self):
"""Tests that a TypeError is raised with improper train input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_model_config(self):
"""Tests that a TypeError is raised with improper train model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['train_input_config'],
model_config=configs['train_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_eval_config(self):
"""Tests that a TypeError is raised with improper eval config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['train_config'], # Expecting `EvalConfig`.
eval_input_config=configs['eval_input_configs'][0],
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_input_config(self):
"""Tests that a TypeError is raised with improper eval input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_model_config(self):
"""Tests that a TypeError is raised with improper eval model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['eval_input_configs'][0],
model_config=configs['eval_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
eval_input_fn()
def test_output_equal_in_replace_empty_string_with_random_number(self):
string_placeholder = tf.placeholder(tf.string, shape=[])
replaced_string = inputs._replace_empty_string_with_random_number(
string_placeholder)
test_string = b'hello world'
feed_dict = {string_placeholder: test_string}
with self.test_session() as sess:
out_string = sess.run(replaced_string, feed_dict=feed_dict)
self.assertEqual(test_string, out_string)
def test_output_is_integer_in_replace_empty_string_with_random_number(self):
string_placeholder = tf.placeholder(tf.string, shape=[])
replaced_string = inputs._replace_empty_string_with_random_number(
string_placeholder)
empty_string = ''
feed_dict = {string_placeholder: empty_string}
with self.test_session() as sess:
out_string = sess.run(replaced_string, feed_dict=feed_dict)
is_integer = True
try:
      # Test whether out_string is a string which represents an integer; the
      # casting below will throw an error if out_string is not castable to int.
int(out_string)
except ValueError:
is_integer = False
self.assertTrue(is_integer)
def test_force_no_resize(self):
"""Tests the functionality of force_no_reisze option."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['eval_config'].force_no_resize = True
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['eval_input_configs'][0],
model_config=configs['model']
)
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['train_input_config'],
model_config=configs['model']
)
features_train, _ = _make_initializable_iterator(
train_input_fn()).get_next()
features_eval, _ = _make_initializable_iterator(
eval_input_fn()).get_next()
images_train, images_eval = features_train['image'], features_eval['image']
self.assertEqual([1, None, None, 3], images_eval.shape.as_list())
self.assertEqual([24, 300, 300, 3], images_train.shape.as_list())
class DataAugmentationFnTest(test_case.TestCase):
def test_apply_image_and_box_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
return (augmented_tensor_dict[fields.InputDataFields.image],
augmented_tensor_dict[fields.InputDataFields.
groundtruth_boxes])
image, groundtruth_boxes = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image.shape, [20, 20, 3])
self.assertAllClose(groundtruth_boxes, [[10, 10, 20, 20]])
def test_apply_image_and_box_augmentation_with_scores(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1.0], np.float32)),
fields.InputDataFields.groundtruth_weights:
tf.constant(np.array([0.8], np.float32)),
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
return (augmented_tensor_dict[fields.InputDataFields.image],
augmented_tensor_dict[fields.InputDataFields.groundtruth_boxes],
augmented_tensor_dict[fields.InputDataFields.groundtruth_classes],
augmented_tensor_dict[fields.InputDataFields.groundtruth_weights])
(image, groundtruth_boxes,
groundtruth_classes, groundtruth_weights) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image.shape, [20, 20, 3])
self.assertAllClose(groundtruth_boxes, [[10, 10, 20, 20]])
self.assertAllClose(groundtruth_classes.shape, [1.0])
self.assertAllClose(groundtruth_weights, [0.8])
def test_include_masks_in_data_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
})
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.zeros([2, 10, 10], np.uint8))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
return (augmented_tensor_dict[fields.InputDataFields.image],
augmented_tensor_dict[fields.InputDataFields.
groundtruth_instance_masks])
image, masks = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image.shape, [20, 20, 3])
self.assertAllEqual(masks.shape, [2, 20, 20])
def test_include_keypoints_in_data_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant(np.array([[[0.5, 1.0], [0.5, 0.5]]], np.float32))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
return (augmented_tensor_dict[fields.InputDataFields.image],
augmented_tensor_dict[fields.InputDataFields.groundtruth_boxes],
augmented_tensor_dict[fields.InputDataFields.
groundtruth_keypoints])
image, boxes, keypoints = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image.shape, [20, 20, 3])
self.assertAllClose(boxes, [[10, 10, 20, 20]])
self.assertAllClose(keypoints, [[[10, 20], [10, 10]]])
def _fake_model_preprocessor_fn(image):
return (image, tf.expand_dims(tf.shape(image)[1:], axis=0))
def _fake_image_resizer_fn(image, mask):
return (image, mask, tf.shape(image))
def _fake_resize50_preprocess_fn(image):
image = image[0]
image, shape = preprocessor.resize_to_range(
image, min_dimension=50, max_dimension=50, pad_to_max_dimension=True)
return tf.expand_dims(image, 0), tf.expand_dims(shape, axis=0)
class DataTransformationFnTest(test_case.TestCase, parameterized.TestCase):
def test_combine_additional_channels_if_present(self):
image = np.random.rand(4, 4, 3).astype(np.float32)
additional_channels = np.random.rand(4, 4, 2).astype(np.float32)
def graph_fn(image, additional_channels):
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.image_additional_channels: additional_channels,
fields.InputDataFields.groundtruth_classes:
tf.constant([1, 1], tf.int32)
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=1)
out_tensors = input_transformation_fn(tensor_dict=tensor_dict)
return out_tensors[fields.InputDataFields.image]
out_image = self.execute_cpu(graph_fn, [image, additional_channels])
self.assertAllEqual(out_image.dtype, tf.float32)
self.assertAllEqual(out_image.shape, [4, 4, 5])
self.assertAllClose(out_image, np.concatenate((image, additional_channels),
axis=2))
def test_use_multiclass_scores_when_present(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image: tf.constant(np.random.rand(4, 4, 3).
astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]],
np.float32)),
fields.InputDataFields.multiclass_scores:
tf.constant(np.array([0.2, 0.3, 0.5, 0.1, 0.6, 0.3], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32))
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=3, use_multiclass_scores=True)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return transformed_inputs[fields.InputDataFields.groundtruth_classes]
groundtruth_classes = self.execute_cpu(graph_fn, [])
self.assertAllClose(
np.array([[0.2, 0.3, 0.5], [0.1, 0.6, 0.3]], np.float32),
groundtruth_classes)
@unittest.skipIf(tf_version.is_tf2(), ('Skipping due to different behaviour '
'in TF 2.X'))
def test_use_multiclass_scores_when_not_present(self):
def graph_fn():
zero_num_elements = tf.random.uniform([], minval=0, maxval=1,
dtype=tf.int32)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]],
np.float32)),
fields.InputDataFields.multiclass_scores: tf.zeros(zero_num_elements),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32))
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=3, use_multiclass_scores=True)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return transformed_inputs[fields.InputDataFields.groundtruth_classes]
groundtruth_classes = self.execute_cpu(graph_fn, [])
self.assertAllClose(
np.array([[0, 1, 0], [0, 0, 1]], np.float32),
groundtruth_classes)
@parameterized.parameters(
{'labeled_classes': [1, 2]},
{'labeled_classes': []},
{'labeled_classes': [1, -1, 2]} # -1 denotes an unrecognized class
)
def test_use_labeled_classes(self, labeled_classes):
def compute_fn(image, groundtruth_boxes, groundtruth_classes,
groundtruth_labeled_classes):
tensor_dict = {
fields.InputDataFields.image:
image,
fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
fields.InputDataFields.groundtruth_classes:
groundtruth_classes,
fields.InputDataFields.groundtruth_labeled_classes:
groundtruth_labeled_classes
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=3)
return input_transformation_fn(tensor_dict=tensor_dict)
image = np.random.rand(4, 4, 3).astype(np.float32)
groundtruth_boxes = np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)
groundtruth_classes = np.array([1, 2], np.int32)
groundtruth_labeled_classes = np.array(labeled_classes, np.int32)
transformed_inputs = self.execute_cpu(compute_fn, [
image, groundtruth_boxes, groundtruth_classes,
groundtruth_labeled_classes
])
if labeled_classes == [1, 2] or labeled_classes == [1, -1, 2]:
transformed_labeled_classes = [1, 1, 0]
elif not labeled_classes:
transformed_labeled_classes = [1, 1, 1]
else:
logging.exception('Unexpected labeled_classes %r', labeled_classes)
self.assertAllEqual(
np.array(transformed_labeled_classes, np.float32),
transformed_inputs[fields.InputDataFields.groundtruth_labeled_classes])
def test_returns_correct_class_label_encodings(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_classes],
transformed_inputs[fields.InputDataFields.
groundtruth_confidences])
(groundtruth_classes, groundtruth_confidences) = self.execute_cpu(graph_fn,
[])
self.assertAllClose(groundtruth_classes, [[0, 0, 1], [1, 0, 0]])
self.assertAllClose(groundtruth_confidences, [[0, 0, 1], [1, 0, 0]])
def test_returns_correct_labels_with_unrecognized_class(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(
np.array([[0, 0, 1, 1], [.2, .2, 4, 4], [.5, .5, 1, 1]],
np.float32)),
fields.InputDataFields.groundtruth_area:
tf.constant(np.array([.5, .4, .3])),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, -1, 1], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant(
np.array([[[.1, .1]], [[.2, .2]], [[.5, .5]]],
np.float32)),
fields.InputDataFields.groundtruth_keypoint_visibilities:
tf.constant([[True, True], [False, False], [True, True]]),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.random.rand(3, 4, 4).astype(np.float32)),
fields.InputDataFields.groundtruth_is_crowd:
tf.constant([False, True, False]),
fields.InputDataFields.groundtruth_difficult:
tf.constant(np.array([0, 0, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_classes],
transformed_inputs[fields.InputDataFields.num_groundtruth_boxes],
transformed_inputs[fields.InputDataFields.groundtruth_area],
transformed_inputs[fields.InputDataFields.
groundtruth_confidences],
transformed_inputs[fields.InputDataFields.groundtruth_boxes],
transformed_inputs[fields.InputDataFields.groundtruth_keypoints],
transformed_inputs[fields.InputDataFields.
groundtruth_keypoint_visibilities],
transformed_inputs[fields.InputDataFields.
groundtruth_instance_masks],
transformed_inputs[fields.InputDataFields.groundtruth_is_crowd],
transformed_inputs[fields.InputDataFields.groundtruth_difficult])
(groundtruth_classes, num_groundtruth_boxes, groundtruth_area,
groundtruth_confidences, groundtruth_boxes, groundtruth_keypoints,
groundtruth_keypoint_visibilities, groundtruth_instance_masks,
groundtruth_is_crowd, groundtruth_difficult) = self.execute_cpu(graph_fn,
[])
self.assertAllClose(groundtruth_classes, [[0, 0, 1], [1, 0, 0]])
self.assertAllEqual(num_groundtruth_boxes, 2)
self.assertAllClose(groundtruth_area, [.5, .3])
self.assertAllEqual(groundtruth_confidences, [[0, 0, 1], [1, 0, 0]])
self.assertAllClose(groundtruth_boxes, [[0, 0, 1, 1], [.5, .5, 1, 1]])
self.assertAllClose(groundtruth_keypoints, [[[.1, .1]], [[.5, .5]]])
self.assertAllEqual(groundtruth_keypoint_visibilities,
[[True, True], [True, True]])
self.assertAllEqual(groundtruth_instance_masks.shape, [2, 4, 4])
self.assertAllEqual(groundtruth_is_crowd, [False, False])
self.assertAllEqual(groundtruth_difficult, [0, 1])
def test_returns_correct_merged_boxes(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
merge_multiple_boxes=True)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_boxes],
transformed_inputs[fields.InputDataFields.groundtruth_classes],
transformed_inputs[fields.InputDataFields.
groundtruth_confidences],
transformed_inputs[fields.InputDataFields.num_groundtruth_boxes])
(groundtruth_boxes, groundtruth_classes, groundtruth_confidences,
num_groundtruth_boxes) = self.execute_cpu(graph_fn, [])
self.assertAllClose(
groundtruth_boxes,
[[.5, .5, 1., 1.]])
self.assertAllClose(
groundtruth_classes,
[[1, 0, 1]])
self.assertAllClose(
groundtruth_confidences,
[[1, 0, 1]])
self.assertAllClose(
num_groundtruth_boxes,
1)
def test_returns_correct_groundtruth_confidences_when_input_present(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32)),
fields.InputDataFields.groundtruth_confidences:
tf.constant(np.array([1.0, -1.0], np.float32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_classes],
transformed_inputs[fields.InputDataFields.
groundtruth_confidences])
groundtruth_classes, groundtruth_confidences = self.execute_cpu(graph_fn,
[])
self.assertAllClose(
groundtruth_classes,
[[0, 0, 1], [1, 0, 0]])
self.assertAllClose(
groundtruth_confidences,
[[0, 0, 1], [-1, 0, 0]])
def test_returns_resized_masks(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.random.rand(2, 4, 4).astype(np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32)),
fields.InputDataFields.original_image_spatial_shape:
tf.constant(np.array([4, 4], np.int32))
}
def fake_image_resizer_fn(image, masks=None):
resized_image = tf.image.resize_images(image, [8, 8])
results = [resized_image]
if masks is not None:
resized_masks = tf.transpose(
tf.image.resize_images(tf.transpose(masks, [1, 2, 0]), [8, 8]),
[2, 0, 1])
results.append(resized_masks)
results.append(tf.shape(resized_image))
return results
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=fake_image_resizer_fn,
num_classes=num_classes,
retain_original_image=True)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.original_image],
transformed_inputs[fields.InputDataFields.
original_image_spatial_shape],
transformed_inputs[fields.InputDataFields.
groundtruth_instance_masks])
(original_image, original_image_shape,
groundtruth_instance_masks) = self.execute_cpu(graph_fn, [])
self.assertEqual(original_image.dtype, np.uint8)
self.assertAllEqual(original_image_shape, [4, 4])
self.assertAllEqual(original_image.shape, [8, 8, 3])
self.assertAllEqual(groundtruth_instance_masks.shape, [2, 8, 8])
def test_applies_model_preprocess_fn_to_image_tensor(self):
np_image = np.random.randint(256, size=(4, 4, 3))
def graph_fn(image):
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def fake_model_preprocessor_fn(image):
return (image / 255., tf.expand_dims(tf.shape(image)[1:], axis=0))
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.image],
transformed_inputs[fields.InputDataFields.true_image_shape])
image, true_image_shape = self.execute_cpu(graph_fn, [np_image])
self.assertAllClose(image, np_image / 255.)
self.assertAllClose(true_image_shape, [4, 4, 3])
def test_applies_data_augmentation_fn_to_tensor_dict(self):
np_image = np.random.randint(256, size=(4, 4, 3))
def graph_fn(image):
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def add_one_data_augmentation_fn(tensor_dict):
return {key: value + 1 for key, value in tensor_dict.items()}
num_classes = 4
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=add_one_data_augmentation_fn)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.image],
transformed_inputs[fields.InputDataFields.groundtruth_classes])
image, groundtruth_classes = self.execute_cpu(graph_fn, [np_image])
self.assertAllEqual(image, np_image + 1)
self.assertAllEqual(
groundtruth_classes,
[[0, 0, 0, 1], [0, 1, 0, 0]])
def test_applies_data_augmentation_fn_before_model_preprocess_fn(self):
np_image = np.random.randint(256, size=(4, 4, 3))
def graph_fn(image):
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def mul_two_model_preprocessor_fn(image):
return (image * 2, tf.expand_dims(tf.shape(image)[1:], axis=0))
def add_five_to_image_data_augmentation_fn(tensor_dict):
tensor_dict[fields.InputDataFields.image] += 5
return tensor_dict
num_classes = 4
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=mul_two_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=add_five_to_image_data_augmentation_fn)
transformed_inputs = input_transformation_fn(tensor_dict)
return transformed_inputs[fields.InputDataFields.image]
image = self.execute_cpu(graph_fn, [np_image])
self.assertAllEqual(image, (np_image + 5) * 2)
def test_resize_with_padding(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant([[[0.1, 0.2]], [[0.3, 0.4]]]),
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_resize50_preprocess_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_boxes],
transformed_inputs[fields.InputDataFields.groundtruth_keypoints])
groundtruth_boxes, groundtruth_keypoints = self.execute_cpu(graph_fn, [])
self.assertAllClose(
groundtruth_boxes,
[[.5, .25, 1., .5], [.0, .0, .5, .25]])
self.assertAllClose(
groundtruth_keypoints,
[[[.1, .1]], [[.3, .2]]])
def test_groundtruth_keypoint_weights(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant([[[0.1, 0.2], [0.3, 0.4]],
[[0.5, 0.6], [0.7, 0.8]]]),
fields.InputDataFields.groundtruth_keypoint_visibilities:
tf.constant([[True, False], [True, True]]),
}
num_classes = 3
keypoint_type_weight = [1.0, 2.0]
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_resize50_preprocess_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
keypoint_type_weight=keypoint_type_weight)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_keypoints],
transformed_inputs[fields.InputDataFields.
groundtruth_keypoint_weights])
groundtruth_keypoints, groundtruth_keypoint_weights = self.execute_cpu(
graph_fn, [])
self.assertAllClose(
groundtruth_keypoints,
[[[0.1, 0.1], [0.3, 0.2]],
[[0.5, 0.3], [0.7, 0.4]]])
self.assertAllClose(
groundtruth_keypoint_weights,
[[1.0, 0.0], [1.0, 2.0]])
def test_groundtruth_keypoint_weights_default(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant([[[0.1, 0.2], [0.3, 0.4]],
[[0.5, 0.6], [0.7, 0.8]]]),
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_resize50_preprocess_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_keypoints],
transformed_inputs[fields.InputDataFields.
groundtruth_keypoint_weights])
groundtruth_keypoints, groundtruth_keypoint_weights = self.execute_cpu(
graph_fn, [])
self.assertAllClose(
groundtruth_keypoints,
[[[0.1, 0.1], [0.3, 0.2]],
[[0.5, 0.3], [0.7, 0.4]]])
self.assertAllClose(
groundtruth_keypoint_weights,
[[1.0, 1.0], [1.0, 1.0]])
def test_groundtruth_dense_pose(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32)),
fields.InputDataFields.groundtruth_dp_num_points:
tf.constant([0, 2], dtype=tf.int32),
fields.InputDataFields.groundtruth_dp_part_ids:
tf.constant([[0, 0], [4, 23]], dtype=tf.int32),
fields.InputDataFields.groundtruth_dp_surface_coords:
tf.constant([[[0., 0., 0., 0.,], [0., 0., 0., 0.,]],
[[0.1, 0.2, 0.3, 0.4,], [0.6, 0.8, 0.6, 0.7,]]],
dtype=tf.float32),
}
num_classes = 1
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_resize50_preprocess_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
transformed_dp_num_points = transformed_inputs[
fields.InputDataFields.groundtruth_dp_num_points]
transformed_dp_part_ids = transformed_inputs[
fields.InputDataFields.groundtruth_dp_part_ids]
transformed_dp_surface_coords = transformed_inputs[
fields.InputDataFields.groundtruth_dp_surface_coords]
return (transformed_dp_num_points, transformed_dp_part_ids,
transformed_dp_surface_coords)
dp_num_points, dp_part_ids, dp_surface_coords = self.execute_cpu(
graph_fn, [])
self.assertAllEqual(dp_num_points, [0, 2])
self.assertAllEqual(dp_part_ids, [[0, 0], [4, 23]])
self.assertAllClose(
dp_surface_coords,
[[[0., 0., 0., 0.,], [0., 0., 0., 0.,]],
[[0.1, 0.1, 0.3, 0.4,], [0.6, 0.4, 0.6, 0.7,]]])
class PadInputDataToStaticShapesFnTest(test_case.TestCase):
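  # Tests for inputs.pad_input_data_to_static_shapes, which pads/clips dynamic
  # tensors (boxes, classes, keypoints, ...) to fixed maximum sizes so every
  # output shape is fully static (e.g. for TPU execution, which requires it).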
def test_pad_images_boxes_and_classes(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.random.uniform([3, 3, 3]),
fields.InputDataFields.groundtruth_boxes:
tf.random.uniform([2, 4]),
fields.InputDataFields.groundtruth_classes:
tf.random.uniform([2, 3], minval=0, maxval=2, dtype=tf.int32),
fields.InputDataFields.true_image_shape:
tf.constant([3, 3, 3]),
fields.InputDataFields.original_image_spatial_shape:
tf.constant([3, 3])
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.true_image_shape]
.shape.as_list(), [3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.original_image_spatial_shape]
.shape.as_list(), [2])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_boxes]
.shape.as_list(), [3, 4])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_classes]
.shape.as_list(), [3, 3])
def test_clip_boxes_and_classes(self):
def graph_fn():
input_tensor_dict = {
fields.InputDataFields.groundtruth_boxes:
tf.random.uniform([5, 4]),
fields.InputDataFields.groundtruth_classes:
tf.random.uniform([2, 3], maxval=10, dtype=tf.int32),
fields.InputDataFields.num_groundtruth_boxes:
tf.constant(5)
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
return (padded_tensor_dict[fields.InputDataFields.groundtruth_boxes],
padded_tensor_dict[fields.InputDataFields.groundtruth_classes],
padded_tensor_dict[fields.InputDataFields.num_groundtruth_boxes])
(groundtruth_boxes, groundtruth_classes,
num_groundtruth_boxes) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(groundtruth_boxes.shape, [3, 4])
self.assertAllEqual(groundtruth_classes.shape, [3, 3])
self.assertEqual(num_groundtruth_boxes, 3)
def test_images_and_additional_channels(self):
input_tensor_dict = {
fields.InputDataFields.image:
test_utils.image_with_dynamic_shape(4, 3, 5),
fields.InputDataFields.image_additional_channels:
test_utils.image_with_dynamic_shape(4, 3, 2),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
# pad_input_data_to_static_shape assumes that image is already concatenated
# with additional channels.
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 5])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image_additional_channels]
.shape.as_list(), [5, 6, 2])
def test_images_and_additional_channels_errors(self):
input_tensor_dict = {
fields.InputDataFields.image:
test_utils.image_with_dynamic_shape(10, 10, 3),
fields.InputDataFields.image_additional_channels:
test_utils.image_with_dynamic_shape(10, 10, 2),
fields.InputDataFields.original_image:
test_utils.image_with_dynamic_shape(10, 10, 3),
}
with self.assertRaises(ValueError):
_ = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
def test_gray_images(self):
input_tensor_dict = {
fields.InputDataFields.image:
test_utils.image_with_dynamic_shape(4, 4, 1),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 1])
def test_gray_images_and_additional_channels(self):
input_tensor_dict = {
fields.InputDataFields.image:
test_utils.image_with_dynamic_shape(4, 4, 3),
fields.InputDataFields.image_additional_channels:
test_utils.image_with_dynamic_shape(4, 4, 2),
}
# pad_input_data_to_static_shape assumes that image is already concatenated
# with additional channels.
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image_additional_channels]
.shape.as_list(), [5, 6, 2])
def test_keypoints(self):
keypoints = test_utils.keypoints_with_dynamic_shape(10, 16, 4)
visibilities = tf.cast(tf.random.uniform(tf.shape(keypoints)[:-1], minval=0,
maxval=2, dtype=tf.int32), tf.bool)
input_tensor_dict = {
fields.InputDataFields.groundtruth_keypoints:
test_utils.keypoints_with_dynamic_shape(10, 16, 4),
fields.InputDataFields.groundtruth_keypoint_visibilities:
visibilities
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_keypoints]
.shape.as_list(), [3, 16, 4])
self.assertAllEqual(
padded_tensor_dict[
fields.InputDataFields.groundtruth_keypoint_visibilities]
.shape.as_list(), [3, 16])
def test_dense_pose(self):
input_tensor_dict = {
fields.InputDataFields.groundtruth_dp_num_points:
tf.constant([0, 2], dtype=tf.int32),
fields.InputDataFields.groundtruth_dp_part_ids:
tf.constant([[0, 0], [4, 23]], dtype=tf.int32),
fields.InputDataFields.groundtruth_dp_surface_coords:
tf.constant([[[0., 0., 0., 0.,], [0., 0., 0., 0.,]],
[[0.1, 0.2, 0.3, 0.4,], [0.6, 0.8, 0.6, 0.7,]]],
dtype=tf.float32),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=1,
spatial_image_shape=[128, 128],
max_dp_points=200)
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_dp_num_points]
.shape.as_list(), [3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids]
.shape.as_list(), [3, 200])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_dp_surface_coords]
.shape.as_list(), [3, 200, 4])
def test_pad_input_data_to_static_shapes_for_trackid(self):
input_tensor_dict = {
fields.InputDataFields.groundtruth_track_ids:
tf.constant([0, 1], dtype=tf.int32),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=1,
spatial_image_shape=[128, 128])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_track_ids]
.shape.as_list(), [3])
def test_context_features(self):
context_memory_size = 8
context_feature_length = 10
max_num_context_features = 20
def graph_fn():
input_tensor_dict = {
fields.InputDataFields.context_features:
tf.ones([context_memory_size, context_feature_length]),
fields.InputDataFields.context_feature_length:
tf.constant(context_feature_length)
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6],
max_num_context_features=max_num_context_features,
context_feature_length=context_feature_length)
self.assertAllEqual(
padded_tensor_dict[
fields.InputDataFields.context_features].shape.as_list(),
[max_num_context_features, context_feature_length])
return padded_tensor_dict[fields.InputDataFields.valid_context_size]
valid_context_size = self.execute_cpu(graph_fn, [])
self.assertEqual(valid_context_size, context_memory_size)
class NegativeSizeTest(test_case.TestCase):
  """Test for inputs and related functions."""
def test_negative_size_error(self):
"""Test that error is raised for negative size boxes."""
def graph_fn():
tensors = {
fields.InputDataFields.image: tf.zeros((128, 128, 3)),
fields.InputDataFields.groundtruth_classes:
tf.constant([1, 1], tf.int32),
fields.InputDataFields.groundtruth_boxes:
tf.constant([[0.5, 0.5, 0.4, 0.5]], tf.float32)
}
tensors = inputs.transform_input_data(
tensors, _fake_model_preprocessor_fn, _fake_image_resizer_fn,
num_classes=10)
return tensors[fields.InputDataFields.groundtruth_boxes]
with self.assertRaises(tf.errors.InvalidArgumentError):
self.execute_cpu(graph_fn, [])
def test_negative_size_no_assert(self):
"""Test that negative size boxes are filtered out without assert.
This test simulates the behaviour when we run on TPU and Assert ops are
not supported.
"""
tensors = {
fields.InputDataFields.image: tf.zeros((128, 128, 3)),
fields.InputDataFields.groundtruth_classes:
tf.constant([1, 1], tf.int32),
fields.InputDataFields.groundtruth_boxes:
tf.constant([[0.5, 0.5, 0.4, 0.5], [0.5, 0.5, 0.6, 0.6]],
tf.float32)
}
with mock.patch.object(tf, 'Assert') as tf_assert:
tf_assert.return_value = tf.no_op()
tensors = inputs.transform_input_data(
tensors, _fake_model_preprocessor_fn, _fake_image_resizer_fn,
num_classes=10)
self.assertAllClose(tensors[fields.InputDataFields.groundtruth_boxes],
[[0.5, 0.5, 0.6, 0.6]])
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
83547d23b166d298dba8225456e446df30293c67 | c90962d0f3233205d4dc99391f6aab3402fa7ed8 | /parPDE/__init__.py | 4557dcb4c8a5af05fe9207a2e5b7ce6f3b66bbdf | [
"BSD-2-Clause"
] | permissive | chrisjbillington/parpde | f97a1303c404ec83f22df15c6fb4e80485176150 | 4f882cbbb9ad6c57814e4422e9ba063fa27886a0 | refs/heads/master | 2022-11-10T16:18:31.489069 | 2019-01-22T21:01:25 | 2019-01-22T21:01:25 | 275,920,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | from .parPDE import *
try:
from __version__ import __version__
except ImportError:
__version__ = None | [
"[email protected]"
] | |
0d8f76b499ac816e3bd0061d7450637456aaa4d7 | 1681332a25e5130517c403bb7a860ca30506d5ea | /res/dlworkshop/conv_test.py | abb17bf25cec4bb3835f22de94b69b03e0211a02 | [
"MIT",
"CC-BY-4.0"
] | permissive | dgyrt/dgyrt.github.io | 240550826aa031323db1f64b00b36db1ac3d65df | fac6c1a9d10d8e87bad6e80aa96027b84975ee1d | refs/heads/master | 2020-05-21T12:23:00.437395 | 2017-01-31T14:05:39 | 2017-01-31T14:05:39 | 43,422,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,173 | py | """" convnet test """
import os;
import gzip;
import cPickle as pickle;
import numpy as np;
import theano;
import theano.tensor as T;
from theano.tensor.nnet import conv;
from theano.tensor.signal import downsample;
n_epochs=100;
batch_size=100;
def relu(x):
return x*(x>1e-13);
class ConvLayer(object):
def __init__(self, filter_size, num_filters, num_channels, fm_size, batch_size, **kwargs):
self.filter_size=filter_size;
self.num_filters=num_filters;
self.num_channels=num_channels;
self.fm_size=fm_size;
self.batch_size=batch_size;
super(ConvLayer, self).__init__(**kwargs);
self.initialize();
self.params=[self.filters, self.bias];
def initialize(self):
filter_shape=(self.num_filters, self.num_channels)+(self.filter_size);
self.filters=theano.shared(np.asarray(np.random.uniform(low=-0.0001,
high=0.0001,
size=filter_shape),
dtype="float32"),
borrow=True);
self.bias=theano.shared(np.asarray(np.zeros((self.num_filters, )),
dtype="float32"), borrow=True);
def apply_lin(self, X):
Y=conv.conv2d(input=X,
filters=self.filters,
image_shape=(self.batch_size, self.num_channels)+(self.fm_size),
filter_shape=(self.num_filters, self.num_channels)+(self.filter_size));
Y+=self.bias.dimshuffle('x', 0, 'x', 'x');
return Y;
class ReLUConvLayer(ConvLayer):
def __init__(self, **kwargs):
super(ReLUConvLayer, self).__init__(**kwargs);
def apply(self, X):
return relu(self.apply_lin(X));
class MaxPooling(object):
def __init__(self, pool_size):
self.pool_size=pool_size;
def apply(self, X):
return downsample.max_pool_2d(X, self.pool_size);
class Layer(object):
def __init__(self, in_dim, out_dim, W=None, b=None, **kwargs):
self.in_dim=in_dim;
self.out_dim=out_dim;
self.W=W;
self.b=b;
self.initialize();
super(Layer, self).__init__(**kwargs);
self.params=[self.W, self.b];
def initialize(self):
if self.W == None:
self.W=theano.shared(np.asarray(np.random.uniform(low=-0.0001,
high=0.0001,
size=(self.in_dim, self.out_dim)),
dtype="float32"),
borrow=True);
if self.b == None:
self.b=theano.shared(np.asarray(np.zeros((self.out_dim, )),
dtype="float32"), borrow=True);
def apply_lin(self, X):
return T.dot(X, self.W)+self.b;
class ReLULayer(Layer):
def __init__(self, **kwargs):
super(ReLULayer, self).__init__(**kwargs);
def apply(self, X):
return relu(self.apply_lin(X));
class TanhLayer(Layer):
def __init__(self, **kwargs):
super(TanhLayer, self).__init__(**kwargs);
def apply(self, X):
return T.tanh(self.apply_lin(X));
class SoftmaxLayer(Layer):
def __init__(self, **kwargs):
super(SoftmaxLayer, self).__init__(**kwargs);
def apply(self, X):
return T.nnet.softmax(self.apply_lin(X));
def predict(self, X_out):
return T.argmax(X_out, axis=1);
def error(self, X_out, Y):
return T.mean(T.neq(self.predict(X_out), Y));
# load dataset
def shared_dataset(data_xy):
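    # Wrap the numpy arrays in Theano shared variables so the whole dataset can
    # live on the GPU; labels are stored as float32 and returned as an int32
    # cast so they can be used for indexing/comparison.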
data_x, data_y=data_xy;
shared_x=theano.shared(np.asarray(data_x, dtype="float32"),
borrow=True);
shared_y=theano.shared(np.asarray(data_y, dtype="float32"),
borrow=True);
return shared_x, T.cast(shared_y, "int32");
def load_mnist(dataset):
f=gzip.open(dataset, 'rb');
train_set, valid_set, test_set=pickle.load(f);
f.close();
train_set_x, train_set_y=shared_dataset(train_set);
valid_set_x, valid_set_y=shared_dataset(valid_set);
test_set_x, test_set_y=shared_dataset(test_set);
return [(train_set_x, train_set_y), (valid_set_x, valid_set_y), (test_set_x, test_set_y)];
dataset=load_mnist("mnist.pkl.gz");
train_set_x, train_set_y=dataset[0];
valid_set_x, valid_set_y=dataset[1];
test_set_x, test_set_y=dataset[2];
n_train_batches=train_set_x.get_value(borrow=True).shape[0]/batch_size;
n_valid_batches=valid_set_x.get_value(borrow=True).shape[0]/batch_size;
n_test_batches=test_set_x.get_value(borrow=True).shape[0]/batch_size;
print n_train_batches
print n_valid_batches
print n_test_batches
print "dataset loaded"
# build mode
X=T.matrix("data");
y=T.ivector("label");
idx=T.lscalar();
images=X.reshape((batch_size, 1, 28, 28));
### configure some layers
### build some convlayers
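# Feature-map size bookkeeping for the dense layer below:
# 28x28 -> conv 7x7 -> 22x22 -> maxpool 2x2 -> 11x11 -> conv 4x4 -> 8x8 ->
# maxpool 2x2 -> 4x4, with 10 feature maps, i.e. 10*4*4 = 160 inputs for layer_2.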
layer_0=ReLUConvLayer(filter_size=(7,7), num_filters=10, num_channels=1,
fm_size=(28, 28), batch_size=batch_size);
pool_0=MaxPooling((2,2));
layer_1=ReLUConvLayer(filter_size=(4,4), num_filters=10, num_channels=10,
fm_size=(11,11), batch_size=batch_size);
pool_1=MaxPooling((2,2));
layer_2=ReLULayer(in_dim=160, out_dim=100);
layer_3=SoftmaxLayer(in_dim=100, out_dim=10);
### compile some model
out=pool_1.apply(layer_1.apply(pool_0.apply(layer_0.apply(images))))
out=out.flatten(ndim=2);
out=layer_3.apply(layer_2.apply(out));
cost=T.nnet.categorical_crossentropy(out, y).mean();
params=layer_0.params+layer_1.params+layer_2.params+layer_3.params;
#### calculate the updates of each params
gparams=T.grad(cost, params);
from collections import OrderedDict;
updates=OrderedDict();
for param, gparam in zip(params, gparams):
updates[param]=param-0.01*gparam;
train=theano.function(inputs=[idx],
outputs=cost,
updates=updates,
givens={X: train_set_x[idx*batch_size: (idx+1)*batch_size],
y: train_set_y[idx*batch_size: (idx+1)*batch_size]});
test=theano.function(inputs=[idx],
outputs=layer_3.error(out, y),
givens={X: test_set_x[idx*batch_size: (idx+1)*batch_size],
y: test_set_y[idx*batch_size: (idx+1)*batch_size]});
print "the model is built :)"
# train the model
test_record=np.zeros((n_epochs, 1));
epoch=0;
while (epoch<n_epochs):
epoch+=1;
for minibatch_index in xrange(n_train_batches):
mlp_train_cost=train(minibatch_index);
iteration=(epoch-1)*n_train_batches+minibatch_index;
if (iteration+1)%n_train_batches==0:
print "MLP model";
test_cost=[test(i) for i in xrange(n_test_batches)];
test_record[epoch-1]=np.mean(test_cost);
print " epoch %i, test error %f %%" % (epoch, test_record[epoch-1]*100.);
| [
"[email protected]"
] | |
36297de68d4dda62481025cf1bbce659d0ce664f | 3b89c0a97ac6b58b6923a213bc8471e11ad4fe69 | /python/CodingExercises/CheckSecondStringFormedFirstString.py | 82a8ded99bb4b608b37b268d47ca9e6f94271932 | [] | no_license | ksayee/programming_assignments | b187adca502ecf7ff7b51dc849d5d79ceb90d4a6 | 13bc1c44e1eef17fc36724f20b060c3339c280ea | refs/heads/master | 2021-06-30T07:19:34.192277 | 2021-06-23T05:11:32 | 2021-06-23T05:11:32 | 50,700,556 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | '''
Check whether second string can be formed from characters of first string
Given two strings str1 and str2, check if str2 can be formed from str1
Example :
Input : str1 = geekforgeeks, str2 = geeks
Output : Yes
Here, string2 can be formed from string1.
Input : str1 = geekforgeeks, str2 = and
Output : No
Here string2 cannot be formed from string1.
Input : str1 = geekforgeeks, str2 = geeeek
Output : Yes
Here string2 can be formed from string1
as string1 contains 'e' comes 4 times in
string2 which is present in string1.
'''
import collections
def CheckSecondStringFormedFirstString(str1,str2):
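    # Count character frequencies in both strings; str2 can be formed from str1
    # iff every character of str2 occurs at least as many times in str1.
    # (Equivalent sketch: `not (collections.Counter(str2) - collections.Counter(str1))`.)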
dict1=collections.Counter(str1)
dict2=collections.Counter(str2)
for key,val in dict2.items():
if key in dict1.keys() and dict1[key]>0:
dict1[key]=dict1.get(key)-1
else:
return False
return True
def main():
str1='geekforgeeks'
str2='geeks'
print(CheckSecondStringFormedFirstString(str1,str2))
str1 = 'geekforgeeks'
str2 = 'and'
print(CheckSecondStringFormedFirstString(str1, str2))
str1 = 'geekforgeeks'
str2 = 'geeeek'
print(CheckSecondStringFormedFirstString(str1, str2))
if __name__=='__main__':
main() | [
"[email protected]"
] | |
9796214d25e80f9655fb1910bc028c1969ce3aca | 1d8535658ed07fc88558c7d9bf3a01b709f189b1 | /src/reversion/migrations/0001_initial.py | 986fd81ac986f7c87b8babac57ae6a6c0bfa701a | [
"BSD-2-Clause"
] | permissive | druids/django-reversion | ebedc4debe3ffc611f9e2bf72a04f388274502a0 | d80a24b6a195c8a68bfc3100ba533419226fa18d | refs/heads/master | 2020-12-25T08:50:58.658410 | 2018-06-10T20:19:42 | 2018-06-10T20:19:42 | 40,229,843 | 0 | 3 | NOASSERTION | 2020-04-09T13:16:57 | 2015-08-05T06:56:01 | Python | UTF-8 | Python | false | false | 2,506 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Revision',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('manager_slug', models.CharField(default='default', max_length=200, db_index=True)),
('date_created', models.DateTimeField(auto_now_add=True, help_text='The date and time this revision was created.', verbose_name='date created', db_index=True)),
('comment', models.TextField(help_text='A text comment on this revision.', verbose_name='comment', blank=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, help_text='The user who created this revision.', null=True, verbose_name='user')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Version',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.TextField(help_text='Primary key of the model under version control.')),
('object_id_int', models.IntegerField(help_text="An indexed, integer version of the stored model's primary key, used for faster lookups.", null=True, db_index=True, blank=True)),
('format', models.CharField(help_text='The serialization format used by this model.', max_length=255)),
('serialized_data', models.TextField(help_text='The serialized form of this version of the model.')),
('object_repr', models.TextField(help_text='A string representation of the object.')),
('content_type', models.ForeignKey(help_text='Content type of the model under version control.', to='contenttypes.ContentType')),
('revision', models.ForeignKey(help_text='The revision that contains this version.', to='reversion.Revision')),
],
options={
},
bases=(models.Model,),
),
]
| [
"[email protected]"
] | |
290c90e1ec3e9aea7039b80484a81718c05d1dfb | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_shelled.py | 92293753951a72a46ead1e9e801bf3e2ad1a351b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.nouns._shell import _SHELL
#calss header
class _SHELLED(_SHELL, ):
def __init__(self,):
_SHELL.__init__(self)
self.name = "SHELLED"
self.specie = 'nouns'
self.basic = "shell"
self.jsondata = {}
| [
"[email protected]"
] | |
ca58b1ce2b21900200329d5dbd2507235c210435 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03853/s066137758.py | acb2ac42342d566e74d51b19e21c6c91f5ab7f87 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | num = input().split()
hei = int(num[0])
wei = int(num[1])
photo = []
for i in range(hei):
temp = input()
temp = list(temp)
photo.append(temp)
photo.append(temp)
for i in range(hei*2):
for j in range(wei):
print(photo[i][j],end="")
print("\n",end="") | [
"[email protected]"
] | |
a0a3e8f4dab8d2d3cc6497f8b4e8c5507e50f494 | 9497432cd07d17be15853544197853d1ae7ae472 | /encryption files/hashing/sha384hash.py | 1880fd67f6fa014e3adfcf43b48c4f4a11238ba8 | [] | no_license | SeresAdrian/Crypto-Project | e99be9c2bf9155e1a54be4419d5626633fd2b333 | 4c2fd709f667bdfa71bc5fadd9b47a1c79f59c6a | refs/heads/master | 2022-07-25T13:54:46.704949 | 2020-05-18T19:40:42 | 2020-05-18T19:40:42 | 265,021,044 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | #!/usr/bin/python
import hashlib
string=input("Please enter the plaintext : ")
result = hashlib.sha384(string.encode())
print("The hexadecimal equivalent of hash is : ", result.hexdigest())
| [
"[email protected]"
] | |
721a102e40b391250ee3101e851acdd76b192386 | 34f29e764609930da0b3d3d7db18dc63ab1b4a97 | /util/tasks/trainInvV2_2.py | d118d52a2810437ab0796e678089eb538f9bbefd | [] | no_license | samhu1989/RAtlasNet | c77fe2a65fcbfb34bfdf78a5e1c7abdcea989341 | 0b2859a620dd15f66c4af1355eb79356ee335507 | refs/heads/master | 2020-04-15T01:01:33.874790 | 2019-05-30T15:00:13 | 2019-05-30T15:00:13 | 164,260,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,503 | py | #
import os;
from .task import Task;
from ..ply import *;
from ..Lutils import *;
from ..utils import *;
from ..datasets import *;
import torch;
from torch.autograd import Variable;
from torch.utils.data import DataLoader;
import torch.nn as nn
import math;
import json;
sys.path.append("./ext/");
import dist_chamfer as ext;
distChamfer = ext.chamferDist();
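# The eval_*/train_* helpers below score a reconstruction with the symmetric
# Chamfer distance against the input cloud plus an inverse-consistency error
# (squared distance between the inverse-mapped output and the grid); the AE
# training step also matches the inverse-mapped input points to the grid.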
def eval_ae(net,pts):
with torch.no_grad():
points = Variable(pts);
points = points.transpose(2,1).contiguous();
points = points.cuda();
out = net(points);
dist1, dist2 = distChamfer(points.transpose(2,1).contiguous(),out['y']);
cd = (torch.mean(dist1)) + (torch.mean(dist2))
inv_err = torch.mean(torch.sum((out['inv_x'] - out['grid_x'])**2,dim=2));
return cd.data.cpu().numpy(),inv_err.data.cpu().numpy();
def train_ae(net,optim,cd_meter,inv_meter,pts,opt):
optim.zero_grad();
points = Variable(pts,requires_grad=True);
points = points.transpose(2,1).contiguous();
points = points.cuda();
out = net(points);
dist1, dist2 = distChamfer(points.transpose(2,1).contiguous(),out['y']);
cd = (torch.mean(dist1)) + (torch.mean(dist2))
inv_err = torch.mean(torch.sum((out['inv_x'] - out['grid_x'])**2,dim=2));
cd_meter.update(cd.data.cpu().numpy());
inv_meter.update(inv_err.data.cpu().numpy())
inv_gt = out['invmap'](points);
dist1, dist2 = distChamfer(inv_gt,out['grid_x']);
inv_cd = (torch.mean(dist1)) + (torch.mean(dist2))
loss = inv_cd + opt['w']*inv_err + cd;
loss.backward();
optim.step();
return loss,cd,inv_err;
def eval_svr(net,pts,img):
with torch.no_grad():
img = Variable(img);
img = img.cuda();
points = Variable(pts);
points = points.cuda();
out = net(img);
dist1, dist2 = distChamfer(points,out['y']);
cd = (torch.mean(dist1)) + (torch.mean(dist2));
inv_err = torch.mean(torch.sum((out['inv_x'] - out['grid_x'])**2,dim=2));
return cd.data.cpu().numpy(),inv_err.data.cpu().numpy();
def train_svr(net,optim,cd_meter,inv_meter,pts,img,opt):
optim.zero_grad();
img = Variable(img,requires_grad=True);
img = img.cuda();
points = Variable(pts);
points = points.cuda();
out = net(img);
dist1, dist2 = distChamfer(points,out['y']);
cd = (torch.mean(dist1)) + (torch.mean(dist2));
inv_err = torch.mean(torch.sum((out['inv_x'] - out['grid_x'])**2,dim=2));
cd_meter.update(cd.data.cpu().numpy());
inv_meter.update(inv_err.data.cpu().numpy());
loss = cd + opt['w']*inv_err;
loss.backward();
optim.step();
return loss,cd,inv_err;
def write_log(logfile,val_cd,val_inv,dataset_test,train_cd=None,train_inv=None,epoch=None):
log_dict = {};
log_dict['val_cd'] = val_cd.avg;
log_dict['val_inv'] = val_inv.avg;
for item in dataset_test.cat:
print(item,dataset_test.perCatValueMeter[item].avg)
log_dict.update({item:dataset_test.perCatValueMeter[item].avg})
if train_cd is not None:
log_dict['train_cd'] = train_cd.avg;
if train_inv is not None:
log_dict['train_inv'] = train_inv.avg;
if epoch is not None:
log_dict['epoch'] = epoch;
logfile.write('json_stats: '+json.dumps(log_dict)+'\n');
return;
bestnum = 3;
best_cd = np.zeros(bestnum);
best_all = np.zeros(bestnum);
def save_model(logtxt,dirname,net,opt,vcd,vall):
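    # Always save the latest weights to *_current.pth, then keep the `bestnum`
    # best checkpoints ranked by Chamfer distance (best_cd) and by the combined
    # cd + w*inv loss (best_all): existing files are shifted one slot down
    # (_0 is best) before the new weights are written at the inserted rank.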
global best_cd;
global best_all;
cdname = dirname+os.sep+opt['mode']+'gn'+str(opt['grid_num'])+'_cd';
allname = dirname+os.sep+opt['mode']+'gn'+str(opt['grid_num'])+'_all';
name = dirname+os.sep+opt['mode']+'gn'+str(opt['grid_num'])+'_current';
sdict = net.state_dict();
torch.save(sdict,name+'.pth');
if vcd < best_cd[-1]:
best_cd[-1] = vcd;
best_cd = np.sort(best_cd);
bidx = np.searchsorted(best_cd,vcd);
for idx in range(bestnum-2,bidx-1,-1):
if os.path.exists(cdname+'_%d'%idx+'.pth'):
if os.path.exists(cdname+'_%d'%(idx+1)+'.pth'):
os.remove(cdname+'_%d'%(idx+1)+'.pth');
print('rename '+cdname+'_%d'%(idx)+'.pth'+' '+cdname+'_%d'%(idx+1)+'.pth');
os.rename(cdname+'_%d'%(idx)+'.pth',cdname+'_%d'%(idx+1)+'.pth');
print('saving model at '+cdname+'_%d'%(bidx)+'.pth');
torch.save(sdict,cdname+'_%d'%(bidx)+'.pth');
logtxt.write('saving model at '+cdname+'_%d'%(bidx)+'.pth\n');
logtxt.write('best_cd:'+np.array2string(best_cd,precision=6,separator=',')+'\n');
if vall < best_all[-1]:
best_all[-1] = vall;
best_all = np.sort(best_all);
bidx = np.searchsorted(best_all,vall);
for idx in range(bestnum-2,bidx-1,-1):
if os.path.exists(allname+'_%d'%idx+'.pth'):
if os.path.exists(allname+'_%d'%(idx+1)+'.pth'):
os.remove(allname+'_%d'%(idx+1)+'.pth');
print('rename '+allname+'_%d'%(idx)+'.pth'+' '+allname+'_%d'%(idx+1)+'.pth');
os.rename(allname+'_%d'%(idx)+'.pth',allname+'_%d'%(idx+1)+'.pth');
        print('saving model at '+allname+'_%d'%(bidx)+'.pth');
        torch.save(sdict,allname+'_%d'%(bidx)+'.pth');
logtxt.write('saving model at '+allname+'_%d'%(bidx)+'.pth\n');
logtxt.write('best_all:'+np.array2string(best_all,precision=6,separator=',')+'\n');
def view_color(y,c=None):
if c is None:
c = colorcoord(y);
return pd.concat([pd.DataFrame(y),pd.DataFrame(c)],axis=1,ignore_index=True);
def view_ae(dirname,net,pts,index,cat,opt):
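    # Run the network on one batch and dump the reconstructed surfaces (and
    # their inverse mappings) as colored PLY meshes, reusing the grid's face
    # topology and coloring vertices by their grid coordinates.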
points = Variable(pts, volatile=True);
points = points.transpose(2,1).contiguous();
points = points.cuda();
grid = None;
fidx = None;
if opt.grid_dim == 3:
grid,Li,Lw,fidx = sphere_grid(points.size()[0],opt.pts_num,'cot');
elif opt.grid_dim == 2:
grid,Li,Lw,fidx = patch_grid(points.size()[0],opt.pts_num,opt.grid_num);
grid = Variable(grid,volatile=True);
grid = grid.cuda();
y,inv_err = net(points,grid);
y_inv = net.inv_y;
ply_path = dirname+os.sep+'ply';
if not os.path.exists(ply_path):
os.mkdir(ply_path);
T=np.dtype([("n",np.uint8),("i0",np.int32),('i1',np.int32),('i2',np.int32)]);
face = np.zeros(shape=[fidx.shape[0]],dtype=T);
for i in range(fidx.shape[0]):
face[i] = (3,fidx[i,0],fidx[i,1],fidx[i,2]);
y = y.cpu().data.numpy();
inv_y = net.inv_y.cpu().data.numpy();
grid = grid.transpose(2,1).contiguous().cpu().data.numpy();
c = colorcoord(grid[0,...])
write_ply(ply_path+os.sep+'%02d_%s_grid.ply'%(index,cat[0]),points = view_color(grid[0,...],c),faces=pd.DataFrame(face),color=True);
for i in range(y.shape[0]):
write_ply(ply_path+os.sep+'%02d_%02d_%s.ply'%(index,i,cat[0]),points = view_color(y[i,...],c),faces=pd.DataFrame(face),color=True);
write_ply(ply_path+os.sep+'%02d_%02d_%s_inv.ply'%(index,i,cat[0]),points = view_color(inv_y[i,...],c),faces=pd.DataFrame(face),color=True);
def view_svr(dirname,net,img,index,cat,opt):
img = Variable(img,volatile=True);
img = img.cuda();
grid = None;
fidx = None;
if opt.grid_dim == 3:
grid,Li,Lw,fidx = sphere_grid(points.size()[0],opt.pts_num,'cot');
elif opt.grid_dim == 2:
grid,Li,Lw,fidx = patch_grid(points.size()[0],opt.pts_num,opt.grid_num);
grid = Variable(grid,volatile=True);
grid = grid.cuda();
y,inv_err = net(img,grid);
ply_path = dirname+os.sep+'ply';
if not os.path.exists(ply_path):
os.mkdir(ply_path);
T=np.dtype([("n",np.uint8),("i0",np.int32),('i1',np.int32),('i2',np.int32)]);
face = np.zeros(shape=[fidx.shape[0]],dtype=T);
for i in range(fidx.shape[0]):
face[i] = (3,fidx[i,0],fidx[i,1],fidx[i,2]);
y = y.cpu().data.numpy();
inv_y = net.inv_y.cpu().data.numpy();
grid = grid.transpose(2,1).contiguous().cpu().data.numpy();
c = colorcoord(grid[0,...])
write_ply(ply_path+os.sep+'%02d_%s_grid.ply'%(index,cat[0]),points = view_color(grid[0,...],c),faces=pd.DataFrame(face),color=True);
for i in range(y.shape[0]):
write_ply(ply_path+os.sep+'%02d_%02d_%s.ply'%(index,i,cat[0]),points = view_color(y[i,...],c),faces=pd.DataFrame(face),color=True);
write_ply(ply_path+os.sep+'%02d_%02d_%s_inv.ply'%(index,i,cat[0]),points = view_color(inv_y[i,...],c),faces=pd.DataFrame(face),color=True);
class RealTask(Task):
def __init__(self):
super(RealTask,self).__init__();
self.tskname = os.path.basename(__file__).split('.')[0];
def run(self,*args,**kwargs):
self.start();
self.step();
return;
def start(self):
if self.cnt > 0:
return;
self.SVR = (self.opt['mode']=='SVR');
self.train_data = ShapeNet(SVR=self.SVR,normal = False,class_choice = None,train=True);
self.train_load = DataLoader(self.train_data,batch_size=self.opt['batchSize'],shuffle=True, num_workers=int(self.opt['workers']));
self.valid_data = ShapeNet(SVR=self.SVR,normal = False,class_choice = None,train=False);
self.valid_load = DataLoader(self.valid_data,batch_size=self.opt['batchSize'],shuffle=False, num_workers=int(self.opt['workers']));
self.load_pretrain();
#
self.train_cd = AverageValueMeter();
self.train_inv = AverageValueMeter();
self.valid_cd = AverageValueMeter();
self.valid_inv = AverageValueMeter();
self.optim = optim.Adam(self.net.parameters(),lr=self.opt['lr'],weight_decay=self.opt['weight_decay']);
for group in self.optim.param_groups:
group.setdefault('initial_lr', group['lr']);
self.lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optim,40,eta_min=0,last_epoch=self.opt['last_epoch']);
#
self.train_loss_acc0 = 1e-9;
self.train_loss_accs = 0;
self.eval();
write_log(self.logtxt,self.valid_cd,self.valid_inv,self.valid_data,None,None,self.cnt);
best_all.fill(self.opt['w']*self.valid_inv.avg+self.valid_cd.avg);
best_cd.fill(self.valid_cd.avg)
def eval(self):
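        # Reset the validation meters, run the whole validation loader through
        # the network (AE or SVR depending on opt['mode']), and accumulate the
        # Chamfer distance both overall and per shape category.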
self.valid_cd.reset();
self.valid_inv.reset();
for item in self.valid_data.cat:
self.valid_data.perCatValueMeter[item].reset();
self.net.eval();
for i, data in enumerate(self.valid_load, 0):
img, points, cat, _, _ = data;
if self.SVR:
cd,inv = eval_svr(self.net,points,img);
else:
cd,inv = eval_ae(self.net,points);
self.valid_cd.update(cd);
self.valid_inv.update(inv);
self.valid_data.perCatValueMeter[cat[0]].update(cd);
print('[%d: %d/%d] val loss:%f ' %(self.cnt,i,len(self.valid_data)/self.opt['batchSize'],cd));
def train(self):
self.lr_scheduler.step();
self.net.train()
for i, data in enumerate(self.train_load, 0):
img, points, cat, _ , _= data;
if self.SVR:
loss,cd,inv_err = train_svr(self.net,self.optim,self.train_cd,self.train_inv,points,img,self.opt);
else:
loss,cd,inv_err = train_ae(self.net,self.optim,self.train_cd,self.train_inv,points,self.opt);
self.train_loss_accs = self.train_loss_accs * 0.99 + loss.data.cpu().numpy();
self.train_loss_acc0 = self.train_loss_acc0 * 0.99 + 1;
print('[%d: %d/%d] train loss:%f,%f,%f/%f' %(self.cnt+self.opt['last_epoch'],i,len(self.train_data)//self.opt['batchSize'],cd.data.cpu().numpy(),inv_err.data.cpu().numpy(),loss.data.cpu().numpy(),self.train_loss_accs/self.train_loss_acc0));
def load_pretrain(self):
if self.opt['model']!='':
partial_restore(self.net,self.opt['model']);
print("Previous weights loaded");
def step(self):
if self.cnt == 0:
return;
self.train();
self.eval();
write_log(self.logtxt,self.valid_cd,self.valid_inv,self.valid_data,self.train_cd,self.train_inv,self.cnt+self.opt['last_epoch']);
save_model(self.logtxt,self.tskdir,self.net,self.opt,self.valid_cd.avg,self.valid_cd.avg+self.opt['w']*self.valid_inv.avg);
def createOptim(self):
self.optim = optim.Adam(self.net.parameters(),lr = self.opt['lr'],weight_decay=self.opt['weight_decay']); | [
"[email protected]"
] | |
fc10885cc1c93b0fef9785b5f6bc9a544e6d5749 | b943f725f8c6b20c277eb7b77e67689173bc0d1a | /simplemooc/core/urls.py | 06bf7fc6d8d6aa47edc54cbee4a80a33837c89a1 | [] | no_license | Akijunior/Eleanning | c4da62971735b5a6c18f0ee04758ac6db770c2a4 | 4b0e4c397b76a7839722b00f23ef3eb4f309d229 | refs/heads/master | 2020-08-19T05:14:57.581513 | 2019-10-21T15:33:36 | 2019-10-21T15:33:36 | 215,882,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | from django.urls import path
from simplemooc.core.views import home
urlpatterns = [
path('', home, name='home'),
] | [
"[email protected]"
] | |
9b8ffd02c0680421820d9d17d7078ba7ee1365ba | ce8bb40bf2b688f19ab8bcc20cfd58994413bc0f | /ajax/ajax_mysite/app01/views.py | b372bd95be6a215aa5b89bd42d3acb0b23b5da03 | [] | no_license | Fover21/project1 | 457f452d7f6e7ecbfc81a18512377ebc5457f3f6 | 84d596caf5701d7d76eee8c50f61bcb6150c57f2 | refs/heads/master | 2020-03-24T20:01:51.506348 | 2018-12-26T06:07:45 | 2018-12-26T06:07:45 | 142,955,917 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | from django.shortcuts import render, HttpResponse, reverse, redirect
# Create your views here.
from django.views.decorators.csrf import csrf_exempt, csrf_protect
@csrf_exempt  # exempt this view from CSRF verification
def login(request):
return render(request, 'login.html')
def index(request):
i1, i2, i3 = '', '', ''
if request.method == 'POST':
i1 = int(request.POST.get('i1'))
i2 = int(request.POST.get('i2'))
i3 = i1 + i2
return render(request, 'index.html', {
'i1': i1,
'i2': i2,
'i3': i3,
})
# from django.views.decorators.csrf import ensure_csrf_cookie 全局的第二中配置方法
# @csrf_exempt
def calc(request):
# csrfmiddlewaretoken = request.POST.get('csrfmiddlewaretoken')
# print(csrfmiddlewaretoken)
i1 = int(request.POST.get('i1'))
i2 = int(request.POST.get('i2'))
i3 = i1 + i2
print(request.POST)
return HttpResponse(i3)
# file upload
def upload(request):
if request.method == "POST":
print("FILES", request.FILES)
file_obj = request.FILES.get("file")
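        # write the upload to disk in chunks so large files are never held fully in memory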
with open(file_obj.name, "wb") as f:
for i in file_obj.chunks():
f.write(i)
return HttpResponse("success!")
# test
def tt(request):
if request.method == "POST":
ret = reverse('uu')
print(ret)
return redirect(ret)
return render(request, 'index.html') | [
"[email protected]"
] | |
525faba85baf47e70bd840eb6b17b29331739083 | 0c41031269497790425702d4ad882423dc443a6a | /pandas14/pandas14_9.py | ad0ca612be2e850e77d6d818f876fb6c53ce6255 | [] | no_license | diegoami/datacamp-courses-PY | 4c546e69241ca429adefdd459db92d617cfa0e9f | bab3082929fa6f1cf2fc2f2efb46d16374715b4b | refs/heads/master | 2023-07-20T06:42:29.776349 | 2018-10-28T22:57:21 | 2018-10-28T22:57:21 | 92,448,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,599 | py | import pandas as pd
# Load DataFrame from file_path: editions
medals = pd.read_csv('../data/medals_2.csv')
# Construct the pivot_table: medal_counts
medal_counts = medals.pivot_table(index='Edition',values='Athlete',columns='NOC',aggfunc='count')
# Load DataFrame from file_path: editions
editions = pd.read_csv('../data/editions.csv')
# Set Index of editions: totals
totals = editions.set_index('Edition')
# Reassign totals['Grand Total']: totals
totals = totals['Grand Total']
# Divide medal_counts by totals: fractions
fractions = medal_counts.divide( totals, axis = 'rows' )
# Apply the expanding mean: mean_fractions
mean_fractions = fractions.expanding().mean()
# Compute the percentage change: fractions_change
fractions_change = mean_fractions.pct_change().multiply(100)
# Reset the index of fractions_change: fractions_change
fractions_change = fractions_change.reset_index()
ioc_codes = pd.read_csv('../data/country_codes.csv')
# Extract the relevant columns: ioc_codes
ioc_codes = ioc_codes[['Country', 'NOC']]
# Left join editions and ioc_codes: hosts
hosts = pd.merge(editions,ioc_codes, how='left')
# Extract relevant columns and set index: hosts
hosts = hosts[['Edition','NOC']].set_index( 'Edition')
# Reshape fractions_change: reshaped
reshaped = pd.melt(fractions_change,id_vars='Edition', value_name='Change')
# Print reshaped.shape and fractions_change.shape
print(reshaped.shape, fractions_change.shape)
# Extract rows from reshaped where 'NOC' == 'CHN': chn
chn = reshaped.loc[reshaped['NOC'] == 'CHN']
# Print last 5 rows of chn with .tail()
print(chn.tail()) | [
"[email protected]"
] | |
15d215b500c6d26dbd37bfda3a9d73e8979c26aa | 01af3f8a79453482febefe64d356a616abc08c1e | /backend/config/settings/production/third_party.py | c58e3e64fb67c81f95dfd14e878c6d18778211f4 | [] | no_license | by-Exist/django-skeleton | 0ea3dbc815cb8da8417ef0f64e304715b8e5b5dd | 4848dd1074533b368015cdde943719114d001bcc | refs/heads/master | 2023-06-12T12:52:09.216952 | 2021-07-12T08:48:09 | 2021-07-12T08:48:09 | 372,245,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,638 | py | from .django import *
# Django Storage
# =============================================================================
STATICFILES_STORAGE = "config.storages.StaticStorage"
DEFAULT_FILE_STORAGE = "config.storages.MediaStorage"
AWS_S3_REGION_NAME = "ewr1"  # TODO: shouldn't the region and endpoint URL also be managed via environment variables?
AWS_S3_ENDPOINT_URL = f"https://{AWS_S3_REGION_NAME}.vultrobjects.com/"
AWS_ACCESS_KEY_ID = env.str("DJANGO_STORAGE_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = env.str("DJANGO_STORAGE_SECRET_ACCESS_KEY")
# Django REST Framework
# =============================================================================
# https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK[
"DEFAULT_VERSIONING_CLASS"
] = "rest_framework.versioning.URLPathVersioning"
# DRF Spectacular
# =============================================================================
# https://drf-spectacular.readthedocs.io/en/latest/settings.html
SPECTACULAR_SETTINGS["TITLE"] = "Backend Production API"
SPECTACULAR_SETTINGS["DESCRIPTION"] = "Backend Production api description..."
SPECTACULAR_SETTINGS["VERSION"] = "0.0.1"
# https://swagger.io/docs/open-source-tools/swagger-ui/usage/configuration/
SPECTACULAR_SETTINGS["SWAGGER_UI_SETTINGS"]["supportedSubmitMethods"] = []
# Django Cachalot
# =============================================================================
# https://django-cachalot.readthedocs.io/en/latest/quickstart.html#settings
INSTALLED_APPS += ["cachalot"]
CACHES["cachalot"] = env.cache("DJANGO_CACHALOT_CACHE_URL")
CACHALOT_CACHE = "cachalot"
CACHALOT_UNCACHABLE_TABLES = ["django_migrations"]
| [
"[email protected]"
] | |
bf8a6a3bbd710bdaa7611c6890907a61a0e9cce7 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_010/ch136_2020_04_01_12_09_01_220465.py | 566b2a82eb758b3344edaf9b17037a14dee59e8d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,641 | py | import random
dinheiro=10
dicas=True
jogo=True
chutes=True
dado1=random.randint (1,6)
dado2=random.randint (1,6)
dado3=random.randint (1,6)
soma=dado1+dado2+dado3
while dicas:
print ("Fase de dicas")
print ("Você tem {} dinheiros.".format (dinheiro))
if dinheiro==0:
dicas=False
chutes=False
print ("Você perdeu o jogo!")
else:
pergunta=str(input("Você quer uma dica?"))
if pergunta=="sim":
dinheiro=dinheiro-1
dica1=int(input("Digite o primeiro número: "))
dica2=int(input("Digite o segundo número: "))
dica3=int(input("Digite o terceiro número: "))
if dica1==soma or dica2==soma or dica3==soma:
print ("Está entre os três")
else:
print ("Não está entre os três")
elif pergunta=="não":
dicas=False
while chutes:
print ("Fase de chutes")
print ("Você tem {} dinheiros.".format (dinheiro))
if dinheiro==0:
print ("Você perdeu o jogo!")
chutes=False
else:
chute=int(input("Chute um número: "))
if chute==soma:
dinheiro=dinheiro + 5*dinheiro
print ("Você acertou!")
chutes=False
print ("Você ganhou o jogo com {} dinheiros.".format (dinheiro))
else:
print ("Você errou!")
dinheiro=dinheiro-1
if dinheiro==0:
print ("Você perdeu!")
chutes=False
| [
"[email protected]"
] | |
ca2e11ed3a29496a59aceae5171f893f340a43d0 | c0caed81b5b3e1498cbca4c1627513c456908e38 | /src/python/bindings/app/pyrosetta_toolkit/pyrosetta_toolkit.py | b9934a7edda6e3f28079e9c1f622ad02ca1c8a1e | [
"LicenseRef-scancode-other-permissive"
] | permissive | malaifa/source | 5b34ac0a4e7777265b291fc824da8837ecc3ee84 | fc0af245885de0fb82e0a1144422796a6674aeae | refs/heads/master | 2021-01-19T22:10:22.942155 | 2017-04-19T14:13:07 | 2017-04-19T14:13:07 | 88,761,668 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 11,387 | py | #!/usr/bin/env python
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: [email protected].
## @file /GUIs/pyrosetta_toolkit/pyrosetta_toolkit.py
## @brief Main window for the toolkit.
## @author Jared Adolf-Bryfogle ([email protected])
#Rosetta Imports
from rosetta import *
import functools
#Python Imports
from os import listdir
from os import getcwd
from shutil import copy
from os import remove
from os import environ
from os import path
from os import system
import glob
import signal
import sys
#Append Python Path
#p = os.path.split(os.path.abspath(__file__))[0]
#p2 = p.split("/"); p2.pop()
#sys.path.append("/".join(p2)); #Allows all Window_Modules to use Modules and the use of python GUIs from main GUI directory
#Tk Imports
from Tkinter import *
from Tkinter import Frame as FrameTk
import tkFileDialog
import tkMessageBox
import tkSimpleDialog
import tkFont
#Toolkit Imports
from app.pyrosetta_toolkit.modules import tools
from app.pyrosetta_toolkit.window_main.menu import *
from app.pyrosetta_toolkit.window_main import global_variables
from app.pyrosetta_toolkit.window_main.frames.InputFrame import InputFrame
from app.pyrosetta_toolkit.window_main.frames.OutputFrame import OutputFrame
from app.pyrosetta_toolkit.window_main.frames.QuickProtocolsFrame import QuickProtocolsFrame
from app.pyrosetta_toolkit.window_main.frames.SimpleAnalysisFrame import SimpleAnalysisFrame
from app.pyrosetta_toolkit.window_main.IO.GUIInput import GUIInput
from app.pyrosetta_toolkit.window_main.IO.GUIOutput import GUIOutput
from app.pyrosetta_toolkit.window_modules.pymol_integration.PyMOL import AdvancedPyMOL
from app.pyrosetta_toolkit.window_modules.scorefunction.ScoreFxnControl import ScoreFxnControl
from app.pyrosetta_toolkit.modules.Region import Region
class main_window:
def __init__(self):
"""
Initializes the main window.
Sets common global variables.
"""
self._tk_ = Tk()
self.pose = Pose()
self.native_pose = Pose()
self.current_directory = global_variables.current_directory = getcwd()
self.toolkit_home = self.location()[0]
self.DesignDic = dict()
### Init ###
self._initialize_GUI()
self._initialize_Frames()
### TextBox ###
self.textbox_frame = Frame(self._tk_, bd=3, relief=GROOVE)
outfont = tkFont.Font(family="Helvetica", size=11)
self.output_textbox= Text(self.textbox_frame,wrap="word", height=8,width=113,font = outfont)
self.output_scrollbar = Scrollbar(self.textbox_frame)
self.output_textbox.configure(yscrollcommand = self.output_scrollbar.set)
self.output_scrollbar.configure(command = self.output_textbox.yview)
#self.old_stdout = sys.stdout
#self.output_class.terminal_output.trace_variable('w', self.output_tracer)
#self.output_class.terminal_output.set(0)
self.input_class.options_manager.print_current_options()
print "\nRegion Selection Tips: No regions added = Full structure selected. \nAdding Regions: For N-terminus omit start; For C-terminus omit end; For whole Chain omit start + end"
print "For additional protocol options, please use the Option System Manager.\n"
print "Please see RosettaCommons for full documentation and references for all protocols and tools utilized in the GUI\n"
def quit(self):
self._tk_.destroy()
def _initialize_GUI(self):
"""
Creates object for the GUI
"""
#self.options_class = OptionSystemManager(global_variables.current_directory)Relocated to input_class
self.input_class = GUIInput(self)
self.output_class = GUIOutput(self)
####Sequence#####
self.residue_string = StringVar()
self.input_class.region_sequence.trace_variable('w', self.clear_num_string_on_new_input)
self.sequence_output = Entry(self._tk_, textvariable = self.input_class.region_sequence)
#self.sequence_output.bind('<FocusIn>', self.print_numbering)
self.sequence_output.bind('<ButtonRelease-1>', self.print_numbering)
self.sequence_output.bind('<KeyRelease>', self.print_numbering)
self.seq_scroll = Scrollbar(self._tk_, orient=HORIZONTAL, command=self.__scrollHandler)
self.num_label = Label(self._tk_, textvariable = self.residue_string, justify=CENTER)
####Sequence#####
self.score_class = ScoreFxnControl(); #Main Score Function Object. Holds Score. Controls switching scorefunctions, etc.
self.pymol_class = AdvancedPyMOL(self.pose); #PyMOL Object for advanced visualization.
def clear_num_string_on_new_input(self, name, index, mode):
self.residue_string.set("")
self.input_class.set_residue_of_interest("", "", "")
def print_numbering(self, event):
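        # Map the cursor position in the sequence Entry widget to a residue:
        # when the full pose is displayed the insert index is the 0-based
        # residue offset, otherwise it is offset from the start of the selected
        # region; the PDB and Rosetta numbers are then looked up via pdb_info().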
if not self.pose.total_residue():return
#print self.sequence_output.index(INSERT)
rosetta_num=0
pdb_num=""
if self.pose.total_residue()==len(self.input_class.region_sequence.get()):
rosetta_num = 1+self.sequence_output.index(INSERT)
try:
pdb_num = self.pose.pdb_info().pose2pdb(rosetta_num)
except PyRosettaException:
#Catches the the LAST index
return
#print self.num_string
else:
region = self.input_frame.return_region_from_entry()
rosetta_num = region.get_rosetta_start(self.pose)+self.sequence_output.index(INSERT)
try:
pdb_num = self.pose.pdb_info().pose2pdb(rosetta_num)
except PyRosettaException:
return
pdbSP = pdb_num.split()
self.input_class.set_residue_of_interest(pdbSP[0], pdbSP[1], repr(rosetta_num))
self.input_class.residue_string.set(pdb_num+' - '+repr(rosetta_num))
self.residue_string.set(pdb_num+' - '+repr(rosetta_num))
self.input_class.residue_rosetta_resnum.set(repr(rosetta_num))
if self.pymol_class.auto_send_residue_colors.get():
self.pymol_class.color_residue(int(rosetta_num))
#self.fullcontrol_class.shoInfo(pdbSP[0], pdbSP[1])
def __scrollHandler(self, *L):
"""
Handles scrolling of entry.
CODE: http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/entry-scrolling.html
"""
try:
op, howMany = L[0], L[1]
except IndexError:
return
if op =='scroll':
units = L[2]
self.sequence_output.xview_scroll(howMany, units)
elif op=='moveto':
self.sequence_output.xview_moveto(howMany)
def _initialize_Frames(self):
"""
Creates the Frame Objects that will go in the main window
"""
self.input_frame = InputFrame(self._tk_, self, self.input_class, bd=1, relief=SUNKEN)
self.output_frame = OutputFrame(self._tk_, self, self.output_class, bd=1, relief = SUNKEN)
self.protocol_frame = QuickProtocolsFrame(self._tk_, self, self.output_class, bd=1, relief=SUNKEN)
self.simple_analysis_frame = SimpleAnalysisFrame(self._tk_, self, bd=2, relief=SUNKEN)
self.menu_class = Menus(self._tk_, self)
def show_gui(self):
"""
Shows each piece of the main GUI.
Does not do anything with the Window Modules, just each individual main component of the main window.
These Inhereit from the Frame class. See one of these for an example.
Window Modules should be initialized through the Menus class in /window_main/menu.py
"""
#6x4 Grid Pain in the ass. At some point, everything will move to Qt - Either in Python or C++
#Grid:
self.menu_class.setTk(); self.menu_class.shoTk()
self.input_frame.grid(row=1, column=0, rowspan=7, padx=15, pady=15);
self.output_frame.grid(row=0, column=1, rowspan=2, pady=3);
self.protocol_frame.grid(row=3, column=1, rowspan=4, padx=5)
self.simple_analysis_frame.grid(row=0, column=0, padx=5, pady=5)
### Text Output ###
self.num_label.grid(column=0, row=8, columnspan=2, pady=2, padx=2)
self.seq_scroll.grid(column=0, row=9, columnspan=3, sticky=W+E)
self.sequence_output.grid(column=0, row=10, columnspan=3, sticky=W+E)
self.sequence_output['xscrollcommand']=self.seq_scroll.set
self.output_textbox.grid(column=0, row = 11, rowspan=2, columnspan=3,sticky=W+E)
self.output_scrollbar.grid(column=3, row=11, rowspan=2, sticky=E+N+S)
self.textbox_frame.grid(column=0, row=11, rowspan=2, columnspan=3, sticky=W+E, pady=3, padx=6)
#self.Photo.grid(row = 0, column = 2, rowspan=4)
"""
#Pack:
self.menu_class.setTk(); self.menu_class.shoTk()
self.input_class.pack(side=LEFT, padx=3, pady=3)
self.output_class.pack(padx=3, pady=3)
self.simple_analysis_frame.pack(padx=3, pady=3)
self.protocol_frame.pack(padx=3, pady=3)
#self.output_textbox.pack(side=BOTTOM, padx=3, pady=3)
#self.output_frame.pack(side=BOTTOM, padx=3, pady=3)
"""
def run(self, run_event_loop=True):
self._tk_.title("PyRosetta Toolkit")
self.show_gui()
self._tk_.grid_columnconfigure(ALL, weight=1)
#self._tk_.grid_rowconfigure(ALL, weight=1)
if run_event_loop:
self._tk_.mainloop()
def redirect_stdout_to_textbox(self):
print "Redirect stdout to textbox"
sys.stdout = self; #Set stdout to be redirected to textbox using the write function override.
def redirect_stdout_to_default(self):
print "Redirect stdout to default"
sys.stdout = self.old_stdout
def write(self, text):
self.output_textbox.insert(END, text)
self.output_textbox.yview(END)
def output_tracer(self, name, index, mode):
"""
Controls where stdout goes. Textbox or Terminal.
Does not override Tracer for now.
"""
varvalue = self.output_class.terminal_output.get()
if (varvalue):
self.redirect_stdout_to_default()
else:
self.redirect_stdout_to_textbox()
def location(self):
"""
        Allows the script to be self-aware of its path.
        So that it can be imported/run from anywhere.
"""
p = os.path.abspath(__file__)
pathSP = os.path.split(p)
return pathSP
class MainTracer(rosetta.basic.PyTracer):
def __init__(self, textbox):
rosetta.basic.PyTracer.__init__(self)
self.textbox = textbox
def output_callback(self, s):
pass
#s = " "+s
#self.textbox.insert(END, s)
#print s
if __name__ == '__main__':
rosetta.init()
main_window_class = main_window()
#main_window_class.TR = MainTracer(main_window_class.output_textbox)
#rosetta.basic.Tracer.set_ios_hook(main_window_class.TR, rosetta.basic.Tracer.get_all_channels_string(), False)
#rosetta.init(extra_options="-mute all")
main_window_class.run()
| [
"[email protected]"
] | |
69024abc125c1c0fbb26411947e1976dc81fb6e6 | 1f41b828fb652795482cdeaac1a877e2f19c252a | /maya_menus/_MAINMENU_PMC_Rigging/05.Put Object-(RP[N])/03.Put Controller/18.sidePin.py | e9ed02aaea8fe1c13e224651d6f47fb6657f251a | [] | no_license | jonntd/mayadev-1 | e315efe582ea433dcf18d7f1e900920f5590b293 | f76aeecb592df766d05a4e10fa2c2496f0310ca4 | refs/heads/master | 2021-05-02T07:16:17.941007 | 2018-02-05T03:55:12 | 2018-02-05T03:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | from sgMaya import sgModel, sgCmds
from maya import cmds  # assumed import; cmds is used below but not imported in the original snippet

sels = cmds.ls( sl=1 )
if not sels: sels = [None]
for sel in sels:
sgCmds.putControllerToGeo( sel, sgModel.Controller.sidePinPoints ) | [
"[email protected]"
] | |
22b95182bd9050b6d8dbb6cfd970e83489eff911 | 477c8309420eb102b8073ce067d8df0afc5a79b1 | /Applications/ParaView/Testing/Python/DisconnectAndSaveAnimation.py | f9f080edafa9f6c87116a65800627d5c41831290 | [
"LicenseRef-scancode-paraview-1.2"
] | permissive | aashish24/paraview-climate-3.11.1 | e0058124e9492b7adfcb70fa2a8c96419297fbe6 | c8ea429f56c10059dfa4450238b8f5bac3208d3a | refs/heads/uvcdat-master | 2021-07-03T11:16:20.129505 | 2013-05-10T13:14:30 | 2013-05-10T13:14:30 | 4,238,077 | 1 | 0 | NOASSERTION | 2020-10-12T21:28:23 | 2012-05-06T02:32:44 | C++ | UTF-8 | Python | false | false | 3,862 | py | #/usr/bin/env python
import QtTesting
import QtTestingImage
object1 = 'pqClientMainWindow/menubar/menuSources'
QtTesting.playCommand(object1, 'activate', 'SphereSource')
object2 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/Accept'
QtTesting.playCommand(object2, 'activate', '')
object3 = 'pqClientMainWindow/centralwidget/MultiViewManager/SplitterFrame/MultiViewSplitter/0/MultiViewFrameMenu/SplitVerticalButton'
QtTesting.playCommand(object3, 'activate', '')
QtTesting.playCommand(object1, 'activate', 'SphereSource')
object4 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/ScrollArea/qt_scrollarea_viewport/PanelArea/Editor/Center_0'
QtTesting.playCommand(object4, 'set_string', '1')
QtTesting.playCommand(object2, 'activate', '')
object5 = 'pqClientMainWindow/pipelineBrowserDock/pipelineBrowser'
QtTesting.playCommand(object5, 'currentChanged', '/0/0|0')
object6 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_tabbar'
QtTesting.playCommand(object6, 'set_tab', '1')
object7 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/1QScrollArea0/qt_scrollarea_viewport/1pqDisplayProxyEditorWidget0/Form/ViewGroup/ViewData'
QtTesting.playCommand(object7, 'set_boolean', 'true')
object8 = 'pqClientMainWindow/menubar'
QtTesting.playCommand(object8, 'activate', 'menu_View')
object8 = 'pqClientMainWindow/menubar/menu_View'
QtTesting.playCommand(object8, 'activate', 'Animation View')
object9 = 'pqClientMainWindow/animationViewDock/animationView/pqAnimationWidget/CreateDeleteWidget/PropertyCombo'
QtTesting.playCommand(object9, 'set_string', 'Start Theta')
object10 = "pqClientMainWindow/animationViewDock/animationView/1pqAnimationWidget0/1QHeaderView0"
QtTesting.playCommand(object10, "mousePress", "1,1,0,0,0,2")
QtTesting.playCommand(object10, "mouseRelease", "1,1,0,0,0,2")
object11 = 'pqClientMainWindow/VCRToolbar/1QToolButton3'
QtTesting.playCommand(object11, 'activate', '')
QtTesting.playCommand(object11, 'activate', '')
object12 = 'pqClientMainWindow/menubar/menu_File'
QtTesting.playCommand(object12, 'activate', '')
QtTesting.playCommand(object12, 'activate', 'actionFileSaveAnimation')
object13 = 'pqAnimationSettingsDialog/checkBoxDisconnect'
QtTesting.playCommand(object13, 'set_boolean', 'true')
object14 = 'pqAnimationSettingsDialog/width'
QtTesting.playCommand(object14, 'set_string', '300')
object14 = 'pqAnimationSettingsDialog/height'
QtTesting.playCommand(object14, 'set_string', '300')
object15 = 'pqAnimationSettingsDialog/okButton'
QtTesting.playCommand(object15, 'activate', '')
object16 = 'pqClientMainWindow/FileSaveAnimationDialog'
# Remove old files.
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0000.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0001.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0002.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0003.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0004.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0005.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0006.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0007.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0008.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0009.png')
QtTesting.playCommand(object16, 'filesSelected', '$PARAVIEW_TEST_ROOT/disconnectSave.png')
import time
print "Wait for 60 secs"
time.sleep(60);
QtTestingImage.compareImage('$PARAVIEW_TEST_ROOT/disconnectSave.0005.png', 'DisconnectAndSaveAnimation.png');
| [
"[email protected]"
] | |
be33d28852484275819ace98b621bc01decf9381 | 985a874c832d7632e287f2185b18aaf2e1b42018 | /dtcwt_gainlayer/layers/nonlinear.py | f3f2b274c311342e0a0b16400783156a896a9a06 | [
"MIT"
] | permissive | fbcotter/dtcwt_gainlayer | e2ea03ccfe8ad4f903b59846c1c902391c66b227 | 32ec3e21066edc2a0d5edefaf70f43d031d1b4ac | refs/heads/master | 2023-03-28T13:08:37.919222 | 2019-08-20T09:05:46 | 2019-08-20T09:05:46 | 157,608,716 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,276 | py | import torch
import torch.nn as nn
import torch.nn.functional as func
from dtcwt_gainlayer.layers.shrink import SparsifyWaveCoeffs_std, mag, SoftShrink
class PassThrough(nn.Module):
def forward(self, x):
return x
class WaveNonLinearity(nn.Module):
""" Performs a wavelet-based nonlinearity.
Args:
C (int): Number of input channels. Some of the nonlinearities have batch
norm, so need to know this.
lp (str): Nonlinearity to use for the lowpass coefficients
bp (list(str)): Nonlinearity to use for the bandpass coefficients.
lp_q (float): Quantile value for sparsity threshold for lowpass.
1 keeps all coefficients and 0 keeps none. Only valid if lp is
'softshrink_std' or 'hardshrink_std'. See
:class:`SparsifyWaveCoeffs_std`.
bp_q (float): Quantile value for sparsity threshold for bandpass
coefficients. Only valid if bp is 'softshrink_std' or
'hardshrink_std'.
The options for the lowpass are:
- none
- relu (as you'd expect)
- relu2 - applies batch norm + relu
- softshrink - applies soft shrinkage with a learnable threshold
- hardshrink_std - applies hard shrinkage. The 'std' implies that it
tracks the standard deviation of the activations, and sets a threshold
attempting to reach a desired sparsity level. This assumes that the
lowpass coefficients follow a laplacian distribution. See
:class:`dtcwt_gainlayer.layers.shrink.SparsifyWaveCoeffs_std`.
- softshrink_std - same as hardshrink std except uses soft shrinkage.
The options for the bandpass are:
- none
- relu (applied indepently to the real and imaginary components)
- relu2 - applies batch norm + relu to the magnitude of the bandpass
coefficients
- softshrink - applies shoft shrinkage to the magnitude of the bp
coefficietns with a learnable threshold
- hardshrink_std - applies hard shrinkage by tracking the standard
deviation. Assumes the bp distributions follow an exponential
distribution. See
:class:`dtcwt_gainlayer.layers.shrink.SparsifyWaveCoeffs_std`.
- softshrink_std - same as hardshrink_std but with soft shrinkage.
"""
def __init__(self, C, lp=None, bp=(None,), lp_q=0.8, bp_q=0.8):
super().__init__()
if lp is None or lp == 'none':
self.lp = PassThrough()
elif lp == 'relu':
self.lp = nn.ReLU()
elif lp == 'relu2':
self.lp = BNReLUWaveCoeffs(C, bp=False)
elif lp == 'softshrink':
self.lp = SoftShrink(C, complex=False)
elif lp == 'hardshrink_std':
self.lp = SparsifyWaveCoeffs_std(C, lp_q, bp=False, soft=False)
elif lp == 'softshrink_std':
self.lp = SparsifyWaveCoeffs_std(C, lp_q, bp=False, soft=True)
else:
raise ValueError("Unkown nonlinearity {}".format(lp))
fs = []
for b in bp:
if b is None or b == 'none':
f = PassThrough()
elif b == 'relu':
f = nn.ReLU()
elif b == 'relu2':
f = BNReLUWaveCoeffs(C, bp=True)
elif b == 'softshrink':
f = SoftShrink(C, complex=True)
elif b == 'hardshrink_std':
f = SparsifyWaveCoeffs_std(C, bp_q, bp=True, soft=False)
elif b == 'softshrink_std':
f = SparsifyWaveCoeffs_std(C, bp_q, bp=True, soft=True)
else:
raise ValueError("Unkown nonlinearity {}".format(lp))
fs.append(f)
self.bp = nn.ModuleList(fs)
def forward(self, x):
""" Applies the selected lowpass and bandpass nonlinearities to the
input x.
Args:
x (tuple): tuple of (lowpass, bandpasses)
Returns:
y (tuple): tuple of (lowpass, bandpasses)
"""
yl, yh = x
yl = self.lp(yl)
yh = [bp(y) if y.shape != torch.Size([0]) else y
for bp, y in zip(self.bp, yh)]
return (yl, yh)
class BNReLUWaveCoeffs(nn.Module):
""" Applies batch normalization followed by a relu
Args:
C (int): number of channels
bp (bool): If true, applies bn+relu to the magnitude of the bandpass
coefficients. If false, is applying bn+relu to the lowpass coeffs.
"""
def __init__(self, C, bp=True):
super().__init__()
self.bp = bp
if bp:
self.BN = nn.BatchNorm2d(6*C)
else:
self.BN = nn.BatchNorm2d(C)
self.ReLU = nn.ReLU()
def forward(self, x):
""" Applies nonlinearity to the input x """
if self.bp:
s = x.shape
# Move the orientation dimension to the channel
x = x.view(s[0], s[1]*s[2], s[3], s[4], s[5])
θ = torch.atan2(x.data[..., 1], x.data[..., 0])
r = mag(x, complex=True)
r_new = self.ReLU(self.BN(r))
y = torch.stack((r_new * torch.cos(θ), r_new * torch.sin(θ)), dim=-1)
# Reshape to a 6D tensor again
y = y.view(s[0], s[1], s[2], s[3], s[4], s[5])
else:
y = self.ReLU(self.BN(x))
return y
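
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The shapes are
# assumptions inferred from the BatchNorm2d(6*C) and view/mag logic above:
# the lowpass is (N, C, H, W) and each bandpass tensor is (N, C, 6, H, W, 2),
# i.e. 6 orientations with real/imag stacked in the last dimension.
if __name__ == "__main__":
    C = 8
    nonlin = WaveNonLinearity(C, lp='relu', bp=('relu', 'relu'))
    yl = torch.randn(2, C, 16, 16)
    yh = [torch.randn(2, C, 6, 16, 16, 2),
          torch.randn(2, C, 6, 8, 8, 2)]
    out_l, out_h = nonlin((yl, yh))
    # Both outputs keep their input shapes; only the values are transformed.
    print(out_l.shape, [tuple(h.shape) for h in out_h])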
| [
"[email protected]"
] | |
34a5496edaf78c200fe0a67006564fb6d0ff9b2b | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli-core/azure/cli/core/tests/test_aaz_paging.py | 2ec14f790d3d6bce17f38400edfd9df57904a7dc | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 3,855 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azure.cli.core.aaz import AAZUndefined
from azure.cli.core.aaz._paging import AAZPaged, AAZPageIterator
from azure.cli.core.mock import DummyCli
class TestAAZPaging(unittest.TestCase):
def test_aaz_paging_sample(self):
data_by_pages = [(['a', 'b', 'c'], 1), (['d', 'e'], 2), (['f'], 3), (['g', 'h'], AAZUndefined)]
result = {
"value": AAZUndefined,
"next_link": AAZUndefined,
}
def executor(next_link):
if next_link is None:
next_link = 0
value, next_link = data_by_pages[next_link]
result["value"] = value
result['next_link'] = next_link
def extract_result():
return result['value'], result['next_link']
paged = AAZPaged(executor=executor, extract_result=extract_result, cli_ctx=DummyCli())
self.assertTrue(list(paged) == ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
def test_aaz_paging_with_limit_and_token(self):
data_by_pages = [
(["a", "b", "c"], 1),
(["d", "e"], 2),
(["f"], 3),
(["g", "h"], AAZUndefined)
]
result = {
"value": AAZUndefined,
"next_link": AAZUndefined
}
def executor(next_link):
if next_link is None:
next_link = 0
value, next_link = data_by_pages[next_link]
result["value"] = value
result["next_link"] = next_link
def extract_result():
return result["value"], result["next_link"]
next_token = '{"next_link": 1, "offset": 1}'
paged = AAZPaged(
executor=executor, extract_result=extract_result, cli_ctx=DummyCli(),
token=next_token, limit=4
)
self.assertTrue(list(paged) == ["e", "f", "g", "h"])
def test_aaz_paging_iterator(self):
data_by_pages = [
(["a", "b", "c"], 1),
(["d", "e"], 2),
(["f"], 3),
(["g", "h"], AAZUndefined)
]
result = {
"value": AAZUndefined,
"next_link": AAZUndefined
}
def executor(next_link):
if next_link is None:
next_link = 0
value, next_link = data_by_pages[next_link]
result["value"] = value
result["next_link"] = next_link
def extract_result():
return result["value"], result["next_link"]
page_iterator = AAZPageIterator(
executor=executor, extract_result=extract_result, cli_ctx=DummyCli(),
next_link=1, offset=1, limit=4
)
# | a b c | d e | f | g h |
# *
self.assertTrue(page_iterator._next_link == 1)
self.assertTrue(page_iterator._start == 1) # offset
self.assertTrue(page_iterator._total == 5)
# | a b c | d e | f | g h |
# *
next(page_iterator)
self.assertTrue(page_iterator._next_link == 2)
self.assertTrue(page_iterator._total == 3)
# | a b c | d e | f | g h |
# *
next(page_iterator)
self.assertTrue(page_iterator._next_link == 3)
self.assertTrue(page_iterator._total == 2)
# | a b c | d e | f | g h |
# *
next(page_iterator)
self.assertTrue(page_iterator._next_link == AAZUndefined)
self.assertTrue(page_iterator._total == 0)
| [
"[email protected]"
] | |
2a43d736e2b0bed80741d6dc401155c5fb685570 | 374aac5655cbdead72683a5e8b6e02126a024768 | /tests/test_sqlalchemy.py | b05d87ce415b1c3218592d47c7af99354879f0b8 | [
"MIT"
] | permissive | naveenkumar-grofers/nplusone | 0f51179a5a4aa717ea2b537bfa1a8e07af568ebb | 2bcf727a73c05afa01a020993997a6a60778b872 | refs/heads/master | 2021-01-24T21:54:08.390445 | 2015-11-15T16:52:42 | 2015-11-15T16:52:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,041 | py | # -*- coding: utf-8 -*-
import pytest
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from nplusone.core import signals
import nplusone.ext.sqlalchemy # noqa
from tests import utils
from tests.utils import calls # noqa
pytest.yield_fixture(calls)
Base = declarative_base()
models = utils.make_models(Base)
@pytest.fixture()
def session():
engine = sa.create_engine('sqlite:///:memory:')
Session = sa.orm.sessionmaker(bind=engine)
Base.metadata.create_all(bind=engine)
return Session()
@pytest.fixture()
def objects(session):
hobby = models.Hobby()
address = models.Address()
user = models.User(addresses=[address], hobbies=[hobby])
session.add(user)
session.commit()
session.close()
class TestManyToOne:
def test_many_to_one(self, session, objects, calls):
user = session.query(models.User).first()
user.addresses
assert len(calls) == 1
call = calls[0]
assert call.objects == (models.User, 'addresses')
assert 'user.addresses' in ''.join(call.frame[4])
def test_many_to_one_ignore(self, session, objects, calls):
user = session.query(models.User).first()
with signals.ignore(signals.lazy_load):
user.addresses
assert len(calls) == 0
def test_many_to_one_subquery(self, session, objects, calls):
user = session.query(
models.User
).options(
sa.orm.subqueryload('addresses')
).first()
user.addresses
assert len(calls) == 0
def test_many_to_one_joined(self, session, objects, calls):
user = session.query(models.User).options(sa.orm.joinedload('addresses')).first()
user.addresses
assert len(calls) == 0
def test_many_to_one_reverse(self, session, objects, calls):
address = session.query(models.Address).first()
address.user
assert len(calls) == 1
call = calls[0]
assert call.objects == (models.Address, 'user')
assert 'address.user' in ''.join(call.frame[4])
def test_many_to_one_reverse_subquery(self, session, objects, calls):
address = session.query(
models.Address
).options(
sa.orm.subqueryload('user')
).first()
address.user
assert len(calls) == 0
def test_many_to_one_reverse_joined(self, session, objects, calls):
address = session.query(models.Address).options(sa.orm.joinedload('user')).first()
address.user
assert len(calls) == 0
class TestManyToMany:
def test_many_to_many(self, session, objects, calls):
user = session.query(models.User).first()
user.hobbies
assert len(calls) == 1
call = calls[0]
assert call.objects == (models.User, 'hobbies')
assert 'user.hobbies' in ''.join(call.frame[4])
def test_many_to_many_subquery(self, session, objects, calls):
user = session.query(models.User).options(sa.orm.subqueryload('hobbies')).first()
user.hobbies
assert len(calls) == 0
def test_many_to_many_joined(self, session, objects, calls):
user = session.query(models.User).options(sa.orm.joinedload('hobbies')).first()
user.hobbies
assert len(calls) == 0
def test_many_to_many_reverse(self, session, objects, calls):
hobby = session.query(models.Hobby).first()
hobby.users
assert len(calls) == 1
call = calls[0]
assert call.objects == (models.Hobby, 'users')
assert 'hobby.users' in ''.join(call.frame[4])
def test_many_to_many_reverse_subquery(self, session, objects, calls):
hobby = session.query(models.Hobby).options(sa.orm.subqueryload('users')).first()
hobby.users
assert len(calls) == 0
def test_many_to_many_reverse_joined(self, session, objects, calls):
hobby = session.query(models.Hobby).options(sa.orm.joinedload('users')).first()
hobby.users
assert len(calls) == 0
| [
"[email protected]"
] | |
f41d0214026ead1d8003886b6a76d15d7f9fd2d8 | 29c3595a4e1f8de9382650610aee5a13e2a135f6 | /venv/Lib/site-packages/django/db/utils.py | 28afa6cd076757b79702b2aab41a1cc2382588ba | [
"MIT"
] | permissive | zoelesv/Smathchat | 1515fa56fbb0ad47e1859f6bf931b772446ea261 | 5cee0a8c4180a3108538b4e4ce945a18726595a6 | refs/heads/main | 2023-08-04T14:47:21.185149 | 2023-08-02T15:53:20 | 2023-08-02T15:53:20 | 364,627,392 | 9 | 1 | MIT | 2023-08-02T15:53:21 | 2021-05-05T15:42:47 | Python | UTF-8 | Python | false | false | 10,398 | py | import pkgutil
from importlib import import_module
from pathlib import Path
from asgiref.local import Local
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
DEFAULT_DB_ALIAS = 'default'
DJANGO_VERSION_PICKLE_KEY = '_django_version'
class Error(Exception):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class DatabaseErrorWrapper:
"""
Context manager and decorator that reraises backend-specific database
exceptions using Django's common wrappers.
"""
def __init__(self, wrapper):
"""
wrapper is a database wrapper.
It must have a Database attribute defining PEP-249 exceptions.
"""
self.wrapper = wrapper
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
for dj_exc_type in (
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError,
DatabaseError,
InterfaceError,
Error,
):
db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
if issubclass(exc_type, db_exc_type):
dj_exc_value = dj_exc_type(*exc_value.args)
# Only set the 'errors_occurred' flag for errors that may make
# the connection unusable.
if dj_exc_type not in (DataError, IntegrityError):
self.wrapper.errors_occurred = True
raise dj_exc_value.with_traceback(traceback) from exc_value
def __call__(self, func):
# Note that we are intentionally not using @wraps here for performance
# reasons. Refs #21109.
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
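
# Illustrative sketch (not part of Django's source): DatabaseErrorWrapper can
# be used both ways shown below, assuming `wrapper` is a backend
# DatabaseWrapper whose Database module defines the PEP 249 exceptions.
#
#     with DatabaseErrorWrapper(wrapper):
#         cursor.execute(sql)          # backend errors re-raised as Django's
#
#     @DatabaseErrorWrapper(wrapper)
#     def run_query():
#         cursor.execute(sql)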
def load_backend(backend_name):
"""
Return a database backend's "base" module given a fully qualified database
backend name, or raise an error if it doesn't exist.
"""
# This backend was renamed in Django 1.9.
if backend_name == 'django.db.backends.postgresql_psycopg2':
backend_name = 'django.db.backends.postgresql'
try:
return import_module('%s.base' % backend_name)
except ImportError as e_user:
# The database backend wasn't found. Display a helpful error message
# listing all built-in database backends.
backend_dir = str(Path(__file__).parent / 'backends')
builtin_backends = [
name for _, name, ispkg in pkgutil.iter_modules([backend_dir])
if ispkg and name not in {'base', 'dummy', 'postgresql_psycopg2'}
]
if backend_name not in ['django.db.backends.%s' % b for b in builtin_backends]:
backend_reprs = map(repr, sorted(builtin_backends))
raise ImproperlyConfigured(
"%r isn't an available database backend.\n"
"Try using 'django.db.backends.XXX', where XXX is one of:\n"
" %s" % (backend_name, ", ".join(backend_reprs))
) from e_user
else:
# If there's some other error, this must be an error in Django
raise
class ConnectionDoesNotExist(Exception):
pass
class ConnectionHandler:
def __init__(self, databases=None):
"""
databases is an optional dictionary of database definitions (structured
like settings.DATABASES).
"""
self._databases = databases
# Connections needs to still be an actual thread local, as it's truly
# thread-critical. Database backends should use @async_unsafe to protect
# their code from async contexts, but this will give those contexts
# separate connections in case it's needed as well. There's no cleanup
# after async contexts, though, so we don't allow that if we can help it.
self._connections = Local(thread_critical=True)
@cached_property
def databases(self):
if self._databases is None:
self._databases = settings.DATABASES
if self._databases == {}:
self._databases = {
DEFAULT_DB_ALIAS: {
'ENGINE': 'django.db.backends.dummy',
},
}
if DEFAULT_DB_ALIAS not in self._databases:
raise ImproperlyConfigured("You must define a '%s' database." % DEFAULT_DB_ALIAS)
if self._databases[DEFAULT_DB_ALIAS] == {}:
self._databases[DEFAULT_DB_ALIAS]['ENGINE'] = 'django.db.backends.dummy'
return self._databases
def ensure_defaults(self, alias):
"""
Put the defaults into the settings dictionary for a given connection
where no settings is provided.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
conn.setdefault('ATOMIC_REQUESTS', False)
conn.setdefault('AUTOCOMMIT', True)
conn.setdefault('ENGINE', 'django.db.backends.dummy')
if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
conn['ENGINE'] = 'django.db.backends.dummy'
conn.setdefault('CONN_MAX_AGE', 0)
conn.setdefault('OPTIONS', {})
conn.setdefault('TIME_ZONE', None)
for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
conn.setdefault(setting, '')
def prepare_test_settings(self, alias):
"""
Make sure the test settings are available in the 'TEST' sub-dictionary.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
test_settings = conn.setdefault('TEST', {})
default_test_settings = [
('CHARSET', None),
('COLLATION', None),
('MIGRATE', True),
('MIRROR', None),
('NAME', None),
]
for key, value in default_test_settings:
test_settings.setdefault(key, value)
def __getitem__(self, alias):
if hasattr(self._connections, alias):
return getattr(self._connections, alias)
self.ensure_defaults(alias)
self.prepare_test_settings(alias)
db = self.databases[alias]
backend = load_backend(db['ENGINE'])
conn = backend.DatabaseWrapper(db, alias)
setattr(self._connections, alias, conn)
return conn
def __setitem__(self, key, value):
setattr(self._connections, key, value)
def __delitem__(self, key):
delattr(self._connections, key)
def __iter__(self):
return iter(self.databases)
def all(self):
return [self[alias] for alias in self]
def close_all(self):
for alias in self:
try:
connection = getattr(self._connections, alias)
except AttributeError:
continue
connection.close()
class ConnectionRouter:
def __init__(self, routers=None):
"""
If routers is not specified, default to settings.DATABASE_ROUTERS.
"""
self._routers = routers
@cached_property
def routers(self):
if self._routers is None:
self._routers = settings.DATABASE_ROUTERS
routers = []
for r in self._routers:
if isinstance(r, str):
router = import_string(r)()
else:
router = r
routers.append(router)
return routers
def _router_func(action):
def _route_db(self, model, **hints):
chosen_db = None
for router in self.routers:
try:
method = getattr(router, action)
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
chosen_db = method(model, **hints)
if chosen_db:
return chosen_db
instance = hints.get('instance')
if instance is not None and instance._state.db:
return instance._state.db
return DEFAULT_DB_ALIAS
return _route_db
db_for_read = _router_func('db_for_read')
db_for_write = _router_func('db_for_write')
def allow_relation(self, obj1, obj2, **hints):
for router in self.routers:
try:
method = router.allow_relation
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(obj1, obj2, **hints)
if allow is not None:
return allow
return obj1._state.db == obj2._state.db
def allow_migrate(self, db, app_label, **hints):
for router in self.routers:
try:
method = router.allow_migrate
except AttributeError:
# If the router doesn't have a method, skip to the next one.
continue
allow = method(db, app_label, **hints)
if allow is not None:
return allow
return True
def allow_migrate_model(self, db, model):
return self.allow_migrate(
db,
model._meta.app_label,
model_name=model._meta.model_name,
model=model,
)
def get_migratable_models(self, app_config, db, include_auto_created=False):
"""Return app models allowed to be migrated on provided db."""
models = app_config.get_models(include_auto_created=include_auto_created)
return [model for model in models if self.allow_migrate_model(db, model)]
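
# Illustrative sketch (not part of Django's source): a minimal custom router
# that ConnectionRouter would load from settings.DATABASE_ROUTERS. The method
# names and signatures mirror the hooks invoked above; the 'replica' alias and
# the dotted path are hypothetical.
#
#     class ReadReplicaRouter:
#         def db_for_read(self, model, **hints):
#             return 'replica'
#
#         def db_for_write(self, model, **hints):
#             return DEFAULT_DB_ALIAS
#
#         def allow_relation(self, obj1, obj2, **hints):
#             return None  # fall through to the default same-database check
#
#         def allow_migrate(self, db, app_label, **hints):
#             return db == DEFAULT_DB_ALIAS
#
#     # settings.py:  DATABASE_ROUTERS = ['myproject.routers.ReadReplicaRouter']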
| [
"[email protected]"
] | |
8a3441b439ae0c781ace3ba8281fe64a57450d67 | b550eda62179ffd8e49a59df7f8a30163140204f | /backend/openshift-old/services/user/service/model/user.py | 72451f8834453939723096891846cc39a7ccf1a3 | [
"Apache-2.0"
] | permissive | bgoesswe/openeo-repeatability | 6222fb235b70fda9da998b63fec92c0e5ac07169 | 087b9965e710d16cd6f29cb25e2cb94e443c2b30 | refs/heads/master | 2022-12-11T03:43:35.365574 | 2018-08-07T20:02:02 | 2018-08-07T20:02:02 | 139,158,921 | 0 | 1 | null | 2022-12-08T02:15:15 | 2018-06-29T14:27:34 | Python | UTF-8 | Python | false | false | 2,121 | py | ''' Model of User '''
import jwt
import datetime
from flask import current_app
from service import DB, BCRYPT
class User(DB.Model):
__tablename__ = "users"
    id = DB.Column(DB.Integer, primary_key=True, autoincrement=True) # rename to uid
username = DB.Column(DB.String(128), unique=True, nullable=False)
email = DB.Column(DB.String(128), unique=True, nullable=False)
password = DB.Column(DB.String(255), nullable=False)
admin = DB.Column(DB.Boolean, default=False, nullable=False)
active = DB.Column(DB.Boolean, default=True, nullable=False)
created_at = DB.Column(DB.DateTime, nullable=False)
    def __init__(self, username, email, password, created_at=None, admin=False):
        self.username = username
        self.email = email
        self.password = self.generate_hash(password)
        self.admin = admin
        self.created_at = created_at or datetime.datetime.utcnow()  # default evaluated per call, not at import time
def get_dict(self):
''' Returns the users data '''
return {
"id": self.id,
"username": self.username,
"email": self.email,
"admin": self.admin,
"created_at": self.created_at
}
@staticmethod
def generate_hash(password):
''' Generates the password hash '''
return BCRYPT.generate_password_hash(password, current_app.config.get('BCRYPT_LOG_ROUNDS')).decode()
@staticmethod
def encode_auth_token(user_id):
''' Generates the auth token '''
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(
days=current_app.config.get('TOKEN_EXPIRATION_DAYS'),
seconds=current_app.config.get('TOKEN_EXPIRATION_SECONDS')
),
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
return jwt.encode(payload, current_app.config.get('SECRET_BCRYPT'), algorithm='HS256')
@staticmethod
def decode_auth_token(auth_token):
''' Decodes the auth token '''
payload = jwt.decode(auth_token, current_app.config.get('SECRET_BCRYPT'))
return payload['sub']
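
# Illustrative usage sketch (not part of the original service). It assumes an
# active Flask application context whose config defines SECRET_BCRYPT,
# BCRYPT_LOG_ROUNDS, TOKEN_EXPIRATION_DAYS and TOKEN_EXPIRATION_SECONDS, as
# read by the methods above.
#
#     user = User(username='alice', email='[email protected]', password='s3cret')
#     DB.session.add(user)
#     DB.session.commit()
#
#     token = User.encode_auth_token(user.id)            # JWT whose 'sub' is the user id
#     assert User.decode_auth_token(token) == user.id    # round-trips the id
#     assert BCRYPT.check_password_hash(user.password, 's3cret')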
| [
"[email protected]"
] | |
2db80125614126b1bda5dac81b52721288060e5e | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/D/dasfaha/get_imdb_movie_rating.py | 4f8bab114f20f4b7fedaa3cbfb02a591f9fa6362 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,532 | py | import lxml.html
import scraperwiki
#Ge the data
html = scraperwiki.scrape("http://www.imdb.com/title/tt2103264/")
#The request to IMDB returns plain text so the line below processes this text and turns it into a format that can be queried
root = lxml.html.fromstring(html)
#The rating of a movie is within a div with class: "titlePageSprite star-box-giga-star" like this:
#
#<div class="titlePageSprite star-box-giga-star">
# 7.7
#</div>
#
#Use CSS selector to get the div html element that has class="titlePageSprite"
el = root.cssselect("div.titlePageSprite")
#el is a list as there could be several div elements with the same class. In our case we know there is only one div with that class
print "Number of elements in el: {0}".format(len(el))
#Create a python 'dictionary' to store the two fields of the data we just scraped: 'movie title' and 'rating'
data = {
'movie title': 'Emperor', #exercise: is it possible to scrape the movie name from the page? :p
'rating' : el[0].text
}
print "Movie rating: {0}".format(data['rating']) #The fields in 'data' can be accessed by their names
#Save into a database. Completely pointless in this case but useful if the data changes...
scraperwiki.sqlite.save(unique_keys=['movie title'], data=data)
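#A possible answer to the exercise above (an unverified sketch): assuming the
#movie title sits in the page's <h1> element, the same cssselect approach works:
#    title_el = root.cssselect("h1")
#    if title_el:
#        data['movie title'] = title_el[0].text_content().strip()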
| [
"[email protected]"
] | |
c4be35002664253e83bad83bee500cc207fa909c | e4700f3ff598b997bf0ea35bcdb76b00c530c994 | /tmp.py | d616e22344314282bffb61071d044da898ac2eef | [] | no_license | nikkibisarya/therapysummarization | 64d056683454289561a45b6e5e1d88f5e3f78dae | 203b5a06577456d68d3022aa94d9476e0d352e18 | refs/heads/master | 2020-03-16T23:23:11.698069 | 2019-11-05T18:08:54 | 2019-11-05T18:08:54 | 133,075,146 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py |
import numpy as np
import matplotlib.pyplot as plt
loss = [1.0761, 0.8476, 0.7516, 0.6956, 0.6562, 0.6243, 0.5985, 0.5765, 0.5586, 0.5427, 0.5315, 0.5169, 0.5089, 0.4994,
0.4923, 0.4866, 0.4806, 0.4763, 0.4708, 0.4707]
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.plot(np.arange(len(loss)), loss)
plt.legend(['train'], loc='upper left')  # legend after plot so it picks up the plotted line; only the training curve is available
plt.savefig('MISCloss.png')
| [
"[email protected]"
] | |
feb967e768de780f768c67ee8e6bc478974aa13b | 7e90a1f8280618b97729d0b49b80c6814d0466e2 | /workspace_pc/catkin_ws/build_isolated/hector_slam/catkin_generated/stamps/hector_slam/_setup_util.py.stamp | c51cc0942a19064fe6e7239c5adffd9ad95290b7 | [] | no_license | IreneYIN7/Map-Tracer | 91909f4649a8b65afed56ae3803f0c0602dd89ff | cbbe9acf067757116ec74c3aebdd672fd3df62ed | refs/heads/master | 2022-04-02T09:53:15.650365 | 2019-12-19T07:31:31 | 2019-12-19T07:31:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,976 | stamp | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolders):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolders: list of str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for the all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for the all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
parser.add_argument('--local', action='store_true', help='Only consider this prefix path and ignore other prefix path in the environment')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
if not args.local:
# environment at generation time
CMAKE_PREFIX_PATH = '/home/gse5/catkin_ws/devel_isolated/hector_map_server;/home/gse5/catkin_ws/devel_isolated/hector_geotiff_plugins;/home/gse5/catkin_ws/devel_isolated/hector_geotiff;/home/gse5/catkin_ws/devel_isolated/hector_nav_msgs;/home/gse5/catkin_ws/devel_isolated/hector_marker_drawing;/home/gse5/catkin_ws/devel_isolated/hector_mapping;/home/gse5/catkin_ws/devel_isolated/hector_compressed_map_transport;/home/gse5/catkin_ws/devel_isolated/hector_map_tools;/home/gse5/catkin_ws/devel_isolated/hector_imu_tools;/home/gse5/catkin_ws/devel_isolated/hector_imu_attitude_to_tf;/home/gse5/catkin_ws/devel_isolated/rplidar_ros;/home/gse5/catkin_ws/devel_isolated/cartographer_rviz;/home/gse5/catkin_ws/devel_isolated/cartographer_ros;/home/gse5/catkin_ws/devel_isolated/cartographer_ros_msgs;/home/gse5/catkin_ws/devel_isolated/beginner_tutorials;/home/gse5/catkin_ws/install_isolated;/opt/ros/melodic;/home/gse5/catkin_ws/devel_isolated/cartographer;/home/gse5/catkin_ws/devel_isolated/ceres-solver'.split(';')
else:
# don't consider any other prefix path than this one
CMAKE_PREFIX_PATH = []
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
# CMAKE_PREFIX_PATH uses forward slash on all platforms, but __file__ is platform dependent
# base_path on Windows contains backward slashes, need to be converted to forward slashes before comparison
if os.path.sep != '/':
base_path = base_path.replace(os.path.sep, '/')
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
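
# Illustrative invocation sketch (assumed shell session, output abridged): the
# generated code is evaluated by the corresponding setup.SHELL script, e.g.
#
#     $ python _setup_util.py --extend
#     # prepend folders of workspaces to environment variables
#     export CMAKE_PREFIX_PATH="/home/gse5/catkin_ws/devel_isolated/...:$CMAKE_PREFIX_PATH"
#     export LD_LIBRARY_PATH="..."
#     ...
#
# --local limits the prefix path to this workspace only; omitting --extend
# first rolls back entries added by other workspaces (see
# rollback_env_variables above).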
| [
"[email protected]"
] | |
a0afd01311fc3c8b2e58fd920285130338e86b2d | 62c11667bc780b8fb80b69a069c5e4135a40ac8a | /src/newsletter/migrations/0001_initial.py | 77ec77167df437d057a369a632f89115ed37d047 | [] | no_license | garabek/Django_BootcampSite | 39b8bc976730c0776d733536f020a043d2f89370 | 8752cd7f2c469e2e4c9cf639e357c51cd05b5c4d | refs/heads/master | 2021-07-01T12:09:57.557274 | 2017-09-21T23:07:01 | 2017-09-21T23:07:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='SignUp',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('email', models.EmailField(max_length=254)),
('full_name', models.CharField(max_length=100, null=True, blank=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
]
| [
"[email protected]"
] | |
2bf07793bfef24a2bed035690bb6849533f776bc | 1239393937f155fd5090c41f462262098fa6c6c1 | /dev/docs/source/conf.py | 20af5dc2d5e88af3e123d49e2e27b9d9573e3297 | [
"BSD-2-Clause-Views",
"BSD-3-Clause"
] | permissive | hyunjinb/XlsxWriter | af4fe17c11b81c05ba8ec6adf27d0f6d1d632399 | b4c4b499ffb3db8e0fa1b306880bcbcb3675fd4d | refs/heads/master | 2021-01-23T13:42:00.785444 | 2017-09-05T23:17:06 | 2017-09-05T23:17:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,926 | py | # -*- coding: utf-8 -*-
#
# XlsxWriter documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 28 00:12:14 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'XlsxWriter'
copyright = u'2013-2017, John McNamara'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.9'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/2/': None}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# # 'nosidebar': True,
# 'sidebarbgcolor': '#F2F2F2',
# 'relbarbgcolor': '#9CB640',
# 'linkcolor': '#9CB640',
# 'sidebarlinkcolor': '#9CB640',
# 'footerbgcolor': '#FFFFFF',
# 'footertextcolor': '#9CB640',
# 'headtextcolor': '#9CB640',
# 'codebgcolor': '#FFFFFF',
# }
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "XlsxWriter Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '_images/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'XlsxWriterdoc'
# Remove permalinks.
html_add_permalinks = ""
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '11pt',
# Additional stuff for the LaTeX preamble.
'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'XlsxWriter.tex', u'Creating Excel files with Python and XlsxWriter',
u'John McNamara', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_images/logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'xlsxwriter', u'XlsxWriter Documentation',
[u'John McNamara'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'XlsxWriter', u'XlsxWriter Documentation',
u'John McNamara', 'XlsxWriter', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'XlsxWriter'
epub_author = u'John McNamara'
epub_publisher = u'John McNamara'
epub_copyright = u'2013-2017, John McNamara'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
| [
"[email protected]"
] | |
88c0d4f7001e4d7f2d2a994d979b9b99a1ed7d08 | 9adc810b07f7172a7d0341f0b38088b4f5829cf4 | /experiments/ashvin/icml2020/hand/buffers/pen1.py | c92cde36156496ccf82fa584986ffbc35a17a452 | [
"MIT"
] | permissive | Asap7772/railrl_evalsawyer | 7ee9358b5277b9ddf2468f0c6d28beb92a5a0879 | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | refs/heads/main | 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,576 | py | """
AWR + SAC from demo experiment
"""
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment, process_args
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy, BinnedGMMPolicy
from rlkit.torch.networks import Clamp
if __name__ == "__main__":
variant = dict(
num_epochs=1001,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
),
buffer_policy_class=BinnedGMMPolicy,
buffer_policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
num_gaussians=11,
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=False,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
q_weight_decay=0,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
post_bc_pretrain_hyperparams=dict(
bc_weight=0.0,
compute_bc=False,
),
reward_transform_kwargs=None, # r' = r + 1
terminal_transform_kwargs=None, # t = 0
),
launcher_config=dict(
num_exps_per_instance=1,
region='us-west-2',
),
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
# dict(
# path="demos/icml2020/hand/pen2_sparse.npy",
# obs_dict=True,
# is_demo=True,
# ),
# dict(
# path="demos/icml2020/hand/pen_bc5.npy",
# obs_dict=False,
# is_demo=False,
# train_split=0.9,
# ),
],
),
add_env_demos=True,
add_env_offpolicy_data=True,
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
use_validation_buffer=True,
)
search_space = {
'env': ["pen-sparse-v0", "door-sparse-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [0.5, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [1.0, ],
'policy_kwargs.std_architecture': ["values", ],
# 'trainer_kwargs.compute_bc': [True, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, ],
'trainer_kwargs.awr_sample_actions': [False, ],
'trainer_kwargs.awr_min_q': [True, ],
'trainer_kwargs.q_weight_decay': [0],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
'qf_kwargs.output_activation': [Clamp(max=0)],
'trainer_kwargs.train_bc_on_rl_buffer':[True],
# 'policy_kwargs.num_gaussians': [11, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, process_args)
| [
"[email protected]"
] | |
70c3c06f681b066ac0388b0d3c1198b4074e9724 | 7f24023d365e013ec0924844c1a872edfb0c75b4 | /tests/trac/trac-0186/check.py | 08b3119a43dd3dd72dd22febf93509b88bca7eca | [
"Python-2.0",
"MIT",
"Apache-2.0"
] | permissive | pabigot/pyxb | cd42c024607572c6363682d389e9296caf3f2857 | 5ee5ba54c9f702dc9c9efc2731ee547ecd4dae4a | refs/heads/next | 2023-05-11T03:23:19.599756 | 2023-04-29T20:38:15 | 2023-04-29T20:45:13 | 20,547,850 | 130 | 63 | Apache-2.0 | 2021-08-19T16:52:18 | 2014-06-06T01:49:03 | Python | UTF-8 | Python | false | false | 493 | py | # -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.utils.domutils
import resources
import unittest
class ExternalTrac0186 (unittest.TestCase):
def testXBIngress (self):
instance = resources.XBIngress(match='all', action1='none', digits1='', action2='none', digits2='')
def testXBMatch (self):
instance = resources.XBMatch('all')
if '__main__' == __name__:
unittest.main()
| [
"[email protected]"
] | |
969d035c63ace1f7b4c413e93f06400bb2d2bf34 | 119437adb7830659307c18b79a9cc3f6bfc6fe40 | /transformers_learning/english_sequence_labeling/torch_model_train.py | 234011630b2febd960451887847252ee4bdd95c0 | [] | no_license | percent4/PyTorch_Learning | 478bec35422cdc66bf41b4258e29fbcb6d24f60c | 24184d49032c9c9a68142aff89dabe33adc17b52 | refs/heads/master | 2023-03-31T03:01:19.372830 | 2023-03-17T17:02:39 | 2023-03-17T17:02:39 | 171,400,828 | 16 | 7 | null | 2023-09-02T08:53:26 | 2019-02-19T03:47:41 | Jupyter Notebook | UTF-8 | Python | false | false | 5,513 | py | # -*- coding: utf-8 -*-
# @Time : 2021/1/31 15:01
# @Author : Jclian91
# @File : torch_model_train.py
# @Place : Yangpu, Shanghai
import json
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from transformers import BertForTokenClassification, BertTokenizer, BertConfig
from util import event_type, train_file_path, test_file_path
from util import MAX_LEN, BERT_MODEL_DIR, TRAIN_BATCH_SIZE, VALID_BATCH_SIZE, EPOCHS, LEARNING_RATE
from load_data import read_data
# tokenizer and label_2_id_dict
with open("{}_label2id.json".format(event_type), "r", encoding="utf-8") as f:
tag2idx = json.loads(f.read())
idx2tag = {v: k for k, v in tag2idx.items()}
class CustomDataset(Dataset):
def __init__(self, tokenizer, sentences, labels, max_len):
self.len = len(sentences)
self.sentences = sentences
self.labels = labels
self.tokenizer = tokenizer
self.max_len = max_len
def __getitem__(self, index):
sentence = str(self.sentences[index])
inputs = self.tokenizer.encode_plus(
sentence,
None,
add_special_tokens=True,
max_length=self.max_len,
truncation=True,
padding="max_length",
# pad_to_max_length=True,
return_token_type_ids=True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
label = self.labels[index]
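        # pad the label ids with 0s and truncate to MAX_LEN so they line up with the padded input ids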
        label = label + [0] * MAX_LEN  # concatenation avoids mutating the stored label list in place
label = label[:MAX_LEN]
return {
'ids': torch.tensor(ids, dtype=torch.long),
'mask': torch.tensor(mask, dtype=torch.long),
'tags': torch.tensor(label, dtype=torch.long)
}
def __len__(self):
return self.len
# Creating the customized model
class BERTClass(torch.nn.Module):
def __init__(self):
super(BERTClass, self).__init__()
config = BertConfig.from_pretrained("./bert-base-uncased", num_labels=len(list(tag2idx.keys())))
self.l1 = BertForTokenClassification.from_pretrained('./bert-base-uncased', config=config)
# self.l2 = torch.nn.Dropout(0.3)
# self.l3 = torch.nn.Linear(768, 200)
def forward(self, ids, mask, labels):
output_1 = self.l1(ids, mask, labels=labels)
# output_2 = self.l2(output_1[0])
# output = self.l3(output_2)
return output_1
def flat_accuracy(preds, labels):
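    # flat token-level accuracy over the batch (padding positions are included in the count)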
flat_preds = np.argmax(preds, axis=2).flatten()
flat_labels = labels.flatten()
return np.sum(flat_preds == flat_labels)/len(flat_labels)
def valid(model, testing_loader):
model.eval()
eval_loss = 0; eval_accuracy = 0
nb_eval_steps, nb_eval_examples = 0, 0
with torch.no_grad():
for _, data in enumerate(testing_loader):
ids = data['ids'].to(dev, dtype=torch.long)
mask = data['mask'].to(dev, dtype=torch.long)
targets = data['tags'].to(dev, dtype=torch.long)
output = model(ids, mask, labels=targets)
loss, logits = output[:2]
logits = logits.detach().cpu().numpy()
label_ids = targets.to('cpu').numpy()
accuracy = flat_accuracy(logits, label_ids)
eval_loss += loss.mean().item()
eval_accuracy += accuracy
nb_eval_examples += ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss/nb_eval_steps
print("Validation loss: {}".format(eval_loss))
print("Validation Accuracy: {}".format(eval_accuracy/nb_eval_steps))
if __name__ == '__main__':
# Preparing for CPU or GPU usage
dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
tokenizer = BertTokenizer.from_pretrained('./{}'.format(BERT_MODEL_DIR))
# Creating the Dataset and DataLoader for the neural network
train_sentences, train_labels = read_data(train_file_path)
train_labels = [[tag2idx.get(l) for l in lab] for lab in train_labels]
test_sentences, test_labels = read_data(test_file_path)
test_labels = [[tag2idx.get(l) for l in lab] for lab in test_labels]
print("TRAIN Dataset: {}".format(len(train_sentences)))
print("TEST Dataset: {}".format(len(test_sentences)))
training_set = CustomDataset(tokenizer, train_sentences, train_labels, MAX_LEN)
testing_set = CustomDataset(tokenizer, test_sentences, test_labels, MAX_LEN)
train_params = {'batch_size': TRAIN_BATCH_SIZE, 'shuffle': True, 'num_workers': 0}
test_params = {'batch_size': VALID_BATCH_SIZE, 'shuffle': True, 'num_workers': 0}
training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params)
# train the model
model = BERTClass()
model.to(dev)
optimizer = torch.optim.Adam(params=model.parameters(), lr=LEARNING_RATE)
for epoch in range(EPOCHS):
model.train()
for _, data in enumerate(training_loader):
ids = data['ids'].to(dev, dtype=torch.long)
mask = data['mask'].to(dev, dtype=torch.long)
targets = data['tags'].to(dev, dtype=torch.long)
loss = model(ids, mask, labels=targets)[0]
# optimizer.zero_grad()
if _ % 50 == 0:
print(f'Epoch: {epoch}, Batch: {_}, Loss: {loss.item()}')
optimizer.zero_grad()
loss.backward()
optimizer.step()
# model evaluate
valid(model, testing_loader)
torch.save(model.state_dict(), '{}_ner.pth'.format(event_type))
| [
"[email protected]"
] | |
d8c137dda1852fc28941eac7e6a8c8a76905993e | 9bde6cafb4273d721229448d115853ff2f5994a6 | /myblog/blog/models.py | 29739ca1865621b4e4224bca3f600e41f915a179 | [] | no_license | davejonesbkk/myblog | 11eb30b4d75270b3e99f172f27f05ce31e318f93 | 4a5cbeb47154004ef239b16e63155997b1c9afe6 | refs/heads/master | 2021-01-17T17:43:28.465235 | 2016-05-31T02:02:07 | 2016-05-31T02:02:07 | 59,930,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | from django.db import models
from django_markdown.models import MarkdownField
from django.core.urlresolvers import reverse
class EntryQuerySet(models.QuerySet):
def published(self):
return self.filter(publish=True)
class Entry(models.Model):
title = models.CharField(max_length=200)
body = models.TextField()
slug = models.SlugField(max_length=200, unique=True)
publish = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
objects = EntryQuerySet.as_manager()
def __str__(self):
return self.title
class Meta:
verbose_name = 'Blog Entry'
verbose_name_plural = 'Blog Entries'
ordering = ["-created"]
| [
"[email protected]"
] | |
447a75ff7f1e949a3c268918e94f8ab08d58da0f | 68cd659b44f57adf266dd37789bd1da31f61670d | /2020-01/python/18188_다오의데이트.py | 7c55c44e597a14f68e338a66b4a4458c5ab95c41 | [] | no_license | 01090841589/solved_problem | c0c6f5a46e4d48860dccb3b0288aa5b56868fbca | bbea2f31e5fe36cad100bc514eacd83545fb25b1 | refs/heads/master | 2023-07-02T23:55:51.631478 | 2021-08-04T13:57:00 | 2021-08-04T13:57:00 | 197,157,830 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | import sys
sys.stdin = open("다오의데이트.txt")
DIR = [[-1, 0], [0, 1], [1, 0], [0, -1]]
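# movement deltas, index-aligned with arr = ['W', 'D', 'S', 'A'] defined below (up, right, down, left)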
def go_dao(y, x, k, route):
global result, rts
if result:
return
if k >= A:
return
flag = 1
for i in range(4):
if can[k][i]:
Y = y+DIR[i][0]
X = x+DIR[i][1]
if 0 <= Y < H and 0 <= X < W:
if MAP[Y][X] != '@':
if MAP[Y][X] == 'Z':
rts = route+arr[i]
result = 1
return
flag = 0
go_dao(Y, X, k+1, route+arr[i])
H, W = map(int, input().split())
MAP = [list(input()) for _ in range(H)]
for h in range(H):
for w in range(W):
if MAP[h][w] == 'D':
y = h
x = w
result = 0
rts = ''
A = int(input())
arr = ['W', 'D', 'S', 'A']
can = [[0, 0, 0, 0] for _ in range(A)]
for i in range(A):
B, C = map(str, input().split())
can[i][arr.index(B)] = 1
can[i][arr.index(C)] = 1
go_dao(y, x, 0, '')
if result:
print("YES")
print(rts)
else:
print("NO")
| [
"[email protected]"
] | |
b1918d70a960ef445232d6b1b21ffd44d9848c48 | 71c7683331a9037fda7254b3a7b1ffddd6a4c4c8 | /Phys/Urania/examples/KsPiZeroMM_angularPDF.py | a83417211276319e5a15c72d57e48769a1b46477 | [] | no_license | pseyfert-cern-gitlab-backup/Urania | edc58ba4271089e55900f8bb4a5909e9e9c12d35 | 1b1c353ed5f1b45b3605990f60f49881b9785efd | refs/heads/master | 2021-05-18T13:33:22.732970 | 2017-12-15T14:42:04 | 2017-12-15T14:42:04 | 251,259,622 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,684 | py | from Urania.Helicity import *
from Urania.SympyBasic import *
from os import *
DiLeptonSpins = [0,1,2] ## DMS: I doubt we'll need 2, probably we'll only
## have Pwave (J=1) from the photon, plus maybe some S-wave (J=0)
### transAmp=1 : Changes to transversity amplitude basis
A = doKsPizeroMuMu(DiLeptonSpins ) ## This is now in Urania.Helicity
### massage a bit the expression to make it more suitable for fitting
pdf_split = DecomposeAmplitudes(A,TransAmplitudes.values())
phys = 0
for key in pdf_split: phys += StrongPhases(key)*pdf_split[key]
### change the free variables to cosines
x = USymbol("helcosthetaK","c\\theta_{K}",real = True)
y = USymbol("helcosthetaL", "c\\theta_{l}", real = True)
z = USymbol("helphi" , "\\phi", real = True)
CThL = Cos(ThetaL)
CThK = Cos(ThetaK)
def changeFreeVars(function):
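    # expand double-angle terms, rewrite sines via cosines, then substitute the cosine variables x, y
    # and the DTT phi convention (-z) for the helicity angles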
### Phi now as in DTT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
function = function.subs( Sin(2*ThetaK), 2*Sin(ThetaK)*Cos(ThetaK))
function = function.subs( Sin(2*ThetaL), 2*Sin(ThetaL)*Cos(ThetaL))
function = function.subs( Cos(2*ThetaK), 2*Cos(ThetaK)**2 - 1)
function = function.subs( Cos(2*ThetaL), 2*Cos(ThetaL)**2 - 1)
function = function.subs( Sin(ThetaK), Sqrt(1-Cos(ThetaK)**2))
function = function.subs( Sin(ThetaL), Sqrt(1-Cos(ThetaL)**2))
function = function.subs([(CThK,x),(CThL,y), (Phi, -z)])
return function
func = changeFreeVars(phys)
### Print out to a latex document
from Urania.LatexFunctions import *
flatex = file("Kspizeromm_PDF.tex","w")
begintex(flatex)
begin_multline(flatex)
i = 0
for key in pdf_split.keys():
if i > 20:
i = 0
multline_break(flatex)
if pdf_split[key]:
flatex.write(Ulatex(key) + "\t" + Ulatex(pdf_split[key]) + "\\\\" + "\n")
i += 1
end_multline(flatex)
flatex.write("\\end{document}\n")
flatex.close()
system("pdflatex " + "Kspizeromm_PDF")
print "angular function saved in Kspizeromm_PDF.pdf"
print "Now making RooFit class as well"
##BREAK
##### Generate and compile a fitting class corresponding to "A"
### Trial 1, w/o analytical integrals
from Urania.RooInterfaces import *
potential_list = [x,y,z]+TransAmpModuli.values() + TransAmpPhases.values()
final_list = []
for thing in potential_list:
if thing in func.atoms(): final_list.append(thing)
op = RooClassGenerator(func, final_list ,"RooKspizeroMM")
### Define intermediate variables to be calculated once
op.makePdf(integrable = 1)
op.doIntegral(1,(y,-1,1))#,(y,-1,1),(z,-Pi,Pi))
##op.doIntegral(2,(x,-1,1),(y,-1,1))
##op.doIntegral(3,(x,-1,1),(z,-Pi,Pi))
##op.doIntegral(4,(y,-1,1),(z,-Pi,Pi))
op.overwrite()
op.invoke()
| [
"[email protected]"
] | |
d2e18daba5039bfa0fe53bdc30e97c234ded7ec8 | bbfa9cdfd5f09c833ab9190cd4ad5a46e7a515e7 | /effective-python/2020-05/item_61.py | 863a8f8f00e61d939277ee2b82426ba026599225 | [] | no_license | alexchonglian/readings | 775204e013a2301f08fee96c5e8b116842faebcb | 03cb6cb266d8d2376db411e9b12e9b6cd1f2b33b | refs/heads/master | 2022-12-02T13:56:56.878477 | 2021-06-18T05:53:14 | 2021-06-18T05:53:14 | 218,573,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,393 | py | import random
random.seed(1234)
import logging
from pprint import pprint
from sys import stdout as STDOUT
# Write all output to a temporary directory
import atexit
import gc
import io
import os
import tempfile
TEST_DIR = tempfile.TemporaryDirectory()
atexit.register(TEST_DIR.cleanup)
# Make sure Windows processes exit cleanly
OLD_CWD = os.getcwd()
atexit.register(lambda: os.chdir(OLD_CWD))
os.chdir(TEST_DIR.name)
def close_open_files():
everything = gc.get_objects()
for obj in everything:
if isinstance(obj, io.IOBase):
obj.close()
atexit.register(close_open_files)
def example(i): print(f'\n==== Example {i} ====')
example(1)
class EOFError(Exception):
pass
class ConnectionBase:
def __init__(self, connection):
self.connection = connection
self.file = connection.makefile('rb')
def send(self, command):
line = command + '\n'
data = line.encode()
self.connection.send(data)
def receive(self):
line = self.file.readline()
if not line:
raise EOFError('Connection closed')
return line[:-1].decode()
example(2)
import random
WARMER = 'Warmer'
COLDER = 'Colder'
UNSURE = 'Unsure'
CORRECT = 'Correct'
class UnknownCommandError(Exception):
pass
example(3)
example(4)
example(5)
example(6)
class Session(ConnectionBase):
def __init__(self, *args):
super().__init__(*args)
self._clear_state(None, None)
def _clear_state(self, lower, upper):
self.lower = lower
self.upper = upper
self.secret = None
self.guesses = []
def loop(self):
while command := self.receive():
parts = command.split(' ')
if parts[0] == 'PARAMS':
self.set_params(parts)
elif parts[0] == 'NUMBER':
self.send_number()
elif parts[0] == 'REPORT':
self.receive_report(parts)
else:
raise UnknownCommandError(command)
def set_params(self, parts):
assert len(parts) == 3
lower = int(parts[1])
upper = int(parts[2])
self._clear_state(lower, upper)
def next_guess(self):
if self.secret is not None:
return self.secret
while True:
guess = random.randint(self.lower, self.upper)
if guess not in self.guesses:
return guess
def send_number(self):
guess = self.next_guess()
self.guesses.append(guess)
self.send(format(guess))
def receive_report(self, parts):
assert len(parts) == 2
decision = parts[1]
last = self.guesses[-1]
if decision == CORRECT:
self.secret = last
print(f'Server: {last} is {decision}')
example(7)
example(8)
example(9)
example(10)
import contextlib
import math
class Client(ConnectionBase):
def __init__(self, *args):
super().__init__(*args)
self._clear_state()
def _clear_state(self):
self.secret = None
self.last_distance = None
@contextlib.contextmanager
def session(self, lower, upper, secret):
print(f'Guess a number between {lower} and {upper}!'
f' Shhhhh, it\'s {secret}.')
self.secret = secret
self.send(f'PARAMS {lower} {upper}')
try:
yield
finally:
self._clear_state()
self.send('PARAMS 0 -1')
def request_numbers(self, count):
for _ in range(count):
self.send('NUMBER')
data = self.receive()
yield int(data)
if self.last_distance == 0:
return
def report_outcome(self, number):
new_distance = math.fabs(number - self.secret)
decision = UNSURE
if new_distance == 0:
decision = CORRECT
elif self.last_distance is None:
pass
elif new_distance < self.last_distance:
decision = WARMER
elif new_distance > self.last_distance:
decision = COLDER
self.last_distance = new_distance
self.send(f'REPORT {decision}')
return decision
example(11)
import socket
from threading import Thread
def handle_connection(connection):
with connection:
session = Session(connection)
try:
session.loop()
except EOFError:
pass
def run_server(address):
with socket.socket() as listener:
# Allow the port to be reused
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(address)
listener.listen()
while True:
connection, _ = listener.accept()
thread = Thread(target=handle_connection,
args=(connection,),
daemon=True)
thread.start()
example(12)
def run_client(address):
with socket.create_connection(address) as connection:
client = Client(connection)
with client.session(1, 5, 3):
results = [(x, client.report_outcome(x))
for x in client.request_numbers(5)]
with client.session(10, 15, 12):
for number in client.request_numbers(5):
outcome = client.report_outcome(number)
results.append((number, outcome))
return results
example(13)
def main():
address = ('127.0.0.1', 1234)
server_thread = Thread(
target=run_server, args=(address,), daemon=True)
server_thread.start()
results = run_client(address)
for number, outcome in results:
print(f'Client: {number} is {outcome}')
main()
example(14)
class AsyncConnectionBase:
def __init__(self, reader, writer): # Changed
self.reader = reader # Changed
self.writer = writer # Changed
async def send(self, command):
line = command + '\n'
data = line.encode()
self.writer.write(data) # Changed
await self.writer.drain() # Changed
async def receive(self):
line = await self.reader.readline() # Changed
if not line:
raise EOFError('Connection closed')
return line[:-1].decode()
example(15)
example(16)
example(17)
example(18)
example(19)
class AsyncSession(AsyncConnectionBase): # Changed
def __init__(self, *args):
super().__init__(*args)
self._clear_values(None, None)
def _clear_values(self, lower, upper):
self.lower = lower
self.upper = upper
self.secret = None
self.guesses = []
async def loop(self): # Changed
while command := await self.receive(): # Changed
parts = command.split(' ')
if parts[0] == 'PARAMS':
self.set_params(parts)
elif parts[0] == 'NUMBER':
await self.send_number() # Changed
elif parts[0] == 'REPORT':
self.receive_report(parts)
else:
raise UnknownCommandError(command)
def set_params(self, parts):
assert len(parts) == 3
lower = int(parts[1])
upper = int(parts[2])
self._clear_values(lower, upper)
def next_guess(self):
if self.secret is not None:
return self.secret
while True:
guess = random.randint(self.lower, self.upper)
if guess not in self.guesses:
return guess
async def send_number(self): # Changed
guess = self.next_guess()
self.guesses.append(guess)
await self.send(format(guess)) # Changed
def receive_report(self, parts):
assert len(parts) == 2
decision = parts[1]
last = self.guesses[-1]
if decision == CORRECT:
self.secret = last
print(f'Server: {last} is {decision}')
example(20)
example(21)
example(22)
example(23)
class AsyncClient(AsyncConnectionBase): # Changed
def __init__(self, *args):
super().__init__(*args)
self._clear_state()
def _clear_state(self):
self.secret = None
self.last_distance = None
@contextlib.asynccontextmanager # Changed
async def session(self, lower, upper, secret): # Changed
print(f'Guess a number between {lower} and {upper}!'
f' Shhhhh, it\'s {secret}.')
self.secret = secret
await self.send(f'PARAMS {lower} {upper}') # Changed
try:
yield
finally:
self._clear_state()
await self.send('PARAMS 0 -1') # Changed
async def request_numbers(self, count): # Changed
for _ in range(count):
await self.send('NUMBER') # Changed
data = await self.receive() # Changed
yield int(data)
if self.last_distance == 0:
return
async def report_outcome(self, number): # Changed
new_distance = math.fabs(number - self.secret)
decision = UNSURE
if new_distance == 0:
decision = CORRECT
elif self.last_distance is None:
pass
elif new_distance < self.last_distance:
decision = WARMER
elif new_distance > self.last_distance:
decision = COLDER
self.last_distance = new_distance
await self.send(f'REPORT {decision}') # Changed
# Make it so the output printing is in
# the same order as the threaded version.
await asyncio.sleep(0.01)
return decision
example(24)
import asyncio
async def handle_async_connection(reader, writer):
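    # one AsyncSession per client connection; EOFError means the client closed the stream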
session = AsyncSession(reader, writer)
try:
await session.loop()
except EOFError:
pass
async def run_async_server(address):
server = await asyncio.start_server(
handle_async_connection, *address)
async with server:
await server.serve_forever()
example(25)
async def run_async_client(address):
# Wait for the server to listen before trying to connect
await asyncio.sleep(0.1)
streams = await asyncio.open_connection(*address) # New
client = AsyncClient(*streams) # New
async with client.session(1, 5, 3):
results = [(x, await client.report_outcome(x))
async for x in client.request_numbers(5)]
async with client.session(10, 15, 12):
async for number in client.request_numbers(5):
outcome = await client.report_outcome(number)
results.append((number, outcome))
_, writer = streams # New
writer.close() # New
await writer.wait_closed() # New
return results
example(26)
async def main_async():
address = ('127.0.0.1', 4321)
server = run_async_server(address)
asyncio.create_task(server)
results = await run_async_client(address)
for number, outcome in results:
print(f'Client: {number} is {outcome}')
logging.getLogger().setLevel(logging.ERROR)
asyncio.run(main_async())
logging.getLogger().setLevel(logging.DEBUG)
| [
"[email protected]"
] | |
e8492bd500e419e50fa3815209d4889eb2e4e971 | c761f3fbce728e61cbcf5179f1d3f27e1e5625cd | /register_key.py | 1328baddc2fe4d7e5f91b2052b07daa49e53649f | [] | no_license | philopon/usermon | 16033d41436efe2cf4971bcd3b25f99cf82de318 | 7f97db09a65466e2133d4304f9fe5ba212299598 | refs/heads/master | 2021-01-18T16:51:56.457593 | 2017-04-21T13:06:12 | 2017-04-21T13:06:12 | 86,775,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | #!/usr/bin/env python3
def main():
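    # append SSH public keys read from standard input to the invoking user's ~/.ssh/authorized_keys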
import sys
import os
import pwd
import pamela
pw = pwd.getpwuid(os.getuid())
ssh_dir = os.path.join(pw.pw_dir, '.ssh')
auth_keys = os.path.join(ssh_dir, 'authorized_keys')
os.makedirs(ssh_dir, mode=0o700, exist_ok=True)
with open(auth_keys, 'a') as f:
for key in sys.stdin:
print(key.strip(), file=f)
os.chmod(auth_keys, 0o600)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
3e35560a675840b2ed59a45d39e280ce612af5c6 | 4e5b20fdcca20f458322f0a8cd11bbdacb6fb3e5 | /suning/api/union/UnionInfomationGetRequest.py | 5a52d242f32e5e4c7c3d65d8e1872c3832f9291a | [] | no_license | shijingyu/sunningAPI | 241f33b0660dc84635ce39688fed499f5c57a5da | 4a3b2ef7f9bdc4707d1eaff185bc7eb636fe90d5 | refs/heads/master | 2020-04-24T22:15:11.584028 | 2019-02-24T06:41:20 | 2019-02-24T06:41:20 | 172,305,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | # -*- coding: utf-8 -*-
'''
Created on 2016-1-27
@author: suning
'''
from suning.api.abstract import AbstractApi
class UnionInfomationGetRequest(AbstractApi):
'''
'''
def __init__(self):
AbstractApi.__init__(self)
self.goodsCode = None
self.setParamRule({
'goodsCode':{'allow_empty':False}
})
def getApiBizName(self):
return 'getUnionInfomation'
def getApiMethod(self):
return 'suning.netalliance.unioninfomation.get'
| [
"[email protected]"
] | |
da850d8841ddddfdccfc6bde153467956b91789c | 78e60a7d8a67ed76244004e8a3ed573fbf396e41 | /samples/get_zip_codes.py | a89c105f5ec1a635d350ba870418f9f735a0bb60 | [
"MIT"
] | permissive | Crivez/apiclient-python | 837a9f7cc0453ccd3121311adc7920b5fe6b3e33 | 860fc054f546152a101e29b1af388c381075ac47 | refs/heads/master | 2023-06-08T13:24:09.249704 | 2021-06-17T12:16:35 | 2021-06-17T12:16:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | from voximplant.apiclient import VoximplantAPI, VoximplantException
if __name__ == "__main__":
voxapi = VoximplantAPI("credentials.json")
# Search for zip codes in Germany.
COUNTRY_CODE = "DE"
COUNT = 1
try:
res = voxapi.get_zip_codes(COUNTRY_CODE,
count=COUNT)
print(res)
except VoximplantException as e:
print("Error: {}".format(e.message))
| [
"[email protected]"
] | |
0ae55acd20bb59d6c3f499e32e0f526820a351d7 | 822d3cd484b54f0531fc205520c765a8321c0613 | /pyFile/8.面向对象/2.类的属性/9.类方法和静态方法.py | a0ccbf84964d8f9059c7feb1ae5efeedb1a3e65a | [] | no_license | mghxy123/learnPython | 31d1cc18deeed5a89864ca0333fe488e0dbf08b4 | 00740e87d55a4dffd78773deaff8689485df31e8 | refs/heads/master | 2021-07-21T14:31:02.421788 | 2020-06-27T11:28:01 | 2020-06-27T11:28:01 | 187,751,182 | 0 | 0 | null | 2020-06-07T05:14:05 | 2019-05-21T02:58:35 | Python | UTF-8 | Python | false | false | 1,232 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : 9.类方法和静态方法.py
# Author: HuXianyong
# Mail: [email protected]
# Date : 2019/5/16 0016
# A plain function defined inside a class
# class Person:
#     def normal_method(): # Is this allowed? Yes - there is no syntax problem and it runs fine; it is just that by convention nobody writes methods this way
#         print('normal')
#
#     # How do we call it?
#     Person.normal_method() # Is this allowed? Yes, because it simply calls the function on the class directly
#     # Person().normal_method() # Is this allowed? No: this creates an instance, and a method called on an instance must receive that instance object, but no self parameter is declared here, so it raises an error
#     print(Person.__dict__)
# # Static method
# class Person:
# @staticmethod
# def class_method():
# print('this is staticMethod')
# Person.class_method()
# Person().class_method()
# Class method and static method
class Person:
@classmethod
    def class_method(cls): # what is cls? It is the class object itself, passed in automatically
print('this is class method')
print('class = {0.__name__}({0})'.format(cls))
cls.HEIGHT = 170
@staticmethod
def static_method():
print('this is staticMethod')
Person.class_method()
print(Person.__dict__)
| [
"[email protected]"
] | |
f29fc6830528398b792fd60578b01a78f12aa4e7 | 41ede4fd3bfba1bff0166bca7aee80dcf21434c6 | /ayhanyalcinsoy/Desktop/lxde/base/libfm/actions.py | ad79cdbb6f0b2d887aa5244a18b52080cbb19379 | [] | no_license | pisilinux/playground | a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c | e4e12fff8a847ba210befc8db7e2af8556c3adf7 | refs/heads/master | 2022-08-12T23:03:27.609506 | 2022-08-11T18:28:19 | 2022-08-11T18:28:19 | 8,429,459 | 16 | 22 | null | 2022-08-11T18:28:20 | 2013-02-26T09:37:11 | Python | UTF-8 | Python | false | false | 811 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "libfm-%s" % (get.srcVERSION())
def setup():
autotools.configure("--disable-static \
--sysconfdir=/etc \
--enable-debug \
--enable-udisks \
--enable-demo")
pisitools.dosed("libtool", " -shared ", " -Wl,-O1,--as-needed -shared ")
def build():
autotools.make()
def install():
pisitools.dosed("data/libfm.conf", "xarchiver", "file-roller")
autotools.install()
pisitools.dodoc("AUTHORS", "COPYING", "TODO")
| [
"[email protected]"
] | |
d7336abe08b51fb335e57cf3d53ee20b79886453 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/insights/v20160301/_inputs.py | 5910733c44e6efa9bc7563418d54942acbf6f519 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,307 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'LocationThresholdRuleConditionArgs',
'ManagementEventAggregationConditionArgs',
'ManagementEventRuleConditionArgs',
'RetentionPolicyArgs',
'RuleEmailActionArgs',
'RuleManagementEventClaimsDataSourceArgs',
'RuleManagementEventDataSourceArgs',
'RuleMetricDataSourceArgs',
'RuleWebhookActionArgs',
'ThresholdRuleConditionArgs',
]
@pulumi.input_type
class LocationThresholdRuleConditionArgs:
def __init__(__self__, *,
failed_location_count: pulumi.Input[int],
odata_type: pulumi.Input[str],
data_source: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]] = None,
window_size: Optional[pulumi.Input[str]] = None):
"""
A rule condition based on a certain number of locations failing.
:param pulumi.Input[int] failed_location_count: the number of locations that must fail to activate the alert.
:param pulumi.Input[str] odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition'.
:param pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
:param pulumi.Input[str] window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
pulumi.set(__self__, "failed_location_count", failed_location_count)
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition')
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter(name="failedLocationCount")
def failed_location_count(self) -> pulumi.Input[int]:
"""
the number of locations that must fail to activate the alert.
"""
return pulumi.get(self, "failed_location_count")
@failed_location_count.setter
def failed_location_count(self, value: pulumi.Input[int]):
pulumi.set(self, "failed_location_count", value)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]):
pulumi.set(self, "data_source", value)
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[pulumi.Input[str]]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
@window_size.setter
def window_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "window_size", value)
@pulumi.input_type
class ManagementEventAggregationConditionArgs:
def __init__(__self__, *,
operator: Optional[pulumi.Input['ConditionOperator']] = None,
threshold: Optional[pulumi.Input[float]] = None,
window_size: Optional[pulumi.Input[str]] = None):
"""
How the data that is collected should be combined over time.
:param pulumi.Input['ConditionOperator'] operator: the condition operator.
:param pulumi.Input[float] threshold: The threshold value that activates the alert.
:param pulumi.Input[str] window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
if operator is not None:
pulumi.set(__self__, "operator", operator)
if threshold is not None:
pulumi.set(__self__, "threshold", threshold)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter
def operator(self) -> Optional[pulumi.Input['ConditionOperator']]:
"""
the condition operator.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: Optional[pulumi.Input['ConditionOperator']]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def threshold(self) -> Optional[pulumi.Input[float]]:
"""
The threshold value that activates the alert.
"""
return pulumi.get(self, "threshold")
@threshold.setter
def threshold(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "threshold", value)
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[pulumi.Input[str]]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
@window_size.setter
def window_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "window_size", value)
@pulumi.input_type
class ManagementEventRuleConditionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
aggregation: Optional[pulumi.Input['ManagementEventAggregationConditionArgs']] = None,
data_source: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]] = None):
"""
A management event rule condition.
:param pulumi.Input[str] odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition'.
:param pulumi.Input['ManagementEventAggregationConditionArgs'] aggregation: How the data that is collected should be combined over time and when the alert is activated. Note that for management event alerts aggregation is optional – if it is not provided then any event will cause the alert to activate.
:param pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition')
if aggregation is not None:
pulumi.set(__self__, "aggregation", aggregation)
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def aggregation(self) -> Optional[pulumi.Input['ManagementEventAggregationConditionArgs']]:
"""
How the data that is collected should be combined over time and when the alert is activated. Note that for management event alerts aggregation is optional – if it is not provided then any event will cause the alert to activate.
"""
return pulumi.get(self, "aggregation")
@aggregation.setter
def aggregation(self, value: Optional[pulumi.Input['ManagementEventAggregationConditionArgs']]):
pulumi.set(self, "aggregation", value)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]):
pulumi.set(self, "data_source", value)
@pulumi.input_type
class RetentionPolicyArgs:
def __init__(__self__, *,
days: pulumi.Input[int],
enabled: pulumi.Input[bool]):
"""
Specifies the retention policy for the log.
:param pulumi.Input[int] days: the number of days for the retention in days. A value of 0 will retain the events indefinitely.
:param pulumi.Input[bool] enabled: a value indicating whether the retention policy is enabled.
"""
pulumi.set(__self__, "days", days)
pulumi.set(__self__, "enabled", enabled)
@property
@pulumi.getter
def days(self) -> pulumi.Input[int]:
"""
the number of days for the retention in days. A value of 0 will retain the events indefinitely.
"""
return pulumi.get(self, "days")
@days.setter
def days(self, value: pulumi.Input[int]):
pulumi.set(self, "days", value)
@property
@pulumi.getter
def enabled(self) -> pulumi.Input[bool]:
"""
a value indicating whether the retention policy is enabled.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "enabled", value)
@pulumi.input_type
class RuleEmailActionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
custom_emails: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
send_to_service_owners: Optional[pulumi.Input[bool]] = None):
"""
Specifies the action to send email when the rule condition is evaluated. The discriminator is always RuleEmailAction in this case.
:param pulumi.Input[str] odata_type: specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction'.
:param pulumi.Input[Sequence[pulumi.Input[str]]] custom_emails: the list of administrator's custom email addresses to notify of the activation of the alert.
:param pulumi.Input[bool] send_to_service_owners: Whether the administrators (service and co-administrators) of the service should be notified when the alert is activated.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction')
if custom_emails is not None:
pulumi.set(__self__, "custom_emails", custom_emails)
if send_to_service_owners is not None:
pulumi.set(__self__, "send_to_service_owners", send_to_service_owners)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="customEmails")
def custom_emails(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
the list of administrator's custom email addresses to notify of the activation of the alert.
"""
return pulumi.get(self, "custom_emails")
@custom_emails.setter
def custom_emails(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "custom_emails", value)
@property
@pulumi.getter(name="sendToServiceOwners")
def send_to_service_owners(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the administrators (service and co-administrators) of the service should be notified when the alert is activated.
"""
return pulumi.get(self, "send_to_service_owners")
@send_to_service_owners.setter
def send_to_service_owners(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "send_to_service_owners", value)
@pulumi.input_type
class RuleManagementEventClaimsDataSourceArgs:
def __init__(__self__, *,
email_address: Optional[pulumi.Input[str]] = None):
"""
The claims for a rule management event data source.
:param pulumi.Input[str] email_address: the email address.
"""
if email_address is not None:
pulumi.set(__self__, "email_address", email_address)
@property
@pulumi.getter(name="emailAddress")
def email_address(self) -> Optional[pulumi.Input[str]]:
"""
the email address.
"""
return pulumi.get(self, "email_address")
@email_address.setter
def email_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "email_address", value)
@pulumi.input_type
class RuleManagementEventDataSourceArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
claims: Optional[pulumi.Input['RuleManagementEventClaimsDataSourceArgs']] = None,
event_name: Optional[pulumi.Input[str]] = None,
event_source: Optional[pulumi.Input[str]] = None,
legacy_resource_id: Optional[pulumi.Input[str]] = None,
level: Optional[pulumi.Input[str]] = None,
metric_namespace: Optional[pulumi.Input[str]] = None,
operation_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_location: Optional[pulumi.Input[str]] = None,
resource_provider_name: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
sub_status: Optional[pulumi.Input[str]] = None):
"""
A rule management event data source. The discriminator fields is always RuleManagementEventDataSource in this case.
:param pulumi.Input[str] odata_type: specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource'.
:param pulumi.Input['RuleManagementEventClaimsDataSourceArgs'] claims: the claims.
:param pulumi.Input[str] event_name: the event name.
:param pulumi.Input[str] event_source: the event source.
:param pulumi.Input[str] legacy_resource_id: the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
:param pulumi.Input[str] level: the level.
:param pulumi.Input[str] metric_namespace: the namespace of the metric.
:param pulumi.Input[str] operation_name: The name of the operation that should be checked for. If no name is provided, any operation will match.
:param pulumi.Input[str] resource_group_name: the resource group name.
:param pulumi.Input[str] resource_location: the location of the resource.
:param pulumi.Input[str] resource_provider_name: the resource provider name.
:param pulumi.Input[str] resource_uri: the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
:param pulumi.Input[str] status: The status of the operation that should be checked for. If no status is provided, any status will match.
:param pulumi.Input[str] sub_status: the substatus.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource')
if claims is not None:
pulumi.set(__self__, "claims", claims)
if event_name is not None:
pulumi.set(__self__, "event_name", event_name)
if event_source is not None:
pulumi.set(__self__, "event_source", event_source)
if legacy_resource_id is not None:
pulumi.set(__self__, "legacy_resource_id", legacy_resource_id)
if level is not None:
pulumi.set(__self__, "level", level)
if metric_namespace is not None:
pulumi.set(__self__, "metric_namespace", metric_namespace)
if operation_name is not None:
pulumi.set(__self__, "operation_name", operation_name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if resource_location is not None:
pulumi.set(__self__, "resource_location", resource_location)
if resource_provider_name is not None:
pulumi.set(__self__, "resource_provider_name", resource_provider_name)
if resource_uri is not None:
pulumi.set(__self__, "resource_uri", resource_uri)
if status is not None:
pulumi.set(__self__, "status", status)
if sub_status is not None:
pulumi.set(__self__, "sub_status", sub_status)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def claims(self) -> Optional[pulumi.Input['RuleManagementEventClaimsDataSourceArgs']]:
"""
the claims.
"""
return pulumi.get(self, "claims")
@claims.setter
def claims(self, value: Optional[pulumi.Input['RuleManagementEventClaimsDataSourceArgs']]):
pulumi.set(self, "claims", value)
@property
@pulumi.getter(name="eventName")
def event_name(self) -> Optional[pulumi.Input[str]]:
"""
the event name.
"""
return pulumi.get(self, "event_name")
@event_name.setter
def event_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "event_name", value)
@property
@pulumi.getter(name="eventSource")
def event_source(self) -> Optional[pulumi.Input[str]]:
"""
the event source.
"""
return pulumi.get(self, "event_source")
@event_source.setter
def event_source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "event_source", value)
@property
@pulumi.getter(name="legacyResourceId")
def legacy_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "legacy_resource_id")
@legacy_resource_id.setter
def legacy_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "legacy_resource_id", value)
@property
@pulumi.getter
def level(self) -> Optional[pulumi.Input[str]]:
"""
the level.
"""
return pulumi.get(self, "level")
@level.setter
def level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "level", value)
@property
@pulumi.getter(name="metricNamespace")
def metric_namespace(self) -> Optional[pulumi.Input[str]]:
"""
the namespace of the metric.
"""
return pulumi.get(self, "metric_namespace")
@metric_namespace.setter
def metric_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metric_namespace", value)
@property
@pulumi.getter(name="operationName")
def operation_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the operation that should be checked for. If no name is provided, any operation will match.
"""
return pulumi.get(self, "operation_name")
@operation_name.setter
def operation_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "operation_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
the resource group name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceLocation")
def resource_location(self) -> Optional[pulumi.Input[str]]:
"""
the location of the resource.
"""
return pulumi.get(self, "resource_location")
@resource_location.setter
def resource_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_location", value)
@property
@pulumi.getter(name="resourceProviderName")
def resource_provider_name(self) -> Optional[pulumi.Input[str]]:
"""
the resource provider name.
"""
return pulumi.get(self, "resource_provider_name")
@resource_provider_name.setter
def resource_provider_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_provider_name", value)
@property
@pulumi.getter(name="resourceUri")
def resource_uri(self) -> Optional[pulumi.Input[str]]:
"""
the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "resource_uri")
@resource_uri.setter
def resource_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_uri", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The status of the operation that should be checked for. If no status is provided, any status will match.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter(name="subStatus")
def sub_status(self) -> Optional[pulumi.Input[str]]:
"""
the substatus.
"""
return pulumi.get(self, "sub_status")
@sub_status.setter
def sub_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_status", value)
@pulumi.input_type
class RuleMetricDataSourceArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
legacy_resource_id: Optional[pulumi.Input[str]] = None,
metric_name: Optional[pulumi.Input[str]] = None,
metric_namespace: Optional[pulumi.Input[str]] = None,
resource_location: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None):
"""
A rule metric data source. The discriminator value is always RuleMetricDataSource in this case.
:param pulumi.Input[str] odata_type: specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource'.
:param pulumi.Input[str] legacy_resource_id: the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
:param pulumi.Input[str] metric_name: the name of the metric that defines what the rule monitors.
:param pulumi.Input[str] metric_namespace: the namespace of the metric.
:param pulumi.Input[str] resource_location: the location of the resource.
:param pulumi.Input[str] resource_uri: the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource')
if legacy_resource_id is not None:
pulumi.set(__self__, "legacy_resource_id", legacy_resource_id)
if metric_name is not None:
pulumi.set(__self__, "metric_name", metric_name)
if metric_namespace is not None:
pulumi.set(__self__, "metric_namespace", metric_namespace)
if resource_location is not None:
pulumi.set(__self__, "resource_location", resource_location)
if resource_uri is not None:
pulumi.set(__self__, "resource_uri", resource_uri)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="legacyResourceId")
def legacy_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "legacy_resource_id")
@legacy_resource_id.setter
def legacy_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "legacy_resource_id", value)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> Optional[pulumi.Input[str]]:
"""
the name of the metric that defines what the rule monitors.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter(name="metricNamespace")
def metric_namespace(self) -> Optional[pulumi.Input[str]]:
"""
the namespace of the metric.
"""
return pulumi.get(self, "metric_namespace")
@metric_namespace.setter
def metric_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metric_namespace", value)
@property
@pulumi.getter(name="resourceLocation")
def resource_location(self) -> Optional[pulumi.Input[str]]:
"""
the location of the resource.
"""
return pulumi.get(self, "resource_location")
@resource_location.setter
def resource_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_location", value)
@property
@pulumi.getter(name="resourceUri")
def resource_uri(self) -> Optional[pulumi.Input[str]]:
"""
the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "resource_uri")
@resource_uri.setter
def resource_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_uri", value)
@pulumi.input_type
class RuleWebhookActionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
service_uri: Optional[pulumi.Input[str]] = None):
"""
Specifies the action to post to service when the rule condition is evaluated. The discriminator is always RuleWebhookAction in this case.
:param pulumi.Input[str] odata_type: specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction'.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: the dictionary of custom properties to include with the post operation. These data are appended to the webhook payload.
:param pulumi.Input[str] service_uri: the service uri to Post the notification when the alert activates or resolves.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction')
if properties is not None:
pulumi.set(__self__, "properties", properties)
if service_uri is not None:
pulumi.set(__self__, "service_uri", service_uri)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
the dictionary of custom properties to include with the post operation. These data are appended to the webhook payload.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="serviceUri")
def service_uri(self) -> Optional[pulumi.Input[str]]:
"""
the service uri to Post the notification when the alert activates or resolves.
"""
return pulumi.get(self, "service_uri")
@service_uri.setter
def service_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_uri", value)
@pulumi.input_type
class ThresholdRuleConditionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
operator: pulumi.Input['ConditionOperator'],
threshold: pulumi.Input[float],
data_source: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]] = None,
time_aggregation: Optional[pulumi.Input['TimeAggregationOperator']] = None,
window_size: Optional[pulumi.Input[str]] = None):
"""
A rule condition based on a metric crossing a threshold.
:param pulumi.Input[str] odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition'.
:param pulumi.Input['ConditionOperator'] operator: the operator used to compare the data and the threshold.
:param pulumi.Input[float] threshold: the threshold value that activates the alert.
:param pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
:param pulumi.Input['TimeAggregationOperator'] time_aggregation: the time aggregation operator. How the data that are collected should be combined over time. The default value is the PrimaryAggregationType of the Metric.
:param pulumi.Input[str] window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition')
pulumi.set(__self__, "operator", operator)
pulumi.set(__self__, "threshold", threshold)
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
if time_aggregation is not None:
pulumi.set(__self__, "time_aggregation", time_aggregation)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input['ConditionOperator']:
"""
the operator used to compare the data and the threshold.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input['ConditionOperator']):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def threshold(self) -> pulumi.Input[float]:
"""
the threshold value that activates the alert.
"""
return pulumi.get(self, "threshold")
@threshold.setter
def threshold(self, value: pulumi.Input[float]):
pulumi.set(self, "threshold", value)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]):
pulumi.set(self, "data_source", value)
@property
@pulumi.getter(name="timeAggregation")
def time_aggregation(self) -> Optional[pulumi.Input['TimeAggregationOperator']]:
"""
the time aggregation operator. How the data that are collected should be combined over time. The default value is the PrimaryAggregationType of the Metric.
"""
return pulumi.get(self, "time_aggregation")
@time_aggregation.setter
def time_aggregation(self, value: Optional[pulumi.Input['TimeAggregationOperator']]):
pulumi.set(self, "time_aggregation", value)
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[pulumi.Input[str]]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
@window_size.setter
def window_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "window_size", value)
| [
"[email protected]"
] | |
32965056a1b7a8f68e29a888ddf16692219f8202 | 6f2675eee55b7ebc5adf9c2176ced8cb59fc64d4 | /dataInterKingdee/interDebug.py | f5873ce9a0c97db0f8dd05bed388d20b019fdced | [] | no_license | wildmanwang/proDataInter | 8c2b65fa96ad45b21165d997b1769a28e12fc42a | f5a1f1fb195c66bf586bd999465c7e3b16453369 | refs/heads/master | 2023-06-07T11:57:16.763251 | 2023-06-03T08:54:56 | 2023-06-03T08:54:56 | 157,559,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | # -*- coding:utf-8 -*-
"""
"""
__author__ = "Cliff.wang"
import os
from interConfig import Settings
#from interProcess import InterProcess
from interControl import InterControl
if __name__ == "__main__":
try:
path = os.path.abspath(os.path.dirname(__file__))
sett = Settings(path, "config")
inter = InterControl(sett)
inter.interInit()
if 1 == 2:
            # Transfer base (master) data and business data
inter.interBusiData()
elif 1 == 2:
            # Fetch department IDs and user IDs
pass
except Exception as e:
print(str(e))
| [
"[email protected]"
] | |
a44db705bdc58cdcecdcd4b8200bf85a3d08fc83 | b15d2787a1eeb56dfa700480364337216d2b1eb9 | /samples/cli/accelbyte_py_sdk_cli/group/_get_group_join_request_public_v2.py | 32ba9735f4911a02f803f73dab69c4e7a260ec52 | [
"MIT"
] | permissive | AccelByte/accelbyte-python-sdk | dedf3b8a592beef5fcf86b4245678ee3277f953d | 539c617c7e6938892fa49f95585b2a45c97a59e0 | refs/heads/main | 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 | MIT | 2022-08-02T03:54:11 | 2021-09-27T04:00:10 | Python | UTF-8 | Python | false | false | 2,611 | py | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Group Service (2.18.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.group import (
get_group_join_request_public_v2 as get_group_join_request_public_v2_internal,
)
from accelbyte_py_sdk.api.group.models import ModelsGetMemberRequestsListResponseV1
from accelbyte_py_sdk.api.group.models import ResponseErrorResponse
@click.command()
@click.argument("group_id", type=str)
@click.option("--limit", "limit", type=int)
@click.option("--offset", "offset", type=int)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def get_group_join_request_public_v2(
group_id: str,
limit: Optional[int] = None,
offset: Optional[int] = None,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(get_group_join_request_public_v2_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {"Authorization": login_with_auth}
else:
login_as_internal(login_as)
result, error = get_group_join_request_public_v2_internal(
group_id=group_id,
limit=limit,
offset=offset,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"getGroupJoinRequestPublicV2 failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
get_group_join_request_public_v2.operation_id = "getGroupJoinRequestPublicV2"
get_group_join_request_public_v2.is_deprecated = False
| [
"[email protected]"
] | |
c2d9305312002748edb2d0e5470f541784c71352 | 3fc00c49c6b5a5d3edb4f5a97a86ecc8f59a3035 | /shared_models/test/test_api.py | ae9465bb6b3b41416d097c202b1034470650a378 | [] | no_license | yc-hu/dm_apps | 9e640ef08da8ecefcd7008ee2d4f8f268ec9062e | 483f855b19876fd60c0017a270df74e076aa0d8b | refs/heads/master | 2023-04-07T13:13:55.999058 | 2021-04-12T10:19:21 | 2021-04-12T10:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,264 | py | from django.test import tag
from django.urls import reverse
from rest_framework import status
from shared_models.test import SharedModelsFactoryFloor as FactoryFloor
from shared_models.test.common_tests import CommonTest
class TestUserAPIListView(CommonTest):
def setUp(self):
super().setUp()
self.user = self.get_and_login_user()
self.test_url = reverse("user-list", args=None)
@tag("api", 'user')
def test_url(self):
self.assert_correct_url("user-list", test_url_args=None, expected_url_path=f"/api/shared/users/")
@tag("api", 'user')
def test_get(self):
# PERMISSIONS
# authenticated users
response = self.client.get(self.test_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# unauthenticated users
self.client.logout()
response = self.client.get(self.test_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# TODO: build up this test!
# # RESPONSE DATA
# valid_user = None
# self.get_and_login_user(user=None)
# response = self.client.get(self.test_url)
# self.assertEqual(len(response.data), 1)
# self.assertEqual(response.data[0]["id"], self.instance.id)
# # or, for lists with pagination...
# self.assertEqual(len(data["results"]), 1)
# self.assertEqual(data["results"][0]["id"], self.instance.id)
#
# # check query params
# object = FactoryFloor.UserFactory()
# data = self.client.get(self.test_url+f"?={object.id}").data
# keys.extend([
# "",
# ])
# self.assert_dict_has_keys(data, keys)
@tag("api", 'user')
def test_unallowed_methods_only(self):
restricted_statuses = [status.HTTP_405_METHOD_NOT_ALLOWED, status.HTTP_403_FORBIDDEN]
self.assertIn(self.client.put(self.test_url, data=None).status_code, restricted_statuses)
self.assertIn(self.client.delete(self.test_url, data=None).status_code, restricted_statuses)
self.assertIn(self.client.post(self.test_url, data=None).status_code, restricted_statuses)
self.assertIn(self.client.patch(self.test_url, data=None).status_code, restricted_statuses)
| [
"[email protected]"
] | |
270875ed2be025781a975375972379cf8f211f80 | dfad28a2e1a0199c0117e551fd1e31804804d5b9 | /app/auth/views.py | d2df7a97666207276aa6648ef9f85af4a25d98bc | [
"MIT"
] | permissive | wilbrone/Pitches | c33d60b142b43de9ccf60a86cf59acbc262c6711 | b20d234fd930a6551f26d9cf863c6d1631b62bc2 | refs/heads/master | 2022-12-09T08:02:08.631177 | 2019-11-25T23:47:13 | 2019-11-25T23:47:13 | 223,405,696 | 0 | 0 | MIT | 2022-12-08T06:55:48 | 2019-11-22T13:09:30 | Python | UTF-8 | Python | false | false | 1,583 | py | from flask import render_template,redirect,url_for, flash,request
from flask_login import login_user,logout_user,login_required
from . import auth
from ..models import User
from .forms import LoginForm,RegistrationForm
from .. import db
from ..email import mail_message
@auth.route('/login',methods=['GET','POST'])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user = User.query.filter_by(email = login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user,login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
title = "One Minute Perfect Pitch login"
return render_template('auth/login.html',login_form = login_form,title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,full_name= form.full_name.data,password = form.password.data)
# saving the data
db.session.add(user)
db.session.commit()
mail_message("Welcome to One Minute Perfect Pitch","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
title = "New Account"
return render_template('auth/register.html',registration_form = form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
| [
"[email protected]"
] | |
9f04557904bdeeb5a5b0b9e265605429682ff434 | a867b1c9da10a93136550c767c45e0d8c98f5675 | /G_11_RemoveKthNode.py | 408aa2a8a0bdec884c65ff5c410cb79045ed72b6 | [] | no_license | Omkar02/FAANG | f747aacc938bf747129b8ff35b6648fb265d95b6 | ee9b245aa83ea58aa67954ab96442561dbe68d06 | refs/heads/master | 2023-03-25T19:45:08.153403 | 2021-03-28T07:13:08 | 2021-03-28T07:13:08 | 280,783,785 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 941 | py | import __main__ as main
from Helper.TimerLogger import CodeTimeLogging
fileName = main.__file__
fileName = fileName.split('\\')[-1]
CodeTimeLogging(Flag='F', filename=fileName, Tag='Linked-List', Difficult='Medium')
from Datastruct.masterLinkedList import l
arr = [1, 2, 3, 4, 5, 6]
# arr = [1, 2]
for i in arr:
l.insertStart(i)
# l.traverseList()
def removeKNodeFromEnd(head, k):
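    # Two-pointer gap technique: advance `second` k nodes ahead, then move both
    # pointers until `second` falls off the end; `first` then sits just before
    # the k-th node from the end (or on it, when the head itself must go).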
    print(f'Removed {k}th node from end: ', end='')
first = head
second = head
count = 1
while count <= k and second is not None:
second = second.nextNode
count += 1
if second is None:
head.data = first.nextNode.data
head.nextNode = first.nextNode.nextNode
l.traverseList()
return
while second.nextNode is not None:
second = second.nextNode
first = first.nextNode
first.nextNode = first.nextNode.nextNode
l.traverseList()
removeKNodeFromEnd(l.getHead(), 3)
| [
"[email protected]"
] | |
c119687b11afe9b22fca389be33ff9b8a804cf22 | 9322c270beaf1019328bf14c836d167145d45946 | /raoteh/sampler/tests/test_graph_transform.py | af315325cddb45fdc81619cf995488fd53736710 | [] | no_license | argriffing/raoteh | 13d198665a7a3968aad8d41ddad12c08d36d57b4 | cdc9cce8fdad0a79dbd90dfcdec6feece8fc931f | refs/heads/master | 2021-01-22T19:41:25.828133 | 2014-03-10T22:25:48 | 2014-03-10T22:25:48 | 10,087,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,511 | py | """Test graph algorithms relevant to Rao-Teh sampling.
"""
from __future__ import division, print_function, absolute_import
import itertools
from collections import defaultdict
import networkx as nx
from numpy.testing import (run_module_suite, TestCase,
assert_equal, assert_allclose, assert_, assert_raises)
from raoteh.sampler._graph_transform import (
get_edge_bisected_graph,
get_node_to_state,
remove_redundant_nodes,
get_redundant_degree_two_nodes,
get_chunk_tree,
add_trajectories,
)
# This is an official itertools recipe.
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s)+1))
class TestGraphTransform(TestCase):
def test_get_edge_bisected_graph(self):
# Create an example from the networkx documentation.
G = nx.Graph()
G.add_weighted_edges_from([
(1, 2, 0.125),
(1, 3, 0.75),
(2, 4, 1.2),
(3, 4, 0.375)])
# Create a new graph by bisecting the edges of the old graph.
H = get_edge_bisected_graph(G)
# The edge-bisected graph has twice as many edges.
assert_equal(len(G.edges()) * 2, len(H.edges()))
assert_equal(G.size()*2, H.size())
# The sum of edge weights is unchanged.
assert_allclose(G.size(weight='weight'), H.size(weight='weight'))
# The node set of the edge-bisected graph includes that of the original.
assert_(set(G) <= set(H))
# The added nodes are each greater than each original node.
assert_(max(G) < min(set(H) - set(G)))
def test_get_chunk_tree(self):
# Define the original tree and its event nodes.
# This is taken from a doodle in my notebook,
# and it is not particularly cleverly chosen.
tree_edges = (
(0, 1),
(1, 2),
(3, 4),
(4, 2),
(2, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9),
(7, 10),
(10, 11),
(11, 12),
(12, 13),
(13, 14),
(13, 15),
(15, 16),
(16, 17),
)
event_nodes = {1, 4, 5, 6, 8, 10, 11, 12, 15, 16}
# Create a tree by specifying the edges.
T = nx.Graph()
T.add_edges_from(tree_edges)
# Run tests, using all possible roots and also a default root.
potential_roots = list(T) + [None]
for root in potential_roots:
# Construct the chunk tree and its associated node maps.
results = get_chunk_tree(T, event_nodes)
chunk_tree, non_event_map, event_map = results
# The nodes pointed to by the non_event_map
# should be nodes in the chunk_tree.
assert_(set(non_event_map.values()) <= set(T))
# The output tree should have 11 nodes and 10 edges.
assert_equal(len(chunk_tree), 11)
assert_equal(len(chunk_tree.edges()), 10)
# The 8 non-event nodes should map to 7 unique chunk nodes.
assert_equal(len(non_event_map), 8)
assert_equal(len(set(non_event_map.values())), 7)
# The non-event nodes 13 and 14 should map to the same chunk.
assert_equal(non_event_map[13], non_event_map[14])
def test_remove_redundant_nodes_short_path(self):
# Define a short path with one redundant
# and one non-redundant internal node.
T = nx.Graph()
T.add_edge(0, 1, state=0, weight=1)
T.add_edge(1, 2, state=0, weight=1)
T.add_edge(2, 3, state=1, weight=1)
# Try removing a redundant node.
redundant_nodes = {1}
T_out = remove_redundant_nodes(T, redundant_nodes)
assert_equal(set(T_out), set(T) - redundant_nodes)
assert_equal(T_out[0][2]['weight'], 2)
# Fail at removing a non-redundant node.
redundant_nodes = {2}
assert_raises(
Exception,
remove_redundant_nodes,
T, redundant_nodes)
def test_remove_redundant_nodes_long_path(self):
# Define a path with multiple consecutive redundant internal nodes.
T = nx.Graph()
T.add_edge(0, 1, state=0, weight=1.1)
T.add_edge(1, 2, state=0, weight=1.2)
T.add_edge(2, 3, state=1, weight=1.3)
T.add_edge(3, 4, state=1, weight=1.4)
T.add_edge(4, 5, state=1, weight=1.5)
T.add_edge(5, 6, state=1, weight=1.6)
T.add_edge(6, 7, state=1, weight=1.7)
# Get the original weighted size.
# This is the sum of weights of all edges.
original_size = T.size(weight='weight')
# Check the set of redundant nodes.
all_redundant_nodes = {1, 3, 4, 5, 6}
obs_nodes = get_redundant_degree_two_nodes(T)
assert_equal(all_redundant_nodes, obs_nodes)
# Try removing all valid combinations of redundant nodes.
for redundant_node_tuple in powerset(all_redundant_nodes):
redundant_nodes = set(redundant_node_tuple)
T_out = remove_redundant_nodes(T, redundant_nodes)
assert_equal(set(T_out), set(T) - redundant_nodes)
assert_allclose(T_out.size(weight='weight'), original_size)
def test_remove_redundant_nodes_small_tree(self):
# Define a short path with one redundant
# and one non-redundant internal node.
T = nx.Graph()
T.add_edge(0, 1, state=0, weight=1)
T.add_edge(0, 2, state=0, weight=1)
T.add_edge(0, 3, state=0, weight=1)
# None of the nodes are considered redundant in the current
# implementation, because each node is of degree 1 or 3.
for redundant_nodes in ({0}, {1}, {2}, {3}):
assert_raises(
Exception,
remove_redundant_nodes,
T, redundant_nodes)
def test_remove_redundant_nodes_medium_tree(self):
# Define a tree.
T = nx.Graph()
T.add_edge(0, 10, state=0, weight=1.1)
T.add_edge(0, 20, state=0, weight=1.2)
T.add_edge(0, 30, state=0, weight=1.3)
T.add_edge(20, 21, state=0, weight=1.4)
T.add_edge(30, 31, state=0, weight=1.5)
T.add_edge(31, 32, state=0, weight=1.6)
# Get the original weighted size.
# This is the sum of weights of all edges.
original_size = T.size(weight='weight')
# Try removing all valid combinations of redundant nodes.
for redundant_node_tuple in powerset((20, 30, 31)):
redundant_nodes = set(redundant_node_tuple)
T_out = remove_redundant_nodes(T, redundant_nodes)
assert_equal(set(T_out), set(T) - redundant_nodes)
assert_allclose(T_out.size(weight='weight'), original_size)
class TestAddTrajectories(TestCase):
def test_compatible_trees(self):
T_base = nx.Graph()
T_base.add_edge(0, 1, weight=0.1)
T_base.add_edge(0, 2, weight=0.1)
T_base.add_edge(0, 3, weight=0.1)
T_traj = nx.Graph()
T_traj.add_edge(0, 1, state=0, weight=0.1)
T_traj.add_edge(0, 20, state=0, weight=0.05)
T_traj.add_edge(20, 2, state=0, weight=0.05)
T_traj.add_edge(0, 3, state=0, weight=0.1)
root = 0
T_merged, dummy_nodes = add_trajectories(T_base, root, [T_traj])
# There should not be any dummy nodes.
assert_equal(dummy_nodes, set())
# The merged tree should have four edges.
assert_equal(T_base.size(), 3)
assert_equal(T_merged.size(), 4)
# The total weight of the merged tree
# should be the same as the total weight of the base tree.
assert_allclose(
T_merged.size(weight='weight'),
T_base.size(weight='weight'))
def test_incompatible_trees(self):
T_base = nx.Graph()
T_base.add_edge(0, 1, weight=0.1)
T_base.add_edge(0, 2, weight=0.1)
T_base.add_edge(0, 3, weight=0.1)
root = 0
# Define a trajectory that is bad because it adds a high degree node.
traj = nx.Graph()
traj.add_edge(0, 4, state=0, weight=0.1)
traj.add_edge(4, 20, state=0, weight=0.05)
traj.add_edge(20, 2, state=0, weight=0.05)
traj.add_edge(4, 3, state=0, weight=0.1)
assert_raises(ValueError, add_trajectories,
T_base, root, [traj])
# Define a trajectory that is bad because it adds a leaf node.
traj = nx.Graph()
traj.add_edge(0, 1, state=0, weight=0.1)
traj.add_edge(0, 20, state=0, weight=0.05)
traj.add_edge(20, 2, state=0, weight=0.05)
traj.add_edge(0, 3, state=0, weight=0.05)
traj.add_edge(3, 4, state=0, weight=0.05)
assert_raises(ValueError, add_trajectories,
T_base, root, [traj])
# Define a trajectory that is bad
# because it flips around the nodes in a way that is incompatible
# with the original tree topology.
traj = nx.Graph()
traj.add_edge(1, 0, state=0, weight=0.1)
traj.add_edge(1, 2, state=0, weight=0.1)
traj.add_edge(1, 3, state=0, weight=0.1)
assert_raises(ValueError, add_trajectories,
T_base, root, [traj])
def test_complicated_incompatible_trees(self):
T_base = nx.Graph()
T_base.add_edge(0, 1, weight=0.1)
T_base.add_edge(0, 2, weight=0.1)
T_base.add_edge(0, 3, weight=0.1)
T_base.add_edge(3, 4, weight=0.1)
T_base.add_edge(3, 5, weight=0.1)
root = 0
# Define a trajectory that is bad
# because the topology is different in a way that cannot be detected
# by checking the degrees of the nodes.
traj = nx.Graph()
traj.add_edge(3, 1, state=0, weight=0.1)
traj.add_edge(3, 2, state=0, weight=0.1)
traj.add_edge(3, 0, state=0, weight=0.1)
traj.add_edge(0, 4, state=0, weight=0.1)
traj.add_edge(0, 5, state=0, weight=0.1)
assert_raises(ValueError, add_trajectories,
T_base, root, [traj])
def test_edge_to_event_times(self):
# The merged tree will look like the following,
# where 'x' is a node in the original tree,
# and 'a' is a node introduced by trajectory merging,
# and 'o' is an event node.
#
# x
# /|\
# / | \
# | | |
# o o x
# | | |
# x | | (0, 0)
# x |
# x
# /| (0, 0)
# / a
# / | (0, 10)
# | a
# x | (5, 10)
# a
# | (5, 0)
# o
# | (5, 0)
# a
# | (0, 0)
# x
#
T = nx.Graph()
T.add_edge(0, 1, weight=0.1)
T.add_edge(0, 2, weight=0.1)
T.add_edge(0, 3, weight=0.1)
T.add_edge(3, 4, weight=0.1)
T.add_edge(3, 5, weight=0.1)
T.add_edge(4, 6, weight=0.1)
root = 0
# Define a trajectory with an extra segment along one edge.
traj_a = nx.Graph()
traj_a.add_edge(0, 1, weight=0.1, state=0)
traj_a.add_edge(0, 2, weight=0.1, state=0)
traj_a.add_edge(0, 3, weight=0.1, state=0)
traj_a.add_edge(3, 4, weight=0.1, state=0)
traj_a.add_edge(3, 5, weight=0.1, state=0)
traj_a.add_edge(4, 10, weight=0.025, state=0)
traj_a.add_edge(10, 11, weight=0.05, state=5)
traj_a.add_edge(11, 6, weight=0.025, state=0)
# Define a trajectory with an interleaving segment.
traj_b = nx.Graph()
traj_b.add_edge(0, 1, weight=0.1, state=0)
traj_b.add_edge(0, 2, weight=0.1, state=0)
traj_b.add_edge(0, 3, weight=0.1, state=0)
traj_b.add_edge(3, 4, weight=0.1, state=0)
traj_b.add_edge(3, 5, weight=0.1, state=0)
traj_b.add_edge(4, 20, weight=0.02, state=0)
traj_b.add_edge(20, 21, weight=0.02, state=10)
traj_b.add_edge(21, 6, weight=0.06, state=0)
# Define a few event times along directed edges,
# where the edge direction radiates away from the root.
edge_to_event_times = {
(0, 1) : {0.06},
(0, 2) : {0.02},
(4, 6) : {0.045},
}
# Construct the merged tree.
T_merged, event_nodes = add_trajectories(
T, root,
[traj_a, traj_b],
edge_to_event_times=edge_to_event_times)
# After this point are some tests.
# Check the total number of nodes in the merged tree.
assert_equal(len(T_merged.edges()), 13)
# Check the multiset of edge state pairs in the merged tree.
state_pair_to_count = defaultdict(int)
for edge in nx.bfs_edges(T_merged, root):
na, nb = edge
states = T_merged[na][nb]['states']
state_pair = tuple(states)
assert_equal(len(state_pair), 2)
state_pair_to_count[state_pair] += 1
assert_equal(state_pair_to_count[(0, 10)], 1)
assert_equal(state_pair_to_count[(5, 10)], 1)
assert_equal(state_pair_to_count[(5, 0)], 2)
expected_state_pairs = set([(0, 0), (0, 10), (5, 10), (5, 0)])
assert_equal(set(state_pair_to_count), expected_state_pairs)
# Check that the number of event nodes is correct.
assert_equal(len(edge_to_event_times), len(event_nodes))
# The merged tree must contain all of the nodes of the original tree.
missing_nodes = set(T) - set(T_merged)
assert_equal(missing_nodes, set())
# The base tree, the two trajectories, and the merged tree
# should all have the same weighted size.
weighted_size = T.size(weight='weight')
assert_allclose(traj_a.size(weight='weight'), weighted_size)
assert_allclose(traj_b.size(weight='weight'), weighted_size)
assert_allclose(T_merged.size(weight='weight'), weighted_size)
# Each event node must be adjacent to exactly two edges
# in the merged tree, and both of these edges
# must be annotated with the same sequence of state values.
for node in event_nodes:
assert_equal(T_merged.degree(node), 2)
na, nb = T_merged[node]
na_states = T_merged[node][na]['states']
nb_states = T_merged[node][nb]['states']
assert_equal(na_states, nb_states)
# Print the edges of the merged tree.
"""
print()
print('--- add_trajectories test output ---')
print(event_nodes)
for edge in nx.bfs_edges(T_merged, root):
na, nb = edge
weight = T_merged[na][nb]['weight']
states = T_merged[na][nb]['states']
print(na, nb, weight, states)
print()
"""
"""
0 8 0.02 [0, 0]
0 3 0.1 [0, 0]
0 7 0.06 [0, 0]
8 2 0.08 [0, 0]
3 4 0.1 [0, 0]
3 5 0.1 [0, 0]
7 1 0.04 [0, 0]
4 9 0.02 [0, 0]
9 10 0.005 [0, 10]
10 11 0.015 [5, 10]
11 12 0.005 [5, 0]
12 13 0.03 [5, 0]
13 6 0.025 [0, 0]
"""
class TestGetNodeToState(TestCase):
def test_get_node_to_state_simple_tree_identical_states(self):
T = nx.Graph()
T.add_edge(0, 1, state=42)
T.add_edge(1, 2, state=42)
all_query_nodes = {0, 1, 2}
for query_nodes in powerset(all_query_nodes):
nnodes = len(query_nodes)
node_to_state = get_node_to_state(T, query_nodes)
assert_equal(set(node_to_state), set(query_nodes))
assert_equal(set(node_to_state.values()), set([42]*nnodes))
def test_get_node_to_state_simple_tree_different_states(self):
T = nx.Graph()
T.add_edge(0, 1, state=42)
T.add_edge(1, 2, state=42)
T.add_edge(2, 3, state=99)
# Some of the nodes have defined states.
query_nodes = {0, 1, 3}
node_to_state = get_node_to_state(T, query_nodes)
assert_equal(node_to_state, {0:42, 1:42, 3:99})
# But node 2 does not have a defined state
# because it represents a state transition.
query_nodes = {0, 1, 2, 3}
assert_raises(ValueError, get_node_to_state, T, query_nodes)
def test_complicated_tree(self):
T = nx.Graph()
T.add_edge(0, 1, state=2)
T.add_edge(0, 2, state=2)
T.add_edge(0, 3, state=2)
T.add_edge(3, 4, state=10)
T.add_edge(4, 5, state=10)
T.add_edge(4, 6, state=10)
# Most of the nodes have defined states.
query_nodes = {0, 1, 2, 4, 5, 6}
expected_node_to_state = {0:2, 1:2, 2:2, 4:10, 5:10, 6:10}
node_to_state = get_node_to_state(T, query_nodes)
assert_equal(node_to_state, expected_node_to_state)
# One of the nodes is a transition without a defined state.
query_nodes = {0, 1, 2, 3, 4, 5, 6}
assert_raises(ValueError, get_node_to_state, T, query_nodes)
if __name__ == '__main__':
run_module_suite()
| [
"[email protected]"
] | |
7b5a81f5531be906c6c75c6ea6ee45ae41407e10 | 188950fb7b1fce4840b41e1e9454f0133a8d75ce | /src/Server/Controller/guess_controller.py | a2518f5c1fdefce113aeaec0371319b7b16a82fa | [] | no_license | cloew/WordGuessAngular | 3f5c6a1e0e14f6e905ec78a618b606ff3cb3e798 | 0d889cd3bb9cafe35a6e7e2ccba97914a26825b9 | refs/heads/master | 2021-01-01T05:53:26.776161 | 2014-09-01T14:55:39 | 2014-09-01T14:55:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | from Server.game_wrapper import GameWrapper
from kao_flask.controllers.json_controller import JSONController
class GuessController(JSONController):
""" Controller to allow a player to guess the word for the current Round """
def performWithJSON(self, gameId):
game = GameWrapper(id=gameId)
results = game.guess(self.json['guesses'])
return game.toJSON() | [
"[email protected]"
] | |
45bd5115c7a3405823961182633a568318a1d2ef | 7234e6c72eb3f09c4a66dbe91f00fdf7742f010f | /algo/arrays/binarysearch/shiftedBinarySearch.py | fc901758206f1662bac912102f0b1b7740f4186f | [] | no_license | srinathalla/python | 718ac603473e7bed060ba66aa3d39a90cf7ef69d | b6c546070b1738350303df3939888d1b0e90e89b | refs/heads/master | 2021-06-13T06:11:42.653311 | 2021-02-19T06:01:41 | 2021-02-19T06:01:41 | 150,374,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | #
# T.C : O(logn) S.C : O(1)
# #
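# Idea: in a rotated sorted array at least one side of the midpoint is still
# sorted. Compare array[m] with array[r] to find the sorted half, then keep
# whichever half could contain the target (assumes distinct values).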
def shiftedBinarySearch(array, target):
l = 0
r = len(array)-1
while l < r:
m = (l + r)//2
if array[m] == target:
return m
elif array[m] < array[r]:
if array[m] < target and target <= array[r]:
l = m + 1
else:
r = m - 1
elif array[m] > array[r]:
if array[l] <= target and target < array[m]:
r = m - 1
else:
l = m + 1
return l if array[l] == target else -1
print(shiftedBinarySearch([5, 23, 111, 1], 111))
print(shiftedBinarySearch([45, 61, 71, 72, 73, 0, 1, 21, 33, 45], 33))
| [
"[email protected]"
] | |
cd83a748401283dfbf2bddb5137bb34063e8eb43 | 1825283527f5a479204708feeaf55f4ab6d1290b | /leetcode/python/50/50.powx-n.py | c24eb3b7c7bcc033fb5286680caebed06bbe3c0f | [] | no_license | frankieliu/problems | b82c61d3328ffcc1da2cbc95712563355f5d44b5 | 911c6622448a4be041834bcab25051dd0f9209b2 | refs/heads/master | 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 | HTML | UTF-8 | Python | false | false | 802 | py | #
# @lc app=leetcode id=50 lang=python3
#
# [50] Pow(x, n)
#
# https://leetcode.com/problems/powx-n/description/
#
# algorithms
# Medium (27.38%)
# Total Accepted: 281K
# Total Submissions: 1M
# Testcase Example: '2.00000\n10'
#
# Implement pow(x, n), which calculates x raised to the power n (x^n).
#
# Example 1:
#
#
# Input: 2.00000, 10
# Output: 1024.00000
#
#
# Example 2:
#
#
# Input: 2.10000, 3
# Output: 9.26100
#
#
# Example 3:
#
#
# Input: 2.00000, -2
# Output: 0.25000
# Explanation: 2^-2 = 1/2^2 = 1/4 = 0.25
#
#
# Note:
#
#
# -100.0 < x < 100.0
# n is a 32-bit signed integer, within the range [−2^31, 2^31 − 1]
#
#
#
class Solution:
def myPow(self, x, n):
"""
:type x: float
:type n: int
:rtype: float
"""
| [
"[email protected]"
] | |
2c72fc48e73c2fcf5db27a84c63d3341b2696983 | ed7fde0483a4836bfc9ef3ab887cf1220559bfc7 | /masters_scripts/EC17_get_allele_dist_1.py | 80bb3023acd365ccf7683c6816f51994e190d9c1 | [] | no_license | cizydorczyk/python_scripts | 326b3142a3c6ce850237e8b13e229854699c6359 | b914dcff60727bbfaa2b32e1a634ca9ca354eeeb | refs/heads/master | 2023-05-11T14:29:44.548144 | 2023-05-05T19:39:28 | 2023-05-05T19:39:28 | 116,588,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,850 | py | from sys import argv
import numpy as np
import itertools
script, inputallelicdepth, outputfile = argv
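# Expected invocation (Python 2): python EC17_get_allele_dist_1.py <allelic_depth_table> <output_counts_file>
# Input layout assumed from the parsing below: one header line, then tab-separated
# rows whose last field is a comma-separated allelic depth (AD) list.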
print "Working on file: " + inputallelicdepth.split('/')[-1]
with open(inputallelicdepth, 'r') as infile1:
lines = infile1.read().splitlines()
del lines[0]
proportions_breakdown = {1:[], 2:[], 3:[], 4:[]}
proportions = []
for i in lines:
line = i.strip().split('\t')
ad = [float(j) for j in line[-1].split(',')]
adsum = sum(ad)
numbases = len(ad[0:-1])
if adsum != 0.0:
for k in ad[0:-1]:
proportions_breakdown[numbases].append(round((k/adsum),2))
proportions.append(round((k/adsum),2))
elif adsum == 0.0:
# proportions[numbases].append(0.00)
continue
# Count total proportions:
proportions_dict = {}
for i in np.arange(0,1.01, 0.01):
proportions_dict[str(i)] = proportions.count(i)
# Count proportions with 2, 3, and 4 bases separately:
proportions_2_dict = {}
proportions_3_dict = {}
proportions_4_dict = {}
for i in np.arange(0,1.01, 0.01):
proportions_2_dict[str(i)] = proportions_breakdown[2].count(i)
for i in np.arange(0,1.01, 0.01):
proportions_3_dict[str(i)] = proportions_breakdown[3].count(i)
for i in np.arange(0,1.01, 0.01):
proportions_4_dict[str(i)] = proportions_breakdown[4].count(i)
with open(outputfile, 'w') as outfile1:
outfile1.write('proportion\ttotal_count\tcount_2\tcount_3\tcount_4\n')
for keyt, key2, key3, key4 in itertools.izip(sorted(proportions_dict.keys()), sorted(proportions_2_dict.keys()), sorted(proportions_3_dict.keys()), sorted(proportions_4_dict.keys())):
outfile1.write(str(keyt) + '\t' + str(proportions_dict[keyt]) + '\t' + str(proportions_2_dict[key2]) + '\t' + str(proportions_3_dict[key3]) + '\t' + str(proportions_4_dict[key4]) + '\n')
# for key, value in sorted(proportions_dict.iteritems()):
# outfile1.write(str(key) + '\t' + str(value) + '\n') | [
"[email protected]"
] | |
88267b9d5edb8a48d3ceb3ce7f9c307f1a46e175 | 55965f592cb7e915cd68bd371ee1a6ad2a6e0247 | /libmngmtsys.py | 79288d746d1e8cdb428259f150297c49244931cb | [] | no_license | Upasna4/Training | 2b5b57fc3e5229304860f153db93d912a44472bf | 33c6eeb565c422e40ea88d50af787f58b9f0da6d | refs/heads/master | 2020-08-05T03:50:36.280910 | 2019-10-02T16:36:09 | 2019-10-02T16:36:09 | 212,383,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,733 | py | memberData = {}
bookData = {}
borrowData = {}
m_id = 101
b_id = 201
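# In-memory "database":
#   memberData: member_id -> member name
#   bookData:   book_id   -> [book name, copies in stock]
#   borrowData: member_id -> id of the book that member currently has on loan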
print("Library Management System\n"
"1.Add Member\n"
"2.Add Book\n"
"3.Book Borrowing\n"
"4.Book Returning\n"
"5.Member Status\n"
"6.Book Status\n"
"7.Exit")
while True:
choice = int(input("Enter Choice: "))
if choice == 1:
print("Add Member Program")
loop1=True
while(loop1):
name = input("Member Name: ")
memberData.update({m_id: name}) #updates value of key and val
print("Member Added. Member id is: ", m_id)
m_id += 1 #incrementing value of m_id
while (True):
choice = input("Add more member (Y/N): ").lower().strip()
if choice == 'y':
break
elif choice == 'n':
loop1 = False
break
else:
print("invalid choice")
loop1=False
continue
elif choice == 2:
print("Add Book Program")
while True:
name = input("Book Name: ")
qty = int(input("enter quantity"))
            bookData.update({b_id: [name, qty]})  # store the book as id -> [name, quantity]
print("Book Added. Book id is: ", b_id)
b_id += 1
choice = input("Add more member (Y/N): ").lower().strip()
if choice == 'y':
continue
elif choice == 'n':
break
elif choice == 3:
print("Book Borrowing Program")
while True:
m_id = int(input("Member id: "))
            if m_id in memberData:  # make sure this member id exists
                b_name = input("Book Name: ")
                for b_id, b_name_qty in bookData.items():  # each value is [book name, quantity]
                    if b_name_qty[0] == b_name:  # index 0 of the value list is the book name
                        if b_name_qty[1] > 0:  # index 1 is the quantity; check copies are in stock
                            borrowData.update({m_id: b_id})  # record which book this member borrowed
                            bookData[b_id][1] -= 1  # one fewer copy in stock
break
else:
print("Book out of stock")
else:
print("Book not present")
choice = input("Add more member (Y/N): ").lower().strip()
if choice == 'y':
continue
elif choice == 'n':
break
elif choice == 4:
print("Book Returning Program")
m_id = int(input("Member Id: "))
name = input("Book Name: ")
        for member, book in list(borrowData.items()):  # borrowData maps member id -> borrowed book id
            if member == m_id and bookData[book][0] == name:
                bookData[book][1] += 1  # put the returned copy back in stock
                borrowData.pop(member)  # this member no longer holds the book
                break
        else:
            print("No matching borrowed book found")
choice = input("Add more member (Y/N): ").lower().strip()
if choice == 'y':
continue
elif choice == 'n':
break
elif choice == 5:
print("Member Status Program")
m_id = int(input("Member Id: "))
        if m_id in memberData:  # member status needs the id in both memberData and borrowData
            if m_id in borrowData:  # only members with a borrowed book have a record here
                b_id = borrowData[m_id]  # fetch the borrowed book id from the dict
                print("Member Name: ", memberData[m_id])  # the value for this key is the member's name
                print("Borrowed Book Name: ", bookData[b_id][0])  # the book this member currently holds
elif choice == 6:
print("Book Status Program")
b_id = int(input("Book Id: "))
        for member, book in borrowData.items():  # borrowData maps member id -> borrowed book id
            if book == b_id:
                print("Book name:", bookData[b_id][0])
                print("Book issued to member:", memberData[member])
                break
        else:
            print("Book is not currently issued to any member")
elif choice == 7:
break
else:
print("invalid choice") | [
"[email protected]"
] | |
03e79839472824d49009eb882c9be785ea788325 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/uhd_restpy/testplatform/sessions/ixnetwork/globals/globals.py | d957b41f406b4b6a75b7525b1800f265fe66875b | [] | no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,362 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class Globals(Base):
"""This object holds the configurable global values of IxNetwork for interfaces and the protocol stack.
The Globals class encapsulates a required globals resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'globals'
_SDM_ATT_MAP = {
'ApplicationName': 'applicationName',
'BuildNumber': 'buildNumber',
'ConfigFileName': 'configFileName',
'ConfigSummary': 'configSummary',
'IsConfigDifferent': 'isConfigDifferent',
'PersistencePath': 'persistencePath',
'ProductVersion': 'productVersion',
'RpfPort': 'rpfPort',
'Username': 'username',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(Globals, self).__init__(parent, list_op)
@property
def AppErrors(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.apperrors.apperrors.AppErrors): An instance of the AppErrors class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.apperrors.apperrors import AppErrors
if self._properties.get('AppErrors', None) is not None:
return self._properties.get('AppErrors')
else:
return AppErrors(self)
@property
def Diagnostics(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.diagnostics.diagnostics.Diagnostics): An instance of the Diagnostics class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.diagnostics.diagnostics import Diagnostics
if self._properties.get('Diagnostics', None) is not None:
return self._properties.get('Diagnostics')
else:
return Diagnostics(self)._select()
@property
def Interfaces(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.interfaces.interfaces.Interfaces): An instance of the Interfaces class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.interfaces.interfaces import Interfaces
if self._properties.get('Interfaces', None) is not None:
return self._properties.get('Interfaces')
else:
return Interfaces(self)._select()
@property
def Licensing(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.licensing.licensing.Licensing): An instance of the Licensing class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.licensing.licensing import Licensing
if self._properties.get('Licensing', None) is not None:
return self._properties.get('Licensing')
else:
return Licensing(self)._select()
@property
def PortTestOptions(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.porttestoptions.porttestoptions.PortTestOptions): An instance of the PortTestOptions class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.porttestoptions.porttestoptions import PortTestOptions
if self._properties.get('PortTestOptions', None) is not None:
return self._properties.get('PortTestOptions')
else:
return PortTestOptions(self)._select()
@property
def Preferences(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.preferences.preferences.Preferences): An instance of the Preferences class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.preferences.preferences import Preferences
if self._properties.get('Preferences', None) is not None:
return self._properties.get('Preferences')
else:
return Preferences(self)._select()
@property
def ProgressDialog(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.progressdialog.progressdialog.ProgressDialog): An instance of the ProgressDialog class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.progressdialog.progressdialog import ProgressDialog
if self._properties.get('ProgressDialog', None) is not None:
return self._properties.get('ProgressDialog')
else:
return ProgressDialog(self)._select()
@property
def ProtocolStack(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.protocolstack.ProtocolStack): An instance of the ProtocolStack class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.protocolstack import ProtocolStack
if self._properties.get('ProtocolStack', None) is not None:
return self._properties.get('ProtocolStack')
else:
return ProtocolStack(self)._select()
@property
def Testworkflow(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.testworkflow.testworkflow.Testworkflow): An instance of the Testworkflow class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.testworkflow.testworkflow import Testworkflow
if self._properties.get('Testworkflow', None) is not None:
return self._properties.get('Testworkflow')
else:
return Testworkflow(self)._select()
@property
def Topology(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.topology.topology_678a8dc80c9b4b2b5c741072eab4305d.Topology): An instance of the Topology class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.topology.topology_678a8dc80c9b4b2b5c741072eab4305d import Topology
if self._properties.get('Topology', None) is not None:
return self._properties.get('Topology')
else:
return Topology(self)._select()
@property
def ApplicationName(self):
# type: () -> str
"""
Returns
-------
- str:
"""
return self._get_attribute(self._SDM_ATT_MAP['ApplicationName'])
@property
def BuildNumber(self):
# type: () -> str
"""
Returns
-------
- str: The IxNetwork software build number.
"""
return self._get_attribute(self._SDM_ATT_MAP['BuildNumber'])
@property
def ConfigFileName(self):
# type: () -> str
"""
Returns
-------
- str: The name of the configuration file.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConfigFileName'])
@property
def ConfigSummary(self):
"""
Returns
-------
- list(dict(arg1:str,arg2:str,arg3:list[dict(arg1:str,arg2:str)])): A high level summary description of the currently loaded configuration
"""
return self._get_attribute(self._SDM_ATT_MAP['ConfigSummary'])
@property
def IsConfigDifferent(self):
# type: () -> bool
"""
Returns
-------
- bool: (Read only) If true, then the current IxNetwork configuration is different than the configuration that was previously loaded.
"""
return self._get_attribute(self._SDM_ATT_MAP['IsConfigDifferent'])
@property
def PersistencePath(self):
# type: () -> str
"""
Returns
-------
- str: This attribute returns a directory of the IxNetwork API server machine, where users can drop their files from the client scripts using IxNetwork APIs. To Put files in this directory, users do not require to run IxNetwork API server in administrative mode
"""
return self._get_attribute(self._SDM_ATT_MAP['PersistencePath'])
@property
def ProductVersion(self):
# type: () -> str
"""
Returns
-------
- str:
"""
return self._get_attribute(self._SDM_ATT_MAP['ProductVersion'])
@property
def RpfPort(self):
# type: () -> int
"""
Returns
-------
- number:
"""
return self._get_attribute(self._SDM_ATT_MAP['RpfPort'])
@property
def Username(self):
# type: () -> str
"""
Returns
-------
- str: The name of the user.
"""
return self._get_attribute(self._SDM_ATT_MAP['Username'])
| [
"[email protected]"
] | |
6cdaa4435e0e15d1f90e91b2cdd9468848c117bf | 9a258d81d612b855e244e4a03594ebe312ff3268 | /webapp/tests/test_urls.py | 8a82dcab33b5fefed07c162dd7d7b024a90d642f | [
"MIT"
] | permissive | erischon/p10_digitalocean | 19fb39f7442e0eec669fbd1ef5b2d49464c37493 | a850dfb97470da57117fa1dfc62c4614a602fe40 | refs/heads/master | 2023-04-27T16:52:04.158502 | 2021-05-15T22:44:34 | 2021-05-15T22:44:34 | 360,518,773 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | from django.test import SimpleTestCase
from django.urls import reverse, resolve
from webapp.views import home_page, mentions
class WebappTestUrls(SimpleTestCase):
def test_home_url_is_resolved(self):
url = reverse('home')
self.assertEqual(resolve(url).func, home_page)
def test_mentions_url_is_resolved(self):
url = reverse('mentions')
self.assertEqual(resolve(url).func, mentions)
| [
"[email protected]"
] | |
e4fbb87cc08aaac02be49eda20357561270994c1 | 61863d68d64c9319cd49d280b20d2c2a40957363 | /r2lab.inria.fr/users/views.py | b878f3e298472f439face96a6b9b5c151f7c9383 | [] | no_license | sfehlandt/r2lab | fd781637d258a7fc40043f4f8cddef9ec672b563 | b4f8ddd84327c426b20fe8f772a4e5e47e0cce31 | refs/heads/master | 2021-01-22T18:46:29.143500 | 2017-03-08T17:38:29 | 2017-03-08T17:38:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | from .plcapi_users import UsersProxy
| [
"[email protected]"
] | |
793fabc710ab61e60bc4ad701ef6d70a64ebffcc | 5d0f91e3a4c75375a2ba9b12cf3cbd4350c2ccdf | /geopdf/__init__.py | 11df3297614cf7a212aab066ac7d3ed89a52d353 | [
"MIT"
] | permissive | garnertb/geopdf | 8fac6419e62db9d880d48bb4b202cfbf11729629 | 175073cb44a308513bdb6db32092dd806029afc0 | refs/heads/master | 2021-01-10T18:50:22.802931 | 2015-06-09T13:53:43 | 2015-06-09T13:53:43 | 29,563,939 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 6,121 | py | # -*- coding: utf-8 -*-
"""Adds GeoPDF functionality to ReportLab"""
from reportlab.lib.colors import black
from reportlab.pdfbase.pdfdoc import PDFArray, PDFDictionary, PDFName, PDFString
from reportlab.pdfbase import pdfdoc
from reportlab.pdfgen import canvas
class GeoPDFBase(object, PDFDictionary):
"""
Base class for GeoPDF dicts.
"""
def __init__(self, dict=None):
"""dict should be namestring to value eg "a": 122 NOT pdfname to value NOT "/a":122"""
if dict is None:
self.dict = {}
else:
self.dict = dict.copy()
self.set_defaults()
def set_defaults(self):
"""
A hook for creating default values.
"""
return
def is_valid(self):
"""
Test the validity of the dict.
"""
return True
class Projection(GeoPDFBase):
"""
A Projection dict.
"""
def set_defaults(self):
self.dict.setdefault('ProjectionType', PDFString('GEOGRAPHIC'))
self.dict.setdefault('Type', PDFName('Projection'))
class LGIDict(GeoPDFBase):
"""
The LGI dict.
"""
def set_defaults(self):
self.dict.setdefault('Type', PDFString('LGIDict'))
self.dict.setdefault('Version', PDFString('2.1'))
self.dict.setdefault('Projection', Projection({'Datum': PDFString('WE')}))
def is_valid(self):
if not any(map(lambda key: key in self.dict, 'Registration CTM'.split())):
return False
for key, value in self.dict.items():
if hasattr(value, 'is_valid') and getattr(value, 'is_valid')() is False:
return False
return True
class GeoCanvas(canvas.Canvas, object):
LGIDict = PDFArray([])
def _startPage(self):
# now get ready for the next one
super(GeoCanvas, self)._startPage()
self.LGIDict = PDFArray([])
def showPage(self):
"""Close the current page and possibly start on a new page."""
# ensure a space at the end of the stream - Acrobat does
# not mind, but Ghostscript dislikes 'Qendstream' even if
# the length marker finishes after 'Q'
pageWidth = self._pagesize[0]
pageHeight = self._pagesize[1]
cM = self._cropMarks
code = self._code
if cM:
bw = max(0, getattr(cM, 'borderWidth', 36))
if bw:
markLast = getattr(cM, 'markLast', 1)
ml = min(bw, max(0, getattr(cM, 'markLength', 18)))
mw = getattr(cM, 'markWidth', 0.5)
mc = getattr(cM, 'markColor', black)
mg = 2 * bw - ml
cx0 = len(code)
if ml and mc:
self.saveState()
self.setStrokeColor(mc)
self.setLineWidth(mw)
self.lines([
(bw, 0, bw, ml),
(pageWidth + bw, 0, pageWidth + bw, ml),
(bw, pageHeight + mg, bw, pageHeight + 2 * bw),
(pageWidth + bw, pageHeight + mg, pageWidth + bw, pageHeight + 2 * bw),
(0, bw, ml, bw),
(pageWidth + mg, bw, pageWidth + 2 * bw, bw),
(0, pageHeight + bw, ml, pageHeight + bw),
(pageWidth + mg, pageHeight + bw, pageWidth + 2 * bw, pageHeight + bw)
])
self.restoreState()
if markLast:
# if the marks are to be drawn after the content
# save the code we just drew for later use
L = code[cx0:]
del code[cx0:]
cx0 = len(code)
bleedW = max(0, getattr(cM, 'bleedWidth', 0))
self.saveState()
self.translate(bw - bleedW, bw - bleedW)
if bleedW:
# scale everything
self.scale(1 + (2.0 * bleedW) / pageWidth, 1 + (2.0 * bleedW) / pageHeight)
# move our translation/expansion code to the beginning
C = code[cx0:]
del code[cx0:]
code[0:0] = C
self.restoreState()
if markLast:
code.extend(L)
pageWidth = 2 * bw + pageWidth
pageHeight = 2 * bw + pageHeight
code.append(' ')
page = pdfdoc.PDFPage()
page.__NoDefault__ = """Parent
MediaBox Resources Contents CropBox Rotate Thumb Annots B Dur Hid Trans AA
PieceInfo LastModified SeparationInfo ArtBox TrimBox BleedBox ID PZ
Trans LGIDict""".split()
page.pagewidth = pageWidth
page.pageheight = pageHeight
if getattr(self, 'LGIDict', None):
if len(self.LGIDict.sequence) == 1:
page.LGIDict = self.LGIDict.sequence[0]
else:
page.LGIDict = self.LGIDict
page.Rotate = self._pageRotation
page.hasImages = self._currentPageHasImages
page.setPageTransition(self._pageTransition)
page.setCompression(self._pageCompression)
if self._pageDuration is not None:
page.Dur = self._pageDuration
strm = self._psCommandsBeforePage + [self._preamble] + code + self._psCommandsAfterPage
page.setStream(strm)
self._setColorSpace(page)
self._setExtGState(page)
self._setXObjects(page)
self._setShadingUsed(page)
self._setAnnotations(page)
self._doc.addPage(page)
if self._onPage:
self._onPage(self._pageNumber)
self._startPage()
def addGeo(self, **kwargs):
"""
Adds the LGIDict to the document.
:param kwargs: Keyword arguments that are used to update the LGI Dictionary.
"""
lgi = LGIDict()
lgi.dict.update(kwargs)
if not lgi.is_valid():
return
pdf_obj = lgi.format(self._doc)
self.LGIDict.sequence.append(pdf_obj)
return pdf_obj
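# --- usage sketch (not part of the original module) ---
# A minimal, hypothetical example of driving GeoCanvas.addGeo(). The exact
# Registration payload is an assumption: LGIDict.is_valid() only requires a
# 'Registration' or 'CTM' key, it does not document the value's structure.
#
#   canv = GeoCanvas('map.pdf', pagesize=(612, 792))
#   canv.drawString(72, 720, 'Georeferenced page')
#   canv.addGeo(Registration=PDFArray([...]))  # pixel/world control points (hypothetical)
#   canv.showPage()
#   canv.save()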
| [
"[email protected]"
] | |
aa49a4d64508c9fa62c1e3f29026d15008e407f4 | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/lib/surface/app/versions/delete.py | fe4a27d6de672df18ddf9b85bc4ecc86e88036db | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 4,568 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Delete command."""
import copy
from googlecloudsdk.api_lib.app import appengine_api_client
from googlecloudsdk.api_lib.app import service_util
from googlecloudsdk.api_lib.app import version_util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.resource import resource_printer
from googlecloudsdk.core.util import text
class VersionsDeleteError(exceptions.Error):
"""Errors occurring when deleting versions."""
pass
class Delete(base.DeleteCommand):
"""Delete a specified version.
You cannot delete a version of a service that is currently receiving traffic.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To delete a specific version of a specific service, run:
$ {command} --service myService v1
To delete a named version across all services, run:
$ {command} v1
To delete multiple versions of a specific service, run:
$ {command} --service myService v1 v2
To delete multiple named versions across all services, run:
$ {command} v1 v2
""",
}
@staticmethod
def Args(parser):
parser.add_argument('versions', nargs='+', help=(
'The versions to delete (optionally filtered by the --service flag).'))
parser.add_argument('--service', '-s',
help=('If specified, only delete versions from the '
'given service.'))
def Run(self, args):
client = appengine_api_client.GetApiClient()
services = client.ListServices()
all_versions = client.ListVersions(services)
# Sort versions to make behavior deterministic enough for unit testing.
versions = sorted(version_util.GetMatchingVersions(all_versions,
args.versions,
args.service))
services_to_delete = []
for service in sorted(services):
if (len([v for v in all_versions if v.service == service.id]) ==
len([v for v in versions if v.service == service.id])):
services_to_delete.append(service)
for version in copy.copy(versions):
if version.service == service.id:
versions.remove(version)
for version in versions:
if version.traffic_split:
# TODO(user): mention `migrate` once it's implemented.
# TODO(b/32869800): collect info on all versions before raising.
raise VersionsDeleteError(
'Version [{version}] is currently serving {allocation:.2f}% of '
'traffic for service [{service}].\n\n'
            'Please move all traffic away by deploying a new version with the '
'`--promote` argument or running `gcloud app services '
'set-traffic`.'.format(
version=version.id,
allocation=version.traffic_split * 100,
service=version.service))
if services_to_delete:
word = text.Pluralize(len(services_to_delete), 'service')
log.warn('Requested deletion of all existing versions for the following '
'{0}:'.format(word))
resource_printer.Print(services_to_delete, 'list', out=log.status)
console_io.PromptContinue(prompt_string=(
'\nYou cannot delete all versions of a service. Would you like to '
'delete the entire {0} instead?').format(word), cancel_on_no=True)
service_util.DeleteServices(client, services_to_delete)
if versions:
fmt = 'list[title="Deleting the following versions:"]'
resource_printer.Print(versions, fmt, out=log.status)
console_io.PromptContinue(cancel_on_no=True)
else:
if not services_to_delete:
log.warn('No matching versions found.')
version_util.DeleteVersions(client, versions)
| [
"[email protected]"
] | |
e1ea4c169eac6a692d0243c2fe8e607a7bc281e2 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /tests/components/github/test_diagnostics.py | 80dfaec24459735e6cd3e4ebee2a1a78979dbbc2 | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 2,437 | py | """Test GitHub diagnostics."""
import json
from aiogithubapi import GitHubException
from aiohttp import ClientSession
from homeassistant.components.github.const import CONF_REPOSITORIES, DOMAIN
from homeassistant.core import HomeAssistant
from .common import setup_github_integration
from tests.common import MockConfigEntry, load_fixture
from tests.components.diagnostics import get_diagnostics_for_config_entry
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_entry_diagnostics(
hass: HomeAssistant,
hass_client: ClientSession,
mock_config_entry: MockConfigEntry,
aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test config entry diagnostics."""
mock_config_entry.options = {CONF_REPOSITORIES: ["home-assistant/core"]}
response_json = json.loads(load_fixture("graphql.json", DOMAIN))
response_json["data"]["repository"]["full_name"] = "home-assistant/core"
aioclient_mock.post(
"https://api.github.com/graphql",
json=response_json,
headers=json.loads(load_fixture("base_headers.json", DOMAIN)),
)
aioclient_mock.get(
"https://api.github.com/rate_limit",
json={"resources": {"core": {"remaining": 100, "limit": 100}}},
headers={"Content-Type": "application/json"},
)
await setup_github_integration(hass, mock_config_entry, aioclient_mock)
result = await get_diagnostics_for_config_entry(
hass,
hass_client,
mock_config_entry,
)
assert result["options"]["repositories"] == ["home-assistant/core"]
assert result["rate_limit"] == {
"resources": {"core": {"remaining": 100, "limit": 100}}
}
assert (
result["repositories"]["home-assistant/core"]["full_name"]
== "home-assistant/core"
)
async def test_entry_diagnostics_exception(
hass: HomeAssistant,
hass_client: ClientSession,
init_integration: MockConfigEntry,
aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test config entry diagnostics with exception for ratelimit."""
aioclient_mock.get(
"https://api.github.com/rate_limit",
exc=GitHubException("error"),
)
result = await get_diagnostics_for_config_entry(
hass,
hass_client,
init_integration,
)
assert (
result["rate_limit"]["error"]
== "Unexpected exception for 'https://api.github.com/rate_limit' with - error"
)
| [
"[email protected]"
] | |
30cc1d1fc50d0f446d0341344fbc5cfd52d78242 | 9df89a1652d183d8fc654acd728f9a578d6d1912 | /cli/psym/graphql/query/customers.py | cc9b41c460503454a4b358260df7649396259444 | [
"BSD-3-Clause"
] | permissive | duranrojasm/symphony | b37d54a134e29093edacb80442e204fc71a37fbe | 55b3d0c20b669374303bafb10e9c96c734647c9c | refs/heads/main | 2023-08-24T02:00:33.433220 | 2021-10-28T20:35:23 | 2021-10-28T20:35:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,209 | py | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass, field as _field
from ...config import custom_scalars, datetime
from gql_client.runtime.variables import encode_variables
from gql import gql, Client
from gql.transport.exceptions import TransportQueryError
from functools import partial
from numbers import Number
from typing import Any, AsyncGenerator, Dict, List, Generator, Optional
from time import perf_counter
from dataclasses_json import DataClassJsonMixin, config
from ..fragment.customer import CustomerFragment, QUERY as CustomerFragmentQuery
# fmt: off
QUERY: List[str] = CustomerFragmentQuery + ["""
query CustomersQuery {
customers {
edges {
node {
...CustomerFragment
}
}
}
}
"""
]
class CustomersQuery:
@dataclass(frozen=True)
class CustomersQueryData(DataClassJsonMixin):
@dataclass(frozen=True)
class CustomerConnection(DataClassJsonMixin):
@dataclass(frozen=True)
class CustomerEdge(DataClassJsonMixin):
@dataclass(frozen=True)
class Customer(CustomerFragment):
pass
node: Optional[Customer]
edges: List[CustomerEdge]
customers: Optional[CustomerConnection]
# fmt: off
@classmethod
def execute(cls, client: Client) -> Optional[CustomersQueryData.CustomerConnection]:
variables: Dict[str, Any] = {}
new_variables = encode_variables(variables, custom_scalars)
response_text = client.execute(
gql("".join(set(QUERY))), variable_values=new_variables
)
res = cls.CustomersQueryData.from_dict(response_text)
return res.customers
# fmt: off
@classmethod
async def execute_async(cls, client: Client) -> Optional[CustomersQueryData.CustomerConnection]:
variables: Dict[str, Any] = {}
new_variables = encode_variables(variables, custom_scalars)
response_text = await client.execute_async(
gql("".join(set(QUERY))), variable_values=new_variables
)
res = cls.CustomersQueryData.from_dict(response_text)
return res.customers
| [
"[email protected]"
] | |
faf45b629da2c9b6f878c086d6691fdf8be9c9f5 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/CodeJamData/15/31/5.py | 0b76face2b61578a0a63ae7ae2bee12b06fe88cd | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | import os
import sys
from collections import defaultdict
problem_id = 'A'
sys.setrecursionlimit(10**9)
input_path = '%s.in' % problem_id
output_path = '%s.out' % problem_id
def read_line():
line = ''
while len(line) == 0:
line = input_file.readline().strip()
return line
def write_line(line):
print line
return output_file.write(line + os.linesep)
def solve():
r, c, w = map(int, read_line().split(' '))
nc = (c / w) * r + (w - 1)
if c % w:
nc += 1
return '%s' % nc
input_file = open(input_path, "r")
output_file = open(output_path, "w+")
T = int(read_line())
for case_id in xrange(1, T + 1):
write_line("Case #%d: %s" % (case_id, solve()))
input_file.close()
output_file.close() | [
"[email protected]"
] | |
7d0b9e321fad687717ba261f712748cb57d968a3 | 7848ded2f7b1cf5cc33380d739e0ceee5718ffec | /imrunicorn/activity_log/migrations/0006_auto_20210218_0756.py | 73aa939743b218d1fe05de35fdd5684fce3b3c7e | [] | no_license | benspelledabc/djangosite | cbed1a7da3eb6ba6eee05897ec928b350831fc6b | fa8004b20f790f56fc69e9d158128a867be700f3 | refs/heads/master | 2023-04-17T19:24:48.908640 | 2021-05-02T19:05:38 | 2021-05-02T19:05:38 | 294,891,690 | 1 | 1 | null | 2021-05-02T19:05:38 | 2020-09-12T07:16:11 | Python | UTF-8 | Python | false | false | 474 | py | # Generated by Django 3.0.7 on 2021-02-18 12:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('activity_log', '0005_activity_sfw'),
]
operations = [
migrations.AlterModelOptions(
name='activityphotovalidation',
options={'ordering': ('-activity_log', 'id'), 'verbose_name': 'Activity Photo Validation', 'verbose_name_plural': 'Activity Photo Validations'},
),
]
| [
"[email protected]"
] | |
8e56a302ab72b021d83ee70f0ad1e776d0ef9fc3 | 1956b7c652d8c2e22a9edc22032a1ee5a64b6b7b | /apps/partner/migrations/016_auto__change_data_type__commission_field.py | 0a86af3822ac6019bf771eb379a3bc08602d411f | [] | no_license | quantmScubism/django_oscar | 939bb5fd0d4caa17747e966a0a847939646808c1 | e283abbe89a0ca0488fc6442de0a0eb5b53f0149 | refs/heads/master | 2020-04-16T02:30:18.269115 | 2017-06-24T14:41:28 | 2017-06-24T14:41:28 | 95,303,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,247 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Stockrecord.commission'
db.alter_column('partner_stockrecord', 'commission',
self.gf('django.db.models.fields.DecimalField')(default=0, null=True, max_digits=12, decimal_places=2, blank=True))
def backwards(self, orm):
# Deleting field 'Stockrecord.commission'
db.delete_column('partner_stockrecord', 'commission')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'partner.partner': {
'Meta': {'object_name': 'Partner'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"})
},
'partner.stockalert': {
'Meta': {'ordering': "('-date_created',)", 'object_name': 'StockAlert'},
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '128'}),
'stockrecord': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alerts'", 'to': "orm['partner.StockRecord']"}),
'threshold': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'partner.stockrecord': {
'Meta': {'unique_together': "(('partner', 'partner_sku'),)", 'object_name': 'StockRecord'},
'cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'low_stock_threshold': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_allocated': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['partner.Partner']"}),
'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'price_currency': ('django.db.models.fields.CharField', [], {'default': "'GBP'", 'max_length': '12'}),
'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'price_retail': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stockrecords'", 'to': "orm['catalogue.Product']"}),
'selected_partner': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'default': '0', 'max_length': '128'}),
}
}
complete_apps = ['partner'] | [
"[email protected]"
] | |
88163ffa4c39f9c08b7cefc81c2eb7c2b7c7bed4 | f146cef3f2172275c8d7f526dab92951fa50eb2c | /COURSE/group project -week9/backup -backend day3/backend/app/users/views.py | d0da6f0f9c4c96335aafbf7f3314c9c3e1305e26 | [] | no_license | mehranj73/Bootcamp | fed04d3858d6d0bc1cdad94e1f05bd4f7a47c0ec | bd575cd02329ad1ce21b05350380dfbf17cbdd89 | refs/heads/master | 2023-02-09T06:50:00.590751 | 2019-08-22T18:56:02 | 2019-08-22T18:56:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,413 | py |
from django.contrib.auth.models import User
from rest_framework import filters
from rest_framework.generics import RetrieveAPIView, ListCreateAPIView, ListAPIView, UpdateAPIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from app.permissions import IsOwnerOrReadOnly
from .serializers import MyProfileSerializer, UserSerializer, UserProfileSerializer, MyUserSerializer
from .models import UserProfile
#GET my profile
# URL 'me/'
class GetMyProfile(RetrieveAPIView):
# allow this action only to the user who owns the profile or to admin
#permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly,)
queryset = UserProfile.objects.all()
serializer_class = MyProfileSerializer
def get(self, request, *args, **kwargs):
user = self.request.user
me = user.user_profile
serializer = self.get_serializer(me)
return Response(serializer.data)
#GET: to get all users
# URL 'list/'
class GenericGetUsersView(ListCreateAPIView):
# queryset = User.objects.all()
serializer_class = UserSerializer
def get_queryset(self):
return User.objects.all()
#GET userprofile by user ID
# URL <int:pk>
class GetUserProfileById(RetrieveAPIView):
queryset = UserProfile.objects.all()
serializer_class = UserProfileSerializer
lookup_url_kwarg = 'pk'
#POST: update user profile - userprofile model part (in front end to be united in same page with "update user profile-user model part)
#URL 'me/update/user-profile/'
class UpdateUserProfileView(UpdateAPIView):
serializer_class = MyProfileSerializer
queryset = UserProfile.objects.all()
permission_classes = [
IsAuthenticated,
IsOwnerOrReadOnly,
]
def update(self, request, *args, **kwargs):
user = self.request.user
serializer = MyProfileSerializer(instance=user.user_profile, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response( "User profile updated.", status=200)
else:
return Response( "Unable to perform request. Please try again later.", status=400)
#POST: update user profile - user model part (in front end to be united in same page with "update user profile-userprofile model part)
#URL 'me/update/user-profile/'
class UpdateUserProfileViewMyUser(UpdateAPIView):
serializer_class = MyProfileSerializer
queryset = User.objects.all()
permission_classes = [
IsAuthenticated,
IsOwnerOrReadOnly,
]
def update(self, request, *args, **kwargs):
user = self.request.user
serializer = MyUserSerializer(instance=user, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response( "User profile updated.", status=200)
else:
return Response( "Unable to perform request. Please try again later.", status=400)
#GET: to search by username or first name or last name
class SearchUsers(ListAPIView):
"""
GET: Search users
in Postman add in Params key: search, value: string
"""
serializer_class = UserSerializer
queryset = User.objects.all()
filter_backends = (filters.SearchFilter,)
search_fields = ('username', 'first_name', 'last_name')
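# --- usage sketch (not part of the original module) ---
# DRF's SearchFilter reads the ?search= query parameter, so a request like the
# one below matches against username, first_name and last_name. The
# '/api/users/search/' path is an assumption; the real URL depends on how this
# view is wired up in urls.py, which is not shown here.
#
#   GET /api/users/search/?search=maria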
| [
"[email protected]"
] | |
92df14d9f3a7a7b18fe39ebd9d18ab9b452e8f22 | 44032f82bcb767175cf86aeccee623eb6cfbd40e | /server/dvaapp/task_shared.py | 8733e35fc2bb8ee34d3eb83854a172bd24e95358 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | veyselkoparal/DeepVideoAnalytics | 3628d41f8e06547e177a7badd20b399bd7f9028a | 013f7e1efcc11f9ed5762192a91589aa6b4df359 | refs/heads/master | 2020-03-16T04:22:46.603989 | 2018-05-07T06:55:47 | 2018-05-07T06:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,253 | py | import os, json, requests, copy, time, subprocess, logging, shutil, zipfile, uuid, calendar, shlex, sys, tempfile, uuid
from models import Video, QueryRegion, QueryRegionIndexVector, DVAPQL, Region, Frame, Segment, IndexEntries, TEvent,\
Worker, TrainedModel
from django.conf import settings
from PIL import Image
from . import serializers
from dva.in_memory import redis_client
from .fs import ensure, upload_file_to_remote, upload_video_to_remote, get_path_to_file, \
download_video_from_remote_to_local, upload_file_to_path
def pid_exists(pid):
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
def relaunch_failed_task(old, app):
"""
TODO: Relaunch failed tasks, requires a rethink in how we store number of attempts.
Cleanup of objects created by previous task that that failed.
:param old:
:param app:
:return:
"""
if old.errored:
next_task = TEvent.objects.create(video=old.video, operation=old.operation, arguments=old.arguments,
parent=old.parent, parent_process=old.parent_process, queue=old.queue)
app.send_task(next_task.operation, args=[next_task.pk, ], queue=old.queue)
else:
raise ValueError("Task not errored")
def launch_worker(queue_name, worker_name):
p = subprocess.Popen(['./startq.py','{}'.format(queue_name)], close_fds=True)
message = "launched {} with pid {} on {}".format(queue_name, p.pid, worker_name)
return message
def import_path(dv,path,export=False,framelist=False):
if export:
dv.create_directory(create_subdirs=False)
output_filename = "{}/{}/{}.zip".format(settings.MEDIA_ROOT, dv.pk, dv.pk)
else:
dv.create_directory(create_subdirs=True)
extension = path.split('?')[0].split('.')[-1]
if framelist:
output_filename = "{}/{}/framelist.{}".format(settings.MEDIA_ROOT, dv.pk, extension)
else:
output_filename = "{}/{}/video/{}.{}".format(settings.MEDIA_ROOT, dv.pk, dv.pk, extension)
get_path_to_file(path,output_filename)
def count_framelist(dv):
frame_list = dv.get_frame_list()
return len(frame_list['frames'])
def load_dva_export_file(dv):
video_id = dv.pk
if settings.ENABLE_CLOUDFS:
fname = "/{}/{}.zip".format(video_id, video_id)
logging.info("Downloading {}".format(fname))
ensure(fname)
zipf = zipfile.ZipFile("{}/{}/{}.zip".format(settings.MEDIA_ROOT, video_id, video_id), 'r')
zipf.extractall("{}/{}/".format(settings.MEDIA_ROOT, video_id))
zipf.close()
video_root_dir = "{}/{}/".format(settings.MEDIA_ROOT, video_id)
old_key = None
for k in os.listdir(video_root_dir):
unzipped_dir = "{}{}".format(video_root_dir, k)
if os.path.isdir(unzipped_dir):
for subdir in os.listdir(unzipped_dir):
shutil.move("{}/{}".format(unzipped_dir, subdir), "{}".format(video_root_dir))
shutil.rmtree(unzipped_dir)
break
with open("{}/{}/table_data.json".format(settings.MEDIA_ROOT, video_id)) as input_json:
video_json = json.load(input_json)
importer = serializers.VideoImporter(video=dv, json=video_json, root_dir=video_root_dir)
importer.import_video()
source_zip = "{}/{}.zip".format(video_root_dir, video_id)
os.remove(source_zip)
def export_video_to_file(video_obj,export,task_obj):
if settings.ENABLE_CLOUDFS:
download_video_from_remote_to_local(video_obj)
video_id = video_obj.pk
export_uuid = str(uuid.uuid4())
file_name = '{}.dva_export.zip'.format(export_uuid)
try:
os.mkdir("{}/{}".format(settings.MEDIA_ROOT, 'exports'))
except:
pass
shutil.copytree('{}/{}'.format(settings.MEDIA_ROOT, video_id),
"{}/exports/{}".format(settings.MEDIA_ROOT, export_uuid))
a = serializers.VideoExportSerializer(instance=video_obj)
data = copy.deepcopy(a.data)
data['labels'] = serializers.serialize_video_labels(video_obj)
with file("{}/exports/{}/table_data.json".format(settings.MEDIA_ROOT, export_uuid), 'w') as output:
json.dump(data, output)
zipper = subprocess.Popen(['zip', file_name, '-r', '{}'.format(export_uuid)],
cwd='{}/exports/'.format(settings.MEDIA_ROOT))
zipper.wait()
shutil.rmtree("{}/exports/{}".format(settings.MEDIA_ROOT, export_uuid))
local_path = "{}/exports/{}".format(settings.MEDIA_ROOT, file_name)
path = task_obj.arguments.get('path', None)
if path:
if not path.endswith('dva_export.zip'):
if path.endswith('.zip'):
path = path.replace('.zip', '.dva_export.zip')
else:
path = '{}.dva_export.zip'.format(path)
upload_file_to_path(local_path, path)
os.remove(local_path)
export.url = path
else:
if settings.ENABLE_CLOUDFS:
upload_file_to_remote("/exports/{}".format(file_name))
export.url = "{}/exports/{}".format(settings.MEDIA_URL,file_name).replace('//exports','/exports')
def build_queryset(args,video_id=None,query_id=None,target=None,filters=None):
if target is None:
target = args['target']
if filters is None:
kwargs = args.get('filters',{})
else:
kwargs = filters
if video_id:
kwargs['video_id'] = video_id
if target == 'frames':
queryset = Frame.objects.all().filter(**kwargs)
elif target == 'regions':
queryset = Region.objects.all().filter(**kwargs)
elif target == 'query':
kwargs['pk'] = query_id
queryset = DVAPQL.objects.all().filter(**kwargs)
elif target == 'index_entries':
queryset = IndexEntries.objects.all().filter(**kwargs)
elif target == 'query_regions':
queryset = QueryRegion.objects.all().filter(**kwargs)
elif target == 'query_region_index_vectors':
queryset = QueryRegionIndexVector.objects.all().filter(**kwargs)
elif target == 'segments':
queryset = Segment.objects.filter(**kwargs)
else:
raise ValueError("target {} not found".format(target))
return queryset,target
def load_frame_list(dv,event_id,frame_index__gte=0,frame_index__lt=-1):
"""
    Load frames & regions specified in a JSON file and then automatically
    retrieve them in a distributed manner through CPU workers.
"""
frame_list = dv.get_frame_list()
temp_path = "{}.jpg".format(uuid.uuid1()).replace('-', '_')
video_id = dv.pk
frame_index_to_regions = {}
frames = []
for i, f in enumerate(frame_list['frames']):
if i == frame_index__lt:
break
elif i >= frame_index__gte:
try:
get_path_to_file(f['path'],temp_path)
im = Image.open(temp_path)
w, h = im.size
im.close()
except:
logging.exception("Failed to get {}".format(f['path']))
pass
else:
df, drs = serializers.import_frame_json(f,i,event_id,video_id,w,h)
frame_index_to_regions[i] = drs
frames.append(df)
shutil.move(temp_path,df.path())
fids = Frame.objects.bulk_create(frames,1000)
regions = []
for f in fids:
region_list = frame_index_to_regions[f.frame_index]
for dr in region_list:
dr.frame_id = f.id
regions.append(dr)
Region.objects.bulk_create(regions,1000)
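# --- illustrative input (not part of the original module) ---
# load_frame_list() iterates frame_list['frames'] and reads each entry's
# 'path'; any region data is handled by serializers.import_frame_json(). The
# shape below is inferred from that code, not a documented schema; the
# 'regions' key and its fields are assumptions.
#
#   {"frames": [{"path": "http://example.com/0001.jpg",
#                "regions": [{"x": 10, "y": 20, "w": 100, "h": 50}]}]}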
def download_and_get_query_path(start):
local_path = "{}/queries/{}_{}.png".format(settings.MEDIA_ROOT, start.pk, start.parent_process.uuid)
if not os.path.isfile(local_path):
source_path = "/queries/{}.png".format(start.parent_process.uuid)
image_data = redis_client.get(source_path)
if image_data:
with open(local_path, 'w') as fh:
fh.write(str(image_data))
else:
ensure(source_path,safe=True)
shutil.copy("{}{}".format(settings.MEDIA_ROOT,source_path),local_path)
return local_path
def download_and_get_query_region_path(start,regions):
query_local_path = download_and_get_query_path(start)
imdata = Image.open(query_local_path)
rpaths = []
for r in regions:
region_path = "{}/queries/region_{}_{}.png".format(settings.MEDIA_ROOT, r.pk, start.parent_process.uuid)
img2 = imdata.crop((r.x, r.y, r.x + r.w, r.y + r.h))
img2.save(region_path)
rpaths.append(region_path)
return rpaths
def get_query_dimensions(start):
query_local_path = download_and_get_query_path(start)
imdata = Image.open(query_local_path)
width, height = imdata.size
return width, height
def crop_and_get_region_path(df,images,temp_root):
if not df.materialized:
frame_path = df.frame_path()
if frame_path not in images:
images[frame_path] = Image.open(frame_path)
img2 = images[frame_path].crop((df.x, df.y, df.x + df.w, df.y + df.h))
region_path = df.path(temp_root=temp_root)
img2.save(region_path)
else:
return df.path()
return region_path
def ensure_files(queryset, target):
dirnames = {}
if target == 'frames':
for k in queryset:
ensure(k.path(media_root=''),dirnames)
elif target == 'regions':
for k in queryset:
if k.materialized:
ensure(k.path(media_root=''), dirnames)
else:
ensure(k.frame_path(media_root=''), dirnames)
elif target == 'segments':
for k in queryset:
ensure(k.path(media_root=''),dirnames)
elif target == 'indexes':
for k in queryset:
ensure(k.npy_path(media_root=''), dirnames)
else:
raise NotImplementedError
def import_frame_regions_json(regions_json,video,event_id):
"""
Import regions from a JSON with frames identified by immutable identifiers such as filename/path
:param regions_json:
:param video:
:param event_id:
:return:
"""
video_id = video.pk
filename_to_pk = {}
frame_index_to_pk = {}
if video.dataset:
# For dataset frames are identified by subdir/filename
filename_to_pk = { df.original_path(): (df.pk, df.frame_index)
for df in Frame.objects.filter(video_id=video_id)}
else:
# For videos frames are identified by frame index
frame_index_to_pk = { df.frame_index: (df.pk, df.segment_index) for df in
Frame.objects.filter(video_id=video_id)}
regions = []
not_found = 0
for k in regions_json:
if k['target'] == 'filename':
fname = k['filename']
if not fname.startswith('/'):
fname = '/{}'.format(fname)
if fname in filename_to_pk:
pk,findx = filename_to_pk[fname]
regions.append(serializers.import_region_json(k,frame_index=findx, frame_id=pk, video_id=video_id,
event_id=event_id))
else:
not_found += 1
elif k['target'] == 'index':
findx = k['frame_index']
pk,sindx = frame_index_to_pk[findx]
regions.append(serializers.import_region_json(k, frame_index=findx, frame_id=pk, video_id=video_id,
event_id=event_id))
else:
raise ValueError('invalid target: {}'.format(k['target']))
logging.info("{} filenames not found in the dataset".format(not_found))
Region.objects.bulk_create(regions,1000)
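# --- illustrative input (not part of the original module) ---
# Each entry in regions_json selects its frame either by 'filename' (datasets)
# or by 'index' (videos), matching the two branches above. Only 'target',
# 'filename' and 'frame_index' are taken directly from this function; the
# geometry keys shown are assumptions about what import_region_json expects.
#
#   [{"target": "filename", "filename": "/train/0001.jpg", "x": 5, "y": 5, "w": 40, "h": 30},
#    {"target": "index", "frame_index": 120, "x": 0, "y": 0, "w": 64, "h": 64}]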
def get_sync_paths(dirname,task_id):
if dirname == 'indexes':
f = [k.npy_path(media_root="") for k in IndexEntries.objects.filter(event_id=task_id) if k.features_file_name]
elif dirname == 'frames':
f = [k.path(media_root="") for k in Frame.objects.filter(event_id=task_id)]
elif dirname == 'segments':
f = []
for k in Segment.objects.filter(event_id=task_id):
f.append(k.path(media_root=""))
elif dirname == 'regions':
e = TEvent.objects.get(pk=task_id)
if e.operation == 'perform_transformation': # TODO: transformation events merely materialize, fix this
fargs = copy.deepcopy(e.arguments['filters'])
fargs['materialized'] = True
fargs['video_id'] = e.video_id
f = [k.path(media_root="") for k in Region.objects.filter(**fargs)]
else:
f = [k.path(media_root="") for k in Region.objects.filter(event_id=task_id) if k.materialized]
else:
raise NotImplementedError,"dirname : {} not configured".format(dirname)
return f
def upload(dirname,event_id,video_id):
if dirname:
fnames = get_sync_paths(dirname, event_id)
logging.info("Syncing {} containing {} files".format(dirname, len(fnames)))
for fp in fnames:
upload_file_to_remote(fp)
if fnames: # if files are uploaded, sleep three seconds to ensure that files are available before launching
time.sleep(3)
else:
upload_video_to_remote(video_id)
| [
"[email protected]"
] | |
f027e2fef6d80f6cee29c3c460427d5ff4690d31 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_073/ch18_2020_03_09_13_23_07_056737.py | 5580c305acffebf7d622cc6890b83f53b3de7ef7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | def verifica_idade(idade):
if idade>=21:
return 'liberado EUA e BRASILl'
if idad>=1 and idade<18:
return 'Não está liberado'
else:
return 'esta liberado BRASIL'
| [
"[email protected]"
] | |
14652fb38016928ddefc74fa43e0a8c3e8ada405 | 63b0fed007d152fe5e96640b844081c07ca20a11 | /ABC/ABC200~ABC299/ABC245/d2.py | 39575b7b194f9b94d10060fb30a0af67e9572081 | [] | no_license | Nikkuniku/AtcoderProgramming | 8ff54541c8e65d0c93ce42f3a98aec061adf2f05 | fbaf7b40084c52e35c803b6b03346f2a06fb5367 | refs/heads/master | 2023-08-21T10:20:43.520468 | 2023-08-12T09:53:07 | 2023-08-12T09:53:07 | 254,373,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | def polydiv(xs, ys):
xn = len(xs)
yn = len(ys)
zs = xs.copy()
qs = []
for _ in range(xn - yn + 1):
temp = zs[0] // ys[0]
for i in range(yn):
zs[i] -= temp * ys[i]
qs.append(temp)
zs = zs[1:]
if qs == []: qs = [0.]
return qs
n,m=map(int,input().split())
a=list(map(int,input().split()))
c=list(map(int,input().split()))
a=list(reversed(a))
c=list(reversed(c))
ans=[]
p=polydiv(c,a)
for i in range(len(p)):
ans.append(int(p[i]))
ans=list(reversed(ans))
print(*ans) | [
"[email protected]"
] | |
843d8fb2fb90c80110e6a1f94182e4440e561463 | 7a07d957316172fe78b341c6f5215df2ccdb24f6 | /assignment/EasyAI_all_program.py | c6fbae635760a88672fcd1070d47c597c1a75d57 | [] | no_license | chandraprakashh/Python_with_AI | 87ff4655c44eef9d0459cf0f2ceedabde88b0f1f | 6d76eeea94e0cb7402330a2beea1fc4a7ab73e29 | refs/heads/master | 2020-07-18T18:18:06.463302 | 2019-12-11T08:20:12 | 2019-12-11T08:20:12 | 206,291,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,008 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 13 22:50:57 2019
@author: user
"""
# Code => 1
from easyAI import TwoPlayersGame, Human_Player, AI_Player, Negamax
class GameOfBones( TwoPlayersGame ):
def __init__(self, players):
self.players = players
self.pile = 20
self.nplayer = 1
def possible_moves(self): return ['1','2','3']
def make_move(self,move): self.pile -= int(move)
def win(self): return self.pile<=0
def is_over(self): return self.win()
def show(self): print ("%d bones left in the pile" % self.pile)
def scoring(self): return 100 if game.win() else 0
ai = Negamax(13)
game = GameOfBones( [ Human_Player(), AI_Player(ai) ] )
history = game.play()
# Code => 2
from easyAI import TwoPlayersGame, AI_Player, Negamax
from easyAI.Player import Human_Player
class GameController(TwoPlayersGame):
def __init__(self, players):
self.players = players
self.nplayer = 1
self.board = [0] * 9
def possible_moves(self):
return [a + 1 for a, b in enumerate(self.board) if b == 0]
def make_move(self, move):
self.board[int(move) - 1] = self.nplayer
def loss_condition(self):
possible_combinations = [[1,2,3], [4,5,6], [7,8,9],
[1,4,7], [2,5,8], [3,6,9], [1,5,9], [3,5,7]]
return any([all([(self.board[i-1] == self.nopponent)
for i in combination]) for combination in possible_combinations])
def is_over(self):
return (self.possible_moves() == []) or self.loss_condition()
def show(self):
print('\n'+'\n'.join([' '.join([['. ', 'O', 'X'][self.board[3*j + i]]
for i in range(3)]) for j in range(3)]))
def scoring(self):
return -100 if self.loss_condition() else 0
if __name__ == "__main__":
algorithm = Negamax(7)
GameController([Human_Player(), AI_Player(algorithm)]).play()
| [
"[email protected]"
] | |
7678c21d2e011e118d23455f36514f5d73e162d6 | 8454441f899c3beb9fcea26cffc2f4c3cf75ff6a | /common/code/snippets/py/flask-get-header.py | a040c637e90ee07be18f7cd6ed97246a58f26c1e | [
"MIT"
] | permissive | nevesnunes/env | 4a837e8fcf4a6a597992103e0a0c3d0db93e1c78 | f2cd7d884d46275a2fcb206eeeac5a8e176b12af | refs/heads/master | 2023-08-22T15:49:35.897161 | 2023-08-15T13:51:08 | 2023-08-15T13:51:08 | 199,400,869 | 9 | 6 | MIT | 2023-06-22T10:59:51 | 2019-07-29T07:24:47 | Python | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python3
from flask import Flask, request
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello!"
@app.route("/<path:text>", methods=["GET", "POST"])
def echo(text):
return f"You said (len = {len(text)}): {bytes(text, 'latin-1')}"
@app.after_request
def after(response):
red_foo = b"\x1b\x5b\x33\x31\x6d\x66\x6f\x6f\x1b\x28\x42\x1b\x5b\x6d"
response.headers["X-Foo"] = red_foo
response.headers["X-Bar"] = "".join(
[chr(x) if x not in (ord("\r"), ord("\n")) else "" for x in range(0, 255)]
)
return response
if __name__ == "__main__":
app.run(port=18123)
| [
"[email protected]"
] | |
2ddaa2d8860b7299c64a636af17c11fbc5ebfa46 | c04acaa6ee9c6a7c365e217bc78039fa9c77833e | /cuzquena/urls.py | 785b7ed1280475deaaa389f28b11b64b4deafb40 | [] | no_license | danielhuamani/django-la-cuzquena | 0386800d640b224d94b0fac2d83f999b60d7da85 | a6f4aaf44775b27328d073a65f1d0f50eff51fad | refs/heads/master | 2020-12-05T04:51:01.077860 | 2016-09-17T13:56:58 | 2016-09-17T13:56:58 | 67,900,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | """cconline URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from filebrowser.sites import site
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^summernote/', include('django_summernote.urls')),
url(r'^admin/filebrowser/', include(site.urls)),
url(r'', include('my_apps.web.urls', namespace='web')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
8349477f2dc38370be2a6048b4ca40ce366e75e2 | f3a4b4c7c39d2ed2959b410367e8abc66493772e | /laplacianFlux/r2_1_0/__init__.py | c64bf8efa3593dcacfa71e4abd9edc4f9e87754b | [] | no_license | asimurzin/laplacianFlux | 6800bc5aba29968f7784ce91a5a1503318fad246 | 83977d5ce967b87ed0203a143d19d88c9a5d7ed7 | refs/heads/master | 2020-03-29T20:22:44.143734 | 2012-07-01T19:36:36 | 2012-07-01T19:36:36 | 1,613,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,376 | py | #!/usr/bin/env python
#--------------------------------------------------------------------------------------
## pythonFlu - Python wrapping for OpenFOAM C++ API
## Copyright (C) 2010- Alexey Petrov
## Copyright (C) 2009-2010 Pebble Bed Modular Reactor (Pty) Limited (PBMR)
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## See http://sourceforge.net/projects/pythonflu
##
## Author : Alexey PETROV
##
#----------------------------------------------------------------------------
from Foam import ref, man
#----------------------------------------------------------------------------
def _createFields( runTime, mesh ):
ref.ext_Info() << "Reading field T\n" << ref.nl
T = man.volScalarField( man.IOobject( ref.word( "T" ),
ref.fileName( runTime.timeName() ),
mesh,
ref.IOobject.MUST_READ,
ref.IOobject.AUTO_WRITE ),
mesh )
ref.ext_Info() << "Reading transportProperties\n" << ref.nl
transportProperties = man.IOdictionary( man.IOobject( ref.word( "transportProperties" ),
ref.fileName( runTime.constant() ),
mesh,
ref.IOobject.MUST_READ,
ref.IOobject.NO_WRITE ) )
ref.ext_Info() << "Reading diffusivity DT\n" << ref.nl
DT = ref.dimensionedScalar( transportProperties.lookup( ref.word( "DT" ) ) )
return T, transportProperties, DT
#--------------------------------------------------------------------------------------
def write( runTime, mesh, T ):
if runTime.outputTime():
gradT = ref.fvc.grad(T)
gradTx = ref.volScalarField( ref.IOobject( ref.word( "gradTx" ),
ref.fileName( runTime.timeName() ),
mesh,
ref.IOobject.NO_READ,
ref.IOobject.AUTO_WRITE ),
gradT.component( ref.vector.X ) )
gradTy = ref.volScalarField( ref.IOobject( ref.word( "gradTy" ),
ref.fileName( runTime.timeName() ),
mesh,
ref.IOobject.NO_READ,
ref.IOobject.AUTO_WRITE ),
gradT.component( ref.vector.Y ) )
gradTz = ref.volScalarField( ref.IOobject( ref.word( "gradTz" ),
ref.fileName( runTime.timeName() ),
mesh,
ref.IOobject.NO_READ,
ref.IOobject.AUTO_WRITE ),
gradT.component( ref.vector.Z ) )
runTime.write()
pass
#--------------------------------------------------------------------------------------
def main_standalone( argc, argv ):
args = ref.setRootCase( argc, argv )
runTime = man.createTime( args )
mesh = man.createMesh( runTime )
T, transportProperties, DT = _createFields( runTime, mesh )
simple = man.simpleControl( mesh )
ref.ext_Info() << "\nCalculating temperature distribution\n" << ref.nl
while runTime.loop() :
ref.ext_Info() << "Time = " << runTime.timeName() << ref.nl << ref.nl
while simple.correctNonOrthogonal():
ref.solve( ref.fvm.ddt( T ) - ref.fvm.laplacian( DT, T ) )
pass
write( runTime, mesh, T )
ref.ext_Info() << "ExecutionTime = " << runTime.elapsedCpuTime() << " s" << \
" ClockTime = " << runTime.elapsedClockTime() << " s" << ref.nl << ref.nl
pass
ref.ext_Info() << "End\n" << ref.nl
import os
return os.EX_OK
#--------------------------------------------------------------------------------------
import sys, os
from Foam import FOAM_VERSION
if FOAM_VERSION( ">=", "020100" ):
if __name__ == "__main__" :
argv = sys.argv
os._exit( main_standalone( len( argv ), argv ) )
pass
else:
from Foam.OpenFOAM import ext_Info
ref.ext_Info()<< "\nTo use this solver, It is necessary to SWIG OpenFoam2.1.0 or higher \n "
pass
#--------------------------------------------------------------------------------------
| [
"[email protected]"
] | |
178ebfab22130821e12bb8c9157a0436f54acf48 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/109/usersdata/172/63370/submittedfiles/av2_p3_civil.py | 4c68528d682769ee8dc9310c3e74e069e24ca4aa | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | # -*- coding: utf-8 -*-
import numpy as np
def somal(l,i):
soma=0
for j in range(0,l.shape[1],1):
soma=soma+l[i,j]
return (soma)
def somac(l,j):
soma=0
for i in range(0,l.shape[0],1):
soma=soma+l[i,j]
return (soma)
n=int(input('Tamanho: '))
g=int(input('Pl: '))
h=int(input('Pc: '))
l=np.zeros((n,n))
for i in range(0,l.shape[0],1):
for j in range(0,l.shape[1],1):
l[i,j]= int(input(' peso: '))
fim=somal(l,g)+somac(l,h)-(2*(l[g,h]))
print(fim)
| [
"[email protected]"
] | |
5d745f9fd64c2b44a2dd7a0b7c45e43d247a4cc2 | 1c0509a06cec726735048f00f63d2529f5e43ce6 | /code_supermarkets_france/analysis/analysis_qlmc_prices_2007_2012/stats_des/price_frequencies_by_chain.py | b951142551efcfccf3721c6c7e0bf28f2e1fe55d | [] | no_license | etiennecha/master_code | e99c62e93aa052a66d4cdd3f3e3aa25a3aec4880 | 48821f6c854a1c6aa05cf81b653b3b757212b6f8 | refs/heads/master | 2021-01-23T14:35:45.904595 | 2018-03-11T18:57:38 | 2018-03-11T18:57:38 | 16,312,906 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,391 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import add_to_path
from add_to_path import path_data
from functions_generic_qlmc import *
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
pd.set_option('float_format', '{:,.2f}'.format)
path_built_csv = os.path.join(path_data, 'data_supermarkets', 'data_built',
'data_qlmc_2007_2012', 'data_csv')
# #######################
# LOAD DATA
# #######################
# LOAD DF QLMC
df_qlmc = pd.read_csv(os.path.join(path_built_csv, 'df_qlmc.csv'),
parse_dates = ['date'],
dayfirst = True,
infer_datetime_format = True,
encoding = 'utf-8')
# Fix Store_Chain for prelim stats des
ls_sc_drop = ['CARREFOUR CITY',
'CARREFOUR CONTACT',
'CARREFOUR PLANET',
'GEANT DISCOUNT',
'HYPER CHAMPION',
'INTERMARCHE HYPER',
'LECLERC EXPRESS',
'MARCHE U',
'U EXPRESS']
df_qlmc = df_qlmc[~df_qlmc['store_chain'].isin(ls_sc_drop)]
ls_sc_replace = [('CENTRE E. LECLERC', 'LECLERC'),
('CENTRE LECLERC', 'LECLERC'),
('E. LECLERC', 'LECLERC'),
('E.LECLERC', 'LECLERC'),
('SYSTEME U', 'SUPER U'),
('GEANT', 'GEANT CASINO'),
('CHAMPION', 'CARREFOUR MARKET'),
('INTERMARCHE SUPER', 'INTERMARCHE'),
('HYPER U', 'SUPER U')]
for sc_old, sc_new in ls_sc_replace:
df_qlmc.loc[df_qlmc['store_chain'] == sc_old,
'store_chain'] = sc_new
# #############################################
# PRICE DISTRIBUTION PER CHAIN FOR TOP PRODUCTS
# #############################################
PD = PriceDispersion()
ls_prod_cols = ['section', 'family', 'product']
store_chain = 'CARREFOUR' # 'CENTRE E.LECLERC'
nb_obs_min = 20 # Product must be observed at X stores at least
pct_min = 0.33
ls_loop_scs = ['AUCHAN',
'CARREFOUR',
'CARREFOUR MARKET',
'GEANT CASINO', # no CASINO
'CORA',
'INTERMARCHE',
'LECLERC',
'SUPER U']
ls_dict_df_desc = []
ls_dict_df_chain_product_stats = []
ls_dict_df_chain_store_desc = []
for per in range(13):
df_qlmc_per = df_qlmc[df_qlmc['period'] == per]
dict_ls_se_desc = {'nb_stores_by_prod' : [],
'freq_prods' : [],
'nb_prods_by_store' : [],
'no_ref' : [],
'freq_stores' : []}
dict_df_chain_product_stats = {}
dict_df_chain_store_desc = {}
print()
print(u'-'*80)
print('Stats on chain prices for period:', per)
for store_chain in ls_loop_scs:
print()
print(u'-'*60)
print(store_chain)
# Build df with product most common prices
df_sub = df_qlmc_per[df_qlmc_per['store_chain'] == store_chain]
# Make sure no duplicates at store level
ls_sub_dup_cols = ls_prod_cols + ['id_lsa']
df_sub_dup = df_sub[(df_sub.duplicated(ls_sub_dup_cols, take_last = True)) |\
(df_sub.duplicated(ls_sub_dup_cols, take_last = False))]
df_sub = df_sub.drop_duplicates(ls_sub_dup_cols)
# Build df with product most common prices
df_sub_products = df_sub[ls_prod_cols + ['price']]\
.groupby(ls_prod_cols)\
.agg([len,
'mean',
PD.kurtosis,
PD.skew,
PD.price_1,
PD.price_1_fq,
PD.price_2,
PD.price_2_fq])['price']
df_sub_products.columns = [col.replace('PD.', '') for col in df_sub_products.columns]
df_sub_products.rename(columns = {'len': 'nb_obs'}, inplace = True)
df_sub_products['price_12_fq'] =\
df_sub_products[['price_1_fq', 'price_2_fq']].sum(axis = 1)
        # Problem with kurtosis and skew: division by zero when only one price is observed
        # Fix: set them to NaN (such distributions are a priori highly degenerate, hence not normal)
df_sub_products.loc[df_sub_products['kurtosis'].abs() >= 1000,
'kurtosis'] = np.nan
df_sub_products.loc[df_sub_products['skew'].abs() >= 1000,
'skew'] = np.nan
df_sub_products.reset_index(drop = False, inplace = True)
# Keep only products observed at enough stores
df_enough_obs = df_sub_products[(df_sub_products['nb_obs'] >= nb_obs_min)]
df_ref_price = df_sub_products[(df_sub_products['nb_obs'] >= nb_obs_min) &\
(df_sub_products['price_1_fq'] >= pct_min)]
# Save chain product stats
dict_df_chain_product_stats[store_chain] = df_enough_obs
# Define ref prices and get stats from store viewpoint
if len(df_enough_obs) >= 100:
print()
print(u'Overview at product level')
print(df_enough_obs.describe().to_string())
df_enough_obs_desc = df_enough_obs.describe()
dict_ls_se_desc['nb_stores_by_prod'].append(df_enough_obs_desc['nb_obs'])
dict_ls_se_desc['freq_prods'].append(df_enough_obs_desc['price_1_fq'])
print()
print(u'Nb prod w/ >= {:d} obs: {:d}'.format(\
nb_obs_min,
len(df_enough_obs)))
print(u'Nb prod w/ >= {:d} obs and ref price (33%+): {:d} ({:.0f}%)'.format(\
nb_obs_min,
len(df_ref_price),
len(df_ref_price) / float(len(df_enough_obs)) * 100))
df_sub = pd.merge(df_sub,
df_enough_obs,
on = ls_prod_cols,
how = 'left')
# Build df stores accounting for match with ref prices
df_sub['ref_price'] = 'diff'
df_sub.loc[df_sub['price'] == df_sub['price_1'],
'ref_price'] = 'price_1'
df_sub.loc[(df_sub['price'] != df_sub['price_1']) &\
(df_sub['price'] == df_sub['price_2']),
'ref_price'] = 'price_2'
df_sub.loc[(df_sub['price_1_fq'] <= pct_min),
'ref_price'] = 'no_ref'
df_ref = pd.pivot_table(data = df_sub[['store', 'ref_price']],
index = 'store',
columns = 'ref_price',
aggfunc = len,
fill_value = 0).astype(int)
try:
df_ref_pct = df_ref.apply(lambda x: x / x.sum(), axis = 1)
df_ref_pct['nb_obs'] = df_ref.sum(axis = 1).astype(int)
if 'no_ref' not in df_ref_pct.columns:
df_ref_pct['no_ref'] = 0
                # keep only stores with enough products
df_ref_pct = df_ref_pct[df_ref_pct['nb_obs'] >= 100]
print()
print(u'Overview at store level:')
print(df_ref_pct[['nb_obs',
'no_ref',
'diff',
'price_1',
'price_2']].describe())
df_ref_pct_desc = df_ref_pct.describe()
dict_ls_se_desc['nb_prods_by_store'].append(df_ref_pct_desc['nb_obs'])
dict_ls_se_desc['no_ref'].append(df_ref_pct_desc['no_ref'])
dict_ls_se_desc['freq_stores'].append(df_ref_pct_desc['price_1'])
# also save store stats for each chain
df_ref_pct.sort('price_1', ascending = False, inplace = True)
dict_df_chain_store_desc[store_chain] = df_ref_pct
except:
print()
print(u'Not enough data to display store ref prices')
for col in ['nb_prods_by_store', 'no_ref', 'freq_stores']:
dict_ls_se_desc[col].append(None)
else:
for col in ['nb_stores_by_prod', 'freq_prods',
'nb_prods_by_store', 'no_ref', 'freq_stores']:
dict_ls_se_desc[col].append(None)
dict_df_desc = {k: pd.concat(v, axis = 1, keys = ls_loop_scs)\
for k, v in dict_ls_se_desc.items()}
dict_ens_alt_replace = {'CENTRE E.LECLERC' : 'LECLERC',
'INTERMARCHE SUPER' : 'ITM SUP',
'INTERMARCHE HYPER' : 'ITM HYP',
'CARREFOUR MARKET' : 'CAR. MARKET',
'SIMPLY MARKET' : 'SIMPLY'}
dict_df_desc = {k: v.rename(columns = dict_ens_alt_replace)\
for k,v in dict_df_desc.items()}
ls_dict_df_desc.append(dict_df_desc)
ls_dict_df_chain_product_stats.append(dict_df_chain_product_stats)
ls_dict_df_chain_store_desc.append(dict_df_chain_store_desc)
ls_loop_scs[2] = 'CAR. MARKET' # adhoc fix..
# Freq prods across period for one chain
dict_su_chains = {}
for var in ['freq_prods', 'freq_stores']:
dict_su_chains[var] = {}
for store_chain in ls_loop_scs:
ls_se_temp = []
for per, dict_df_desc_per in enumerate(ls_dict_df_desc):
ls_se_temp.append(dict_df_desc_per[var].get(store_chain))
df_chain_temp = pd.concat(ls_se_temp,
axis = 1,
keys = range(13))
dict_su_chains[var][store_chain] = df_chain_temp
for var in ['freq_prods', 'freq_stores']:
print()
print(var)
for k,v in dict_su_chains[var].items():
print()
print(k)
print(v.to_string())
| [
"[email protected]"
] | |
6c6be5bb613ab1ba748008cf64ecb99a72b2ea86 | 814fd0bea5bc063a4e34ebdd0a5597c9ff67532b | /build/android/pylib/utils/mock_calls_test.py | 1b474afd1ea1707910b1716170ec0f65c1c87e17 | [
"BSD-3-Clause"
] | permissive | rzr/chromium-crosswalk | 1b22208ff556d69c009ad292bc17dca3fe15c493 | d391344809adf7b4f39764ac0e15c378169b805f | refs/heads/master | 2021-01-21T09:11:07.316526 | 2015-02-16T11:52:21 | 2015-02-16T11:52:21 | 38,887,985 | 0 | 0 | NOASSERTION | 2019-08-07T21:59:20 | 2015-07-10T15:35:50 | C++ | UTF-8 | Python | false | false | 5,078 | py | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of mock_calls.py.
"""
import logging
import os
import sys
import unittest
from pylib import constants
from pylib.utils import mock_calls
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'pymock'))
import mock # pylint: disable=F0401
class _DummyAdb(object):
def __str__(self):
return '0123456789abcdef'
def Push(self, host_path, device_path):
logging.debug('(device %s) pushing %r to %r', self, host_path, device_path)
def IsOnline(self):
logging.debug('(device %s) checking device online', self)
return True
def Shell(self, cmd):
logging.debug('(device %s) running command %r', self, cmd)
return "nice output\n"
def Reboot(self):
logging.debug('(device %s) rebooted!', self)
class TestCaseWithAssertCallsTest(mock_calls.TestCase):
def setUp(self):
self.adb = _DummyAdb()
def ShellError(self):
def action(cmd):
raise ValueError('(device %s) command %r is not nice' % (self.adb, cmd))
return action
def get_answer(self):
logging.debug("called 'get_answer' of %r object", self)
return 42
def echo(self, thing):
logging.debug("called 'echo' of %r object", self)
return thing
  def testCallTarget_succeeds(self):
self.assertEquals(self.adb.Shell,
self.call_target(self.call.adb.Shell))
def testCallTarget_failsExternal(self):
with self.assertRaises(ValueError):
self.call_target(mock.call.sys.getcwd)
def testCallTarget_failsUnknownAttribute(self):
with self.assertRaises(AttributeError):
self.call_target(self.call.adb.Run)
def testCallTarget_failsIntermediateCalls(self):
with self.assertRaises(AttributeError):
self.call_target(self.call.adb.RunShell('cmd').append)
def testPatchCall_method(self):
self.assertEquals(42, self.get_answer())
with self.patch_call(self.call.get_answer, return_value=123):
self.assertEquals(123, self.get_answer())
self.assertEquals(42, self.get_answer())
def testPatchCall_attribute_method(self):
with self.patch_call(self.call.adb.Shell, return_value='hello'):
self.assertEquals('hello', self.adb.Shell('echo hello'))
def testPatchCall_global(self):
with self.patch_call(mock.call.os.getcwd, return_value='/some/path'):
self.assertEquals('/some/path', os.getcwd())
def testPatchCall_withSideEffect(self):
with self.patch_call(self.call.adb.Shell, side_effect=ValueError):
with self.assertRaises(ValueError):
self.adb.Shell('echo hello')
def testAssertCalls_succeeds_simple(self):
self.assertEquals(42, self.get_answer())
with self.assertCall(self.call.get_answer(), 123):
self.assertEquals(123, self.get_answer())
self.assertEquals(42, self.get_answer())
def testAssertCalls_succeeds_multiple(self):
with self.assertCalls(
(mock.call.os.getcwd(), '/some/path'),
(self.call.echo('hello'), 'hello'),
(self.call.get_answer(), 11),
self.call.adb.Push('this_file', 'that_file'),
(self.call.get_answer(), 12)):
self.assertEquals(os.getcwd(), '/some/path')
self.assertEquals('hello', self.echo('hello'))
self.assertEquals(11, self.get_answer())
self.adb.Push('this_file', 'that_file')
self.assertEquals(12, self.get_answer())
  def testAssertCalls_succeeds_withAction(self):
with self.assertCall(
self.call.adb.Shell('echo hello'), self.ShellError()):
with self.assertRaises(ValueError):
self.adb.Shell('echo hello')
def testAssertCalls_fails_tooManyCalls(self):
with self.assertRaises(AssertionError):
with self.assertCalls(self.call.adb.IsOnline()):
self.adb.IsOnline()
self.adb.IsOnline()
def testAssertCalls_fails_tooFewCalls(self):
with self.assertRaises(AssertionError):
with self.assertCalls(self.call.adb.IsOnline()):
pass
def testAssertCalls_succeeds_extraCalls(self):
# we are not watching Reboot, so the assertion succeeds
with self.assertCalls(self.call.adb.IsOnline()):
self.adb.IsOnline()
self.adb.Reboot()
def testAssertCalls_fails_extraCalls(self):
self.watchCalls([self.call.adb.Reboot])
# this time we are also watching Reboot, so the assertion fails
with self.assertRaises(AssertionError):
with self.assertCalls(self.call.adb.IsOnline()):
self.adb.IsOnline()
self.adb.Reboot()
def testAssertCalls_succeeds_NoCalls(self):
self.watchMethodCalls(self.call.adb) # we are watching all adb methods
with self.assertCalls():
pass
def testAssertCalls_fails_NoCalls(self):
self.watchMethodCalls(self.call.adb)
with self.assertRaises(AssertionError):
with self.assertCalls():
self.adb.IsOnline()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
| [
"[email protected]"
] | |
4a5f20033b2ce926b8c120facc7b1de246135d9c | c47e274f6af4d08bff65e360fb8a11b163dc34b2 | /common/global_constants.py | 7e184ce065f2d0ce801d87ae0ab50fb3d1e9079c | [
"BSD-3-Clause"
] | permissive | nozberkaryaindonesia/ReadableWebProxy | 6b66994c574dc0a70767397403c04f97bf2d07f0 | 82d14d8dfb23ef135a16f88274c14c7acc1162a5 | refs/heads/master | 2022-05-21T20:06:03.707617 | 2017-09-24T09:54:23 | 2017-09-24T09:54:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,460 | py |
GLOBAL_BAD_URLS = [
'//mail.google.com',
'/comments/feed/',
'/embed?',
'/osd.xml',
'/page/page/',
'/wp-json/',
'/wp-login.php',
'/xmlrpc.php',
'?openidserver=1',
'a.wikia-beacon.com',
'accounts.google.com',
'add.my.yahoo.com',
'addtoany.com',
'b.scorecardresearch.com',
'delicious.com',
'digg.com',
'edit.yahoo.com',
'facebook.com',
'fbcdn-',
'feeds.wordpress.com',
'gprofiles.js',
'javascript:void',
'netvibes.com',
'newsgator.com',
'paypal.com',
'pixel.wp.com',
'public-api.wordpress.com',
'r-login.wordpress.com',
'reddit.com',
'stumbleupon.com',
'technorati.com',
'topwebfiction.com',
'twitter.com',
'twitter.com/intent/',
'wretch.cc',
'ws-na.amazon-adsystem.com',
    'www.addtoany.com',
'www.pinterest.com/pin/',
'www.wattpad.com/login?',
'www.tumblr.com/reblog/',
'www.paypalobjects.com',
# Tumblr can seriously go fuck itself with a rusty stake
'tumblr.com/widgets/',
'www.tumblr.com/login',
'://tumblr.com',
'&share=tumblr',
'/wp-content/plugins/',
'/wp-content/themes/',
'/wp-json/oembed/',
# At least one site (booksie) is serving the favicon with a mime-type
# of "text/plain", which then confuses the absolute crap out of the
# mime-type dispatcher.
# Since I'm not re-serving favicons anyways, just do not fetch them ever.
'favicon.ico',
# Try to not scrape inline images
';base64,',
"www.fashionmodeldirectory.com",
"www.watchingprivatepractice.com",
"Ebonyimages.jupiterimages.com",
# More garbage issues.
'"https',
'#comment-',
'/oembed/1.0/',
'&share=',
'replytocom=',
'?feed=rss2&page_id',
'?share=tumblr',
'?share=facebook',
'chasingadreamtranslations.com/?fp=',
# NFI where /this/ came from
'www.miforcampuspolice.com',
'tracking.feedpress.it',
'www.quantcast.com',
'mailto:',
'javascript:popupWindow(',
'en.blog.wordpress.com',
'counter.yadro.ru',
'/js/js/',
'/css/css/',
'/images/images/',
'ref=dp_brlad_entry',
'https:/www.',
'tumblr.com/oembed/1.0?',
]
GLOBAL_DECOMPOSE_BEFORE = [
{'name' : 'likes-master'}, # Bullshit sharing widgets
{'id' : 'jp-post-flair'},
{'class' : 'post-share-buttons'},
#{'class' : 'commentlist'}, # Scrub out the comments so we don't try to fetch links from them
#{'class' : 'comments'},
#{'id' : 'comments'},
]
GLOBAL_DECOMPOSE_AFTER = []
RSS_SKIP_FILTER = [
"www.baka-tsuki.org",
"re-monster.wikia.com",
'inmydaydreams.com',
'www.fanfiction.net',
'www.booksie.com',
'www.booksiesilk.com',
'www.fictionpress.com',
'storiesonline.net',
'www.fictionmania.tv',
'www.bestories.net',
'www.tgstorytime.com',
'www.nifty.org',
'www.literotica.com',
'pokegirls.org',
'www.asstr.org',
'www.mcstories.com',
'www.novelupdates.com',
'40pics.com',
'#comment-',
'?showComment=',
]
RSS_TITLE_FILTER = [
"by: ",
"comments on: ",
"comment on: ",
"comment on ",
]
# Goooooo FUCK YOURSELF
GLOBAL_INLINE_BULLSHIT = [
"This translation is property of Infinite Novel Translations.",
"This translation is property of Infinite NovelTranslations.",
"If you read this anywhere but at Infinite Novel Translations, you are reading a stolen translation.",
"<Blank>",
"<space>",
"<Blank>",
"<Blank>",
"please read only translator’s websitewww.novitranslation.com",
"please read only translator’s website www.novitranslation.com",
"Please do not host elsewhere but MBC and Yumeabyss",
'Original and most updated translations are from volaretranslations.',
'Please support the translator for Wild Consort by reading on volarenovels!',
'Original and most updated translations are from volaretranslations.',
'Original and most updated translations are from volaretranslations.',
"<StarveCleric>",
'(trytranslations.com at your service!)',
'Please do not host elsewhere but volare and Yumeabyss',
'[Follow the latest chapter at wuxiadream.com]',
'I slid my penis inside her. She squirmed a bit but YOU SICK FUCK STOP STEALING MY TRANSLATIONS', # siiiiigh
'I kissed her sweet anus once more before leaving', # siiiiiiiiiiiiigh
'(Watermark: read this translation only at shinku. xiaoxiaonovels.com)',
"<TLN: If you're reading this novel at any other site than Sousetsuka.com you might be reading an unedited, uncorrected version of the novel.>",
'Original and most updated translations are from volare. If read elsewhere, this chapter has been stolen. Please stop supporting theft.',
'*******If you are reading this on a place other than rinkagetranslation.com, this chapter has been stolen and is neither the most recent or complete chapter.*******',
'*******Read the chapters at rinkagetranslation.com. The chapters for this series will NOT be posted anywhere else other than on that site itself. If you are reading this from somewhere else then this is chapter has been stolen.*******',
'If you are reading this on a place other than rinkagetranslation.com, this chapter has been stolen and is neither the most recent or complete chapter.',
"Read The Lazy Swordmaster first on Lightnovelbastion.com (If you're reading this elsewhere, it has been stolen)",
"Read The Lazy Swordmaster on Lightnovelbastion.com",
"Property of © Fantasy-Books.live; outside of it, it is stolen.",
]
| [
"[email protected]"
] | |
a43033cd1083b62dfa20f3914123e00835219987 | c5a004f26bf249f888be3849114dd35dbd24cb24 | /python/evalrescallers/tests/ten_k_validation_data_test.py | c9f2b7b50349d5124180fb1dad48982f96e4202e | [
"MIT"
] | permissive | wangdi2014/tb-amr-benchmarking | f7cf331608cfe7b9cc8995906d991573323dc87a | 276f4f7f30639dacc62b3e8e395b2d2ce8675089 | refs/heads/master | 2022-03-10T00:41:07.364006 | 2019-11-08T09:37:23 | 2019-11-08T09:37:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,515 | py | import os
import unittest
from evalrescallers import ten_k_validation_data
modules_dir = os.path.dirname(os.path.abspath(ten_k_validation_data.__file__))
data_dir = os.path.join(modules_dir, 'tests', 'data', 'ten_k_validation_data')
class TestTenKValidationData(unittest.TestCase):
def test_load_sample_to_res_file(self):
'''test load_sample_to_res_file'''
expected_drugs = {'Isoniazid', 'Rifampicin', 'Ethambutol', 'Pyrazinamide'}
expected_data = {
'ena1': {'Isoniazid': 'n/a', 'Rifampicin': 'S', 'Ethambutol': 'R', 'Pyrazinamide': 'S'},
'ena2': {'Isoniazid': 'S', 'Rifampicin': 'U', 'Ethambutol': 'S', 'Pyrazinamide': 'S'},
}
infile = os.path.join(data_dir, 'load_sample_to_res_file.tsv')
got_drugs, got_data = ten_k_validation_data.load_sample_to_res_file(infile)
self.assertEqual(expected_drugs, got_drugs)
self.assertEqual(expected_data, got_data)
def test_load_sources_file(self):
'''test load_sources_file'''
infile = os.path.join(data_dir, 'load_sources_file.tsv')
expect = {
'ena1': ('source1', 'country1'),
'ena2': ('source1', 'country1'),
'ena3': ('source1', 'country2'),
'ena4': ('source2', 'country1'),
'ena5': ('source2', 'country2'),
}
got = ten_k_validation_data.load_sources_file(infile)
self.assertEqual(expect, got)
def test_sources_file_to_country_counts(self):
'''test sources_file_to_country_counts'''
infile = os.path.join(data_dir, 'sources_file_to_country_counts.tsv')
expect = {
'Country1': {'validate': 3, 'test': 0},
'Country2': {'validate': 1, 'test': 0},
'Germany': {'validate': 0, 'test': 1},
'UK': {'validate': 1, 'test': 2},
}
got = ten_k_validation_data.sources_file_to_country_counts(infile)
self.assertEqual(expect, got)
def test_load_all_data(self):
'''test load_all_data'''
expected_drugs = {'Quinolones', 'Isoniazid', 'Rifampicin', 'Ethambutol', 'Pyrazinamide', 'Amikacin', 'Capreomycin', 'Ciprofloxacin', 'Cycloserine', 'Ethionamide', 'Kanamycin', 'Linezolid', 'Moxifloxacin', 'Ofloxacin', 'PAS', 'Rifabutin', 'Streptomycin'}
got_drugs, got_pheno_validation, got_pheno_test, got_predict = ten_k_validation_data.load_all_data()
self.assertEqual(expected_drugs, got_drugs)
_, expect_pheno = ten_k_validation_data.load_sample_to_res_file(os.path.join(ten_k_validation_data.data_dir, '10k_validation.phenotype.tsv'))
_, expect_predict = ten_k_validation_data.load_sample_to_res_file(os.path.join(ten_k_validation_data.data_dir, '10k_validation.prediction.tsv'))
_, expect_more_pheno = ten_k_validation_data.load_sample_to_res_file(os.path.join(ten_k_validation_data.data_dir, '10k_validation.extra_phenotypes.tsv'))
expect_samples = set(expect_pheno.keys()).union(set(expect_more_pheno.keys()))
got_samples = set(expect_pheno.keys())
self.assertEqual(expect_samples, got_samples)
for pheno_dict in got_pheno_validation, got_pheno_test:
for sample in pheno_dict:
for d in expect_pheno, expect_more_pheno:
if sample in d:
for k, v in d[sample].items():
self.assertEqual(v, pheno_dict[sample][k])
self.assertEqual(expect_predict, got_predict)
| [
"[email protected]"
] | |
0129e5f8bb4ef9510bef37bfe7c32a58b45a1089 | 6ec8e4271968cae715babe05029931d2c11df754 | /run.py | 3381b4ca954744143adb1172231fafc792c96a42 | [
"MIT"
] | permissive | lllhhhqqq/SPIRAL-tensorflow | 040efe8af0fd3bc4d5f5ce2ed5474e6d732763f5 | 05ddfdc20c73a61cde46594bd6b7b7a2e255a44b | refs/heads/master | 2020-03-08T08:57:45.938448 | 2018-04-03T15:32:19 | 2018-04-03T15:32:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,371 | py | # -*- coding: future_fstrings -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tensorflow as tf
from six.moves import shlex_quote
import utils as ut
def new_cmd(session, name, cmd, load_path, shell):
if isinstance(cmd, (list, tuple)):
cmd = " ".join(shlex_quote(str(v)) for v in cmd)
return name, "tmux send-keys -t {}:{} {} Enter".format(session, name, shlex_quote(cmd))
def create_commands(session, args, shell='bash'):
ut.train.prepare_dirs(args)
actual_args = ut.io.get_cmd(as_list=True)
actual_cmd = ' '.join(actual_args)
# for launching the TF workers and for launching tensorboard
base_cmd = [
'CUDA_VISIBLE_DEVICES=',
sys.executable, 'main.py',
'--load_path', args.load_path,
'--start_port', args.start_port,
'--num_gpu', ut.misc.count_gpu(),
] + actual_args
cmds_map = [new_cmd(session, "ps", base_cmd + ["--job_name", "ps"], args.load_path, shell)]
if args.loss == 'l2':
gpu_task_num = 1
elif args.loss == 'gan':
gpu_task_num = 2
for i in range(args.num_workers):
if i < gpu_task_num: # gpu workers
cmd = base_cmd[1:]
else:
cmd = base_cmd[:]
cmd += ["--job_name", "worker", "--task", str(i)]
cmds_map += [new_cmd(session, "w-%d" % i, cmd, args.load_path, shell)]
tmp_tb_dir = "/".join(sys.executable.split('/')[:-1])
tmp_tb_path = os.path.join(tmp_tb_dir, "tensorboard")
if os.path.exists(tmp_tb_path):
tb = tmp_tb_dir + "/tensorboard"
else:
tb = "tensorboard"
tb_args = [tb, "--logdir", args.log_dir, "--port", "12345"]
cmds_map += [new_cmd(session, "tb", tb_args, args.load_path, shell)]
cmds_map += [new_cmd(session, "htop", ["htop"], args.load_path, shell)]
windows = [v[0] for v in cmds_map]
notes = []
cmds = []
notes += ["Use `tmux attach -t {}` to watch process output".format(session)]
notes += ["Use `tmux kill-session -t {}` to kill the job".format(session)]
notes += ["Point your browser to http://localhost:12345 to see Tensorboard"]
cmds += [
# kill any process using tensorboard's port
f"kill $( lsof -i:{args.tb_port} -t ) > /dev/null 2>&1",
# kill any processes using ps / worker ports
f"kill $( lsof -i:{args.start_port}-{args.num_workers + args.start_port} -t ) > /dev/null 2>&1",
f"tmux kill-session -t {session}",
f"tmux new-session -s {session} -n {windows[0]} -d {shell}",
]
for w in windows[1:]:
cmds += ["tmux new-window -t {} -n {} {}".format(session, w, shell)]
cmds += ["sleep 1"]
for window, cmd in cmds_map:
cmds += [cmd]
return cmds, notes
def run(args):
cmds, notes = create_commands("spiral", args)
if args.dry_run:
print("Dry-run mode due to -n flag, otherwise the following commands would be executed:")
else:
print("Executing the following commands:")
print("\n".join(cmds))
print("")
if not args.dry_run:
os.environ["TMUX"] = ""
os.system("\n".join(cmds))
print('\n'.join(notes))
if __name__ == "__main__":
from config import get_args
args = get_args()
run(args)
| [
"[email protected]"
] | |
7c602f029e3a124f40432e96b024c8300417ae5b | f4b5721c6b3f5623e306d0aa9a95ec53461c1f89 | /backend/src/gloader/xml/dom/html/HTMLTableRowElement.py | e18280ba18ad8af52f593f29fbe8bf83d5cc6ac0 | [
"Apache-1.1",
"MIT"
] | permissive | citelab/gini5 | b53e306eb5dabf98e9a7ded3802cf2c646f32914 | d095076113c1e84c33f52ef46a3df1f8bc8ffa43 | refs/heads/uml-rename | 2022-12-10T15:58:49.578271 | 2021-12-09T23:58:01 | 2021-12-09T23:58:01 | 134,980,773 | 12 | 11 | MIT | 2022-12-08T05:20:58 | 2018-05-26T17:16:50 | Python | UTF-8 | Python | false | false | 3,711 | py | ########################################################################
#
# File Name: HTMLTableRowElement.py
#
#
"""
WWW: http://4suite.com/4DOM e-mail: [email protected]
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import implementation
from xml.dom import IndexSizeErr
from xml.dom.html.HTMLElement import HTMLElement
class HTMLTableRowElement(HTMLElement):
def __init__(self, ownerDocument, nodeName='TR'):
HTMLElement.__init__(self, ownerDocument, nodeName)
### Attribute Methods ###
def _get_align(self):
return string.capitalize(self.getAttribute('ALIGN'))
def _set_align(self,align):
self.setAttribute('ALIGN', align)
def _get_bgColor(self):
return self.getAttribute('BGCOLOR')
def _set_bgColor(self, color):
self.setAttribute('BGCOLOR', color)
def _get_cells(self):
cells = []
for child in self.childNodes:
if child.tagName in ['TD','TH']:
cells.append(child)
return implementation._4dom_createHTMLCollection(cells)
def _get_ch(self):
return self.getAttribute('CHAR')
def _set_ch(self, ch):
self.setAttribute('CHAR', ch)
def _get_chOff(self):
return self.getAttribute('CHAROFF')
def _set_chOff(self, offset):
self.setAttribute('CHAROFF', offset)
def _get_rowIndex(self):
#Get our index in the table
section = self.parentNode
if section == None:
return -1
table = section.parentNode
if table == None:
return -1
rows = table._get_rows()
return rows.index(self)
def _get_sectionRowIndex(self):
section = self.parentNode
if section == None:
return -1
rows = section._get_rows()
return rows.index(self)
def _get_vAlign(self):
return string.capitalize(self.getAttribute('VALIGN'))
def _set_vAlign(self, valign):
self.setAttribute('VALIGN', valign)
### Methods ###
def insertCell(self, index):
cells = self._get_cells()
if index < 0 or index > len(cells):
raise IndexSizeErr()
cell = self.ownerDocument.createElement('TD')
length = cells.length
if index == len(cells):
ref = None
elif index < len(cells):
ref = cells[index]
return self.insertBefore(cell, ref)
def deleteCell(self,index):
cells = self._get_cells()
if index < 0 or index >= len(cells):
raise IndexSizeErr()
self.removeChild(cells[index])
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update ({
'rowIndex' : _get_rowIndex,
'sectionRowIndex' : _get_sectionRowIndex,
'cells' : _get_cells,
'align' : _get_align,
'bgColor' : _get_bgColor,
'ch' : _get_ch,
'chOff' : _get_chOff,
'vAlign' : _get_vAlign,
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update ({
'align' : _set_align,
'bgColor' : _set_bgColor,
'ch' : _set_ch,
'chOff' : _set_chOff,
'vAlign' : _set_vAlign,
})
# Create the read-only list of attributes
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
| [
"[email protected]"
] | |
82cd114c38d8767bd5493b1054b0112eb2f33b82 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02772/s117684727.py | af179f388db806b32c0635d0c096c78b0d0171ea | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | N = int(input())
A = list(map(int, input().split()))
even_numbers = [a for a in A if a % 2 == 0]
is_approved = all([even_num % 3 == 0 or even_num % 5 == 0 for even_num in even_numbers])
if is_approved:
print('APPROVED')
else:
print('DENIED')
| [
"[email protected]"
] | |
26a00630aeba6a6ae67c356e67ad7108f664c08b | 2aec9c5e8c72b731d3abf22f2a407fe09c1cde09 | /ZQZ510/ZQZ510/pipelines.py | 96d17ebedbe541b8ea71011896e82ef784f24a35 | [] | no_license | jiangyg/ZWFproject | 8b24cc34970ae0a9c2a2b0039dc527c83a5862b5 | aa35bc59566d92721f23d2dd00b0febd268ac2dd | refs/heads/master | 2020-09-26T17:01:00.229380 | 2019-11-15T13:16:21 | 2019-11-15T13:16:21 | 226,297,631 | 0 | 1 | null | 2019-12-06T09:55:37 | 2019-12-06T09:55:36 | null | UTF-8 | Python | false | false | 542 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
class Zqz510Pipeline(object):
def open_spider(self, spider):
self.file = open('./zqz_data.json', 'w+', encoding='utf-8')
def process_item(self, item, spider):
self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
return item
def close_spider(self, spider):
self.file.close()
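# The pipeline must also be enabled in the project's settings.py; a minimal
# sketch (the priority value 300 here is an assumption, not taken from this project):
#     ITEM_PIPELINES = {
#         'ZQZ510.pipelines.Zqz510Pipeline': 300,
#     }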
| [
"[email protected]"
] | |
560d28d47aec3beddae995957b47f2a586147262 | 153995fa868b4697d8d6b25379a16f9756604151 | /student/migrations/0003_auto_20180530_1427.py | 23c1d56c1bf88a956e612254eb17747ba36e63f8 | [] | no_license | Manju1313/django-school | 816c13259654c4f57352add903cc13e3915f3724 | 1182de09e9b638a2a4f328024f6bc6807eff6029 | refs/heads/master | 2023-03-21T22:44:59.002131 | 2020-08-15T14:34:19 | 2020-08-15T14:34:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,414 | py | # Generated by Django 2.0.4 on 2018-05-30 18:27
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0002_auto_20180530_1421'),
]
operations = [
migrations.RemoveField(
model_name='student',
name='stu_phone_number',
),
migrations.AddField(
model_name='student',
name='phone_number',
field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
),
migrations.AlterField(
model_name='guardian',
name='phone_number',
field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
),
migrations.AlterField(
model_name='instructor',
name='phone_number',
field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
),
]
| [
"[email protected]"
] | |
3cb9f0d148c54cbbe893c3e1c798c3bb23c70ffc | a3cc7286d4a319cb76f3a44a593c4a18e5ddc104 | /lib/surface/compute/instances/delete_access_config.py | 479bf531ec0ef199dca5ae411f4dd8aff59f1cff | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jordanistan/Google-Cloud-SDK | f2c6bb7abc2f33b9dfaec5de792aa1be91154099 | 42b9d7914c36a30d1e4b84ae2925df7edeca9962 | refs/heads/master | 2023-09-01T01:24:53.495537 | 2023-08-22T01:12:23 | 2023-08-22T01:12:23 | 127,072,491 | 0 | 1 | NOASSERTION | 2023-08-22T01:12:24 | 2018-03-28T02:31:19 | Python | UTF-8 | Python | false | false | 3,102 | py | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for deleting access configs from virtual machine instances."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import constants
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.instances import flags
class DeleteAccessConfig(base.SilentCommand):
"""Delete an access configuration from a virtual machine network interface."""
detailed_help = {
'DESCRIPTION': """\
*{command}* is used to delete access configurations from network
interfaces of Google Compute Engine virtual machines. Access
configurations allow you to assign a public, external IP to a virtual
machine.
""",
'EXAMPLES': """\
To remove the externally accessible IP from a virtual machine named
``example-instance'' in zone ``us-central1-a'', run:
$ {command} example-instance --zone us-central1-a
""",
}
@staticmethod
def Args(parser):
flags.INSTANCE_ARG.AddArgument(parser)
parser.add_argument(
'--access-config-name',
default=constants.DEFAULT_ACCESS_CONFIG_NAME,
help="""\
Specifies the name of the access configuration to delete.
``{0}'' is used as the default if this flag is not provided.
""".format(constants.DEFAULT_ACCESS_CONFIG_NAME))
parser.add_argument(
'--network-interface',
default=constants.DEFAULT_NETWORK_INTERFACE,
action=arg_parsers.StoreOnceAction,
help="""\
Specifies the name of the network interface from which to delete the
access configuration. If this is not provided, then ``nic0'' is used
as the default.
""")
def Run(self, args):
"""Invokes request necessary for removing an access config."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
instance_ref = flags.INSTANCE_ARG.ResolveAsResource(
args, holder.resources,
scope_lister=flags.GetInstanceZoneScopeLister(client))
request = client.messages.ComputeInstancesDeleteAccessConfigRequest(
accessConfig=args.access_config_name,
instance=instance_ref.Name(),
networkInterface=args.network_interface,
project=instance_ref.project,
zone=instance_ref.zone)
return client.MakeRequests([(client.apitools_client.instances,
'DeleteAccessConfig', request)])
| [
"[email protected]"
] | |
51076cbc05dfd34c93e5ff0d33ec683f7304252f | 6cc795fef13e82a2e50f487740f5373b5a3f8549 | /pyunlocbox/tests/__init__.py | 7cae2d147d6d4ccbb8129886a11191b019a147e2 | [
"BSD-3-Clause"
] | permissive | epfl-lts2/pyunlocbox | 7a14e97f7e46981ed6748bb5073d473f45af676e | ec84282096fa9154d8bdcc52bacc3531c9720779 | refs/heads/master | 2023-08-29T22:13:29.345251 | 2022-10-18T11:18:53 | 2022-10-18T11:18:53 | 17,248,167 | 98 | 28 | BSD-3-Clause | 2023-08-18T02:01:44 | 2014-02-27T12:33:31 | Python | UTF-8 | Python | false | false | 445 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test suite for the pyunlocbox package, broken by modules.
"""
import unittest
from . import test_functions
from . import test_operators
from . import test_solvers
from . import test_acceleration
from . import test_docstrings
suite = unittest.TestSuite([
test_functions.suite,
test_operators.suite,
test_solvers.suite,
test_acceleration.suite,
test_docstrings.suite,
])
| [
"[email protected]"
] | |
2f52dc55e8244d2992f25fe087aa779b5ee88b23 | edfa045d12b8efb65de20261ff80a86160298e44 | /contact/views.py | 92ba8640b73aadf1add6ef04d0e028b1dae69786 | [
"MIT"
] | permissive | yusif763/Unistore-pro | 1d559a89bb71f3db8b5d1e89df64ed7113f00f2a | 41ad0fa209c79a201d3f6a7aa68ec0ace707dcad | refs/heads/main | 2023-04-24T02:50:30.085011 | 2021-04-29T11:00:11 | 2021-04-29T11:00:11 | 362,782,688 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | from django.shortcuts import render,redirect
from contact.models import *
from contact.forms import ContactForm
from django.views.generic import (
ListView, DetailView,CreateView
)
from django.views.generic.edit import FormMixin
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from django.contrib import messages
# Create your views here.
# def about_contact(request):
# form = ContactForm()
# # sub_form = SubscriberForm()
# if request.method == 'POST':
# form = ContactForm(data=request.POST)
# if form.is_valid():
# form.save()
# return redirect('/about-contact/')
# context = {
# # "sub_form":sub_form,
# 'form': form
# }
# return render(request , 'about_contact.html' , context)
class AboutContactView(CreateView):
form_class = ContactForm
# fields = '__all__'
# model = Contact
template_name = 'about_contact.html'
success_url = reverse_lazy('common:index')
def form_valid(self, form):
        result = super(AboutContactView, self).form_valid(form)
messages.success(self.request, 'Sizin muracietiniz qebul edildi.')
return result
# def contact_page(request):
# form = ContactForm()
# # sub_form = SubscriberForm()
# if request.method == 'POST':
# form = ContactForm(data=request.POST)
# if form.is_valid():
# form.save()
# return redirect('/contact/')
# context = {
# # "sub_form":sub_form,
# 'form': form
# }
# return render(request, "contact.html", context)
class ContactView(CreateView):
form_class = ContactForm
# fields = '__all__'
# model = Contact
template_name = 'contact.html'
success_url = reverse_lazy('common:index')
def form_valid(self, form):
result = super(ContactView, self).form_valid(form)
messages.success(self.request, 'Sizin muracietiniz qebul edildi.')
return result
| [
"[email protected]"
] | |
8cbb0199476d4a0ff738d2012c7bde1daee5d0e7 | 496e05014492b4bbecf9f15c40ae416c21e27a46 | /src/outpost/django/api/serializers.py | f3b19c6cac6763725dbcfae9ac299911d7d02ba2 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | medunigraz/outpost_deprecated | b1ff802054c04cf989b3b660e132fa6a1c2a078c | bc88eaa3bb504d394fdf13f1131e40db27759c89 | refs/heads/master | 2022-01-23T15:46:34.859095 | 2019-05-21T08:38:11 | 2019-05-21T08:38:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,266 | py | import re
from base64 import (
b64decode,
urlsafe_b64encode,
)
from pathlib import PurePosixPath
from uuid import uuid4
import six
from django.core.files.base import ContentFile
from drf_haystack.serializers import HaystackSerializer
from rest_framework.serializers import (
FileField,
IntegerField,
SerializerMethodField,
)
from outpost.django.geo import search_indexes as geo
from outpost.django.structure import search_indexes as structure
class AutocompleteSerializer(HaystackSerializer):
id = IntegerField(source='pk')
ctype = SerializerMethodField()
class Meta:
index_classes = [
geo.RoomIndex,
structure.OrganizationIndex,
structure.PersonIndex,
]
fields = [
'presentation',
'id',
'ctype',
'level_id',
'room_id',
'autocomplete',
]
ignore_fields = [
'text',
'autocomplete',
]
field_aliases = {
'q': 'autocomplete',
}
def get_ctype(self, obj):
return obj.content_type()
class Base64FileField(FileField):
"""
A Django REST framework field for handling file-uploads through raw post
data. It uses base64 for encoding and decoding the contents of the file.
Heavily based on
https://stackoverflow.com/a/28036805
"""
parser = re.compile(r'^data:(?P<mimetype>.*?);base64,')
def to_internal_value(self, raw):
# Check if this is a base64 string
if isinstance(raw, six.string_types):
header = self.parser.match(raw)
# Check if the base64 string is in the "data:" format
if header:
try:
decoded_file = b64decode(self.parser.sub('', raw))
except TypeError:
                    self.fail('invalid')
# Generate file name:
p = PurePosixPath()
uid = uuid4().bytes
u = urlsafe_b64encode(uid).decode('ascii').rstrip('=')
filename = p.joinpath(u).as_posix()
raw = ContentFile(decoded_file, name=filename)
return super(Base64FileField, self).to_internal_value(raw)
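# A minimal usage sketch (assumes `from rest_framework import serializers` and an
# existing `Document` model, neither of which is part of this module): declare the
# field on a DRF serializer to accept "data:<mimetype>;base64,..." file payloads.
#
#     class DocumentSerializer(serializers.ModelSerializer):
#         file = Base64FileField()
#
#         class Meta:
#             model = Document
#             fields = ('id', 'file')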
| [
"[email protected]"
] | |
556cd12c5bcabb294fdef6cef5e233d27d08634b | b5ce6908490cfb8e6a1e1cbe4745d675122ddce0 | /questions/search-insert-position/Solution.py | c0090acd08a2b839bf40909c0f07c328192ae1f5 | [
"MIT"
] | permissive | franklingu/leetcode-solutions | 8895910f13208e1d8e604100d84c2dd35684cde4 | 7ad7e5c1c040510b7b7bd225ed4297054464dbc6 | refs/heads/master | 2023-01-09T01:34:08.097518 | 2023-01-02T02:05:35 | 2023-01-02T02:05:35 | 43,345,677 | 155 | 66 | MIT | 2020-10-02T03:41:36 | 2015-09-29T04:54:38 | Python | UTF-8 | Python | false | false | 523 | py | '''
Given a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order.
You may assume no duplicates in the array.
Example 1:
Input: [1,3,5,6], 5
Output: 2
Example 2:
Input: [1,3,5,6], 2
Output: 1
Example 3:
Input: [1,3,5,6], 7
Output: 4
Example 4:
Input: [1,3,5,6], 0
Output: 0
'''
from typing import List
import bisect
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
return bisect.bisect_left(nums, target)
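# For reference, a hand-rolled equivalent of bisect.bisect_left using the
# half-open [lo, hi) invariant; it returns the same insertion index.
def search_insert_manual(nums, target):
    lo, hi = 0, len(nums)
    while lo < hi:
        mid = (lo + hi) // 2
        if nums[mid] < target:
            lo = mid + 1  # target lies to the right of mid
        else:
            hi = mid  # target is at mid or to its left
    return lo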
| [
"[email protected]"
] | |
1c8e3344ff726702de26bc95b86ffad4f8fa87df | cce5684e1bb9fea2df762c1afedb17b1795b7a5f | /pymcutil/selector/selectors/self_selector.py | 03d75db75e9fd916c4c55f012ac4e9ca91173109 | [
"MIT"
] | permissive | Arcensoth/pymcutil | 85071e5c3bbd25a47a1133bfa464f67126c62bdd | 0c8f1efa4d611e92170ec48bedb160b1d00d0022 | refs/heads/master | 2020-03-18T05:02:06.769457 | 2018-07-02T00:45:51 | 2018-07-02T00:45:51 | 91,645,414 | 3 | 1 | null | 2017-07-12T15:56:12 | 2017-05-18T03:37:33 | Python | UTF-8 | Python | false | false | 322 | py | from pymcutil.selector.abc.selector import Selector
from pymcutil.symbols import selector_bases
from pymcutil.symbols.selector_bases.selector_bases import SelectorBase
class SelfSelector(Selector):
@property
def base(self) -> SelectorBase:
return selector_bases.self
self = SelfSelector
SELF = self()
| [
"[email protected]"
] | |
8065d5a222ea47f64ef74359900886af9e88af37 | fa9c5bb26c72b053a41f34e858d7395ee29aea5d | /HttpTesting/main.py | ed8bc130a9da20ac6da12479814e772dab2e1447 | [
"Apache-2.0"
] | permissive | HttpTesting/HttpTesting | 3c01d7b00b13ddf5d52ac223693a5222d9a52a03 | b5b68dd546cb7aea02931c46fe4e51a98f71566e | refs/heads/master | 2021-06-20T15:34:35.583811 | 2019-12-25T05:58:34 | 2019-12-25T05:58:34 | 195,352,254 | 1 | 0 | Apache-2.0 | 2021-03-25T22:50:17 | 2019-07-05T06:33:48 | Python | UTF-8 | Python | false | false | 9,090 | py | # ########################################################
# Add the project root to sys.path so the package can be found when run from the command line.
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.insert(0, rootPath)
# ########################################################
import unittest
import shutil
import time,json
import logging
from httptesting.globalVar import gl
from httptesting.library import HTMLTESTRunnerCN
from httptesting.library import scripts
from httptesting.library.scripts import (get_yaml_field)
from httptesting.library.emailstmp import EmailClass
from httptesting.library.case_queue import case_exec_queue
from httptesting import case
from httptesting.library.falsework import create_falsework
from httptesting.library.har import ConvertHarToYAML
from httptesting import __version__
import argparse
########################################################################
cmd_path = ''
# Command line mode.
def run_min():
# Takes the current path of the command line
cur_dir = os.getcwd()
os.chdir(cur_dir)
parse = argparse.ArgumentParser(
description='httptesting parameters',
prog='httptesting'
)
parse.add_argument(
"-v",
"--version",
action='version',
version="%(prog)s {}".format(__version__),
help='Framework version.'
)
parse.add_argument(
"-f",
"--file",
default='',
help='The file path; File absolute or relative path.'
)
parse.add_argument(
"-d",
"--dir",
default='',
help='The folder path; folder absolute or relative path.'
)
parse.add_argument(
"-sp",
"--startproject",
default='',
help='Generate test case templates.'
)
parse.add_argument(
"-conf",
"--config",
default='',
help='Basic setting of framework.'
)
parse.add_argument(
"-har",
default='',
help='Convert the har files to YAML. har file is *.har'
)
parse.add_argument(
"-c",
"--convert",
default='',
help='Convert the har files to YAML. YAML file is *.yaml'
)
args = parse.parse_args()
case_file = args.file
case_dir = args.dir
start_project = args.startproject
config = args.config
har = args.har
vert = args.convert
    # Convert YAML.
if vert:
yamlfile = os.path.join(cur_dir, str(vert).strip())
scripts.generate_case_tmpl(yamlfile)
# Convert har files to YAML.
# r'D:\httphar.har'
if har:
temp_dict = ConvertHarToYAML.convert_har_to_ht(har)
ConvertHarToYAML.write_case_to_yaml('', temp_dict)
# Setting global var.
if config == 'set':
try:
os.system(gl.configFile)
except (KeyboardInterrupt, SystemExit):
print("已终止执行.")
if start_project:
create_falsework(os.path.join(os.getcwd(), start_project))
# Get the yaml file name and write to the queue.
if case_file:
case_exec_queue.put(case_file)
# Began to call.
Run_Test_Case.invoke()
if case_dir:
for root, dirs, files in os.walk(case_dir):
for f in files:
if 'yaml' in f:
case_exec_queue.put(os.path.join(case_dir, f))
# Began to call.
Run_Test_Case.invoke()
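# Typical command-line invocations, based on the options defined in run_min()
# (the file and project names below are placeholders):
#     httptesting -f testcase/login.yaml    # run a single YAML case file
#     httptesting -d testcase               # run every YAML file in a folder
#     httptesting -sp demo_project          # generate a test-case project skeleton
#     httptesting -har api.har              # convert a HAR capture to YAML
#     httptesting -c testcase/login.yaml    # generate case templates from a YAML file
#     httptesting -conf set                 # open the framework configuration file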
#########################################################################
# Not in command mode --dir defaults to the testcase directory.
# Example:
# python3 main.py --dir=r"D:\test_project\project\cloud_fi_v2\testcase"
#########################################################################
class Run_Test_Case(object):
@classmethod
def load_tests_list(cls, to):
"""
Specifies the order in which test cases are loaded
        :return: A list with the loaded test suite.
"""
tests = [unittest.TestLoader().loadTestsFromModule(to)]
return tests
@classmethod
def create_report_file(cls):
        # Test report file name.
        report_dir = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        rdir = os.path.join(os.getcwd(), 'report')
        cls.file_name = 'report.html'
        portdir = os.path.join(rdir, report_dir)
        # Create a report folder named after the current date and time.
        if not os.path.exists(portdir):
            # os.mkdir(portdir)
            os.makedirs(portdir)
        cls.filePath = os.path.join(portdir, cls.file_name)  # Full path of the generated report.
return cls.filePath
@staticmethod
def copy_custom_function():
        # Copy the user-defined extension functions module (extfunc.py), if present.
func = os.path.join(os.getcwd(), 'extfunc.py')
target = os.path.join(gl.loadcasePath, 'extfunc.py')
if os.path.exists(func):
shutil.copy(func, target)
@staticmethod
def copy_report(filePath, file_name):
        # Copy the report subfolder into templates/report/.
split_path = os.path.dirname(filePath).split("\\")
low_path = split_path[split_path.__len__() - 1]
web_path = os.path.join(gl.templatesReportPath, low_path)
if not os.path.exists(web_path):
shutil.copytree(os.path.dirname(filePath), web_path)
else:
shutil.copy(filePath, os.path.join(web_path, file_name))
return low_path
@staticmethod
def tmpl_msg(low_path, file_name):
        # Build the DingTalk message summarizing the test results.
result_str = """共{}个用例, 通过{}, 失败{}, 错误{}, 通过率{}""".format(
gl.get_value('sum'),
gl.get_value('passed'),
gl.get_value('failed'),
gl.get_value('error'),
gl.get_value('passrate')
)
        # Test verdict.
if '100' in str(gl.get_value('passrate')):
msg_1 = '本次测试★通过★'
else:
msg_1 = '本次测试★不通过★'
config = get_yaml_field(gl.configFile)
        # Externally reachable report address (ip + port).
        report_url = config['REPORT_URL']
        content = config['DING_TITLE']
        # Compose the DingTalk message.
msg = """{}已完成:{},{}\n测试报告地址:{}/{}/{}"""
msg = msg.format(content, result_str, msg_1, report_url, low_path, file_name)
return msg
@staticmethod
def run(filePath):
"""
Execute the test and generate the test report file.
:param filePath: Report file absolute path.
        :return: None.
"""
# custom function
Run_Test_Case.copy_custom_function()
# Load the unittest framework, which must be written here or DDT will be loaded first.
from httptesting.case import load_case
# Unittest test suite.
suite = unittest.TestSuite()
suite.addTests(Run_Test_Case.load_tests_list(load_case))
# Execute the test and generate the test report file.
with open(filePath, 'wb') as fp:
runner = HTMLTESTRunnerCN.HTMLTestRunner(
stream=fp,
title= '接口自动化测试报告',
description= '详细测试用例结果', # Do not default to null.
tester= "测试组", # tester name ,not default to jack.
verbosity=2
)
# Run the test case.
runner.run(suite)
@staticmethod
def invoke():
"""
        Start executing tests and generate the test reports.
        :return: None.
"""
# #########################Read configuration information###############
config = get_yaml_field(gl.configFile)
dd_enable = config['ENABLE_DDING']
dd_token = config['DD_TOKEN']
dd_url = config['DING_URL']
email_enable = config['EMAIL_ENABLE']
########################################################################
# Test report file name.
time_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
filePath = Run_Test_Case.create_report_file()
# Start test the send pin message.
if dd_enable:
scripts.send_msg_dding(
'{}:★开始API接口自动化测试★'.format(time_str),
dd_token,
dd_url
)
# Execute the test and send the test report.
Run_Test_Case.run(filePath)
print(filePath)
# Copy the folder under the report directory under /templates/report/
# low_path = Run_Test_Case.copy_report(filePath, Run_Test_Case.file_name)
if dd_enable:
# Template message.
dir_list = filePath.split('\\')
low_path = dir_list[len(dir_list) - 2]
msg = Run_Test_Case.tmpl_msg(low_path, Run_Test_Case.file_name)
print(msg)
scripts.send_msg_dding(msg, dd_token, dd_url)
if email_enable:
# Send test report to EMAIL.
email = EmailClass()
email.send(filePath)
if __name__ == "__main__":
run_min()
| [
"[email protected]"
] | |
84b921ebd67dca82253a50ee13baf4d2cb8fdb97 | 6646f6b92e9ff31f2f74b749ea12ace53cfc135c | /tests/unit/models/test_package_model.py | 5d883c6352b89b74851372eb02d55c084db4b862 | [] | no_license | EricMontague/SponsorMatch | 0a6685edb44b2694824d3d3a4d15dfcb42fdb68e | 864aa3cfe25d74c2b97b9f09f45eb9fa10dac892 | refs/heads/master | 2022-12-08T22:43:21.684165 | 2021-03-19T00:50:06 | 2021-03-19T00:50:06 | 241,396,411 | 0 | 0 | null | 2022-12-08T03:38:23 | 2020-02-18T15:27:42 | Python | UTF-8 | Python | false | false | 2,398 | py | """This module contains tests for the package model."""
import unittest
from tests.integration.testing_data import TestModelFactory
from app import create_app
from app.extensions import db
class PackageModelTestCase(unittest.TestCase):
"""Class to test the Package Model."""
def setUp(self):
"""Create application instance and insert necessary
information into the database before each test.
"""
self.app = create_app("testing", False)
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
def tearDown(self):
"""Pop application context, remove the db session,
and drop all tables in the database.
"""
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_package_sold_out(self):
"""Test to ensure that a package is correctly
recognized as sold out.
"""
role = TestModelFactory.create_role("Event Organizer")
user = TestModelFactory.create_user()
user.role = role
venue = TestModelFactory.create_venue()
event = TestModelFactory.create_event("Test Event", "live")
event.user = user
event.venue = venue
package = TestModelFactory.create_package(price=100, available_packages=10)
package.event = event
db.session.add_all([user, event, package])
db.session.commit()
package.num_purchased = package.available_packages
self.assertTrue(package.is_sold_out())
def test_package_num_sales(self):
"""Test to ensure that the number of packages purchased
is recorded correctly in the database.
"""
role = TestModelFactory.create_role("Event Organizer")
user = TestModelFactory.create_user()
user.role = role
venue = TestModelFactory.create_venue()
event = TestModelFactory.create_event("Test Event", "live")
event.user = user
event.venue = venue
package = TestModelFactory.create_package(price=100, available_packages=10)
package.event = event
db.session.add_all([user, event, package])
db.session.commit()
self.assertEqual(package.num_for_sale(), 10)
package.num_purchased += 1
self.assertEqual(package.num_for_sale(), 9)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
73a23e510d0db12d3463c18a0f24bc61535d211a | 9d1b1d52f99b86bec0e74878c0535057115dc667 | /pes/views.py | 2c1c8ed73da304fb4070741309f11b3496348234 | [] | no_license | antocuni/pesranking | 1f9b2bb8f03ba15f5f5d36ff6e70e0de8edc5002 | 574ecf8b5e49979adf709239a4df78de83acd039 | refs/heads/master | 2022-11-22T03:21:40.837305 | 2011-12-01T19:31:03 | 2011-12-01T19:31:03 | 275,815,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | from django.http import HttpResponseRedirect
from pes import models
def updateranking(request):
models.Match.updateranking()
return HttpResponseRedirect(request.META['HTTP_REFERER'])
| [
"[email protected]"
] | |
9dc4b494a28257793973cafba0d97492a5e21a0a | 8dbb2a3e2286c97b1baa3ee54210189f8470eb4d | /kubernetes-stubs/client/models/v1beta1_volume_error.pyi | 9f645d23474efed86f712f543da51fd09fa5d231 | [] | no_license | foodpairing/kubernetes-stubs | e4b0f687254316e6f2954bacaa69ff898a88bde4 | f510dc3d350ec998787f543a280dd619449b5445 | refs/heads/master | 2023-08-21T21:00:54.485923 | 2021-08-25T03:53:07 | 2021-08-25T04:45:17 | 414,555,568 | 0 | 0 | null | 2021-10-07T10:26:08 | 2021-10-07T10:26:08 | null | UTF-8 | Python | false | false | 518 | pyi | import datetime
import typing
import kubernetes.client
class V1beta1VolumeError:
message: typing.Optional[str]
time: typing.Optional[datetime.datetime]
def __init__(
self,
*,
message: typing.Optional[str] = ...,
time: typing.Optional[datetime.datetime] = ...
) -> None: ...
def to_dict(self) -> V1beta1VolumeErrorDict: ...
class V1beta1VolumeErrorDict(typing.TypedDict, total=False):
message: typing.Optional[str]
time: typing.Optional[datetime.datetime]
| [
"[email protected]"
] | |
f8e6de07d5227dc1abec9911ddada669643f42bb | 8b97e08d7d7cd904cafe5163eb81d6e2a81fbd90 | /venv/bin/easy_install | 74aede3b0a1b02eca86658618ec7a70e9aa649db | [] | no_license | Shaigift/Python-Practice-3 | 180f8005b31526a4f4c834be5ea46bea1a04ef62 | e464740050513a455debeecc1af568f10707010a | refs/heads/master | 2022-08-27T15:50:14.602975 | 2020-05-20T08:31:32 | 2020-05-20T08:31:32 | 265,501,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | #!/Users/mphoshai/PycharmProjects/untitled/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | ||
688f5a27c17943c555fe537f43e8a91de0397e93 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/scattercarpet/_uid.py | e693f7c7608fa0de61de5dbd33659c52dd174a3f | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 390 | py | import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="uid", parent_name="scattercarpet", **kwargs):
super(UidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs
)
| [
"[email protected]"
] | |
073c78c464eb8c22be7697340798bdfb19009e7c | 8adec48dfaee1cdfd6c7f4d2fb3038aa1c17bda6 | /WProf/build/masters/master.chromium.chromiumos/master_gatekeeper_cfg.py | 68b8a28fa503e0db0192a1a7d126068772a3feef | [] | no_license | kusoof/wprof | ef507cfa92b3fd0f664d0eefef7fc7d6cd69481e | 8511e9d4339d3d6fad5e14ad7fff73dfbd96beb8 | refs/heads/master | 2021-01-11T00:52:51.152225 | 2016-12-10T23:51:14 | 2016-12-10T23:51:14 | 70,486,057 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,526 | py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from master import gatekeeper
from master import master_utils
# This is the list of the builder categories and the corresponding critical
# steps. If one critical step fails, gatekeeper will close the tree
# automatically.
# Note: don't include 'update scripts' since we can't do much about it when
# it's failing and the tree is still technically fine.
chromium_categories_steps = {
'': ['update'],
'tester': [
'base_unittests',
#'browser_tests',
'cacheinvalidation_unittests',
'content_unittests',
'courgette_unittests',
'crypto_unittests',
'dbus_unittests',
'googleurl_unittests',
'installer_util_unittests',
#'interactive_ui_tests',
'ipc_tests',
'jingle_unittests',
'media_unittests',
'mini_installer_test',
'nacl_integration',
'net_unittests',
'printing_unittests',
'remoting_unittests',
'sbox_integration_tests',
'sbox_unittests',
'sbox_validation_tests',
'sizes',
'sql_unittests',
'start_crash_handler',
'sync_unittests',
'test_shell_tests',
'ui_unittests',
'unit_tests',
'views_unittests',
#'webkit_tests',
],
'compile': ['check_deps', 'compile', 'archive_build'],
'closer': ['BuildTarget'],
}
exclusions = {
}
forgiving_steps = ['update_scripts', 'update', 'svnkill', 'taskkill',
'archive_build', 'start_crash_handler']
close_chromiumos_categories_steps = {
'closer': [
'LKGMSync',
'BuildBoard',
'UnitTest',
],
}
warn_chromiumos_categories_steps = {
'watch': [
'UploadPrebuilts',
'Archive',
'VMTest',
],
}
warn_aura_chromiumos_categories_steps = {
'aurawatch': [
'Archive',
'BuildTarget',
'BuildBoard',
'UnitTest',
]
}
subject = ('buildbot %(result)s in %(projectName)s on %(builder)s, '
'revision %(revision)s')
warning_header = ('Please look at failure in "%(steps)s" on "%(builder)s" '
'and help out if you can')
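# The %(...)s keys above are substitution placeholders filled in by the gatekeeper/mail notifier when it formats notification messages.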
def Update(config, active_master, alternate_master, c):
# chrome likely/possible failures to the chrome sheriffs, closing the
# chrome tree
c['status'].append(gatekeeper.GateKeeper(
fromaddr=active_master.from_address,
categories_steps=chromium_categories_steps,
exclusions=exclusions,
relayhost=config.Master.smtp,
subject=subject,
extraRecipients=active_master.tree_closing_notification_recipients,
lookup=master_utils.FilterDomain(),
forgiving_steps=forgiving_steps,
tree_status_url=active_master.tree_status_url,
sheriffs=['sheriff'],
use_getname=True))
# chromium os failures close the chromeOS tree
c['status'].append(gatekeeper.GateKeeper(
fromaddr=active_master.from_address,
categories_steps=close_chromiumos_categories_steps,
exclusions=exclusions,
relayhost=config.Master.smtp,
subject='Closer ' + subject,
extraRecipients=alternate_master.tree_closing_notification_recipients,
lookup=master_utils.FilterDomain(),
forgiving_steps=forgiving_steps,
tree_status_url=alternate_master.tree_status_url,
sheriffs=['sheriff_cros_mtv', 'sheriff_cros_nonmtv'],
use_getname=True))
# chromium os buried failures/flakiness to chrome OS folk
c['status'].append(gatekeeper.GateKeeper(
fromaddr=active_master.from_address,
categories_steps=warn_chromiumos_categories_steps,
exclusions=exclusions,
relayhost=config.Master.smtp,
subject='Warning ' + subject,
status_header=warning_header,
extraRecipients=[],
lookup=master_utils.FilterDomain(),
forgiving_steps=forgiving_steps,
tree_status_url=None,
sheriffs=['sheriff_cros_mtv', 'sheriff_cros_nonmtv'],
use_getname=True))
# while the Aura folk are in panic fast mode, let them know to help on
# failures that may be related to their special configs.
c['status'].append(gatekeeper.GateKeeper(
fromaddr=active_master.from_address,
categories_steps=warn_aura_chromiumos_categories_steps,
exclusions=exclusions,
relayhost=config.Master.smtp,
subject='Warning ' + subject,
status_header=warning_header,
extraRecipients=[],
lookup=master_utils.FilterDomain(),
forgiving_steps=forgiving_steps,
tree_status_url=None,
sheriffs=['sheriff_aura'],
use_getname=True))
| [
"kusoof@kookaburra.(none)"
] | kusoof@kookaburra.(none) |
f0eefe22562432df713f9a164d1362e2892d2ea0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03495/s527457667.py | af89959a94f603b8e66e9c604d4ff5d4f266dce7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | n, k = map(int, input().split())
a = list(map(int, input().split()))
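# Keep the k most frequent values; if there are more than k distinct values,
# every ball of the remaining values must be rewritten, so the answer is
# n minus the kept counts (otherwise 0).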
ball = {}
for i in a:
if i in ball:
ball[i] += 1
else:
ball[i] = 1
ball = sorted(ball.items(), key=lambda x: -x[1])
ans = 0
if len(ball) > k:
for i in range(k):
ans += ball[i][1]
ans = n - ans
print(ans)
| [
"[email protected]"
] | |
2a46cba90659a56d1af070ee76242a046edd72a9 | ff12b271c7538f0621b88e567b315d5bb44166af | /ambari_monitor/hbase_monitor/hbase_monitor_v2/conn_db.py | d483eee20dceb6a566d0b5d5b49a331740dd2f1d | [] | no_license | witnesslq/big_data_operation | 23ca6afd2f69fbe2b4f9debea4bd2f49f6d4a1c8 | 829422bfd3c52fbd99e0b54e3da7b9ac7ec4f3cd | refs/heads/main | 2023-06-06T22:17:15.572951 | 2021-07-13T14:34:18 | 2021-07-13T14:34:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,630 | py | #!/usr/bin/env python
# -*-coding:utf-8 -*-
# ***************************************************************************
# File name    : conn_db.py
# Description  : migrate Hive tables
# Input tables :
# Output tables:
# Author       : hyn
# Created      : 20200808
# Change log   :
# Modified     :
# ***************************************************************************
# Usage: python conn_db.py
# ***************************************************************************
import os
import sys
from datetime import datetime
import datetime as date_time
import pymysql
mysql_sh = "mysql -h 172.19.168.22 -P 3308 -u zhao -pzhao zhao -e ' "
# Open a MySQL connection
def conn_db():
conn = pymysql.connect(host="192.168.195.233", port=20031, user="csapdmcfg", passwd="iEXIMt3w!TFL9vkO", db="csapdmcfg", charset="utf8")
return conn
# Query data
def select(sql):
conn = conn_db()
cursor = conn.cursor()
cursor.execute(sql)
result = cursor.fetchall()
cursor.close()
conn.close()
# print result
return result
# Insert or update data
def insert(sql):
conn = conn_db()
cursor = conn.cursor()
cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
cursor.close()
conn.close()
# print type(result)
# print result
return result
# Insert or update data in batch
def insert_batch(sql_list):
conn = conn_db()
cursor = conn.cursor()
for sql in sql_list:
cursor.execute(sql)
conn.commit()
cursor.close()
conn.close()
# result = cursor.fetchall()
# print type(result)
# print result
return
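# Example usage (table and column names below are hypothetical):
#   rows = select("SELECT id, name FROM sample_table LIMIT 10")
#   insert("UPDATE sample_table SET name = 'x' WHERE id = 1")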
| [
"[email protected]"
] | |
9fbddab470ce95d6a31bb446fcd8a7ee812aa1d0 | 5399dd4580ea3f528753bc8b52a981743d62f8bb | /keras/keras26_LSTM_hamsu.py | 10977c12c7ffbf0c14ef67ef7f6d8b6f2e3211d9 | [] | no_license | iwillbeaprogramer/Study | 3ac7c118ffe3981d78b4ad263cb62432eae13970 | 3bfe571da5bbfc545b994e5878e217f9306bde14 | refs/heads/main | 2023-05-07T16:31:05.564973 | 2021-05-27T14:50:00 | 2021-05-27T14:50:00 | 324,044,441 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,598 | py | # keras23_LSTM3_scale rewritten with the Keras functional API
import numpy as np
x = np.array([[1,2,3],[2,3,4],[3,4,5],[4,5,6],[5,6,7],[6,7,8],[7,8,9],[8,9,10],[9,10,11],[10,11,12],[20,30,40],[30,40,50],[40,50,60]])
y = np.array([4,5,6,7,8,9,10,11,12,13,50,60,70])
x_pred = np.array([50,60,70]).reshape(1,3,1)
from tensorflow.keras.models import Sequential,Model
from tensorflow.keras.layers import Dense,LSTM,Input
from sklearn.metrics import r2_score
x = x.reshape(13,3,1)
inputs = Input(shape = (3,1))
lstm = LSTM(1024,activation='relu')(inputs)
dense1 = Dense(512,activation='relu')(lstm)
dense2 = Dense(256,activation='relu')(dense1)
dense3 = Dense(128,activation='relu')(dense2)
dense4 = Dense(64,activation='relu')(dense3)
dense5 = Dense(32,activation='relu')(dense4)
dense6 = Dense(8,activation='relu')(dense5)
dense7 = Dense(4,activation='relu')(dense6)
outputs = Dense(1)(dense7)
model = Model(inputs,outputs)
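# A Sequential version of this model is kept commented out below for reference.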
# model = Sequential()
# model.add(LSTM(1024,input_shape=(3,1),activation='relu'))
# model.add(Dense(512,activation='relu'))
# model.add(Dense(256,activation='relu'))
# model.add(Dense(128,activation='relu'))
# model.add(Dense(64,activation='relu'))
# model.add(Dense(32,activation='relu'))
# model.add(Dense(16,activation='relu'))
# model.add(Dense(8,activation='relu'))
# model.add(Dense(4,activation='relu'))
# model.add(Dense(1))
model.compile(loss='mse',optimizer='adam')
model.fit(x,y,epochs=500,batch_size=1)
loss = model.evaluate(x,y,batch_size=1)
y_pred = model.predict(x_pred)
print(y_pred)
print('loss : ',loss)
'''
[[81.13962]]
[[80.14889]]
loss : 0.05985087901353836
''' | [
"[email protected]"
] | |
1e4ec69660f5980e00461dbe5783a03c23174204 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4131/codes/1758_1580.py | c80833ecede95654db5b447bb5eb5803ca08197f | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | from numpy import*
v=array(eval(input("notas:")))*1.0
n=array(eval(input("alunos:")))
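# v: grades ("notas"; -1.0 marks an absent student), n: student names ("alunos")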
i=0
faltas=0
aprovados=0
reprovados=0
soma=0
while(i<size(v)):
if(v[i]==-1.0):
faltas+=1
if(v[i]>=6):
aprovados+=1
soma+=v[i]
if(v[i]<6.0 and v[i]!=-1.0):
reprovados+=1
soma+=v[i]
if(v[i]==max(v)):
nome = n[i]
i=i+1
print(faltas)
print(aprovados)
print(reprovados)
print(round(soma/(aprovados+reprovados),2))
print(nome) | [
"[email protected]"
] | |
573e195a6ee0cf64d44548b0b3cf38f0233749b1 | 7c843f80a08db6725fd8d2e85099d9e6c13f6426 | /lib/yllibInterface.py | ec7a24ba3216459a602d8f2be161df434745f8a3 | [] | no_license | wanfade/scaffolding_Seg | e983c1d1cdd60efcd7d381728c277993a1cf4721 | 12ba8892eb44d3ce47fa2609973b0510904c4753 | refs/heads/master | 2023-03-16T05:57:28.808341 | 2017-11-25T13:53:11 | 2017-11-25T13:53:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | # -*- coding: utf-8 -*-
import sys
from os.path import abspath,join,dirname
yllibPath = abspath(join(dirname(abspath(__file__)),'./yl'))
if yllibPath not in sys.path:
sys.path = [yllibPath] + sys.path
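# Prepend the bundled ./yl directory so its modules take precedence over any installed copies.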
import tool
import ylimg as imglib
import ylml as mllib
import ylnp as nplib
from tool import *
from ylimg import *
from ylml import *
from ylnp import *
if __name__ == '__main__':
pass
| [
"[email protected]"
] |