max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
torch/utils/data/datapipes/iter/httpreader.py | xiaohanhuang/pytorch | 60,067 | 12781056 | <reponame>xiaohanhuang/pytorch<filename>torch/utils/data/datapipes/iter/httpreader.py
from io import IOBase
from typing import Sized, Tuple
from torch.utils.data import IterDataPipe
from torch.utils.data.datapipes.utils.common import deprecation_warning_torchdata
class HTTPReaderIterDataPipe(IterDataPipe[Tuple[str, IOBase]]):
r""" :class:`HTTPReaderIterDataPipe`
Iterable DataPipe to load file url(s) (http url(s) pointing to file(s)),
yield file url and IO stream in a tuple
Args:
datapipe: Iterable DataPipe providing urls
timeout: Timeout for http request
"""
def __init__(self, datapipe, timeout=None):
self.datapipe = datapipe
self.timeout = timeout
deprecation_warning_torchdata(type(self).__name__)
def __iter__(self):
from requests import HTTPError, RequestException, Session
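# requests is imported lazily so the DataPipe has no hard dependency on it until iteration time.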
for url in self.datapipe:
try:
with Session() as session:
if self.timeout is None:
r = session.get(url, stream=True)
else:
r = session.get(url, timeout=self.timeout, stream=True)
yield url, r.raw
except HTTPError as e:
raise Exception(f"Could not get the file. [HTTP Error] {e.response}.")
except RequestException as e:
raise Exception(f"Could not get the file at {url}. [RequestException] {e.response}.")
except Exception:
raise
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
return len(self.datapipe)
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
|
scipy/stats/tests/test_crosstab.py | Ennosigaeon/scipy | 9,095 | 12781061 | <filename>scipy/stats/tests/test_crosstab.py
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from scipy.stats.contingency import crosstab
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_basic(sparse):
a = [0, 0, 9, 9, 0, 0, 9]
b = [2, 1, 3, 1, 2, 3, 3]
expected_avals = [0, 9]
expected_bvals = [1, 2, 3]
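# Rows of expected_count follow the sorted unique values of a; columns follow those of b.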
expected_count = np.array([[1, 2, 1],
[1, 0, 2]])
(avals, bvals), count = crosstab(a, b, sparse=sparse)
assert_array_equal(avals, expected_avals)
assert_array_equal(bvals, expected_bvals)
if sparse:
assert_array_equal(count.A, expected_count)
else:
assert_array_equal(count, expected_count)
def test_crosstab_basic_1d():
# Verify that a single input sequence works as expected.
x = [1, 2, 3, 1, 2, 3, 3]
expected_xvals = [1, 2, 3]
expected_count = np.array([2, 2, 3])
(xvals,), count = crosstab(x)
assert_array_equal(xvals, expected_xvals)
assert_array_equal(count, expected_count)
def test_crosstab_basic_3d():
# Verify the function for three input sequences.
a = 'a'
b = 'b'
x = [0, 0, 9, 9, 0, 0, 9, 9]
y = [a, a, a, a, b, b, b, a]
z = [1, 2, 3, 1, 2, 3, 3, 1]
expected_xvals = [0, 9]
expected_yvals = [a, b]
expected_zvals = [1, 2, 3]
expected_count = np.array([[[1, 1, 0],
[0, 1, 1]],
[[2, 0, 1],
[0, 0, 1]]])
(xvals, yvals, zvals), count = crosstab(x, y, z)
assert_array_equal(xvals, expected_xvals)
assert_array_equal(yvals, expected_yvals)
assert_array_equal(zvals, expected_zvals)
assert_array_equal(count, expected_count)
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_levels(sparse):
a = [0, 0, 9, 9, 0, 0, 9]
b = [1, 2, 3, 1, 2, 3, 3]
expected_avals = [0, 9]
expected_bvals = [0, 1, 2, 3]
expected_count = np.array([[0, 1, 2, 1],
[0, 1, 0, 2]])
(avals, bvals), count = crosstab(a, b, levels=[None, [0, 1, 2, 3]],
sparse=sparse)
assert_array_equal(avals, expected_avals)
assert_array_equal(bvals, expected_bvals)
if sparse:
assert_array_equal(count.A, expected_count)
else:
assert_array_equal(count, expected_count)
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_extra_levels(sparse):
# The pair of values (-1, 3) will be ignored, because we explicitly
# request the counted `a` values to be [0, 9].
a = [0, 0, 9, 9, 0, 0, 9, -1]
b = [1, 2, 3, 1, 2, 3, 3, 3]
expected_avals = [0, 9]
expected_bvals = [0, 1, 2, 3]
expected_count = np.array([[0, 1, 2, 1],
[0, 1, 0, 2]])
(avals, bvals), count = crosstab(a, b, levels=[[0, 9], [0, 1, 2, 3]],
sparse=sparse)
assert_array_equal(avals, expected_avals)
assert_array_equal(bvals, expected_bvals)
if sparse:
assert_array_equal(count.A, expected_count)
else:
assert_array_equal(count, expected_count)
def test_validation_at_least_one():
with pytest.raises(TypeError, match='At least one'):
crosstab()
def test_validation_same_lengths():
with pytest.raises(ValueError, match='must have the same length'):
crosstab([1, 2], [1, 2, 3, 4])
def test_validation_sparse_only_two_args():
with pytest.raises(ValueError, match='only two input sequences'):
crosstab([0, 1, 1], [8, 8, 9], [1, 3, 3], sparse=True)
def test_validation_len_levels_matches_args():
with pytest.raises(ValueError, match='number of input sequences'):
crosstab([0, 1, 1], [8, 8, 9], levels=([0, 1, 2, 3],))
|
examples/libtest/imports/allsimple.py | takipsizad/pyjs | 739 | 12781079 | """
Helper module for import * without __all__
"""
all_import2 = 3
all_import3 = 3
all_override = True
|
step30_multiple_stacks/Python/backend/lambda.py | fullstackwebdev/full-stack-serverless-cdk | 192 | 12781091 | <reponame>fullstackwebdev/full-stack-serverless-cdk<filename>step30_multiple_stacks/Python/backend/lambda.py
import json
import random
import string
def lambda_handler(event, context):
# print(event)
# print(context)
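# Build a random 10-character lowercase string to use as the response body.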
letters = string.ascii_lowercase
value = ''.join(random.choice(letters) for i in range(10))
return {
'statusCode': 200,
"headers": json.dumps({ 'Access-Control-Allow-Origin': '*' }),
"body": json.dumps(value)
}
|
scripts/benchmark_ecef2geo.py | wrlssqi/pymap3d | 116 | 12781116 | <filename>scripts/benchmark_ecef2geo.py
#!/usr/bin/env python3
"""
benchmark ecef2geodetic
"""
import time
from pymap3d.ecef import ecef2geodetic
import numpy as np
import argparse
ll0 = (42.0, 82.0)
def bench(N: int) -> float:
x = np.random.random(N)
y = np.random.random(N)
z = np.random.random(N)
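# Only the ecef2geodetic conversion is timed; generating the inputs above is excluded.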
tic = time.monotonic()
lat, lon, alt = ecef2geodetic(x, y, z)
return time.monotonic() - tic
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument("N", type=int)
p = p.parse_args()
N = p.N
print(f"ecef2geodetic: {bench(N):.3f} seconds")
|
mmdeploy/codebase/mmdet3d/models/voxelnet.py | xizi/mmdeploy | 746 | 12781126 | # Copyright (c) OpenMMLab. All rights reserved.
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
'mmdet3d.models.detectors.voxelnet.VoxelNet.simple_test')
def voxelnet__simple_test(ctx,
self,
voxels,
num_points,
coors,
img_metas=None,
imgs=None,
rescale=False):
"""Test function without augmentaiton. Rewrite this func to remove model
post process.
Args:
voxels (torch.Tensor): Point features or raw points in shape (N, M, C).
num_points (torch.Tensor): Number of points in each pillar.
coors (torch.Tensor): Coordinates of each voxel.
img_metas (list[dict]): Contains pcd meta info.
Returns:
List: Result of model.
"""
x = self.extract_feat(voxels, num_points, coors, img_metas)
bbox_preds, scores, dir_scores = self.bbox_head(x)
return bbox_preds, scores, dir_scores
@FUNCTION_REWRITER.register_rewriter(
'mmdet3d.models.detectors.voxelnet.VoxelNet.extract_feat')
def voxelnet__extract_feat(ctx,
self,
voxels,
num_points,
coors,
img_metas=None):
"""Extract features from points. Rewrite this func to remove voxelize op.
Args:
voxels (torch.Tensor): Point features or raw points in shape (N, M, C).
num_points (torch.Tensor): Number of points in each pillar.
coors (torch.Tensor): Coordinates of each voxel.
img_metas (list[dict]): Contains pcd meta info.
Returns:
torch.Tensor: Features from points.
"""
voxel_features = self.voxel_encoder(voxels, num_points, coors)
batch_size = coors[-1, 0] + 1 # refactor
assert batch_size == 1
x = self.middle_encoder(voxel_features, coors, batch_size)
x = self.backbone(x)
if self.with_neck:
x = self.neck(x)
return x
|
egg/hatch.py | TheMartianObserver/nsimd | 247 | 12781151 | <filename>egg/hatch.py
# Copyright (c) 2021 Agenium Scale
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# What does this script do?
# ----------------------
#
# This script generates code for each architecture, the base C/C++ APIs and
# the advanced C++ API. Each part to be generated is handled by a
# `gen_*.py` file. This script simply calls the `doit` function of each
# `gen_*.py` module. Names are self-explanatory.
#
# -----------------------------------------------------------------------------
# First thing we do is check whether python3 is used
import sys
if sys.version_info[0] < 3:
print('Only Python 3 is supported')
sys.exit(1)
# -----------------------------------------------------------------------------
# Imports
import argparse
import os
import re
import common
import gen_archis
import gen_base_apis
import gen_adv_cxx_api
import gen_adv_c_api
import gen_tests
import gen_src
import gen_doc
import gen_friendly_but_not_optimized
import gen_modules
import gen_scalar_utilities
import get_sleef_code
# Dir of this script
script_dir = os.path.dirname(__file__)
if script_dir == '':
script_dir = '.'
# -----------------------------------------------------------------------------
# Arguments parsing
def parse_args(args):
def parse_simd(value):
## Split .simd now
values = {
'x86': common.x86_simds,
'arm': common.arm_simds,
'ppc': common.ppc_simds,
'all': common.simds,
}.get(value, value.split(','))
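# Unknown keywords fall back to treating the value as a comma-separated list of SIMD names.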
## Check that all simd are valid
ret = []
for simd in values:
if simd not in common.simds:
raise argparse.ArgumentTypeError(
"SIMD '{}' not found in {}".format(simd, common.simds))
ret += common.simds_deps[simd]
return list(set(ret))
def parse_match(value):
if value is None:
return None
else:
return re.compile(value)
# In practice, we either generate all or all except tests and we never
# change default directories for code generation. So we remove unused
# options and regroup some into --library.
parser = argparse.ArgumentParser(
description='This is NSIMD generation script.')
parser.add_argument('--force', '-f', action='store_true',
help='Generate all files even if they already exist')
parser.add_argument('--list-files', '-L', action='store_true',
default=False,
help='List files that will be created by hatch.py')
parser.add_argument('--all', '-A', action='store_true',
help='Generate code for the library and its tests')
parser.add_argument('--library', '-l', action='store_true',
help='Generate code of the library (C and C++ APIs)')
parser.add_argument('--sleef', '-s', action='store_true', default=False,
help='Compile Sleef')
parser.add_argument('--tests', '-t', action='store_true',
help='Generate tests in C and C++')
parser.add_argument('--doc', '-d', action='store_true',
help='Generate all documentation')
parser.add_argument('--enable-clang-format', '-F', action='store_false',
default=True,
help='Disable Clang Format (mainly for speed on Windows)')
parser.add_argument('--sve-emulate-bool', action='store_true',
default=False,
help='Use normal SVE vector to emulate predicates.')
parser.add_argument('--simd', '-D', type=parse_simd, default='all',
help='List of SIMD extensions (separated by a comma)')
parser.add_argument('--match', '-m', type=parse_match, default=None,
help='Regex used to filter generation on operator names')
parser.add_argument('--verbose', '-v', action = 'store_true', default=None,
help='Enable verbose mode')
parser.add_argument('--simple-license', action='store_true', default=False,
help='Put a simple copyright statement instead of the whole license')
opts = parser.parse_args(args)
# When -L has been chosen, we want to list all files and so we have to
# turn to True other parameters
if opts.list_files:
opts.library = True
opts.tests = True
opts.force = True
opts.doc = True
# We set variables here because all the code depends on them + we do want
# to keep the possibility to change them in the future
opts.archis = opts.library
opts.base_apis = opts.library
opts.adv_cxx_api = opts.library
opts.adv_c_api = opts.library
opts.friendly_but_not_optimized = opts.library
opts.src = opts.library
opts.scalar_utilities = opts.library
opts.sleef_version = '3.5.1'
opts.include_dir = os.path.join(script_dir, '..', 'include', 'nsimd')
opts.tests_dir = os.path.join(script_dir, '..', 'tests')
opts.src_dir = os.path.join(script_dir, '..', 'src')
return opts
# -----------------------------------------------------------------------------
# Entry point
def main():
opts = parse_args(sys.argv[1:])
opts.script_dir = script_dir
opts.modules_list = None
opts.platforms_list = None
## Gather all SIMD dependencies
opts.simd = common.get_simds_deps_from_opts(opts)
common.myprint(opts, 'List of SIMD: {}'.format(', '.join(opts.simd)))
if opts.archis == True or opts.all == True:
gen_archis.doit(opts)
if opts.base_apis == True or opts.all == True:
gen_base_apis.doit(opts)
if opts.adv_cxx_api == True or opts.all == True:
gen_adv_cxx_api.doit(opts)
if opts.adv_c_api == True or opts.all == True:
gen_adv_c_api.doit(opts)
if opts.tests == True or opts.all == True:
gen_tests.doit(opts)
if opts.src == True or opts.all == True:
gen_src.doit(opts)
if opts.sleef == True or opts.all == True:
get_sleef_code.doit(opts)
if opts.scalar_utilities == True or opts.all == True:
gen_scalar_utilities.doit(opts)
if opts.friendly_but_not_optimized == True or opts.all == True:
gen_friendly_but_not_optimized.doit(opts)
gen_modules.doit(opts) # this must be here after all NSIMD
if opts.doc == True or opts.all == True:
gen_doc.doit(opts)
if __name__ == '__main__':
main()
|
ggtnn_graph_parse.py | hexahedria/gated-graph-transformer-network | 160 | 12781160 | <filename>ggtnn_graph_parse.py<gh_stars>100-1000
import os
import sys
import re
import collections
import numpy as np
import scipy
import json
import itertools
import pickle
import gc
import gzip
import argparse
def tokenize(sent):
'''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
return re.findall(r'(?:\w+)|\S', sent)
def list_to_map(l):
'''Convert a list of values to a map from values to indices'''
return {val:i for i,val in enumerate(l)}
def parse_stories(lines):
'''
Parse stories provided in the bAbi tasks format, with knowledge graph.
'''
data = []
story = []
for line in lines:
if line[-1] == "\n":
line = line[:-1]
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
questions = []
if '\t' in line:
q, apre = line.split('\t')[:2]
a = apre.split(',')
q = tokenize(q)
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else:
line, graph = line.split('=', 1)
sent = tokenize(line)
graph_parsed = json.loads(graph)
story.append((sent, graph_parsed))
return data
def get_stories(taskname):
with open(taskname, 'r') as f:
lines = f.readlines()
return parse_stories(lines)
def get_max_sentence_length(stories):
return max((max((len(sentence) for (sentence, graph) in sents_graphs)) for (sents_graphs, query, answer) in stories))
def get_max_query_length(stories):
return max((len(query) for (sents_graphs, query, answer) in stories))
def get_max_num_queries(stories):
return max((len(queries) for (sents_graphs, query, answer) in stories))
def get_max_nodes_per_iter(stories):
result = 0
for (sents_graphs, query, answer) in stories:
prev_nodes = set()
for (sentence, graph) in sents_graphs:
cur_nodes = set(graph["nodes"])
new_nodes = len(cur_nodes - prev_nodes)
if new_nodes > result:
result = new_nodes
prev_nodes = cur_nodes
return result
def get_buckets(stories, max_ignore_unbatched=100, max_pad_amount=25):
sentencecounts = [len(sents_graphs) for (sents_graphs, query, answer) in stories]
countpairs = sorted(collections.Counter(sentencecounts).items())
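# A bucket boundary is added whenever padding to the current bucket would exceed max_pad_amount or more than max_ignore_unbatched stories have accumulated.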
buckets = []
smallest_left_val = 0
num_unbatched = max_ignore_unbatched
for val,ct in countpairs:
num_unbatched += ct
if val - smallest_left_val > max_pad_amount or num_unbatched > max_ignore_unbatched:
buckets.append(val)
smallest_left_val = val
num_unbatched = 0
if buckets[-1] != countpairs[-1][0]:
buckets.append(countpairs[-1][0])
return buckets
PAD_WORD = "<PAD>"
def get_wordlist(stories):
words = [PAD_WORD] + sorted(list(set((word
for (sents_graphs, query, answer) in stories
for wordbag in itertools.chain((s for s,g in sents_graphs), [query])
for word in wordbag ))))
wordmap = list_to_map(words)
return words, wordmap
def get_answer_list(stories):
words = sorted(list(set(word for (sents_graphs, query, answer) in stories for word in answer)))
wordmap = list_to_map(words)
return words, wordmap
def pad_story(story, num_sentences, sentence_length):
def pad(lst,dlen,pad):
return lst + [pad]*(dlen - len(lst))
sents_graphs, query, answer = story
padded_sents_graphs = [(pad(s,sentence_length,PAD_WORD), g) for s,g in sents_graphs]
padded_query = pad(query,sentence_length,PAD_WORD)
sentgraph_padding = (pad([],sentence_length,PAD_WORD), padded_sents_graphs[-1][1])
return (pad(padded_sents_graphs, num_sentences, sentgraph_padding), padded_query, answer)
def get_unqualified_id(s):
return s.split("#")[0]
def get_graph_lists(stories):
node_words = sorted(list(set(get_unqualified_id(node)
for (sents_graphs, query, answer) in stories
for sent,graph in sents_graphs
for node in graph["nodes"])))
nodemap = list_to_map(node_words)
edge_words = sorted(list(set(get_unqualified_id(edge["type"])
for (sents_graphs, query, answer) in stories
for sent,graph in sents_graphs
for edge in graph["edges"])))
edgemap = list_to_map(edge_words)
return node_words, nodemap, edge_words, edgemap
def convert_graph(graphs, nodemap, edgemap, new_nodes_per_iter, dynamic=True):
num_node_ids = len(nodemap)
num_edge_types = len(edgemap)
full_size = len(graphs)*new_nodes_per_iter + 1
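# In dynamic mode, reserve new_nodes_per_iter node slots per timestep plus one initial slot.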
prev_size = 1
processed_nodes = []
index_map = {}
all_num_nodes = []
all_node_ids = []
all_node_strengths = []
all_edges = []
if not dynamic:
processed_nodes = list(nodemap.keys())
index_map = nodemap.copy()
prev_size = num_node_ids
full_size = prev_size
new_nodes_per_iter = 0
for g in graphs:
active_nodes = g["nodes"]
active_edges = g["edges"]
new_nodes = [e for e in active_nodes if e not in processed_nodes]
num_new_nodes = len(new_nodes)
if not dynamic:
assert num_new_nodes == 0, "Cannot create more nodes in non-dynamic mode!\n{}".format(graphs)
new_node_strengths = np.zeros([new_nodes_per_iter], np.float32)
new_node_strengths[:num_new_nodes] = 1.0
new_node_ids = np.zeros([new_nodes_per_iter, num_node_ids], np.float32)
for i, node in enumerate(new_nodes):
new_node_ids[i,nodemap[get_unqualified_id(node)]] = 1.0
index_map[node] = prev_size + i
next_edges = np.zeros([full_size, full_size, num_edge_types])
for edge in active_edges:
next_edges[index_map[edge["from"]],
index_map[edge["to"]],
edgemap[get_unqualified_id(edge["type"])]] = 1.0
processed_nodes.extend(new_nodes)
prev_size += new_nodes_per_iter
all_num_nodes.append(num_new_nodes)
all_node_ids.append(new_node_ids)
all_edges.append(next_edges)
all_node_strengths.append(new_node_strengths)
return np.stack(all_num_nodes), np.stack(all_node_strengths), np.stack(all_node_ids), np.stack(all_edges)
def convert_story(story, wordmap, answer_map, graph_node_map, graph_edge_map, new_nodes_per_iter, dynamic=True):
"""
Converts a story in format
([(sentence, graph)], [(index, question_arr, answer)])
to a consolidated story in format
(sentence_arr, [graph_arr_dict], [(index, question_arr, answer)])
and also replaces words according to the input maps
"""
sents_graphs, query, answer = story
sentence_arr = [[wordmap[w] for w in s] for s,g in sents_graphs]
graphs = convert_graph([g for s,g in sents_graphs], graph_node_map, graph_edge_map, new_nodes_per_iter, dynamic)
query_arr = [wordmap[w] for w in query]
answer_arr = [answer_map[w] for w in answer]
return (sentence_arr, graphs, query_arr, answer_arr)
def process_story(s,bucket_len):
return convert_story(pad_story(s, bucket_len, sentence_length), wordmap, answer_map, graph_node_map, graph_edge_map, new_nodes_per_iter, dynamic)
def bucket_stories(stories, buckets, wordmap, answer_map, graph_node_map, graph_edge_map, sentence_length, new_nodes_per_iter, dynamic=True):
return [ [process_story(story,bmax) for story in stories if bstart < len(story[0]) <= bmax]
for bstart, bmax in zip([0]+buckets,buckets)]
def prepare_stories(stories, dynamic=True):
sentence_length = max(get_max_sentence_length(stories), get_max_query_length(stories))
buckets = get_buckets(stories)
wordlist, wordmap = get_wordlist(stories)
anslist, ansmap = get_answer_list(stories)
new_nodes_per_iter = get_max_nodes_per_iter(stories)
graph_node_list, graph_node_map, graph_edge_list, graph_edge_map = get_graph_lists(stories)
bucketed = bucket_stories(stories, buckets, wordmap, ansmap, graph_node_map, graph_edge_map, sentence_length, new_nodes_per_iter, dynamic)
return sentence_length, new_nodes_per_iter, buckets, wordlist, anslist, graph_node_list, graph_edge_list, bucketed
def print_batch(story, wordlist, anslist, file=sys.stdout):
sents, query, answer = story
for batch,(s,q,a) in enumerate(zip(sents,query,answer)):
file.write("Story {}\n".format(batch))
for sent in s:
file.write(" ".join([wordlist[word] for word in sent]) + "\n")
file.write(" ".join(wordlist[word] for word in q) + "\n")
file.write(" ".join(anslist[word] for word in a.nonzero()[1]) + "\n")
MetadataList = collections.namedtuple("MetadataList", ["sentence_length", "new_nodes_per_iter", "buckets", "wordlist", "anslist", "graph_node_list", "graph_edge_list"])
PreppedStory = collections.namedtuple("PreppedStory", ["converted", "sentences", "query", "answer"])
def generate_metadata(stories, dynamic=True):
sentence_length = max(get_max_sentence_length(stories), get_max_query_length(stories))
buckets = get_buckets(stories)
wordlist, wordmap = get_wordlist(stories)
anslist, ansmap = get_answer_list(stories)
new_nodes_per_iter = get_max_nodes_per_iter(stories)
graph_node_list, graph_node_map, graph_edge_list, graph_edge_map = get_graph_lists(stories)
metadata = MetadataList(sentence_length, new_nodes_per_iter, buckets, wordlist, anslist, graph_node_list, graph_edge_list)
return metadata
def preprocess_stories(stories, savedir, dynamic=True, metadata_file=None):
if metadata_file is None:
metadata = generate_metadata(stories, dynamic)
else:
with open(metadata_file,'rb') as f:
metadata = pickle.load(f)
buckets = get_buckets(stories)
sentence_length, new_nodes_per_iter, old_buckets, wordlist, anslist, graph_node_list, graph_edge_list = metadata
metadata = metadata._replace(buckets=buckets)
if not os.path.exists(savedir):
os.makedirs(savedir)
with open(os.path.join(savedir,'metadata.p'),'wb') as f:
pickle.dump(metadata, f)
bucketed_files = [[] for _ in buckets]
for i,story in enumerate(stories):
bucket_idx, cur_bucket = next(((i,bmax) for (i,(bstart, bmax)) in enumerate(zip([0]+buckets,buckets))
if bstart < len(story[0]) <= bmax), (None,None))
assert cur_bucket is not None, "Couldn't put story of length {} into buckets {}".format(len(story[0]), buckets)
bucket_dir = os.path.join(savedir, "bucket_{}".format(cur_bucket))
if not os.path.exists(bucket_dir):
os.makedirs(bucket_dir)
story_fn = os.path.join(bucket_dir, "story_{}.pz".format(i))
sents_graphs, query, answer = story
sents = [s for s,g in sents_graphs]
cvtd = convert_story(pad_story(story, cur_bucket, sentence_length), list_to_map(wordlist), list_to_map(anslist), list_to_map(graph_node_list), list_to_map(graph_edge_list), new_nodes_per_iter, dynamic)
prepped = PreppedStory(cvtd, sents, query, answer)
with gzip.open(story_fn, 'wb') as zf:
pickle.dump(prepped, zf)
bucketed_files[bucket_idx].append(os.path.relpath(story_fn, savedir))
gc.collect() # we don't want to use too much memory, so try to clean it up
with open(os.path.join(savedir,'file_list.p'),'wb') as f:
pickle.dump(bucketed_files, f)
def main(file, dynamic, metadata_file=None):
stories = get_stories(file)
dirname, ext = os.path.splitext(file)
preprocess_stories(stories, dirname, dynamic, metadata_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse a graph file')
parser.add_argument("file", help="Graph file to parse")
parser.add_argument("--static", dest="dynamic", action="store_false", help="Don't use dynamic nodes")
parser.add_argument("--metadata-file", default=None, help="Use this particular metadata file instead of building it from scratch")
args = vars(parser.parse_args())
main(**args)
|
src/examples/plot_costs.py | zhhengcs/sunny-side-up | 581 | 12781176 | #!/usr/bin/env python
import os
import json
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import argparse
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--start", default=25, type=int)
arg_parser.add_argument("cost_file", default="metrics_costs.json", nargs="?")
args = arg_parser.parse_args()
def plot_costs(json_path, path_prefix=""):
with open(json_path) as f:
json_obj = json.load(f)
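# The JSON file holds one list of per-batch costs for each epoch.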
#df = np.array(json_obj)
for idx, epoch in enumerate(json_obj):
print idx, ":"
costs_epoch = np.array(list(enumerate(epoch)))
plt.figure()
plt.plot(costs_epoch[args.start:,0], costs_epoch[args.start:,1])
plt.savefig(os.path.join(path_prefix, "costs_{}.png".format(idx)))
plt.close()
if __name__=="__main__":
plot_costs(args.cost_file)
|
tests/unit/test_utils.py | shkumagai/python-ndb | 137 | 12781247 | <reponame>shkumagai/python-ndb
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
try:
from unittest import mock
except ImportError: # pragma: NO PY3 COVER
import mock
import pytest
from google.cloud.ndb import utils
class Test_asbool:
@staticmethod
def test_None():
assert utils.asbool(None) is False
@staticmethod
def test_bool():
assert utils.asbool(True) is True
assert utils.asbool(False) is False
@staticmethod
def test_truthy_int():
assert utils.asbool(0) is False
assert utils.asbool(1) is True
@staticmethod
def test_truthy_string():
assert utils.asbool("Y") is True
assert utils.asbool("f") is False
def test_code_info():
with pytest.raises(NotImplementedError):
utils.code_info()
def test_decorator():
with pytest.raises(NotImplementedError):
utils.decorator()
def test_frame_info():
with pytest.raises(NotImplementedError):
utils.frame_info()
def test_func_info():
with pytest.raises(NotImplementedError):
utils.func_info()
def test_gen_info():
with pytest.raises(NotImplementedError):
utils.gen_info()
def test_get_stack():
with pytest.raises(NotImplementedError):
utils.get_stack()
class Test_logging_debug:
@staticmethod
@mock.patch("google.cloud.ndb.utils.DEBUG", False)
def test_noop():
log = mock.Mock(spec=("debug",))
utils.logging_debug(log, "hello dad! {} {where}", "I'm", where="in jail")
log.debug.assert_not_called()
@staticmethod
@mock.patch("google.cloud.ndb.utils.DEBUG", True)
def test_log_it():
log = mock.Mock(spec=("debug",))
utils.logging_debug(log, "hello dad! {} {where}", "I'm", where="in jail")
log.debug.assert_called_once_with("hello dad! I'm in jail")
def test_positional():
@utils.positional(2)
def test_func(a=1, b=2, **kwargs):
return a, b
@utils.positional(1)
def test_func2(a=3, **kwargs):
return a
with pytest.raises(TypeError):
test_func(1, 2, 3)
with pytest.raises(TypeError):
test_func2(1, 2)
assert test_func(4, 5, x=0) == (4, 5)
assert test_func(6) == (6, 2)
assert test_func2(6) == 6
def test_keyword_only():
@utils.keyword_only(foo=1, bar=2, baz=3)
def test_kwonly(**kwargs):
return kwargs["foo"], kwargs["bar"], kwargs["baz"]
with pytest.raises(TypeError):
test_kwonly(faz=4)
assert test_kwonly() == (1, 2, 3)
assert test_kwonly(foo=3, bar=5, baz=7) == (3, 5, 7)
assert test_kwonly(baz=7) == (1, 2, 7)
def test_threading_local():
assert utils.threading_local is threading.local
def test_tweak_logging():
with pytest.raises(NotImplementedError):
utils.tweak_logging()
def test_wrapping():
with pytest.raises(NotImplementedError):
utils.wrapping()
|
test/test_cairopen.py | colinmford/coldtype | 142 | 12781260 | import unittest
from coldtype.pens.cairopen import CairoPen
from pathlib import Path
from coldtype.color import hsl
from coldtype.geometry import Rect
from coldtype.text.composer import StSt, Font
from coldtype.pens.datpen import DATPen, DATPens
from PIL import Image
import drawBot as db
import imagehash
import contextlib
co = Font.Cacheable("assets/ColdtypeObviously-VF.ttf")
renders = Path("test/renders/cairo")
renders.mkdir(parents=True, exist_ok=True)
def hash_img(path):
if path.exists():
return (
imagehash.colorhash(Image.open(path)),
imagehash.average_hash(Image.open(path)))
else:
return -1
@contextlib.contextmanager
def test_image(test:unittest.TestCase, path, rect=Rect(300, 300)):
img = (renders / path)
hash_before = hash_img(img)
if img.exists():
img.unlink()
yield(img, rect)
hash_after = hash_img(img)
test.assertEqual(hash_after, hash_before)
test.assertEqual(img.exists(), True)
class TestCairoPen(unittest.TestCase):
def test_cairo_pdf(self):
r = Rect(300, 300)
pdf = renders / "test_cairo.pdf"
dp = (StSt("CDEL", co, 100, wdth=0.5)
.pens()
.align(r))
CairoPen.Composite(dp, r, pdf)
self.assertEqual(len(dp), 4)
self.assertEqual(type(dp), DATPens)
def test_cairo_png(self):
with test_image(self, "test_cairo.png") as (i, r):
rr = Rect(0, 0, 100, 100)
dp = (DATPen()
.define(r=rr, c=75)
.gs("$r↗ $r↓|↘|$c $r↖|↙|$c")
.align(r)
.scale(1.2)
.rotate(180)
.f(hsl(0.5, a=0.1))
.s(hsl(0.9))
.sw(5))
CairoPen.Composite(dp, r, i)
self.assertEqual(len(dp.value), 4)
self.assertEqual(type(dp), DATPen)
if __name__ == "__main__":
unittest.main() |
start_mirt_pipeline.py | hmirin/guacamole | 141 | 12781293 | <reponame>hmirin/guacamole
#!/usr/bin/env python
"""This file will take you all the way from a CSV of student performance on
test items to trained parameters describing the difficulties of the assessment
items.
The parameters can be used to identify the different concepts in your
assessment items, and to drive your own adaptive test. The mirt_engine python
file included here can be used to run an adaptive pretest that will provide an
adaptive set of assessment items if you provide information about whether the
questions are being answered correctly or incorrectly.
Example Use:
with a file called my_data.csv call
./start_mirt_pipeline -i path/to/my_data.csv
let a1_time.json be the name of the output json file
(Congrats! Examine that for information about item difficulty!)
To run an adaptive test with your test items:
./run_adaptive_test.py -i a1_time.json
This will open an interactive session where the test will ask you questions
according to whatever will cause the model to gain the most information to
predict your abilities.
Authors: <NAME>, <NAME>, <NAME>, <NAME>
(2014)
"""
import argparse
import datetime
import multiprocessing
import os
import shutil
import sys
from mirt import mirt_train_EM, generate_predictions, score
from mirt import visualize, adaptive_pretest, generate_responses
from train_util import model_training_util
# Necessary on some systems to make sure all cores are used. If not all
# cores are being used and you'd like a speedup, pip install affinity
try:
import affinity
affinity.set_process_affinity_mask(0, 2 ** multiprocessing.cpu_count() - 1)
except NotImplementedError:
pass
except ImportError:
sys.stderr.write('If you find that not all cores are being '
'used, try installing affinity.\n')
def get_command_line_arguments(arguments=None):
"""Gets command line arguments passed in when called, or
can be called from within a program.
Parses input from the command line into options for running
the MIRT model. For more fine-grained options, look at
mirt_train_EM.py
"""
parser = argparse.ArgumentParser()
parser.add_argument("--generate", action="store_true",
help=("Generate fake training data."))
parser.add_argument("--train", action="store_true",
help=("Train a model from training data."))
parser.add_argument("--visualize", action="store_true",
help=("Visualize a trained model."))
parser.add_argument("--test", action="store_true",
help=("Take an adaptive test from a trained model."))
parser.add_argument("--score", action="store_true",
help=("Score the responses of each student."))
parser.add_argument("--report", action="store_true",
help=("Report on the parameters of each exercise."))
parser.add_argument("--roc_viz", action="store_true",
help=("Examine the roc curve for the current model"
" on the data in the data file."))
parser.add_argument("--sigmoid_viz", action="store_true",
help=("Examine the sigmoids generated for the model in"
" the model file."))
parser.add_argument(
"-d", "--data_file",
default=os.path.dirname(
os.path.abspath(__file__)) + '/sample_data/all.responses',
help=("Name of file where data of interest is located."))
parser.add_argument(
'-a', '--abilities', default=1, type=int,
help='The dimensionality/number of abilities.')
parser.add_argument(
'-s', '--num_students', default=500, type=int,
help="Number of students to generate data for. Only meaningful when "
"generating fake data - otherwise it's read from the data file.")
parser.add_argument(
'-p', '--num_problems', default=10, type=int,
help="Number of problems to generate data for. Only meaningful when "
"generating fake data - otherwise it's read from the data file.")
parser.add_argument("-t", "--time", action="store_true",
help=("Whether to include time as a parameter."
"If you do not select time, the 'time' field"
"in your data is ignored."))
parser.add_argument(
'-w', '--workers', type=int, default=1,
help=("The number of processes to use to parallelize mirt training"))
parser.add_argument(
"-n", "--num_epochs", type=int, default=20,
help=("The number of EM iterations to do during learning"))
parser.add_argument(
"-o", "--model_directory",
default=os.path.dirname(
os.path.abspath(__file__)) + '/sample_data/models/',
help=("The directory to write models and other output"))
parser.add_argument(
"-m", "--model",
default=os.path.dirname(
os.path.abspath(__file__)) + '/sample_data/models/model.json',
help=("The location of the model (to write if training, and to read if"
" visualizing or testing."))
parser.add_argument(
"-q", "--num_replicas", type=int, default=1, help=(
"The number of copies of the data to train on. If there is too "
"little training data, increase this number in order to maintain "
"multiple samples from the abilities vector for each student. A "
"sign that there is too little training data is if the update step"
" length ||dcouplings|| remains large."))
parser.add_argument(
"-i", "--items", type=int, default=5, help=(
"Number of items to use in adaptive test."))
if arguments:
arguments = parser.parse_args(arguments)
else:
arguments = parser.parse_args()
# Support file paths in the form of "~/blah", which python
# doesn't normally recognise
if arguments.data_file:
arguments.data_file = os.path.expanduser(arguments.data_file)
if arguments.model_directory:
arguments.model_directory = os.path.expanduser(
arguments.model_directory)
if arguments.model:
arguments.model = os.path.expanduser(arguments.model)
# When visualize is true, we do all visualizations
if arguments.visualize:
arguments.roc_viz = True
arguments.sigmoid_viz = True
arguments.report = True
# if we haven't been instructed to do anything, then show the help text
if not (arguments.generate or arguments.train
or arguments.visualize or arguments.test
or arguments.roc_viz or arguments.sigmoid_viz
or arguments.report or arguments.score):
print ("\nMust specify at least one task (--generate, --train,"
" --visualize, --test, --report, --roc_viz, --sigmoid_viz, "
"--score).\n")
parser.print_help()
# Save the current time for reference when looking at generated models.
DATE_FORMAT = '%Y-%m-%d-%H-%M-%S'
arguments.datetime = str(datetime.datetime.now().strftime(DATE_FORMAT))
return arguments
def save_model(arguments):
"""Look at all generated models, and save the most recent to the correct
location"""
latest_model = get_latest_parameter_file_name(arguments)
print "Saving model to %s" % arguments.model
shutil.copyfile(latest_model, arguments.model)
def get_latest_parameter_file_name(arguments):
"""Get the most recent of many parameter files in a directory.
There will be many .npz files written; we take the last one.
"""
params = gen_param_str(arguments)
path = arguments.model_directory + params + '/'
npz_files = os.listdir(path)
npz_files.sort(key=lambda fname: fname.split('_')[-1])
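# Sort by the trailing component of each file name so the most recently written parameter file ends up last.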
return path + npz_files[-1]
def main():
"""Get arguments from the command line and runs with those arguments."""
arguments = get_command_line_arguments()
run_with_arguments(arguments)
def make_necessary_directories(arguments):
"""Ensure that output directories for the data we'll be writing exist."""
roc_dir = arguments.model_directory + 'rocs/'
model_training_util.mkdir_p([roc_dir])
def gen_param_str(arguments):
"""Transform data about current run into a param string for file names."""
time_str = 'time' if arguments.time else 'no_time'
return "%s_%s_%s" % (arguments.abilities, time_str, arguments.datetime)
def generate_model_with_parameters(arguments):
"""Trains a model with the given parameters, saving results."""
param_str = gen_param_str(arguments)
out_dir_name = arguments.model_directory + param_str + '/'
model_training_util.mkdir_p(out_dir_name)
# to set more fine-grained parameters about MIRT training, look at
# the arguments at mirt/mirt_train_EM.py
mirt_train_params = [
'-a', str(arguments.abilities),
'-w', str(arguments.workers),
'-n', str(arguments.num_epochs),
'-f', arguments.model_directory + 'train.responses',
'-o', out_dir_name,
]
if arguments.time:
mirt_train_params.append('--time')
mirt_train_EM.run_programmatically(mirt_train_params)
def generate_roc_curve_from_model(arguments):
"""Read results from each model trained and generate roc curves."""
roc_dir = arguments.model_directory + 'rocs/'
roc_file = roc_dir + arguments.datetime
test_file = arguments.model_directory + 'test.responses'
return generate_predictions.load_and_simulate_assessment(
arguments.model, roc_file, test_file)
def run_with_arguments(arguments):
"""Generate data, train a model, visualize your trained data, and score
students based on a trained model.
"""
params = gen_param_str(arguments)
# Set up directories
make_necessary_directories(arguments)
if arguments.generate:
print 'Generating Responses'
generate_responses.run(arguments)
print 'Generated responses for %d students and %d problems' % (
arguments.num_students, arguments.num_problems)
if arguments.train:
# Only re-separate into test and train when resume_from_file
# is False.
# Separate provided data file into a train and test set.
model_training_util.sep_into_train_and_test(arguments)
print 'Training MIRT models'
generate_model_with_parameters(arguments)
save_model(arguments)
if arguments.roc_viz:
print 'Generating ROC for %s' % arguments.model
roc_curve = generate_roc_curve_from_model(arguments)
print 'Visualizing roc for %s' % arguments.model
visualize.show_roc({params: [r for r in roc_curve]})
if arguments.sigmoid_viz:
print 'Visualizing sigmoids for %s' % arguments.model
visualize.show_exercises(arguments.model)
if arguments.test:
print 'Starting adaptive pretest'
adaptive_pretest.main(arguments.model, arguments.items)
if arguments.report:
print "Generating problems report based on params file."
visualize.print_report(arguments.model)
if arguments.score:
print "Scoring all students based on trained test file"
score.score_students(arguments.model, arguments.data_file)
if __name__ == '__main__':
main()
|
samples/python/46.teams-auth/bots/__init__.py | Aliacf21/BotBuilder-Samples | 1,998 | 12781296 | <reponame>Aliacf21/BotBuilder-Samples<gh_stars>1000+
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .dialog_bot import DialogBot
from .teams_bot import TeamsBot
__all__ = ["DialogBot", "TeamsBot"]
|
tests/test_model_methods/test_load_all.py | naterenegar/ormar | 905 | 12781304 | <filename>tests/test_model_methods/test_load_all.py<gh_stars>100-1000
from typing import List
import databases
import pytest
import sqlalchemy
import ormar
from tests.settings import DATABASE_URL
database = databases.Database(DATABASE_URL, force_rollback=True)
metadata = sqlalchemy.MetaData()
class BaseMeta(ormar.ModelMeta):
database = database
metadata = metadata
class Language(ormar.Model):
class Meta(BaseMeta):
tablename = "languages"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
level: str = ormar.String(max_length=150, default="Beginner")
class CringeLevel(ormar.Model):
class Meta(BaseMeta):
tablename = "levels"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
language = ormar.ForeignKey(Language)
class NickName(ormar.Model):
class Meta(BaseMeta):
tablename = "nicks"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100, nullable=False, name="hq_name")
is_lame: bool = ormar.Boolean(nullable=True)
level: CringeLevel = ormar.ForeignKey(CringeLevel)
class HQ(ormar.Model):
class Meta(BaseMeta):
tablename = "hqs"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100, nullable=False, name="hq_name")
nicks: List[NickName] = ormar.ManyToMany(NickName)
class Company(ormar.Model):
class Meta(BaseMeta):
tablename = "companies"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100, nullable=False, name="company_name")
founded: int = ormar.Integer(nullable=True)
hq: HQ = ormar.ForeignKey(HQ, related_name="companies")
@pytest.fixture(autouse=True, scope="module")
def create_test_database():
engine = sqlalchemy.create_engine(DATABASE_URL)
metadata.drop_all(engine)
metadata.create_all(engine)
yield
metadata.drop_all(engine)
@pytest.mark.asyncio
async def test_load_all_fk_rel():
async with database:
async with database.transaction(force_rollback=True):
hq = await HQ.objects.create(name="Main")
company = await Company.objects.create(name="Banzai", founded=1988, hq=hq)
hq = await HQ.objects.get(name="Main")
await hq.load_all()
assert hq.companies[0] == company
assert hq.companies[0].name == "Banzai"
assert hq.companies[0].founded == 1988
hq2 = await HQ.objects.select_all().get(name="Main")
assert hq2.companies[0] == company
assert hq2.companies[0].name == "Banzai"
assert hq2.companies[0].founded == 1988
@pytest.mark.asyncio
async def test_load_all_many_to_many():
async with database:
async with database.transaction(force_rollback=True):
nick1 = await NickName.objects.create(name="BazingaO", is_lame=False)
nick2 = await NickName.objects.create(name="Bazinga20", is_lame=True)
hq = await HQ.objects.create(name="Main")
await hq.nicks.add(nick1)
await hq.nicks.add(nick2)
hq = await HQ.objects.get(name="Main")
await hq.load_all()
assert hq.nicks[0] == nick1
assert hq.nicks[0].name == "BazingaO"
assert hq.nicks[1] == nick2
assert hq.nicks[1].name == "Bazinga20"
hq2 = await HQ.objects.select_all().get(name="Main")
assert hq2.nicks[0] == nick1
assert hq2.nicks[0].name == "BazingaO"
assert hq2.nicks[1] == nick2
assert hq2.nicks[1].name == "Bazinga20"
@pytest.mark.asyncio
async def test_load_all_with_order():
async with database:
async with database.transaction(force_rollback=True):
nick1 = await NickName.objects.create(name="Barry", is_lame=False)
nick2 = await NickName.objects.create(name="Joe", is_lame=True)
hq = await HQ.objects.create(name="Main")
await hq.nicks.add(nick1)
await hq.nicks.add(nick2)
hq = await HQ.objects.get(name="Main")
await hq.load_all(order_by="-nicks__name")
assert hq.nicks[0] == nick2
assert hq.nicks[0].name == "Joe"
assert hq.nicks[1] == nick1
assert hq.nicks[1].name == "Barry"
await hq.load_all()
assert hq.nicks[0] == nick1
assert hq.nicks[1] == nick2
hq2 = (
await HQ.objects.select_all().order_by("-nicks__name").get(name="Main")
)
assert hq2.nicks[0] == nick2
assert hq2.nicks[1] == nick1
hq3 = await HQ.objects.select_all().get(name="Main")
assert hq3.nicks[0] == nick1
assert hq3.nicks[1] == nick2
@pytest.mark.asyncio
async def test_loading_reversed_relation():
async with database:
async with database.transaction(force_rollback=True):
hq = await HQ.objects.create(name="Main")
await Company.objects.create(name="Banzai", founded=1988, hq=hq)
company = await Company.objects.get(name="Banzai")
await company.load_all()
assert company.hq == hq
company2 = await Company.objects.select_all().get(name="Banzai")
assert company2.hq == hq
@pytest.mark.asyncio
async def test_loading_nested():
async with database:
async with database.transaction(force_rollback=True):
language = await Language.objects.create(name="English")
level = await CringeLevel.objects.create(name="High", language=language)
level2 = await CringeLevel.objects.create(name="Low", language=language)
nick1 = await NickName.objects.create(
name="BazingaO", is_lame=False, level=level
)
nick2 = await NickName.objects.create(
name="Bazinga20", is_lame=True, level=level2
)
hq = await HQ.objects.create(name="Main")
await hq.nicks.add(nick1)
await hq.nicks.add(nick2)
hq = await HQ.objects.get(name="Main")
await hq.load_all(follow=True)
assert hq.nicks[0] == nick1
assert hq.nicks[0].name == "BazingaO"
assert hq.nicks[0].level.name == "High"
assert hq.nicks[0].level.language.name == "English"
assert hq.nicks[1] == nick2
assert hq.nicks[1].name == "Bazinga20"
assert hq.nicks[1].level.name == "Low"
assert hq.nicks[1].level.language.name == "English"
hq2 = await HQ.objects.select_all(follow=True).get(name="Main")
assert hq2.nicks[0] == nick1
assert hq2.nicks[0].name == "BazingaO"
assert hq2.nicks[0].level.name == "High"
assert hq2.nicks[0].level.language.name == "English"
assert hq2.nicks[1] == nick2
assert hq2.nicks[1].name == "Bazinga20"
assert hq2.nicks[1].level.name == "Low"
assert hq2.nicks[1].level.language.name == "English"
hq5 = await HQ.objects.select_all().get(name="Main")
assert len(hq5.nicks) == 2
await hq5.nicks.select_all(follow=True).all()
assert hq5.nicks[0] == nick1
assert hq5.nicks[0].name == "BazingaO"
assert hq5.nicks[0].level.name == "High"
assert hq5.nicks[0].level.language.name == "English"
assert hq5.nicks[1] == nick2
assert hq5.nicks[1].name == "Bazinga20"
assert hq5.nicks[1].level.name == "Low"
assert hq5.nicks[1].level.language.name == "English"
await hq.load_all(follow=True, exclude="nicks__level__language")
assert len(hq.nicks) == 2
assert hq.nicks[0].level.language is None
assert hq.nicks[1].level.language is None
hq3 = (
await HQ.objects.select_all(follow=True)
.exclude_fields("nicks__level__language")
.get(name="Main")
)
assert len(hq3.nicks) == 2
assert hq3.nicks[0].level.language is None
assert hq3.nicks[1].level.language is None
await hq.load_all(follow=True, exclude="nicks__level__language__level")
assert len(hq.nicks) == 2
assert hq.nicks[0].level.language is not None
assert hq.nicks[0].level.language.level is None
assert hq.nicks[1].level.language is not None
assert hq.nicks[1].level.language.level is None
await hq.load_all(follow=True, exclude="nicks__level")
assert len(hq.nicks) == 2
assert hq.nicks[0].level is None
assert hq.nicks[1].level is None
await hq.load_all(follow=True, exclude="nicks")
assert len(hq.nicks) == 0
|
features/steps/log-scale-axes.py | eaton-lab/toyplot | 438 | 12781313 | <reponame>eaton-lab/toyplot<filename>features/steps/log-scale-axes.py<gh_stars>100-1000
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
from behave import *
import nose
import numpy
import toyplot.data
@given(u'values from -1000 to -1')
def step_impl(context):
context.x = numpy.linspace(-1000, -1, 100)
@given(u'values from -1000 to -0.01')
def step_impl(context):
context.x = numpy.linspace(-1000, -0.01, 100)
@given(u'values from -1000 to 0')
def step_impl(context):
context.x = numpy.linspace(-1000, 0, 100)
@given(u'values from -1000 to 0.5')
def step_impl(context):
context.x = numpy.linspace(-1000, 0.5, 100)
@given(u'values from -0.5 to 1000')
def step_impl(context):
context.x = numpy.linspace(-0.5, 1000, 100)
@given(u'values from 0 to 1000')
def step_impl(context):
context.x = numpy.linspace(0, 1000, 100)
@given(u'values from 0.01 to 1000')
def step_impl(context):
context.x = numpy.linspace(0.01, 1000, 100)
@given(u'values from 1 to 1000')
def step_impl(context):
context.x = numpy.linspace(1, 1000, 100)
@given(u'values from -1000 to 1000')
def step_impl(context):
context.x = numpy.linspace(-1000, 1000, 100)
@given(u'log 10 axes on x and y')
def step_impl(context):
context.axes = context.canvas.cartesian(xscale="log10", yscale="log10")
@given(u'log 2 axes on x and y')
def step_impl(context):
context.axes = context.canvas.cartesian(xscale="log2", yscale="log2")
@given(u'log 10 axes on x and y with custom format')
def step_impl(context):
context.axes = context.canvas.cartesian(xscale="log10", yscale="log10")
context.axes.x.ticks.locator = toyplot.locator.Log(base=10, format="{base}^{exponent}")
context.axes.y.ticks.locator = toyplot.locator.Log(base=10, format="{base}^{exponent}")
@when(u'plotting x, x with markers')
def step_impl(context):
context.axes.plot(context.x, context.x, marker="o")
@given(u'squared values from 0 to 10')
def step_impl(context):
context.values = numpy.linspace(0, 10) ** 2
@given(u'squared values from -10 to 0')
def step_impl(context):
context.values = -(numpy.linspace(10, 0) ** 2)
@given(u'log 10 axes on y with domain min 10')
def step_impl(context):
context.axes = context.canvas.cartesian(yscale="log10")
context.axes.y.domain.min = 10
@given(u'log 10 axes on y with domain max -10')
def step_impl(context):
context.axes = context.canvas.cartesian(yscale="log10")
context.axes.y.domain.max = -10
@when(u'plotting the values with bars')
def step_impl(context):
context.axes.bars(context.values)
|
PyLeap_CPB_EyeLights_LED_Glasses_Sparkle/code.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 12781322 | # SPDX-FileCopyrightText: 2021 <NAME>
# SPDX-License-Identifier: MIT
import board
from adafruit_led_animation.animation.sparkle import Sparkle
from adafruit_led_animation.color import PURPLE
from adafruit_led_animation.sequence import AnimationSequence
from adafruit_is31fl3741.adafruit_ledglasses import MUST_BUFFER, LED_Glasses
from adafruit_is31fl3741.led_glasses_animation import LED_Glasses_Animation
glasses = LED_Glasses(board.I2C(), allocate=MUST_BUFFER)
glasses.set_led_scaling(255)
glasses.global_current = 0xFE
glasses.enable = True
pixels = LED_Glasses_Animation(glasses)
anim2 = Sparkle(pixels, 0.05, PURPLE)
group = AnimationSequence(
anim2, advance_interval=5, auto_reset=True, auto_clear=True
)
while True:
group.animate()
|
plum/util.py | ruancomelli/plum | 153 | 12781336 | <filename>plum/util.py
import abc
import logging
__all__ = ["multihash", "Comparable", "is_in_class", "get_class", "get_context"]
log = logging.getLogger(__name__)
def multihash(*args):
"""Multi-argument order-sensitive hash.
Args:
*args: Objects to hash.
Returns:
int: Hash.
"""
return hash(args)
class Comparable:
"""A mixin that makes instances of the class comparable.
Requires the subclass to just implement `__le__`.
"""
__metaclass__ = abc.ABCMeta
def __eq__(self, other):
return self <= other <= self
def __ne__(self, other):
return not self == other
@abc.abstractmethod
def __le__(self, other):
pass # pragma: no cover
def __lt__(self, other):
return self <= other and self != other
def __ge__(self, other):
return other.__le__(self)
def __gt__(self, other):
return self >= other and self != other
def is_comparable(self, other):
"""Check whether this object is comparable with another one.
Args:
other (:class:`.util.Comparable`): Object to check comparability
with.
Returns:
bool: `True` if the object is comparable with `other` and `False`
otherwise.
"""
return self < other or self == other or self > other
def is_in_class(f):
"""Check if a function is part of a class.
Args:
f (function): Function to check.
Returns:
bool: `True` if `f` is part of a class, else `False`.
"""
parts = f.__qualname__.split(".")
return len(parts) >= 2 and parts[-2] != "<locals>"
def _split_parts(f):
qualified_name = f.__module__ + "." + f.__qualname__
return qualified_name.split(".")
def get_class(f):
"""Assuming that `f` is part of a class, get the fully qualified name of the
class.
Args:
f (function): Method to get class name for.
Returns:
str: Fully qualified name of class.
"""
parts = _split_parts(f)
return ".".join(parts[:-1])
def get_context(f):
"""Get the fully qualified name of the context for `f`.
If `f` is part of a class, then the context corresponds to the scope of the class.
If `f` is not part of a class, then the context corresponds to the scope of the
function.
Args:
f (function): Method to get context for.
Returns:
str: Context.
"""
parts = _split_parts(f)
if is_in_class(f):
# Split off function name and class.
return ".".join(parts[:-2])
else:
# Split off function name only.
return ".".join(parts[:-1])
|
tools/spaln/list_spaln_tables.py | ic4f/tools-iuc | 142 | 12781352 | #!/usr/bin/env python3
import argparse
import shlex
import sys
from subprocess import run
from typing import TextIO
def find_common_ancestor_distance(
taxon: str, other_taxon: str, taxonomy_db_path: str, only_canonical: bool
):
canonical = "--only_canonical" if only_canonical else ""
cmd_str = f"taxonomy_util -d {taxonomy_db_path} common_ancestor_distance {canonical} '{other_taxon}' '{taxon}'"
cmd = shlex.split(cmd_str)
proc = run(cmd, encoding="utf8", capture_output=True)
return proc
def find_distances(gnm2tab_file: TextIO, taxon: str, taxonomy_db_path: str):
cmd = ["taxonomy_util", "-d", taxonomy_db_path, "get_id", taxon]
proc = run(cmd, capture_output=True, encoding="utf8")
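# Bail out early if the requested taxon is unknown to the taxonomy database.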
if "not found in" in proc.stderr:
exit("Error: " + proc.stderr.strip())
for line in gnm2tab_file:
fields = line.split("\t")
(species_code, settings, other_taxon) = map(lambda el: el.strip(), fields[:3])
proc = find_common_ancestor_distance(taxon, other_taxon, taxonomy_db_path, True)
ancestor_info = proc.stdout.rstrip()
if proc.stderr != "":
print("Warning:", other_taxon, proc.stderr.rstrip(), file=sys.stderr)
else:
proc = find_common_ancestor_distance(
taxon, other_taxon, taxonomy_db_path, False
)
non_canonical_distance = proc.stdout.split("\t")[0]
print(
non_canonical_distance,
ancestor_info,
species_code,
settings,
other_taxon,
sep="\t",
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Find distance to common ancestor")
parser.add_argument(
"--taxonomy_db", required=True, help="NCBI Taxonomy database (SQLite format)"
)
parser.add_argument(
"--gnm2tab_file",
required=True,
type=argparse.FileType(),
help="gnm2tab file from spal",
)
parser.add_argument("taxon")
args = parser.parse_args()
find_distances(args.gnm2tab_file, args.taxon, args.taxonomy_db)
|
OmniMarkupLib/Renderers/MediaWikiRenderer.py | henumohe/OmniMarkupPreviewer | 476 | 12781363 | <filename>OmniMarkupLib/Renderers/MediaWikiRenderer.py<gh_stars>100-1000
from .base_renderer import *
import os.path
__file__ = os.path.normpath(os.path.abspath(__file__))
__path__ = os.path.dirname(__file__)
@renderer
class MediaWikiRenderer(CommandlineRenderer):
def __init__(self):
super(MediaWikiRenderer, self).__init__(
executable='ruby',
args=['-rubygems', os.path.join(__path__, 'bin/mw2html.rb')])
@classmethod
def is_enabled(cls, filename, syntax):
if syntax == 'text.html.mediawiki':
return True
return filename.endswith('.mediawiki') or filename.endswith('.wiki')
|
Python/Battery_Full_Charged_Notifier/battery_full_charged_notifier.pyw | iamakkkhil/Rotten-Scripts | 1,127 | 12781364 | import psutil  # library used to read system/battery details
import time
import pyttsx3  # offline text-to-speech library
from win10toast import ToastNotifier  # Windows 10 toast notifications (also requires win32api/pywin32)
import threading  # run the toast notification and the speech at the same time
toaster = ToastNotifier()
x=pyttsx3.init()
x.setProperty('rate',130)
x.setProperty('volume',8)
count = 0
def show_notification(show_text):
toaster.show_toast(show_text,
icon_path='battery.ico',
duration=10)
# loop the toaster over some period of time
while toaster.notification_active():
time.sleep(0.1)
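# Illustrative shape of the psutil call used in monitor() below (values are examples):
#   psutil.sensors_battery()  # -> sbattery(percent=93, secsleft=7520, power_plugged=True)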
def monitor():
    global count  # `count` is rebound below; without this declaration Python raises UnboundLocalError
    while True:
        time.sleep(10)
        battery = psutil.sensors_battery()
        plugged = battery.power_plugged
        percent = int(battery.percent)
        if percent == 100:
            if plugged:
                processThread = threading.Thread(target=show_notification, args=("Laptop Fully Charged",))  # <- note extra ','
                processThread.start()
                x.say("Laptop is fully charged, please unplug the charger")
                x.runAndWait()
        elif percent == 90:
            if plugged and count == 0:
                processThread = threading.Thread(target=show_notification, args=("Your battery is at 90%, please unplug the charger",))  # <- note extra ','
                processThread.start()
                x.say("Your battery is at 90 percent")
                x.runAndWait()
                count = count + 1
if __name__ == "__main__":
monitor() |
A1014280203/7/7.py | saurabh896/python-1 | 3,976 | 12781369 | import os
code_lines = list()
notation_lines = list()
blank_lines = list()
def process_file(filename):
global code_lines
global notation_lines
global blank_lines
with open(filename, 'r') as file:
for line in file.readlines():
_line = line.strip()
if not _line:
blank_lines.append(_line)
elif _line.startswith('#'):
notation_lines.append(_line)
else:
code_lines.append(_line)
def show_result():
global code_lines
global notation_lines
global blank_lines
print('-'*20)
print('code:', len(code_lines))
for line in code_lines:
print(line)
print('-' * 20)
print('notation:', len(notation_lines))
for line in notation_lines:
print(line)
print('-' * 20)
print('blank:', len(blank_lines))
code_lines.clear()
notation_lines.clear()
blank_lines.clear()
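# Illustrative run (the path is an assumption): for every *.py file found it prints the code and
# comment ("notation") lines followed by the counts of code, comment and blank lines.
#   process_files('.')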
def process_files(path='../6'):
files = os.listdir(path)
for file in files:
if file.endswith('.py'):
print('='*30)
print('current file:', os.path.join(path, file))
process_file(os.path.join(path, file))
show_result()
process_files() |
users/models.py | hrbhat/twissandra | 308 | 12781383 | # Nope, we're using Cassandra :) |
ci/release.py | steve-louis/mist-ce | 778 | 12781387 | <gh_stars>100-1000
#!/usr/bin/env python
import os
import sys
import hashlib
import argparse
import magic
import requests
def main():
args = parse_args()
request = Client(args.owner, args.repo, args.token)
update_release(
request, args.tag, msg=args.msg, files=args.files,
prerelease=args.prerelease, draft=args.draft,
remove_undefined_files=args.remove_undefined_files,
)
def parse_args():
argparser = argparse.ArgumentParser(
description=("Create/Update Github release based on git tag. When "
"creating a release that doesn't exist, it'll be marked "
"as stable (not prerelease) and public (not draft), "
"unless otherwise specified. When updating a release, "
"only fields specified by corresponding options will be "
"modified."))
argparser.add_argument('owner', help="The github repo's owner")
argparser.add_argument('repo', help="The github repo's name")
argparser.add_argument('tag', help="Tag name for which to make release")
argparser.add_argument(
'-m', '--msg', default=None,
help=("Message for the release. Either the message as a string, or "
"the filename of a text file preceded by '@'. Use an empty "
"string '' to set an empty message."))
argparser.add_argument(
'-f', '--files', nargs='+', metavar='FILE',
help="Files to upload as release assets.")
argparser.add_argument(
'--remove-undefined-files', action='store_true',
help=("If specified, remove any preexisting files from the release "
"that aren't currently specified with the `--files` option."))
argparser.add_argument(
'--prerelease', dest='prerelease', default=None, action='store_true',
help="Mark release as prerelease.")
argparser.add_argument(
'--no-prerelease', dest='prerelease', default=None,
action='store_false',
help="Mark release as regular release, no prerelease.")
argparser.add_argument(
'--draft', dest='draft', default=None, action='store_true',
help="Mark release as draft.")
argparser.add_argument(
'--no-draft', dest='draft', default=None, action='store_false',
help="Publish release, unmark as draft.")
argparser.add_argument(
'--token', default=os.getenv('GITHUB_API_TOKEN'),
help=("Github API token to use. Can also be specified as env var "
"GITHUB_API_TOKEN."))
args = argparser.parse_args()
if args.msg and args.msg.startswith('@'):
with open(args.msg[1:], 'r') as fobj:
args.msg = fobj.read()
return args
class Client(object):
def __init__(self, owner, repo, token):
self.owner = owner
self.repo = repo
self.token = token
def __call__(self, url, method='GET', parse_json_resp=True,
api='https://api.github.com', **kwargs):
url = '%s/repos/%s/%s%s' % (api, self.owner, self.repo, url)
headers = kwargs.pop('headers', {})
headers.update({'Authorization': 'token %s' % self.token})
print("Will make %s request to %s: %s" % (method, url, kwargs))
resp = requests.request(method, url, headers=headers, **kwargs)
if not resp.ok:
print(resp.status_code)
print(resp.text)
raise Exception(resp.status_code)
if parse_json_resp:
try:
return resp.json()
except Exception:
print("Error decoding json response")
print(resp.text)
raise
else:
return resp
def print_release(release):
print('-' * 60)
for name, key in [('id', 'id'), ('name', 'name'),
('tag', 'tag_name'), ('ref', 'target_commitish'),
('draft', 'draft'), ('prerelease', 'prerelease')]:
print('%s: %s' % (name, release[key]))
print('assets:')
for asset in release['assets']:
print(' - %s' % asset['name'])
if release['body']:
print('msg: |')
for line in release['body'].splitlines():
print(' %s' % line)
print('-' * 60)
def update_release(request, tag, msg=None, files=None,
draft=None, prerelease=None,
remove_undefined_files=False):
# Check that the repo exists.
resp = request('')
# Find git tag corresponding to release.
resp = request('/tags')
for item in resp:
if item['name'] == tag:
sha = item['commit']['sha']
print("Tag %s points to %s" % (tag, sha))
break
else:
print("Tag %s doesn't exist" % tag)
sys.exit(1)
# Create or update github release.
data = {
'tag_name': tag,
'target_commitish': sha,
'name': tag,
'body': msg,
'draft': draft,
'prerelease': prerelease,
}
for key, val in list(data.items()):
if val is None:
data.pop(key)
for release in request('/releases'):
if release['tag_name'] == tag:
print("Found preexisting release.")
print_release(release)
for key in list(data.keys()):
if data[key] == release[key]:
data.pop(key)
if data:
print("Release already exists, updating.")
release = request('/releases/%s' % release['id'], 'PATCH',
json=data)
print_release(release)
else:
print("No need to modify release's metadata.")
break
else:
print("Creating a new release.")
release = request('/releases', 'POST', json=data)
print_release(release)
# Add or update assets.
assets = list(release['assets'])
for path in files or []:
name = os.path.basename(path)
uploaded = False
for i, asset in enumerate(list(assets)):
if asset['name'] != name:
continue
assets.pop(i)
print("Found already uploaded file '%s'" % path)
md5 = hashlib.md5()
resp = request('/releases/assets/%s' % asset['id'],
headers={'Accept': 'application/octet-stream'},
parse_json_resp=False, stream=True)
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
md5.update(chunk)
md5sum_remote = md5.hexdigest()
md5 = hashlib.md5()
with open(path, 'rb') as fobj:
while True:
chunk = fobj.read(1024)
if not chunk:
break
md5.update(chunk)
md5sum_local = md5.hexdigest()
if md5sum_local == md5sum_remote:
print("Preexisting file matches local file")
uploaded = True
break
print("Deleting preexisting different asset.")
request('/releases/assets/%s' % asset['id'], 'DELETE',
parse_json_resp=False)
if not uploaded:
with open(path, 'rb') as fobj:
ctype = magic.Magic(mime=True).from_file(path)
request('/releases/%s/assets' % release['id'], 'POST',
api='https://uploads.github.com',
headers={'Content-Type': ctype},
params={'name': name}, data=fobj)
if remove_undefined_files:
for asset in assets:
print("Deleting preexisting undefined asset %s." % asset['name'])
request('/releases/assets/%s' % asset['id'], 'DELETE',
parse_json_resp=False)
if __name__ == "__main__":
main()
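# Example invocation (owner/repo/tag and file names are illustrative; a token must be supplied
# via --token or the GITHUB_API_TOKEN environment variable):
#   ./release.py myorg myrepo v1.2.3 -m "Release notes" -f dist/app.tar.gz --no-draft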
|
build/lib/jet_django/__init__.py | lukejamison/jet-dasboard | 193 | 12781388 | VERSION = '0.0.1'
default_app_config = 'jet_django.apps.JetDjangoConfig'
|
tests/suite/test_healthcheck_uri.py | snebel29/kubernetes-ingress | 3,803 | 12781401 | <reponame>snebel29/kubernetes-ingress
import pytest
import requests
from suite.resources_utils import ensure_connection
@pytest.mark.ingresses
@pytest.mark.parametrize('ingress_controller, expected_responses',
[
pytest.param({"extra_args": ["-health-status=true",
"-health-status-uri=/something-va(l)id/blabla"]},
{"/something-va(l)id/blabla": 200, "/nginx-health": 404},
id="custom-health-status-uri"),
pytest.param({"extra_args": ["-health-status=true"]},
{"/something-va(l)id/blabla": 404, "/nginx-health": 200},
id="default-health-status-uri"),
pytest.param({"extra_args": ["-health-status=false"]},
{"/something-va(l)id/blabla": 404, "/nginx-health": 404},
id="disable-health-status")
],
indirect=["ingress_controller"])
class TestHealthStatusURI:
def test_response_code(self, ingress_controller_endpoint, ingress_controller, expected_responses):
for uri in expected_responses:
req_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port}{uri}"
ensure_connection(req_url, expected_responses[uri])
resp = requests.get(req_url)
assert resp.status_code == expected_responses[uri],\
f"Expected {expected_responses[uri]} code for {uri} but got {resp.status_code}"
|
kino-webhook/handler.py | DongjunLee/kino-bot | 109 | 12781404 | import arrow
import json
import requests
def kanban_webhook(event, context):
input_body = json.loads(event['body'])
print(event['body'])
action = input_body["action"]
action_type = action["type"]
if action_type == "createCard":
list_name, card_name = get_create_card(action["data"])
elif action_type == "updateCard":
list_name, card_name = get_update_card(action["data"])
kanban_list = ["DOING", "BREAK", "DONE"]
if list_name in kanban_list:
payload = make_payload(action=list_name, msg=card_name)
r = send_to_kino({"text": payload})
response = {
"statusCode": r.status_code
}
response = {
"statusCode": 400
}
return response
def get_create_card(action_data):
list_name = action_data["list"]["name"].upper()
card_name = action_data["card"]["name"]
return list_name, card_name
def get_update_card(action_data):
list_name = action_data["listAfter"]["name"].upper()
card_name = action_data["card"]["name"]
return list_name, card_name
def make_payload(action=None, msg=None, time=None):
if time is None:
now = arrow.now()
        time = now.format("MMMM D, YYYY") + " at " + now.format("hh:mmA")
payload = {
"action": "KANBAN_" + action,
"msg": msg,
"time": time
}
return json.dumps(payload)
def send_to_kino(data):
return requests.post("https://hooks.slack.com/services/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", data=json.dumps(data))
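# Illustrative Trello webhook payload fragment that kanban_webhook expects (field names follow the
# accesses above; values are examples):
#   {"action": {"type": "updateCard",
#               "data": {"listAfter": {"name": "Done"}, "card": {"name": "Write tests"}}}}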
|
tests/unittests/load_functions/outside_main_code_in_main/main.py | anandagopal6/azure-functions-python-worker | 277 | 12781426 | <reponame>anandagopal6/azure-functions-python-worker
# This function app ensures that the code outside the main() function
# in __init__.py is only loaded once.
from .count import invoke, get_invoke_count, reset_count
invoke()
def main(req):
count = get_invoke_count()
reset_count()
return f'executed count = {count}'
|
layers.py | gemilepus/ShadeSketch | 313 | 12781430 | """
ShadeSketch
https://github.com/qyzdao/ShadeSketch
Learning to Shadow Hand-drawn Sketches
<NAME>, <NAME>, <NAME>
Copyright (C) 2020 The respective authors and Project HAT. All rights reserved.
Licensed under MIT license.
"""
import tensorflow as tf
# import keras
keras = tf.keras
K = keras.backend
Layer = keras.layers.Layer
Conv2D = keras.layers.Conv2D
InputSpec = keras.layers.InputSpec
image_data_format = K.image_data_format
activations = keras.activations
initializers = keras.initializers
regularizers = keras.regularizers
constraints = keras.constraints
class Composite(Layer):
def __init__(self,
data_format='channels_last',
**kwargs):
self.data_format = data_format
super(Composite, self).__init__(**kwargs)
def call(self, inputs):
line_inputs, shade_inputs = inputs
return line_inputs + (shade_inputs + 1) * 0.25
def compute_output_shape(self, input_shape):
return input_shape[0]
class PixelwiseConcat(Layer):
def __init__(self,
data_format='channels_last',
**kwargs):
self.data_format = data_format
super(PixelwiseConcat, self).__init__(**kwargs)
def call(self, inputs):
pixel_inputs, unit_inputs = inputs
        if self.data_format == 'channels_first':
            repeated_unit_inputs = tf.tile(
                K.expand_dims(K.expand_dims(unit_inputs, 2), 2),
                [1, 1, K.shape(pixel_inputs)[2], K.shape(pixel_inputs)[3]]
            )
            # concatenate along the channel axis (axis 1 for channels_first)
            return K.concatenate([pixel_inputs, repeated_unit_inputs], axis=1)
        elif self.data_format == 'channels_last':
            repeated_unit_inputs = tf.tile(
                K.expand_dims(K.expand_dims(unit_inputs, 1), 1),
                [1, K.shape(pixel_inputs)[1], K.shape(pixel_inputs)[2], 1]
            )
            return K.concatenate([pixel_inputs, repeated_unit_inputs])
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
return (input_shape[0][0], input_shape[0][1] + input_shape[1][1], input_shape[0][2], input_shape[0][3])
elif self.data_format == 'channels_last':
return (input_shape[0][0], input_shape[0][1], input_shape[0][2], input_shape[0][3] + input_shape[1][1])
class SubPixelConv2D(Conv2D):
def __init__(self,
filters,
kernel_size,
r,
padding='same',
data_format=None,
strides=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(SubPixelConv2D, self).__init__(
filters=r * r * filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
self.r = r
if hasattr(tf.nn, 'depth_to_space'):
self.depth_to_space = tf.nn.depth_to_space
else:
self.depth_to_space = tf.depth_to_space
def phase_shift(self, I):
if self.data_format == 'channels_first':
return self.depth_to_space(I, self.r, data_format="NCHW")
elif self.data_format == 'channels_last':
return self.depth_to_space(I, self.r, data_format="NHWC")
def call(self, inputs):
return self.phase_shift(super(SubPixelConv2D, self).call(inputs))
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
n, c, h, w = super(SubPixelConv2D, self).compute_output_shape(input_shape)
elif self.data_format == 'channels_last':
n, h, w, c = super(SubPixelConv2D, self).compute_output_shape(input_shape)
if h is not None:
h = int(self.r * h)
if w is not None:
w = int(self.r * w)
c = int(c / (self.r * self.r))
if self.data_format == 'channels_first':
return (n, c, h, w)
elif self.data_format == 'channels_last':
return (n, h, w, c)
def get_config(self):
config = super(Conv2D, self).get_config()
config.pop('rank')
config.pop('dilation_rate')
config['filters'] /= self.r * self.r
config['r'] = self.r
return config
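# Illustrative usage sketch (input shape and filter counts are assumptions): a 2x sub-pixel
# upscale of a channels_last feature map.
#   x = keras.layers.Input(shape=(64, 64, 32))
#   up = SubPixelConv2D(filters=16, kernel_size=3, r=2)(x)   # -> (None, 128, 128, 16)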
class SelfAttention(Layer):
def __init__(self,
data_format='channels_last',
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(SelfAttention, self).__init__(**kwargs)
self.data_format = data_format
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
kernel_size = (1, 1)
self.filters = int(input_shape[channel_axis])
self.kernel_f = self.add_weight(shape=kernel_size + (self.filters, self.filters // 8),
initializer=self.kernel_initializer,
name='kernel_f',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.kernel_g = self.add_weight(shape=kernel_size + (self.filters, self.filters // 8),
initializer=self.kernel_initializer,
name='kernel_g',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.kernel_h = self.add_weight(shape=kernel_size + (self.filters, self.filters),
initializer=self.kernel_initializer,
name='kernel_h',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias_f = self.add_weight(shape=(self.filters // 8,),
initializer=self.bias_initializer,
name='bias_f',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.bias_g = self.add_weight(shape=(self.filters // 8,),
initializer=self.bias_initializer,
name='bias_g',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.bias_h = self.add_weight(shape=(self.filters,),
initializer=self.bias_initializer,
name='bias_h',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias_f = None
self.bias_g = None
self.bias_h = None
self.gamma = self.add_weight(
name='gamma',
shape=(1,),
initializer=initializers.Constant(0)
)
super(SelfAttention, self).build(input_shape)
def call(self, inputs):
f = K.conv2d(inputs,
self.kernel_f,
data_format=self.data_format,
strides=(1, 1),
dilation_rate=(1, 1)) # [bs, h, w, c']
g = K.conv2d(inputs,
self.kernel_g,
data_format=self.data_format,
strides=(1, 1),
dilation_rate=(1, 1)) # [bs, h, w, c']
h = K.conv2d(inputs,
self.kernel_h,
data_format=self.data_format,
strides=(1, 1),
dilation_rate=(1, 1)) # [bs, h, w, c]
if self.use_bias:
f = K.bias_add(f, self.bias_f, data_format=self.data_format) # [bs, h, w, c']
g = K.bias_add(g, self.bias_g, data_format=self.data_format) # [bs, h, w, c']
h = K.bias_add(h, self.bias_h, data_format=self.data_format) # [bs, h, w, c]
# N = h * w
s = K.dot(K.batch_flatten(g), K.transpose(K.batch_flatten(f))) # # [bs, N, N]
beta = K.softmax(s) # attention map
o = K.dot(beta, K.batch_flatten(h)) # [bs, N, C]
o = K.reshape(o, K.shape(inputs)) # [bs, h, w, C]
return self.activation(self.gamma * o + inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'activation': activations.serialize(self.activation),
'data_format': self.data_format,
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(SelfAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
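# Illustrative usage sketch (feature-map shape is an assumption); the layer keeps the input shape
# and adds a learned, gamma-scaled attention residual.
#   feat = keras.layers.Input(shape=(32, 32, 64))
#   attended = SelfAttention()(feat)   # -> (None, 32, 32, 64)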
"""
Implementation of Coordinate Channel
keras-coordconv
MIT License
Copyright (c) 2018 <NAME>
https://github.com/titu1994/keras-coordconv/blob/master/coord.py
"""
class _CoordinateChannel(Layer):
""" Adds Coordinate Channels to the input tensor.
# Arguments
        rank: An integer, the rank of the input data,
            e.g. "2" for 2D convolution.
use_radius: Boolean flag to determine whether the
radius coordinate should be added for 2D rank
inputs or not.
data_format: A string,
one of `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, ..., channels)` while `"channels_first"` corresponds to
inputs with shape `(batch, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
# Input shape
ND tensor with shape:
`(samples, channels, *)`
if `data_format` is `"channels_first"`
or ND tensor with shape:
`(samples, *, channels)`
if `data_format` is `"channels_last"`.
# Output shape
ND tensor with shape:
`(samples, channels + 2, *)`
if `data_format` is `"channels_first"`
or 5D tensor with shape:
`(samples, *, channels + 2)`
if `data_format` is `"channels_last"`.
# References:
- [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](https://arxiv.org/abs/1807.03247)
"""
def __init__(self, rank,
use_radius=False,
data_format='channels_last',
**kwargs):
super(_CoordinateChannel, self).__init__(**kwargs)
if data_format not in [None, 'channels_first', 'channels_last']:
raise ValueError('`data_format` must be either "channels_last", "channels_first" '
'or None.')
self.rank = rank
self.use_radius = use_radius
self.data_format = data_format
self.axis = 1 if image_data_format() == 'channels_first' else -1
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[self.axis]
self.input_spec = InputSpec(min_ndim=self.rank + 2,
axes={self.axis: input_dim})
self.built = True
def call(self, inputs, training=None, mask=None):
input_shape = K.shape(inputs)
if self.rank == 1:
input_shape = [input_shape[i] for i in range(3)]
batch_shape, dim, channels = input_shape
xx_range = tf.tile(K.expand_dims(K.arange(0, dim), axis=0),
K.stack([batch_shape, 1]))
xx_range = K.expand_dims(xx_range, axis=-1)
xx_channels = K.cast(xx_range, K.floatx())
xx_channels = xx_channels / K.cast(dim - 1, K.floatx())
xx_channels = (xx_channels * 2) - 1.
outputs = K.concatenate([inputs, xx_channels], axis=-1)
if self.rank == 2:
if self.data_format == 'channels_first':
inputs = K.permute_dimensions(inputs, [0, 2, 3, 1])
input_shape = K.shape(inputs)
input_shape = [input_shape[i] for i in range(4)]
batch_shape, dim1, dim2, channels = input_shape
xx_ones = tf.ones(K.stack([batch_shape, dim2]), dtype='int32')
xx_ones = K.expand_dims(xx_ones, axis=-1)
xx_range = tf.tile(K.expand_dims(K.arange(0, dim1), axis=0),
K.stack([batch_shape, 1]))
xx_range = K.expand_dims(xx_range, axis=1)
xx_channels = K.batch_dot(xx_ones, xx_range, axes=[2, 1])
xx_channels = K.expand_dims(xx_channels, axis=-1)
xx_channels = K.permute_dimensions(xx_channels, [0, 2, 1, 3])
yy_ones = tf.ones(K.stack([batch_shape, dim1]), dtype='int32')
yy_ones = K.expand_dims(yy_ones, axis=1)
yy_range = tf.tile(K.expand_dims(K.arange(0, dim2), axis=0),
K.stack([batch_shape, 1]))
yy_range = K.expand_dims(yy_range, axis=-1)
yy_channels = K.batch_dot(yy_range, yy_ones, axes=[2, 1])
yy_channels = K.expand_dims(yy_channels, axis=-1)
yy_channels = K.permute_dimensions(yy_channels, [0, 2, 1, 3])
xx_channels = K.cast(xx_channels, K.floatx())
xx_channels = xx_channels / K.cast(dim1 - 1, K.floatx())
xx_channels = (xx_channels * 2) - 1.
yy_channels = K.cast(yy_channels, K.floatx())
yy_channels = yy_channels / K.cast(dim2 - 1, K.floatx())
yy_channels = (yy_channels * 2) - 1.
outputs = K.concatenate([inputs, xx_channels, yy_channels], axis=-1)
if self.use_radius:
rr = K.sqrt(K.square(xx_channels - 0.5) +
K.square(yy_channels - 0.5))
outputs = K.concatenate([outputs, rr], axis=-1)
if self.data_format == 'channels_first':
outputs = K.permute_dimensions(outputs, [0, 3, 1, 2])
if self.rank == 3:
if self.data_format == 'channels_first':
inputs = K.permute_dimensions(inputs, [0, 2, 3, 4, 1])
input_shape = K.shape(inputs)
input_shape = [input_shape[i] for i in range(5)]
batch_shape, dim1, dim2, dim3, channels = input_shape
xx_ones = tf.ones(K.stack([batch_shape, dim3]), dtype='int32')
xx_ones = K.expand_dims(xx_ones, axis=-1)
xx_range = tf.tile(K.expand_dims(K.arange(0, dim2), axis=0),
K.stack([batch_shape, 1]))
xx_range = K.expand_dims(xx_range, axis=1)
xx_channels = K.batch_dot(xx_ones, xx_range, axes=[2, 1])
xx_channels = K.expand_dims(xx_channels, axis=-1)
xx_channels = K.permute_dimensions(xx_channels, [0, 2, 1, 3])
xx_channels = K.expand_dims(xx_channels, axis=1)
xx_channels = tf.tile(xx_channels,
[1, dim1, 1, 1, 1])
yy_ones = tf.ones(K.stack([batch_shape, dim2]), dtype='int32')
yy_ones = K.expand_dims(yy_ones, axis=1)
yy_range = tf.tile(K.expand_dims(K.arange(0, dim3), axis=0),
K.stack([batch_shape, 1]))
yy_range = K.expand_dims(yy_range, axis=-1)
yy_channels = K.batch_dot(yy_range, yy_ones, axes=[2, 1])
yy_channels = K.expand_dims(yy_channels, axis=-1)
yy_channels = K.permute_dimensions(yy_channels, [0, 2, 1, 3])
yy_channels = K.expand_dims(yy_channels, axis=1)
yy_channels = tf.tile(yy_channels,
[1, dim1, 1, 1, 1])
zz_range = tf.tile(K.expand_dims(K.arange(0, dim1), axis=0),
K.stack([batch_shape, 1]))
zz_range = K.expand_dims(zz_range, axis=-1)
zz_range = K.expand_dims(zz_range, axis=-1)
zz_channels = tf.tile(zz_range,
[1, 1, dim2, dim3])
zz_channels = K.expand_dims(zz_channels, axis=-1)
xx_channels = K.cast(xx_channels, K.floatx())
xx_channels = xx_channels / K.cast(dim2 - 1, K.floatx())
xx_channels = xx_channels * 2 - 1.
yy_channels = K.cast(yy_channels, K.floatx())
yy_channels = yy_channels / K.cast(dim3 - 1, K.floatx())
yy_channels = yy_channels * 2 - 1.
zz_channels = K.cast(zz_channels, K.floatx())
zz_channels = zz_channels / K.cast(dim1 - 1, K.floatx())
zz_channels = zz_channels * 2 - 1.
outputs = K.concatenate([inputs, zz_channels, xx_channels, yy_channels],
axis=-1)
if self.data_format == 'channels_first':
outputs = K.permute_dimensions(outputs, [0, 4, 1, 2, 3])
return outputs
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 2
assert input_shape[self.axis]
if self.use_radius and self.rank == 2:
channel_count = 3
else:
channel_count = self.rank
output_shape = list(input_shape)
output_shape[self.axis] = input_shape[self.axis] + channel_count
return tuple(output_shape)
def get_config(self):
config = {
'rank': self.rank,
'use_radius': self.use_radius,
'data_format': self.data_format
}
base_config = super(_CoordinateChannel, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class CoordinateChannel1D(_CoordinateChannel):
""" Adds Coordinate Channels to the input tensor of rank 1.
# Arguments
data_format: A string,
one of `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, ..., channels)` while `"channels_first"` corresponds to
inputs with shape `(batch, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
# Input shape
3D tensor with shape: `(batch_size, steps, input_dim)`
# Output shape
        3D tensor with shape: `(batch_size, steps, input_dim + 1)`
# References:
- [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](https://arxiv.org/abs/1807.03247)
"""
def __init__(self, data_format=None, **kwargs):
super(CoordinateChannel1D, self).__init__(
rank=1,
use_radius=False,
data_format=data_format,
**kwargs
)
def get_config(self):
config = super(CoordinateChannel1D, self).get_config()
config.pop('rank')
config.pop('use_radius')
return config
class CoordinateChannel2D(_CoordinateChannel):
""" Adds Coordinate Channels to the input tensor.
# Arguments
use_radius: Boolean flag to determine whether the
radius coordinate should be added for 2D rank
inputs or not.
data_format: A string,
one of `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, ..., channels)` while `"channels_first"` corresponds to
inputs with shape `(batch, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)`
if `data_format` is `"channels_first"`
or 4D tensor with shape:
`(samples, rows, cols, channels)`
if `data_format` is `"channels_last"`.
# Output shape
4D tensor with shape:
`(samples, channels + 2/3, rows, cols)`
if `data_format` is `"channels_first"`
or 4D tensor with shape:
`(samples, rows, cols, channels + 2/3)`
if `data_format` is `"channels_last"`.
If `use_radius` is set, then will have 3 additional filers,
else only 2 additional filters will be added.
# References:
- [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](https://arxiv.org/abs/1807.03247)
"""
def __init__(self, use_radius=False,
data_format=None,
**kwargs):
super(CoordinateChannel2D, self).__init__(
rank=2,
use_radius=use_radius,
data_format=data_format,
**kwargs
)
def get_config(self):
config = super(CoordinateChannel2D, self).get_config()
config.pop('rank')
return config
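# Illustrative usage sketch (input shape is an assumption, channels_last): two coordinate channels
# are appended before a regular convolution.
#   img = keras.layers.Input(shape=(64, 64, 3))
#   with_coords = CoordinateChannel2D()(img)                     # -> (None, 64, 64, 5)
#   y = keras.layers.Conv2D(32, 3, padding='same')(with_coords)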
|
smartmin/perms.py | nickhargreaves/smartmin | 166 | 12781445 | from django.contrib.auth.models import Permission
def assign_perm(perm, group):
"""
Assigns a permission to a group
"""
if not isinstance(perm, Permission):
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label, codename=codename)
group.permissions.add(perm)
return perm
def remove_perm(perm, group):
"""
Removes a permission from a group
"""
if not isinstance(perm, Permission):
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label, codename=codename)
group.permissions.remove(perm)
return
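# Illustrative usage (assumes a Group instance and Django's built-in "auth.change_user" permission):
#   from django.contrib.auth.models import Group
#   editors = Group.objects.get(name="Editors")
#   assign_perm("auth.change_user", editors)
#   remove_perm("auth.change_user", editors)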
|
hummingbot/connector/exchange/huobi/huobi_api_order_book_data_source.py | BGTCapital/hummingbot | 3,027 | 12781455 | #!/usr/bin/env python
import asyncio
import logging
import hummingbot.connector.exchange.huobi.huobi_constants as CONSTANTS
from collections import defaultdict
from typing import (
Any,
Dict,
List,
Optional,
)
from hummingbot.connector.exchange.huobi.huobi_order_book import HuobiOrderBook
from hummingbot.connector.exchange.huobi.huobi_utils import (
convert_from_exchange_trading_pair,
convert_to_exchange_trading_pair,
build_api_factory,
)
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_message import OrderBookMessage
from hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource
from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest, RESTResponse, WSRequest
from hummingbot.core.web_assistant.rest_assistant import RESTAssistant
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
from hummingbot.core.web_assistant.ws_assistant import WSAssistant
from hummingbot.logger import HummingbotLogger
class HuobiAPIOrderBookDataSource(OrderBookTrackerDataSource):
MESSAGE_TIMEOUT = 30.0
PING_TIMEOUT = 10.0
HEARTBEAT_INTERVAL = 30.0 # seconds
ORDER_BOOK_SNAPSHOT_DELAY = 60 * 60 # expressed in seconds
TRADE_CHANNEL_SUFFIX = "trade.detail"
ORDERBOOK_CHANNEL_SUFFIX = "depth.step0"
_haobds_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._haobds_logger is None:
cls._haobds_logger = logging.getLogger(__name__)
return cls._haobds_logger
def __init__(self,
trading_pairs: List[str],
api_factory: Optional[WebAssistantsFactory] = None,
):
super().__init__(trading_pairs)
self._api_factory = api_factory or build_api_factory()
self._rest_assistant: Optional[RESTAssistant] = None
self._ws_assistant: Optional[WSAssistant] = None
self._message_queue: Dict[str, asyncio.Queue] = defaultdict(asyncio.Queue)
async def _get_rest_assistant(self) -> RESTAssistant:
if self._rest_assistant is None:
self._rest_assistant = await self._api_factory.get_rest_assistant()
return self._rest_assistant
async def _get_ws_assistant(self) -> WSAssistant:
if self._ws_assistant is None:
self._ws_assistant = await self._api_factory.get_ws_assistant()
return self._ws_assistant
@classmethod
async def get_last_traded_prices(cls, trading_pairs: List[str]) -> Dict[str, float]:
api_factory = build_api_factory()
rest_assistant = await api_factory.get_rest_assistant()
url = CONSTANTS.REST_URL + CONSTANTS.TICKER_URL
request = RESTRequest(method=RESTMethod.GET,
url=url)
response: RESTResponse = await rest_assistant.call(request=request)
results = dict()
resp_json = await response.json()
for trading_pair in trading_pairs:
resp_record = [o for o in resp_json["data"] if o["symbol"] == convert_to_exchange_trading_pair(trading_pair)][0]
results[trading_pair] = float(resp_record["close"])
return results
@staticmethod
async def fetch_trading_pairs() -> List[str]:
try:
api_factory = build_api_factory()
rest_assistant = await api_factory.get_rest_assistant()
url = CONSTANTS.REST_URL + CONSTANTS.API_VERSION + CONSTANTS.SYMBOLS_URL
request = RESTRequest(method=RESTMethod.GET,
url=url)
response: RESTResponse = await rest_assistant.call(request=request)
if response.status == 200:
all_symbol_infos: Dict[str, Any] = await response.json()
return [f"{symbol_info['base-currency']}-{symbol_info['quote-currency']}".upper()
for symbol_info in all_symbol_infos["data"]
if symbol_info["state"] == "online"]
except Exception:
# Do nothing if the request fails -- there will be no autocomplete for huobi trading pairs
pass
return []
async def get_snapshot(self, trading_pair: str) -> Dict[str, Any]:
rest_assistant = await self._get_rest_assistant()
url = CONSTANTS.REST_URL + CONSTANTS.DEPTH_URL
# when type is set to "step0", the default value of "depth" is 150
params: Dict = {"symbol": convert_to_exchange_trading_pair(trading_pair), "type": "step0"}
request = RESTRequest(method=RESTMethod.GET,
url=url,
params=params)
response: RESTResponse = await rest_assistant.call(request=request)
if response.status != 200:
raise IOError(f"Error fetching Huobi market snapshot for {trading_pair}. "
f"HTTP status is {response.status}.")
snapshot_data: Dict[str, Any] = await response.json()
return snapshot_data
async def get_new_order_book(self, trading_pair: str) -> OrderBook:
snapshot: Dict[str, Any] = await self.get_snapshot(trading_pair)
timestamp = snapshot["tick"]["ts"]
snapshot_msg: OrderBookMessage = HuobiOrderBook.snapshot_message_from_exchange(
msg=snapshot,
timestamp=timestamp,
metadata={"trading_pair": trading_pair},
)
order_book: OrderBook = self.order_book_create_function()
order_book.apply_snapshot(snapshot_msg.bids, snapshot_msg.asks, snapshot_msg.update_id)
return order_book
async def _subscribe_channels(self, ws: WSAssistant):
try:
for trading_pair in self._trading_pairs:
subscribe_orderbook_request: WSRequest = WSRequest({
"sub": f"market.{convert_to_exchange_trading_pair(trading_pair)}.depth.step0",
"id": convert_to_exchange_trading_pair(trading_pair)
})
subscribe_trade_request: WSRequest = WSRequest({
"sub": f"market.{convert_to_exchange_trading_pair(trading_pair)}.trade.detail",
"id": convert_to_exchange_trading_pair(trading_pair)
})
await ws.send(subscribe_orderbook_request)
await ws.send(subscribe_trade_request)
self.logger().info("Subscribed to public orderbook and trade channels...")
except asyncio.CancelledError:
raise
except Exception:
self.logger().error(
"Unexpected error occurred subscribing to order book trading and delta streams...", exc_info=True
)
raise
async def listen_for_subscriptions(self):
ws = None
while True:
try:
ws: WSAssistant = await self._get_ws_assistant()
await ws.connect(ws_url=CONSTANTS.WS_PUBLIC_URL, ping_timeout=self.HEARTBEAT_INTERVAL)
await self._subscribe_channels(ws)
async for ws_response in ws.iter_messages():
data = ws_response.data
if "subbed" in data:
continue
if "ping" in data:
ping_request = WSRequest(payload={
"pong": data["ping"]
})
await ws.send(request=ping_request)
channel = data.get("ch", "")
if channel.endswith(self.TRADE_CHANNEL_SUFFIX):
self._message_queue[self.TRADE_CHANNEL_SUFFIX].put_nowait(data)
if channel.endswith(self.ORDERBOOK_CHANNEL_SUFFIX):
self._message_queue[self.ORDERBOOK_CHANNEL_SUFFIX].put_nowait(data)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error(
"Unexpected error occurred when listening to order book streams. Retrying in 5 seconds...",
exc_info=True,
)
await self._sleep(5.0)
finally:
ws and await ws.disconnect()
async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
message_queue = self._message_queue[self.TRADE_CHANNEL_SUFFIX]
while True:
try:
msg: Dict[str, Any] = await message_queue.get()
trading_pair = msg["ch"].split(".")[1]
timestamp = msg["tick"]["ts"]
for data in msg["tick"]["data"]:
trade_message: OrderBookMessage = HuobiOrderBook.trade_message_from_exchange(
msg=data,
timestamp=timestamp,
metadata={"trading_pair": convert_from_exchange_trading_pair(trading_pair)}
)
output.put_nowait(trade_message)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error with WebSocket connection. Retrying after 30 seconds...",
exc_info=True)
await self._sleep(30.0)
async def listen_for_order_book_diffs(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
message_queue = self._message_queue[self.ORDERBOOK_CHANNEL_SUFFIX]
while True:
try:
msg: Dict[str, Any] = await message_queue.get()
timestamp = msg["tick"]["ts"]
order_book_message: OrderBookMessage = HuobiOrderBook.diff_message_from_exchange(
msg=msg,
timestamp=timestamp
)
output.put_nowait(order_book_message)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error with WebSocket connection. Retrying after 30 seconds...",
exc_info=True)
await self._sleep(30.0)
async def listen_for_order_book_snapshots(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
while True:
await self._sleep(self.ORDER_BOOK_SNAPSHOT_DELAY)
try:
for trading_pair in self._trading_pairs:
snapshot: Dict[str, Any] = await self.get_snapshot(trading_pair)
snapshot_message: OrderBookMessage = HuobiOrderBook.snapshot_message_from_exchange(
snapshot,
timestamp=snapshot["tick"]["ts"],
metadata={"trading_pair": trading_pair},
)
output.put_nowait(snapshot_message)
self.logger().debug(f"Saved order book snapshot for {trading_pair}")
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error listening for orderbook snapshots. Retrying in 5 secs...", exc_info=True)
await self._sleep(5.0)
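# Illustrative usage sketch (requires network access to Huobi; values are examples):
#   pairs = asyncio.get_event_loop().run_until_complete(
#       HuobiAPIOrderBookDataSource.fetch_trading_pairs())              # e.g. ['BTC-USDT', ...]
#   prices = asyncio.get_event_loop().run_until_complete(
#       HuobiAPIOrderBookDataSource.get_last_traded_prices(['BTC-USDT']))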
|
acora/__init__.py | scoder/acora | 209 | 12781492 | """\
Acora - a multi-keyword search engine based on Aho-Corasick trees.
Usage::
>>> from acora import AcoraBuilder
Collect some keywords::
>>> builder = AcoraBuilder('ab', 'bc', 'de')
>>> builder.add('a', 'b')
Generate the Acora search engine::
>>> ac = builder.build()
Search a string for all occurrences::
>>> ac.findall('abc')
[('a', 0), ('ab', 0), ('b', 1), ('bc', 1)]
>>> ac.findall('abde')
[('a', 0), ('ab', 0), ('b', 1), ('de', 2)]
"""
from __future__ import absolute_import
import sys
IS_PY3 = sys.version_info[0] >= 3
if IS_PY3:
unicode = str
FILE_BUFFER_SIZE = 32 * 1024
class PyAcora(object):
"""A simple (and very slow) Python implementation of the Acora
search engine.
"""
transitions = None
def __init__(self, machine, transitions=None):
if transitions is not None:
# old style format
start_state = machine
self.transitions = dict([
((state.id, char), (target_state.id, target_state.matches))
for ((state, char), target_state) in transitions.items()])
else:
# new style Machine format
start_state = machine.start_state
ignore_case = machine.ignore_case
self.transitions = transitions = {}
child_states = machine.child_states
child_targets = {}
state_matches = {}
needs_bytes_conversion = None
for state in child_states:
state_id = state.id
child_targets[state_id], state_matches[state_id] = (
_merge_targets(state, ignore_case))
if needs_bytes_conversion is None and state_matches[state_id]:
if IS_PY3:
needs_bytes_conversion = any(
isinstance(s, bytes) for s in state_matches[state_id])
elif any(isinstance(s, unicode) for s in state_matches[state_id]):
# in Py2, some keywords might be str even though we're processing unicode
needs_bytes_conversion = False
if needs_bytes_conversion is None and not IS_PY3:
needs_bytes_conversion = True
if needs_bytes_conversion:
if IS_PY3:
convert = ord
else:
from codecs import latin_1_encode
def convert(s):
return latin_1_encode(s)[0]
else:
convert = None
get_child_targets = child_targets.get
get_matches = state_matches.get
state_id = start_state.id
for ch, child in _merge_targets(start_state, ignore_case)[0].items():
child_id = child.id
if convert is not None:
ch = convert(ch)
transitions[(state_id, ch)] = (child_id, get_matches(child_id))
for state in child_states:
state_id = state.id
for ch, child in get_child_targets(state_id).items():
child_id = child.id
if convert is not None:
ch = convert(ch)
transitions[(state_id, ch)] = (child_id, get_matches(child_id))
self.start_state = start_state.id
def finditer(self, s):
"""Iterate over all occurrences of any keyword in the string.
Returns (keyword, offset) pairs.
"""
state = self.start_state
start_state = (state, [])
next_state = self.transitions.get
pos = 0
for char in s:
pos += 1
state, matches = next_state((state, char), start_state)
if matches:
for match in matches:
yield (match, pos-len(match))
def findall(self, s):
"""Find all occurrences of any keyword in the string.
Returns a list of (keyword, offset) pairs.
"""
return list(self.finditer(s))
def filefind(self, f):
"""Iterate over all occurrences of any keyword in a file.
Returns (keyword, offset) pairs.
"""
opened = False
if not hasattr(f, 'read'):
f = open(f, 'rb')
opened = True
try:
state = self.start_state
start_state = (state, ())
next_state = self.transitions.get
pos = 0
while 1:
data = f.read(FILE_BUFFER_SIZE)
if not data:
break
for char in data:
pos += 1
state, matches = next_state((state, char), start_state)
if matches:
for match in matches:
yield (match, pos-len(match))
finally:
if opened:
f.close()
def filefindall(self, f):
"""Find all occurrences of any keyword in a file.
Returns a list of (keyword, offset) pairs.
"""
return list(self.filefind(f))
# import from shared Python/Cython module
from acora._acora import (
insert_bytes_keyword, insert_unicode_keyword,
build_trie as _build_trie, build_MachineState as _MachineState, merge_targets as _merge_targets)
# import from Cython module if available
try:
from acora._cacora import (
UnicodeAcora, BytesAcora, insert_bytes_keyword, insert_unicode_keyword)
except ImportError:
# C module not there ...
UnicodeAcora = BytesAcora = PyAcora
class AcoraBuilder(object):
"""The main builder class for an Acora search engine.
Add keywords by calling ``.add(*keywords)`` or by passing them
into the constructor. Then build the search engine by calling
``.build()``.
Builds a case insensitive search engine when passing
``ignore_case=True``, and a case sensitive engine otherwise.
"""
ignore_case = False
def __init__(self, *keywords, **kwargs):
if kwargs:
self.ignore_case = kwargs.pop('ignore_case', False)
if kwargs:
raise TypeError(
"%s() got unexpected keyword argument %s" % (
self.__class__.__name__, next(iter(kwargs))))
if len(keywords) == 1 and isinstance(keywords[0], (list, tuple)):
keywords = keywords[0]
self.for_unicode = None
self.state_counter = 1
self.keywords = set()
self.tree = _MachineState(0)
if keywords:
self.update(keywords)
def __update(self, keywords):
"""Add more keywords to the search engine builder.
Adding keywords does not impact previously built search
engines.
"""
if not keywords:
return
self.tree = None
self.keywords.update(keywords)
if self.for_unicode is None:
for keyword in keywords:
if isinstance(keyword, unicode):
self.for_unicode = True
elif isinstance(keyword, bytes):
self.for_unicode = False
else:
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
break
# validate input string types
marker = object()
if self.for_unicode:
for keyword in keywords:
if not isinstance(keyword, unicode):
break
else:
keyword = marker
else:
for keyword in keywords:
if not isinstance(keyword, bytes):
break
else:
keyword = marker
if keyword is not marker:
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
def add(self, *keywords):
"""Add more keywords to the search engine builder.
Adding keywords does not impact previously built search
engines.
"""
if keywords:
self.update(keywords)
def build(self, ignore_case=None, acora=None):
"""Build a search engine from the aggregated keywords.
Builds a case insensitive search engine when passing
``ignore_case=True``, and a case sensitive engine otherwise.
"""
if acora is None:
if self.for_unicode:
acora = UnicodeAcora
else:
acora = BytesAcora
if self.for_unicode == False and ignore_case:
import sys
if sys.version_info[0] >= 3:
raise ValueError(
"Case insensitive search is not supported for byte strings in Python 3")
if ignore_case is not None and ignore_case != self.ignore_case:
# must rebuild tree
builder = type(self)(ignore_case=ignore_case)
builder.update(self.keywords)
return builder.build(acora=acora)
return acora(_build_trie(self.tree, ignore_case=self.ignore_case))
def update(self, keywords):
for_unicode = self.for_unicode
ignore_case = self.ignore_case
insert_keyword = insert_unicode_keyword if for_unicode else insert_bytes_keyword
for keyword in keywords:
if for_unicode is None:
for_unicode = self.for_unicode = isinstance(keyword, unicode)
insert_keyword = (
insert_unicode_keyword if for_unicode else insert_bytes_keyword)
elif for_unicode != isinstance(keyword, unicode):
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
self.state_counter = insert_keyword(
self.tree, keyword, self.state_counter, ignore_case)
self.keywords.update(keywords)
### convenience functions
def search(s, *keywords):
"""Convenience function to search a string for keywords.
"""
acora = AcoraBuilder(keywords).build()
return acora.findall(s)
def search_ignore_case(s, *keywords):
"""Convenience function to search a string for keywords. Case
insensitive version.
"""
acora = AcoraBuilder(keywords, ignore_case=True).build()
return acora.findall(s)
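# Illustrative usage of the convenience helpers (consistent with the module docstring above):
#   search('abcde', 'ab', 'de')              # -> [('ab', 0), ('de', 3)]
#   search_ignore_case('ABCDE', 'ab', 'de')  # same matches, ignoring case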
|
tests/model_test.py | gaganchhabra/appkernel | 156 | 12781516 | import json
from decimal import Decimal
from pymongo import MongoClient
from appkernel import PropertyRequiredException
from appkernel.configuration import config
from appkernel.repository import mongo_type_converter_to_dict, mongo_type_converter_from_dict
from .utils import *
import pytest
from jsonschema import validate
def setup_module(module):
config.mongo_database = MongoClient(host='localhost')['appkernel']
def setup_function(function):
""" executed before each method call
"""
print('\n\nSETUP ==> ')
Project.delete_all()
User.delete_all()
def test_required_field():
project = Project()
with pytest.raises(PropertyRequiredException):
project.finalise_and_validate()
with pytest.raises(PropertyRequiredException):
project.update(name=None)
project.finalise_and_validate()
project.update(name='some_name')
project.finalise_and_validate()
def test_append_to_non_existing_non_defined_element():
project = Project().update(name='strange project')
project.append_to(users=Task().update(name='some_task', description='some description'))
project.finalise_and_validate()
assert 'users' in project.__dict__
assert len(project.users) == 1
assert isinstance(project.users[0], Task)
print(('{}'.format(project)))
def test_append_to_non_existing_element():
project = Project().update(name='strange project')
project.append_to(tasks=Task().update(name='some_task', description='some description'))
project.finalise_and_validate()
assert 'tasks' in project.__dict__
assert len(project.tasks) == 1
assert isinstance(project.tasks[0], Task)
print(('{}'.format(project)))
def test_remove_non_existing_element():
with pytest.raises(AttributeError):
project = Project().update(name='strange project')
project.remove_from(tasks=Task())
with pytest.raises(AttributeError):
project = Project().update(name='strange project')
project.remove_from(tasks=None)
with pytest.raises(AttributeError):
project = Project().update(name='strange project')
project.remove_from(somehtings=Task())
def test_remove_existing_defined_element():
task1 = Task().update(name='some_task', description='some description')
task2 = Task().update(name='some_other_task', description='some other description')
task3 = Task().update(name='a third task', description='some third description')
project = Project().update(name='strange project')
project.append_to(tasks=[task1, task2])
project.finalise_and_validate()
assert len(project.tasks) == 2
project.append_to(tasks=task3)
project.finalise_and_validate()
assert len(project.tasks) == 3
print(('{}'.format(project)))
project.remove_from(tasks=task1)
assert len(project.tasks) == 2
print(('{}'.format(project)))
def test_generator():
task = Task()
task.name = 'some task name'
task.description = 'some task description'
task.finalise_and_validate()
print(('\nTask:\n {}'.format(task)))
assert task.id is not None and task.id.startswith('U')
def test_converter():
user = create_and_save_a_user('test user', 'test password', 'test description')
print(('\n{}'.format(user.dumps(pretty_print=True))))
assert user.password.startswith('<PASSWORD>')
hash1 = user.password
user.save()
assert user.password.startswith('<PASSWORD>')
assert hash1 == user.password
def test_nested_object_serialisation():
portfolio = create_a_portfolion_with_owner()
print((portfolio.dumps(pretty_print=True)))
check_portfolio(portfolio)
def test_describe_model():
user_spec = User.get_parameter_spec()
print(User.get_paramater_spec_as_json())
assert 'name' in user_spec
assert user_spec.get('name').get('required')
assert user_spec.get('name').get('type') == 'str'
assert len(user_spec.get('name').get('validators')) == 2
for validator in user_spec.get('name').get('validators'):
if validator.get('type') == 'Regexp':
assert validator.get('value') == '[A-Za-z0-9-_]'
assert user_spec.get('roles').get('sub_type') == 'str'
def test_describe_rich_model():
project_spec = Project.get_parameter_spec()
print(Project.get_paramater_spec_as_json())
assert project_spec.get('created').get('required')
assert project_spec.get('created').get('type') == 'datetime'
assert project_spec.get('name').get('required')
assert project_spec.get('name').get('type') == 'str'
name_validators = project_spec.get('name').get('validators')
assert len(name_validators) == 1
assert name_validators[0].get('type') == 'NotEmpty'
    assert name_validators[0].get('value') in (None, 'null')
tasks = project_spec.get('tasks')
assert not tasks.get('required')
assert 'sub_type' in tasks
assert tasks.get('type') == 'list'
task = tasks.get('sub_type')
assert task.get('type') == 'Task'
assert 'props' in task
props = task.get('props')
assert not props.get('closed_date').get('required')
assert props.get('closed_date').get('type') == 'datetime'
assert props.get('closed_date').get('validators')[0].get('type') == 'Past'
def test_json_schema():
json_schema = Project.get_json_schema()
print('\n{}'.format(json.dumps(json_schema, indent=2)))
print('===========')
project = create_rich_project()
print(project.dumps(pretty_print=True))
assert json_schema.get('title') == 'Project Schema'
assert 'title' in json_schema
assert json_schema.get('type') == 'object'
assert 'name' in json_schema.get('required')
assert 'created' in json_schema.get('required')
assert 'definitions' in json_schema
assert json_schema.get('additionalProperties')
definitions = json_schema.get('definitions')
assert 'Task' in definitions
assert len(definitions.get('Task').get('required')) == 6
assert 'id' in definitions.get('Task').get('properties')
closed_date = definitions.get('Task').get('properties').get('closed_date')
assert 'string' in closed_date.get('type')
assert len(closed_date.get('type')) == 2
assert closed_date.get('format') == 'date-time'
completed = definitions.get('Task').get('properties').get('completed')
assert 'boolean' in completed.get('type')
assert len(completed.get('type')) == 1
validate(json.loads(project.dumps()), json_schema)
# todo: check the enum / make a negative test
# validator = Draft4Validator(json_schema)
# errors = sorted(validator.iter_errors(project.dumps()), key=lambda e: e.path)
# for error in errors:
# print('{}'.format(error.message, list(error.path)))
def test_json_schema_primitives_types():
json_schema = Stock.get_json_schema()
print(json.dumps(json_schema, indent=2))
props = json_schema.get('properties')
opentypes = props.get('open').get('type')
assert 'number' in opentypes
assert len(opentypes) == 1
item_types = props.get('history').get('items').get('type')
assert 'number' in item_types
    assert len(item_types) == 1
stock = create_a_stock()
validate(json.loads(stock.dumps()), json_schema)
def test_json_schema_complex():
# print json.dumps(Portfolio.get_parameter_spec(True), indent=2)
json_schema = Portfolio.get_json_schema()
print(json.dumps(json_schema, indent=2))
stock_definition = json_schema.get('definitions').get('Stock')
assert stock_definition.get('properties').get('updated').get('format') == 'date-time'
assert stock_definition.get('properties').get('code').get('pattern') == '[A-Za-z0-9-_]'
assert stock_definition.get('properties').get('code').get('maxLength') == 4
assert stock_definition.get('properties').get('open').get('minimum') == 0
open_types = stock_definition.get('properties').get('open').get('type')
assert 'number' in open_types
assert len(open_types) == 1
sequence_types = stock_definition.get('properties').get('sequence').get('type')
assert 'number' in sequence_types
assert len(sequence_types) == 2
assert stock_definition.get('properties').get('sequence').get('minimum') == 1
assert stock_definition.get('properties').get('sequence').get('maximum') == 100
assert stock_definition.get('properties').get('sequence').get('multipleOf') == 1.0
history_types = stock_definition.get('properties').get('history').get('type')
assert 'array' in history_types
assert len(history_types) == 2
portfolio = create_portfolio('My Portfolio')
validate(json.loads(portfolio.dumps()), json_schema)
def test_json_schema_in_mongo_compat_mode():
json_schema = Project.get_json_schema(mongo_compatibility=True)
print('\n\n{}'.format(json.dumps(json_schema, indent=2)))
print('===========')
task_spec = json_schema.get('properties').get('tasks')
assert len(task_spec.get('items').get('required')) == 5
priority_spec = task_spec.get('items').get('properties').get('priority')
assert len(priority_spec.get('enum')) == 3
closed_date_spec = task_spec.get('items').get('properties').get('closed_date')
assert len(closed_date_spec.get('bsonType')) == 2
assert 'bsonType' in json_schema
assert 'id' not in json_schema
assert '$schema' not in json_schema
assert 'definitions' not in json_schema
for prop in json_schema.get('properties').items():
assert 'format' not in prop[1]
assert 'bsonType' in prop[1]
for prop in task_spec.get('items').get('properties').items():
assert 'format' not in prop[1]
assert 'bsonType' or 'enum' in prop[1]
project = create_rich_project()
print(project.dumps(pretty_print=True))
validate(json.loads(project.dumps()), json_schema)
def __assert_product_dict(product_dict: dict):
assert 'name' in product_dict
assert 'description' in product_dict
assert 'size' in product_dict
assert product_dict.get('size') == 'M'
assert 'price' in product_dict
assert isinstance(product_dict.get('price'), dict)
price_dict = product_dict.get('price')
assert '_type' in price_dict
assert price_dict.get('_type') == 'money.money.Money'
assert price_dict.get('currency') == 'EUR'
def test_custom_object_marshalling():
product = Product(code='TRX', name='White T-Shirt', description='a stylish white shirt', size=ProductSize.M,
price=Money(10.50, 'EUR'))
product_dict = Model.to_dict(product)
__assert_product_dict(product_dict)
amount = product_dict.get('price').get('amount')
assert isinstance(amount, Decimal)
assert amount == 10.5
product_json = product.dumps(pretty_print=True)
print('JSON: \n{}'.format(product_json))
reloaded_product = Product.loads(product_json)
assert reloaded_product is not None and isinstance(reloaded_product, Product)
assert reloaded_product.name == product.name
assert reloaded_product.description == product.description
assert reloaded_product.size == product.size
assert isinstance(reloaded_product.price, Money)
assert reloaded_product.price == product.price
def test_custom_converter_function():
product = Product(code='TRX', name='White T-Shirt', description='a stylish white shirt', size=ProductSize.M,
price=Money(10.50, 'EUR'))
product_dict = Model.to_dict(product, converter_func=mongo_type_converter_to_dict)
__assert_product_dict(product_dict)
amount = product_dict.get('price').get('amount')
assert isinstance(amount, float)
product_json = product.dumps(pretty_print=True)
print('JSON: \n{}'.format(product_json))
reloaded_product = Model.from_dict(product_dict, Product, converter_func=mongo_type_converter_from_dict)
assert isinstance(reloaded_product.price, Money)
assert isinstance(reloaded_product.price.amount, Decimal)
|
study/vanilla/models.py | NunoEdgarGFlowHub/wavetorch | 470 | 12781536 | import torch
import torch.nn as nn
from torch.nn import functional as F
class CustomRNN(nn.Module):
def __init__(self, input_size, output_size, hidden_size, batch_first=True, W_scale=1e-1, f_hidden=None):
super(CustomRNN, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.f_hidden = f_hidden
self.W1 = nn.Parameter((torch.rand(hidden_size, input_size)-0.5)*W_scale)
self.W2 = nn.Parameter((torch.rand(hidden_size, hidden_size)-0.5)*W_scale)
self.W3 = nn.Parameter((torch.rand(output_size, hidden_size)-0.5)*W_scale)
self.b_h = nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
h1 = torch.zeros(x.shape[0], self.hidden_size)
ys = []
for i, xi in enumerate(x.chunk(x.size(1), dim=1)):
h1 = (torch.matmul(self.W2, h1.t()) + torch.matmul(self.W1, xi.t())).t() + self.b_h
if self.f_hidden is not None:
h1 = getattr(F, self.f_hidden)(h1)
y = torch.matmul(self.W3, h1.t()).t()
ys.append(y)
ys = torch.stack(ys, dim=1)
return ys
class CustomRes(nn.Module):
def __init__(self, input_size, output_size, hidden_size, batch_first=True, W_scale=1e-1, f_hidden=None):
super(CustomRes, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.f_hidden = f_hidden
self.W1 = torch.nn.Parameter((torch.rand(hidden_size, input_size)-0.5)*W_scale)
self.W2 = torch.nn.Parameter((torch.rand(hidden_size, hidden_size)-0.5)*W_scale)
self.W3 = torch.nn.Parameter((torch.rand(output_size, hidden_size)-0.5)*W_scale)
self.b_h = torch.nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
h1 = torch.zeros(x.shape[0], self.hidden_size)
ys = []
for i, xi in enumerate(x.chunk(x.size(1), dim=1)):
hprev = h1
h1 = (torch.matmul(self.W2, h1.t()) + torch.matmul(self.W1, xi.t())).t() + self.b_h
if self.f_hidden is not None:
h1 = getattr(F, self.f_hidden)(h1)
y = torch.matmul(self.W3, h1.t()).t()
ys.append(y)
h1 = h1 + hprev
ys = torch.stack(ys, dim=1)
return ys
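# Illustrative usage sketch (added for clarity; not part of the original study script).
# Inputs are assumed to be batches of scalar sequences of shape [N_batch, T]; the
# sizes below are arbitrary placeholders.
def _demo_custom_rnn():
    model = CustomRNN(input_size=1, output_size=3, hidden_size=16, f_hidden='relu')
    x = torch.rand(4, 50)  # 4 sequences of 50 time steps each
    ys = model(x)          # ys has shape [4, 50, 3]: one output vector per time step
    return ys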
class CustomLSTM(nn.Module):
def __init__(self, input_size, output_size, hidden_size, batch_first=True, W_scale=1e-1):
super(CustomLSTM, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.lstm = nn.LSTM(input_size, hidden_size, batch_first=batch_first)
self.W3 = torch.nn.Parameter((torch.rand(output_size, hidden_size)-0.5))
def forward(self, x):
# out should have size [N_batch, T, N_hidden]
out, hidden = self.lstm(x.unsqueeze(2))
# print(torch.max(x, 1))
# print(x[:, 100])
# print(out[:, 100, 0].detach())
# ys should have size [N_batch, T, N_classes]
ys = torch.matmul(out, self.W3.t())
return ys |
office-plugin/windows-office/program/wizards/common/Properties.py | jerrykcode/kkFileView | 6,660 | 12781554 | #
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
from com.sun.star.beans import PropertyValue
'''
Simplifies handling Arrays of PropertyValue.
To make use of this class, instantiate it, and set entries
with plain dict item assignment (properties[propName] = propValue).
caution: propName should always be a String.
When finished, call the getProperties() method to get an array of the set properties.
'''
class Properties(dict):
@classmethod
def getPropertyValue(self, props, propName):
for i in props:
if propName == i.Name:
return i.Value
raise AttributeError ("Property '" + propName + "' not found.")
@classmethod
def hasPropertyValue(self, props, propName):
for i in props:
if propName == i.Name:
return True
return False
@classmethod
def getProperties(self, _map):
pv = []
for k,v in _map.items():
pv.append(self.createProperty(k, v))
return pv
@classmethod
def createProperty(self, name, value, handle=None):
pv = PropertyValue()
pv.Name = name
pv.Value = value
if handle is not None:
pv.Handle = handle
return pv
def getProperties1(self):
return self.getProperties(self)
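# Illustrative sketch (added; not part of the original wizard code). Since Properties
# is a dict subclass, entries are set with normal item assignment and then converted
# into a PropertyValue array. This only runs inside a UNO/LibreOffice runtime:
#
#     props = Properties()
#     props["Hidden"] = True
#     props["Overwrite"] = False
#     pv_array = props.getProperties1()   # list of com.sun.star.beans.PropertyValue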
|
indicnlp/transliterate/unicode_transliterate.py | shubham303/indic_nlp_library | 432 | 12781564 | <reponame>shubham303/indic_nlp_library
#
# Copyright (c) 2013-present, <NAME>
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
#Program for transliterating text written in one Indic script to another based on Unicode mappings.
#
# @author <NAME>
#
import sys, string, itertools, re, os
from collections import defaultdict
from indicnlp import common
from indicnlp import langinfo
from indicnlp.script import indic_scripts as isc
from indicnlp.transliterate.sinhala_transliterator import SinhalaDevanagariTransliterator as sdt
import pandas as pd
OFFSET_TO_ITRANS={}
ITRANS_TO_OFFSET=defaultdict(list)
DUPLICATE_ITRANS_REPRESENTATIONS={}
def init():
"""
To be called by library loader, do not call it in your program
"""
### Load the ITRANS-script offset map. The map was initially generated using the snippet below (uses the old itrans transliterator)
    ### The map is modified as needed to accommodate extensions and corrections to the mappings
#
# base=0x900
# l=[]
# for i in range(0,0x80):
# c=chr(base+i)
# itrans=ItransTransliterator.to_itrans(c,'hi')
# l.append((hex(i),c,itrans))
# print(l)
#
# pd.DataFrame(l,columns=['offset_hex','devnag_char','itrans']).to_csv('offset_itrans_map.csv',index=False,encoding='utf-8')
itrans_map_fname=os.path.join(common.get_resources_path(),'transliterate','offset_itrans_map.csv')
#itrans_map_fname=r'D:\src\python_sandbox\src\offset_itrans_map.csv'
itrans_df=pd.read_csv(itrans_map_fname,encoding='utf-8')
global OFFSET_TO_ITRANS, ITRANS_TO_OFFSET, DUPLICATE_ITRANS_REPRESENTATIONS
for r in itrans_df.iterrows():
itrans=r[1]['itrans']
o=int(r[1]['offset_hex'],base=16)
OFFSET_TO_ITRANS[o]=itrans
if langinfo.is_consonant_offset(o):
### for consonants, strip the schwa - add halant offset
ITRANS_TO_OFFSET[itrans[:-1]].extend([o,0x4d])
else:
### the append assumes that the maatra always comes after independent vowel in the df
ITRANS_TO_OFFSET[itrans].append(o)
DUPLICATE_ITRANS_REPRESENTATIONS = {
'A': 'aa',
'I': 'ii',
'U': 'uu',
'RRi': 'R^i',
'RRI': 'R^I',
'LLi': 'L^i',
'LLI': 'L^I',
'L': 'ld',
'w': 'v',
'x': 'kSh',
'gj': 'j~n',
'dny': 'j~n',
'.n': '.m',
'M': '.m',
'OM': 'AUM'
}
class UnicodeIndicTransliterator(object):
"""
Base class for rule-based transliteration among Indian languages.
Script pair specific transliterators should derive from this class and override the transliterate() method.
They can call the super class 'transliterate()' method to avail of the common transliteration
"""
@staticmethod
def _correct_tamil_mapping(offset):
# handle missing unaspirated and voiced plosives in Tamil script
# replace by unvoiced, unaspirated plosives
# for first 4 consonant rows of varnamala
# exception: ja has a mapping in Tamil
if offset>=0x15 and offset<=0x28 and \
offset!=0x1c and \
not ( (offset-0x15)%5==0 or (offset-0x15)%5==4 ) :
subst_char=(offset-0x15)//5
offset=0x15+5*subst_char
# for 5th consonant row of varnamala
if offset in [ 0x2b, 0x2c, 0x2d]:
offset=0x2a
# 'sh' becomes 'Sh'
if offset==0x36:
offset=0x37
return offset
@staticmethod
def transliterate(text,lang1_code,lang2_code):
"""
convert the source language script (lang1) to target language script (lang2)
text: text to transliterate
lang1_code: language 1 code
        lang2_code: language 2 code
"""
if lang1_code in langinfo.SCRIPT_RANGES and lang2_code in langinfo.SCRIPT_RANGES:
# if Sinhala is source, do a mapping to Devanagari first
if lang1_code=='si':
text=sdt.sinhala_to_devanagari(text)
lang1_code='hi'
            # if Sinhala is target, make Devanagari the intermediate target
org_lang2_code=''
if lang2_code=='si':
lang2_code='hi'
org_lang2_code='si'
trans_lit_text=[]
for c in text:
newc=c
offset=ord(c)-langinfo.SCRIPT_RANGES[lang1_code][0]
if offset >=langinfo.COORDINATED_RANGE_START_INCLUSIVE and offset <= langinfo.COORDINATED_RANGE_END_INCLUSIVE and c!='\u0964' and c!='\u0965':
if lang2_code=='ta':
# tamil exceptions
offset=UnicodeIndicTransliterator._correct_tamil_mapping(offset)
newc=chr(langinfo.SCRIPT_RANGES[lang2_code][0]+offset)
trans_lit_text.append(newc)
            # if Sinhala is the target, map the Devanagari intermediate back to Sinhala
if org_lang2_code=='si':
return sdt.devanagari_to_sinhala(''.join(trans_lit_text))
return ''.join(trans_lit_text)
else:
return text
class ItransTransliterator(object):
"""
Transliterator between Indian scripts and ITRANS
"""
@staticmethod
def to_itrans(text,lang_code):
if lang_code in langinfo.SCRIPT_RANGES:
if lang_code=='ml':
# Change from chillus characters to corresponding consonant+halant
text=text.replace('\u0d7a','\u0d23\u0d4d')
text=text.replace('\u0d7b','\u0d28\u0d4d')
text=text.replace('\u0d7c','\u0d30\u0d4d')
text=text.replace('\u0d7d','\u0d32\u0d4d')
text=text.replace('\u0d7e','\u0d33\u0d4d')
text=text.replace('\u0d7f','\u0d15\u0d4d')
offsets = [ isc.get_offset(c,lang_code) for c in text ]
### naive lookup
# itrans_l = [ OFFSET_TO_ITRANS.get(o, '-' ) for o in offsets ]
itrans_l=[]
for o in offsets:
itrans=OFFSET_TO_ITRANS.get(o, chr(langinfo.SCRIPT_RANGES[lang_code][0]+o) )
if langinfo.is_halanta_offset(o):
itrans=''
if len(itrans_l)>0:
itrans_l.pop()
elif langinfo.is_vowel_sign_offset(o) and len(itrans_l)>0:
itrans_l.pop()
itrans_l.extend(itrans)
return ''.join(itrans_l)
else:
return text
@staticmethod
def from_itrans(text,lang):
"""
TODO: Document this method properly
TODO: A little hack is used to handle schwa: needs to be documented
TODO: check for robustness
"""
MAXCODE=4 ### TODO: Needs to be fixed
## handle_duplicate_itrans_representations
for k, v in DUPLICATE_ITRANS_REPRESENTATIONS.items():
if k in text:
text=text.replace(k,v)
start=0
match=None
solution=[]
i=start+1
while i<=len(text):
itrans=text[start:i]
# print('===')
# print('i: {}'.format(i))
# if i<len(text):
# print('c: {}'.format(text[i-1]))
# print('start: {}'.format(start))
# print('itrans: {}'.format(itrans))
if itrans in ITRANS_TO_OFFSET:
offs=ITRANS_TO_OFFSET[itrans]
## single element list - no problem
## except when it is 'a'
## 2 element list of 2 kinds:
### 1. alternate char for independent/dependent vowel
### 2. consonant + halant
if len(offs)==2 and \
langinfo.is_vowel_offset(offs[0]):
### 1. alternate char for independent/dependent vowel
## if previous is a consonant, then use the dependent vowel
if len(solution)>0 and langinfo.is_halanta(solution[-1],lang):
offs=[offs[1]] ## dependent vowel
else:
offs=[offs[0]] ## independent vowel
c=''.join([ langinfo.offset_to_char(x,lang) for x in offs ])
match=(i,c)
elif len(itrans)==1: ## unknown character
match=(i,itrans)
elif i<len(text) and (i-start)<MAXCODE+1: ## continue matching till MAXCODE length substring
i=i+1
continue
else:
solution.extend(match[1])
# start=i-1
start=match[0]
i=start
match=None
# print('match done')
# print('match: {}'.format(match))
i=i+1
### flush matches
if match is not None:
solution.extend(match[1])
#### post-processing
        ## delete unnecessary halants
# print(''.join(solution))
temp_out=list(''.join(solution))
rem_indices=[]
for i in range(len(temp_out)-1):
if langinfo.is_halanta(temp_out[i],lang) and \
(langinfo.is_vowel_sign(temp_out[i+1],lang) \
or langinfo.is_nukta(temp_out[i+1],lang) \
or temp_out[i+1]==langinfo.offset_to_char(0x7f,lang)):
rem_indices.append(i)
# if temp_out[i]==langinfo.offset_to_char(0x7f,lang):
# rem_indices.append(i)
for i in reversed(rem_indices):
temp_out.pop(i)
out=''.join(temp_out)
## delete schwa placeholder
out=out.replace(langinfo.offset_to_char(0x7f,lang),'')
return out
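# Illustrative usage sketch (added for clarity; not part of the original module). It
# assumes the library resources have been loaded so that init() has already run:
#
#     UnicodeIndicTransliterator.transliterate(text, 'hi', 'ta')   # Devanagari -> Tamil script
#     ItransTransliterator.to_itrans(text, 'hi')                   # Devanagari -> ITRANS romanization
#     ItransTransliterator.from_itrans('namaste', 'hi')            # ITRANS -> Devanagari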
if __name__ == '__main__':
if len(sys.argv)<4:
print("Usage: python unicode_transliterate.py <command> <infile> <outfile> <src_language> <tgt_language>")
sys.exit(1)
if sys.argv[1]=='transliterate':
src_language=sys.argv[4]
tgt_language=sys.argv[5]
with open(sys.argv[2],'r', encoding='utf-8') as ifile:
with open(sys.argv[3],'w', encoding='utf-8') as ofile:
for line in ifile.readlines():
transliterated_line=UnicodeIndicTransliterator.transliterate(line,src_language,tgt_language)
ofile.write(transliterated_line)
elif sys.argv[1]=='romanize':
language=sys.argv[4]
### temp fix to replace anusvara with corresponding nasal
#r1_nasal=re.compile(ur'\u0902([\u0915-\u0918])')
#r2_nasal=re.compile(ur'\u0902([\u091a-\u091d])')
#r3_nasal=re.compile(ur'\u0902([\u091f-\u0922])')
#r4_nasal=re.compile(ur'\u0902([\u0924-\u0927])')
#r5_nasal=re.compile(ur'\u0902([\u092a-\u092d])')
with open(sys.argv[2],'r', encoding='utf-8') as ifile:
with open(sys.argv[3],'w', encoding='utf-8') as ofile:
for line in ifile.readlines():
### temp fix to replace anusvara with corresponding nasal
#line=r1_nasal.sub(u'\u0919\u094D\\1',line)
#line=r2_nasal.sub(u'\u091e\u094D\\1',line)
#line=r3_nasal.sub(u'\u0923\u094D\\1',line)
#line=r4_nasal.sub(u'\u0928\u094D\\1',line)
#line=r5_nasal.sub(u'\u092e\u094D\\1',line)
transliterated_line=ItransTransliterator.to_itrans(line,language)
                    ## temp fix to replace 'ph' with 'f' to match the Urdu transliteration scheme
transliterated_line=transliterated_line.replace('ph','f')
ofile.write(transliterated_line)
elif sys.argv[1]=='indicize':
language=sys.argv[4]
with open(sys.argv[2],'r', encoding='utf-8') as ifile:
with open(sys.argv[3],'w', encoding='utf-8') as ofile:
for line in ifile.readlines():
transliterated_line=ItransTransliterator.from_itrans(line,language)
ofile.write(transliterated_line)
|
tensorflow/python/tools/module_util.py | KosingZhu/tensorflow | 190,993 | 12781575 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for modules."""
import os
import six
if six.PY2:
import imp # pylint: disable=g-import-not-at-top
else:
import importlib # pylint: disable=g-import-not-at-top
def get_parent_dir(module):
return os.path.abspath(os.path.join(os.path.dirname(module.__file__), ".."))
def get_parent_dir_for_name(module_name):
"""Get parent directory for module with the given name.
Args:
module_name: Module name for e.g.
tensorflow_estimator.python.estimator.api._v1.estimator.
Returns:
Path to the parent directory if module is found and None otherwise.
Given example above, it should return:
/pathtoestimator/tensorflow_estimator/python/estimator/api/_v1.
"""
name_split = module_name.split(".")
if not name_split:
return None
if six.PY2:
try:
spec = imp.find_module(name_split[0])
except ImportError:
return None
if not spec:
return None
base_path = spec[1]
else:
try:
spec = importlib.util.find_spec(name_split[0])
except ValueError:
return None
if not spec or not spec.origin:
return None
base_path = os.path.dirname(spec.origin)
return os.path.join(base_path, *name_split[1:-1])
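# Illustrative usage (added; the module name below is just an example):
# get_parent_dir_for_name('tensorflow.python.tools') resolves the importable
# top-level package 'tensorflow' and returns '<tensorflow package dir>/python',
# or None if the package cannot be found.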
|
tests/qos/conftest.py | dmytroxshevchuk/sonic-mgmt | 132 | 12781592 | <gh_stars>100-1000
from .args.qos_sai_args import add_qos_sai_args
from .args.buffer_args import add_dynamic_buffer_calculation_args
# QoS pytest arguments
def pytest_addoption(parser):
'''
Adds option to QoS pytest
Args:
parser: pytest parser object
Returns:
None
'''
add_qos_sai_args(parser)
add_dynamic_buffer_calculation_args(parser)
|
scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_daemon_server.py | timgates42/trex-core | 956 | 12781603 | #!/usr/bin/python
import outer_packages
import daemon
from trex_server import do_main_program, trex_parser
import CCustomLogger
import logging
import time
import sys
import os, errno
import grp
import signal
from daemon import runner
from extended_daemon_runner import ExtendedDaemonRunner
import lockfile
import errno
class TRexServerApp(object):
def __init__(self):
TRexServerApp.create_working_dirs()
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/tty' # All standard prints will come up from this source.
self.stderr_path = "/var/log/trex/trex_daemon_server.log" # All log messages will come up from this source
self.pidfile_path = '/var/run/trex/trex_daemon_server.pid'
self.pidfile_timeout = 5 # timeout in seconds
def run(self):
do_main_program()
@staticmethod
def create_working_dirs():
if not os.path.exists('/var/log/trex'):
os.mkdir('/var/log/trex')
if not os.path.exists('/var/run/trex'):
os.mkdir('/var/run/trex')
def main ():
trex_app = TRexServerApp()
# setup the logger
default_log_path = '/var/log/trex/trex_daemon_server.log'
try:
CCustomLogger.setup_daemon_logger('TRexServer', default_log_path)
logger = logging.getLogger('TRexServer')
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s")
handler = logging.FileHandler("/var/log/trex/trex_daemon_server.log")
logger.addHandler(handler)
except EnvironmentError, e:
if e.errno == errno.EACCES: # catching permission denied error
print "Launching user must have sudo privileges in order to run TRex daemon.\nTerminating daemon process."
exit(-1)
daemon_runner = ExtendedDaemonRunner(trex_app, trex_parser)
#This ensures that the logger file handle does not get closed during daemonization
daemon_runner.daemon_context.files_preserve=[handler.stream]
try:
if not set(['start', 'stop']).isdisjoint(set(sys.argv)):
print "Logs are saved at: {log_path}".format( log_path = default_log_path )
daemon_runner.do_action()
except lockfile.LockTimeout as inst:
logger.error(inst)
print inst
print """
Please try again once the timeout has been reached.
If this error continues, consider killing the process manually and restart the daemon."""
if __name__ == "__main__":
main()
|
streaming-mqtt/python-tests/tests.py | JacopoCastello/bahir | 337 | 12781610 | <gh_stars>100-1000
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import time
import random
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.context import SparkConf, SparkContext, RDD
from pyspark.streaming.context import StreamingContext
from pyspark.streaming.tests import PySparkStreamingTestCase
from mqtt import MQTTUtils
class MQTTStreamTests(PySparkStreamingTestCase):
timeout = 20 # seconds
duration = 1
def setUp(self):
super(MQTTStreamTests, self).setUp()
MQTTTestUtilsClz = self.ssc._jvm.java.lang.Thread.currentThread().getContextClassLoader() \
.loadClass("org.apache.spark.streaming.mqtt.MQTTTestUtils")
self._MQTTTestUtils = MQTTTestUtilsClz.newInstance()
self._MQTTTestUtils.setup()
def tearDown(self):
if self._MQTTTestUtils is not None:
self._MQTTTestUtils.teardown()
self._MQTTTestUtils = None
super(MQTTStreamTests, self).tearDown()
def _randomTopic(self):
return "topic-%d" % random.randint(0, 10000)
def _startContext(self, topic):
# Start the StreamingContext and also collect the result
stream = MQTTUtils.createStream(self.ssc, "tcp://" + self._MQTTTestUtils.brokerUri(), topic)
result = []
def getOutput(_, rdd):
for data in rdd.collect():
result.append(data)
stream.foreachRDD(getOutput)
self.ssc.start()
return result
def test_mqtt_stream(self):
"""Test the Python MQTT stream API."""
sendData = "MQTT demo for spark streaming"
topic = self._randomTopic()
result = self._startContext(topic)
def retry():
self._MQTTTestUtils.publishData(topic, sendData)
# Because "publishData" sends duplicate messages, here we should use > 0
self.assertTrue(len(result) > 0)
self.assertEqual(sendData, result[0])
# Retry it because we don't know when the receiver will start.
self._retry_or_timeout(retry)
def _start_context_with_paired_stream(self, topics):
stream = MQTTUtils.createPairedStream(self.ssc, "tcp://" + self._MQTTTestUtils.brokerUri(), topics)
# Keep a set because records can potentially be repeated.
result = set()
def getOutput(_, rdd):
for data in rdd.collect():
result.add(data)
stream.foreachRDD(getOutput)
self.ssc.start()
return result
def test_mqtt_pair_stream(self):
"""Test the Python MQTT stream API with multiple topics."""
data_records = ["random string 1", "random string 2", "random string 3"]
topics = [self._randomTopic(), self._randomTopic(), self._randomTopic()]
topics_and_records = zip(topics, data_records)
result = self._start_context_with_paired_stream(topics)
def retry():
for topic, data_record in topics_and_records:
self._MQTTTestUtils.publishData(topic, data_record)
# Sort the received records as they might be out of order.
self.assertEqual(topics_and_records, sorted(result, key=lambda x: x[1]))
# Retry it because we don't know when the receiver will start.
self._retry_or_timeout(retry)
def _retry_or_timeout(self, test_func):
start_time = time.time()
while True:
try:
test_func()
break
except:
if time.time() - start_time > self.timeout:
raise
time.sleep(0.01)
if __name__ == "__main__":
unittest.main()
|
desktop/core/ext-py/cx_Oracle-6.4.1/samples/tutorial/query_one.py | yetsun/hue | 5,079 | 12781613 | #------------------------------------------------------------------------------
# query_one.py (Section 3.2)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Copyright 2017, 2018, Oracle and/or its affiliates. All rights reserved.
#------------------------------------------------------------------------------
from __future__ import print_function
import cx_Oracle
import db_config
con = cx_Oracle.connect(db_config.user, db_config.pw, db_config.dsn)
cur = con.cursor()
cur.execute("select * from dept order by deptno")
row = cur.fetchone()
print(row)
row = cur.fetchone()
print(row)
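# Illustrative output (added; actual rows depend on the tutorial's sample data), e.g.:
#   (10, 'ACCOUNTING', 'NEW YORK')
#   (20, 'RESEARCH', 'DALLAS')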
|
running_modes/reinforcement_learning/core_reinforcement_learning.py | lilleswing/Reinvent-1 | 183 | 12781637 | <gh_stars>100-1000
import time
import numpy as np
import torch
from reinvent_chemistry.utils import get_indices_of_unique_smiles
from reinvent_models.lib_invent.enums.generative_model_regime import GenerativeModelRegimeEnum
from reinvent_models.model_factory.configurations.model_configuration import ModelConfiguration
from reinvent_models.model_factory.enums.model_type_enum import ModelTypeEnum
from reinvent_models.model_factory.generative_model import GenerativeModel
from reinvent_models.model_factory.generative_model_base import GenerativeModelBase
from reinvent_scoring import FinalSummary
from reinvent_scoring.scoring.diversity_filters.reinvent_core.base_diversity_filter import BaseDiversityFilter
from reinvent_scoring.scoring.function.base_scoring_function import BaseScoringFunction
from running_modes.configurations import ReinforcementLearningConfiguration
from running_modes.constructors.base_running_mode import BaseRunningMode
from running_modes.reinforcement_learning.inception import Inception
from running_modes.reinforcement_learning.logging.base_reinforcement_logger import BaseReinforcementLogger
from running_modes.reinforcement_learning.margin_guard import MarginGuard
from running_modes.utils.general import to_tensor
class CoreReinforcementRunner(BaseRunningMode):
def __init__(self, critic: GenerativeModelBase, actor: GenerativeModelBase,
configuration: ReinforcementLearningConfiguration,
scoring_function: BaseScoringFunction, diversity_filter: BaseDiversityFilter,
inception: Inception, logger: BaseReinforcementLogger):
self._prior = critic
self._agent = actor
self._scoring_function = scoring_function
self._diversity_filter = diversity_filter
self.config = configuration
self._logger = logger
self._inception = inception
self._margin_guard = MarginGuard(self)
self._optimizer = torch.optim.Adam(self._agent.get_network_parameters(), lr=self.config.learning_rate)
def run(self):
self._logger.log_message("starting an RL run")
start_time = time.time()
self._disable_prior_gradients()
for step in range(self.config.n_steps):
seqs, smiles, agent_likelihood = self._sample_unique_sequences(self._agent, self.config.batch_size)
# switch signs
agent_likelihood = -agent_likelihood
prior_likelihood = -self._prior.likelihood(seqs)
score_summary: FinalSummary = self._scoring_function.get_final_score_for_step(smiles, step)
score = self._diversity_filter.update_score(score_summary, step)
augmented_likelihood = prior_likelihood + self.config.sigma * to_tensor(score)
loss = torch.pow((augmented_likelihood - agent_likelihood), 2)
loss, agent_likelihood = self._inception_filter(self._agent, loss, agent_likelihood, prior_likelihood,
self.config.sigma, smiles, score)
loss = loss.mean()
self._optimizer.zero_grad()
loss.backward()
self._optimizer.step()
            self._stats_and_checkpoint(score, start_time, step, smiles, score_summary,
agent_likelihood, prior_likelihood,
augmented_likelihood)
self._logger.save_final_state(self._agent, self._diversity_filter)
self._logger.log_out_input_configuration()
self._logger.log_out_inception(self._inception)
def _disable_prior_gradients(self):
# There might be a more elegant way of disabling gradients
for param in self._prior.get_network_parameters():
param.requires_grad = False
    def _stats_and_checkpoint(self, score, start_time, step, smiles, score_summary: FinalSummary,
agent_likelihood, prior_likelihood, augmented_likelihood):
self._margin_guard.adjust_margin(step)
mean_score = np.mean(score)
self._margin_guard.store_run_stats(agent_likelihood, prior_likelihood, augmented_likelihood, score)
self._logger.timestep_report(start_time, self.config.n_steps, step, smiles,
mean_score, score_summary, score,
agent_likelihood, prior_likelihood, augmented_likelihood, self._diversity_filter)
self._logger.save_checkpoint(step, self._diversity_filter, self._agent)
def _sample_unique_sequences(self, agent, batch_size):
seqs, smiles, agent_likelihood = agent.sample(batch_size)
unique_idxs = get_indices_of_unique_smiles(smiles)
seqs_unique = seqs[unique_idxs]
smiles_np = np.array(smiles)
smiles_unique = smiles_np[unique_idxs]
agent_likelihood_unique = agent_likelihood[unique_idxs]
return seqs_unique, smiles_unique, agent_likelihood_unique
def _inception_filter(self, agent, loss, agent_likelihood, prior_likelihood, sigma, smiles, score):
exp_smiles, exp_scores, exp_prior_likelihood = self._inception.sample()
if len(exp_smiles) > 0:
exp_agent_likelihood = -agent.likelihood_smiles(exp_smiles)
exp_augmented_likelihood = exp_prior_likelihood + sigma * exp_scores
exp_loss = torch.pow((to_tensor(exp_augmented_likelihood) - exp_agent_likelihood), 2)
loss = torch.cat((loss, exp_loss), 0)
agent_likelihood = torch.cat((agent_likelihood, exp_agent_likelihood), 0)
self._inception.add(smiles, score, prior_likelihood)
return loss, agent_likelihood
def reset(self, reset_countdown=0):
model_type_enum = ModelTypeEnum()
model_regime = GenerativeModelRegimeEnum()
actor_config = ModelConfiguration(model_type_enum.DEFAULT, model_regime.TRAINING,
self.config.agent)
self._agent = GenerativeModel(actor_config)
self._optimizer = torch.optim.Adam(self._agent.get_network_parameters(), lr=self.config.learning_rate)
self._logger.log_message("Resetting Agent")
self._logger.log_message(f"Adjusting sigma to: {self.config.sigma}")
return reset_countdown
|
pixelssl/task_template/__init__.py | charlesCXK/PixelSSL | 223 | 12781641 | from . import func as func_template
from . import data as data_template
from . import model as model_template
from . import criterion as criterion_template
from . import proxy as proxy_template
__all__ = [
'func_template',
'data_template',
'model_template',
'criterion_template',
'proxy_template',
] |
Python3/1302.py | rakhi2001/ecom7 | 854 | 12781662 | <gh_stars>100-1000
__________________________________________________________________________________________________
sample 72 ms submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def deepestLeavesSum(self, root: TreeNode) -> int:
pre = []
queue = [root]
while queue:
pre, queue = queue, [leaf for q in queue for leaf in [q.left, q.right] if leaf]
return sum([p.val for p in pre])
__________________________________________________________________________________________________
sample 76 ms submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def deepestLeavesSum(self, root: TreeNode) -> int:
layer = [root]
res = root.val
while layer:
next_layer = []
next_res = 0
for node in layer:
next_res += node.val
if node.left:
next_layer.append(node.left)
if node.right:
next_layer.append(node.right)
layer = next_layer
res = next_res
return res
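# Illustrative check (added; not part of either submission). For the problem's example
# tree [1,2,3,4,5,None,6,7,None,None,None,None,8] the deepest leaves are 7 and 8, so
# both solutions above return 15.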
__________________________________________________________________________________________________
|
HOG-Features/scikithog.py | saneravi/ML_Stuff | 209 | 12781674 | #!/usr/bin/env python
"""Calculate HOG features for an image"""
import os
import matplotlib.pyplot as plt
from hog_features import image2pixelarray
from skimage import exposure
from skimage.feature import hog
def main(filename):
"""
Orchestrate the HOG feature calculation
Parameters
----------
filename : str
"""
image = image2pixelarray(filename)
fd, hog_image = hog(
image,
orientations=8,
pixels_per_cell=(16, 16),
cells_per_block=(1, 1),
visualise=True,
)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)
ax1.axis("off")
ax1.imshow(image, cmap=plt.cm.gray)
ax1.set_title("Input image")
ax1.set_adjustable("box-forced")
# Rescale histogram for better display
hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
ax2.axis("off")
ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
ax2.set_title("Histogram of Oriented Gradients")
    ax2.set_adjustable("box-forced")
plt.show()
def is_valid_file(parser, arg):
"""
Check if arg is a valid file that already exists on the file system.
Parameters
----------
parser : argparse object
arg : str
Returns
-------
arg
"""
arg = os.path.abspath(arg)
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return arg
def get_parser():
"""Get parser object for scikithog"""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(
description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"-f",
"--file",
dest="filename",
type=lambda x: is_valid_file(parser, x),
help="write report to FILE",
required=True,
metavar="FILE",
)
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
main(args.filename)
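# Example invocation (illustrative): python scikithog.py -f path/to/image.png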
|
cactus/listener/__init__.py | danielchasehooper/Cactus | 1,048 | 12781766 | <filename>cactus/listener/__init__.py
import logging
from cactus.listener.polling import PollingListener
logger = logging.getLogger(__name__)
try:
from cactus.listener.mac import FSEventsListener as Listener
except (ImportError, OSError):
logger.debug("Failed to load FSEventsListener, falling back to PollingListener", exc_info=True)
Listener = PollingListener
|
python/RawNet2/dataloader.py | ishine/RawNet | 199 | 12781810 | <reponame>ishine/RawNet<gh_stars>100-1000
import numpy as np
import soundfile as sf
from torch.utils import data
class Dataset_VoxCeleb2(data.Dataset):
def __init__(self, list_IDs, base_dir, nb_samp = 0, labels = {}, cut = True, return_label = True, norm_scale = True):
'''
self.list_IDs : list of strings (each string: utt key)
self.labels : dictionary (key: utt key, value: label integer)
self.nb_samp : integer, the number of timesteps for each mini-batch
cut : (boolean) adjust utterance duration for mini-batch construction
return_label : (boolean)
        norm_scale : (boolean) normalize scale like the SincNet github repo
'''
self.list_IDs = list_IDs
self.nb_samp = nb_samp
self.base_dir = base_dir
self.labels = labels
self.cut = cut
self.return_label = return_label
self.norm_scale = norm_scale
if self.cut and self.nb_samp == 0: raise ValueError('when adjusting utterance length, "nb_samp" should be input')
def __len__(self):
return len(self.list_IDs)
def __getitem__(self, index):
ID = self.list_IDs[index]
try:
X, _ = sf.read(self.base_dir+ID)
X = X.astype(np.float64)
except:
raise ValueError('%s'%ID)
if self.norm_scale:
X = self._normalize_scale(X).astype(np.float32)
X = X.reshape(1,-1) #because of LayerNorm for the input
if self.cut:
nb_time = X.shape[1]
if nb_time > self.nb_samp:
start_idx = np.random.randint(low = 0, high = nb_time - self.nb_samp)
X = X[:, start_idx : start_idx + self.nb_samp][0]
elif nb_time < self.nb_samp:
nb_dup = int(self.nb_samp / nb_time) + 1
X = np.tile(X, (1, nb_dup))[:, :self.nb_samp][0]
else:
X = X[0]
if not self.return_label:
return X
y = self.labels[ID.split('/')[0]]
return X, y
def _normalize_scale(self, x):
'''
        Normalize sample scale like SincNet does.
'''
return x/np.max(np.abs(x))
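# Illustrative usage sketch (added for clarity; the paths, keys and sample count below
# are made-up placeholders, not part of the original training recipe):
#
#     trainset = Dataset_VoxCeleb2(list_IDs=['id00012/21Uxsk56VDQ/00001.wav'],
#                                  labels={'id00012': 0},
#                                  nb_samp=59049,
#                                  base_dir='/path/to/VoxCeleb2/dev/wav/')
#     x, y = trainset[0]   # x: np.ndarray of shape (59049,), y: integer speaker label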
class TA_Dataset_VoxCeleb2(data.Dataset):
def __init__(self, list_IDs, base_dir, nb_samp = 0, window_size = 0, labels = {}, cut = True, return_label = True, norm_scale = True):
'''
self.list_IDs : list of strings (each string: utt key)
self.labels : dictionary (key: utt key, value: label integer)
self.nb_samp : integer, the number of timesteps for each mini-batch
cut : (boolean) adjust utterance duration for mini-batch construction
return_label : (boolean)
norm_scale : (boolean) normalize scale alike SincNet github repo
'''
self.list_IDs = list_IDs
self.window_size = window_size
self.nb_samp = nb_samp
self.base_dir = base_dir
self.labels = labels
self.cut = cut
self.return_label = return_label
self.norm_scale = norm_scale
if self.cut and self.nb_samp == 0: raise ValueError('when adjusting utterance length, "nb_samp" should be input')
def __len__(self):
return len(self.list_IDs)
def __getitem__(self, index):
ID = self.list_IDs[index]
try:
X, _ = sf.read(self.base_dir+ID)
X = X.astype(np.float64)
except:
raise ValueError('%s'%ID)
if self.norm_scale:
X = self._normalize_scale(X).astype(np.float32)
X = X.reshape(1,-1)
list_X = []
nb_time = X.shape[1]
if nb_time < self.nb_samp:
nb_dup = int(self.nb_samp / nb_time) + 1
list_X.append(np.tile(X, (1, nb_dup))[:, :self.nb_samp][0])
elif nb_time > self.nb_samp:
step = self.nb_samp - self.window_size
iteration = int( (nb_time - self.window_size) / step ) + 1
for i in range(iteration):
if i == 0:
list_X.append(X[:, :self.nb_samp][0])
elif i < iteration - 1:
list_X.append(X[:, i*step : i*step + self.nb_samp][0])
else:
list_X.append(X[:, -self.nb_samp:][0])
else :
list_X.append(X[0])
if not self.return_label:
return list_X
y = self.labels[ID.split('/')[0]]
return list_X, y
def _normalize_scale(self, x):
'''
        Normalize sample scale like SincNet does.
'''
return x/np.max(np.abs(x)) |
website/discovery/views.py | gaybro8777/osf.io | 628 | 12781812 | <filename>website/discovery/views.py
from framework.flask import redirect
def redirect_activity_to_search(**kwargs):
return redirect('/search/')
|
lib/models/mixformer/__init__.py | SangbumChoi/MixFormer | 103 | 12781829 | from .mixformer import build_mixformer
from .mixformer_online import build_mixformer_online_score |
interpretation/deepseismic_interpretation/dutchf3/tests/test_dataloaders.py | elmajdma/seismic-deeplearning | 270 | 12781849 | <filename>interpretation/deepseismic_interpretation/dutchf3/tests/test_dataloaders.py<gh_stars>100-1000
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Tests for TrainLoader and TestLoader classes when overriding the file names of the seismic and label data.
"""
import tempfile
import numpy as np
from deepseismic_interpretation.dutchf3.data import (
get_test_loader,
TrainPatchLoaderWithDepth,
TrainSectionLoaderWithDepth,
)
import pytest
import yacs.config
import os
# npy files dimensions
IL = 5
XL = 10
D = 8
N_CLASSES = 2
CONFIG_FILE = "./experiments/interpretation/dutchf3_patch/configs/unet.yaml"
with open(CONFIG_FILE, "rt") as f_read:
config = yacs.config.load_cfg(f_read)
def generate_npy_files(path, data):
np.save(path, data)
def assert_dimensions(test_section_loader):
assert test_section_loader.labels.shape[0] == IL
assert test_section_loader.labels.shape[1] == XL
assert test_section_loader.labels.shape[2] == D
# Because add_section_depth_channels method add
# 2 extra channels to a 1 channel section
assert test_section_loader.seismic.shape[0] == IL
assert test_section_loader.seismic.shape[2] == XL
assert test_section_loader.seismic.shape[3] == D
def test_TestSectionLoader_should_load_data_from_test1_set():
with open(CONFIG_FILE, "rt") as f_read:
config = yacs.config.load_cfg(f_read)
with tempfile.TemporaryDirectory() as data_dir:
os.makedirs(os.path.join(data_dir, "test_once"))
os.makedirs(os.path.join(data_dir, "splits"))
seimic = np.zeros([IL, XL, D])
generate_npy_files(os.path.join(data_dir, "test_once", "test1_seismic.npy"), seimic)
labels = np.ones([IL, XL, D])
generate_npy_files(os.path.join(data_dir, "test_once", "test1_labels.npy"), labels)
txt_path = os.path.join(data_dir, "splits", "section_test1.txt")
open(txt_path, "a").close()
TestSectionLoader = get_test_loader(config)
config.merge_from_list(["DATASET.ROOT", data_dir])
test_set = TestSectionLoader(config, split="test1")
assert_dimensions(test_set)
def test_TestSectionLoader_should_load_data_from_test2_set():
with tempfile.TemporaryDirectory() as data_dir:
os.makedirs(os.path.join(data_dir, "test_once"))
os.makedirs(os.path.join(data_dir, "splits"))
seimic = np.zeros([IL, XL, D])
generate_npy_files(os.path.join(data_dir, "test_once", "test2_seismic.npy"), seimic)
A = np.load(os.path.join(data_dir, "test_once", "test2_seismic.npy"))
labels = np.ones([IL, XL, D])
generate_npy_files(os.path.join(data_dir, "test_once", "test2_labels.npy"), labels)
txt_path = os.path.join(data_dir, "splits", "section_test2.txt")
open(txt_path, "a").close()
TestSectionLoader = get_test_loader(config)
config.merge_from_list(["DATASET.ROOT", data_dir])
test_set = TestSectionLoader(config, split="test2")
assert_dimensions(test_set)
def test_TestSectionLoader_should_load_data_from_path_override_data():
with tempfile.TemporaryDirectory() as data_dir:
os.makedirs(os.path.join(data_dir, "volume_name"))
os.makedirs(os.path.join(data_dir, "splits"))
seimic = np.zeros([IL, XL, D])
generate_npy_files(os.path.join(data_dir, "volume_name", "seismic.npy"), seimic)
labels = np.ones([IL, XL, D])
generate_npy_files(os.path.join(data_dir, "volume_name", "labels.npy"), labels)
txt_path = os.path.join(data_dir, "splits", "section_volume_name.txt")
open(txt_path, "a").close()
TestSectionLoader = get_test_loader(config)
config.merge_from_list(["DATASET.ROOT", data_dir])
test_set = TestSectionLoader(
config,
split="volume_name",
is_transform=True,
augmentations=None,
seismic_path=os.path.join(data_dir, "volume_name", "seismic.npy"),
label_path=os.path.join(data_dir, "volume_name", "labels.npy"),
)
assert_dimensions(test_set)
def test_TrainPatchLoaderWithDepth_should_fail_on_missing_seismic_file(tmpdir):
"""
    Check for an exception when the seismic file is missing
"""
# Setup
os.makedirs(os.path.join(tmpdir, "volume_name"))
os.makedirs(os.path.join(tmpdir, "splits"))
labels = np.ones([IL, XL, D])
generate_npy_files(os.path.join(tmpdir, "volume_name", "labels.npy"), labels)
txt_path = os.path.join(tmpdir, "splits", "patch_volume_name.txt")
open(txt_path, "a").close()
config.merge_from_list(["DATASET.ROOT", str(tmpdir)])
# Test
with pytest.raises(Exception) as excinfo:
_ = TrainPatchLoaderWithDepth(
config,
split="volume_name",
is_transform=True,
augmentations=None,
seismic_path=os.path.join(tmpdir, "volume_name", "seismic.npy"),
label_path=os.path.join(tmpdir, "volume_name", "labels.npy"),
)
assert "does not exist" in str(excinfo.value)
def test_TrainPatchLoaderWithDepth_should_fail_on_missing_label_file(tmpdir):
"""
    Check for an exception when the label file is missing
"""
# Setup
os.makedirs(os.path.join(tmpdir, "volume_name"))
os.makedirs(os.path.join(tmpdir, "splits"))
seimic = np.zeros([IL, XL, D])
generate_npy_files(os.path.join(tmpdir, "volume_name", "seismic.npy"), seimic)
txt_path = os.path.join(tmpdir, "splits", "patch_volume_name.txt")
open(txt_path, "a").close()
config.merge_from_list(["DATASET.ROOT", str(tmpdir)])
# Test
with pytest.raises(Exception) as excinfo:
_ = TrainPatchLoaderWithDepth(
config,
split="volume_name",
is_transform=True,
augmentations=None,
seismic_path=os.path.join(tmpdir, "volume_name", "seismic.npy"),
label_path=os.path.join(tmpdir, "volume_name", "labels.npy"),
)
assert "does not exist" in str(excinfo.value)
def test_TrainPatchLoaderWithDepth_should_load_with_one_train_and_label_file(tmpdir):
"""
Check for successful class instantiation w/ single npy file for train & label
"""
# Setup
os.makedirs(os.path.join(tmpdir, "volume_name"))
os.makedirs(os.path.join(tmpdir, "splits"))
seimic = np.zeros([IL, XL, D])
generate_npy_files(os.path.join(tmpdir, "volume_name", "seismic.npy"), seimic)
labels = np.ones([IL, XL, D])
generate_npy_files(os.path.join(tmpdir, "volume_name", "labels.npy"), labels)
txt_dir = os.path.join(tmpdir, "splits")
txt_path = os.path.join(txt_dir, "patch_volume_name.txt")
open(txt_path, "a").close()
config.merge_from_list(["DATASET.ROOT", str(tmpdir)])
# Test
train_set = TrainPatchLoaderWithDepth(
config,
split="volume_name",
is_transform=True,
augmentations=None,
seismic_path=os.path.join(tmpdir, "volume_name", "seismic.npy"),
label_path=os.path.join(tmpdir, "volume_name", "labels.npy"),
)
assert train_set.labels.shape == (IL, XL, D + 2 * config.TRAIN.PATCH_SIZE)
assert train_set.seismic.shape == (IL, XL, D + 2 * config.TRAIN.PATCH_SIZE)
|
ISMLnextGen/dynamicCoro.py | Ravenclaw-OIer/ISML_auto_voter | 128 | 12781851 | import asyncio
from threading import Thread
async def production_task():
i = 0
while 1:
        # Each iteration registers another consumption coroutine onto the loop running in the
        # worker thread, so thread_loop keeps accumulating endless tasks that print their index i.
        asyncio.run_coroutine_threadsafe(consumption(i), thread_loop)
        # note: run_coroutine_threadsafe must only be used with an event loop running in another thread
        await asyncio.sleep(2)  # await is required here
i += 1
async def consumption(i):
while True:
print("我是第{}任务".format(i))
await asyncio.sleep(1)
def start_loop(loop):
    # run the event loop; the loop to run is passed in as an argument
asyncio.set_event_loop(loop)
loop.run_forever()
# consumer loop
thread_loop = asyncio.new_event_loop()  # create a new event loop
run_loop_thread = Thread(target=start_loop, args=(thread_loop,))  # run this event loop in a separate thread so it does not block the main thread
run_loop_thread.start()  # start the thread; the coroutine event loop starts running with it
# producer loop
advocate_loop = asyncio.get_event_loop()  # the production task coroutine is registered on this loop
advocate_loop.run_until_complete(production_task())  # run this loop
|
tf_quant_finance/math/qmc/__init__.py | slowy07/tf-quant-finance | 3,138 | 12781861 | # Lint as: python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RQMC support."""
from tf_quant_finance.math.qmc import utils
from tf_quant_finance.math.qmc.digital_net import digital_net_sample
from tf_quant_finance.math.qmc.digital_net import random_digital_shift
from tf_quant_finance.math.qmc.digital_net import random_scrambling_matrices
from tf_quant_finance.math.qmc.digital_net import scramble_generating_matrices
from tf_quant_finance.math.qmc.lattice_rule import lattice_rule_sample
from tf_quant_finance.math.qmc.lattice_rule import random_scrambling_vectors
from tf_quant_finance.math.qmc.sobol import sobol_generating_matrices
from tf_quant_finance.math.qmc.sobol import sobol_sample
from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import
_allowed_symbols = [
'digital_net_sample',
'lattice_rule_sample',
'random_digital_shift',
'random_scrambling_matrices',
'random_scrambling_vectors',
'scramble_generating_matrices',
'sobol_generating_matrices',
'sobol_sample',
'utils',
]
remove_undocumented(__name__, _allowed_symbols)
|
bo/pp/pp_gp_my_distmat.py | ZachZhu7/banana-git | 167 | 12781865 | <gh_stars>100-1000
"""
Classes for GP models without any PP backend, using a given distance matrix.
"""
from argparse import Namespace
import time
import copy
import numpy as np
from scipy.spatial.distance import cdist
from bo.pp.pp_core import DiscPP
from bo.pp.gp.gp_utils import kern_exp_quad, kern_matern32, \
get_cholesky_decomp, solve_upper_triangular, solve_lower_triangular, \
sample_mvn, squared_euc_distmat, kern_distmat
from bo.util.print_utils import suppress_stdout_stderr
class MyGpDistmatPP(DiscPP):
""" GPs using a kernel specified by a given distance matrix, without any PP
backend """
def __init__(self, data=None, modelp=None, printFlag=True):
""" Constructor """
self.set_model_params(modelp)
self.set_data(data)
self.set_model()
super(MyGpDistmatPP,self).__init__()
if printFlag:
self.print_str()
def set_model_params(self, modelp):
""" Set self.modelp """
if modelp is None:
pass #TODO
self.modelp = modelp
def set_data(self, data):
""" Set self.data """
if data is None:
pass #TODO
self.data_init = copy.deepcopy(data)
self.data = copy.deepcopy(self.data_init)
def set_model(self):
""" Set GP regression model """
self.model = self.get_model()
def get_model(self):
""" Returns model object """
return None
def infer_post_and_update_samples(self, print_result=False):
""" Update self.sample_list """
self.sample_list = [Namespace(ls=self.modelp.kernp.ls,
alpha=self.modelp.kernp.alpha,
sigma=self.modelp.kernp.sigma)]
if print_result: self.print_inference_result()
def get_distmat(self, xmat1, xmat2):
""" Get distance matrix """
#return squared_euc_distmat(xmat1, xmat2, .5)
from data import Data
self.distmat = Data.generate_distance_matrix
#print('distmat')
#print(self.distmat(xmat1, xmat2, self.modelp.distance))
return self.distmat(xmat1, xmat2, self.modelp.distance)
def print_inference_result(self):
""" Print results of stan inference """
print('*ls pt est = '+str(self.sample_list[0].ls)+'.')
print('*alpha pt est = '+str(self.sample_list[0].alpha)+'.')
print('*sigma pt est = '+str(self.sample_list[0].sigma)+'.')
print('-----')
def sample_pp_post_pred(self, nsamp, input_list, full_cov=False):
""" Sample from posterior predictive of PP.
Inputs:
input_list - list of np arrays size=(-1,)
Returns:
list (len input_list) of np arrays (size=(nsamp,1))."""
samp = self.sample_list[0]
postmu, postcov = self.gp_post(self.data.X, self.data.y, input_list,
samp.ls, samp.alpha, samp.sigma, full_cov)
if full_cov:
ppred_list = list(sample_mvn(postmu, postcov, nsamp))
else:
ppred_list = list(np.random.normal(postmu.reshape(-1,),
postcov.reshape(-1,),
size=(nsamp, len(input_list))))
return list(np.stack(ppred_list).T), ppred_list
def sample_pp_pred(self, nsamp, input_list, lv=None):
""" Sample from predictive of PP for parameter lv.
Returns: list (len input_list) of np arrays (size (nsamp,1))."""
if lv is None:
lv = self.sample_list[0]
postmu, postcov = self.gp_post(self.data.X, self.data.y, input_list, lv.ls,
lv.alpha, lv.sigma)
pred_list = list(sample_mvn(postmu, postcov, 1)) ###TODO: sample from this mean nsamp times
return list(np.stack(pred_list).T), pred_list
def gp_post(self, x_train_list, y_train_arr, x_pred_list, ls, alpha, sigma,
full_cov=True):
""" Compute parameters of GP posterior """
kernel = lambda a, b, c, d: kern_distmat(a, b, c, d, self.get_distmat)
k11_nonoise = kernel(x_train_list, x_train_list, ls, alpha)
lmat = get_cholesky_decomp(k11_nonoise, sigma, 'try_first')
smat = solve_upper_triangular(lmat.T, solve_lower_triangular(lmat,
y_train_arr))
k21 = kernel(x_pred_list, x_train_list, ls, alpha)
mu2 = k21.dot(smat)
k22 = kernel(x_pred_list, x_pred_list, ls, alpha)
vmat = solve_lower_triangular(lmat, k21.T)
k2 = k22 - vmat.T.dot(vmat)
if full_cov is False:
k2 = np.sqrt(np.diag(k2))
return mu2, k2
# Utilities
def print_str(self):
""" Print a description string """
print('*MyGpDistmatPP with modelp='+str(self.modelp)+'.')
print('-----')
|
python/python-core/datetimes.py | josephobonyo/sigma_coding_youtube | 893 | 12781892 | # import our libraries
import time
from datetime import date
# get today's date
today = date.today()
print(today)
# create a custom date
future_date = date(2020, 1, 31)
print(future_date)
# let's create a time stamp
time_stamp = time.time()
print(time_stamp)
# create a date from a timestamp
date_stamp = date.fromtimestamp(time_stamp)
print(date_stamp)
# get components of a date
print(date_stamp.year)
print(date_stamp.month)
print(date_stamp.day)
# ------------------------- PART TWO --------------------------
from datetime import datetime, date, time
# create a date and a time
my_date = date(2019, 3, 22)
my_time = time(12, 30)
# create a datetime
my_datetime = datetime.combine(my_date, my_time)
print(my_datetime)
# get the different components
print(my_datetime.year)
print(my_datetime.month)
print(my_datetime.day)
print(my_datetime.hour)
print(my_datetime.minute)
|
sample_project/env/lib/python3.9/site-packages/fontTools/misc/roundTools.py | Istiakmorsalin/ML-Data-Science | 38,667 | 12781897 | """
Various round-to-integer helpers.
"""
import math
import functools
import logging
log = logging.getLogger(__name__)
__all__ = [
"noRound",
"otRound",
"maybeRound",
"roundFunc",
]
def noRound(value):
return value
def otRound(value):
"""Round float value to nearest integer towards ``+Infinity``.
The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_)
defines the required method for converting floating point values to
fixed-point. In particular it specifies the following rounding strategy:
for fractional values of 0.5 and higher, take the next higher integer;
for other fractional values, truncate.
This function rounds the floating-point value according to this strategy
in preparation for conversion to fixed-point.
Args:
value (float): The input floating-point value.
    Returns:
float: The rounded value.
"""
# See this thread for how we ended up with this implementation:
# https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
return int(math.floor(value + 0.5))
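# A few illustrative values (added for clarity): otRound(0.4) == 0, otRound(0.5) == 1,
# otRound(-0.5) == 0 and otRound(-1.5) == -1, i.e. ties round towards +Infinity.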
def maybeRound(v, tolerance, round=otRound):
rounded = round(v)
return rounded if abs(rounded - v) <= tolerance else v
def roundFunc(tolerance, round=otRound):
if tolerance < 0:
raise ValueError("Rounding tolerance must be positive")
if tolerance == 0:
return noRound
if tolerance >= .5:
return round
return functools.partial(maybeRound, tolerance=tolerance, round=round)
|
office365/sharepoint/tenant/administration/sharing_capabilities.py | wreiner/Office365-REST-Python-Client | 544 | 12781902 | class SharingCapabilities:
def __init__(self):
pass
Disabled = 0
ExternalUserSharingOnly = 1
ExternalUserAndGuestSharing = 2
ExistingExternalUserSharingOnly = 3
|
Classification/LibLinear/src/test/scripts/generate_test_data.py | em3ndez/tribuo | 1,091 | 12781942 | # Copyright (c) 2015-2020, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import os
def generate_data(mode='train', problem_type='binary'):
assert mode == 'train' or mode == 'test'
rng = np.random.RandomState(1)
if problem_type == 'binary':
labels = ['POS', 'NEG']
else:
labels = ['POS', 'NEG', 'NEU']
texts = ['aaa', 'bbb', 'ccc']
counts = {label: 0 for label in labels}
if mode == 'train':
n = 1000
else:
n = 100
lns = []
for i in range(n):
y = rng.choice(labels)
counts[y] += 1
x = rng.choice(texts)
lns.append('%s##%s\n' % (y, x))
print(counts)
with open('%s_input_%s.tribuo' % (mode, problem_type), 'w') as f:
for ln in lns:
f.write(ln)
def generate_models():
lltypes = [
'L2R_LR',
'L2R_L2LOSS_SVC_DUAL',
'L2R_L2LOSS_SVC',
'L2R_L1LOSS_SVC_DUAL',
'MCSVM_CS',
'L1R_L2LOSS_SVC',
'L1R_LR',
'L2R_LR_DUAL'
]
for lltype in lltypes:
cmd = './src/test/scripts/generate-model.sh %s %s %s %s' % (lltype, lltype, 'train_input_binary.tribuo', 'test_input_binary.tribuo')
print(cmd)
os.system(cmd)
# multiclass model
lltype = 'L2R_LR'
cmd = './src/test/scripts/generate-model.sh %s %s %s %s' % (lltype, lltype+'_multiclass', 'train_input_multiclass.tribuo', 'test_input_multiclass.tribuo')
print(cmd)
os.system(cmd)
if __name__ == '__main__':
generate_data(mode='train')
generate_data(mode='test')
generate_data(mode='train', problem_type='multiclass')
generate_data(mode='test', problem_type='multiclass')
generate_models()
|
parsers/api_check.py | Bassem95/Test26 | 105 | 12781950 | <reponame>Bassem95/Test26
# -*- coding: utf-8 -*-
#!/usr/bin/python
"""
"""
import requests
import time
from raven import Client
client = Client(
'https://aee9ceb609b549fe8a85339e69c74150:[email protected]/1223891')
key = "<KEY>"
def check_api(word):
query_string = { 'api-key': key, 'q': '"%s"' % word}
req = requests.get('https://api.nytimes.com/svc/search/v2/articlesearch.json', params=query_string, verify=False)
if req.status_code in set([429, 529, 504]):
time.sleep(50)
client.captureMessage("NYT API RATELIMIT")
return check_api(word)
if req.status_code == 500:
client.captureMessage("NYT API 500", extra={
'req':req,
'word': word,
})
return False
result = req.json()
num_results = len(result['response']['docs'])
return num_results < 2
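# Illustrative call (added): check_api('some phrase') returns True when the NYT article
# search finds fewer than two articles containing that exact phrase, False otherwise.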
|
hydrachain/examples/native/fungible/test_fungible_contract.py | bts/hydrachain | 406 | 12781954 | from ethereum import tester
import hydrachain.native_contracts as nc
from fungible_contract import Fungible, Transfer, Approval
import ethereum.slogging as slogging
log = slogging.get_logger('test.fungible')
def test_fungible_instance():
state = tester.state()
creator_address = tester.a0
creator_key = tester.k0
nc.registry.register(Fungible)
# Create proxy
EUR_address = nc.tester_create_native_contract_instance(state, creator_key, Fungible)
fungible_as_creator = nc.tester_nac(state, creator_key, EUR_address)
    # Initialize fungible with a fixed quantity of fungibles.
fungible_total = 1000000
fungible_as_creator.init(fungible_total)
assert fungible_as_creator.balanceOf(creator_address) == fungible_total
nc.registry.unregister(Fungible)
def test_fungible_template():
"""
Tests;
Fungible initialization as Creator,
Creator sends Fungibles to Alice,
Alice sends Fungibles to Bob,
Bob approves Creator to spend Fungibles on his behalf,
Creator allocates these Fungibles from Bob to Alice,
Testing of non-standardized functions of the Fungible contract.
Events;
Checking logs from Transfer and Approval Events
"""
# Register Contract Fungible
nc.registry.register(Fungible)
# Initialize Participants and Fungible contract
state = tester.state()
logs = []
creator_address = tester.a0
creator_key = tester.k0
alice_address = tester.a1
alice_key = tester.k1
bob_address = tester.a2
bob_key = tester.k2
# Create proxy
nc.listen_logs(state, Transfer, callback=lambda e: logs.append(e))
nc.listen_logs(state, Approval, callback=lambda e: logs.append(e))
fungible_as_creator = nc.tester_nac(state, creator_key, Fungible.address)
    # Initialize fungible with a fixed quantity of fungibles.
fungible_total = 1000000
fungible_as_creator.init(fungible_total)
assert fungible_as_creator.balanceOf(creator_address) == fungible_total
# Creator transfers Fungibles to Alice
send_amount_alice = 700000
fungible_as_creator.transfer(alice_address, send_amount_alice)
assert fungible_as_creator.balanceOf(creator_address) == fungible_total - send_amount_alice
assert fungible_as_creator.balanceOf(alice_address) == send_amount_alice
# Check logs data of Transfer Event
assert len(logs) == 1
l = logs[0]
assert l['event_type'] == 'Transfer'
assert l['from'] == creator_address
assert l['to'] == alice_address
# Build transaction Log arguments and check sent amount
assert l['value'] == send_amount_alice
# Alice transfers Fungibles to Bob
send_amount_bob = 400000
# Create proxy for Alice
fungible_as_alice = nc.tester_nac(state, alice_key, Fungible.address)
fungible_as_alice.transfer(bob_address, send_amount_bob)
# Test balances of Creator, Alice and Bob
creator_balance = fungible_total - send_amount_alice
alice_balance = send_amount_alice - send_amount_bob
bob_balance = send_amount_bob
assert fungible_as_alice.balanceOf(creator_address) == creator_balance
assert fungible_as_alice.balanceOf(alice_address) == alice_balance
assert fungible_as_alice.balanceOf(bob_address) == bob_balance
# Create proxy for Bob
fungible_as_bob = nc.tester_nac(state, bob_key, Fungible.address)
approved_amount_bob = 100000
assert fungible_as_bob.allowance(creator_address) == 0
# Bob approves Creator to spend Fungibles
assert fungible_as_bob.allowance(creator_address) == 0
fungible_as_bob.approve(creator_address, approved_amount_bob)
assert fungible_as_bob.allowance(creator_address) == approved_amount_bob
# Test transferFrom function, i.e. direct debit.
fungible_as_creator.transferFrom(bob_address, alice_address, approved_amount_bob)
# Test balances
alice_balance += approved_amount_bob
bob_balance -= approved_amount_bob
assert fungible_as_alice.balanceOf(creator_address) == creator_balance
assert fungible_as_alice.balanceOf(alice_address) == alice_balance
assert fungible_as_alice.balanceOf(bob_address) == bob_balance
# Check logs data of Transfer Event
assert len(logs) == 4
l = logs[-1]
assert l['event_type'] == 'Transfer'
assert l['from'] == bob_address
assert l['to'] == alice_address
# Build transaction Log arguments and check sent amount
assert l['value'] == approved_amount_bob
# Testing account information
# Now we should have three Fungible accounts
assert 3 == fungible_as_alice.num_accounts()
r = fungible_as_creator.get_creator()
assert r == creator_address
r = fungible_as_creator.get_accounts()
assert set(r) == set([creator_address, alice_address, bob_address])
print logs
while logs and logs.pop():
pass
nc.registry.unregister(Fungible)
|
bpy_utilities/material_loader/shaders/source1_shaders/refract.py | tltneon/SourceIO | 199 | 12781969 | <reponame>tltneon/SourceIO<filename>bpy_utilities/material_loader/shaders/source1_shaders/refract.py
import numpy as np
from typing import Iterable
from ...shader_base import Nodes
from ..source1_shader_base import Source1ShaderBase
class Refract(Source1ShaderBase):
SHADER: str = 'refract'
@property
def bumpmap(self):
texture_path = self._vavle_material.get_param('$normalmap', None)
if texture_path is not None:
image = self.load_texture_or_default(texture_path, (0.5, 0.5, 1.0, 1.0))
image = self.convert_normalmap(image)
image.colorspace_settings.is_data = True
image.colorspace_settings.name = 'Non-Color'
return image
return None
@property
def basetexture(self):
texture_path = self._vavle_material.get_param('$basetexture', None)
if texture_path is not None:
return self.load_texture_or_default(texture_path, (0.3, 0, 0.3, 1.0))
return None
@property
def color2(self):
color_value, value_type = self._vavle_material.get_vector('$color2', [1, 1, 1])
divider = 255 if value_type is int else 1
color_value = list(map(lambda a: a / divider, color_value))
if len(color_value) == 1:
color_value = [color_value[0], color_value[0], color_value[0]]
elif len(color_value) > 3:
color_value = color_value[:3]
return color_value
@property
def bluramount(self):
value = self._vavle_material.get_float('$bluramount', 0)
return value
@property
def color(self):
color_value, value_type = self._vavle_material.get_vector('$color', [1, 1, 1])
divider = 255 if value_type is int else 1
color_value = list(map(lambda a: a / divider, color_value))
if len(color_value) == 1:
color_value = [color_value[0], color_value[0], color_value[0]]
elif len(color_value) > 3:
color_value = color_value[:3]
return color_value
@property
def refracttint(self):
color_value, value_type = self._vavle_material.get_vector('$refracttint', [1, 1, 1])
divider = 255 if value_type is int else 1
color_value = list(map(lambda a: a / divider, color_value))
if len(color_value) == 1:
color_value = [color_value[0], color_value[0], color_value[0]]
return color_value
def create_nodes(self, material_name):
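        # Approximate Source's refract shader with a Principled BSDF: $basetexture drives Base
        # Color, $normalmap feeds a Normal Map node, and full Transmission with $bluramount as
        # Roughness stands in for screen-space refraction.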
if super().create_nodes(material_name) in ['UNKNOWN', 'LOADED']:
return
self.bpy_material.blend_method = 'OPAQUE'
self.bpy_material.shadow_method = 'NONE'
self.bpy_material.use_screen_refraction = True
self.bpy_material.use_backface_culling = True
material_output = self.create_node(Nodes.ShaderNodeOutputMaterial)
shader = self.create_node(Nodes.ShaderNodeBsdfPrincipled, self.SHADER)
self.connect_nodes(shader.outputs['BSDF'], material_output.inputs['Surface'])
basetexture = self.basetexture
if basetexture:
self.create_and_connect_texture_node(basetexture, shader.inputs['Base Color'], name='$basetexture')
bumpmap = self.bumpmap
if bumpmap:
normalmap_node = self.create_node(Nodes.ShaderNodeNormalMap)
self.create_and_connect_texture_node(bumpmap, normalmap_node.inputs['Color'], name='$bumpmap')
self.connect_nodes(normalmap_node.outputs['Normal'], shader.inputs['Normal'])
shader.inputs['Transmission'].default_value = 1.0
shader.inputs['Roughness'].default_value = self.bluramount
|
python_modules/libraries/dagster-papertrail/dagster_papertrail/loggers.py | rpatil524/dagster | 4,606 | 12781980 | <gh_stars>1000+
import logging
import logging.handlers
import socket
from dagster import Field, IntSource, StringSource, logger
class ContextFilter(logging.Filter):
hostname = socket.gethostname()
def filter(self, record):
record.hostname = ContextFilter.hostname
return True
@logger(
{
"log_level": Field(StringSource, is_required=False, default_value="INFO"),
"name": Field(StringSource, is_required=False, default_value="dagster_papertrail"),
"papertrail_address": Field(StringSource, description="Papertrail URL", is_required=True),
"papertrail_port": Field(IntSource, description="Papertrail port", is_required=True),
},
description="A JSON-formatted console logger",
)
def papertrail_logger(init_context):
"""Use this logger to configure your Dagster pipeline to log to Papertrail. You'll need an
active Papertrail account with URL and port.
Example:
.. code-block:: python
@job(logger_defs={
"console": colored_console_logger,
"papertrail": papertrail_logger,
})
def simple_job():
...
simple_job.execute_in_process(
run_config={
"loggers": {
"console": {
"config": {
"log_level": "INFO",
}
},
"papertrail": {
"config": {
"log_level": "INFO",
"name": "hello_pipeline",
"papertrail_address": "127.0.0.1",
"papertrail_port": 12345,
}
},
}
}
)
"""
level, name, papertrail_address, papertrail_port = (
init_context.logger_config.get(k)
for k in ("log_level", "name", "papertrail_address", "papertrail_port")
)
klass = logging.getLoggerClass()
logger_ = klass(name, level=level)
log_format = "%(asctime)s %(hostname)s " + name + ": %(message)s"
formatter = logging.Formatter(log_format, datefmt="%b %d %H:%M:%S")
handler = logging.handlers.SysLogHandler(address=(papertrail_address, papertrail_port))
handler.addFilter(ContextFilter())
handler.setFormatter(formatter)
logger_.addHandler(handler)
return logger_
|
angr/knowledge_plugins/patches.py | Kyle-Kyle/angr | 6,132 | 12781988 | from typing import Optional, List, Dict
from cle.address_translator import AddressTranslator
from sortedcontainers import SortedDict
from .plugin import KnowledgeBasePlugin
# TODO: Serializable
class Patch:
def __init__(self, addr, new_bytes, comment: Optional[str]=None):
self.addr = addr
self.new_bytes = new_bytes
self.comment = comment
def __len__(self):
return len(self.new_bytes)
class PatchManager(KnowledgeBasePlugin):
"""
A placeholder-style implementation for a binary patch manager. This class should be significantly changed in the
future when all data about loaded binary objects are loaded into angr knowledge base from CLE. As of now, it only
stores byte-level replacements. Other angr components may choose to use or not use information provided by this
manager. In other words, it is not transparent.
Patches should not overlap, but it's user's responsibility to check for and avoid overlapping patches.
"""
def __init__(self, kb):
super().__init__()
self._patches: Dict[int,Patch] = SortedDict()
self._kb = kb
def add_patch(self, addr, new_bytes, comment: Optional[str]=None):
self._patches[addr] = Patch(addr, new_bytes, comment=comment)
def add_patch_obj(self, patch: Patch):
self._patches[patch.addr] = patch
def remove_patch(self, addr):
if addr in self._patches:
del self._patches[addr]
def patch_addrs(self):
return self._patches.keys()
def get_patch(self, addr):
"""
Get patch at the given address.
:param int addr: The address of the patch.
:return: The patch if there is one starting at the address, or None if there isn't any.
:rtype: Patch or None
"""
return self._patches.get(addr, None)
def get_all_patches(self, addr, size):
"""
Retrieve all patches that cover a region specified by [addr, addr+size).
:param int addr: The address of the beginning of the region.
:param int size: Size of the region.
:return: A list of patches.
:rtype: list
"""
patches = [ ]
for patch_addr in self._patches.irange(maximum=addr+size-1, reverse=True):
p = self._patches[patch_addr]
if self.overlap(p.addr, p.addr + len(p), addr, addr+size):
patches.append(p)
else:
break
return patches[::-1]
def keys(self):
return self._patches.keys()
def items(self):
return self._patches.items()
def values(self):
return self._patches.values()
def copy(self):
o = PatchManager(self._kb)
        o._patches = self._patches.copy()
        return o
@staticmethod
def overlap(a0, a1, b0, b1):
return a0 <= b0 < a1 or a0 <= b1 < a1 or b0 <= a0 < b1
def apply_patches_to_binary(self, binary_bytes: Optional[bytes]=None, patches: Optional[List[Patch]]=None) -> bytes:
if patches is None:
patches = sorted(list(self._patches.values()), key=lambda x: x.addr)
if binary_bytes is None:
with open(self._kb._project.loader.main_object.binary, "rb") as f:
binary_bytes = f.read()
for patch in patches:
# convert addr to file offset
at = AddressTranslator.from_mva(patch.addr, self._kb._project.loader.main_object)
file_offset = at.to_raw()
if file_offset < len(binary_bytes) and file_offset + len(patch.new_bytes) < len(binary_bytes):
binary_bytes = binary_bytes[:file_offset] + \
patch.new_bytes + \
binary_bytes[file_offset + len(patch.new_bytes):]
return binary_bytes
KnowledgeBasePlugin.register_default('patches', PatchManager)
|
fuzzing/kernel/syzkaller-configs/generate_config.py | DBGilles/retrowrite | 478 | 12781992 | <reponame>DBGilles/retrowrite<gh_stars>100-1000
#!/usr/bin/python3
import argparse
import json
import os
def main():
parser = argparse.ArgumentParser(
description='Generate a configuration file for syzkaller')
parser.add_argument('--workdir', help='workdir for syzkaller', required=True)
parser.add_argument('--kernel', help='path to the kernel directory', required=True)
parser.add_argument('--initramfs', help='path to the initramfs', required=True)
parser.add_argument('--image', help='path to the disk image', required=True)
parser.add_argument('--sshkey', help='path to the VM\'s SSH key', required=True)
parser.add_argument('--syzkaller', help='path to syzkaller', required=True)
parser.add_argument('--vms', help='number of VMs', type=int, default=8)
parser.add_argument('--cpus', help='CPUs per VM', type=int, default=2)
parser.add_argument('--mem', help='memory per VM', type=int, default=2048)
parser.add_argument('config', help='path to the original config')
args = parser.parse_args()
with open(args.config) as f:
config = json.load(f)
config['reproduce'] = False
config['vm']['count'] = args.vms
config['vm']['kernel'] = os.path.join(args.kernel, 'arch', 'x86', 'boot',
'bzImage')
config['vm']['initrd'] = args.initramfs
config['vm']['cpu'] = args.cpus
config['vm']['mem'] = args.mem
config['workdir'] = args.workdir
config['kernel_obj'] = args.kernel
config['image'] = args.image
config['sshkey'] = args.sshkey
config['syzkaller'] = args.syzkaller
print(json.dumps(config, indent=4))
if __name__ == '__main__':
main()
|
lectures/solutions/tile_rectify.py | ritamonteiroo/scikit | 453 | 12782008 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from skimage import transform
from skimage.transform import estimate_transform
source = np.array([(129, 72),
(302, 76),
(90, 185),
(326, 193)])
target = np.array([[0, 0],
[400, 0],
[0, 400],
[400, 400]])
tf = estimate_transform('projective', source, target)
H = tf.params # in older versions of skimage, this should be
# H = tf._matrix
print(H)
# H = np.array([[ 3.04026872e+00, 1.04929628e+00, -4.67743998e+02],
# [ -1.44134582e-01, 6.23382067e+00, -4.30241727e+02],
# [ 2.63620673e-05, 4.17694527e-03, 1.00000000e+00]])
def rectify(xy):
x = xy[:, 0]
y = xy[:, 1]
# You must fill in your code here.
#
# Handy functions are:
#
# - np.dot (matrix multiplication)
# - np.ones_like (make an array of ones the same shape as another array)
# - np.column_stack
# - A.T -- type .T after a matrix to transpose it
# - x.reshape -- reshapes the array x
# We need to provide the backward mapping
HH = np.linalg.inv(H)
homogeneous_coordinates = np.column_stack([x, y, np.ones_like(x)])
xyz = np.dot(HH, homogeneous_coordinates.T)
# We want one coordinate per row
xyz = xyz.T
# Turn z into a column vector
z = xyz[:, 2]
z = z.reshape([len(z), 1])
xyz = xyz / z
return xyz[:, :2]
image = plt.imread('../../images/chapel_floor.png')
out = transform.warp(image, rectify, output_shape=(400, 400))
f, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 4))
ax0.imshow(image)
ax1.imshow(out)
plt.show()
|
python/fleetx/dataset/ctr_data_generator.py | hutuxian/FleetX | 170 | 12782026 | <filename>python/fleetx/dataset/ctr_data_generator.py
#!/usr/bin/python
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# There are 13 integer features and 26 categorical features
import os
import paddle
import paddle.fluid as fluid
import paddle.distributed.fleet as fleet
continous_features = range(1, 14)
categorial_features = range(14, 40)
continous_clip = [20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50]
def get_dataloader(inputs,
train_files_path,
sparse_feature_dim,
batch_size,
shuffle=True):
file_list = [
str(train_files_path) + "/%s" % x for x in os.listdir(train_files_path)
]
loader = fluid.io.DataLoader.from_generator(
feed_list=inputs, capacity=64, use_double_buffer=True, iterable=True)
train_generator = CriteoDataset(sparse_feature_dim)
reader = train_generator.train(file_list,
fleet.worker_num(), fleet.worker_index())
if shuffle:
reader = paddle.batch(
paddle.reader.shuffle(
reader, buf_size=batch_size * 100),
batch_size=batch_size)
else:
reader = paddle.batch(reader, batch_size=batch_size)
places = fluid.CPUPlace()
loader.set_sample_list_generator(reader, places)
return loader
class CriteoDataset(object):
def __init__(self, sparse_feature_dim):
self.cont_min_ = [0, -3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.cont_max_ = [
20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
]
self.cont_diff_ = [
20, 603, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
]
self.hash_dim_ = sparse_feature_dim
# here, training data are lines with line_index < train_idx_
self.train_idx_ = 41256555
self.continuous_range_ = range(1, 14)
self.categorical_range_ = range(14, 40)
def _reader_creator(self, file_list, is_train, trainer_num, trainer_id):
def reader():
for file in file_list:
with open(file, 'r') as f:
line_idx = 0
for line in f:
line_idx += 1
features = line.rstrip('\n').split('\t')
dense_feature = []
sparse_feature = []
for idx in self.continuous_range_:
if features[idx] == '':
dense_feature.append(0.0)
else:
dense_feature.append(
(float(features[idx]) -
self.cont_min_[idx - 1]) /
self.cont_diff_[idx - 1])
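                        # Hashing trick: each categorical value is mapped to a sparse id in
                        # [0, hash_dim_) by hashing "<column index><raw value>".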
for idx in self.categorical_range_:
sparse_feature.append([
hash(str(idx) + features[idx]) % self.hash_dim_
])
label = [int(features[0])]
yield [dense_feature] + sparse_feature + [label]
return reader
def train(self, file_list, trainer_num, trainer_id):
return self._reader_creator(file_list, True, trainer_num, trainer_id)
def test(self, file_list):
return self._reader_creator(file_list, False, 1, 0)
|
solutions/problem_108.py | ksvr444/daily-coding-problem | 1,921 | 12782034 | def can_shift(target, string):
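    # A string is a rotation of `target` exactly when it appears inside `target`
    # concatenated with itself (and both strings have the same length).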
return \
target and string and \
len(target) == len(string) and \
string in target * 2
assert can_shift("abcde", "cdeab")
assert not can_shift("abc", "acb")
|
src/amuse/ext/cloud.py | rknop/amuse | 131 | 12782075 | <reponame>rknop/amuse<filename>src/amuse/ext/cloud.py<gh_stars>100-1000
import inspect
import numpy
from amuse.units import generic_unit_system
from amuse import datamodel
def fill_grid_with_cloud_and_medium(
grid,
center = None,
radius = None,
rho_medium = 1.0 | generic_unit_system.mass / generic_unit_system.length**3,
rho_cloud = 0.1 | generic_unit_system.mass / generic_unit_system.length**3,
gamma = 5.0 / 3.0,
):
pass
def fill_grid_with_spherical_cloud(
grid,
center = None,
radius = None,
rho = 1.0 | generic_unit_system.mass / generic_unit_system.length**3,
rhovx = 0.0 | generic_unit_system.mass / (generic_unit_system.time * generic_unit_system.length**2),
rhovy = 0.0 | generic_unit_system.mass / (generic_unit_system.time * generic_unit_system.length**2),
rhovz = 0.0 | generic_unit_system.mass / (generic_unit_system.time * generic_unit_system.length**2),
energy = 1.0 | generic_unit_system.mass / (generic_unit_system.time**2 * generic_unit_system.length),
subgridsize = 4,
):
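    # Cells well inside the sphere are filled directly; cells within one cell size of the
    # boundary are refined on a subgrid below and averaged, which anti-aliases the cloud edge.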
radii = (grid.position - center).lengths()
if subgridsize <= 1:
selection = radii <= radius
else:
dr = grid.cellsize().length()
selection = radii < (radius - dr)
grid.rho[selection] = rho(radii) if inspect.isroutine(rho) else rho
grid.rhovx[selection] = rhovx
grid.rhovy[selection] = rhovy
grid.rhovz[selection] = rhovz
grid.energy[selection] = energy
if subgridsize <= 1:
return
selection = numpy.logical_and( radii >= (radius-dr), radii <= (radius+dr))
subgrid = datamodel.Grid.create((subgridsize, subgridsize, subgridsize), grid.cellsize())
subgrid.x -= grid.cellsize()[0] / 2.0
subgrid.y -= grid.cellsize()[1] / 2.0
subgrid.z -= grid.cellsize()[2] / 2.0
x_indices, y_indices, z_indices = grid.indices()
x_indices = x_indices[selection]
y_indices = y_indices[selection]
z_indices = z_indices[selection]
position = subgrid.position
centers = center - grid.position[selection]
subgrid_rho = rho * numpy.ones_like(subgrid.x.number)
subgrid_rhovx = rhovx * numpy.ones_like(subgrid.x.number)
subgrid_rhovy = rhovy * numpy.ones_like(subgrid.x.number)
subgrid_rhovz = rhovz * numpy.ones_like(subgrid.x.number)
subgrid_energy = energy * numpy.ones_like(subgrid.x.number)
update_grid_rho = grid.rho[selection]
update_grid_rhovx = grid.rhovx[selection]
update_grid_rhovy = grid.rhovy[selection]
update_grid_rhovz = grid.rhovz[selection]
update_grid_energy = grid.energy[selection]
for i in range(len(x_indices)):
x_index = x_indices[i]
y_index = y_indices[i]
z_index = z_indices[i]
center_of_cloud_for_subgrid = centers[i]
radii = (position - center_of_cloud_for_subgrid).lengths()
subgrid_rho[...] = update_grid_rho[i]
subgrid_rhovx[...] = update_grid_rhovx[i]
subgrid_rhovy[...] = update_grid_rhovy[i]
subgrid_rhovz[...] = update_grid_rhovz[i]
subgrid_energy[...] = update_grid_energy[i]
subgrid_selection = radii <= radius
subgrid_rho[subgrid_selection] = rho
subgrid_rhovx[subgrid_selection] = rhovx
subgrid_rhovy[subgrid_selection] = rhovy
subgrid_rhovz[subgrid_selection] = rhovz
subgrid_energy[subgrid_selection] = energy
update_grid_rho[i] = subgrid_rho.mean()
update_grid_rhovx[i] = subgrid_rhovx.mean()
update_grid_rhovy[i] = subgrid_rhovy.mean()
update_grid_rhovz[i] = subgrid_rhovz.mean()
update_grid_energy[i] = subgrid_energy.mean()
grid.rho[selection] = update_grid_rho
grid.rhovx[selection] = update_grid_rhovx
grid.rhovy[selection] = update_grid_rhovy
grid.rhovz[selection] = update_grid_rhovz
grid.energy[selection] = update_grid_energy
def fill_grid_with_cloud_shock(
grid,
center = None,
radius = None,
ratio_densities = 10.0,
mach_number = 2.7,
gamma = 5.0/3.0,
subgridsize = 4,
):
velocity_unit = generic_unit_system.length / generic_unit_system.time
momentum_unit = generic_unit_system.mass / (generic_unit_system.time * generic_unit_system.length**2)
density_unit = generic_unit_system.mass / generic_unit_system.length**3
energy_unit = generic_unit_system.mass / (generic_unit_system.time**2 * generic_unit_system.length)
velocity_of_medium = (numpy.sqrt(gamma*(gamma-1.0)*ratio_densities) * mach_number) | velocity_unit
rho_in_cloud = 1.0 | density_unit
rhovx_in_cloud = 0.0 | momentum_unit
rhovy_in_cloud = 0.0 | momentum_unit
rhovz_in_cloud = 0.0 | momentum_unit
energy_in_cloud = 1.0 | energy_unit
rho_in_medium = 1.0 / ratio_densities | density_unit
rhovx_in_medium = 0.0 | momentum_unit
rhovy_in_medium = rho_in_medium * velocity_of_medium
rhovz_in_medium = 0.0 | momentum_unit
energy_in_medium = (1.0 | energy_unit) + (0.5* rho_in_medium * velocity_of_medium**2)
grid.rho = rho_in_medium
grid.rhovx = rhovx_in_medium
grid.rhovy = rhovy_in_medium
grid.rhovz = rhovz_in_medium
grid.energy = energy_in_medium
fill_grid_with_spherical_cloud(grid, center, radius, rho_in_cloud, rhovx_in_cloud, rhovy_in_cloud, rhovz_in_cloud, energy_in_cloud, subgridsize)
|
zkstark/quadratic_prover_test.py | kevaundray/research | 1,351 | 12782116 | import quadratic_provers as q
data = q.eval_across_field([1, 2, 3, 4], 11)
qproof = q.mk_quadratic_proof(data, 4, 11)
assert q.check_quadratic_proof(data, qproof, 4, 5, 11)
data2 = q.eval_across_field(range(36), 97)
cproof = q.mk_column_proof(data2, 36, 97)
assert q.check_column_proof(data2, cproof, 36, 10, 97)
|
fairseq/modules/multibranch.py | ishine/lite-transformer | 543 | 12782205 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from . import MultiheadAttention
class MultiBranch(nn.Module):
def __init__(self, branches, embed_dim_list):
super().__init__()
self.branches = nn.ModuleList(branches)
self.embed_dim_list = embed_dim_list
def forward(self, query, key, value, key_padding_mask=None, incremental_state=None, need_weights=True, static_kv=False, attn_mask=None):
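        # Split the embedding dimension across branches: each branch sees its own slice of
        # query/key/value, and the per-branch outputs are concatenated back along the feature axis.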
tgt_len, bsz, embed_size = query.size()
assert sum(self.embed_dim_list) == embed_size
out = []
attn = None
start = 0
for idx, embed_dim in enumerate(self.embed_dim_list):
branch = self.branches[idx]
branch_type = type(branch)
q = query[...,start:start+embed_dim]
if key is not None:
assert value is not None
k, v = key[..., start:start+embed_dim], value[..., start:start+embed_dim]
start += embed_dim
if branch_type == MultiheadAttention:
x, attn = branch(q, k, v, key_padding_mask, incremental_state, need_weights, static_kv, attn_mask)
else:
mask = key_padding_mask
if mask is not None:
q = q.masked_fill(mask.transpose(0, 1).unsqueeze(2), 0)
x = branch(q.contiguous(), incremental_state=incremental_state)
out.append(x)
out = torch.cat(out, dim=-1)
return out, attn |
VSR/DataLoader/Dataset.py | Kadantte/VideoSuperResolution | 1,447 | 12782238 | <reponame>Kadantte/VideoSuperResolution
# Copyright (c) 2017-2020 <NAME>.
# Author: <NAME>
# Email: <EMAIL>
# Update: 2020 - 2 - 7
import re
from concurrent import futures
from pathlib import Path
import copy
import yaml
from .VirtualFile import ImageFile, RawFile
from ..Util import Config, to_list
try:
from yaml import FullLoader as _Loader
except ImportError:
from yaml import Loader as _Loader
IMAGE_SUF = ('PNG', 'JPG', 'JPEG', 'BMP', 'TIFF', 'TIF', 'GIF')
VIDEO_SUF = {
'NV12': 'NV12',
'YUV': 'YV12',
'YV12': 'YV12',
'NV21': 'NV21',
'YV21': 'YV21',
'RGB': 'RGB'
}
def _supported_image(x: Path):
return x.suffix[1:].upper() in IMAGE_SUF
def _supported_video(x: Path):
return x.suffix[1:].upper() in VIDEO_SUF
def _supported_suffix(x: Path):
return _supported_image(x) or _supported_video(x)
class Dataset(object):
""" Make a `dataset` object
"""
def __init__(self, *folders):
self.dirs = list(map(Path, folders))
self.recursive = True
self.glob_patterns = ('*',)
self.inc_patterns = None
self.exc_patterns = None
self.as_video = False
self.compiled = None
def use_like_video_(self):
self.as_video = True
def use_like_video(self):
d = copy.copy(self)
d.compiled = None
d.use_like_video_()
return d
def include_(self, *pattern: str):
self.glob_patterns = list(pattern)
self.inc_patterns = None
def include(self, *pattern: str):
d = copy.copy(self)
d.compiled = None
d.include_(*pattern)
return d
def include_reg_(self, *reg: str):
self.inc_patterns = [re.compile(r) for r in reg]
self.glob_patterns = ('*',)
def include_reg(self, *reg: str):
d = copy.copy(self)
d.compiled = None
d.include_reg_(*reg)
return d
def exclude_(self, *reg: str):
self.exc_patterns = [re.compile(r) for r in reg]
def exclude(self, *reg: str):
d = copy.copy(self)
d.compiled = None
d.exclude_(*reg)
return d
def compile(self):
if self.compiled:
return self.compiled
files = []
def _exc(x: Path):
if self.exc_patterns:
for reg in self.exc_patterns:
if reg.search(str(x.absolute().as_posix())):
return False
return True
def _inc(x: Path):
if self.inc_patterns:
for reg in self.inc_patterns:
if reg.search(str(x.absolute().as_posix())):
return True
return False
for folder in self.dirs:
if not Path(folder).exists():
continue
nodes = []
if folder.is_file():
# if points to a file rather than a directory
nodes.append(folder)
fn_glob = Path.rglob if self.recursive else Path.glob
for pat in self.glob_patterns:
nodes += list(fn_glob(folder, pat))
if self.inc_patterns:
nodes = filter(_inc, nodes)
files += list(filter(_exc, filter(_supported_suffix, nodes)))
image_nodes = list(filter(_supported_image, files))
if not self.as_video:
self.compiled = Container(sorted(image_nodes), self.as_video)
return self.compiled
video_nodes = list(filter(_supported_video, files))
video_nodes += list(map(lambda x: x.parent, image_nodes))
video_nodes = list(set(video_nodes)) # remove duplicated nodes
self.compiled = Container(sorted(video_nodes), self.as_video)
return self.compiled
class Container(object):
"""Frames container
"""
def __init__(self, urls, is_video: bool):
assert isinstance(urls, (list, tuple))
pool = futures.ThreadPoolExecutor(4)
fs = []
self.nodes = []
def _parse_image_node(url: Path):
if url.is_dir():
for i in filter(_supported_image, url.glob('*')):
self.nodes.append(ImageFile(i, rewind=True))
elif _supported_image(url):
self.nodes.append(ImageFile(url, rewind=True))
def _parse_video_node(url: Path):
if _supported_video(url):
size = re.findall("\\d+x\\d+", url.stem)
if size:
size = [int(x) for x in size[0].split('x')]
self.nodes.append(
RawFile(url, VIDEO_SUF[url.suffix[1:].upper()], size,
rewind=True))
elif url.is_dir():
self.nodes.append(ImageFile(url))
for j in urls:
if is_video:
fs.append(pool.submit(_parse_video_node, j))
else:
fs.append(pool.submit(_parse_image_node, j))
futures.as_completed(fs)
pool.shutdown()
self.nodes = sorted(self.nodes, key=lambda x: x.path)
def __getitem__(self, item):
return self.nodes[item]
def __len__(self):
return len(self.nodes)
@property
def capacity(self):
if not self.nodes:
return 0
pos = 0
max_sz = 0
total_frames = 0
for i, n in enumerate(self.nodes):
total_frames += n.frames
if n.size() > max_sz:
max_sz = n.size()
pos = i
shape = self.nodes[pos].shape
max_bpp = 3
return shape[0] * shape[1] * max_bpp * total_frames
def load_datasets(describe_file, key=''):
"""load dataset described in YAML file"""
def _extend_pattern(url):
_url = root / Path(url)
url_p = _url
while True:
try:
if url_p.exists():
break
except OSError:
url_p = url_p.parent
continue
if url_p == url_p.parent:
break
url_p = url_p.parent
# retrieve glob pattern
url_r = str(_url.relative_to(url_p))
if url_r == '.' and url_p.is_dir():
return str(Path(url) / '**/*')
return url
def _get_dataset(desc, use_as_video=None, name=None):
dataset = Config(name=name)
for i in desc:
if i not in ('train', 'val', 'test'):
continue
if isinstance(desc[i], dict):
hr = to_list(desc[i].get('hr'))
lr = to_list(desc[i].get('lr'))
else:
hr = to_list(desc[i])
lr = []
if use_as_video:
hr_pattern = [
x if x not in all_path and x + '[video]' not in all_path else
all_path[x + '[video]'] for x in hr]
lr_pattern = [
x if x not in all_path and x + '[video]' not in all_path else
all_path[x + '[video]'] for x in lr]
else:
hr_pattern = [x if x not in all_path else all_path[x] for x in hr]
lr_pattern = [x if x not in all_path else all_path[x] for x in lr]
hr_data = Dataset(root).include(*(_extend_pattern(x) for x in hr_pattern))
lr_data = Dataset(root).include(
*(_extend_pattern(x) for x in lr_pattern)) if lr_pattern else None
hr_data.recursive = False
if lr_data is not None:
lr_data.recursive = False
if use_as_video:
hr_data.use_like_video_()
if lr_data is not None:
lr_data.use_like_video_()
setattr(dataset, i, Config(hr=hr_data, lr=lr_data))
return dataset
datasets = Config()
with open(describe_file, 'r') as fd:
config = yaml.load(fd, Loader=_Loader)
root = Path(config["Root"])
if not root.is_absolute():
# make `root` relative to the file
root = Path(describe_file).resolve().parent / root
root = root.resolve()
all_path = config["Path"]
if key.upper() in config["Dataset"]:
return _get_dataset(config["Dataset"][key.upper()], name=key)
elif key.upper() + '[video]' in config["Dataset"]:
return _get_dataset(config["Dataset"][key.upper() + '[video]'], True,
name=key)
elif key.upper() in all_path:
return _get_dataset(Config(test=all_path[key.upper()]), name=key)
elif key.upper() + '[video]' in all_path:
return _get_dataset(Config(test=all_path[key.upper() + '[video]']), True,
name=key)
for name, value in config["Dataset"].items():
if '[video]' in name:
name = name.replace('[video]', '')
datasets[name] = _get_dataset(value, True, name=name)
else:
datasets[name] = _get_dataset(value, name=name)
for name in all_path:
if '[video]' in name:
_name = name.replace('[video]', '')
datasets[_name] = _get_dataset(Config(test=all_path[name]), True,
name=_name)
else:
datasets[name] = _get_dataset(Config(test=all_path[name]), name=name)
return datasets
|
tests/casefiles/toplevel_extracode.py | ardovm/wxGlade | 225 | 12782258 | <reponame>ardovm/wxGlade
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# generated by wxGlade
#
import wx
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# frame extra code
# dialog extra code
# end wxGlade
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
# frame extra code before
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.SetSize((400, 300))
self.SetTitle("frame")
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_1.Add((0, 0), 0, 0, 0)
self.SetSizer(sizer_1)
self.Layout()
# frame extra code after
self.Bind(wx.EVT_CLOSE, self.on_close_frame, self)
self.Bind(wx.EVT_MENU_CLOSE, self.on_menu_close_frame, self)
# end wxGlade
def on_close_frame(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_close_frame' not implemented!")
event.Skip()
def on_menu_close_frame(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_menu_close_frame' not implemented!")
event.Skip()
# end of class MyFrame
class MyDialog(wx.Dialog):
def __init__(self, *args, **kwds):
# begin wxGlade: MyDialog.__init__
# dialog extra code before
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.SetTitle("dialog")
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_1.Add((0, 0), 0, 0, 0)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
self.Layout()
# dialog extra code after
self.Bind(wx.EVT_CLOSE, self.on_close_dialog, self)
# end wxGlade
def on_close_dialog(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler 'on_close_dialog' not implemented!")
event.Skip()
# end of class MyDialog
class MyMenuBar(wx.MenuBar):
def __init__(self, *args, **kwds):
# begin wxGlade: MyMenuBar.__init__
# menubar extracode before
wx.MenuBar.__init__(self, *args, **kwds)
# menubar extracode after
# end wxGlade
# end of class MyMenuBar
class wxToolBar(wx.ToolBar):
def __init__(self, *args, **kwds):
# begin wxGlade: wxToolBar.__init__
# toolbar extracode before
kwds["style"] = kwds.get("style", 0)
wx.ToolBar.__init__(self, *args, **kwds)
self.Realize()
# toolbar extracode after
# end wxGlade
# end of class wxToolBar
class MyDialog1(wx.Panel):
def __init__(self, *args, **kwds):
# begin wxGlade: MyDialog1.__init__
# panel extracode before
kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
wx.Panel.__init__(self, *args, **kwds)
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_1.Add((0, 0), 0, 0, 0)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
self.Layout()
# panel extracode after
# end wxGlade
# end of class MyDialog1
class MyApp(wx.App):
def OnInit(self):
self.frame = MyFrame(None, wx.ID_ANY, "")
self.SetTopWindow(self.frame)
self.frame.Show()
return True
# end of class MyApp
if __name__ == "__main__":
app = MyApp(0)
app.MainLoop()
|
utils/utils.py | luowensheng/MCN | 130 | 12782272 | """Miscellaneous utility functions."""
from functools import reduce
from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
import spacy
import re
import cv2
import time
from keras_bert.tokenizer import Tokenizer
from keras_bert.loader import load_trained_model_from_checkpoint, load_vocabulary
from keras_bert import extract_embeddings
import os
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
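# Illustrative example of left-to-right composition (f, g, h are hypothetical callables):
#   compose(f, g, h)(x) == h(g(f(x)))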
def letterbox_image(image, size):
'''resize image with unchanged aspect ratio using padding'''
iw, ih = image.size
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
return new_image
def rand(a=0, b=1):
return np.random.rand()*(b-a) + a
def get_bert_input(text,vocabs,max_len=512):
tokenizer = Tokenizer(vocabs, cased=False)
    token, segment = tokenizer.encode(text, max_len=max_len)
    # Pad both sequences with zeros up to max_len (encode() may already have padded them).
    token.extend([0] * (max_len - len(token)))
    segment.extend([0] * (max_len - len(segment)))
return [token,segment]
def seq_to_list(s):
'''
note: 2018.10.3
use for process sentences
'''
t_str = s.lower()
for i in [r'\?', r'\!', r'\'', r'\"', r'\$', r'\:', r'\@', r'\(', r'\)', r'\,', r'\.', r'\;', r'\n']:
t_str = re.sub(i, '', t_str)
for i in [r'\-', r'\/']:
t_str = re.sub(i, ' ', t_str)
q_list = re.sub(r'\?', '', t_str.lower()).split(' ')
q_list = list(filter(lambda x: len(x) > 0, q_list))
return q_list
def qlist_to_vec(max_length, q_list,embed):
'''
note: 2018.10.3
use for process sentences
'''
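    # Embeds each word with a 300-d spaCy vector (cached in glove_dict) and zero-pads or
    # truncates the sequence to max_length, returning a (max_length, 300) array.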
glove_matrix = []
glove_dict = {}
q_len = len(q_list)
if q_len > max_length:
q_len = max_length
for i in range(max_length):
if i < q_len:
w=q_list[i]
if w not in glove_dict:
glove_dict[w]=embed(u'%s'%w).vector
glove_matrix.append(glove_dict[w])
else:
glove_matrix.append(np.zeros(300,dtype=float))
return np.array(glove_matrix)
def get_random_data(annotation_line, input_shape,embed,config, train_mode=True, max_boxes=1):
'''random preprocessing for real-time data augmentation'''
SEG_DIR=config['seg_gt_path']
line = annotation_line.split()
h, w = input_shape
stop=len(line)
for i in range(1,len(line)):
if (line[i]=='~'):
stop=i
break
# print(line[1:stop])
box_ = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:stop]])
box=np.zeros([1,5])
seg_id=box_[0][-1]
box[0]=box_[0][:-1]
seg_map=np.load(os.path.join(SEG_DIR,str(seg_id)+'.npy'))
seg_map_ori=np.array(seg_map).astype(np.float32)
seg_map=Image.fromarray(seg_map_ori)
# print(np.shape(box))
# print(box)
#####################################
#sentence process maxlength set to 20 and random choose one for train
sentences=[]
sent_stop=stop+1
for i in range(stop+1,len(line)):
if line[i]=='~':
sentences.append(line[sent_stop:i])
sent_stop=i+1
sentences.append(line[sent_stop:len(line)])
choose_index=np.random.choice(len(sentences))
sentence=sentences[choose_index]
# print(qlist)
if config['use_bert']:
vocabs = load_vocabulary(config['bert_path']+'/vocab.txt')
word_vec=get_bert_input(sentence,vocabs,512)
else:
word_vec=qlist_to_vec(config['word_len'], sentence,embed)
# print(word_vec)
# print(np.shape(word_vec))
#######################################
image = Image.open(os.path.join(config['image_path'],line[0]))
iw, ih = image.size
scale = min(w / iw, h / ih)
nw = int(iw * scale)
nh = int(ih * scale)
dx = (w - nw) // 2
dy = (h - nh) // 2
ori_image = image
image = image.resize((nw, nh), Image.BICUBIC)
new_image = Image.new('RGB', (w, h), (128, 128, 128))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image) / 255.
seg_map = seg_map.resize((nw, nh))
new_map = Image.new('L', (w, h), (0))
new_map.paste(seg_map, (dx, dy))
seg_map_data = np.array(new_map)
seg_map_data = cv2.resize(seg_map_data, (
seg_map_data.shape[0] // config['seg_out_stride'], seg_map_data.shape[0] // config['seg_out_stride']),interpolation=cv2.INTER_NEAREST)
seg_map_data = np.reshape(seg_map_data, [np.shape(seg_map_data)[0], np.shape(seg_map_data)[1], 1])
# print(new_image.size)
# correct boxes
box_data = np.zeros((max_boxes, 5))
if len(box) > 0:
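        # Map box corners from original-image coordinates into the letterboxed frame:
        # scale by the resize factor, then shift by the padding offsets (dx, dy).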
if len(box) > max_boxes: box = box[:max_boxes]
box[:, [0, 2]] = box[:, [0, 2]] * scale + dx
box[:, [1, 3]] = box[:, [1, 3]] * scale + dy
box_data[:len(box)] = box
box_data = box_data[:, 0:4] #delete classfy
if not train_mode:
word_vec=[qlist_to_vec(config['word_len'], sent,embed) for sent in sentences]
return image_data, box_data,word_vec,ori_image,sentences,np.expand_dims(seg_map_ori ,-1)
return image_data, box_data,word_vec,seg_map_data
def lr_step_decay(lr_start=0.001, steps=[30, 40]):
def get_lr(epoch):
decay_rate = len(steps)
for i, e in enumerate(steps):
if epoch < e:
decay_rate = i
break
lr = lr_start / (10 ** (decay_rate))
return lr
return get_lr
#powre decay
def lr_power_decay(lr_start=2.5e-4,lr_power=0.9, warm_up_lr=0.,step_all=45*1414,warm_up_step=1000):
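    # Linear warm-up from warm_up_lr to lr_start over warm_up_step steps, followed by a
    # polynomial ("poly") decay of lr_start down to 0 at step_all with exponent lr_power.
    # Illustrative usage sketch (how the callback is driven per step/epoch is an assumption):
    #   lr_fn = lr_power_decay(lr_start=2.5e-4)
    #   # e.g. keras.callbacks.LearningRateScheduler(lr_fn)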
# step_per_epoch=3286
def warm_up(base_lr, lr, cur_step, end_step):
return base_lr + (lr - base_lr) * cur_step / end_step
def get_learningrate(epoch):
if epoch<warm_up_step:
lr = warm_up(warm_up_lr, lr_start, epoch, warm_up_step)
else:
lr = lr_start * ((1 - float(epoch-warm_up_step) / (step_all-warm_up_step)) ** lr_power)
return lr
# print("learning rate is", lr)
return get_learningrate |
saleor/shipping/utils.py | fairhopeweb/saleor | 15,337 | 12782301 | <gh_stars>1000+
from typing import TYPE_CHECKING, Optional
from django_countries import countries
from .interface import ShippingMethodData
if TYPE_CHECKING:
from .models import ShippingMethod
def default_shipping_zone_exists(zone_pk=None):
from .models import ShippingZone
return ShippingZone.objects.exclude(pk=zone_pk).filter(default=True)
def get_countries_without_shipping_zone():
"""Return countries that are not assigned to any shipping zone."""
from .models import ShippingZone
covered_countries = set()
for zone in ShippingZone.objects.all():
covered_countries.update({c.code for c in zone.countries})
return (country[0] for country in countries if country[0] not in covered_countries)
def convert_to_shipping_method_data(
shipping_method: Optional["ShippingMethod"],
) -> Optional["ShippingMethodData"]:
if not shipping_method:
return None
return ShippingMethodData(
id=str(shipping_method.id),
name=shipping_method.name,
price=getattr(shipping_method, "price", None),
description=shipping_method.description,
type=shipping_method.type,
excluded_products=shipping_method.excluded_products,
channel_listings=shipping_method.channel_listings,
minimum_order_weight=shipping_method.minimum_order_weight,
maximum_order_weight=shipping_method.maximum_order_weight,
maximum_delivery_days=shipping_method.maximum_delivery_days,
minimum_delivery_days=shipping_method.minimum_delivery_days,
metadata=shipping_method.metadata,
private_metadata=shipping_method.private_metadata,
)
|
salt/modules/mod_random.py | tomdoherty/salt | 9,425 | 12782321 | <filename>salt/modules/mod_random.py
"""
Provides access to randomness generators.
=========================================
.. versionadded:: 2014.7.0
"""
import base64
import hashlib
import random
import salt.utils.pycrypto
from salt.exceptions import SaltInvocationError
ALGORITHMS_ATTR_NAME = "algorithms_guaranteed"
# Define the module's virtual name
__virtualname__ = "random"
def __virtual__():
return __virtualname__
def hash(value, algorithm="sha512"):
"""
.. versionadded:: 2014.7.0
Encodes a value with the specified encoder.
value
The value to be hashed.
algorithm : sha512
The algorithm to use. May be any valid algorithm supported by
hashlib.
CLI Example:
.. code-block:: bash
salt '*' random.hash 'I am a string' md5
"""
if isinstance(value, str):
# Under Python 3 we must work with bytes
value = value.encode(__salt_system_encoding__)
if hasattr(hashlib, ALGORITHMS_ATTR_NAME) and algorithm in getattr(
hashlib, ALGORITHMS_ATTR_NAME
):
hasher = hashlib.new(algorithm)
hasher.update(value)
out = hasher.hexdigest()
elif hasattr(hashlib, algorithm):
hasher = hashlib.new(algorithm)
hasher.update(value)
out = hasher.hexdigest()
else:
raise SaltInvocationError("You must specify a valid algorithm.")
return out
def str_encode(value, encoder="base64"):
"""
.. versionadded:: 2014.7.0
value
The value to be encoded.
encoder : base64
The encoder to use on the subsequent string.
CLI Example:
.. code-block:: bash
salt '*' random.str_encode 'I am a new string' base64
"""
if isinstance(value, str):
value = value.encode(__salt_system_encoding__)
if encoder == "base64":
try:
out = base64.b64encode(value)
out = out.decode(__salt_system_encoding__)
except TypeError:
raise SaltInvocationError("Value must be an encode-able string")
else:
try:
out = value.encode(encoder)
except LookupError:
raise SaltInvocationError("You must specify a valid encoder")
except AttributeError:
raise SaltInvocationError("Value must be an encode-able string")
return out
def get_str(
length=20,
chars=None,
lowercase=True,
uppercase=True,
digits=True,
punctuation=True,
whitespace=False,
printable=False,
):
"""
.. versionadded:: 2014.7.0
.. versionchanged:: 3004.0
Changed the default character set used to include symbols and implemented arguments to control the used character set.
Returns a random string of the specified length.
length : 20
Any valid number of bytes.
chars : None
.. versionadded:: 3004.0
String with any character that should be used to generate random string.
This argument supersedes all other character controlling arguments.
lowercase : True
.. versionadded:: 3004.0
Use lowercase letters in generated random string.
(see :py:data:`string.ascii_lowercase`)
This argument is superseded by chars.
uppercase : True
.. versionadded:: 3004.0
Use uppercase letters in generated random string.
(see :py:data:`string.ascii_uppercase`)
This argument is superseded by chars.
digits : True
.. versionadded:: 3004.0
Use digits in generated random string.
(see :py:data:`string.digits`)
This argument is superseded by chars.
printable : False
.. versionadded:: 3004.0
Use printable characters in generated random string and includes lowercase, uppercase,
digits, punctuation and whitespace.
(see :py:data:`string.printable`)
It is disabled by default as includes whitespace characters which some systems do not
handle well in passwords.
This argument also supersedes all other classes because it includes them.
This argument is superseded by chars.
punctuation : True
.. versionadded:: 3004.0
Use punctuation characters in generated random string.
(see :py:data:`string.punctuation`)
This argument is superseded by chars.
whitespace : False
.. versionadded:: 3004.0
Use whitespace characters in generated random string.
(see :py:data:`string.whitespace`)
It is disabled by default as some systems do not handle whitespace characters in passwords
well.
This argument is superseded by chars.
CLI Example:
.. code-block:: bash
salt '*' random.get_str 128
salt '*' random.get_str 128 chars='abc123.!()'
salt '*' random.get_str 128 lowercase=False whitespace=True
"""
return salt.utils.pycrypto.secure_password(
length=length,
chars=chars,
lowercase=lowercase,
uppercase=uppercase,
digits=digits,
punctuation=punctuation,
whitespace=whitespace,
printable=printable,
)
def shadow_hash(crypt_salt=None, password=None, algorithm="sha512"):
"""
Generates a salted hash suitable for /etc/shadow.
crypt_salt : None
Salt to be used in the generation of the hash. If one is not
provided, a random salt will be generated.
password : None
Value to be salted and hashed. If one is not provided, a random
password will be generated.
algorithm : sha512
Hash algorithm to use.
CLI Example:
.. code-block:: bash
salt '*' random.shadow_hash 'My5alT' 'MyP@asswd' md5
"""
return salt.utils.pycrypto.gen_hash(crypt_salt, password, algorithm)
def rand_int(start=1, end=10, seed=None):
"""
Returns a random integer number between the start and end number.
.. versionadded:: 2015.5.3
start : 1
Any valid integer number
end : 10
Any valid integer number
seed :
Optional hashable object
.. versionchanged:: 2019.2.0
Added seed argument. Will return the same result when run with the same seed.
CLI Example:
.. code-block:: bash
salt '*' random.rand_int 1 10
"""
if seed is not None:
random.seed(seed)
return random.randint(start, end)
def seed(range=10, hash=None):
"""
Returns a random number within a range. Optional hash argument can
be any hashable object. If hash is omitted or None, the id of the minion is used.
.. versionadded:: 2015.8.0
hash: None
Any hashable object.
range: 10
Any valid integer number
CLI Example:
.. code-block:: bash
salt '*' random.seed 10 hash=None
"""
if hash is None:
hash = __grains__["id"]
random.seed(hash)
return random.randrange(range)
|
tests/r/test_friendship.py | hajime9652/observations | 199 | 12782442 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.friendship import friendship
def test_friendship():
"""Test module friendship.py by downloading
friendship.csv and testing shape of
extracted data has 0 rows and 7 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = friendship(test_path)
try:
assert x_train.shape == (0, 7)
except:
shutil.rmtree(test_path)
    raise
|
DS&Algo Programs in Python/inserting_heap.py | prathimacode-hub/HacktoberFest-2020 | 386 | 12782452 | <reponame>prathimacode-hub/HacktoberFest-2020
import heapq
H = [21,1,45,78,3,5]
# Covert to a heap
heapq.heapify(H)
print(H)
# Add element
heapq.heappush(H,8)
print(H) |
Scripts/sims4communitylib/utils/sims/common_buff_utils.py | ColonolNutty/Sims4CommunityLibrary | 118 | 12782469 | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Union, List, Tuple, Iterator
from buffs.buff import Buff
from distributor.shared_messages import IconInfoData
from protocolbuffers.Localization_pb2 import LocalizedString
from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam
from sims.sim_info import SimInfo
from sims4.commands import Command, CommandType, CheatOutput
from sims4.resources import Types
from sims4communitylib.enums.buffs_enum import CommonBuffId
from sims4communitylib.enums.strings_enum import CommonStringId
from sims4communitylib.enums.types.component_types import CommonComponentType
from sims4communitylib.exceptions.common_exceptions_handler import CommonExceptionHandler
from sims4communitylib.logging.has_class_log import HasClassLog
from sims4communitylib.mod_support.mod_identity import CommonModIdentity
from sims4communitylib.modinfo import ModInfo
from sims4communitylib.notifications.common_basic_notification import CommonBasicNotification
from sims4communitylib.utils.common_component_utils import CommonComponentUtils
from sims4communitylib.utils.localization.common_localization_utils import CommonLocalizationUtils
from sims4communitylib.utils.sims.common_sim_name_utils import CommonSimNameUtils
from sims4communitylib.utils.sims.common_sim_utils import CommonSimUtils
class CommonBuffUtils(HasClassLog):
"""Utilities for manipulating Buffs on Sims.
"""
# noinspection PyMissingOrEmptyDocstring
@classmethod
def get_mod_identity(cls) -> CommonModIdentity:
return ModInfo.get_identity()
# noinspection PyMissingOrEmptyDocstring
@classmethod
def get_log_identifier(cls) -> str:
return 'common_buff_utils'
@staticmethod
def has_fertility_boosting_buff(sim_info: SimInfo) -> bool:
"""has_fertility_boosting_buff(sim_info)
Determine if any fertility boosting buffs are currently active on a sim.
.. note::
Fertility Boosting Buffs:
- Fertility Potion
- Fertility Potion Masterwork
- Fertility Potion Normal
- Fertility Potion Outstanding
- Massage Table Fertility Boost
- Massage Table Fertility Boost Incense
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if they have any fertility boosting buffs. False, if not.
:rtype: bool
"""
buff_ids = (
CommonBuffId.OBJECT_HERBALIST_POTION_FERTILITY_POTION,
CommonBuffId.OBJECT_HERBALIST_POTION_FERTILITY_POTION_MASTERWORK,
CommonBuffId.OBJECT_HERBALIST_POTION_FERTILITY_POTION_NORMAL,
CommonBuffId.OBJECT_HERBALIST_POTION_FERTILITY_POTION_OUTSTANDING,
CommonBuffId.OBJECT_MASSAGE_TABLE_FERTILITY_BOOST,
CommonBuffId.OBJECT_MASSAGE_TABLE_FERTILITY_BOOST_INCENSE
)
return CommonBuffUtils.has_buff(sim_info, *buff_ids)
@staticmethod
def has_morning_person_buff(sim_info: SimInfo) -> bool:
"""has_morning_person_buff(sim_info)
Determine if any Morning Person Trait buffs are currently active on a Sim.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if they have any morning person buffs. False, if not.
:rtype: bool
"""
buff_ids = (
CommonBuffId.TRAIT_MORNING_PERSON,
CommonBuffId.TRAIT_MORNING_PERSON_ACTIVE,
CommonBuffId.TRAIT_MORNING_PERSON_CHECK_ACTIVE
)
return CommonBuffUtils.has_buff(sim_info, *buff_ids)
@staticmethod
def has_night_owl_buff(sim_info: SimInfo) -> bool:
"""has_night_owl_buff(sim_info)
Determine if any Night Owl Trait buffs are currently active on a sim.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if they have any night owl buffs. False, if not.
:rtype: bool
"""
buff_ids = (
CommonBuffId.TRAIT_NIGHT_OWL,
CommonBuffId.TRAIT_NIGHT_OWL_ACTIVE,
CommonBuffId.TRAIT_NIGHT_OWL_CHECK_ACTIVE
)
return CommonBuffUtils.has_buff(sim_info, *buff_ids)
@staticmethod
def has_buff(sim_info: SimInfo, *buffs: Union[int, CommonBuffId, Buff]) -> bool:
"""has_buff(sim_info, *buffs)
Determine if any of the specified buffs are currently active on a sim.
:param sim_info: The sim being checked.
:type sim_info: SimInfo
:param buffs: The identifiers of Buffs.
:type buffs: Union[int, CommonBuffId, Buff]
:return: True, if the sim has any of the specified buffs.
:rtype: int
"""
if sim_info is None:
raise AssertionError('Argument sim_info was None')
if not CommonComponentUtils.has_component(sim_info, CommonComponentType.BUFF):
return False
if not buffs:
return False
buff_ids = [CommonBuffUtils.get_buff_id(buff) for buff in buffs]
sim_buff_ids = CommonBuffUtils.get_buff_ids(sim_info)
for sim_buff_id in sim_buff_ids:
if sim_buff_id in buff_ids:
return True
return False
@staticmethod
def get_buffs(sim_info: SimInfo) -> List[Buff]:
"""get_buffs(sim_info)
Retrieve all buffs currently active on a Sim.
:param sim_info: The Sim to retrieve the buffs of.
:type sim_info: SimInfo
:return: A collection of currently active buffs on the Sim.
:rtype: Tuple[Buff]
"""
if sim_info is None:
raise AssertionError('Argument sim_info was None')
if not CommonComponentUtils.has_component(sim_info, CommonComponentType.BUFF):
return list()
from objects.components.buff_component import BuffComponent
buff_component: BuffComponent = CommonComponentUtils.get_component(sim_info, CommonComponentType.BUFF)
buffs = list()
for buff in buff_component:
if buff is None or not isinstance(buff, Buff):
continue
buffs.append(buff)
return buffs
@staticmethod
def get_buff_ids(sim_info: SimInfo) -> List[int]:
"""get_buff_ids(sim_info)
Retrieve decimal identifiers for all Buffs of a sim.
:param sim_info: The sim to checked.
:type sim_info: SimInfo
:return: A collection of Buff identifiers on a Sim.
:rtype: List[int]
"""
if sim_info is None:
raise AssertionError('Argument sim_info was None')
if not CommonComponentUtils.has_component(sim_info, CommonComponentType.BUFF):
return list()
buff_ids = list()
sim_buffs = CommonBuffUtils.get_buffs(sim_info)
for buff in sim_buffs:
buff_id = CommonBuffUtils.get_buff_id(buff)
if buff_id is None:
continue
buff_ids.append(buff_id)
return buff_ids
@classmethod
def add_buff(cls, sim_info: SimInfo, *buffs: Union[int, CommonBuffId], buff_reason: Union[int, str, LocalizedString, CommonStringId]=None) -> bool:
"""add_buff(sim_info, *buffs, buff_reason=None)
Add the specified buffs to a sim.
:param sim_info: The sim to add the specified buffs to.
:type sim_info: SimInfo
:param buffs: An iterable of identifiers of buffs being added.
:type buffs: Union[int, CommonBuffId, Buff]
:param buff_reason: The text that will display when the player hovers over the buffs. What caused the buffs to be added.
:type buff_reason: Union[int, str, LocalizedString, CommonStringId], optional
:return: True, if all of the specified buffs were successfully added. False, if not.
:rtype: bool
"""
if sim_info is None:
raise AssertionError('Argument sim_info was None')
if not CommonComponentUtils.has_component(sim_info, CommonComponentType.BUFF):
cls.get_log().format_with_message('Failed to add Buff to Sim. They did not have a Buff component!', buffs=buffs, sim=sim_info, buff_reason=buff_reason)
return False
localized_buff_reason = None
if buff_reason is not None:
localized_buff_reason = CommonLocalizationUtils.create_localized_string(buff_reason)
has_any = False
success = True
for buff_id in buffs:
buff = CommonBuffUtils.load_buff_by_id(buff_id)
if buff is None:
cls.get_log().format_with_message('No buff found using identifier.', buffs=buffs, sim=sim_info, buff_reason=buff_reason, buff_id=buff_id)
continue
if not sim_info.add_buff_from_op(buff, buff_reason=localized_buff_reason):
cls.get_log().format_with_message('Failed to add buff for unknown reasons.', buff=buff, sim=sim_info, buff_reason=buff_reason)
success = False
else:
cls.get_log().format_with_message('Successfully added buff.', buff=buff, sim=sim_info, buff_reason=buff_reason)
has_any = True
cls.get_log().format_with_message('Finished adding buffs to Sim.', buffs=buffs, sim=sim_info, buff_reason=buff_reason, success=success, has_any=has_any)
return success and has_any
@staticmethod
def remove_buff(sim_info: SimInfo, *buffs: Union[int, CommonBuffId, Buff]) -> bool:
"""remove_buff(sim_info, *buffs)
Remove the specified buffs from a sim.
:param sim_info: The sim to remove the specified buffs from.
:type sim_info: SimInfo
:param buffs: An iterable of identifiers of buffs being removed.
:type buffs: Union[int, CommonBuffId, Buff]
:return: True, if all of the specified buffs were successfully removed. False, if not.
:rtype: bool
"""
if sim_info is None:
raise AssertionError('Argument sim_info was None')
if not CommonComponentUtils.has_component(sim_info, CommonComponentType.BUFF):
return False
has_any = False
success = True
for buff in buffs:
buff = CommonBuffUtils.load_buff_by_id(buff)
if buff is None:
continue
sim_info.remove_buff_by_type(buff)
has_any = True
if CommonBuffUtils.has_buff(sim_info, buff):
success = False
return success and has_any
@staticmethod
def get_buff_id(buff_identifier: Union[int, Buff]) -> Union[int, None]:
"""get_buff_id(buff_identifier)
Retrieve the decimal identifier of a Buff.
:param buff_identifier: The identifier or instance of a Buff.
:type buff_identifier: Union[int, Buff]
:return: The decimal identifier of the Buff or None if the Buff does not have an id.
:rtype: Union[int, None]
"""
if isinstance(buff_identifier, int):
return buff_identifier
return getattr(buff_identifier, 'guid64', None)
@staticmethod
def get_buff_name(buff: Buff) -> Union[str, None]:
"""get_buff_name(buff)
Retrieve the Name of a Buff.
:param buff: An instance of a Buff.
:type buff: Buff
:return: The name of a Buff or None if a problem occurs.
:rtype: Union[str, None]
"""
if buff is None:
return None
# noinspection PyBroadException
try:
return buff.__class__.__name__ or ''
except:
return ''
@staticmethod
def get_buff_names(buffs: Iterator[Buff]) -> Tuple[str]:
"""get_buff_names(buffs)
Retrieve the Names of a collection of Buffs.
:param buffs: A collection of Buff instances.
:type buffs: Iterator[Buff]
:return: A collection of names for all specified Buffs.
:rtype: Tuple[str]
"""
if buffs is None or not buffs:
return tuple()
names: List[str] = []
for buff in buffs:
# noinspection PyBroadException
try:
name = CommonBuffUtils.get_buff_name(buff)
if not name:
continue
except:
continue
names.append(name)
return tuple(names)
@staticmethod
def load_buff_by_id(buff: Union[int, CommonBuffId, Buff]) -> Union[Buff, None]:
"""load_buff_by_id(buff)
Load an instance of a Buff by its identifier.
:param buff: The identifier of a Buff.
:type buff: Union[int, CommonBuffId, Buff]
:return: An instance of a Buff matching the decimal identifier or None if not found.
:rtype: Union[Buff, None]
"""
if isinstance(buff, Buff):
return buff
# noinspection PyBroadException
try:
buff: int = int(buff)
except:
buff: Buff = buff
return buff
from sims4.resources import Types
from sims4communitylib.utils.common_resource_utils import CommonResourceUtils
return CommonResourceUtils.load_instance(Types.BUFF, buff)
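# Minimal usage sketch for CommonBuffUtils (illustrative only; `sim_info` is assumed to be a SimInfo
# obtained elsewhere and 12345 is a hypothetical buff identifier, not a real tuning id):
#
#   if not CommonBuffUtils.has_buff(sim_info, 12345):
#       CommonBuffUtils.add_buff(sim_info, 12345, buff_reason='Example reason')
#   active_buff_names = CommonBuffUtils.get_buff_names(CommonBuffUtils.get_buffs(sim_info))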
@Command('s4clib.add_buff', command_type=CommandType.Live)
def _common_add_buff(buff: TunableInstanceParam(Types.BUFF), opt_sim: OptionalTargetParam=None, buff_reason: str=None, _connection: int=None):
from server_commands.argument_helpers import get_optional_target
output = CheatOutput(_connection)
if buff is None:
output('Failed, Buff not specified or Buff did not exist! s4clib.add_buff <buff_name_or_id> [opt_sim=None]')
return
sim_info = CommonSimUtils.get_sim_info(get_optional_target(opt_sim, _connection))
if sim_info is None:
output('Failed, no Sim was specified or the specified Sim was not found!')
return
sim_name = CommonSimNameUtils.get_full_name(sim_info)
output('Adding buff {} to Sim {}'.format(str(buff), sim_name))
try:
if CommonBuffUtils.add_buff(sim_info, buff, buff_reason=buff_reason):
output('Successfully added buff.')
else:
output('Failed to add buff.')
except Exception as ex:
CommonExceptionHandler.log_exception(ModInfo.get_identity(), 'Failed to add buff {} to Sim {}.'.format(str(buff), sim_name), exception=ex)
output('Failed to add buff {} to Sim {}. {}'.format(str(buff), sim_name, str(ex)))
@Command('s4clib.remove_buff', command_type=CommandType.Live)
def _common_remove_buff(buff: TunableInstanceParam(Types.BUFF), opt_sim: OptionalTargetParam=None, _connection: int=None):
from server_commands.argument_helpers import get_optional_target
output = CheatOutput(_connection)
if buff is None:
output('Failed, Buff not specified or Buff did not exist! s4clib.remove_buff <buff_name_or_id> [opt_sim=None]')
return
sim_info = CommonSimUtils.get_sim_info(get_optional_target(opt_sim, _connection))
if sim_info is None:
output('Failed, no Sim was specified or the specified Sim was not found!')
return
sim_name = CommonSimNameUtils.get_full_name(sim_info)
output('Removing buff {} from Sim {}'.format(str(buff), sim_name))
try:
if CommonBuffUtils.remove_buff(sim_info, buff):
output('Successfully removed buff.')
else:
output('Failed to remove buff.')
except Exception as ex:
CommonExceptionHandler.log_exception(ModInfo.get_identity(), 'Failed to remove buff {} from Sim {}.'.format(str(buff), sim_name), exception=ex)
output('Failed to remove buff {} from Sim {}. {}'.format(str(buff), sim_name, str(ex)))
@Command('s4clib.show_active_buffs', command_type=CommandType.Live)
def _common_show_active_buffs(opt_sim: OptionalTargetParam=None, _connection: int=None):
from server_commands.argument_helpers import get_optional_target
output = CheatOutput(_connection)
sim = get_optional_target(opt_sim, _connection)
sim_info = CommonSimUtils.get_sim_info(sim)
if sim_info is None:
output('Failed, no Sim was specified or the specified Sim was not found!')
return
sim_name = CommonSimNameUtils.get_full_name(sim_info)
output('Showing active buffs of Sim {}'.format(sim_name))
try:
sim_buff_strings: List[str] = list()
for buff in CommonBuffUtils.get_buffs(sim_info):
buff_name = CommonBuffUtils.get_buff_name(buff)
buff_id = CommonBuffUtils.get_buff_id(buff)
sim_buff_strings.append('{} ({})'.format(buff_name, buff_id))
sim_buff_strings = sorted(sim_buff_strings, key=lambda x: x)
sim_buffs = ', '.join(sim_buff_strings)
text = ''
text += 'Active Buffs:\n{}\n\n'.format(sim_buffs)
CommonBasicNotification(
CommonLocalizationUtils.create_localized_string('{} Active Buffs ({})'.format(sim_name, CommonSimUtils.get_sim_id(sim_info))),
CommonLocalizationUtils.create_localized_string(text)
).show(
icon=IconInfoData(obj_instance=CommonSimUtils.get_sim_instance(sim_info))
)
except Exception as ex:
CommonExceptionHandler.log_exception(ModInfo.get_identity(), 'Failed to show active buffs of Sim {}.'.format(sim_name), exception=ex)
output('Failed to show active buffs of Sim {}. {}'.format(sim_name, str(ex)))
|
tests/recipes/test_libffi.py | syrykh/python-for-android | 6,278 | 12782529 | import unittest
from tests.recipes.recipe_lib_test import BaseTestForMakeRecipe
class TestLibffiRecipe(BaseTestForMakeRecipe, unittest.TestCase):
"""
    A unittest for recipe :mod:`~pythonforandroid.recipes.libffi`
"""
recipe_name = "libffi"
sh_command_calls = ["./autogen.sh", "autoreconf", "./configure"]
def test_get_include_dirs(self):
list_of_includes = self.recipe.get_include_dirs(self.arch)
self.assertIsInstance(list_of_includes, list)
self.assertTrue(list_of_includes[0].endswith("include"))
|
examples/demo_purge.py | shirui-japina/tensorboardX | 5,378 | 12782537 | from time import sleep
from tensorboardX import SummaryWriter
with SummaryWriter(logdir='runs/purge') as w:
for i in range(100):
w.add_scalar('purgetest', i, i)
sleep(1.0)
with SummaryWriter(logdir='runs/purge', purge_step=42) as w:
# event 42~99 are removed (inclusively)
for i in range(42, 100):
w.add_scalar('purgetest', 42, i)
|
asset/test.py | 745184532/cmdb | 251 | 12782542 | <reponame>745184532/cmdb
import time
print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))) |
quspin/basis/basis_1d/_check_1d_symm.py | anton-buyskikh/QuSpin | 195 | 12782575 | <gh_stars>100-1000
from __future__ import print_function, division
import warnings
def flip_sublat(opstr,indx,lat=0):
sign = 1
opstr = [str(s) for s in opstr]
for s,i,j in zip(opstr,indx,range(len(indx))):
if ((i % 2) == (lat % 2)):
if (s in ['z','y']):
sign *= -1
elif (s == "+"):
opstr[j] = '-'
elif (s == "-"):
opstr[j] = '+'
return sign,"".join(opstr)
def check_T(sort_opstr,operator_list,L,a):
missing_ops=[]
for i in range(0,L//a,1):
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
for j,ind in enumerate(indx):
indx[j] = (ind+i*a)%L
new_op = list(op)
new_op[1] = indx
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return missing_ops
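# Hedged example of the operator-list format these checks expect: each operator is a list of the form
# [opstr, indx, coupling, ...]. The identity lambda below stands in for the basis' own sort_opstr and is
# an assumption, not the real sorter.
#
#   operator_list = [["zz", [0, 1], 1.0], ["zz", [1, 2], 1.0], ["zz", [2, 3], 1.0], ["zz", [3, 0], 1.0]]
#   missing = check_T(lambda op: op, operator_list, 4, 1)
#   # `missing` collects translated operators that are absent; it is [] for this translationally invariant set.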
def check_Z(sort_opstr,operator_list):
missing_ops=[]
odd_ops=[]
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
if opstr.count("|") == 1:
i = opstr.index("|")
else:
i = len(opstr)
z_count = opstr[:i].count("z")
y_count = opstr[:i].count("y")
if ((y_count + z_count) % 2) != 0:
odd_ops.append(op)
new_op = list(op)
new_op[0] = new_op[0][:i].replace("+","#").replace("-","+").replace("#","-") + op[0][i:]
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return odd_ops,missing_ops
def check_P(sort_opstr,operator_list,L):
missing_ops = []
for op in operator_list:
indx = list(op[1])
for j,ind in enumerate(indx):
indx[j] = (L-1-ind) % L
new_op = list(op)
new_op[1] = indx
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return missing_ops
def check_PZ(sort_opstr,operator_list,L):
missing_ops = []
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
if opstr.count("|") == 1:
i = opstr.index("|")
else:
i = len(opstr)
for j,ind in enumerate(indx):
indx[j] = (L-1-ind) % L
sign = (-1)**(opstr[:i].count('z')+opstr.count('y'))
new_op = list(op)
new_op[0] = new_op[0][:i].replace("+","#").replace("-","+").replace("#","-") + op[0][i:]
new_op[1] = indx
new_op[2] *= sign
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return missing_ops
def check_ZA(sort_opstr,operator_list):
missing_ops=[]
odd_ops=[]
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
if opstr.count("|") == 1:
i = opstr.index("|")
else:
i = len(opstr)
sign,new_opstr = flip_sublat(opstr[:i],indx[:i],lat=0)
if sign == -1:
odd_ops.append(op)
new_op = list(op)
new_op[0] = new_opstr + opstr[i:]
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return odd_ops,missing_ops
def check_ZB(sort_opstr,operator_list):
missing_ops=[]
odd_ops=[]
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
if opstr.count("|") == 1:
i = opstr.index("|")
else:
i = len(opstr)
sign,new_opstr = flip_sublat(opstr[:i],indx[:i],lat=1)
if sign == -1:
odd_ops.append(op)
new_op = list(op)
new_op[0] = new_opstr + opstr[i:]
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return odd_ops,missing_ops
|
asana/resources/project_memberships.py | FiyaFly/python-asana | 266 | 12782626 | <filename>asana/resources/project_memberships.py
from .gen.project_memberships import _ProjectMemberships
class ProjectMemberships(_ProjectMemberships):
"""Project Memberships resource"""
def find_by_project(self, project, params={}, **options):
"""Returns the compact project membership records for the project.
Parameters
----------
project : {Gid} The project for which to fetch memberships.
[params] : {Object} Parameters for the request
- [user] : {String} If present, the user to filter the memberships to.
"""
path = "/projects/%s/project_memberships" % (project)
return self.client.get_collection(path, params, **options)
def find_by_id(self, project_membership, params={}, **options):
"""Returns the project membership record.
Parameters
----------
project_membership : {Gid} Globally unique identifier for the project membership.
[params] : {Object} Parameters for the request
"""
path = "/project_memberships/%s" % (project_membership)
return self.client.get(path, params, **options)
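# Hedged usage sketch (assumes a configured `asana.Client` named `client`; the gids below are placeholders):
#
#   memberships = client.project_memberships.find_by_project('1199912345678901', {'user': 'me'})
#   for membership in memberships:
#       print(membership)
#   one_membership = client.project_memberships.find_by_id('1199998765432109')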
|
amlb/utils/serialization.py | PGijsbers/automlbenchmark | 282 | 12782644 | <filename>amlb/utils/serialization.py
import logging
import math
import os
import pickle
import re
from typing import Optional
from .core import Namespace as ns, json_dump, json_load
from .process import profile
log = logging.getLogger(__name__)
def _import_data_libraries():
try:
import numpy as np
except ImportError:
np = None
try:
import pandas as pd
except ImportError:
pd = None
try:
import scipy.sparse as sp
except ImportError:
sp = None
return np, pd, sp
ser_config = ns(
# the serializer to use when there's no specific serializer available.
# mainly intended to serialize simple data structures like lists.
# allowed=['pickle', 'json']
fallback_serializer='json',
# if numpy can use pickle to serialize ndarrays,
numpy_allow_pickle=True,
# format used to serialize pandas dataframes/series between processes.
# allowed=['pickle', 'parquet', 'hdf', 'json']
pandas_serializer='parquet',
# the compression format used when serializing pandas dataframes/series.
# allowed=[None, 'infer', 'bz2', 'gzip']
# 'infer' (= None) is the fastest but no compression,
# 'gzip' fast write and read with good compression.
# 'bz2' looks like the best compression/time ratio (faster write, sometimes slightly slower read)
pandas_compression='infer',
# the compression format used when serializing pandas dataframes/series to parquet.
# allowed=[None, 'snappy', 'gzip', 'brotli']
pandas_parquet_compression=None,
# if sparse matrices should be compressed during serialization.
sparse_matrix_compression=True,
# if sparse matrices should be deserialized to some specific format:
# allowed=[None, 'array', 'dense']
# None (no change), 'array' (numpy), 'dense' (dense matrix).
sparse_matrix_deserialized_format=None,
# if sparse dataframes should be deserialized to some specific format:
# allowed=[None, 'array', 'dense']
# None (no change), 'array' (numpy), 'dense' (dense dataframe/series).
sparse_dataframe_deserialized_format=None,
)
__series__ = '_series_'
class SerializationError(Exception):
pass
def is_serializable_data(data):
np, pd, sp = _import_data_libraries()
return isinstance(data, (np.ndarray, sp.spmatrix, pd.DataFrame, pd.Series))
def is_sparse(data):
np, pd, sp = _import_data_libraries()
return ((sp and isinstance(data, sp.spmatrix)) # sparse matrix
or (pd and isinstance(data, pd.Series) and pd.api.types.is_sparse(data.dtype)) # sparse Series
or (pd and isinstance(data, pd.DataFrame) # if one column is sparse, the dataframe is considered as sparse
and any(pd.api.types.is_sparse(dt) for dt in data.dtypes)))
def unsparsify(*data, fmt='dense'):
if len(data) == 1:
return _unsparsify(data[0], fmt=fmt)
else:
return tuple(_unsparsify(d, fmt=fmt) for d in data)
def _unsparsify(data, fmt=None):
"""
:param data: the matrix to process.
:param fmt: one of None, 'array', 'dense'
:return: the original matrix is fmt is None,
a numpy array if fmt is 'array',
a dense version of the data type if fmt is 'dense'.
"""
if fmt is None:
return data
np, pd, sp = _import_data_libraries()
if sp and isinstance(data, sp.spmatrix):
return (data.toarray() if fmt == 'array'
else data.todense() if fmt == 'dense'
else data)
elif pd and isinstance(data, (pd.DataFrame, pd.Series)):
return (data.to_numpy(copy=False) if fmt == 'array'
else _pd_to_dense(pd, data) if fmt == 'dense' and is_sparse(data)
else data)
else:
return data
def _pd_to_dense(pd, df):
if hasattr(df, 'sparse'):
return df.sparse.to_dense()
data = {k: (v.sparse.to_dense() if hasattr(v, 'sparse') else v) for k, v in df.items()}
return pd.DataFrame(data, index=df.index, columns=df.columns)
def _pd_dtypes_to_str(pd, df):
return {k: str(v) for k, v in df.dtypes.items()}
def _pd_dtypes_from_str(pd, dt):
def dt_from_str(s):
m_sparse = re.match(r"Sparse\[(.*)]", s)
if m_sparse:
sub_type, fill_value = [t.strip() for t in m_sparse.group(1).split(",", 1)]
try:
fill_value = eval(fill_value, {'nan': math.nan, '<NA>': pd.NA})
except ValueError:
pass
dt = pd.api.types.pandas_dtype(f"Sparse[{sub_type}]")
return pd.SparseDtype(dt, fill_value=fill_value)
else:
return pd.api.types.pandas_dtype(s)
return {k: dt_from_str(v) for k, v in dt.items()}
@profile(log)
def serialize_data(data, path, config: Optional[ns] = None):
config = (config | ser_config) if config else ser_config
root, ext = os.path.splitext(path)
np, pd, sp = _import_data_libraries()
if np and isinstance(data, np.ndarray):
path = f"{root}.npy"
np.save(path, data, allow_pickle=config.numpy_allow_pickle)
elif sp and isinstance(data, sp.spmatrix):
        # use a custom extension to recognize sparse matrices from the file name.
# .npz is automatically appended if missing, and can also potentially be used for numpy arrays.
path = f"{root}.spy.npz"
sp.save_npz(path, data, compressed=config.sparse_matrix_compression)
elif pd and isinstance(data, (pd.DataFrame, pd.Series)):
path = f"{root}.pd"
if isinstance(data, pd.DataFrame):
# pandas has this habit of inferring value types when data are loaded from file,
# for example, 'true' and 'false' are converted automatically to booleans, even for column names…
data.rename(str, axis='columns', inplace=True)
ser = config.pandas_serializer
if ser == 'pickle':
data.to_pickle(path, compression=config.pandas_compression)
elif ser == 'parquet':
if isinstance(data, pd.Series):
data = pd.DataFrame({__series__: data})
# parquet serialization doesn't support sparse dataframes
if is_sparse(data):
path = f"{root}.sparse.pd"
dtypes = _pd_dtypes_to_str(pd, data)
json_dump(dtypes, f"{path}.dtypes", style='compact')
data = unsparsify(data)
data.to_parquet(path, compression=config.pandas_parquet_compression)
elif ser == 'hdf':
data.to_hdf(path, os.path.basename(path), mode='w', format='table')
elif ser == 'json':
data.to_json(path, compression=config.pandas_compression)
else: # fallback serializer
if config.fallback_serializer == 'json':
path = f"{root}.json"
json_dump(data, path, style='compact')
else:
path = f"{root}.pkl"
with open(path, 'wb') as f:
pickle.dump(data, f)
return path
@profile(log)
def deserialize_data(path, config: Optional[ns] = None):
config = (config | ser_config) if config else ser_config
np, pd, sp = _import_data_libraries()
base, ext = os.path.splitext(path)
if ext == '.npy':
if np is None:
raise SerializationError(f"Numpy is required to deserialize {path}.")
return np.load(path, allow_pickle=config.numpy_allow_pickle)
elif ext == '.npz':
_, ext2 = os.path.splitext(base)
if ext2 == '.spy':
if sp is None:
raise SerializationError(f"Scipy is required to deserialize {path}.")
sp_matrix = sp.load_npz(path)
return unsparsify(sp_matrix, fmt=config.sparse_matrix_deserialized_format)
else:
if np is None:
raise SerializationError(f"Numpy is required to deserialize {path}.")
            with np.load(path, allow_pickle=config.numpy_allow_pickle) as loaded:
return loaded
elif ext == '.pd':
if pd is None:
raise SerializationError(f"Pandas is required to deserialize {path}.")
ser = config.pandas_serializer
df = None
if ser == 'pickle':
df = pd.read_pickle(path, compression=config.pandas_compression)
elif ser == 'parquet':
df = pd.read_parquet(path)
if len(df.columns) == 1 and df.columns[0] == __series__:
df = df.squeeze()
_, ext2 = os.path.splitext(base)
if config.sparse_dataframe_deserialized_format is None and ext2 == '.sparse':
# trying to restore dataframe as sparse if it was as such before serialization
# and if the dataframe format should remain unchanged
j_dtypes = json_load(f"{path}.dtypes")
dtypes = _pd_dtypes_from_str(pd, j_dtypes)
df = df.astype(dtypes, copy=False)
elif ser == 'hdf':
df = pd.read_hdf(path, os.path.basename(path))
elif ser == 'json':
df = pd.read_json(path, compression=config.pandas_compression)
return unsparsify(df, fmt=config.sparse_dataframe_deserialized_format)
elif ext == '.json':
return json_load(path)
elif ext == '.pkl':
with open(path, 'rb') as f:
return pickle.load(f)
else:
raise SerializationError(f"Can not deserialize file `{path}` in unknown format.")
|
lib/galaxy/job_metrics/formatting.py | rikeshi/galaxy | 1,085 | 12782656 | <reponame>rikeshi/galaxy
"""Utilities related to formatting job metrics for human consumption."""
class JobMetricFormatter:
"""Format job metric key-value pairs for human consumption in Web UI."""
def format(self, key, value):
return (str(key), str(value))
def seconds_to_str(value):
"""Convert seconds to a simple simple string describing the amount of time."""
mins, secs = divmod(value, 60)
hours, mins = divmod(mins, 60)
if value < 60:
return f"{secs} second{'s' if secs != 1 else ''}"
elif value < 3600:
return f"{mins} minute{'s' if mins != 1 else ''}"
else:
return f"{hours} hour{'s' if hours != 1 else ''} and {mins} minute{'s' if mins != 1 else ''}"
|
aliyun-python-sdk-nlp-automl/aliyunsdknlp_automl/__init__.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 12782660 | __version__ = '0.0.9' |
sdk/attestation/azure-security-attestation/tests/preparers_async.py | rsdoherty/azure-sdk-for-python | 2,728 | 12782663 | <filename>sdk/attestation/azure-security-attestation/tests/preparers_async.py<gh_stars>1000+
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import Awaitable, List
from azure.security.attestation import AttestationType
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
from typing import Awaitable, Callable, Dict, Optional, Any, TypeVar, overload
T = TypeVar("T")
def AllAttestationTypes(func: Callable[..., Awaitable[T]] = None, **kwargs: Any):
"""Decorator to apply to function to add attestation_type kwarg for each attestation type."""
async def wrapper(*args, **kwargs) -> Callable[..., Awaitable[T]]:
for attestation_type in [
AttestationType.SGX_ENCLAVE,
AttestationType.OPEN_ENCLAVE,
AttestationType.TPM,
]:
await func(*args, attestation_type=attestation_type, **kwargs)
return wrapper
def AllInstanceTypes(
func: Callable[..., Awaitable[T]] = None, include_shared: bool = True, **kwargs: Any
):
"""Decorator to apply to function to add instance_url kwarg for each instance type."""
async def wrapper(*args, **kwargs) -> Callable[..., Awaitable[T]]:
instances = [] # type:List[str]
instances.append(kwargs.get("attestation_aad_url"))
instances.append(kwargs.get("attestation_isolated_url"))
if include_shared:
instances.append(
"https://shared"
+ kwargs.get("attestation_location_short_name")
+ "."
+ kwargs.get("attestation_location_short_name")
+ ".attest.azure.net"
)
for instance_url in instances:
await func(*args, instance_url=instance_url, **kwargs)
return wrapper
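# Hedged usage sketch: both decorators simply re-invoke the wrapped coroutine once per attestation type
# or instance URL. The test class and keyword arguments shown are assumptions, not part of this module:
#
#   class MyAttestationTest(AzureTestCase):
#       @AllAttestationTypes
#       async def test_attest(self, attestation_type=None, **kwargs):
#           ...  # exercise the client for `attestation_type`
#
# `AllInstanceTypes` expects `attestation_aad_url`, `attestation_isolated_url` and
# `attestation_location_short_name` to be passed in via **kwargs by the test framework.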
|
mrec/evaluation/__init__.py | imall100/mrec | 392 | 12782669 | class Evaluator(object):
"""
Compute metrics for recommendations that have been written to file.
Parameters
----------
compute_metrics : function(list,list)
The evaluation function which should accept two lists of predicted
and actual item indices.
max_items : int
The number of recommendations needed to compute the evaluation function.
"""
def __init__(self,compute_metrics,max_items):
self.compute_metrics = compute_metrics
self.max_items = max_items
def _add_metrics(self,predicted,actual):
metrics = self.compute_metrics(predicted,actual)
if metrics:
for m,val in metrics.iteritems():
self.cum_metrics[m] += val
self.count += 1
def process(self,testdata,recsfile,start,end,offset=1):
"""
Parameters
----------
testdata : scipy sparse matrix
The test items for each user.
recsfile : str
Filepath to the recommendations. The file should contain TSV
of the form: user, item, score. IMPORTANT: the recommendations must
be sorted by user and score.
start : int
First user to evaluate.
end: int
One after the last user to evaluate.
offset : int
Index offset for users and items in recommendations file.
Returns
-------
cum_metrics : dict
Aggregated metrics i.e. total values for all users.
count : int
The number of users for whom metrics were computed.
"""
from collections import defaultdict
self.cum_metrics = defaultdict(float)
self.count = 0
last_user = start
recs = []
for line in open(recsfile):
user,item,score = line.strip().split('\t')
            user = int(user)-1 # convert to 0-indexed
item = int(item)-1
if user >= end:
break
if user < start:
continue
if user != last_user:
self._add_metrics(recs,testdata[last_user,:].indices.tolist())
last_user = user
recs = []
if len(recs) < self.max_items:
recs.append(item)
self._add_metrics(recs,testdata[last_user,:].indices.tolist())
return self.cum_metrics,self.count
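# Hedged usage sketch (the metric function, test matrix and recommendations file are placeholders):
#
#   def compute_hit_rate(predicted, actual):
#       return {'hit_rate': 1.0 if set(predicted) & set(actual) else 0.0}
#
#   evaluator = Evaluator(compute_hit_rate, max_items=20)
#   cum_metrics, count = evaluator.process(test_matrix, 'recs.tsv', 0, num_users)
#   # average each metric over the evaluated users:
#   averages = dict((m, v / count) for m, v in cum_metrics.iteritems())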
|
Data Structures/Linked Lists/Singly Linked List/Single-linked-list-operations.py | siddhi-244/CompetitiveProgrammingQuestionBank | 931 | 12782676 | # A pythom program for all operations performed on singly linked-list.
# Time-Complexity = O(n)
# Space-Complexity = O(n)
class Node:
def __init__(self, data=None, next=None): # Creation of Node
self.data = data
self.next = next
class LinkedList:
def __init__(self):
self.head = None # head points the first node
def print(self):
if self.head is None:
print("Linked list is empty")
return
itr = self.head
llstr = '' # empty string
while itr:
llstr += str(itr.data)+' --> ' if itr.next else str(itr.data)
itr = itr.next
print(llstr)
def length(self): # will calculate length of the linked list
count = 0
itr = self.head
while itr:
count += 1
itr = itr.next
return count
def insert_at_begining(self, data):
node = Node(data, self.head) # Creating a new node calling Node method
self.head = node
def insert_at_end(self, data):
if self.head is None:
self.head = Node(data, None)
return
itr = self.head
while itr.next:
itr = itr.next
itr.next = Node(data, None)
def insert_at(self, index, data):
if index < 0 or index > self.length():
raise Exception("Invalid Index")
if index == 0:
self.insert_at_begining(data)
return
count = 0
itr = self.head
while itr:
if count == index - 1:
node = Node(data, itr.next)
itr.next = node
break
itr = itr.next
count += 1
def remove_at(self, index):
if index < 0 or index >= self.length():
raise Exception("Invalid Index")
if index == 0:
self.head = self.head.next
return
count = 0
itr = self.head
while itr:
if count == index - 1:
itr.next = itr.next.next # to delete the specified node
break
itr = itr.next
count += 1
def insert_values(self, data_list):
self.head = None
for data in data_list:
self.insert_at_end(data)
# removing element at linkedlist with Value
def removeval(self, value):
count = 0
temp = self.head
while temp:
if value != temp.data:
count += 1
temp = temp.next
if count == self.length():
print("Value is not present")
else:
if value == self.head.data:
self.head = self.head.next
return
temp = self.head
while temp:
if value == temp.next.data:
temp.next = temp.next.next
break
temp = temp.next
if __name__ == '__main__':
node1 = LinkedList()
ins = list(input("Enter a values to be inserted by giving space[eg: python c++ java] : ").rstrip().split())
node1.insert_values(ins)
node1.print()
ind = int(input("Enter the index to be added: "))
val = input('Enter the value: ')
node1.insert_at(ind, val)
node1.print()
remm = int(input('Enter the index to be removed: '))
node1.remove_at(remm)
node1.print()
remval = input('Enter the value to be removed: ')
node1.removeval(remval)
node1.print()
inss = list(input("Enter a values to be inserted by giving space[eg: 45 30 22] : ").rstrip().split())
node1.insert_values(inss)
node1. print()
inend = int(input('Enter the number to be inserted at the end: '))
node1.insert_at_end(inend)
node1.print()
remval1 = input('Enter the value to be removed: ')
node1.removeval(remval1)
node1.print()
|
opennre/tokenization/word_piece_tokenizer.py | WinterSoHot/OpenNRE | 3,284 | 12782735 | <filename>opennre/tokenization/word_piece_tokenizer.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WordpieceTokenizer classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unicodedata
from .utils import (load_vocab,
convert_to_unicode,
clean_text,
split_on_whitespace,
convert_by_vocab,
tokenize_chinese_chars)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab = None, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = load_vocab(vocab)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
""" Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`.
Returns:
output_tokens: A list of wordpiece tokens.
current_positions: A list of the current positions for the original words in text .
"""
text = convert_to_unicode(text)
text = clean_text(text)
text = tokenize_chinese_chars(text)
output_tokens = []
current_positions = []
token_list = split_on_whitespace(text)
for chars in token_list:
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
if start > 0:
substr = "##" + chars[start:end]
else:
substr = chars[start:end]
cur_substr = None
while start < end:
if substr in self.vocab:
cur_substr = substr
break
end -= 1
substr = substr[:-1]
if cur_substr is None:
is_bad = True
break
else:
sub_tokens.append(cur_substr)
start = end
current_positions.append([])
if is_bad:
current_positions[-1].append(len(output_tokens))
output_tokens.append(self.unk_token)
current_positions[-1].append(len(output_tokens))
else:
current_positions[-1].append(len(output_tokens))
output_tokens.extend(sub_tokens)
current_positions[-1].append(len(output_tokens))
return output_tokens, current_positions
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
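# Hedged usage sketch (assumes `vocab.txt` is a BERT-style vocabulary file understood by `load_vocab`):
#
#   tokenizer = WordpieceTokenizer(vocab='vocab.txt')
#   tokens, positions = tokenizer.tokenize('unaffable')
#   # e.g. tokens == ['un', '##aff', '##able'] and positions == [[0, 3]] if all pieces are in the vocab
#   ids = tokenizer.convert_tokens_to_ids(tokens)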
|
PyEngine3D/Utilities/Config.py | ubuntunux/PyEngine3D | 121 | 12782761 | <reponame>ubuntunux/PyEngine3D<gh_stars>100-1000
import os
import configparser
import traceback
from . import Logger
# util class
class Empty:
pass
def evaluation(value):
# find value type
try:
evalValue = eval(value)
if type(evalValue) in [int, float, list, tuple, dict]:
return evalValue
except:
return value
def getValue(config, section, option, default_value=None):
return evaluation(config[section][option]) if config.has_option(section, option) else default_value
def setValue(config, section, option, value):
if not config.has_section(section):
config.add_section(section)
config.set(section, option, value)
# ------------------------------ #
# CLASS : Configure
# Usage :
# config = Configure()
# # get value example, section:Screen, option:wdith
# print(config.Screen.width)
# ------------------------------ #
class Config:
def __init__(self, configFilename, log_level=Logger.WARN, prevent_lowercase=True):
self.log_level = log_level
self.isChanged = False
self.filename = configFilename
        self.config = configparser.ConfigParser()
        # prevent the option keys from being lowercased (must be set before reading the file)
        if prevent_lowercase:
            self.config.optionxform = lambda option_name: option_name
        self.config.read(configFilename)
if self.log_level <= Logger.INFO:
print("Load Config : %s" % self.filename)
# set sections
for section in self.config.sections():
if self.log_level == Logger.DEBUG:
print("[%s]" % section)
if not hasattr(self, section):
setattr(self, section, Empty())
# set value to member variables
current_section = getattr(self, section)
for option in self.config[section]:
value = self.config.get(section, option)
if self.log_level == Logger.DEBUG:
print("%s = %s" % (option, value))
setattr(current_section, option, evaluation(value))
def hasValue(self, section, option):
return self.config.has_option(section, option)
def getValue(self, section, option, default_value=None):
return evaluation(self.config[section][option]) if self.config.has_option(section, option) else default_value
def setValue(self, section, option, value):
# set value
if not self.config.has_section(section):
self.config.add_section(section)
self.config[section][option] = str(value)
# set value to member variables
if not hasattr(self, section):
setattr(self, section, Empty())
self.isChanged = True
elif not self.isChanged:
self.isChanged = value != getattr(self, section)
current_section = getattr(self, section)
setattr(current_section, option, value)
def setDefaultValue(self, section, option, value):
if not self.hasValue(section, option):
self.setValue(section, option, value)
def save(self):
if self.isChanged or not os.path.exists(self.filename):
with open(self.filename, 'w') as configfile:
self.config.write(configfile)
if self.log_level <= Logger.INFO:
print("Saved Config : " + self.filename)
self.isChanged = False
def getFilename(self):
return self.filename
if __name__ == '__main__':
import unittest
class test(unittest.TestCase):
def testConfig(self):
# load test
testConfig = Config("TestConfig.ini", debug=False)
# set value
testConfig.setValue("TestSection", "test_int", 45)
testConfig.setValue("TestSection", "test_float", 0.1)
testConfig.setValue("TestSection", "test_string", "Hello, World")
testConfig.setValue("TestSection", "test_list", [1, 2, 3])
testConfig.setValue("TestSection", "test_tuple", (4, 5, 6))
testConfig.setValue("TestSection", "test_dict", {"x":7.0, "y":8.0})
# call test
self.assertEqual(testConfig.TestSection.test_int, 45)
self.assertEqual(testConfig.TestSection.test_float, 0.1)
self.assertEqual(testConfig.TestSection.test_string, "Hello, World")
self.assertEqual(testConfig.TestSection.test_list, [1, 2, 3])
self.assertEqual(testConfig.TestSection.test_tuple, (4, 5, 6))
self.assertEqual(testConfig.TestSection.test_dict['x'], 7.0)
self.assertEqual(testConfig.TestSection.test_dict['y'], 8.0)
# set value test
testConfig.setValue("TestSection", "test_int", 99)
self.assertEqual(testConfig.TestSection.test_int, 99)
testConfig.save()
unittest.main()
|
test/binaries/foo_v1.py | drmikecrowe/cod | 405 | 12782809 | #!/usr/bin/env python3
"""
Usage: foo [OPTION]...
--foo1 useful option foo
--bar1 useful option bar
"""
import sys
if __name__ == "__main__":
print(__doc__, file=sys.stderr)
|
tests/integration/helper.py | covx/graypy_v6 | 181 | 12782811 | <filename>tests/integration/helper.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""helper functions for testing graypy with a local Graylog instance"""
from time import sleep
from uuid import uuid4
import requests
def get_unique_message():
return str(uuid4())
DEFAULT_FIELDS = [
"message",
"full_message",
"source",
"level",
"func",
"file",
"line",
"module",
"logger_name",
]
BASE_API_URL = 'http://127.0.0.1:9000/api/search/universal/relative?query=message:"{0}"&range=300&fields='
def get_graylog_response(message, fields=None):
"""Search for a given log message (with possible additional fields)
within a local Graylog instance"""
fields = fields if fields else []
tries = 0
while True:
try:
return _parse_api_response(
api_response=_get_api_response(message, fields), wanted_message=message
)
except ValueError:
sleep(2)
if tries == 5:
raise
tries += 1
def _build_api_string(message, fields):
return BASE_API_URL.format(message) + "%2C".join(set(DEFAULT_FIELDS + fields))
def _get_api_response(message, fields):
url = _build_api_string(message, fields)
api_response = requests.get(
url, auth=("admin", "admin"), headers={"accept": "application/json"}
)
return api_response
def _parse_api_response(api_response, wanted_message):
assert api_response.status_code == 200
print(api_response.json())
for message in api_response.json()["messages"]:
if message["message"]["message"] == wanted_message:
return message["message"]
raise ValueError(
"wanted_message: '{}' not within api_response: {}".format(
wanted_message, api_response
)
)
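# Hedged usage sketch for these helpers (assumes a local Graylog instance reachable at BASE_API_URL and a
# logger already configured with a graypy GELF handler; `logger` is a placeholder name):
#
#   message = get_unique_message()
#   logger.error(message)
#   graylog_event = get_graylog_response(message, fields=['custom_field'])
#   assert graylog_event['message'] == message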
|
tests/test_installation_commands.py | figufema/TesteClone | 1,521 | 12782823 | # -*- coding: utf-8 -*-
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the google.colab._installation_commands package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import unittest
import IPython
from IPython.utils import io
from google.colab import load_ipython_extension
MOCKED_COMMANDS = {
'pip install pandas':
"""
Requirement already satisfied: pandas in /usr/local/lib/python2.7/dist-packages (0.22.0)
Requirement already satisfied: pytz>=2011k in /usr/local/lib/python2.7/dist-packages (from pandas) (2018.9)
Requirement already satisfied: python-dateutil in /usr/local/lib/python2.7/dist-packages (from pandas) (2.5.3)
Requirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python2.7/dist-packages (from pandas) (1.16.2)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python2.7/dist-packages (from python-dateutil->pandas) (1.11.0)
""",
'pip install -U numpy':
"""
Collecting numpy
Downloading https://files.pythonhosted.org/packages/c4/33/8ec8dcdb4ede5d453047bbdbd01916dbaccdb63e98bba60989718f5f0876/numpy-1.16.2-cp27-cp27mu-manylinux1_x86_64.whl (17.0MB)
100% |============================| 17.0MB 660kB/s
fastai 0.7.0 has requirement torch<0.4, but you'll have torch 1.0.1.post2 which is incompatible.
albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.8 which is incompatible.
featuretools 0.4.1 has requirement pandas>=0.23.0, but you'll have pandas 0.22.0 which is incompatible.
Installing collected packages: numpy
Found existing installation: numpy 1.14.6
Uninstalling numpy-1.14.6:
Successfully uninstalled numpy-1.14.6
Successfully installed numpy-1.16.2
"""
}
class MockInteractiveShell(IPython.InteractiveShell):
"""Interactive shell that mocks some commands."""
def system(self, cmd):
if cmd in MOCKED_COMMANDS:
sys.stderr.write('')
sys.stdout.write(MOCKED_COMMANDS[cmd])
self.user_ns['_exit_code'] = 0
else:
return super(MockInteractiveShell, self).system(cmd)
class InstallationCommandsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(InstallationCommandsTest, cls).setUpClass()
cls.ip = MockInteractiveShell()
load_ipython_extension(cls.ip)
def testPipMagicPandas(self):
output = self.run_cell('%pip install pandas')
self.assertEqual([], output.outputs)
self.assertEqual('', output.stderr)
self.assertIn('pandas', output.stdout)
def testPipMagicNumpy(self):
output = self.run_cell('%pip install -U numpy')
self.assertEqual([], output.outputs)
self.assertEqual('', output.stderr)
self.assertIn('numpy', output.stdout)
def run_cell(self, cell_contents):
with io.capture_output() as captured:
self.ip.run_cell(cell_contents)
return captured
|
sfn-log-export/src/functions/export_status_check/index.py | Domt301/serverless-patterns | 883 | 12782875 | <gh_stars>100-1000
import boto3
log_client = boto3.client('logs')
def handler(event, context):
task_id = event['taskId']
result = log_client.describe_export_tasks(taskId=task_id)
# per documentation, only one export can run at a time per account,
# therefore ensure none are running in this account
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs.html#CloudWatchLogs.Client.describe_export_tasks
# result = log_client.describe_export_tasks(statusCode='CANCELLED' | 'PENDING' | 'PENDING_CANCEL' | 'RUNNING')
status = 'RUNNING'
task_status = result.get('exportTasks')
if len(task_status) != 0:
task_status = task_status[0].get('status').get('code')
if task_status not in ['PENDING', 'PENDING_CANCEL', 'RUNNING']:
status = 'NOT_RUNNING'
return {"Status": status}
|
scripts/gen_chainercv_test.py | disktnk/chainer-compiler | 116 | 12782999 | <reponame>disktnk/chainer-compiler
"""Tests for ChainerCV related custom ops."""
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
import onnx
import onnx_script
import test_case
_has_chainercv = True
try:
import chainercv_rpn
except ImportError:
    _has_chainercv = False
def aranges(*shape):
r = np.prod(shape)
v = np.arange(r).reshape(shape).astype(np.float32)
v -= r / 2 + 0.1
return v
def _get_scales():
return (1 / 4, 1 / 8, 1 / 16, 1 / 32, 1 / 64)
def _get_hs(num_channels):
hs = []
for h, w in [(200, 272), (100, 136), (50, 68), (25, 34), (13, 17)]:
hs.append(aranges(1, num_channels, h, w))
return hs
def _get_rpn_locs_confs():
locs = []
confs = []
for i in [163200, 40800, 10200, 2550, 663]:
locs.append(aranges(1, i, 4))
confs.append(aranges(1, i))
return locs, confs
def chainercv_test_rpn_decode(test_name):
rpn = chainercv_rpn.RPN(_get_scales())
hs = _get_hs(1)
locs, confs = _get_rpn_locs_confs()
anchors = rpn.anchors(h.shape[2:] for h in hs)
in_shape = (1, 3, 800, 1088)
rois, roi_indices = rpn.decode(
[chainer.Variable(l) for l in locs],
[chainer.Variable(c) for c in confs],
anchors, in_shape)
gb = onnx_script.GraphBuilder(test_name)
hs_v = [gb.input('hs_%d' % i, h) for i, h in enumerate(hs)]
locs_v = [gb.input('loc_%d' % i, l) for i, l in enumerate(locs)]
confs_v = [gb.input('conf_%d' % i, c) for i, c in enumerate(confs)]
in_shape_v = gb.input('in_shape', np.array(in_shape))
rois_v = 'rois'
roi_indices_v = 'roi_indices'
gb.ChainerDoSomething(hs_v + locs_v + confs_v + [in_shape_v],
outputs=[rois_v, roi_indices_v],
function_name='ChainerCVRPNDecode')
gb.output(rois_v, rois)
gb.output(roi_indices_v, roi_indices)
gb.gen_test()
class TestCase(test_case.TestCase):
def __init__(self, name, func, **kwargs):
super(TestCase, self).__init__('out', name, **kwargs)
self.func = func
def get_tests():
    if not _has_chainercv:
return []
tests = []
def test(name, func, **kwargs):
tests.append(TestCase(name, func, **kwargs))
test('chainercv_test_rpn_decode', chainercv_test_rpn_decode)
return tests
|
events/migrations/0001_initial.py | Akash1S/meethub | 428 | 12783016 | # Generated by Django 2.0.4 on 2018-04-21 15:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
('description', models.TextField(max_length=500)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post', models.TextField(max_length=500)),
('created_date', models.DateField(auto_now=True)),
('created_time', models.TimeField(auto_now=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('created_date', 'created_time'),
},
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('details', models.TextField(max_length=1000)),
('venue', models.CharField(max_length=50)),
('date', models.DateField(help_text='Please use the following format: <em>YYYY-MM-DD</em>.')),
('time', models.TimeField()),
('attendees', models.ManyToManyField(blank=True, related_name='attending', to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='events.Category')),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'events',
'verbose_name': 'event',
},
),
migrations.AddField(
model_name='comment',
name='event',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='events.Event'),
),
]
|
packs/reamaze/actions/article_create.py | userlocalhost2000/st2contrib | 164 | 12783028 | <gh_stars>100-1000
from lib.actions import BaseAction
class ArticleCreate(BaseAction):
def run(self, title, body, topic=None, status=0):
if topic:
topic = self._convert_slug(topic)
path = '/topics/%s/articles' % topic
else:
path = '/articles'
payload = self._create_article(title=title, body=body, status=status)
response = self._api_post(path, json=payload)
return response
def _create_article(self, title, body, status=0):
payload = {
'article': {
'title': title,
'body': body,
'status': int(status)
}
}
return payload
|
kitsune/questions/urls_api.py | AndrewDVXI/kitsune | 929 | 12783040 | from rest_framework import routers
from kitsune.questions.api import QuestionViewSet, AnswerViewSet
router = routers.SimpleRouter()
router.register(r"question", QuestionViewSet)
router.register(r"answer", AnswerViewSet)
urlpatterns = router.urls
|
desktop/core/ext-py/urllib3-1.25.8/test/appengine/test_urlfetch.py | yetsun/hue | 5,079 | 12783052 | <reponame>yetsun/hue
"""These tests ensure that when running in App Engine standard with the
App Engine sandbox enabled that urllib3 appropriately uses the App
Engine-patched version of httplib to make requests."""
import httplib
import StringIO
from mock import patch
import pytest
from ..test_no_ssl import TestWithoutSSL
class MockResponse(object):
def __init__(self, content, status_code, content_was_truncated, final_url, headers):
self.content = content
self.status_code = status_code
self.content_was_truncated = content_was_truncated
self.final_url = final_url
self.header_msg = httplib.HTTPMessage(
StringIO.StringIO(
"".join(["%s: %s\n" % (k, v) for k, v in headers.iteritems()] + ["\n"])
)
)
self.headers = headers
@pytest.mark.usefixtures("sandbox")
class TestHTTP(TestWithoutSSL):
def test_urlfetch_called_with_http(self):
"""Check that URLFetch is used to fetch non-https resources."""
resp = MockResponse(
"OK", 200, False, "http://www.google.com", {"content-type": "text/plain"}
)
fetch_patch = patch("google.appengine.api.urlfetch.fetch", return_value=resp)
with fetch_patch as fetch_mock:
import urllib3
pool = urllib3.HTTPConnectionPool("www.google.com", "80")
r = pool.request("GET", "/")
assert r.status == 200, r.data
assert fetch_mock.call_count == 1
@pytest.mark.usefixtures("sandbox")
class TestHTTPS(object):
@pytest.mark.xfail(
reason="This is not yet supported by urlfetch, presence of the ssl "
"module will bypass urlfetch."
)
def test_urlfetch_called_with_https(self):
"""
Check that URLFetch is used when fetching https resources
"""
resp = MockResponse(
"OK", 200, False, "https://www.google.com", {"content-type": "text/plain"}
)
fetch_patch = patch("google.appengine.api.urlfetch.fetch", return_value=resp)
with fetch_patch as fetch_mock:
import urllib3
pool = urllib3.HTTPSConnectionPool("www.google.com", "443")
pool.ConnectionCls = urllib3.connection.UnverifiedHTTPSConnection
r = pool.request("GET", "/")
assert r.status == 200, r.data
assert fetch_mock.call_count == 1
|
RecoEcal/EgammaClusterProducers/python/egammaRechitFilter_cfi.py | ckamtsikis/cmssw | 852 | 12783079 | <filename>RecoEcal/EgammaClusterProducers/python/egammaRechitFilter_cfi.py
import FWCore.ParameterSet.Config as cms
#
# module for filtering of rechits. user provides noise threshold in GeV units
# Author: <NAME>, University of Rome & INFN
#
rechitFilter = cms.EDProducer("RecHitFilter",
noiseEnergyThreshold = cms.double(0.08),
noiseChi2Threshold = cms.double(40),
hitCollection = cms.InputTag('EcalRecHit','EcalRecHitsEB'),
reducedHitCollection = cms.string('FilteredEcalRecHitCollection')
)
|
tools/launch_tensorboard.py | isn-dev/imagenet18 | 716 | 12783081 | <filename>tools/launch_tensorboard.py
#!/usr/bin/env python
# Usage:
# ./launch_tensorboard.py
#
# This will launch r5.large machine on AWS with tensoboard, and print URL
# in the console
import ncluster
ncluster.use_aws()
task = ncluster.make_task('tensorboard',
instance_type='r5.large',
image_name='Deep Learning AMI (Ubuntu) Version 13.0')
task.run('source activate tensorflow_p36')
task.run(f'tensorboard --logdir={task.logdir}/..', non_blocking=True)
print(f"Tensorboard at http://{task.public_ip}:6006")
|