content | sha1 | id
---|---|---|
import sys
import skew
def getVPCs(account="*", region="*", debug=False, save=False):
"""Retrieve all data on VPCs from an AWS account and optionally save
data on each to a file"""
print("Collecting VPC data...", file=sys.stderr)
vpclist = {}
for vpc in skew.scan("arn:aws:ec2:%s:%s:vpc/*" % (region, account)):
if debug:
msg = "Account: %s, VPCID: %s, Region: %s, CIDR: %s" % (
vpc._client.account_id, vpc.id, vpc._client.region_name,
vpc.data['CidrBlock'])
print(msg, file=sys.stderr)
try:
tags = vpc.data['Tags']
except KeyError:
tags = []
vpclist[vpc.id] = {"CIDR": vpc.data['CidrBlock'],
"Account": vpc._client.account_id,
"Region": vpc._client.region_name,
"Tags": tags,
"Instances":[]}
vpclist['none'] = {"Instances":[]}
print("Collected information on %s VPCs" % len(vpclist), file=sys.stderr)
return vpclist | 4bf661c5dfa437930bb2deec7e93e20fcb4b9260 | 3,652,200 |
from datetime import timedelta
from django.conf import settings
def get_cache_timeout():
"""Returns timeout according to COOLOFF_TIME."""
cache_timeout = None
cool_off = settings.AXES_COOLOFF_TIME
if cool_off:
if isinstance(cool_off, (int, float)):
cache_timeout = timedelta(hours=cool_off).total_seconds()
else:
cache_timeout = cool_off.total_seconds()
return cache_timeout | 4804576c2017aa51edb862e75a99e9e193eb3f20 | 3,652,201 |
def find_records(dataset, search_string):
"""Retrieve records filtered on search string.
Parameters:
dataset (list): dataset to be searched
search_string (str): query string
Returns:
list: filtered list of records
"""
records = [] # empty list (accumulator pattern)
for record in dataset:
if search_string.lower() in record.lower(): # case insensitive
records.append(record) # add to new list
return records | c6cbd5c239f410a8658e62c1bbacc877eded5105 | 3,652,202 |
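A quick usage sketch for find_records above, with made-up records (illustrative values only):
dataset = ["Alpha Centauri", "Barnard's Star", "alpha Crucis"]
print(find_records(dataset, "alpha"))  # ['Alpha Centauri', 'alpha Crucis']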
from pattern.en import parse, Sentence
def mood(sentence, **kwargs):
""" Returns IMPERATIVE (command), CONDITIONAL (possibility), SUBJUNCTIVE (wish) or INDICATIVE (fact).
"""
if isinstance(sentence, basestring):
try:
# A Sentence is expected but a string given.
# Attempt to parse the string on-the-fly.
sentence = Sentence(parse(sentence))
except ImportError:
pass
if imperative(sentence, **kwargs):
return IMPERATIVE
if conditional(sentence, **kwargs):
return CONDITIONAL
if subjunctive(sentence, **kwargs):
return SUBJUNCTIVE
else:
return INDICATIVE | 50d495e0a07a89c912288e93bf17a3d72cb78e87 | 3,652,203 |
def create_script(*args, **kwargs):
"""Similar to create_file() but will set permission to 777"""
mode = kwargs.pop("mode", 0o777)  # octal, matching the "777" permission described in the docstring
path = create_file(*args, **kwargs)
path.chmod(mode)
return path | 24a907b8e8cdb51bd1a224283d98756985ea1030 | 3,652,204 |
import torch
def unique(x, dim=None):
"""Unique elements of x and indices of those unique elements
https://github.com/pytorch/pytorch/issues/36748#issuecomment-619514810
e.g.
unique(tensor([
[1, 2, 3],
[1, 2, 4],
[1, 2, 3],
[1, 2, 5]
]), dim=0)
=>
tensor([0, 1, 3])
"""
unique, inverse = torch.unique(
x, sorted=True, return_inverse=True, dim=dim)
perm = torch.arange(inverse.size(0), dtype=inverse.dtype,
device=inverse.device)
inverse, perm = inverse.flip([0]), perm.flip([0])
return inverse.new_empty(unique.size(0)).scatter_(0, inverse, perm) | 8282e6a176b36f75baee8a9101d3cce49f41364d | 3,652,205 |
def _convert_arg(val, name, type, errmsg=None):
""" Convert a Python value in CPO and check its value
Args:
val: Value to convert
name: Argument name
type: Expected type
errmsg: Optional error message
"""
val = build_cpo_expr(val)
assert val.is_kind_of(type), errmsg if errmsg is not None else "Argument '{}' should be a {}".format(name, type.get_public_name())
return val | cf70cc4ec9247ee9941cb715f99180b34f5b378f | 3,652,206 |
import json
def into_json(struct):
"""
Transforms a named tuple into a json object in a nice way.
"""
return json.dumps(_compile(struct), indent=2) | 23470d184caff5540760782c8a1eee1589a61026 | 3,652,207 |
import random
def generate_integer_seeds(signature):
"""Generates a set of seeds. Each seed
is supposed to be included as a file for AFL.
**NOTE** that this method assumes that the signature
only have int types. If a non int type is found,
None is returned.
Param:
(str) signature: fuzzable method signature
Return:
(list) of seeds to include in the analysis
"""
primitives = {'int', 'float', 'long', 'double'}
sm = SootMethod(signature)
param_types = sm.get_parameter_types()
#for param_type in param_types:
#if param_type not in primitives:
# return None
# seeds = itertools.product(_generate_integer_parameter_seed(),
# repeat=len(param_types))
seed = [str(random.randint(-2048, 2048)) for _ in param_types]
return ["\x07".join(seed)]
#return list(map(lambda x: "\x07".join(x), seeds)) | c058f730a6bd41d66ca64e3bfeffc5f4f65c2f60 | 3,652,208 |
from typing import Tuple
import pendulum
def adjust_time(hour: int, minute: int) -> Tuple[int, int]:
"""Adjust time from sunset using offset in config.
Returns:
Tuple[int, int]: (hour, minute) of the adjusted time
"""
today = pendulum.today().at(hour, minute)
today = today.add(hours=config.OFFSET_H, minutes=config.OFFSET_M)
hour = today.hour
minute = today.minute
message = f'Scripts will run at around {hour:02}:{minute:02}'
config.LOGGER.info(message)
return (today.hour, today.minute) | bbd3b170976df934a541db91d728cb68420d0a4b | 3,652,209 |
import os
import random
import cv2
import numpy as np
def read_data_unet(path_images, path_masks, img_dims, norm, stretch, shuffle):
"""
load, crop and normalize image data
option to select images form a folder
:param path_images: path of the folder containing the images [string]
:param path_masks: path of the folder containing the masks [string]
:param img_dims: image dimensions ('rows', 'cols', 'depth') [dict]
:param norm: is normalization to the range of [0, 1] required [bool]
:param stretch: is contrast stretch to the range of [0, 255] required [bool]
:param shuffle: shuffle the image/mask pairs before returning [bool]
:return: tuple - arrays of normalized images and binarized masks [numpy arrays]
"""
images = [] # array of normalized multidiemnsional distance maps (frequencies: 20 - 120MHz)
masks = []
im_names = [fname for fname in os.listdir(path_images) if fname[-4:] == '.jpg'] # image and corresponding mask have the same filename
mask_names = [fname for fname in os.listdir(path_masks) if fname[-4:] == '.jpg'] # image and corresponding mask have the same filename
for ind, im_name in enumerate(im_names):
# load image
im = cv2.imread(os.path.join(path_images, im_name), 0)
if stretch:
im = cv2.normalize(im, im, 0, 255, cv2.NORM_MINMAX)
images.append(im)
# load mask
mask = cv2.imread(os.path.join(path_masks, mask_names[ind]), 0)
masks.append(mask)
if len(images) == 0:
print("No images were read.")
exit(101)
if shuffle:
data = list(zip(images, masks))
random.shuffle(data)
images, masks = zip(*data)
# convert data to ndarrays
images = np.array(images)
images = np.reshape(images, (len(images), img_dims['rows'], img_dims['cols'], img_dims['depth']))
masks = np.array(masks)
masks = np.reshape(masks, (len(masks), img_dims['rows'], img_dims['cols'], 1))
# binarize masks
masks[masks > 0] = 1
masks[masks <= 0] = 0
if norm:
images = images / 255.0
return images, masks | 52cc287c3a1f11414a6393a4aaa91e979edfa7a6 | 3,652,210 |
from functools import reduce
def flatten(l):
"""Flatten 2 list
"""
return reduce(lambda x, y: list(x) + list(y), l, []) | 85b4fad4ef0326304c1ee44714d8132841d13b16 | 3,652,211 |
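A short usage sketch for flatten, assuming the function defined above; it accepts any list of iterables:
print(flatten([[1, 2], (3, 4), [5]]))  # [1, 2, 3, 4, 5]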
def _rep_t_s(byte4):
"""
Composite transformation T', composed of the non-linear transformation and the linear transformation L'
"""
# non-linear transformation
b_array = _non_linear_map(_byte_unpack(byte4))
# linear transformation L'
return _linear_map_s(_byte_pack(b_array)) | d82db42a848cdc84ba042b61a2825113feab5078 | 3,652,212 |
def get_mapping():
"""
Returns a dictionary with the mapping of Spacy dependency labels to a numeric value, spacy dependency annotations
can be found here https://spacy.io/api/annotation
:return: dictionary
"""
keys = ['acl', 'acomp', 'advcl', 'advmod', 'agent', 'amod', 'appos', 'attr', 'aux', 'auxpass', 'case', 'cc', 'ccomp',
'clf', 'compound', 'conj', 'cop', 'csubj', 'csubjpass', 'dative', 'dep', 'det', 'discourse', 'dislocated',
'dobj', 'expl', 'fixed', 'flat', 'goeswith', 'iobj', 'intj', 'list', 'mark',
'meta', 'neg', 'nn', 'nmod', 'nounmod', 'npadvmod', 'npmod', 'nsubj', 'nsubjpass', 'nummod', 'oprd', 'obj', 'obl',
'orphan', 'parataxis', 'pcomp', 'pobj', 'poss', 'preconj', 'predet', 'prep', 'prt', 'punct', 'quantmod', 'relcl',
'reparandum', 'root', 'vocative', 'xcomp', '']
values = list(range(2, len(keys) + 2))
assert len(keys) == len(values)
return dict(zip(keys, values)) | 164d2292bdd573a5805f9a66f685d21aac92061e | 3,652,213 |
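Illustrative lookup with get_mapping above; the numeric values depend only on the order of the keys list:
mapping = get_mapping()
print(mapping['acl'])  # 2 (values start at 2, in key order)
print(len(mapping))    # one entry per dependency label, plus the empty string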
def create_root_folder(path: str, name: str) -> int:
"""
Creates a root folder if folder not in database.
Fetches id if folder already in database.
Handles paths with both slash and backslash as separator.
:param path: The path to the folder in the users file system.
:param name: The name of the folder.
:return: The id of the folder.
"""
path = path.replace('\\', '/')
f = Folder.objects.get_or_create(path=path, name=name)[0]
return f.id | 54ab207501490b7f154111d6091e9a04b59603c9 | 3,652,214 |
import logging
from sklearn.feature_extraction.text import TfidfVectorizer
def get_document_term_matrix(all_documents):
""" Counts word occurrences by document. Then transform it into a
document-term matrix (dtm).
Returns a Tf-idf matrix, first count word occurrences by document.
This is transformed into a document-term matrix (dtm). This is also
just called a term frequency matrix.
:param all_documents:
:return:
"""
tfidf_vectorizer = TfidfVectorizer(stop_words='english',
ngram_range=(1, 3))
tfidf_matrix = tfidf_vectorizer.fit_transform(all_documents)
terms = tfidf_vectorizer.get_feature_names()
logging.info('Total terms found: %d' % len(terms))
logging.info('(TFM/DTM) Matrix size: %s' % (tfidf_matrix.shape,))
return terms, tfidf_matrix | b43cd43d68f48c0296a37668c5f48375ac7e38df | 3,652,215 |
def f_raw2(coordinate, packedParams):
"""
The raw function call, performs no checks on valid parameters..
:return:
"""
gaussParams = packedParams['pack']
res = 0
for p in gaussParams:
res += gaussian_2d.f_noravel(coordinate, *p)
return res | 2071e4d6c227975b308ea717658d0bf1af567fca | 3,652,216 |
import tensorflow as tf
def example_serving_input_fn():
"""Build the serving inputs."""
example_bytestring = tf.placeholder(
shape=[None],
dtype=tf.string,
)
features = tf.parse_example(
example_bytestring,
tf.feature_column.make_parse_example_spec(INPUT_COLUMNS))
return tf.estimator.export.ServingInputReceiver(
features, {'example_proto': example_bytestring}) | d94f7add7ba7a549263a85ef4d21cd78bae298ca | 3,652,217 |
def add_file_uri_to_path(filepath):
"""Add the file uri preix: "file://" to the beginning of a path"""
if not filepath:
return False, "The filepath must be specified"
if filepath.lower().startswith(FILE_URI_PREFIX):
#
#
return True, filepath
updated_fpath = '%s%s' % (FILE_URI_PREFIX, filepath)
return True, updated_fpath | dc0640f6101b92a8623e360ac6782b3b23b6e45d | 3,652,218 |
import numpy as np
def np_slope_diff_spacing(z, xspace, yspace):
"""
https://github.com/UP-RS-ESP/TopoMetricUncertainty/blob/master/uncertainty.py
Provides slope in degrees.
"""
dy, dx = np.gradient(z, xspace, yspace)
return np.arctan(np.sqrt(dx*dx+dy*dy))*180/np.pi | 45527de535339a4cae17694dbf5313fb57a02bef | 3,652,219 |
import torch
def build_dist(
cfg, feat_1, feat_2=None, dist_m=None, verbose=False,
):
"""Computes distance.
Args:
feat_1 (torch.Tensor): 2-D feature matrix.
feat_2 (torch.Tensor): 2-D feature matrix. (optional)
Returns:
numpy.ndarray: distance matrix.
"""
if dist_m is None:
dist_m = cfg.dist_metric
if dist_m == "euclidean":
if feat_2 is not None:
return compute_euclidean_distance(feat_1, feat_2, cfg.dist_cuda)
else:
return compute_euclidean_distance(feat_1, feat_1, cfg.dist_cuda)
elif dist_m == "cosine":
if feat_2 is not None:
return compute_cosine_distance(feat_1, feat_2, cfg.dist_cuda)
else:
return compute_cosine_distance(feat_1, feat_1, cfg.dist_cuda)
elif dist_m == "jaccard":
if feat_2 is not None:
feat = torch.cat((feat_1, feat_2), dim=0)
else:
feat = feat_1
dist = compute_jaccard_distance(
feat, k1=cfg.k1, k2=cfg.k2, search_option=cfg.search_type, verbose=verbose,
)
if feat_2 is not None:
return dist[: feat_1.size(0), feat_1.size(0) :]
else:
return dist
else:
assert "Unknown distance metric: {}".format(dist_m) | 2022386bf74939f9eb7039af8d67f718121062a0 | 3,652,220 |
import numpy as np
import ctypes as ct
def call_inverse_cic_single_omp(img_in,yc1,yc2,yi1,yi2,dsi):
"""
Input:
img_in: Magnification Map
yc1, yc2: Lens position
yi1, yi2: Source position
dsi: pixel size on grid
"""
ny1,ny2 = np.shape(img_in)
img_in = np.array(img_in,dtype=ct.c_float)
yi1 = np.array(yi1,dtype=ct.c_float)
yi2 = np.array(yi2,dtype=ct.c_float)
nlimgs = len(yi1)
img_out = np.zeros((nlimgs),dtype=ct.c_float)
rtf.inverse_cic_omp_single(img_in,yi1,yi2,ct.c_float(yc1),ct.c_float(yc2),ct.c_float(dsi),ct.c_int(ny1),ct.c_int(ny2),ct.c_int(nlimgs),img_out)
return img_out | 286c650117e9c3df17bc081e3d9ddbe47755e010 | 3,652,221 |
def filter_spans(spans):
"""Filter a sequence of spans and remove duplicates or overlaps. Useful for
creating named entities (where one token can only be part of one entity) or
when merging spans with `Retokenizer.merge`. When spans overlap, the (first)
longest span is preferred over shorter spans.
spans (iterable): The spans to filter.
RETURNS (list): The filtered spans.
"""
get_sort_key = lambda span: (span.end - span.start, -span.start)
sorted_spans = sorted(spans, key=get_sort_key, reverse=True)
result = []
seen_tokens = set()
for span in sorted_spans:
# Check for end - 1 here because boundaries are inclusive
if span.start not in seen_tokens and span.end - 1 not in seen_tokens:
result.append(span)
seen_tokens.update(range(span.start, span.end))
result = sorted(result, key=lambda span: span.start)
return result | 3b15a79b14f02ffa870b94eb9b61261c4befc0eb | 3,652,222 |
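filter_spans only relies on start and end attributes, so a minimal stand-in (not a real spaCy Span) is enough to illustrate how overlaps are resolved:
from collections import namedtuple
Span = namedtuple("Span", ["start", "end"])
spans = [Span(0, 3), Span(2, 4), Span(5, 6)]
print(filter_spans(spans))  # [Span(start=0, end=3), Span(start=5, end=6)]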
import quandl
def get_col_names_for_tick(tick='BCHARTS/BITSTAMPUSD'):
"""
Return the columns available for the tick. Startdate is late by default to avoid getting much data
"""
return quandl.get(tick, start_date=None).columns | d80ae9b93b4bb817306f8ade404a083126507802 | 3,652,223 |
import os
def check_if_needs_inversion(tomodir):
"""check of we need to run CRTomo in a given tomodir
Parameters
----------
tomodir : str
Tomodir to check
Returns
-------
needs_inversion : bool
True if not finished yet
"""
required_files = (
'grid' + os.sep + 'elem.dat',
'grid' + os.sep + 'elec.dat',
'exe' + os.sep + 'crtomo.cfg',
)
needs_inversion = True
for filename in required_files:
if not os.path.isfile(tomodir + os.sep + filename):
needs_inversion = False
# check for crmod OR modeling capabilities
if not os.path.isfile(tomodir + os.sep + 'mod' + os.sep + 'volt.dat'):
if not check_if_needs_modeling(tomodir):
print('no volt.dat and no modeling possible')
needs_inversion = False
# check if finished
inv_ctr_file = tomodir + os.sep + 'inv' + os.sep + 'inv.ctr'
if os.path.isfile(inv_ctr_file):
inv_lines = open(inv_ctr_file, 'r').readlines()
print('inv_lines', inv_lines[-1])
if inv_lines[-1].startswith('***finished***'):
needs_inversion = False
return needs_inversion | 8ba837eb2295f7cc3a5698e2877a310f17b805c5 | 3,652,224 |
def check_result(request):
"""
Query the task result by task id.
:param request:
:param task_id:
:return:
"""
task_id = request.data.get('task_id')
if task_id is None:
return Response({'message': '缺少task_id'}, status=status.HTTP_400_BAD_REQUEST)
res = AsyncResult(task_id)
if res.ready(): # check whether the specified task has finished
if res.successful():
return Response(res.result) # res.result is the task function's return value, i.e. the task result
return Response({'message': '任务运行错误:{}'.format(res.result)})
return Response({'message': '任务运行中,稍后查看...'}) | b43001fb598188f030a58166ab445a919b9b1d4e | 3,652,225 |
def get_debug_device():
"""Get the profile to debug from the RIVALCFG_DEVICE environment variable,
if any.
This device should be selected as the one where the commands will be
written, regardless of the selected profile. This is useful for debugging a
mouse that has the same command set as another one but with a different
product_id.
If RIVALCFG_PROFILE is defined but RIVALCFG_DEVICE is not, this
function returns the same output as get_debug_profile()."""
mouse_id = _get_mouse_id_from_env("RIVALCFG_DEVICE")
if mouse_id:
return mouse_id
return _get_mouse_id_from_env("RIVALCFG_PROFILE") | 82e4fce316568df89537d41691f475ef58b5a8a1 | 3,652,226 |
from typing import Tuple
def _remove_node(node: int, meta: IntArray, orig_dest: IntArray) -> Tuple[int, int]:
"""
Parameters
----------
node : int
ID of the node to remove
meta : ndarray
Array with rows containing node, count, and address where
address is used to find the first occurrence in orig_dest
orig_dest : ndarray
Array with rows containing origin and destination nodes
Returns
-------
next_node : int
ID of the next node in the branch
next_count : int
Count of the next node in the branch
Notes
-----
Node has 1 link, so:
1. Remove the forward link
2. Remove the backward link
3. Decrement node's count
4. Decrement next_node's count
"""
# 3. Decrement
meta[node, 1] -= 1
# 1. Remove forward link
next_offset = meta[node, 2]
orig, next_node = orig_dest[next_offset]
while next_node == -1:
# Increment since this could have been previously deleted
next_offset += 1
next_orig, next_node = orig_dest[next_offset]
assert orig == next_orig
# 4. Remove next_node's link
orig_dest[next_offset, 1] = -1
# 2. Remove the backward link
# Set reverse to -1
reverse_offset = meta[next_node, 2]
reverse_node = orig_dest[reverse_offset, 1]
while reverse_node != orig:
reverse_offset += 1
reverse_node = orig_dest[reverse_offset, 1]
orig_dest[reverse_offset, 1] = -1
# Step forward
meta[next_node, 1] -= 1
next_count = meta[next_node, 1]
return next_node, next_count | 1112fc1907e3668f5c2d559d78ee1deba8583754 | 3,652,227 |
def forward_many_to_many_without_pr(request):
"""
Return all the stores with associated books, without using prefetch_related.
100ms overall
8ms on queries
11 queries
1 query to fetch all stores:
SELECT "bookstore_store"."id",
"bookstore_store"."name"
FROM "bookstore_store"
10 separate query to fetch books of each store:
SELECT "bookstore_book"."id",
"bookstore_book"."name",
"bookstore_book"."price",
"bookstore_book"."publisher_id"
FROM "bookstore_book"
INNER JOIN "bookstore_bookinstore" ON ("bookstore_book"."id" = "bookstore_bookinstore"."book_id")
WHERE "bookstore_bookinstore"."store_id" = 1
"""
qs = Store.objects.all()
stores = []
for store in qs:
books = [{'id': book.id, 'name': book.name} for book in store.books.all()]
stores.append({'id': store.id, 'name': store.name, 'books': books})
return Response(stores) | c67e3af971c67e37f4221147518a054ee49f19a2 | 3,652,228 |
import itertools
def generate_combinations (n, rlist):
""" from n choose r elements """
combs = [list(itertools.combinations(n, r)) for r in rlist]
combs = [item for sublist in combs for item in sublist]
return combs | 7339b61a7ea76813c8356cf4a87b1e81b67ce10e | 3,652,229 |
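Example call for generate_combinations above, choosing all 1- and 2-element subsets:
print(generate_combinations([1, 2, 3], [1, 2]))
# [(1,), (2,), (3,), (1, 2), (1, 3), (2, 3)]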
import six
import traceback
def conv_datetime(dt, version=2):
"""Converts dt to string like
version 1 = 2014:12:15-00:00:00
version 2 = 2014/12/15 00:00:00
version 3 = 2014/12/15 00:00:00
"""
try:
if isinstance(dt, six.string_types):
if _HAS_PANDAS:
dt = pd.to_datetime(dt)
fmt = DATE_FORMATS[int(version)]
return dt.strftime(fmt)
except (ValueError, TypeError):
logger.error(traceback.format_exc())
logger.warning("conv_datetime returns %s" % dt)
return dt | 19999cd2517832e06949fbb0939d3b622605d047 | 3,652,230 |
def get_description_value(name_of_file):
"""
:param name_of_file: Source file for function.
:return: Description value for particular CVE.
"""
line = name_of_file.readline()
while 'value" :' not in line:
line = name_of_file.readline()
tmp_list = line.split(':')
if len(tmp_list) == 2:
value = tmp_list[1][:]
return value
else:
# When description value contains ":" too.
concatenation = ""
for i in range(1, len(tmp_list)-1):
concatenation = concatenation + tmp_list[i] + ":"
concatenation = concatenation + tmp_list[-1]
return concatenation | a7b0915feff7fcd2a175bdb8fe9af48d0d9f14d7 | 3,652,231 |
def octaves(p, fs, density=False,
frequencies=NOMINAL_OCTAVE_CENTER_FREQUENCIES,
ref=REFERENCE_PRESSURE):
"""Calculate level per 1/1-octave in frequency domain using the FFT.
:param p: Instantaneous signal :math:`p(t)`.
:param fs: Sample frequency.
:param density: Power density instead of power.
:param frequencies: Frequencies.
:param ref: Reference value.
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
.. note:: Based on power spectrum (FFT)
.. seealso:: :attr:`acoustics.bands.OCTAVE_CENTER_FREQUENCIES`
.. note:: Exact center frequencies are always calculated.
"""
fob = OctaveBand(center=frequencies, fraction=1)
f, p = power_spectrum(p, fs)
fnb = EqualBand(f)
power = integrate_bands(p, fnb, fob)
if density:
power /= (fob.bandwidth/fnb.bandwidth)
level = 10.0*np.log10(power / ref**2.0)
return fob, level | 98e1604e95bdcb72fa6d401fee020b973f1f19d4 | 3,652,232 |
def build_embed(**kwargs):
"""Creates a discord embed object."""
return create_embed(**kwargs) | b1a181de866cd78959c5b4bec8e14b30e8cd99d7 | 3,652,233 |
def VAMA(data, period=8, column='close'):
"""
Volume Adjusted Moving Average
:param pd.DataFrame data: pandas DataFrame with open, high, low, close data
:param int period: period used for indicator calculation
:param str column: column used for indicator calculation (default = "close")
"""
return TA.VAMA(data, period, column) | 5133aae3b33e0f8ef3298790a840dd3dcb6053ce | 3,652,234 |
def link_syscall(oldpath, newpath):
"""
http://linux.die.net/man/2/link
"""
# lock to prevent things from changing while we look this up...
filesystemmetadatalock.acquire(True)
# ... but always release it...
try:
trueoldpath = _get_absolute_path(oldpath)
# is the old path there?
if trueoldpath not in fastinodelookuptable:
raise SyscallError("link_syscall","ENOENT","Old path does not exist.")
oldinode = fastinodelookuptable[trueoldpath]
# is oldpath a directory?
if IS_DIR(filesystemmetadata['inodetable'][oldinode]['mode']):
raise SyscallError("link_syscall","EPERM","Old path is a directory.")
# TODO: I should check permissions...
# okay, the old path info seems fine...
truenewpath = _get_absolute_path(newpath)
# does the newpath exist? It shouldn't
if truenewpath in fastinodelookuptable:
raise SyscallError("link_syscall","EEXIST","newpath already exists.")
# okay, it doesn't exist (great!). Does its parent exist and is it a
# dir?
truenewparentpath = _get_absolute_parent_path(newpath)
if truenewparentpath not in fastinodelookuptable:
raise SyscallError("link_syscall","ENOENT","New path does not exist.")
newparentinode = fastinodelookuptable[truenewparentpath]
if not IS_DIR(filesystemmetadata['inodetable'][newparentinode]['mode']):
raise SyscallError("link_syscall","ENOTDIR","New path's parent is not a directory.")
# TODO: I should check permissions...
# okay, great!!! We're ready to go! Let's make the file...
newfilename = truenewpath.split('/')[-1]
# first, make the directory entry...
filesystemmetadata['inodetable'][newparentinode]['filename_to_inode_dict'][newfilename] = oldinode
# increment the link count on the dir...
filesystemmetadata['inodetable'][newparentinode]['linkcount'] += 1
# ... and the file itself
filesystemmetadata['inodetable'][oldinode]['linkcount'] += 1
# finally, update the fastinodelookuptable and return success!!!
fastinodelookuptable[truenewpath] = oldinode
return 0
finally:
persist_metadata(METADATAFILENAME)
filesystemmetadatalock.release() | ee2964262288f518aab830ae84f777a18044af0a | 3,652,235 |
import numpy as np
def doc_vector(text, stop_words, model):
"""
Compute the document vector as the average of its sentence vectors
:param text: the document to process
:param stop_words: stop word list
:param model: word vector model
:return: document vector
"""
sen_list = get_sentences(text)
sen_list = [x[1] for x in sen_list]
vector = np.zeros(100, )
length = len(sen_list)
for sentence in sen_list:
sen_vec = sentence_vector(sentence, stop_words, model)
vector += sen_vec
return vector / length | 73652ced2cd8a5fdf264d68be9b78969dfe2c78d | 3,652,236 |
def get_ether_pkt(src, dst, ethertype=ether.ETH_TYPE_IP):
"""Creates a Ether packet"""
return ethernet.ethernet(src=src, dst=dst, ethertype=ethertype) | 94c778e16f680e73a04591e3808fa5d07c8c5afa | 3,652,237 |
import inspect
def _resolve_dependency(param: inspect.Parameter, class_obj: Instantiable , app: App):
"""
Try to get the instance of a parameter from a bound custom resolver for the class which needs it.
if not able to do the above, try to get a registered binding for the parameter's Annotation.
if no binding is registered for the Annotation, get the default value of the parameter
if the default value is empty, try to instantiate the param's Annotation class
"""
class_context = app.get_custom_resolver(class_obj)
custom_resolver = (class_context.get(param.name)
or class_context.get(param.annotation)) if class_context else None
resolved = custom_resolver() if callable(custom_resolver) else custom_resolver
if resolved:
return resolved
default = param.default
binding = _get_binding(param.annotation, app) or default
if binding == inspect._empty:
if param.annotation != inspect._empty and _is_not_primitive(param.annotation):
annotation_params = _get_init_params(param.annotation)
binding = init_class(param.annotation, app, annotation_params)
else:
raise BindingResolutionException(
f'Cannot resolve param {param.name} of class {class_obj}'
)
return binding() if callable(binding) else binding | 6bbe3f6fd74d037584ab96c761503a2b47cc901a | 3,652,238 |
import re
def parse_compute_hosts(compute_hosts):
""" Transform a coma-separated list of host names into a list.
:param compute_hosts: A coma-separated list of host names.
:type compute_hosts: str
:return: A list of host names.
:rtype: list(str)
"""
return list(filter(None, re.split(r'[^a-zA-Z0-9\-_]+', compute_hosts)))
import numpy as np
def get_bbox(mask_frame):
"""
get rectangular bounding box for irregular roi
Args:
mask_frame (np.ndarray): the frame containing the mask
Returns:
bbox (np.ndarray): numpy array containing the indexes of the bounding box
"""
bbox = np.zeros(4)
bbox[0] = np.min(np.where(np.max(mask_frame, axis=0))) # x top left
bbox[1] = np.min(np.where(np.max(mask_frame, axis=1))) # y top left
bbox[2] = np.max(np.where(np.max(mask_frame, axis=0))) - bbox[0] # x size
bbox[3] = np.max(np.where(np.max(mask_frame, axis=1))) - bbox[1] # y size
bbox = np.int64(bbox)
return bbox | e7cc63cc8b5dfa1e4d59b0696f6e9747e33f69bc | 3,652,240 |
def view_create_log_entry_verbose(request):
"""Create a new BehavioralLogEntry. Return the JSON version of the entry"""
return view_create_log_entry(request, is_verbose=True) | d461f9f9893756f8e45aa6578e6fb9625957b415 | 3,652,241 |
import json
import queue
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
def webhook_server_factory(free_port):
"""For making a server that can accept Onshape webhooks."""
servers = []
threads = []
def _webhook_server_factory():
""" Create a factory to handle webhook notifications coming in.
:param on_recieved: function callback to handle the json response from the webhook.
:return: HTTPServer: server
"""
class myHandler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
super().__init__(request, client_address, server)
# Holds function that deals with the request.
self.on_recieved = None
def do_POST(self):
content_length = int(self.headers["Content-Length"])
body = self.rfile.read(content_length)
unquoted_s = body.decode("utf-8")
data = json.loads(unquoted_s)
server._message_q.put(data)
# Always return a 200 response to indicate it was gotten
self.send_response(200)
self.end_headers()
def do_GET(self):
raise NotImplementedError()
class WebhookServer(HTTPServer):
def __init__(
self, server_address, RequestHandlerClass, bind_and_activate=True
):
super().__init__(server_address, RequestHandlerClass, bind_and_activate)
self._tunneled_url = None
# Used to communicate to the main thread that a message has been recieved and needs to be processed.
# The main thread is responsible for popping messages off as they are processed.
self._message_q = queue.Queue()
@property
def url(self):
return f"http://localhost:{self.server_port}"
@property
def tunneled_url(self):
if not self._tunneled_url:
self._tunneled_url = tunnel(self.server_port)
return self._tunneled_url
def wait_for_message(
self, message_predicate=lambda m: True, seconds_to_wait=5
):
""" Block progress until a certain message is received that satisfies the passed message_predicate
:param message_predicate: blocking function that takes the message and returns True
if it is the 'right one'.
:param seconds_to_wait: seconds to wait for the message. This will throw a StopIteration
if the time runs out without a valid message.
"""
start_time = time.time()
poll_time = seconds_to_wait / 100
while True:
try:
message = self._message_q.get(timeout=poll_time)
if message_predicate(message):
return
self._message_q.task_done()
except queue.Empty:
pass
if time.time() - start_time > seconds_to_wait:
raise TimeoutError()
server = WebhookServer(("localhost", free_port), myHandler)
servers.append(server)
thread = Thread(target=server.serve_forever)
thread.start()
threads.append(thread)
return server
yield _webhook_server_factory
for server, thread in zip(servers, threads):
server.shutdown()
thread.join() | 66a96cdf0226a2e164492d229bd50e675c7ac667 | 3,652,242 |
from argparse import ArgumentTypeError
from os.path import exists
from typing import Text
def generator_validator(file_path: Text):
"""
Validates that the generator module exists and has all the required
methods
"""
if not exists(file_path):
raise ArgumentTypeError(f"File {file_path} could not be found")
try:
module = import_file("generator", file_path)
except SyntaxError:
doing.logger.exception("Syntax error in generator")
raise ArgumentTypeError(f"File {file_path} has a syntax error")
except ImportError:
raise ArgumentTypeError(f"File {file_path} cannot be imported")
except Exception:
doing.logger.exception("Unknown error while importing generator")
raise ArgumentTypeError(f"Unknown error")
if not hasattr(module, "get_source") or not callable(module.get_source):
raise ArgumentTypeError(
f"Generator does not expose a get_source(environment_name) method"
)
if not hasattr(module, "allow_transfer") or not callable(module.allow_transfer):
raise ArgumentTypeError(
f"Generator does not expose a allow_transfer(origin, target) method"
)
if not hasattr(module, "get_backup_dir") or not callable(module.get_backup_dir):
raise ArgumentTypeError(
f"Generator does not expose a get_backup_dir(environment) method"
)
if not hasattr(module, "get_patch") or not callable(module.get_backup_dir):
raise ArgumentTypeError(
f"Generator does not expose a get_patch(origin, target) method"
)
if not hasattr(module, "get_wp_config") or not callable(module.get_backup_dir):
raise ArgumentTypeError(
f"Generator does not expose a get_wp_config(environment) method"
)
return module | 4a1f42198ad8421bcbb333d1bc5cfd1fc6ef0b3e | 3,652,243 |
from typing import Optional
from typing import List
import gzip
import google
def from_bytes(
data: bytes, idx_list: Optional[List[int]] = None, compression: Optional[str] = "gz"
) -> List[ProgramGraph]:
"""Deserialize Program Graphs from a byte array.
:param data: The serialized Program Graphs.
:param idx_list: A zero-based list of graph indices to return. If not
provided, all graphs are returned.
:param compression: Either :code:`gz` for GZip compression (the default), or
:code:`None` for no compression. Compression increases the cost of
serializing and deserializing but can greatly reduce the size of the
serialized graphs.
:return: A list of Program Graphs.
:raise GraphCreationError: If deserialization fails.
"""
decompressors = {
"gz": gzip.decompress,
None: lambda d: d,
}
if compression not in decompressors:
decompressors = ", ".join(sorted(str(x) for x in decompressors))
raise TypeError(
f"Invalid compression argument: {compression}. "
f"Supported compressions: {decompressors}"
)
decompress = decompressors[compression]
graph_list = ProgramGraphList()
try:
graph_list.ParseFromString(decompress(data))
except (gzip.BadGzipFile, google.protobuf.message.DecodeError) as e:
raise GraphCreationError(str(e)) from e
if idx_list:
return [graph_list.graph[i] for i in idx_list]
return list(graph_list.graph) | 3d8e0291af3b283616eef06ac1e8161b248bb4e6 | 3,652,244 |
from microbit import display, Image, sleep, button_b
def runTimer(t):
"""t is timer time in milliseconds"""
blinkrow = 500 #in milliseconds
eachrow = t // 5
for i in range(5):
for _ in range(eachrow//(2*blinkrow)):
display.show(Image(hourglassImages[i+1]))
sleep(blinkrow)
display.show(Image(hourglassImages[i]))
sleep(blinkrow)
if button_b.was_pressed():
return()
display.show(Image.HAPPY)
return() | b3d0fa8c5ac2c179b206a1679e9951e91d9c8b44 | 3,652,245 |
import math
def ascMoveFormula(note_distance, finger_distance, n1, n2, f1, f2):
"""This is for situations where direction of notes and fingers are opposite,
because either way, you want to add the distance between the fingers.
"""
# The math.ceil part is so it really hits a value in our moveHash.
# This could be fixed if I put more resolution into the moveHash
total_distance = math.ceil(note_distance + finger_distance);
# This adds a small amount for every additional halfstep over 24. Fairly
# representative of what it should be.
if total_distance > 24:
return MOVE_HASH[24] + (total_distance - 24) / 5;
else:
cost = MOVE_HASH[total_distance];
cost += colorRules(n1, n2, f1, f2, finger_distance)
return cost | a9f4ef27f1a922eea1f9ec4a400d120e6b0c892b | 3,652,246 |
from copy import deepcopy
def create_stomp_connection(garden: Garden) -> Connection:
"""Create a stomp connection wrapper for a garden
Constructs a stomp connection wrapper from the garden's stomp connection parameters.
Will ignore subscribe_destination as the router shouldn't be subscribing to
anything.
Args:
garden: The garden specifying
Returns:
The created connection wrapper
"""
connection_params = garden.connection_params.get("stomp", {})
connection_params = deepcopy(connection_params)
connection_params["subscribe_destination"] = None
return Connection(**connection_params) | 862d6a6862246a02b132313bbf9ab8f62d62a3be | 3,652,247 |
import numpy as np
def W_n(S, n_vals, L_vals, J_vals):
""" Field-free energy. Includes extra correction terms.
-- atomic units --
"""
neff = n_vals - get_qd(S, n_vals, L_vals, J_vals)
energy = np.array([])
for i, n in enumerate(n_vals):
en = -0.5 * (neff[i]**-2.0 - 3.0 * alpha**2.0 / (4.0 * n**4.0) + \
mu_M**2.0 * ((1.0 + (5.0 / 6.0) * (alpha * Z)**2.0)/ n**2.0))
energy = np.append(energy, en)
return energy | 2a4aa2ab0c6a7d935547ac18933d835848c45d5e | 3,652,248 |
def obter_movimento_manual(tab, peca): # board x piece -> tuple of positions
"""
Receives a piece and a board plus a move/position entered manually,
depending on the phase the program is in.
In the placement phase, it receives a string with one position.
In the movement phase, it receives a string with two positions.
:param tab: board
:param peca: piece
:return: tuple of positions
"""
linhas = obter_str_linhas()
colunas = obter_str_colunas()
if len(obter_posicoes_jogador(tab, peca)) < 3: # placement phase
pos = str(input('Turno do jogador. Escolha uma posicao: '))
if len(pos) == 2 and pos[0] in colunas and pos[1] in linhas:
pos = cria_posicao(pos[0], pos[1])
if eh_posicao_livre(tab, pos):
return pos,
if len(obter_posicoes_jogador(tab, peca)) == 3: # movement phase
pos = str(input('Turno do jogador. Escolha um movimento: '))
if len(pos) == 4 and pos[0] in colunas and pos[1] in linhas \
and pos[2] in colunas and pos[3] in linhas:
pos1 = cria_posicao(pos[0], pos[1])
pos2 = cria_posicao(pos[2], pos[3])
if obter_peca(tab, pos1) == peca:
if eh_posicao_livre(tab, pos2):
if pos2 in obter_posicoes_adjacentes(pos1):
return pos1, pos2
if posicoes_iguais(pos1, pos2) and \
len(obter_pos_adj_livres(tab, pos1)) == 0:
return pos1, pos2
raise ValueError('obter_movimento_manual: escolha invalida') | 2ac683c047a7852d56b228c11663007b9cdd7a33 | 3,652,249 |
import base64
def parse_header(req_header, taint_value):
"""
Parse the position of the taint value from the request headers
"""
header_raw = base64.b64decode(req_header).decode('utf-8').split('\n')
for header in header_raw:
_header_list = header.split(':')
_header_name = _header_list[0]
_header_value = ':'.join(_header_list[1:])
if equals(taint_value, _header_value):
return _header_name | f0ffdf7a823bd07e0648970a7d0791e44ad9c4a9 | 3,652,250 |
from typing import Union
from typing import List
from pathlib import Path
import os
def filter_and_sort_files(
fnames: Union[str, List[str]], return_matches: bool = False
):
"""Find all timestamped data files and sort them by their timestamps"""
if isinstance(fnames, (Path, str)):
fnames = os.listdir(fnames)
# use the timestamps from all valid timestamped
# filenames to sort the files as the first index
# in a tuple
matches = zip(map(fname_re.search, map(str, fnames)), fnames)
tups = [(m.group("t0"), f, m) for m, f in matches if m is not None]
# if return_matches is True, return the match object,
# otherwise just return the raw filename
return_idx = 2 if return_matches else 1
return [t[return_idx] for t in sorted(tups)] | 4670a5a88d257accb42e9feda868be6c6935427f | 3,652,251 |
import numpy as np
def fake_batch(obs_space, action_space, batch_size=1):
"""Create a fake SampleBatch compatible with Policy.learn_on_batch."""
samples = {
SampleBatch.CUR_OBS: fake_space_samples(obs_space, batch_size),
SampleBatch.ACTIONS: fake_space_samples(action_space, batch_size),
SampleBatch.REWARDS: np.random.randn(batch_size).astype(np.float32),
SampleBatch.NEXT_OBS: fake_space_samples(obs_space, batch_size),
SampleBatch.DONES: np.random.randn(batch_size) > 0,
}
return SampleBatch(samples) | 1e71e17eecdabd8c95aa517564e494e303e10a4b | 3,652,252 |
import collections
def compute_macro_f1(answer_stats, prefix=''):
"""Computes F1, precision, recall for a list of answer scores.
This computes the *language-wise macro F1*. For minimal answers,
we also compute a partial match score that uses F1, which would be
included in this computation via `answer_stats`.
Args:
answer_stats: List of per-example scores.
prefix (''): Prefix to prepend to score dictionary.
Returns:
Dictionary mapping measurement names to scores.
"""
has_gold, has_pred, f1, _ = list(zip(*answer_stats))
macro_precision = eval_utils.safe_divide(sum(f1), sum(has_pred))
macro_recall = eval_utils.safe_divide(sum(f1), sum(has_gold))
macro_f1 = eval_utils.safe_divide(
2 * macro_precision * macro_recall,
macro_precision + macro_recall)
return collections.OrderedDict({
prefix + 'n': len(answer_stats),
prefix + 'f1': macro_f1,
prefix + 'precision': macro_precision,
prefix + 'recall': macro_recall
}) | 08c774b6a06230c298dcbf031d2e188a643df9ef | 3,652,253 |
import numpy as np
def piecewise_linear(x, rng, NUM_PEOPLE):
"""
This function samples the piecewise linear viral_load model
Args:
x ([type]): [description]
rng (np.random.RandomState): random number generator
NUM_PEOPLE (int): [description]
Returns:
np.array: [description]
"""
viral_loads = []
for person in range(NUM_PEOPLE):
plateau_height, plateau_start, plateau_end, recovered = _sample_viral_load_piecewise(rng)
viral_load = []
for time_sample in x:
if time_sample < plateau_start:
cur_viral_load = plateau_height * time_sample / plateau_start
elif time_sample < plateau_end:
cur_viral_load = plateau_height
else:
cur_viral_load = plateau_height - plateau_height * (time_sample - plateau_end) / (recovered - plateau_end)
if cur_viral_load < 0:
cur_viral_load = np.array([0.])
viral_load.append(cur_viral_load)
viral_loads.append(np.array(viral_load, dtype=float).flatten())
viral_loads = np.array(viral_loads)
return viral_loads | 27c27c89769674d7fcf8008f21334ec12acd588c | 3,652,254 |
import numpy as np
from scipy import spatial
def mol_sim_matrix(fingerprints1,
fingerprints2,
method='cosine',
filename=None,
max_size=1000,
print_progress=True):
"""Create Matrix of all molecular similarities (based on molecular fingerprints).
If filename is not None, the result will be saved as npy.
To create molecular fingerprints see mol_fingerprints() function from MS_functions.
Args:
----
fingerprints1: list
List of molecular fingerprints (numpy arrays).
fingerprints2: list
List of molecular fingerprints (numpy arrays).
method: str
Method to compare molecular fingerprints. Can be 'cosine', 'dice' etc.
(see scipy.spatial.distance.cdist).
filename: str
Filename to save results to. OR: If file already exists it will be
loaded instead.
max_size: int
Maximum size of (sub) all-vs-all matrix to handle in one go. Will split
up larger matrices into
max_size x max_size matrices.
print_progress: bool, optional
If True, print phase of the run to indicate progress. Default = True.
"""
if filename is not None:
try:
molecular_similarities = np.load(filename)
print("Molecular similarity scores found and loaded.")
collect_new_data = False
except FileNotFoundError:
print("Could not find file ", filename)
print("Molecular scores will be calculated from scratch.")
collect_new_data = True
else:
collect_new_data = True
if collect_new_data:
# Create array of all fingerprints
fingerprints_arr1 = np.array(fingerprints1)
fingerprints_arr2 = np.array(fingerprints2)
# Calculate all-vs-all similarity matrix (similarity here= 1-distance )
matrix_size = (fingerprints_arr1.shape[0], fingerprints_arr2.shape[0])
molecular_similarities = np.zeros(matrix_size)
# Split large matrices up into smaller ones to track progress
splits = int(np.ceil(matrix_size[0]/max_size) * np.ceil(matrix_size[1]/max_size))
count_splits = 0
for i in range(int(np.ceil(matrix_size[0]/max_size))):
low1 = i * max_size
high1 = min((i + 1) * max_size, matrix_size[0])
for j in range(int(np.ceil(matrix_size[1]/max_size))):
low2 = j * max_size
high2 = min((j + 1) * max_size, matrix_size[1])
molecular_similarities[low1:high1, low2:high2] = 1 - spatial.distance.cdist(
fingerprints_arr1[low1:high1],
fingerprints_arr2[low2:high2],
method
)
# Track progress:
count_splits += 1
if print_progress:
print('\r',
"Calculated submatrix {} out of {}".format(count_splits, splits),
end="")
if print_progress:
print(20 * '--')
print("Succesfully calculated matrix with all-vs-all molecular similarity values.")
if filename is not None:
np.save(filename, molecular_similarities)
print("Matrix was saved under:", filename)
return molecular_similarities | 43bd1fd26d3c4372d7285c8f93ad3a7e7e7d8a61 | 3,652,255 |
import numpy as np
def energies_over_delta(syst, p, k_x):
"""Same as energy_operator(), but returns the
square-root of the eigenvalues"""
operator = energy_operator(syst, p, k_x)
return np.sqrt(np.linalg.eigvalsh(operator)) | b610ca896791d6036c1a5ed1295f3293408f898a | 3,652,256 |
def count_matching(d1: Die, d2: Die, num_rolls: int) -> int:
""" Roll the given dice a number of times and count when they match.
Args:
d1 (Die): One Die object (must not be None)
d2 (Die): Another Die object (must not be None)
num_rolls (int): Positive number of rolls to toss.
Returns:
int number of times both dice showed the same number.
"""
matching = 0
for _ in range(num_rolls):
matching += int(d1.roll() == d2.roll())
return matching | f5e7f119ef639a910b7b824a3aabb47ce7bf1f60 | 3,652,257 |
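count_matching only needs an object with a roll() method, so a minimal fake die (an assumption, not the project's real Die class) is enough to exercise it:
import random
class FakeDie:
    def __init__(self, sides=6):
        self.sides = sides
    def roll(self):
        return random.randint(1, self.sides)
print(count_matching(FakeDie(), FakeDie(), 6000))  # roughly 1000 matches for fair six-sided dice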
def autoaugment_preproccess(
input_size,
scale_size,
normalize=None,
pre_transform=True,
**kwargs):
"""
Args:
input_size:
scale_size:
normalize:
pre_transform:
**kwargs:
Returns:
"""
if normalize is None:
normalize = __imagenet_stats
augment = PbaAugment(
input_size,
scale_size,
normalize=normalize,
pre_transform=pre_transform,
**kwargs)
return augment | de24dd19015ff8e4210f98ec5702fbf9ee637ebd | 3,652,258 |
from urllib.parse import urlencode
def pubchem_image(cid_or_container, size=500):
"""
Generate HTML code for a PubChem molecular structure graphic and link.
Parameters:
cid_or_container: The CID (int, str) or a subscriptable object that
contains a key ``cid``.
Returns:
HTML code for an image from PubChem.
"""
if type(cid_or_container) in (int, str):
cid = cid_or_container
elif 'cid' in cid_or_container:
cid = cid_or_container['cid']
else:
raise MissingParamError('cid')
cid_url = 'https://pubchem.ncbi.nlm.nih.gov/compound/{}'.format(cid)
imgbase = 'https://pubchem.ncbi.nlm.nih.gov/image/imagefly.cgi?'
params = {'cid': cid, 'width': size, 'height': size}
img_url = imgbase + urlencode(params)
ret = '<a target="_blank" href="{0}"><img src="{1}"></a>'
ret = ret.format(cid_url, img_url)
return ret | ca7f9fd9ad31bf834596460f047bf393e3cfd486 | 3,652,259 |
from typing import Optional
from typing import List
def to_lines(text: str, k: int) -> Optional[List[str]]:
"""
Given a block of text and a maximum line length k, split the text into lines of length at most k.
If this cannot be done, i.e. a word is longer than k, return None.
:param text: the block of text to process
:param k: the maximum length of each line
:return: the list of lines
>>> text = 'the quick brown fox jumps over the lazy dog'
>>> to_lines(text, 4) is None
True
>>> to_lines(text, 5)
['the', 'quick', 'brown', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
>>> to_lines(text, 9)
['the quick', 'brown fox', 'jumps', 'over the', 'lazy dog']
>>> to_lines(text, 10)
['the quick', 'brown fox', 'jumps over', 'the lazy', 'dog']
>>> to_lines(text, 12)
['the quick', 'brown fox', 'jumps over', 'the lazy dog']
>>> to_lines('AAAAA', 5)
['AAAAA']
"""
def line_to_str(l: List[str]) -> str:
return ' '.join(l)
# If there is no text or the line length is 0, we can't do anything.
if not text or not k:
return None
# If any word is longer then k, we can't do anything.
words = text.split()
if max(len(word) for word in words) > k:
return None
# Now split the word into lines.
lines = []
line = []
len_so_far = 0
for word in words:
len_word = len(word)
if len_word + len_so_far <= k:
# We add the word to the line plus a blank space afterwards.
# If this is the last word in the line, the blank space will not occur; hence why we check the
# condition <= k rather than < k.
line.append(word)
len_so_far += len_word + 1
else:
# Make the line into a string, add it to lines, and reset everything.
lines.append(line_to_str(line))
line = [word]
len_so_far = len_word + 1
# Last case: if we have a partial line, add it.
if line:
lines.append(line_to_str(line))
# Assert that none of the lines went over the length.
for line in lines:
assert(len(line) <= k)
return lines | 1797f45ce4999a29a9cc74def3f868e473c2775a | 3,652,260 |
import aircv as ac
def _image_pos(name):
"""
查找指定图片在背景图中的位置
"""
imsrc = ac.imread('images/bg/{}.png'.format(name[1:]))
imobj = ac.imread('images/{}.PNG'.format(name))
# find the match position
pos = ac.find_template(imsrc, imobj)
circle_center_pos = pos['result']
return circle_center_pos | 6d4bd64b252c2acbf86e22e8048150a3e1976045 | 3,652,261 |
from typing import Callable
from typing import Optional
import os
import logging
def handle_greenness_indices(parameters: tuple, input_folder: str, working_folder: str, msg_func: Callable, err_func: Callable) -> \
Optional[dict]:
"""Handle running the greenness algorithm
Arguments:
parameters: the specified parameters for the algorithm
input_folder: the base folder where input files are located
working_folder: the working folder for the algorithm
msg_func: function to write messages to
err_func: function to write errors to
Return:
A dictionary of additional parameters to pass to the next command or None
"""
json_filename, experiment_file, search_folder, options = _find_parameter_values(parameters,
('found_json_file', 'experimentdata', 'results_search_folder', 'options'))
# Ensure we have our mandatory parameters
_handle_missing_parameters('greenness indices', (json_filename,), ('found_json_file',))
_handle_missing_files('greenness indices', (json_filename,), ('found_json_file',))
# Adjust the found files JSON to point to our output folder - making a best effort if search_folder is None
new_json_filename = _repoint_files_json_dir(json_filename, search_folder, working_folder, working_folder)
if new_json_filename is None:
new_json_filename = json_filename
# Default our options
if options is None:
options = ''
# Add in additional options
if experiment_file is not None:
if os.path.isfile(experiment_file):
options += ' --metadata ' + experiment_file
else:
msg = 'Warning: invalid experiment file specified for greenness indices "%s"' % experiment_file
logging.warning(msg)
msg_func((msg,), True)
# Write the arguments
json_args = {
'GREENNESS_INDICES_OPTIONS': options if options is not None else '',
}
json_file_path = os.path.join(working_folder, 'args.json')
_write_command_json(json_file_path, json_args)
logging.debug("Command JSON: %s", str(json_args))
# Run the command
ret_value = _run_command('greenness-indices', input_folder, working_folder, json_file_path, msg_func, err_func,
[[new_json_filename,'/scif/apps/src/greenness-indices_files.json']])
command_results = None
if ret_value == 0:
command_results = {'results': _get_results_json(input_folder, err_func, True)}
command_results['top_path'] = working_folder
return command_results | 4e80f185ea9732c7a7556999db7a1d8c0f8e38fe | 3,652,262 |
def product_except_self(nums: list[int]) -> list[int]:
"""Computes the product of all the elements of given array at each index excluding the value at that index.
Note: could also take math.prod(nums) and divide out the num at each index,
but corner cases of num_zeros > 1 and num_zeros == 1 make code inelegant.
Args:
nums:
Returns:
Examples:
>>> product_except_self([])
[]
>>> product_except_self([1,2,3,4])
[24, 12, 8, 6]
>>> product_except_self([-1,1,0,-3,3])
[0, 0, 9, 0, 0]
"""
"""ALGORITHM"""
## INITIALIZE VARS ##
nums_sz = len(nums)
# DS's/res
nums_products_except_i = [1] * nums_sz
## Multiply against product of all elements PRECEDING i
total_product = 1
for i in range(nums_sz):
nums_products_except_i[i] *= total_product
total_product *= nums[i]
## Multiply against product of all elements FOLLOWING i
total_product = 1
for i in reversed(range(nums_sz)):
nums_products_except_i[i] *= total_product
total_product *= nums[i]
return nums_products_except_i | 15090d4873b0dec9ea6119e7c097ccda781e51fa | 3,652,263 |
import os
import boto3
def fetch_pkey():
"""Download private key file from secure S3 bucket"""
s3_client = boto3.client('s3')
s3_client.download_file(S3_BUCKET, BUCKET_KEY, PKEY_FILE)
pkey_filename = PKEY_FILE.replace("/tmp/", "")
if os.path.isfile(PKEY_FILE):
return print(f"{pkey_filename} successfully downloaded from {S3_BUCKET}") | 00fe81e359b5e464e292ef8a0b16a1515b45a66e | 3,652,264 |
import itertools
def so_mörk():
"""Sagnorð."""
return itertools.chain(
# Infinitive - the present tense is skipped and there is no past-tense middle voice
{"sng---", "sng--þ", "snm---"},
# Imperative - always 2nd person and present tense
string_product({"sb"}, MYND, {"2"}, TALA, {"n"}),
# Present participle
string_product({"slg---", "slm---"}),
# Indicative and subjunctive mood
string_product({"s"}, {"f", "v"}, MYND, PERSÓNA, TALA, TÍÐ),
# Past participle - it appears to occur in the nominative, accusative and dative. We also include the genitive to be safe.
string_product({"s"}, {"þ"}, MYND, KYN, TALA, FALL),
) | 07a5c79a78083392a5ce96cfb74c13d644a5585e | 3,652,265 |
import numpy as np
import tensorflow as tf
def _histogram(values, value_range, nbins=100, dtype=tf.int32, name=None):
"""Return histogram of values.
Given the tensor `values`, this operation returns a rank 1 histogram counting
the number of entries in `values` that fell into every bin. The bins are
equal width and determined by the arguments `value_range` and `nbins`.
Args:
values: Numeric `Tensor`.
value_range: Shape [2] `Tensor` of same `dtype` as `values`.
values <= value_range[0] will be mapped to hist[0],
values >= value_range[1] will be mapped to hist[-1].
nbins: Scalar `int32 Tensor`. Number of histogram bins.
dtype: dtype for returned histogram.
name: A name for this operation (defaults to 'histogram').
Returns:
A 1-D `Tensor` holding histogram of values.
"""
with tf.name_scope(name, 'histogram', [values, value_range, nbins]) as scope:
values = tf.convert_to_tensor(values, name='values')
values = tf.reshape(values, [-1])
value_range = tf.convert_to_tensor(value_range, name='value_range')
nbins_float = np.float32(nbins)
# Map tensor values that fall within value_range to [0, 1].
scaled_values = tf.truediv(
values - value_range[0],
value_range[1] - value_range[0],
name='scaled_values')
# map tensor values within the open interval value_range to {0,.., nbins-1},
# values outside the open interval will be zero or less, or nbins or more.
indices = tf.floor(nbins_float * scaled_values, name='indices')
# Clip edge cases (e.g. value = value_range[1]) or "outliers."
indices = tf.cast(
tf.clip_by_value(indices, 0, nbins_float - 1), tf.int32)
return tf.unsorted_segment_sum(
tf.ones_like(indices, dtype=dtype), indices, nbins, name=scope) | d15d1a8aac528c5641e391d56cb8141f07c244c5 | 3,652,266 |
def find_missing_letter(chars):
"""
chars: string of characters
return: missing letter between chars or after
"""
letters = [char for char in chars][0]
chars = [char.lower() for char in chars]
alphabet = [char for char in "abcdefghijklmnopqrstuvwxyz"]
starting_index = alphabet.index(chars[0])
for letter in alphabet[starting_index:]:
if letter not in chars and chars[0].lower() == letters[0]:
return letter
if letter not in chars and chars[0].upper() == letters[0]:
return letter.upper() | bc663b68ac49095285a24b06554337c1582c8199 | 3,652,267 |
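Two illustrative calls to find_missing_letter (consecutive letters with a single gap):
print(find_missing_letter("abcdf"))  # 'e'
print(find_missing_letter("OQRS"))   # 'P'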
def series_to_pyseries(
name: str,
values: "pl.Series",
) -> "PySeries":
"""
Construct a PySeries from a Polars Series.
"""
values.rename(name, in_place=True)
return values.inner() | 958e08e428948adbdf353b911b9e5ab814e06cdb | 3,652,268 |
def _stagefile(coption, source, destination, filesize, is_stagein, setup=None, **kwargs):
"""
Stage the file (stagein or stageout)
:return: destination file details (checksum, checksum_type) in case of success, throw exception in case of failure
:raise: PilotException in case of controlled error
"""
filesize_cmd, checksum_cmd, checksum_type = None, None, None
cmd = '%s -np -f %s %s %s' % (copy_command, coption, source, destination)
if setup:
cmd = "source %s; %s" % (setup, cmd)
#timeout = get_timeout(filesize)
#logger.info("Executing command: %s, timeout=%s" % (cmd, timeout))
rcode, stdout, stderr = execute(cmd, **kwargs)
logger.info('rcode=%d, stdout=%s, stderr=%s', rcode, stdout, stderr)
if rcode: ## error occurred
error = resolve_common_transfer_errors(stdout + stderr, is_stagein=is_stagein)
#rcode = error.get('rcode') ## TO BE IMPLEMENTED
#if not is_stagein and rcode == PilotErrors.ERR_CHKSUMNOTSUP: ## stage-out, on fly checksum verification is not supported .. ignore
# logger.info('stage-out: ignore ERR_CHKSUMNOTSUP error .. will explicitly verify uploaded file')
# return None, None
raise PilotException(error.get('error'), code=error.get('rcode'), state=error.get('state'))
# extract filesize and checksum values from output
if coption != "":
filesize_cmd, checksum_cmd, checksum_type = get_file_info_from_output(stdout + stderr)
## verify transfer by returned checksum or call remote checksum calculation
## to be moved at the base level
is_verified = True ## TO BE IMPLEMENTED LATER
if not is_verified:
rcode = ErrorCodes.GETADMISMATCH if is_stagein else ErrorCodes.PUTADMISMATCH
raise PilotException("Copy command failed", code=rcode, state='AD_MISMATCH')
return filesize_cmd, checksum_cmd, checksum_type | 725fb5d77489746a8ed0dd587a32bda669008456 | 3,652,269 |
def tally_cache_file(results_dir):
"""Return a fake tally cache file for testing."""
file = results_dir / 'tally.npz'
file.touch()
return file | 271c58cc263cf0bfba80f00b0831303326d4d1a8 | 3,652,270 |
import torch
def get_soft_label(cls_label, num_classes):
"""
compute soft label replace one-hot label
:param cls_label:ground truth class label
:param num_classes:mount of classes
:return:
"""
# def metrix_fun(a, b):
# torch.IntTensor(a)
# torch.IntTensor(b)
# metrix_dis = (a - b) ** 2
# return metrix_dis
def metrix_fun(a, b):
a = a.type_as(torch.FloatTensor())
b = b.type_as(torch.FloatTensor())
metrix_dis = (torch.log(a) - torch.log(b)) ** 2
return metrix_dis
def exp(x):
x = x.type_as(torch.FloatTensor())
return torch.exp(x)
rt = torch.IntTensor([cls_label]) # must be torch.IntTensor or torch.LongTensor
rk = torch.IntTensor([idx for idx in range(1, num_classes + 1, 1)])
metrix_vector = exp(-metrix_fun(rt, rk))
return metrix_vector / torch.sum(metrix_vector) | e08e6fc86252bf76dd8a852c63e92776b4fdcfc3 | 3,652,271 |
def get_option(args, config, key, default=None):
"""Gets key option from args if it is provided, otherwise tries to get it from config"""
if hasattr(args, key) and getattr(args, key) is not None:
return getattr(args, key)
return config.get(key, default) | 54d77c6ae3e40b2739156b07747facc4a952c237 | 3,652,272 |
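A small sketch of get_option with argparse.Namespace and hypothetical keys, showing the args-over-config precedence:
from argparse import Namespace
args = Namespace(batch_size=None, lr=0.01)
config = {"batch_size": 32}
print(get_option(args, config, "batch_size"))  # 32 (args value is None, falls back to config)
print(get_option(args, config, "lr"))          # 0.01 (taken from args)
print(get_option(args, config, "epochs", 10))  # 10 (default)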
import php_python  # project-local module providing the CHARSET constant used below
def z_decode(p):
    """
    Decode a PHP-serialized value into the corresponding Python object.
    p: bytes; returns (value, remaining_bytes)
    """
    if p[0] == 0x4e:  # NULL 0x4e-'N'
        return None, p[2:]
    elif p[0] == 0x62:  # bool 0x62-'b'
        if p[2] == 0x30:  # 0x30-'0'
            return False, p[4:]
        else:
            return True, p[4:]
    elif p[0] == 0x69:  # int 0x69-'i'
        i = p.index(0x3b, 1)  # 0x3b-';'
        return int(p[2:i]), p[i+1:]
    elif p[0] == 0x64:  # double 0x64-'d'
        i = p.index(0x3b, 1)  # 0x3b-';'
        return float(p[2:i]), p[i+1:]
    elif p[0] == 0x73:  # string 0x73-'s'
        len_end = p.index(0x3a, 2)  # 0x3a-':'
        str_len = int(p[2:len_end])
        end = len_end + 1 + str_len + 2
        v = p[(len_end + 2):(len_end + 2 + str_len)]
        return str(v, php_python.CHARSET), p[end+1:]
    elif p[0] == 0x61:  # array 0x61-'a'
        list_ = []   # list result (used when the keys are 0..n-1)
        dict_ = {}   # dict result (used otherwise)
        flag = True  # True -> return the list, False -> return the dict
        second = p.index(0x3a, 2)  # 0x3a-':'
        num = int(p[2:second])     # number of elements
        pp = p[second+2:]          # serialized elements
        for i in range(num):
            key, pp = z_decode(pp)  # decode the key
            if i == 0:  # check whether the first key is the int 0
                if (not isinstance(key, int)) or (key != 0):
                    flag = False
            val, pp = z_decode(pp)  # decode the value
            list_.append(val)
            dict_[key] = val
        return (list_, pp[2:]) if flag else (dict_, pp[2:])
else:
return p,'' | e4735532f783bdbcc76512ba414b296b743ebeb7 | 3,652,273 |
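A minimal usage sketch for z_decode, assuming php_python.CHARSET is a UTF-8-compatible encoding:
print(z_decode(b'i:42;'))         # (42, b'')
print(z_decode(b's:5:"hello";'))  # ('hello', b'')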
import re
def parse_extension(uri):
""" Parse the extension of URI. """
patt = re.compile(r'(\.\w+)')
return re.findall(patt, uri)[-1] | 5ed4eee77b92f04e62390128939113168d715342 | 3,652,274 |
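Two quick examples with hypothetical URIs; the regex returns the last dotted component:
print(parse_extension('https://example.com/img/photo.png'))        # '.png'
print(parse_extension('https://example.com/data/archive.tar.gz'))  # '.gz'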
import math
def rotz(ang):
"""
Calculate the transform for rotation around the Z-axis.
Arguments:
        ang: Rotation angle in degrees.
    Returns:
        A 4x4 nested list of floats representing a homogeneous-coordinates
        rotation matrix around the Z axis.
"""
rad = math.radians(ang)
c = math.cos(rad)
s = math.sin(rad)
return [
[c, -s, 0.0, 0.0],
[s, c, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
] | 4332242c5818ccc00d64cbebf7a861727e080964 | 3,652,275 |
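For example, a 90-degree rotation maps the X axis onto the Y axis (cos(90°) shows up as a tiny float rather than exactly zero):
m = rotz(90)
print(m[0])  # ~[0.0, -1.0, 0.0, 0.0]
print(m[1])  # ~[1.0, 0.0, 0.0, 0.0]; the Z and homogeneous rows are unchanged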
def getBits(data, offset, bits=1):
"""
Get specified bits from integer
>>> bin(getBits(0b0011100,2))
'0b1'
>>> bin(getBits(0b0011100,0,4))
'0b1100'
"""
mask = ((1 << bits) - 1) << offset
return (data & mask) >> offset | 0bdae35f5afa076d0e5a73b91d2743d9cf156f7d | 3,652,276 |
import skimage.transform
def rescale(img, input_height, input_width):
"""Code from Loading_Pretrained_Models.ipynb - a Caffe2 tutorial"""
aspect = img.shape[1]/float(img.shape[0])
if(aspect>1):
# landscape orientation - wide image
res = int(aspect * input_height)
imgScaled = skimage.transform.resize(img, (input_width, res))
if(aspect<1):
# portrait orientation - tall image
res = int(input_width/aspect)
imgScaled = skimage.transform.resize(img, (res, input_height))
if(aspect == 1):
imgScaled = skimage.transform.resize(img, (input_width, input_height))
return imgScaled | 6d6ac5cf1f496c9b7209e2b665e314c587471e82 | 3,652,277 |
import numpy as np
def compute_halfmax_crossings(sig):
"""
Compute threshold_crossing, linearly interpolated.
Note this code assumes there is just one peak in the signal.
"""
half_max = np.max(sig)/2.0
fwhm_set = np.where(sig > half_max)
l_ndx = np.min(fwhm_set) #assumes a clean peak.
if l_ndx > 0:
fwhm_left_ndx = l_ndx - 1 + ((half_max - sig[l_ndx-1]) / (float(sig[l_ndx]) - sig[l_ndx-1]))
else:
fwhm_left_ndx = 0
r_ndx = np.max(fwhm_set) #assumes a clean peak.
if r_ndx < len(sig)-1:
fwhm_right_ndx = r_ndx + ((half_max - sig[r_ndx]) / (float(sig[r_ndx+1]) - sig[r_ndx]))
else:
fwhm_right_ndx = len(sig)-1
return np.array([fwhm_left_ndx,fwhm_right_ndx]) | 81347177aa442bf20ac56194127444ab5948a065 | 3,652,278 |
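A small sketch with a clean triangular peak: the half-maximum (2.0) is crossed exactly at samples 2 and 6, so the interpolated indices are 2.0 and 6.0.
sig = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 3.0, 2.0, 1.0, 0.0])
print(compute_halfmax_crossings(sig))  # [2. 6.]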
from sqlglot import exp
def quote_identities(expression):
"""
Rewrite sqlglot AST to ensure all identities are quoted.
Example:
>>> import sqlglot
>>> expression = sqlglot.parse_one("SELECT x.a AS a FROM db.x")
>>> quote_identities(expression).sql()
'SELECT "x"."a" AS "a" FROM "db"."x"'
Args:
expression (sqlglot.Expression): expression to quote
Returns:
sqlglot.Expression: quoted expression
"""
def qualify(node):
if isinstance(node, exp.Identifier):
node.set("quoted", True)
return node
return expression.transform(qualify) | 7689e883eb5360bee7e22c57b4e177be2f732e8b | 3,652,279 |
import struct
import zlib
try:
    import numpy as np
except ImportError:  # the function checks for this and raises a clearer error below
    np = None
def write_png(data, origin='upper', colormap=None):
"""
Transform an array of data into a PNG string.
This can be written to disk using binary I/O, or encoded using base64
for an inline PNG like this:
>>> png_str = write_png(array)
>>> 'data:image/png;base64,'+png_str.encode('base64')
Inspired from
http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image
Parameters
----------
data: numpy array or equivalent list-like object.
Must be NxM (mono), NxMx3 (RGB) or NxMx4 (RGBA)
origin : ['upper' | 'lower'], optional, default 'upper'
Place the [0,0] index of the array in the upper left or lower left
corner of the axes.
colormap : callable, used only for `mono` image.
Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)]
for transforming a mono image into RGB.
It must output iterables of length 3 or 4, with values between
0. and 1. Hint: you can use colormaps from `matplotlib.cm`.
Returns
-------
PNG formatted byte string
"""
if np is None:
raise ImportError('The NumPy package is required'
' for this functionality')
if colormap is None:
def colormap(x):
return (x, x, x, 1)
array = np.atleast_3d(data)
height, width, nblayers = array.shape
if nblayers not in [1, 3, 4]:
raise ValueError('Data must be NxM (mono), '
'NxMx3 (RGB), or NxMx4 (RGBA)')
assert array.shape == (height, width, nblayers)
if nblayers == 1:
array = np.array(list(map(colormap, array.ravel())))
nblayers = array.shape[1]
if nblayers not in [3, 4]:
raise ValueError('colormap must provide colors of'
'length 3 (RGB) or 4 (RGBA)')
array = array.reshape((height, width, nblayers))
assert array.shape == (height, width, nblayers)
if nblayers == 3:
array = np.concatenate((array, np.ones((height, width, 1))), axis=2)
nblayers = 4
assert array.shape == (height, width, nblayers)
assert nblayers == 4
# Normalize to uint8 if it isn't already.
if array.dtype != 'uint8':
array = array * 255./array.max(axis=(0, 1)).reshape((1, 1, 4))
array = array.astype('uint8')
# Eventually flip the image.
if origin == 'lower':
array = array[::-1, :, :]
# Transform the array to bytes.
raw_data = b''.join([b'\x00' + array[i, :, :].tobytes()
for i in range(height)])
def png_pack(png_tag, data):
chunk_head = png_tag + data
return (struct.pack('!I', len(data)) +
chunk_head +
struct.pack('!I', 0xFFFFFFFF & zlib.crc32(chunk_head)))
return b''.join([
b'\x89PNG\r\n\x1a\n',
png_pack(b'IHDR', struct.pack('!2I5B', width, height, 8, 6, 0, 0, 0)),
png_pack(b'IDAT', zlib.compress(raw_data, 9)),
png_pack(b'IEND', b'')]) | 516c609bbe19974065a1dc32b34de80bab06c17e | 3,652,280 |
import logging
from urllib.parse import urlparse, urljoin
log = logging.getLogger(__name__)
# Note: redirect_back is a project-local helper (not defined in this snippet).
def prepare_url(url, source_url=None):
"""
Operations that purify a url, removes arguments,
redirects, and merges relatives with absolutes.
"""
try:
if source_url is not None:
source_domain = urlparse(source_url).netloc
proper_url = urljoin(source_url, url)
proper_url = redirect_back(proper_url, source_domain)
# proper_url = remove_args(proper_url)
else:
# proper_url = remove_args(url)
proper_url = url
except ValueError as e:
log.critical('url %s failed on err %s' % (url, str(e)))
proper_url = ''
return proper_url | 867d3eb31a20893f6f5be4bd4f6a925f38d7c1a3 | 3,652,281 |
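A brief usage sketch with hypothetical URLs. Without a source_url the URL is returned unchanged; with one, relative links are resolved against the source page and passed through the project's redirect_back helper:
print(prepare_url('https://news.example.com/world/story.html'))
# -> 'https://news.example.com/world/story.html'

# Requires the project-local redirect_back helper to be importable:
# prepare_url('/world/story.html', source_url='https://news.example.com/index.html')
# -> 'https://news.example.com/world/story.html' (possibly adjusted by redirect_back)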
import re
def str_contains_num_version_range_with_x(str):
"""
    Check whether a string contains a version-number range written with an x wildcard.
    :param str: the string to check.
    :return: True if the string contains such a version range, False otherwise.
"""
return bool(re.search(r'\d+((\.\d+)+)?(\.x)? < \d+((\.\d+)+)?(\.x)?', str)) | d11c34d1378b29df279da63882a9e34581fd9c13 | 3,652,282 |
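Two quick checks against the pattern above:
print(str_contains_num_version_range_with_x('affected versions 2.4.x < 2.4.39'))  # True
print(str_contains_num_version_range_with_x('affected version 2.4.39'))           # False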
import argparse
def get_args():
"""
gets cli args via the argparse module
"""
msg = "This script records cpu statistics"
# create an instance of parser from the argparse module
parser = argparse.ArgumentParser(description=msg)
# add expected arguments
parser.add_argument('-s', dest='silent', required=False,
action="store_true",
help="dont display statistics to screen")
parser.add_argument('-a', dest='append', required=False,
action="store_true",
help="dont overwrite previous files")
parser.add_argument('-c', dest='convert', required=False,
action="store_true",
help="converts data to human readable")
parser.add_argument('-n', dest='noheader', required=False,
action="store_true", help="dont write header")
parser.add_argument('-R', dest='refresh', required=False)
parser.add_argument('-r', dest='runtime', required=False)
parser.add_argument('-o', dest='outfile', required=False)
args = parser.parse_args()
if args.silent:
silent = True
else:
silent = False
if args.noheader:
noheader = True
else:
noheader = False
if args.append:
append = True
else:
append = False
if args.refresh:
refresh = float(args.refresh)
else:
        # default refresh is 5 seconds
refresh = 5
if args.runtime:
runtime = float(args.runtime)
else:
# default runtime is eight hours
runtime = 28800
if args.outfile:
outfile = args.outfile
else:
outfile = 'memutil.csv'
if args.convert:
convert = True
else:
convert = False
return silent, noheader, refresh, runtime, append, outfile, convert | a97cdbf33710bac17d78581548970e85c2397e15 | 3,652,283 |
from typing import Callable
from typing import Coroutine
def mpc_coro_ignore(
func: Callable[..., Coroutine[SecureElement, None, SecureElement]]
) -> Callable[..., SecureElement]:
"""
A wrapper for an MPC coroutine that ensures that the behaviour of the code is unaffected by
the type annotations.
:param func: The async function to be wrapped
:return: A placeholder for which a result will automatically be set when the coroutine has
finished running
"""
return mpc_coro(func, apply_program_counter_wrapper=False, ignore_type_hints=True) | eefd690145067856709c4edf02958eba428a4313 | 3,652,284 |
def is_all_in_one(config):
"""
Returns True if packstack is running allinone setup, otherwise
returns False.
"""
    # Even if some hosts have been excluded from installation, we must still
    # count them when checking for all-in-one. The MariaDB host should,
    # however, be omitted if we are not installing MariaDB.
return len(filtered_hosts(config, exclude=False, dbhost=True)) == 1 | 35bd2f2edf3f12575612edb27218509db769d68f | 3,652,285 |
def index():
"""
example action using the internationalization operator T and flash
rendered by views/default/index.html or views/generic.html
if you need a simple wiki simply replace the two lines below with:
return auth.wiki()
"""
response.title = 'Award management'
data = {"message": "Put the award links here."}
return data | 6a0e910c793bf538f65826579a9ba6c9401bd4de | 3,652,286 |
import requests
import json
def session_generate(instanceAddress, appSecret): # pragma: no cover
"""
**Deprecated**
Issue a token to authenticate the user.
:param instanceAddress: Specify the misskey instance address.
:param appSecret: Specifies the secret key.
:type instanceAddress: str
:type appSecret: str
:rtype: dict
"""
res = requests.post(f"https://{instanceAddress}/api/auth/session/generate", data=json.dumps({'appSecret': appSecret}), headers={'content-type': 'application/json'})
if res.status_code != 200:
raise MisskeyAPIException('/auth/session/generate', 200, res.status_code, res.text)
else:
return json.loads(res.text) | 69e8b6cf54d92041326cc4f8d2dafa82a0f043a7 | 3,652,287 |
import numpy as np
# Note: _random_no_replace is a project-local helper (not defined in this snippet).
def _dream_proposals( currentVectors, history, dimensions, nChains, DEpairs, gamma, jitter, eps ):
"""
generates and returns proposal vectors given the current states
"""
sampleRange = history.ncombined_history
currentIndex = np.arange(sampleRange - nChains,sampleRange)[:, np.newaxis]
combined_history = history.combined_history
#choose some chains without replacement to combine
chains = _random_no_replace(DEpairs * 2, sampleRange - 1, nChains)
# makes sure we have already selected the current chain so it is not replaced
# this ensures that the the two chosen chains cannot be the same as the chain for which the jump is
chains += (chains >= currentIndex)
chainDifferences = (np.sum(combined_history[chains[:, 0:DEpairs], :], axis = 1) -
np.sum(combined_history[chains[:, DEpairs:(DEpairs*2)], :], axis = 1))
e = np.random.normal(0, jitter, (nChains,dimensions))
E = np.random.normal(0, eps,(nChains,dimensions)) # could replace eps with 1e-6 here
proposalVectors = currentVectors + (1 + e) * gamma[:,np.newaxis] * chainDifferences + E
return proposalVectors | d72075d09589b77f7077a5785db9f77e6a4182b0 | 3,652,288 |
from typing import Optional
import os
import tempfile
import shutil
def handle_compressed_file(
file_prefix: FilePrefix,
datatypes_registry,
ext: str = "auto",
tmp_prefix: Optional[str] = "sniff_uncompress_",
tmp_dir: Optional[str] = None,
in_place: bool = False,
check_content: bool = True,
) -> HandleCompressedFileResponse:
"""
Check uploaded files for compression, check compressed file contents, and uncompress if necessary.
Supports GZip, BZip2, and the first file in a Zip file.
For performance reasons, the temporary file used for uncompression is located in the same directory as the
input/output file. This behavior can be changed with the `tmp_dir` param.
``ext`` as returned will only be changed from the ``ext`` input param if the param was an autodetect type (``auto``)
and the file was sniffed as a keep-compressed datatype.
``is_valid`` as returned will only be set if the file is compressed and contains invalid contents (or the first file
in the case of a zip file), this is so lengthy decompression can be bypassed if there is invalid content in the
first 32KB. Otherwise the caller should be checking content.
"""
CHUNK_SIZE = 2**20 # 1Mb
is_compressed = False
compressed_type = None
keep_compressed = False
is_valid = False
filename = file_prefix.filename
uncompressed_path = filename
tmp_dir = tmp_dir or os.path.dirname(filename)
check_compressed_function = COMPRESSION_CHECK_FUNCTIONS.get(file_prefix.compressed_format)
if check_compressed_function:
is_compressed, is_valid = check_compressed_function(filename, check_content=check_content)
compressed_type = file_prefix.compressed_format
if is_compressed and is_valid:
if ext in AUTO_DETECT_EXTENSIONS:
# attempt to sniff for a keep-compressed datatype (observing the sniff order)
sniff_datatypes = filter(lambda d: getattr(d, "compressed", False), datatypes_registry.sniff_order)
sniffed_ext = run_sniffers_raw(file_prefix, sniff_datatypes)
if sniffed_ext:
ext = sniffed_ext
keep_compressed = True
else:
datatype = datatypes_registry.get_datatype_by_extension(ext)
keep_compressed = getattr(datatype, "compressed", False)
# don't waste time decompressing if we sniff invalid contents
if is_compressed and is_valid and file_prefix.auto_decompress and not keep_compressed:
assert compressed_type # Tell type checker is_compressed will only be true if compressed_type is also set.
with tempfile.NamedTemporaryFile(prefix=tmp_prefix, dir=tmp_dir, delete=False) as uncompressed:
with DECOMPRESSION_FUNCTIONS[compressed_type](filename) as compressed_file:
# TODO: it'd be ideal to convert to posix newlines and space-to-tab here as well
try:
for chunk in file_reader(compressed_file, CHUNK_SIZE):
if not chunk:
break
uncompressed.write(chunk)
except OSError as e:
os.remove(uncompressed.name)
raise OSError(
"Problem uncompressing {} data, please try retrieving the data uncompressed: {}".format(
compressed_type, util.unicodify(e)
)
)
finally:
is_compressed = False
uncompressed_path = uncompressed.name
if in_place:
# Replace the compressed file with the uncompressed file
shutil.move(uncompressed_path, filename)
uncompressed_path = filename
elif not is_compressed or not check_content:
is_valid = True
return HandleCompressedFileResponse(is_valid, ext, uncompressed_path, compressed_type, is_compressed) | 980857aece095f894580b0d523dc10b4c94a6977 | 3,652,289 |
import copy
import json
def compress_r_params(r_params_dict):
"""
    Convert a dictionary of r_params to a compressed string format
Parameters
----------
r_params_dict: Dictionary
dictionary with parameters for weighting matrix. Proper fields
and formats depend on the mode of data_weighting.
data_weighting == 'dayenu':
dictionary with fields
'filter_centers', list of floats (or float) specifying the (delay) channel numbers
at which to center filtering windows. Can specify fractional channel number.
'filter_half_widths', list of floats (or float) specifying the width of each
filter window in (delay) channel numbers. Can specify fractional channel number.
'filter_factors', list of floats (or float) specifying how much power within each filter window
is to be suppressed.
Returns
-------
string containing r_params dictionary in json format and only containing one
copy of each unique dictionary with a list of associated baselines.
"""
if r_params_dict == {} or r_params_dict is None:
return ''
else:
r_params_unique = {}
r_params_unique_bls = {}
r_params_index = -1
for rp in r_params_dict:
#do not include data set in tuple key
already_in = False
for rpu in r_params_unique:
if r_params_unique[rpu] == r_params_dict[rp]:
r_params_unique_bls[rpu] += [rp,]
already_in = True
if not already_in:
r_params_index += 1
r_params_unique[r_params_index] = copy.copy(r_params_dict[rp])
r_params_unique_bls[r_params_index] = [rp,]
for rpi in r_params_unique:
r_params_unique[rpi]['baselines'] = r_params_unique_bls[rpi]
r_params_str = json.dumps(r_params_unique)
return r_params_str | 4e56badfb7ea773d9d1104b134aa61b83d8d2f2f | 3,652,290 |
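A small sketch with hypothetical baseline keys: two baselines sharing identical filter settings collapse into a single unique entry whose 'baselines' field lists both.
r_params = {
    'bl1': {'filter_centers': [0.0], 'filter_half_widths': [100e-9], 'filter_factors': [1e-9]},
    'bl2': {'filter_centers': [0.0], 'filter_half_widths': [100e-9], 'filter_factors': [1e-9]},
}
print(compress_r_params(r_params))
# -> JSON string with one entry whose 'baselines' field is ['bl1', 'bl2']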
def replace_ext(filename, oldext, newext):
"""Safely replaces a file extension new a new one"""
if filename.endswith(oldext):
return filename[:-len(oldext)] + newext
else:
raise Exception("file '%s' does not have extension '%s'" %
(filename, oldext)) | 33ab99860cfe90b72388635d5d958abe431fa45e | 3,652,291 |
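For example:
print(replace_ext('report.txt', '.txt', '.md'))  # 'report.md'
# replace_ext('report.txt', '.csv', '.md') raises, since the old extension does not match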
def getAllArt():
"""
    1/ verify whether the user is authenticated (logged in)
    2/ if so, they can add or update articles;
       if not, they can only read articles
"""
if request.method == "GET":
articles = actualArticle.getAll()
return articles
elif request.method == 'PUT':
if 'logged_in' in session:
response = actualArticle.crud(request, id)
articles = actualArticle.getAll()
return articles
else:
message="To add a new article you have to login"
return message | d7506d4fbe3b4f670c30ce18501fe6ea4d410169 | 3,652,292 |
from typing import Union
from typing import Tuple
import warnings
import citation_url  # external package providing citation_url.parse
def normalize_citation(line: str) -> Union[Tuple[str, str], Tuple[None, str]]:
"""Normalize a citation string that might be a crazy URL from a publisher."""
warnings.warn("this function has been externalized to :func:`citation_url.parse`")
return citation_url.parse(line) | c004e75aae0bcb625d8906421057a484b6831606 | 3,652,293 |
import os
import joblib
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Assumption: the source project loads its CNN with Keras; adjust if a different backend is used.
from tensorflow.keras.models import load_model
# Note: f1_m, precision_m, recall_m, sliding_window, check_window, same_dist_elems,
# draw_rectangles and the globals winW, winH, nChannels, model_fpath, scaler_fpath
# are defined elsewhere in the source project and are not part of this snippet.
def cnn_predict_grid(data_in=None,
win_sizes=[((int(8), int(5)), 2, 1),((int(10), int(6)), 3, 2),((int(13), int(8)), 4, 3)],
problim = 0.95,
model_fpath=model_fpath,
scaler_fpath=scaler_fpath,
nc_fpath='D:/Master/data/cmems_data/global_10km/noland/phys_noland_2016_060.nc',
storedir=None):
""" Test the model using multiple sliding windows, there will be multiple returned predictions
data in: [lon,lat,x,y,ssl,uvel,vvel]
storedir: path to directory for storing image of predicted grid, if None, no image is stored"""
print("\n\n")
lon,lat,x,y,ssl,uvel,vvel = data_in
# Recreate the exact same model purely from the file
custom_objects = {
"f1_m": f1_m,
"precision_m": precision_m,
"recall_m": recall_m
}
clf = load_model(model_fpath, custom_objects=custom_objects)
scaler = joblib.load(scaler_fpath) # Import the std sklearn scaler model
nx, ny = ssl.shape
# Create canvas to show the cv2 rectangles around predictions
fig, ax = plt.subplots(figsize=(15, 12))
n=-1
color_array = np.sqrt(((uvel.T-n)/2)**2 + ((vvel.T-n)/2)**2)
# x and y needs to be equally spaced for streamplot
if not (same_dist_elems(x) or same_dist_elems(y)):
x, y = np.arange(len(x)), np.arange(len(y))
ax.contourf(x, y, ssl.T, cmap='rainbow', levels=150)
ax.streamplot(x, y, uvel.T, vvel.T, color=color_array, density=10)
#ax.quiver(x, y, uvel.T, vvel.T, scale=3)
fig.subplots_adjust(0,0,1,1)
fig.canvas.draw()
im = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
im = im.reshape(fig.canvas.get_width_height()[::-1] + (3,))
imCopy = cv2.cvtColor(im,cv2.COLOR_RGB2BGR)
imH, imW, _ = imCopy.shape # col, row
winScaleW, winScaleH = imW*1.0/nx, imH*1.0/ny # Scalar coeff from dataset to cv2 image
# Define what variables are used as channel, if only uvel and vvel it should be [1,2]
to_be_scaled = [1,2]
data = [ssl, uvel, vvel]
# Holds rectangle coordinates with dataset and image indexes
cyc_r, acyc_r = [], []
cyc_r_im, acyc_r_im = [], []
print("++ Performing sliding window and predicting using pre-trained CNN model")
    # Loop over different window sizes; they will be resized down to the correct dimensions anyway
for wSize, wStep, hStep in win_sizes:
# loop over the sliding window of indeces
for rectIdx, (i, j, (xIdxs, yIdxs)) in enumerate(sliding_window(ssl, wStep, hStep, windowSize=wSize)):
if xIdxs[-1] >= nx or yIdxs[-1] >= ny:
continue
winW2, winH2 = winW*6, winH*6
winSize = (winH2, winW2)
masked = False # Continue if window hits land
data_window, data_scaled_window = [], []
for c in range(len(data)):
# Creates window, checks if masked, if not returns the window
a = check_window(data[c], xIdxs, yIdxs)
if a is None:
masked = True
break
# append window if not masked
data_window.append( a )
# Resize the original window to CNN input dim
data_window[c] = cv2.resize(data_window[c], dsize=(winSize), interpolation=cv2.INTER_CUBIC)
if c in to_be_scaled:
# Create a copy of window to be scaled
data_scaled_window.append(data_window[c].copy())
k = len(data_scaled_window) - 1
# Flatten array before applying scalar
data_scaled_window[k] = data_scaled_window[k].flatten()
# Scale the data
data_scaled_window[k] = scaler[k].transform([data_scaled_window[k]])[0]
# Reshape scaled data to original shape
data_scaled_window[k] = data_scaled_window[k].reshape(winW2, winH2)
# continue to next window if mask (land) is present
if masked: continue
            # Transform the input window to the CNN input format
X_cnn = np.zeros((1,winW2,winH2,nChannels))
for lo in range(winW2): # Row
for la in range(winH2): # Column
for c in range(nChannels): # Channels
X_cnn[0,lo,la,c] = data_scaled_window[c][lo,la]
# Predict and receive probability
prob = clf.predict(X_cnn)
# This is the size of the current sliding window
nxWin, nyWin = len(xIdxs), len(yIdxs)
# y starts in top left for cv2, want it to be bottom left
xr, yr = int(winScaleW*(i)), int(winScaleH*(ny-j)) # rect coords
xrW, yrW= int(winScaleW*nxWin), int(winScaleH*nyWin) # rect width
# If either cyclone or acyclone are above probability limit, we have a prediction
if any(p >= problim for p in prob[0,1:]):
if prob[0,1] >= problim:
acyc_r.append([i, j, i + nxWin, j + nyWin])
acyc_r_im.append([xr, yr, xr + xrW, yr - xrW])
cv2.rectangle(imCopy, (xr, yr), (xr + xrW, yr - xrW), (217, 83, 25), 2)
#print('anti-cyclone | prob: {}'.format(prob[0,1]*100))
else:
cyc_r.append([i, j, i + nxWin, j + nyWin])
cyc_r_im.append([xr, yr, xr + xrW, yr - xrW])
cv2.rectangle(imCopy, (xr, yr), (xr + xrW, yr - xrW), (0, 76, 217), 2)
#print('cyclone | prob: {}'.format(prob[0,2]*100))
# We want to return both grouped and ungrouped predictions, in case user wants different grouping
# Predictions need at least 2 rectangles with 20% overlap to be a final prediciton
cyc_r_im_grouped, _ = cv2.groupRectangles(rectList=cyc_r_im, groupThreshold=1, eps=0.2)
acyc_r_im_grouped, _ = cv2.groupRectangles(rectList=acyc_r_im, groupThreshold=1, eps=0.2)
# if a store directory is defined, create and store an image of both grouped and ungrouped
# predicted grid at location
imgdir = 'C:/Users/47415/Master/images/compare/'
if isinstance(storedir, str):
if not os.path.isdir(imgdir + storedir):
os.makedirs(imgdir + storedir)
cv2.imwrite(imgdir + f'{storedir}/full_pred_grid.png', imCopy)
imCopy = cv2.cvtColor(im,cv2.COLOR_RGB2BGR)
draw_rectangles(imCopy, cyc_r_im_grouped, lon, lat, winScaleW, winScaleH, 'cyclone')
draw_rectangles(imCopy, acyc_r_im_grouped, lon, lat, winScaleW, winScaleH, 'anti-cyclone')
cv2.imwrite(imgdir + f'{storedir}/grouped_pred_grid.png', imCopy)
#cv2.imshow("Window", imCopy)
#cv2.waitKey(0)
plt.close(fig)
return cyc_r, acyc_r, cyc_r_im_grouped, acyc_r_im_grouped | 4d29cea83917b32969d3e9b0b271e337508757b7 | 3,652,294 |
def connect_syndicate( username=CONFIG.SYNDICATE_OPENCLOUD_USER, password=CONFIG.SYNDICATE_OPENCLOUD_PASSWORD, user_pkey_pem=CONFIG.SYNDICATE_OPENCLOUD_PKEY ):
"""
Connect to the OpenCloud Syndicate SMI, using the OpenCloud user credentials.
"""
debug = True
if hasattr(CONFIG, "DEBUG"):
debug = CONFIG.DEBUG
client = syntool.Client( username, CONFIG.SYNDICATE_SMI_URL,
password=password,
user_pkey_pem=user_pkey_pem,
debug=debug )
return client | 61530129f45c89d6b735979dfa2fec347d364a06 | 3,652,295 |
import numpy as np
def get_all_comb_pairs(M, b_monitor=False):
"""returns all possible combination pairs from M repeated measurements (M choose 2)
Args:
        M (int): number of repeated measurements
    Returns:
        indices1, indices2
"""
indices1 = np.zeros(int(M*(M-1)/2))
indices2 = np.zeros(int(M*(M-1)/2))
qq = 0
for q0 in range(M):
dt = q0+1
for q1 in range(M-q0-1):
indices1[qq] = q1
indices2[qq] = q1+dt
qq += 1
if b_monitor:
print("indices1:", indices1)
print("indices2:", indices2)
return (indices1, indices2) | 9b537d040463e6db2a0bd0616bc3110cf0c9e7f6 | 3,652,296 |
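For M = 4 repeated measurements there are 4-choose-2 = 6 pairs:
idx1, idx2 = get_all_comb_pairs(4)
print(idx1)  # [0. 1. 2. 0. 1. 0.]
print(idx2)  # [1. 2. 3. 2. 3. 3.]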
def gomc_sim_completed_properly(job, control_filename_str):
"""General check to see if the gomc simulation was completed properly."""
with job:
job_run_properly_bool = False
output_log_file = "out_{}.dat".format(control_filename_str)
if job.isfile(output_log_file):
# with open(f"workspace/{job.id}/{output_log_file}", "r") as fp:
with open(f"{output_log_file}", "r") as fp:
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "Move" in line:
split_move_line = line.split()
if (
split_move_line[0] == "Move"
and split_move_line[1] == "Type"
and split_move_line[2] == "Mol."
and split_move_line[3] == "Kind"
):
job_run_properly_bool = True
else:
job_run_properly_bool = False
return job_run_properly_bool | 95c5b9dec8e38f5a06ad4031c72d14629ebdefe3 | 3,652,297 |
import functools
import time
def debounce(timeout, **kwargs):
"""Use:
@debounce(text=lambda t: t.id, ...)
def on_message(self, foo=..., bar=..., text=None, ...)"""
keys = sorted(kwargs.items())
def wrapper(f):
@functools.wraps(f)
def handler(self, *args, **kwargs):
# Construct a tuple of keys from the input args
key = tuple(fn(kwargs.get(k)) for k, fn in keys)
curr = set()
if hasattr(self, '__debounce_curr'):
curr = self.__debounce_curr
prev = set()
if hasattr(self, '__debounce_prev'):
prev = self.__debounce_prev
now = time.time()
tick = time.time()
if hasattr(self, '__debounce_tick'):
tick = self.__debounce_tick
# Check the current and previous sets, if present
if key in curr or key in prev:
return
# Rotate and update
if now > tick:
prev = curr
curr = set()
tick = now + timeout
curr.add(key)
self.__debounce_curr = curr
self.__debounce_prev = prev
self.__debounce_tick = tick
# Call the wrapped function
return f(self, *args, **kwargs)
return handler
return wrapper | ac457a68834f1a6305ff4be8f7f19607f17e95fb | 3,652,298 |
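A minimal usage sketch with a hypothetical handler class: repeated calls with the same key inside the timeout window are dropped.
class Handler:
    @debounce(5.0, text=lambda t: t)
    def on_message(self, text=None):
        print('handling', text)

h = Handler()
h.on_message(text='hello')  # handled
h.on_message(text='hello')  # ignored: same key within the debounce window
h.on_message(text='world')  # handled: different key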
def isolated_add_event(event, quiet=True):
"""
Add an event object, but in its own transaction, not bound to an existing transaction scope
Returns a dict object of the event as was added to the system
:param event: event object
    :param quiet: boolean; if true, exceptions raised while adding the event are swallowed so the caller is not blocked. If false, exceptions are raised
:return:
"""
with session_scope() as session:
return add_event_json(event.to_dict(), session, quiet) | e2a20c21738c42dbed36d545b127131c13f98c59 | 3,652,299 |