content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
---|---|---|
def train_on(text):
""" Return a dictionary whose keys are alle the tuple of len PREFIX
of consecutive words inside text, and whose value is the list of
every single word which follows that tuple inside the text. For ex:
{('Happy', 'birthday'): ['to', 'dear'] ...} """
words = text.split()
assert len(words) > PREFIX
training = defaultdict(list)
for i in range(0, len(words) - PREFIX):
duo = tuple(words[i:i + PREFIX])
following = words[i + PREFIX]
training[duo].append(following)
return training | 40230bbb346cb4c98d6694fb0d18652e7d6bd4e7 | 3,652,550 |
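A minimal usage sketch for train_on, assuming PREFIX = 2 and the defaultdict import the function relies on (neither appears in the snippet):
from collections import defaultdict
PREFIX = 2  # assumed prefix length
text = "Happy birthday to you Happy birthday dear friend"
training = train_on(text)
# training[('Happy', 'birthday')] == ['to', 'dear']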
def learning_rate_decay(alpha, decay_rate, global_step, decay_step):
"""learning_rate_decay: updates the learning rate using
inverse time decay in numpy
Args:
alpha : is the original learning rate
decay_rate : is the weight used to determine the
rate at which alpha will decay
global_step : is the number of passes of gradient
descent that have elapsed
decay_step : is the number of passes of gradient descent
that should occur before alpha is decayed further
Returns:
the updated value for alpha
"""
alpha = alpha / (1 + decay_rate * int(global_step / decay_step))
return alpha | a98f893acc7f14dafcf2dea551df4eb44da07bc4 | 3,652,551 |
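A quick worked call illustrating the stepwise inverse-time decay:
learning_rate_decay(0.1, decay_rate=1, global_step=5, decay_step=10)   # 0.1, since int(5 / 10) == 0
learning_rate_decay(0.1, decay_rate=1, global_step=10, decay_step=10)  # 0.05, since int(10 / 10) == 1 and 0.1 / (1 + 1) == 0.05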
def update_studio(request):
"""updates the studio
"""
studio_id = request.params.get('studio_id')
studio = Studio.query.filter_by(id=studio_id).first()
name = request.params.get('name', None)
dwh = request.params.get('dwh', None)
wh_mon_start = get_time(request, 'mon_start')
wh_mon_end = get_time(request, 'mon_end')
wh_tue_start = get_time(request, 'tue_start')
wh_tue_end = get_time(request, 'tue_end')
wh_wed_start = get_time(request, 'wed_start')
wh_wed_end = get_time(request, 'wed_end')
wh_thu_start = get_time(request, 'thu_start')
wh_thu_end = get_time(request, 'thu_end')
wh_fri_start = get_time(request, 'fri_start')
wh_fri_end = get_time(request, 'fri_end')
wh_sat_start = get_time(request, 'sat_start')
wh_sat_end = get_time(request, 'sat_end')
wh_sun_start = get_time(request, 'sun_start')
wh_sun_end = get_time(request, 'sun_end')
if studio and name and dwh:
# update new studio
studio.name = name
studio.daily_working_hours = int(dwh)
wh = WorkingHours()
def set_wh_for_day(day, start, end):
if start != end:
wh[day] = [[start.seconds/60, end.seconds/60]]
else:
wh[day] = []
set_wh_for_day('mon', wh_mon_start, wh_mon_end)
set_wh_for_day('tue', wh_tue_start, wh_tue_end)
set_wh_for_day('wed', wh_wed_start, wh_wed_end)
set_wh_for_day('thu', wh_thu_start, wh_thu_end)
set_wh_for_day('fri', wh_fri_start, wh_fri_end)
set_wh_for_day('sat', wh_sat_start, wh_sat_end)
set_wh_for_day('sun', wh_sun_start, wh_sun_end)
studio.working_hours = wh
DBSession.add(studio)
# Commit will be handled by the zope transaction extension
return HTTPOk() | 2fbdcbd04bb0ec7d0b2f5790e59e9211c831066f | 3,652,552 |
import numpy as np
def flip_coin(num_of_experiments = 1000, num_of_flips = 30):
"""
Flip a coin `num_of_flips` times and repeat this experiment `num_of_experiments` times.
Return, for each possible number of heads, how many experiments produced that count.
"""
all_heads = []
for i in range(num_of_experiments):
heads = tails = 0
for counter in range(num_of_flips):
num = np.random.randint(0,2)
if num == 0:
heads += 1
else:
tails += 1
all_heads.append(heads)
# group the number of heads in all the experiments
flip_heads = []
for flip in range(num_of_flips + 1):
num_of_heads = 0
for h in all_heads:
if h == flip:
num_of_heads += 1
flip_heads.append(num_of_heads)
return flip_heads | 24ccd52693233f93f5c0bb7bb4f09220e86f320c | 3,652,553 |
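A short usage sketch of the head-count distribution it returns:
counts = flip_coin(num_of_experiments=1000, num_of_flips=30)
# counts[k] is the number of experiments that produced exactly k heads,
# so len(counts) == 31 and sum(counts) == 1000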
from pathlib import Path
def get_questions(
path: str,
uid2idx: dict = None,
path_data: Path = None,
) -> po.DataFrame:
"""
Identify correct answer text and filter out wrong distractors from question string
Get tokens and lemmas
Get explanation sentence ids and roles
"""
# Dropping questions without explanations hurts score
df = po.read_csv(path, sep="\t")
df = add_q_reformat(df)
# Preprocess texts
tokens, lemmas = preprocess_texts(df.q_reformat.tolist(), path_data)
df["tokens"], df["lemmas"], df["embedding"] = tokens, lemmas, None
# Get explanation uids and roles
exp_uids = []
exp_roles = []
exp_idxs = []
for exp_string in df.explanation.values:
_uids, _roles = extract_explanation(exp_string)
uids = []
roles = []
idxs = []
assert len(_uids) == len(_roles)
for i in range(len(_uids)):
if _uids[i] not in uid2idx:
continue
uids.append(_uids[i])
roles.append(_roles[i])
idxs.append(uid2idx[_uids[i]])
exp_uids.append(uids)
exp_roles.append(roles)
exp_idxs.append(idxs)
df["exp_uids"], df["exp_roles"], df[
"exp_idxs"] = exp_uids, exp_roles, exp_idxs
print(df.shape)
return df | 877c75f20b7b766655ecda5dc4bc63ada7ee593c | 3,652,554 |
def simple_command(device, cmd_id, data=None, receive=True):
"""
Raises:
HIDException -> if reading/writing to the USB device failed
KBProtocolException -> if the packet is too large
"""
cmd_packet = bytearray(EP_VENDOR_SIZE)
cmd_packet[0] = cmd_id
# Optional data component
if data is not None:
data = bytearray(data)
if len(data) > (EP_VENDOR_SIZE-1):
raise KBProtocolException("Data can't fit in one packet. Got {} "
"bytes, max is {}".format(len(data), EP_VENDOR_SIZE))
for i, byte in enumerate(data):
cmd_packet[i+1] = byte
device.write(cmd_packet)
if receive:
response = device.read()
packet_type = response[0]
while packet_type != cmd_id and packet_type != CMD_ERROR_CODE: # ignore other packets
response = device.read(timeout=2)
if response is None:
device.write(cmd_packet)
else:
packet_type = response[0]
if response[0] == CMD_ERROR_CODE:
raise_error_code(response[1])
elif response[0] != cmd_id:
raise KBProtocolException("Unexpected packet with packet_id: {}"
.format(response[0]))
return response[1:]
else:
return None | 57a5e237f2296fec1563c125cb934ce1914d8bac | 3,652,555 |
def dbopen(dbname, perm = 'r'):
"""Open a Datascope database"""
return Dbptr(dbname, perm) | 08a083def4f792927232eff5d625ae4e6f3355fb | 3,652,556 |
def to_nx(dsk):
"""
Code mainly identical to dask.dot.to_graphviz and kept compatible.
"""
collapse_outputs = False
verbose = False
data_attributes = {}
function_attributes = {}
g = nx.DiGraph()
seen = set()
connected = set()
for k, v in dsk.items():
k_name = name(k)
if istask(v):
func_name = name((k, "function")) if not collapse_outputs else k_name
if collapse_outputs or func_name not in seen:
seen.add(func_name)
attrs = function_attributes.get(k, {}).copy()
attrs.setdefault("label", key_split(k))
attrs.setdefault("shape", "circle")
g.add_node(func_name, **attrs)
if not collapse_outputs:
g.add_edge(func_name, k_name)
connected.add(func_name)
connected.add(k_name)
for dep in get_dependencies(dsk, k):
dep_name = name(dep)
if dep_name not in seen:
seen.add(dep_name)
attrs = data_attributes.get(dep, {}).copy()
attrs.setdefault("label", box_label(dep, verbose))
attrs.setdefault("shape", "box")
g.add_node(dep_name, **attrs)
g.add_edge(dep_name, func_name)
connected.add(dep_name)
connected.add(func_name)
elif ishashable(v) and v in dsk:
v_name = name(v)
g.add_edge(v_name, k_name)
connected.add(v_name)
connected.add(k_name)
if (not collapse_outputs or k_name in connected) and k_name not in seen:
seen.add(k_name)
attrs = data_attributes.get(k, {}).copy()
attrs.setdefault("label", box_label(k, verbose))
attrs.setdefault("shape", "box")
g.add_node(k_name, **attrs)
assert nx.dag.is_directed_acyclic_graph(g)
return g | 140b6a74ce7e75ddbc906bc4b4c7330e7585e0d8 | 3,652,557 |
def predict(model, img_base64):
"""
Returns the prediction for a given image.
Params:
model: the neural network (classifier).
"""
return model.predict_disease(img_base64) | 545a98dd682b81a1662878f91091615871562226 | 3,652,558 |
import hashlib
def get_hash(x: str):
"""Generate a hash from a string."""
h = hashlib.md5(x.encode())
return h.hexdigest() | 538c936c29867bb934776333fb2dcc73c06e23d0 | 3,652,559 |
def pair_force(r1, r2, par1, par2, sigma_c, box, r_cut, lj=True, coulomb=True):
"""Compute the sum of the Lennard Jones force and the short ranged part
of the Coulomb force between two particles.
Arguments:
r1 (ndarray): A one dimensional numpy-array with d elements (position of the first particle)
r2 (ndarray): A one dimensional numpy-array with d elements (position of the second particle)
par1 (ndarray): A one dimensional numpy-array with 4 elements (charge, epsillon, sigma, mass) for the first particle
par2 (ndarray): A one dimensional numpy-array with 4 elements (charge, epsillon, sigma, mass) for the second particle
sigma_c (float): A positive float (width of the gaussian distribution used to shield the particle)
box (ndarray): A one dimensional numpy-array with d elements (size of preriodic box)
r_cut (float): A positive float (cutoff radius)
lj (boolean): If True the Lannard Jones force is calculated
coulomb (boolean): If True the Coulomb force is calculated
Returns:
force * direction (ndarray): A one dimensional numpy-array with d elements (force acting on the first particle)
"""
dist = pbc(r1 - r2, box)
r12 = np.linalg.norm(dist)
force = 0
if r12 <= r_cut:
if lj:
epsilon = calc_eps(par1[1], par2[1])
sigma_lj = calc_sig(par1[2], par2[2])
rs = sigma_lj / r12
force += 24 * epsilon / r12 * (2 * rs**12 - rs**6)
if coulomb:
q1 = par1[0]
q2 = par2[0]
f1 = erfc(r12 / (np.sqrt(2) * sigma_c)) / r12
f2 = np.sqrt(2 / np.pi) / sigma_c * np.exp(- r12**2 / (2 * sigma_c**2))
force += q1 * q2 / (4 * np.pi * eps * r12) * (f1 + f2)
direction = dist / r12
return force * direction | 10c6eee7547f94c06e650a0a738aace3380de454 | 3,652,560 |
def delete_network_acl_entry(client, network_acl_id, num=100, egress=False, dry=True):
"""
Delete a network acl entry
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.delete_network_acl_entry
"""
try:
response = client.delete_network_acl_entry( Egress=egress, NetworkAclId=network_acl_id, RuleNumber=num, DryRun=dry)
print('Deleted %s %s' % (network_acl_id, ('(dry)' if dry else '')))
return response
except Exception as err:
handle(err) | e27e476f2fe37e7e0150a97ebd4b5e3cb93e86b1 | 3,652,561 |
import time
def mp_run(data, process_num, func, *args):
""" run func with multi process
"""
level_start = time.time()
partn = max(len(data) // process_num, 1)
start = 0
p_idx = 0
ps = []
while start < len(data):
local_data = data[start:start + partn]
start += partn
p = mp.Process(target=func, args=(local_data, p_idx) + args)
ps.append(p)
p.start()
p_idx += 1
for p in ps:
p.join()
for p in ps:
p.terminate()
return p_idx | 13576bb107eae5a49063bcba3d698eeb957dbb1e | 3,652,562 |
def spell(corpus):
"""
Train a Spelling Normalizer
Parameters
----------
corpus : list of strings. Prefer to feed with malaya.load_malay_dictionary().
Returns
-------
SPELL_NORMALIZE: Trained malaya.normalizer._SPELL_NORMALIZE class
"""
if not isinstance(corpus, list):
raise ValueError('corpus must be a list')
if not isinstance(corpus[0], str):
raise ValueError('corpus must be list of strings')
return _SPELL_NORMALIZE([unidecode(w) for w in corpus]) | 1aee5a941e1553f50540a5327ee0e3c4d1ce0bd3 | 3,652,563 |
def client_id_to_org_type_id(client_id):
"""
Client ID should be a string: "g:" + self._options['org'] + ":" +
self._options['type'] + ":" + self._options['id'],
"""
split = client_id.split(':')
if len(split) != 4:
raise InvalidClientId()
org = split[1]
device_type = split[2]
device_id = split[3]
return (org, device_type, device_id) | 475058962f81760dc65b19ddbdc1d74e0ec2f55e | 3,652,565 |
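For example, with the client id format described in the docstring:
org, device_type, device_id = client_id_to_org_type_id("g:myorg:sensor:dev001")
# ('myorg', 'sensor', 'dev001'); anything that does not split into 4 parts raises InvalidClientId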
def get_total_implements():
"""Obtiene los implementos totales solicitados en prestamos."""
total_implements = 0
for i in Loans.objects.all():
total_implements += i.ammount_implements
return total_implements | 5b8e2b21f8c31e33c60518fd4fba20eded614f05 | 3,652,566 |
from typing import Optional
from typing import Union
def _parse_maybe_array(
type_name: str, innermost_type: Optional[Union[ast_nodes.ValueType,
ast_nodes.PointerType]]
) -> Union[ast_nodes.ValueType, ast_nodes.PointerType, ast_nodes.ArrayType]:
"""Internal-only helper that parses a type that may be an array type."""
array_match = ARRAY_EXTENTS_PATTERN.search(type_name)
if array_match:
extents = tuple(
int(s.strip()) for s in ARRAY_N_PATTERN.findall(array_match.group(0)))
inner_type_str = type_name[:array_match.start()]
return ast_nodes.ArrayType(
inner_type=_parse_maybe_pointer(inner_type_str.strip(), innermost_type),
extents=extents)
else:
return _parse_maybe_pointer(type_name, innermost_type) | 8a284083e604688c2a1eff8767b6cb31b493cb07 | 3,652,567 |
def ema_decay_schedule(
base_rate: jnp.ndarray,
step: jnp.ndarray,
total_steps: jnp.ndarray,
use_schedule: bool,
) -> jnp.ndarray:
"""Anneals decay rate to 1 with cosine schedule."""
if not use_schedule:
return base_rate
multiplier = _cosine_decay(step, total_steps, 1.)
return 1. - (1. - base_rate) * multiplier | a6269162e1a93544031b241ff43e043971bec488 | 3,652,568 |
from typing import Callable
def _kill_filter(mm: MergedMiningCoordinator, filter_fn: Callable[[MergedMiningStratumProtocol], bool]) -> int:
""" Kill all workers that the filter `fltr` returns true for.
"""
count = 0
for protocol in filter(filter_fn, mm.miner_protocols.values()):
count += 1
protocol.transport.abort()
return count | 8a73427e46a418bf1d3ba974f73992dce0f1ad8c | 3,652,569 |
def get_node_layer_sort_preference(device_role):
"""Layer priority selection function
Layer sort preference is designed as numeric value.
This function identifies it by LAYERS_SORT_ORDER
object position by default. With numeric values,
the logic may be improved without changes on NeXt app side.
0(null) results undefined layer position in NeXt UI.
Valid indexes start with 1.
"""
for i, role in enumerate(LAYERS_SORT_ORDER, start=1):
if device_role == role:
return i
return 1 | 08fbdbcb272664498d3709ffc9f49dbb2042fef2 | 3,652,570 |
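A small sketch, assuming LAYERS_SORT_ORDER is an ordered sequence of role names (it is not defined in the snippet):
LAYERS_SORT_ORDER = ('core', 'distribution', 'access')
get_node_layer_sort_preference('distribution')  # 2
get_node_layer_sort_preference('unknown-role')  # 1 (fallback)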
def is_anagram(s,t):
"""True if strings s and t are anagrams.
"""
# We can use sorted() on a string, which will give a list of characters
# == will then compare two lists of characters, now sorted.
return sorted(s)==sorted(t) | 2b615f8180bcaa598e24c0772893c9a528bc5153 | 3,652,571 |
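For example:
is_anagram("listen", "silent")   # True
is_anagram("listen", "silence")  # False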
def f1_score(labels, predict, name=None):
"""
Streaming f1 score.
"""
predictions = tf.floor(predict + 0.5)
with tf.variable_scope(name, 'f1', (labels, predictions)):
epsilon = 1e-7
_, tp = tf.metrics.true_positives(labels, predictions)
_, fn = tf.metrics.false_negatives(labels, predictions)
_, fp = tf.metrics.false_positives(labels, predictions)
precision = tf.div(tp, epsilon + tp + fp, name='precision')
recall = tf.div(tp, epsilon + tp + fn, name='recall')
f1 = 2.0 * precision * recall / (precision + recall + epsilon)
return f1 | 243612cad4ca1a876ccfbccfe55fdeeed893d644 | 3,652,572 |
import requests
def test_notify_matrix_plugin_fetch(mock_post, mock_get):
"""
API: NotifyMatrix() Server Fetch/API Tests
"""
# Disable Throttling to speed testing
plugins.NotifyBase.request_rate_per_sec = 0
response_obj = {
'room_id': '!abc123:localhost',
'room_alias': '#abc123:localhost',
'joined_rooms': ['!abc123:localhost', '!def456:localhost'],
# Login details
'access_token': 'abcd1234',
'user_id': '@apprise:localhost',
'home_server': 'localhost',
}
def fetch_failed(url, *args, **kwargs):
# Default configuration
request = mock.Mock()
request.status_code = requests.codes.ok
request.content = dumps(response_obj)
if url.find('/rooms/') > -1:
# over-ride on room query
request.status_code = 403
request.content = dumps({
u'errcode': u'M_UNKNOWN',
u'error': u'Internal server error',
})
return request
mock_get.side_effect = fetch_failed
mock_post.side_effect = fetch_failed
obj = plugins.NotifyMatrix(
user='user', password='passwd', include_image=True)
assert isinstance(obj, plugins.NotifyMatrix) is True
# We would have failed to send our image notification
assert obj.send(user='test', password='passwd', body="test") is False
# Do the same query with no images to fetch
asset = AppriseAsset(image_path_mask=False, image_url_mask=False)
obj = plugins.NotifyMatrix(user='user', password='passwd', asset=asset)
assert isinstance(obj, plugins.NotifyMatrix) is True
# We would have failed to send our notification
assert obj.send(user='test', password='passwd', body="test") is False
# Disable Throttling to speed testing
plugins.NotifyBase.request_rate_per_sec = 0
response_obj = {
# Registration
'access_token': 'abcd1234',
'user_id': '@apprise:localhost',
'home_server': 'localhost',
# For room joining
'room_id': '!abc123:localhost',
}
# Default configuration
mock_get.side_effect = None
mock_post.side_effect = None
request = mock.Mock()
request.status_code = requests.codes.ok
request.content = dumps(response_obj)
mock_post.return_value = request
mock_get.return_value = request
obj = plugins.NotifyMatrix(include_image=True)
assert isinstance(obj, plugins.NotifyMatrix) is True
assert obj.access_token is None
assert obj._register() is True
assert obj.access_token is not None
# Cause retries
request.status_code = 429
request.content = dumps({
'retry_after_ms': 1,
})
code, response = obj._fetch('/retry/apprise/unit/test')
assert code is False
request.content = dumps({
'error': {
'retry_after_ms': 1,
}
})
code, response = obj._fetch('/retry/apprise/unit/test')
assert code is False
request.content = dumps({
'error': {}
})
code, response = obj._fetch('/retry/apprise/unit/test')
assert code is False | 27dde8766cdfd136104e647a5a97416a69982cb5 | 3,652,573 |
import copy
def site_summary_data(query, notime=True, extra="(1=1)"):
"""
Summary of jobs in different states for the errors page, to indicate whether the errors were caused by massive site failures or not
"""
summary = []
summaryResources = []
# remove jobstatus from the query
if 'jobstatus__in' in query:
del query['jobstatus__in']
# remove the time window limit for active jobs table
querynotime = copy.deepcopy(query)
if notime:
if 'modificationtime__castdate__range' in querynotime:
del querynotime['modificationtime__castdate__range']
ejquery = {'jobstatus__in': ['failed', 'finished', 'closed', 'cancelled']}
jvalues = ('cloud', 'computingsite', 'jobstatus', 'resourcetype', 'corecount')
orderby = ('cloud', 'computingsite', 'jobstatus')
summaryResources.extend(
Jobsactive4.objects.filter(**querynotime).exclude(**ejquery).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby))
summaryResources.extend(
Jobsactive4.objects.filter(**query).filter(**ejquery).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby))
summaryResources.extend(
Jobsdefined4.objects.filter(**querynotime).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby))
summaryResources.extend(
Jobswaiting4.objects.filter(**querynotime).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby))
summaryResources.extend(
Jobsarchived4.objects.filter(**query).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby))
summaryResourcesDict = {}
actualcorecount = 0
for sumS in summaryResources:
if sumS['corecount'] is None:
actualcorecount = 1
else:
actualcorecount = sumS['corecount']
if sumS['cloud'] not in summaryResourcesDict:
summaryResourcesDict[sumS['cloud']] = {}
if sumS['computingsite'] not in summaryResourcesDict[sumS['cloud']]:
summaryResourcesDict[sumS['cloud']][sumS['computingsite']] = {}
if sumS['jobstatus'] not in summaryResourcesDict[sumS['cloud']][sumS['computingsite']]:
summaryResourcesDict[sumS['cloud']][sumS['computingsite']][sumS['jobstatus']] = {}
if sumS['resourcetype'] not in summaryResourcesDict[sumS['cloud']][sumS['computingsite']][sumS['jobstatus']]:
summaryResourcesDict[sumS['cloud']][sumS['computingsite']][sumS['jobstatus']][sumS['resourcetype']] = {
'jobstatus__count': 0,
'corecount': actualcorecount
}
summaryResourcesDict[sumS['cloud']][sumS['computingsite']][sumS['jobstatus']][sumS['resourcetype']]['jobstatus__count'] += sumS['jobstatus__count']
summaryList = []
obj = {}
for cloud in summaryResourcesDict.keys():
for site in summaryResourcesDict[cloud].keys():
for jobstatus in summaryResourcesDict[cloud][site].keys():
jobscount = 0
obj['resource'] = {}
for i, resource in enumerate(summaryResourcesDict[cloud][site][jobstatus]):
if resource not in obj['resource']:
obj['resource'][resource] = {}
obj['resource'][resource]['jobstatus__count'] = {}
if resource not in obj['resource']:
obj['resource'][resource] = {}
obj['resource'][resource]['corecount'] = {}
obj['resource'][resource]['jobstatus__count'] = summaryResourcesDict[cloud][site][jobstatus][resource]['jobstatus__count']
obj['resource'][resource]['corecount'] = summaryResourcesDict[cloud][site][jobstatus][resource]['corecount']
jobscount += summaryResourcesDict[cloud][site][jobstatus][resource]['jobstatus__count']
if i == len(summaryResourcesDict[cloud][site][jobstatus]) - 1:
obj['cloud'] = cloud
obj['computingsite'] = site
obj['jobstatus'] = jobstatus
obj['jobstatus__count'] = jobscount
summaryList.append(obj)
obj = {}
return summaryList | 010ca33e4de15c74199fbf54c565119f493698cc | 3,652,574 |
def Epsilon(u):
"""Vector symmetric gradient."""
return Sym(Grad(u.transpose())) | ed1d163ca031ada0d1645029690fa53c3d2acfa0 | 3,652,575 |
def at(seq, msg, cmd=None, *args, **kwargs):
"""Output the comwdg"""
return translator(seq)(*COMWDG_CMD)() | dd98234261731c3048444ab7d99ec6ed34eb62f1 | 3,652,576 |
def get_directory(f):
"""Get a directory in the form of a list of entries."""
entries = []
while 1:
line = f.readline()
if not line:
print '(Unexpected EOF from server)'
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
if line == '.':
break
if not line:
print '(Empty line from server)'
continue
gtype = line[0]
parts = line[1:].split(TAB)
if len(parts) < 4:
print '(Bad line from server: %r)' % (line,)
continue
if len(parts) > 4:
if parts[4:] != ['+']:
print '(Extra info from server:',
print parts[4:], ')'
else:
parts.append('')
parts.insert(0, gtype)
entries.append(parts)
return entries | fdd83e040f23f5ab84e0eb7cef457dfd66159f78 | 3,652,577 |
def get_worker_status(worker):
"""Retrieve worker status by worker ID from redis."""
set_redis_worker_status_pool()
global WORKER_STATUS_POOL
# retrieve worker status
r = StrictRedis(connection_pool=WORKER_STATUS_POOL)
res = r.get(WORKER_STATUS_KEY_TMPL % worker)
return res.decode() if hasattr(res, "decode") else res | 886817f7995bc8259891b10699ec4d26587e0653 | 3,652,579 |
def lif_r_psc_aibs_converter(config, syn_tau=[5.5, 8.5, 2.8, 5.8]):
"""Creates a nest glif_lif_r_psc object"""
coeffs = config['coeffs']
threshold_params = config['threshold_dynamics_method']['params']
reset_params = config['voltage_reset_method']['params']
params = {'V_th': coeffs['th_inf'] * config['th_inf'] * 1.0e03 + config['El_reference'] * 1.0e03,
'g': coeffs['G'] / config['R_input'] * 1.0e09,
'E_L': config['El'] * 1.0e03 + config['El_reference'] * 1.0e03,
'C_m': coeffs['C'] * config['C'] * 1.0e12,
't_ref': config['spike_cut_length'] * config['dt'] * 1.0e03,
'a_spike': threshold_params['a_spike'] * 1.0e03,
'b_spike': threshold_params['b_spike'] * 1.0e-03,
'a_reset': reset_params['a'],
'b_reset': reset_params['b'] * 1.0e03,
'tau_syn': syn_tau, # in ms
'V_dynamics_method': 'linear_exact'}
return params | 091e45f44f9c777dac6c2b35fd51459a7947e301 | 3,652,580 |
import numpy as np
import pyBigWig
def get_bigwig_values(bigwig_path, chrom_name, chrom_end, chrom_start=0):
"""
Get the values for a genomic region of interest from a bigwig file.
:param bigwig_path: Path to the bigwig file
:param chrom_name: Chromosome name
:param chrom_end: chromosome end
:param chrom_start: chromosome start
:return: Bigwig values from the region given
"""
with pyBigWig.open(bigwig_path) as input_bw:
return np.nan_to_num(input_bw.values(chrom_name, chrom_start, chrom_end, numpy=True)) | 37fe5a40a5fde1ccaee7cac32d8b9beb68a65c51 | 3,652,581 |
def get_successors(state, maxwords):
"""Traverses state graph to find valid anagrams."""
terminal = len(state['chars']) == 0
# Check whether the state is invalid and should be pruned
if not is_valid(state['anagram'], terminal, maxwords):
return []
# If valid terminal state, stop search and return
if terminal:
return [state['anagram']]
# Continue to recursively explore subsequent states
next_states = []
for c in state['chars']:
chars = state['chars'].copy()
chars.subtract({c: 1})
if chars[c] == 0:
del chars[c]
next_states.append({
'anagram': state['anagram'] + c,
'chars': chars,
})
# Add an additional next state for word breaks
if state['anagram'] != '' and state['anagram'][-1] != ' ':
next_states.append({
'anagram': state['anagram'] + ' ',
'chars': state['chars'],
})
anagrams = []
for next_state in next_states:
anagrams += get_successors(next_state, maxwords=maxwords)
return anagrams | 9c842edc378a781195ef41ed58c7952f216b642e | 3,652,583 |
def read_and_parse_cdl_file(file_name):
"""
Reads relevant information from a "cdl" file
"""
if file_name is None:
return None
wl_map = {}
bl_map = {}
colclk_wl_map = {}
# Parse line-by-line
with open(file_name, "r") as fp:
for line in fp:
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
fields = split_cdl_line(line)
if not fields:
continue
# Row definition
if fields[0] == "define_row":
wl_idx = fields.index("-WL_range")
row = 0
for pair in fields[wl_idx+1]:
if isinstance(pair, list) and len(pair) == 2:
wl_map[row] = (int(pair[0]), int(pair[1]),)
row += 1
# Clock column definition
elif fields[0] == "define_colclk_instances":
wl_idx = fields.index("-WL_Port")
row_idx = fields.index("-row")
wl = int(fields[wl_idx+1])
row = int(fields[row_idx+1])
colclk_wl_map[row] = (wl, wl,)
# Column definition
elif fields[0] == "define_column":
bl_idx = fields.index("-BL_range")
col = 0
for pair in fields[bl_idx+1]:
if isinstance(pair, list) and len(pair) == 2:
bl_map[col] = (int(pair[0]), int(pair[1]),)
col += 1
data = {
"colclk_wl_map": colclk_wl_map,
"wl_map": wl_map,
"bl_map": bl_map,
}
return data | e1bfbb75f473932861bb2e804dd0609c62544cf3 | 3,652,584 |
import pandas as pd
def detect_outlier_at_index(
srs: pd.Series,
idx: int,
n_samples: int,
z_score_threshold: float,
) -> bool:
"""
Check if a value at index `idx` in a series is an outlier.
The passed series is supposed to be ordered by increasing timestamps.
This function
- detects z-score window index boundaries with respect to index order and number of samples
- computes the z-score of the current element with respect to the z-score window values
- compares the z-score to the threshold to declare the current element an outlier
:param srs: input series
:param idx: numerical index of a value to check
:param n_samples: number of samples in z-score window
:param z_score_threshold: threshold to mark a value as an outlier based on
its z-score in the window
:return: whether the element at index idx is an outlier
"""
# Set z-score window boundaries.
window_first_index = max(0, idx - n_samples)
# Get a series window to compute z-score for.
window_srs = srs.iloc[window_first_index : idx + 1]
# Compute z-score of a value at index.
z_score = (srs.iloc[idx] - window_srs.mean()) / window_srs.std()
# Return if a value at index is an outlier.
# Done via `<=` since a series can contain None values that should be detected
# as well but will result to NaN if compared to the threshold directly.
is_outlier = not (abs(z_score) <= z_score_threshold)
return is_outlier | 65a4d7e661f6cf4641d9cd82d1bb31c5e2d21616 | 3,652,585 |
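A minimal call sketch, reusing the pandas import above; the last value is far outside the window's spread, so its z-score exceeds the threshold:
srs = pd.Series([10.0, 10.1, 9.9, 10.0, 10.2, 9.8, 10.1, 55.0])
detect_outlier_at_index(srs, idx=7, n_samples=7, z_score_threshold=2.0)  # True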
def _organize_parameter(parameter):
"""
Convert operation parameter message to its dict format.
Args:
parameter (OperationParameter): Operation parameter message.
Returns:
dict, operation parameter.
"""
parameter_result = dict()
parameter_keys = [
'mapStr',
'mapBool',
'mapInt',
'mapDouble',
]
for parameter_key in parameter_keys:
base_attr = getattr(parameter, parameter_key)
parameter_value = dict(base_attr)
# convert str 'None' to None
for key, value in parameter_value.items():
if value == 'None':
parameter_value[key] = None
parameter_result.update(parameter_value)
# drop `mapStrList` and `strValue` keys in result parameter
str_list_para = dict(getattr(parameter, 'mapStrList'))
result_str_list_para = dict()
for key, value in str_list_para.items():
str_list_para_list = list()
for str_ele in getattr(value, 'strValue'):
str_list_para_list.append(str_ele)
str_list_para_list = list(map(lambda x: None if x == '' else x, str_list_para_list))
result_str_list_para[key] = str_list_para_list
parameter_result.update(result_str_list_para)
return parameter_result | 8cbd7c863bb244e71266a573ba756647d0ba13ea | 3,652,586 |
def colorpicker(request):
"""
Controller for the app home page.
"""
my_param = MyParamColor()
context = get_context(request, my_param)
return render(request, 'tethys_django_form_tutorial/colorpicker.html', context) | 071f587683a24c101a7963a3934c989570c0fa66 | 3,652,587 |
def translate_date(default=defaults.get('language')):
"""Parse/translate a date."""
d = request.args.get('date')
if not d:
raise RuntimeError(_('Date is mandatory.'))
dest_lang = request.args.get('dest') if request.args.get('dest') else default
variation = request.args.get('variation') if request.args.get('variation') else 'short'
d_list = d.split('/')
if request.args.get('src') == 'es':
d = date(year=int(d_list[2]), month=int(d_list[1]), day=int(d_list[0]))
else:
d = date(*(int(part) for part in d_list))
return render_template_string(source=get_date(d=d, f=variation, l=dest_lang)) | ada6f4416e227414dfc6f32fc3387c8b38830e70 | 3,652,588 |
from typing import Any
from typing import Union
from typing import Optional
def check_call(
*command: Any,
working_directory: Union[PathLike, str] = ".",
verbose: bool = False,
quoted: bool = False,
**kwargs: Any,
) -> Optional[str]:
"""Proxy for subprocess.check_call"""
return check_run(
*command, working_directory=working_directory, verbose=verbose, quoted=quoted, **kwargs
) | 384cd78599355e694445a7c682613672bba374a1 | 3,652,589 |
from typing import Tuple
def fit_client(client: Client, weights: Weights) -> Tuple[Weights, int]:
"""Refine weights on a single client."""
return client.fit(weights) | db8e6003f452a5147274ac6e83df7d216ca46c91 | 3,652,590 |
def _find_rpms_in_packages(koji_api, name_list, major_minor):
"""
Given a list of package names, look up the RPMs that are built in them.
Of course, this is an inexact science to do generically; contents can
vary from build to build, and multiple packages could build the same RPM name.
We will first look for the latest build in the tags for the given
major_minor version. If not there, we will look in brew for the package
name and choose the latest build.
:koji_api: existing brew connection
:name_list: list of package names to search for
:major_minor: minor version of OCP to search for builds in
Returns: a map of package_name: set(rpm_names)
"""
rpms_for_package = {}
tags = _tags_for_version(major_minor)
for package in name_list:
for tag in tags:
for build in koji_api.getLatestBuilds(tag=tag, package=package):
rpm_list = set(rpm["name"] for rpm in koji_api.listBuildRPMs(build["build_id"]))
rpms_for_package.setdefault(package, set()).update(rpm_list)
if package not in rpms_for_package:
# it wasn't in our tags; look for it by name
pkg_info = koji_api.getPackage(package)
if not pkg_info:
continue
latest_builds = koji_api.listBuilds(packageID=pkg_info["id"], state=1, queryOpts=dict(limit=1))
if not latest_builds:
continue
rpm_list = set(rpm["name"] for rpm in koji_api.listBuildRPMs(latest_builds[0]["build_id"]))
rpms_for_package[package] = set(rpm_list)
return rpms_for_package | edfb55f0b6997d8f930c8d93c2ee1be1c111bcfc | 3,652,591 |
def calculate_algorithm_tags(analyses):
"""
Calculate the algorithm tags (eg. "ip", True) that should be applied to a sample document based on a list of its
associated analyses.
:param analyses: the analyses to calculate tags for
:type analyses: list
:return: algorithm tags to apply to the sample document
:rtype: dict
"""
pathoscope = False
nuvs = False
for analysis in analyses:
if pathoscope is not True and analysis["algorithm"] in PATHOSCOPE_TASK_NAMES:
pathoscope = analysis["ready"] or "ip" or pathoscope
if nuvs is not True and analysis["algorithm"] == "nuvs":
nuvs = analysis["ready"] or "ip" or nuvs
if pathoscope is True and nuvs is True:
break
return {
"pathoscope": pathoscope,
"nuvs": nuvs
} | b2b13e3a0ccd21f446c5406baa966b2c0c4c6be9 | 3,652,592 |
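A small illustration of the ready/"ip" tagging, assuming "nuvs" is not in PATHOSCOPE_TASK_NAMES (the constant is not shown in the snippet):
calculate_algorithm_tags([{"algorithm": "nuvs", "ready": False}])
# {'pathoscope': False, 'nuvs': 'ip'}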
import json
def open_json(filepath):
"""
Returns the contents of an open .json file parsed into Python objects.
:param filepath: .json file path
:returns: parsed JSON content
:rtype: list
"""
with open(filepath) as f:
notes = json.load(f)
return notes | a7cae15880ee1caaaf7bfa8c1aec98f5f83debe7 | 3,652,593 |
import json
def remove_user_list():
"""
Endpoint to remove a specific list or a complete user
---
tags:
- User Methods
parameters:
- name: user
type: string
in: query
required: true
description: user you want to query
- name: list
type: string
in: query
required: false
description: specific list that belong to a user
responses:
400:
description: Incorrect dbs used
200:
description: Your list was deleted
"""
to_remove_user = request.args.get('user')
if to_remove_user is not None:
validation = data_validator.validate_json_for_user(to_remove_user)
to_remove_list = request.args.get('list', default=None)
if to_remove_list is not None:
data_validator.validate_json_for_list(to_remove_list)
return_object = logic_for_users_controller_delete.worker_for_delete(to_remove_user, to_remove_list)
if return_object['status_code'] == 200:
res = json.dumps(return_object['body'], indent=4)
return res, 200
else:
res = json.dumps(return_object['body'].__dict__, indent=4)
return res, return_object['status_code'] | b53660edd56fcf5bbe061331d5f2b8756f621dd8 | 3,652,594 |
import requests
def upload(f, content_type, token, api_key):
"""Upload a file with the given content type to Climate
This example supports files up to 5 MiB (5,242,880 bytes).
Returns the upload id if the upload is successful, False otherwise.
"""
uri = '{}/v4/uploads'.format(api_uri)
headers = {
'authorization': bearer_token(token),
'x-api-key': api_key
}
md5 = file.md5(f)
length = file.length(f)
data = {
'md5': md5,
'length': length,
'contentType': content_type
}
# initiate upload
res = requests.post(uri, headers=headers, json=data)
Logger().info(to_curl(res.request))
if res.status_code == 201:
upload_id = res.json()
Logger().info("Upload Id: %s" % upload_id)
put_uri = '{}/{}'.format(uri, upload_id)
# for this example, size is assumed to be small enough for a
# single upload (less than or equal to 5 MiB)
headers['content-range'] = 'bytes {}-{}/{}'.format(0,
(length - 1),
length)
headers['content-type'] = binary_content_type
f.seek(0)
# send image
for position in range(0, length, CHUNK_SIZE):
buf = f.read(CHUNK_SIZE)
headers['content-range'] = 'bytes {}-{}/{}'.format(
position, position + len(buf) - 1, length)
try:
res = requests.put(put_uri, headers=headers, data=buf)
Logger().info(headers)
except Exception as e:
Logger().error("Exception: %s" % e)
if res.status_code == 204:
return upload_id
return False | d6ead1f029811ec5894848b71841fd008068cee0 | 3,652,595 |
def get_node_name_centres(nodeset: Nodeset, coordinates_field: Field, name_field: Field):
"""
Find mean locations of node coordinate with the same names.
:param nodeset: Zinc Nodeset or NodesetGroup to search.
:param coordinates_field: The coordinate field to evaluate.
:param name_field: The name field to match.
:return: Dict of names -> coordinates.
"""
components_count = coordinates_field.getNumberOfComponents()
fieldmodule = nodeset.getFieldmodule()
fieldcache = fieldmodule.createFieldcache()
name_records = {} # name -> (coordinates, count)
nodeiter = nodeset.createNodeiterator()
node = nodeiter.next()
while node.isValid():
fieldcache.setNode(node)
name = name_field.evaluateString(fieldcache)
coordinates_result, coordinates = coordinates_field.evaluateReal(fieldcache, components_count)
if name and (coordinates_result == RESULT_OK):
name_record = name_records.get(name)
if name_record:
name_centre = name_record[0]
for c in range(components_count):
name_centre[c] += coordinates[c]
name_record[1] += 1
else:
name_records[name] = [ coordinates, 1 ]
node = nodeiter.next()
# divide centre coordinates by count
name_centres = {}
for name in name_records:
name_record = name_records[name]
name_count = name_record[1]
name_centre = name_record[0]
if name_count > 1:
scale = 1.0/name_count
for c in range(components_count):
name_centre[c] *= scale
name_centres[name] = name_centre
return name_centres | 2dc1e670999d9491e52efce02e5d7ecd22b75226 | 3,652,596 |
from enum import Enum
def pred(a):
"""
pred :: a -> a
the predecessor of a value. For numeric types, pred subtracts 1.
"""
return Enum[a].pred(a) | 070bf20e7b7ecd694806e78bd705e872b2fd8464 | 3,652,597 |
def pascal_to_snake(pascal_string):
"""Return a snake_string for a given PascalString."""
camel_string = _pascal_to_camel(pascal_string)
snake_string = _camel_to_snake(camel_string)
return "".join(snake_string) | 69c54fd8600878af2a8d168659a781b8389419ce | 3,652,599 |
def collect_targets_from_attrs(rule_attrs, attrs):
"""Returns a list of targets from the given attributes."""
result = []
for attr_name in attrs:
_collect_target_from_attr(rule_attrs, attr_name, result)
return [target for target in result if is_valid_aspect_target(target)] | 6be1731049f6970004763f5e9ec7d0a3bde76189 | 3,652,600 |
from typing import Tuple
def extract_codes(text: str) -> Tuple[str, ...]:
"""Extract names of warnings from full warning text."""
match = CODES_PAT.search(text)
if not match:
raise ValueError("No warning code found")
return tuple(match.group(1).split(",")) | 6727049c195197ed2407f30093c362a2c6f35cd4 | 3,652,601 |
def task_list(request, pk):
"""
View that renders the task list for the user with primary key `pk`, filtered by the optional `q` title search parameter.
"""
user_model = User.objects.filter(is_staff=False)
task_model = Task.objects.filter(user=pk)
user_detail = User.objects.get(pk=pk)
query = request.GET.get('q')
if query:
task_model = task_model.filter(
Q(title__icontains=query)
)
return render(request, 'home.html',
{"user_model": user_model, 'task_model': task_model, 'user_detail': user_detail}) | dbb6545ca66a367b2b3e89a494ac8a9bbdbbb341 | 3,652,602 |
import json
def load_credentials():
"""
load_credentials
:return: dict
"""
with open("credentials.json", "r", encoding="UTF-8") as stream:
content = json.loads(stream.read())
return content | 2f08fc4e897a7c7eb91de804158ee67cd91635d0 | 3,652,603 |
def get_utm_string_from_sr(spatialreference):
"""
return utm zone string from spatial reference instance
"""
zone_number = spatialreference.GetUTMZone()
if zone_number > 0:
return str(zone_number) + 'N'
elif zone_number < 0:
return str(abs(zone_number)) + 'S'
else:
return str(zone_number) | 50f01758f7ee29f1b994d36cda34b6b36157fd9e | 3,652,604 |
def messages_count(name):
"""
Get message count for queue
curl -X GET -H 'Accept: application/json' http://localhost:8080/queues/C13470112/msgs/count
curl -X GET -H 'Accept: application/json' 83.212.127.232:8080/queues/C13470112/msgs/count
"""
conn = get_conn()
queue = conn.get_queue(name)
count = queue.count()
resp = "Queue "+name+" has "+str(count)+" messages\n"
return Response(response=resp, mimetype="application/json") | 86abcbc6a9bb81f0ce8a6a19941761c042f5a7e9 | 3,652,605 |
def return_intersect(cameraList):
"""
Calculates the intersection of the Camera objects in the *cameraList*.
Function returns an empty Camera if there exists no intersection.
Parameters:
cameraList : *list* of *camera.Camera* objects
A list of cameras from the camera.Camera class, each containing
a *poly* and a *coordsList*.
Returns:
intersectCam : *camera.Camera* object
An object from the camera.Camera class that is the
intersection between all cameras in the cameraList. If there
exists no intersection between any cameras in the camerList,
an empty Camera will be returned.
"""
intersectCam = None
for camera in cameraList:
if intersectCam is None: # Initiates the intersectCam variable
intersectCam = camera
else:
intersectCam = intersectCam.intersect(camera)
return intersectCam | a47613b8d79c4a4535cd5e7e07aa3b26dea019a5 | 3,652,606 |
def import_measurements(task, subject, gsrn, session):
"""
Imports measurements for a single MeteringPoint, and starts a
start_submit_measurement_pipeline() pipeline for each of the newly
imported measurements.
:param celery.Task task:
:param str subject:
:param str gsrn:
:param sqlalchemy.orm.Session session:
"""
__log_extra = {
'gsrn': gsrn,
'subject': subject,
'pipeline': 'import_measurements',
'task': 'import_measurements',
}
@atomic
def __import_measurements(session):
"""
Import and save to DB as an atomic operation
"""
return importer.import_measurements_for(meteringpoint, session)
# Load MeteringPoint from DB
try:
meteringpoint = MeteringPointQuery(session) \
.is_active() \
.has_gsrn(gsrn) \
.one()
except orm.exc.NoResultFound:
raise
except Exception as e:
raise task.retry(exc=e)
# Import measurements into DB
try:
measurements = __import_measurements()
except Exception as e:
logger.exception('Failed to import measurements from ElOverblik, retrying...', extra=__log_extra)
raise task.retry(exc=e)
# Submit each measurement to ledger in parallel
for measurement in measurements:
task = build_submit_measurement_pipeline(
measurement, meteringpoint, session)
task.apply_async()
# if measurements:
# tasks = [
# build_submit_measurement_pipeline(measurement, meteringpoint, session)
# for measurement in measurements
# ]
#
# group(*tasks).apply_async() | 6f0fc4aec546c5cf7b23bf2471ac625639e9dbbb | 3,652,609 |
import pandas
from typing import List
def add_agg_series_to_df(
df: pandas.DataFrame, grouped_levels: List[str], bottom_levels: List[str]
) -> pandas.DataFrame:
"""
Add aggregate series columns to wide dataframe.
Parameters
----------
df : pandas.DataFrame
Wide dataframe containing bottom level series.
grouped_levels : List[str]
Grouped level, underscore delimited, column names.
bottom_levels : List[str]
Bottom level, underscore delimited, column names.
Returns
-------
pandas.DataFrame
Wide dataframe with all series in hierarchy.
"""
component_cols = _get_bl(grouped_levels, bottom_levels)
# Add series as specified grouping levels
for i, cols in enumerate(component_cols):
df[grouped_levels[i]] = df[cols].sum(axis=1)
return df | 7c3b7b526c394c8a24bf754365dbc809476b7336 | 3,652,610 |
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
"""
Convenience layer that performs a convolution, a ReLU, and a pool.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
- pool_param: Parameters for the pooling layer
Returns a tuple of:
- out: Output from the pooling layer
- cache: Object to give to the backward pass
"""
convOut, conv_cache = layers.conv_forward(x, w, b, conv_param)
reluOut, relu_cache = layers.relu_forward(convOut)
out, pool_cache = layers.max_pool_forward(reluOut, pool_param)
cache = (conv_cache, relu_cache, pool_cache)
return out, cache | d9a32950d1b56b4843938b339c7233e7fc87c5cc | 3,652,611 |
import numpy as np
def avg_pixelwise_var(images_seen: np.int16):
"""
Computes the variance for every pixel p across all images, resulting in a matrix holding
the variance for each pixel p, then calculates the average of that variance across all
pixels. This allows us to compensate for different fov sizes.
Note: images are normalized to [-1,1] before calculations
Params
------
images_seen
A numpy matrix holding numpy versions of all of our images
Returns
-------
The aaverage pixelwise variation across all images, as a float
"""
# Computes the variance
images = (images_seen.astype(np.float32) - 127.5) / 127.5 # Normalize to [-1,1]
variance_matrix = np.var(images, 0)
# Returns the average of that variance
return(np.sum(variance_matrix)/variance_matrix.size) | 4b6196ddd25c0cd3ad0cd7cb1928b99772aa563f | 3,652,612 |
import numpy as np
def get_r2_matrix(ts):
"""
Returns the matrix for the specified tree sequence. This is computed
via a straightforward Python algorithm.
"""
n = ts.get_sample_size()
m = ts.get_num_mutations()
A = np.zeros((m, m), dtype=float)
for t1 in ts.trees():
for sA in t1.sites():
assert len(sA.mutations) == 1
mA = sA.mutations[0]
A[sA.id, sA.id] = 1
fA = t1.get_num_samples(mA.node) / n
samples = list(t1.samples(mA.node))
for t2 in ts.trees(tracked_samples=samples):
for sB in t2.sites():
assert len(sB.mutations) == 1
mB = sB.mutations[0]
if sB.position > sA.position:
fB = t2.get_num_samples(mB.node) / n
fAB = t2.get_num_tracked_samples(mB.node) / n
D = fAB - fA * fB
r2 = D * D / (fA * fB * (1 - fA) * (1 - fB))
A[sA.id, sB.id] = r2
A[sB.id, sA.id] = r2
return A | e6a3eca421c40c9b9bbe218e7f6179eda0e07a00 | 3,652,613 |
def omdb_title(
api_key: str,
id_imdb: str = None,
media: str = None,
title: str = None,
season: int = None,
episode: int = None,
year: int = None,
plot: str = None,
cache: bool = True,
) -> dict:
"""
Looks up media by id using the Open Movie Database.
Online docs: http://www.omdbapi.com/#parameters
"""
if (not title and not id_imdb) or (title and id_imdb):
raise MnamerException("either id_imdb or title must be specified")
elif plot and plot not in OMDB_PLOT_TYPES:
raise MnamerException(
"plot must be one of %s" % ",".join(OMDB_PLOT_TYPES)
)
url = "http://www.omdbapi.com"
parameters = {
"apikey": api_key,
"i": id_imdb,
"t": title,
"y": year,
"season": season,
"episode": episode,
"type": media,
"plot": plot,
}
parameters = clean_dict(parameters)
status, content = request_json(url, parameters, cache=cache)
error = content.get("Error") if isinstance(content, dict) else None
if status == 401:
raise MnamerException("invalid API key")
elif status != 200 or not isinstance(content, dict):
raise MnamerNetworkException("OMDb down or unavailable?")
elif error:
raise MnamerNotFoundException(error)
return content | 54efaba216b7de203fe6960f58a8ebb93b980c4c | 3,652,615 |
def get_status(addr):
"""Get the current status of a minecraft server.
addr -- server address
Returns an mcstatus object.
"""
server = MinecraftServer.lookup(addr)
try:
return server.status()
except Exception:
return None | 9e5a346d3cec803005ef0c65d24f929b56dfa68f | 3,652,616 |
def calculate_losses(estimator, input_fn, labels):
"""Get predictions and losses for samples.
The assumptions are 1) the loss is cross-entropy loss, and 2) the user has
specified prediction mode to return predictions, e.g.,
when mode == tf.estimator.ModeKeys.PREDICT, the model function returns
tf.estimator.EstimatorSpec(mode=mode, predictions=tf.nn.softmax(logits)).
Args:
estimator: model to make prediction
input_fn: input function to be used in estimator.predict
labels: array of size (n_samples, ), true labels of samples (integer valued)
Returns:
preds: probability vector of each sample
loss: cross entropy loss of each sample
"""
pred = np.array(list(estimator.predict(input_fn=input_fn)))
loss = log_loss(labels, pred)
return pred, loss | 1a25519d661a6de185c39bb9c65a23a3eea71971 | 3,652,617 |
def text_cleaning(value, stopwords=None):
"""Applies the four cleaning funtions to a value.
Turns value into string, makes lowercase, strips trailing and leading spaces, and removes digits, punctuation, and stopwords
Args:
value (str): string to be cleaned
Returns:
str_out (str): string after cleaning
"""
value = str_lower_strip(value)
value = remove_digits(value)
value = remove_punctuation(value)
value = remove_stopwords(value, stopwords)
str_out = value
return str_out | 291f4150601b7537cbb4d10cb53598dcb9a83829 | 3,652,618 |
import numpy as np
def calc_pts_lag(npts=20):
"""
Returns Gauss-Laguerre quadrature points rescaled for line scan integration
Parameters
----------
npts : {15, 20, 25}, optional
The number of quadrature points to use.
Notes
-----
The scale is set internally as the best rescaling for a line scan
integral; it was checked numerically for the allowed npts.
Acceptable pts/scls/approximate line integral scan error:
(pts, scl ) : ERR
------------------------------------
(15, 0.072144) : 0.002193
(20, 0.051532) : 0.001498
(25, 0.043266) : 0.001209
The previous HG(20) error was ~0.13ish
"""
scl = { 15:0.072144,
20:0.051532,
25:0.043266}[npts]
pts0, wts0 = np.polynomial.laguerre.laggauss(npts)
pts = np.sinh(pts0*scl)
wts = scl*wts0*np.cosh(pts0*scl)*np.exp(pts0)
return pts, wts | dc491bc8dd46f81809a0dc06da8c123357736622 | 3,652,619 |
def APPEND(*ext, **kw):
"""Decorator to call XDWAPI with trailing arguments *ext.
N.B. Decorated function must be of the same name as XDWAPI's one.
"""
def deco(api):
@wraps(api)
def func(*args, **kw):
args = list(args)
if "codepage" in kw:
args.append(kw["codepage"])
args.extend(ext)
return TRY(getattr(DLL, api.__name__), *args)
return func
return deco | c73dc1b192835a0eefa53f660b1af4626a3ab75c | 3,652,620 |
def stations_within_radius(stations, centre, r):
"""Returns a list of all stations (type MonitoringStation) within radius r of a geographic coordinate x."""
stations_inside_radius = []
for station, distance in stations_by_distance(stations, centre):
# Check if distance is inside the requried radius
if distance < r:
stations_inside_radius.append(station)
# Return the list
return stations_inside_radius | 8182bdfc0d46ee64e98358c06b3d4787a0f1fa52 | 3,652,621 |
def manage_topseller(request, template_name="manage/marketing/topseller.html"):
"""
"""
inline = manage_topseller_inline(request, as_string=True)
# amount options
amount_options = []
for value in (10, 25, 50, 100):
amount_options.append({
"value": value,
"selected": value == request.session.get("topseller-amount")
})
return render_to_string(template_name, request=request, context={
"topseller_inline": inline,
"amount_options": amount_options,
}) | e9af634b66a7f7631a0bb7633cc445f05efb615a | 3,652,622 |
from typing import Optional
def embedded_services(request: FixtureRequest) -> Optional[str]:
"""
Enable parametrization for the same cli option
"""
return getattr(request, 'param', None) or request.config.getoption('embedded_services', None) | 908a48d9fa8696e6970fe5884632f1b373063667 | 3,652,624 |
def vigenere(plaintext,cypher):
"""Implementation of vigenere cypher"""
i = 0
cyphertext = ""
for character in plaintext:
n = ord(cypher[i%len(cypher)].lower())-97
new_char = rot_char(character, n)
cyphertext += new_char
if new_char != ' ':
i += 1
return cyphertext | 2b5cdd839bcfc0e55cdac65f9752cf88bd34c2e2 | 3,652,625 |
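A usage sketch, assuming rot_char(c, n) shifts a letter by n positions and leaves spaces untouched (it is not defined in the snippet):
vigenere("attack at dawn", "lemon")  # 'lxfopv ef rnhr' under that assumption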
def get_local_unit_slip_vector_SS(strike, dip, rake):
"""
Compute the STRIKE SLIP components of a unit slip vector.
Args:
strike (float): Clockwise angle (deg) from north of the line at the
intersection of the rupture plane and the horizontal plane.
dip (float): Angle (degrees) between rupture plane and the horizontal
plane normal to the strike (0-90 using right hand rule).
rake (float): Direction of motion of the hanging wall relative to the
foot wall, as measured by the angle (deg) from the strike vector.
Returns:
Vector: Unit slip vector in 'local' N-S, E-W, U-D coordinates.
"""
strike = np.radians(strike)
dip = np.radians(dip)
rake = np.radians(rake)
sx = np.cos(rake) * np.sin(strike)
sy = np.cos(rake) * np.cos(strike)
sz = 0.0
return Vector(sx, sy, sz) | dddedaeaabe91137c38bbaa2ec2bb1d42a6629c7 | 3,652,626 |
def get_country_code(country_name):
"""Gets the code of the country given its name"""
for code, name in COUNTRIES.items():
if name == country_name:
return code | bb4a3eebae0b14fc8207ef4301812d3d305a8dfd | 3,652,627 |
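A small sketch of how it would be used, assuming a COUNTRIES mapping of codes to names:
COUNTRIES = {'ad': 'Andorra', 'ae': 'United Arab Emirates', 'af': 'Afghanistan'}
get_country_code('Andorra')    # 'ad'
get_country_code('Neverland')  # None (no match falls through without an explicit return)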
def build_model(cfg, train_cfg=None, test_cfg=None):
"""Build model."""
if train_cfg is None and test_cfg is None:
return build(cfg, MODELS)
else:
return build(cfg, MODELS, dict(train_cfg=train_cfg, test_cfg=test_cfg)) | f47aa433bf2cbd637e9ea2e8e842bab9feb12ab1 | 3,652,628 |
def find_instruction_type(opcode: str) -> InstructionType:
"""Finds instruction type for object instruction
Parameters
----------
opcode : str
opcode of instruction in hex
Returns
-------
InstructionType
type of instruction using InstructionType enum
"""
# R type instructions always have opcode = 00
if opcode == "00":
i_type = InstructionType.R
# I type instructions have opcode > 03
elif opcode > "03":
i_type = InstructionType.I
return i_type | a8f5002834f9e9e847ef4f848a13f6e8037948f6 | 3,652,629 |
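For example:
find_instruction_type("00")  # InstructionType.R
find_instruction_type("23")  # InstructionType.I (hex string comparison: "23" > "03")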
import string
def gen_tier_id(inst, id_base, tier_type=None, alignment=None, no_hyphenate=False):
"""
Unified method to generate a tier ID string. (See: https://github.com/goodmami/xigt/wiki/Conventions)
"""
# In order to number this item correctly, we need to decide how many tiers of the same type
# there are. This is done by systematically adding filters to the list.
filters = []
# First, do we align with another item? (Either segmentation, alignment, or head/dep)
if alignment is not None:
filters.append(lambda x: aln_match(alignment)(x) or seg_match(alignment)(x) or ref_match(x, alignment, DS_HEAD_ATTRIBUTE))
# Next, does the type match ours?
if tier_type is not None:
filters.append(type_match(tier_type))
# Get the number of tiers that match this.
if not filters:
prev_tiers = []
num_tiers = 0
else:
prev_tiers = inst.findall(others=filters)
num_tiers = len(prev_tiers)
id_str = id_base
# Now, if we have specified the alignment, we also want to prepend
# that to the generated id string.
if alignment is not None:
if no_hyphenate:
return '{}{}'.format(alignment, id_str)
else:
id_str = '{}-{}'.format(alignment, id_str)
# Finally, if we have multiple tiers of the same type that annotate the
# same item, we should append a letter for the different analyses.
if num_tiers > 0 and inst.find(id=id_str) is not None:
while True:
letters = string.ascii_lowercase
assert num_tiers < 26, "More than 26 alternative analyses not currently supported"
potential_id = id_str + '_{}'.format(letters[num_tiers])
if inst.find(id=potential_id) is None:
id_str = potential_id
break
else:
num_tiers += 1
return id_str | f21b94677efe25e545d7efd99c68ed1722018c35 | 3,652,630 |
def create_temporary_file(filename, contents=""):
""" Decorator for constructing a file which is available
during a single test and is deleted afterwards.
Example usage::
@grader.test
@create_temporary_file('hello.txt', 'Hello world!')
def hook_test(m):
with open('hello.txt') as file:
txt = file.read()
"""
def _inner(test_function):
before_test(create_file(filename, contents))(test_function)
after_test(delete_file(filename))(test_function)
return test_function
return _inner | b4ce96e0d239acc379d78b7c13042cea5c0a4fe0 | 3,652,631 |
def find_post_translational_modifications(filter=None, page=0, pageSize=100): # noqa: E501
"""Find values for an specific property, for example possible taxonomy values for Organism property
# noqa: E501
:param filter: Keyword to filter the list of possible values
:type filter: str
:param page: Number of the page with the possible values for the property
:type page: int
:param pageSize: Number of values with the possible values for the property
:type pageSize: int
:rtype: List[PostTranslationalModification]
"""
unimod_database = UnimodDatabase()
l = unimod_database.search_mods_by_keyword(keyword=filter)
list_found = l[(page * pageSize):(page * pageSize) + pageSize]
return list_found | 9c9d196a7d0d3e8c3b2725247504cecf822ac541 | 3,652,632 |
def random_vector(A, b):
"""
Generates a random vector satisfying Ax <= b through rejection
sampling.
"""
dimension = A.shape[1]
not_feasible = True
while not_feasible == True:
config.reject_counter = config.reject_counter + 1
if config.reject_counter == config.milestone:
config.milestone = config.milestone * 10
print(config.reject_counter, 'random vectors have been generated so far')
rand_vec = np.random.uniform(-0.5, 0.5, dimension)
if np.all(np.dot(A, rand_vec) <= b) == True:
not_feasible = False
return rand_vec | e710ef0a3e49fc7834850465f11232df546b944d | 3,652,633 |
from .transform import mapi as _mapi
from typing import Callable
def mapi(mapper: Callable[[TSource, int], TResult]) -> Projection[TSource, TResult]:
"""Returns an observable sequence whose elements are the result of
invoking the mapper function and incorporating the element's index
on each element of the source."""
return _mapi(mapper)
def game_to_screen(position):
"""
Converts coordinates from game view into screen coordinates for mouse interaction
"""
return (GAME_LEFT + position[0], GAME_TOP + position[1]) | 2176d74a98db1e226dc960b14db35af303bfe9ec | 3,652,635 |
def get_graph_params(filename, nsize=1):
"""Load and process graph adjacency matrix and upsampling/downsampling matrices."""
data = np.load(filename, encoding='latin1')
A = data['A']
U = data['U']
D = data['D']
U, D = scipy_to_pytorch(A, U, D)
A = [adjmat_sparse(a, nsize=nsize) for a in A]
return A, U, D | 5c0671dbe7cd2f56aace9319f78289b1e34defa4 | 3,652,636 |
from . import computers
def _(dbmodel, backend):
"""
get_backend_entity for DummyModel DbComputer.
DummyModel instances are created when QueryBuilder queries the Django backend.
"""
djcomputer_instance = djmodels.DbComputer(
id=dbmodel.id,
uuid=dbmodel.uuid,
name=dbmodel.name,
hostname=dbmodel.hostname,
description=dbmodel.description,
transport_type=dbmodel.transport_type,
scheduler_type=dbmodel.scheduler_type,
metadata=dbmodel.metadata
)
return computers.DjangoComputer.from_dbmodel(djcomputer_instance, backend) | dd9dc5eeb0dcd54816675bd2dc19e5a0fc10a59a | 3,652,637 |
import six
def retrieve(filename, conf, return_format='dict', save_to_local=False, delete_remote=False, timeout=60):
"""Retrieving Processed Session File from server via sFTP
1. Get xml file string from server and return object
2. If save_to_local, save to local file system
Args:
filename: filename of file in outbound folder at remote server with '.asc' as extension.
conf: An instance of utils.Configuration.
return_format: Return format. The default is ‘dict’. Could be one of ‘dict’, ‘object’ or ‘xml’.
save_to_local: whether save file to local. default is false.
delete_remote: If delete the remote file after download. The default is False
timeout: Timeout in second for ssh connection for sftp.
Returns:
response XML in desired format.
Raises:
Exception depends on when get it.
"""
if not isinstance(conf, utils.Configuration):
raise utils.VantivException('conf must be an instance of utils.Configuration')
if not isinstance(filename, six.string_types) or len(filename) < 4:
raise utils.VantivException('filename must be a string, and at least 4 chars')
if not isinstance(timeout, six.integer_types) or timeout < 0:
raise utils.VantivException('timeout must be an positive int')
response_xml = _get_file_str_from_sftp(filename, conf, delete_remote, timeout)
if save_to_local:
_save_str_file(response_xml, conf.batch_response_path, filename)
return _generate_response(response_xml, return_format, conf) | 1ebf550b8a9be3019ed851a5e4571ed9a72f3e44 | 3,652,638 |
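# Usage sketch (hypothetical filename; `conf` must be a utils.Configuration instance from this SDK):
#     batch_response = retrieve('batch_20200101.asc', conf, return_format='dict', save_to_local=True)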
def get_simulate_func_options(
params,
options,
method="n_step_ahead_with_sampling",
df=None,
n_simulation_periods=None,
):
"""Rewrite respy's get_simulation_function such that options can be passed
and therefore the seed be changed before any run. Documentation is adapted
from :func:`respy.simulate.get_simulate_func()`
Parameters
----------
params : pandas.DataFrame
DataFrame containing the model parameters.
options : dict
Dictionary containing the model options.
method : {"n_step_ahead_with_sampling", "n_step_ahead_with_data", "one_step_ahead"}
The simulation method which can be one of three and is explained in more detail
in :func:`respy.simulate.simulate()`.
df : pandas.DataFrame or None, default None
DataFrame containing one or multiple observations per individual.
n_simulation_periods : int or None, default None
Simulate data for a number of periods. This options does not affect
``options["n_periods"]`` which controls the number of periods for which decision
rules are computed.
Returns
-------
simulate_function : :func:`simulate`
Simulation function where all arguments except the parameter vector
and the options are set.
"""
optim_paras, options = process_params_and_options(params, options)
n_simulation_periods, options = _harmonize_simulation_arguments(
method,
df,
n_simulation_periods,
options,
)
df = _process_input_df_for_simulation(df, method, options, optim_paras)
solve = get_solve_func(params, options)
n_observations = (
df.shape[0]
if method == "one_step_ahead"
else df.shape[0] * n_simulation_periods
)
shape = (n_observations, len(optim_paras["choices"]))
base_draws_sim = create_base_draws(
shape,
next(options["simulation_seed_startup"]),
"random",
)
base_draws_wage = create_base_draws(
shape,
next(options["simulation_seed_startup"]),
"random",
)
simulate_function = partial(
simulate,
base_draws_sim=base_draws_sim,
base_draws_wage=base_draws_wage,
df=df,
method=method,
n_simulation_periods=n_simulation_periods,
solve=solve,
)
return simulate_function | 9d77730facb29d460c958033873bb2ce02f5a9ed | 3,652,639 |
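# Usage sketch (hypothetical; `params` and `options` are a respy parameter DataFrame and options dict):
#     sim_func = get_simulate_func_options(params, options, method="n_step_ahead_with_sampling")
#     sim_df = sim_func(params)   # depending on the respy version, options may also need to be passed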
import re
def get_host_user_and_ssh_key_path(instance_name, project, zone):
"""Return a tuple of (hostname, username and ssh_key_path)."""
output = api.local(
'gcloud compute ssh --project "%s" --zone "%s" %s --dry-run' %
(project, zone, instance_name),
capture=True)
  print(output)
m = re.match('/usr/bin/ssh .*-i ([^ ]+)(?: -o [^ ]+)* ([^ ]+)@([^ ]+)',
output)
return (m.group(3), m.group(2), m.group(1)) | eabc88808fe0b73e9df507ea0397c3b9eb38a8de | 3,652,641 |
def TDataStd_TreeNode_Find(*args):
"""
* class methods working on the node =================================== Returns true if the tree node T is found on the label L. Otherwise, false is returned.
:param L:
:type L: TDF_Label &
:param T:
:type T: Handle_TDataStd_TreeNode &
:rtype: bool
"""
return _TDataStd.TDataStd_TreeNode_Find(*args) | 6c37e5f05627287eab4c4c13a21d92aa6e4e6a1a | 3,652,642 |
import numpy as np
import pandas as pd
def make_group_corr_mat(df):
"""
This function reads in each subject's aal roi time series files and creates roi-roi correlation matrices
for each subject and then sums them all together. The final output is a 3d matrix of all subjects
roi-roi correlations, a mean roi-roi correlation matrix and a roi-roi covariance matrix.
**NOTE WELL** This returns correlations transformed by the Fisher z, aka arctanh, function.
"""
# for each subject do the following
for i, (sub, f_id) in enumerate(df[['SUB_ID', 'FILE_ID']].values):
#read each subjects aal roi time series files
ts_df = pd.read_table('DATA/{}_rois_aal.1D'.format(f_id))
#create a correlation matrix from the roi all time series files
corr_mat_r = ts_df.corr()
#the correlations need to be transformed to Fisher z, which is
#equivalent to the arctanh function.
corr_mat_z = np.arctanh(corr_mat_r)
#for the first subject, add a correlation matrix of zeros that is the same dimensions as the aal roi-roi matrix
if i == 0:
all_corr_mat = np.zeros([corr_mat_z.shape[0], corr_mat_z.shape[1], len(df)])
#now add the correlation matrix you just created for each subject to the all_corr_mat matrix (3D)
all_corr_mat[:, :, i] = corr_mat_z
    #create the mean correlation matrix (ignore nas - sometimes there are some...)
    av_corr_mat = np.nanmean(all_corr_mat, axis=2)
    #create the group variance matrix (ignore nas - sometimes there are some...)
    var_corr_mat = np.nanvar(all_corr_mat, axis=2)
return all_corr_mat, av_corr_mat, var_corr_mat | 4d30136e8ce46e984c0039ddaca26efd04f231b9 | 3,652,643 |
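# Usage sketch (hypothetical IDs; assumes DATA/<FILE_ID>_rois_aal.1D time-series files are present):
#     pheno_df = pd.DataFrame({'SUB_ID': [50601, 50602], 'FILE_ID': ['Caltech_0051456', 'Caltech_0051457']})
#     all_corr, mean_corr, var_corr = make_group_corr_mat(pheno_df)
#     print(all_corr.shape)   # (n_rois, n_rois, n_subjects)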
import pymysql
import pandas as pd
def get_tradedate(begin, end):
"""
get tradedate between begin date and end date
Params:
begin:
str,eg: '1999-01-01'
end:
str,eg: '2017-12-31'
Return:
pd.DataFrame
"""
    conn = None  # initialise so the finally-clause is safe if connect() raises
    try:
        conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT calendar_date FROM trade_calendar WHERE is_trade_day= 1 AND \
calendar_date>='" + begin + "' AND calendar_date<='" + end + "';"
cursor.execute(query)
date = pd.DataFrame(list(cursor.fetchall()))
date.columns = ['date']
date = pd.DataFrame(pd.to_datetime(date['date']))
return date
finally:
if conn:
conn.close() | 9464caee65f12b9704e63068e159494baad25e6a | 3,652,644 |
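# Usage sketch (assumes the module-level `config` dict with pymysql connection settings used above):
#     trade_days = get_tradedate('2017-01-01', '2017-12-31')
#     print(trade_days['date'].dt.year.value_counts())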
def collect_tweet(update: Update, context: CallbackContext) -> int:
"""Tweet caption collection for tweet without attachments"""
    logger.info(f"'{update.message.text}' tweet type selected")
update.message.reply_text("Enter the tweet")
return TWEET | 6fc25efa4dc10f2316b70ff18f9a5c77b83c1e4a | 3,652,645 |
from typing import Dict
def get_unhandled_crictical_errors(req:HttpRequest, n:int):
"""
Preprocess errors before injection
and gets `n` unhandled errors
Typical Return Value if `n` errors were found...
{"es":[
{
"id": "192.168.1.51",
"title":"hey there"
}
]
}
"""
    errors: Dict = {}  # define return object
    # get the necessary errors
    errors_query = Error.objects.filter(isHandled=False, ecode__lt=2000, ecode__gt=1000)[:n]
    es_tmp: list = []
    for error in errors_query:
        e_tmp: dict = {}  # must be initialised, otherwise the assignments below raise NameError
        # e_tmp["id"] = error.victim
        code = error.ecode
        # if ecode > 1500, then it belongs to the child
        if 1500 < code < 2000:
            victim = Child.objects.filter(ip=error.victim).first()  # .filter() so that .first() is valid
            e_tmp["id"] = victim.nickname or victim.ip  # fall back to the ip when no nickname is set
        # if not then belongs to smart task
        elif 1000 < code < 1500:
            e_tmp["id"] = STask.objects.filter(sid=error.victim).first().name
        # rarely error record may be corrupted
        else:
            raise Exception(f"Given ecode {error.ecode} in the error {error.eid} obj is invalid")
        e_tmp["title"] = get_error_title(error.ecode)
        es_tmp.append(e_tmp)
# compile the return object
errors["es"] = es_tmp
del es_tmp
return JsonResponse(errors) | ae2afdeda89f9a9d946fa60a8ba5e15277388e50 | 3,652,646 |
def ADO_mappings(N, K, level_cutoff):
"""
ADO (auxilary density operators) are indexed by a N by (K + 1) matrix
consisting of non-negative integers.
ADO_mappings calculates all possible matrices "ado_index" of size
N by (K+1) where np.sum(m) < level_cutoff
Parameters
----------
N : integer
number of states
K : integer
number of exponentials to include in the spectral density
correlation function
level_cutoff : integer
number of levels at which to terminate the heiarchy expansion
Returns
-------
ind_to_mat : list of matrices
maps index to np.array
mat_to_ind : function
maps the np.array to the index
---------------------------------------------------------------------------
Define S to be the set of all matrices of size N by (K + 1) with
non-negative integer values.
Define level L_i as:
L_i = {m \in S | np.sum(m) == i}
L_i can be found using the multichoose function. We will preserve the order
that multichoose uses in ordering L_i
L_i corresponds to the set of ADOs in the ith heiarchy.
L_0 is a singleton set, corresponding to the RDO (reduced density matrix)
"""
bins = N * (K + 1)
permutations = []
for c in range(level_cutoff):
permutations.extend(multichoose(bins, c))
inverted_permutations = {tuple(v): i for i, v in enumerate(permutations)}
def mat_to_ind(mat):
"""maps np.array to index"""
vec = mat.flatten()
try:
return inverted_permutations[tuple(vec)]
except KeyError:
return None
ind_to_mat = [np.array(vec).reshape((N, K + 1)) for vec in permutations]
return ind_to_mat, mat_to_ind | a76da5569863ea8d17ec248eb09b2b6e5a300ad2 | 3,652,647 |
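# Usage sketch (relies on the `multichoose` helper from the same module): with 2 states, 1 exponential
# and a truncation level of 2 there are 1 + N*(K+1) = 5 ADOs, and the two mappings are inverses.
#     ind_to_mat, mat_to_ind = ADO_mappings(N=2, K=1, level_cutoff=2)
#     assert len(ind_to_mat) == 5 and mat_to_ind(ind_to_mat[3]) == 3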
import numpy as np
def f_beta(precision, recall, beta):
"""
Returns the F score for precision, recall and a beta parameter
:param precision: a double with the precision value
:param recall: a double with the recall value
:param beta: a double with the beta parameter of the F measure, which gives more or less weight to precision vs. recall
:return: a double value of the f(beta) measure.
"""
if np.isnan(precision) or np.isnan(recall) or (precision == 0 and recall == 0):
return np.nan
return ((1 + beta ** 2) * precision * recall) / (((beta ** 2) * precision) + recall) | be6c2b011c51d58d4b5f943671cd53b45632b48f | 3,652,648 |
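# Worked example: with beta = 1 this reduces to the harmonic mean of precision and recall,
# so f_beta(0.6, 0.3, 1) = 2 * 0.6 * 0.3 / (0.6 + 0.3) = 0.4; it is NaN when both inputs are zero.
assert abs(f_beta(0.6, 0.3, 1) - 0.4) < 1e-12
assert np.isnan(f_beta(0.0, 0.0, 2))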
import ntpath, os, yaml
def get_cfg(existing_cfg, _log):
"""
    Generates this module's default configuration by loading the sibling "<module>.yml" file.
"""
_sanity_check(existing_cfg, _log)
with open(os.path.join(os.path.dirname(__file__), "{}.yml".format(ntpath.basename(__file__).split(".")[0])),
'r') as stream:
try:
            ret = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            # re-raise with context instead of silently swallowing the error and leaving `ret` unbound
            raise AssertionError(
                "Default config yaml for '{}' not found!".format(os.path.splitext(__file__)[0])
            ) from exc
return ret | 3d69096ebc1b78ad52dcc5b35b225ccfea5ff189 | 3,652,650 |
import struct
def readShort(f):
"""Read 2 bytes as BE integer in file f"""
read_bytes = f.read(2)
return struct.unpack(">h", read_bytes)[0] | 1b31c2285d055df3c128e8158dcc67eb6c0a2b18 | 3,652,653 |
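# Example: ">h" unpacks two bytes as a signed big-endian integer, so 0x01 0x02 -> 258.
import io
assert readShort(io.BytesIO(b"\x01\x02")) == 258
assert readShort(io.BytesIO(b"\xff\xfe")) == -2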
def get_color(thing):
"""Get color for thing.
:param thing: Thing to get color for.
:return: Color tuple if rule exists otherwise None.
"""
for rule in _COLOR_RULES:
color = rule(thing)
if color is not None:
return color
return None | 79620c0ec8d5e9a153038b9b6a65f36158dce255 | 3,652,654 |
def build_table(infos):
""" Builds markdown table. """
table_str = '| '
for key in infos[0].keys():
table_str += key + ' | '
table_str += '\n'
table_str += '| '
for key in infos[0].keys():
table_str += '--- | '
table_str += '\n'
for info in infos:
table_str += '| '
for value in info.values():
table_str += str(value) + ' | '
table_str += '\n'
return table_str | 8d31e6abc9edd0014acbac3570e4a2bc711baa4a | 3,652,655 |
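# Example: every dict is assumed to share the keys of the first one; the result is a markdown table like:
# | name | score |
# | --- | --- |
# | alpha | 1 |
# | beta | 2 |
rows = [{'name': 'alpha', 'score': 1}, {'name': 'beta', 'score': 2}]
print(build_table(rows))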
import six
import threading
def notify_telegram(title, content, token=None, chat=None, mention_user=None, **kwargs):
"""
Sends a telegram notification and returns *True* on success. The communication with the telegram
API might have some delays and is therefore handled by a thread.
"""
# test import
cfg = Config.instance()
# get default token and chat
if not token:
token = cfg.get_expanded("notifications", "telegram_token")
if not chat:
chat = cfg.get_expanded("notifications", "telegram_chat")
if not token or not chat:
logger.warning("cannot send Telegram notification, token ({}) or chat ({}) empty".format(
token, chat))
return False
# append the user to mention to the title
# unless explicitly set to empty string
mention_text = ""
if mention_user is None:
mention_user = cfg.get_expanded("notifications", "telegram_mention_user")
if mention_user:
mention_text = " (@{})".format(mention_user)
# request data for the API call
request = {
"parse_mode": "Markdown",
}
# standard or attachment content?
if isinstance(content, six.string_types):
request["text"] = "{}{}\n\n{}".format(title, mention_text, content)
else:
# content is a dict, add some formatting
request["text"] = "{}{}\n\n".format(title, mention_text)
for key, value in content.items():
request["text"] += "_{}_: {}\n".format(key, value)
# extend by arbitrary kwargs
request.update(kwargs)
# threaded, non-blocking API communication
thread = threading.Thread(target=_notify_telegram, args=(token, chat, request))
thread.start()
return True | a736025f5c6a6acff634f325ecbad0e591f30174 | 3,652,656 |
from datetime import timedelta
def convert_to_dapr_duration(td: timedelta) -> str:
"""Converts date.timedelta to Dapr duration format.
Args:
td (datetime.timedelta): python datetime object.
Returns:
str: dapr duration format string.
"""
total_minutes, secs = divmod(td.total_seconds(), 60.0)
hours, mins = divmod(total_minutes, 60.0)
return f'{hours:.0f}h{mins:.0f}m{secs:.0f}s' | 729cde6d2dccea1c8fa36eec506ee8ee6ea34b6e | 3,652,657 |
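# Example: 1 hour, 2 minutes and 3 seconds becomes '1h2m3s' in Dapr's duration format.
assert convert_to_dapr_duration(timedelta(hours=1, minutes=2, seconds=3)) == '1h2m3s'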
def get_slot(handler_input, slot_name):
# type: (HandlerInput, AnyStr) -> Optional[Slot]
"""Return the slot information from intent request.
The method retrieves the slot information
:py:class:`ask_sdk_model.slot.Slot` from the input intent request
for the given ``slot_name``. More information on the slots can be
found here :
https://developer.amazon.com/docs/custom-skills/request-types-reference.html#slot-object
If there is no such slot, then a ``None``
is returned. If the input request is not an
:py:class:`ask_sdk_model.intent_request.IntentRequest`, a
:py:class:`TypeError` is raised.
:param handler_input: The handler input instance that is generally
passed in the sdk's request and exception components
:type handler_input: ask_sdk_core.handler_input.HandlerInput
:param slot_name: Name of the slot that needs to be retrieved
:type slot_name: str
:return: Slot information for the provided slot name if it exists,
or a `None` value
:rtype: Optional[ask_sdk_model.slot.Slot]
:raises: TypeError if the input is not an IntentRequest
"""
request = handler_input.request_envelope.request
if isinstance(request, IntentRequest):
if request.intent.slots is not None:
return request.intent.slots.get(slot_name, None)
else:
return None
raise TypeError("The provided request is not an IntentRequest") | c564f3b82fb21c12b81d1fda0214c330e7355080 | 3,652,658 |
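# Usage sketch inside a custom skill's intent handler (handler_input is supplied by the ASK SDK):
#     slot = get_slot(handler_input, slot_name="city")
#     city = slot.value if slot is not None else None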
import hashlib
from functools import lru_cache
from typing import Callable, TypeVar
TDigestable = TypeVar("TDigestable")  # any hashlib-style hasher exposing .digest()
def password_to_key(
hash_implementation: Callable[[bytes], TDigestable], padding_length: int
) -> Callable[[bytes, bytes], bytes]:
"""
Create a helper function to convert passwords to SNMP compliant keys
according to :rfc:`3414`.
>>> hasher = password_to_key(hashlib.sha1, 20)
>>> key = hasher(b"mypasswd", b"target-engine-id")
>>> key.hex()
'999ec23ca66b9d3f187ab5208840c30b0450b452'
:param hash_implementation: A callable that creates an object with a
".digest()" method from a bytes-object. Usable examples are
`hashlib.md5` and `hashlib.sha1`
:param padding_length: The padding length to be used during hashing (as
defined in the SNMP rfc)
:returns: A callable which can be used to derive an SNMP compliant key
from a password.
"""
@lru_cache(maxsize=None)
def hasher(password: bytes, engine_id: bytes) -> bytes:
"""
Derive a key from a password and engine-id.
:param password: The user password
:param engine_id: The target engine ID
:returns: The derived key
"""
# Repeat the password for a total of 1MB worth of data (as per SNMP rfc)
hash_size = 1024 * 1024
num_words = hash_size // len(password)
tmp = (password * (num_words + 1))[:hash_size]
hash_instance = hash_implementation(tmp)
key = hash_instance.digest()
localised_buffer = (
key[:padding_length] + engine_id + key[:padding_length]
)
final_key = hash_implementation(localised_buffer).digest()
return final_key
hasher.__name__ = f"<hasher:{hash_implementation}>" # type: ignore
return hasher | 3f638afaa5c950f70edf39ca699701ec1709729e | 3,652,659 |
import re
import xml.etree.ElementTree as ET
def categorize_tag_key_characters(OSM_FILE = "data\\round_rock.xml", category = 'Summary'):
"""Categorizes attributes into those with:
all lower character, all lower after colon(:),
containing special/problem characters and
all all others that were not listed in above
which includes uppercase characters and/or
multiple colons.
Keyword arguments:
OSM_File -- .osm or .xml file (default "data\\round_rock.xml")
category -- print specific keys of categories of characters from regex search
(default 'Summary' ['All', 'lower', 'lower_colon', 'porblemchars', 'other'])
"""
if category == 'All':
        category = ('lower', 'lower_colon', 'problemchars', 'other')
category_list = list(category)
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
lower_set = set()
lower_colon_set = set()
problemchars_set = set()
other_set = set()
def key_type(element, keys):
if element.tag == "tag":
if lower.match(element.attrib['k']):
lower_set.add(element.attrib['k'])
keys["lower"] += 1
elif lower_colon.match(element.attrib['k']):
lower_colon_set.add(element.attrib['k'])
keys["lower_colon"] += 1
elif problemchars.match(element.attrib['k']):
problemchars_set.add(element.attrib['k'])
keys["problemchars"] += 1
else:
other_set.add(element.attrib['k'])
keys["other"] += 1
return keys
def process_map(filename):
keys = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0}
for _, element in ET.iterparse(filename):
keys = key_type(element, keys)
print(keys)
print(
"\nThere are:\n\
{} unique keys in lower,\n\
{} unique keys in lower_colon,\n\
{} unique keys in problemchars and\n\
{} unique keys in other.\n"
.format(len(lower_set), len(lower_colon_set), len(problemchars_set), len(other_set))
)
if 'lower' in category_list:
print('\n\nlower set has {} items. The unique items are: \n\n{} \n\n'
.format(keys["lower"], sorted(lower_set)))
if 'lower_colon' in category_list:
print('lower_colon set has {} items. The unique items are: \n\n{} \n\n'
.format(keys["lower_colon"], sorted(lower_colon_set)))
if 'problemchars' in category_list:
print('problemchars set has {} items. The unique items are: \n\n{} \n\n'
.format(keys["problemchars"], sorted(problemchars_set)))
if 'other' in category_list:
print('other set has {} items. The unique items are: \n\n{} \n\n'
.format(keys["other"], sorted(other_set)))
return keys
keys_dicts = process_map(OSM_FILE)
return keys_dicts | 4e2f6c6a24a14114ce8f5c8d2855847859ad4d8f | 3,652,660 |
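# Usage sketch (assumes the OSM extract exists at the default path):
#     key_counts = categorize_tag_key_characters("data\\round_rock.xml", category="problemchars")
#     print(key_counts["problemchars"])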
def rotate_left(value, count, nbits, offset):
"""
Rotate a value to the left (or right)
@param value: value to rotate
@param count: number of times to rotate. negative counter means
rotate to the right
@param nbits: number of bits to rotate
@param offset: offset of the first bit to rotate
@return: the value with the specified field rotated
all other bits are not modified
"""
assert offset >= 0, "offset must be >= 0"
assert nbits > 0, "nbits must be > 0"
mask = 2**(offset+nbits) - 2**offset
tmp = value & mask
if count > 0:
        for x in range(count):
if (tmp >> (offset+nbits-1)) & 1:
tmp = (tmp << 1) | (1 << offset)
else:
tmp = (tmp << 1)
else:
        for x in range(-count):
if (tmp >> offset) & 1:
tmp = (tmp >> 1) | (1 << (offset+nbits-1))
else:
tmp = (tmp >> 1)
value = (value-(value&mask)) | (tmp & mask)
return value | ed24a0a958bed1ab1a01c4a858bfba0fd163e2fd | 3,652,661 |
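# Worked example: rotating the low 4 bits of 0b0110 left by one position gives 0b1100,
# and bits outside the selected field are left untouched.
assert rotate_left(0b0110, 1, 4, 0) == 0b1100
assert rotate_left(0b10110, 1, 4, 0) == 0b11100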